id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/Lotlan_Scheduler-1.1.2.tar.gz/Lotlan_Scheduler-1.1.2/lotlan_scheduler/logger/sqlite_logger.py |
# standard libraries
from pathlib import Path
import hashlib
import time
import threading
# 3rd party packages
from sqlalchemy import create_engine, MetaData, Table
# globals defines
from lotlan_scheduler.defines import SQLCommands
# Module-level locks serializing SQLiteLogger's check-then-insert sequences,
# one lock per table/operation family (shared by all logger instances).
init_lock = threading.Lock()      # guards engine setup / schema creation
mf_lock = threading.Lock()        # guards materialflow lookup+insert
to_lock = threading.Lock()        # guards transport_order_ids lookup+insert
location_lock = threading.Lock()  # guards location lookup+insert
class SQLiteLogger():
    """
    Establishes a SQLite connection and inserts logging data.

    The schema is created on first use (i.e. when the database file does
    not exist yet). The module-level locks serialize the
    lookup-then-insert sequences so concurrent callers cannot create
    duplicate rows.
    """

    def __init__(self, dabase_path=SQLCommands.DATABASE_PATH):
        # NOTE(review): parameter name 'dabase_path' (sic) is kept as-is so
        # keyword callers do not break; fixing the typo would change the API.
        with init_lock:
            database_file = Path(dabase_path)
            # Only create the schema when the db file is not already there.
            create_tables = not database_file.is_file()
            self.database_engine = create_engine("sqlite:///" + dabase_path, echo=False)
            self.metadata = MetaData(bind=self.database_engine)
            self.con = self.database_engine.connect()
            # Cache: materialflow uuid (str) -> materialflow_instance row id
            self.mf_uuid_to_mf_instance_id = {}
            if create_tables:
                self.database_engine.execute(SQLCommands.CREATE_MATERIALFLOW_TABLE)
                self.database_engine.execute(SQLCommands.CREATE_MATERIALFLOW_INSTANCE_TABLE)
                self.database_engine.execute(SQLCommands.CREATE_TRANSPORT_ORDER_TABLE)
                self.database_engine.execute(SQLCommands.CREATE_TRANSPORT_ORDER_IDS_TABLE)
                self.database_engine.execute(SQLCommands.CREATE_LOCATION_TABLE)

    def insert_materialflow_in_sql(self, mf_uuid, lotlan_string):
        """
        Inserts materialflow information into the tables
        'materialflow' and 'materialflow_instance'.

        Returns the materialflow_instance id if no error occurred,
        None otherwise. (Bug fix: the id was documented as returned
        but the original never returned it.)
        """
        materialflow_table = Table("materialflow", self.metadata, autoload=True)
        # Identical LoTLan programs share one 'materialflow' row, keyed by md5 hash.
        lotlan_hash = hashlib.md5(lotlan_string.encode()).hexdigest()
        mf_uuid = str(mf_uuid)
        materialflow_id = None
        materialflow_instance_id = None
        with mf_lock:
            select_stmt = materialflow_table.select(materialflow_table.c.hash == lotlan_hash)
            result = select_stmt.execute().first()
            if result is None:
                result = self.con.execute(materialflow_table.insert(),
                                          lotlan=lotlan_string, hash=lotlan_hash)
                materialflow_id = result.lastrowid
            else:
                materialflow_id = result.id
            if materialflow_id is not None:
                now = int(time.time())  # time in utc
                materialflow_instance_table = Table("materialflow_instance",
                                                    self.metadata, autoload=True)
                result = self.con.execute(materialflow_instance_table.insert(),
                                          materialflow_id=materialflow_id,
                                          uuid=str(mf_uuid),
                                          timestamp=now)
                materialflow_instance_id = result.lastrowid
                self.mf_uuid_to_mf_instance_id[mf_uuid] = materialflow_instance_id
        return materialflow_instance_id

    def insert_transport_order(self, mf_uuid, to_uuid, state, pickup, delivery):
        """
        Inserts a TransportOrder state into the database.
        pickup and delivery have to be Location objects.
        """
        now = int(time.time())  # time in utc
        mf_uuid = str(mf_uuid)
        # Resolve foreign keys used by the transport_order table
        transport_uuid = self.get_transport_uuid(to_uuid)
        pickup_id = self.get_location_id(pickup)
        delivery_id = self.get_location_id(delivery)
        mf_instance_id = self.mf_uuid_to_mf_instance_id[mf_uuid]
        transport_order_table = Table("transport_order", self.metadata, autoload=True)
        self.con.execute(transport_order_table.insert(),
                         materialflow_id=mf_instance_id,
                         timestamp=now,
                         transport_uuid=transport_uuid,
                         state=state,
                         location_id_pickup=pickup_id,
                         location_id_delivery=delivery_id)

    def get_transport_uuid(self, to_uuid):
        """
        Searches for the foreign key of the given TransportOrder.
        If no entry is found a new one is inserted.
        """
        transport_order_ids_table = Table("transport_order_ids", self.metadata, autoload=True)
        transport_uuid = None
        to_uuid = str(to_uuid)
        with to_lock:
            select_stmt = transport_order_ids_table.select(transport_order_ids_table.c.uuid == to_uuid)
            result = select_stmt.execute().first()
            if result is None:
                result = self.con.execute(transport_order_ids_table.insert(), uuid=to_uuid)
                transport_uuid = result.lastrowid
            else:
                transport_uuid = result.id
        return transport_uuid

    def get_location_id(self, location):
        """
        Searches for the foreign key for the given location.
        If no entry is found a new one is inserted.
        Returns the foreign key if no error occurred and the location
        object is defined; returns None otherwise.
        """
        location_id = None
        if location:
            location_table = Table("location", self.metadata, autoload=True)
            with location_lock:
                select_stmt = location_table.select(location_table.c.logical_name == location.logical_name)
                result = select_stmt.execute().first()
                if result is None:
                    insert_result = self.con.execute(location_table.insert(),
                                                     logical_name=location.logical_name,
                                                     physical_name=location.physical_name,
                                                     location_type=location.location_type)
                    location_id = insert_result.lastrowid
                else:
                    location_id = result.id
        return location_id
/GPy_ABCD-1.2.1-py3-none-any.whl/GPy_ABCD/KernelExpressions/commutative_base.py | from abc import abstractmethod
from collections import Counter
from itertools import chain
from GPy_ABCD.KernelExpressions.base import KernelExpression
from GPy_ABCD.KernelExpansion.kernelOperations import non_stationary_kerns, base_k_param_names
from GPy_ABCD.KernelExpansion.kernelInterpretation import *
from GPy_ABCD.Util.genericUtil import partition, eq_elems
class SumOrProductKE(KernelExpression):  # Abstract
    """
    Abstract kernel-expression node for commutative operators (sum/product).

    Terms come in two kinds:
      - base_terms: a Counter mapping base-kernel name -> multiplicity
      - composite_terms: child KernelExpression subtrees
    Subclasses supply 'symbol' (display operator), 'GPy_name' and the
    simplify_base_terms()/to_kernel()/sum_of_prods_form() behaviour.
    """

    def __init__(self, base_terms, composite_terms=None, root: KernelExpression = None, parent: KernelExpression = None, symbol=None, GPy_name=None):
        # Bug fix: 'composite_terms' used a shared mutable default list ([]);
        # default to None and create a fresh list instead (same semantics,
        # callers passing a list explicitly are unaffected).
        if composite_terms is None:
            composite_terms = []
        super().__init__(root, parent, GPy_name)
        self.base_terms = deepcopy(base_terms) if isinstance(base_terms, Counter) else Counter(base_terms)
        self.composite_terms = deepcopy(composite_terms)
        self.symbol = symbol
        # self.simplify_base_terms() # Activate only this instead of full simplify for some testing
        self.simplify()
        for ct in self.composite_terms: ct.set_parent(self).set_root(self.root)

    def __str__(self):
        # Base terms first (in canonical kernel order), then composites.
        return (' ' + self.symbol + ' ').join([self.bracket_if_needed(f) for f in order_base_kerns(list(self.base_terms.elements())) + self.composite_terms])

    def __repr__(self):
        res = type(self).__name__ + '([' + ', '.join(["'" + bt + "'" for bt in self.base_terms.elements()]) + ']'
        cts = ', [' + ', '.join([ct.__repr__() for ct in self.composite_terms]) + ']' if self.composite_terms else ''
        return res + cts + ')'

    def __eq__(self, other):  ## NOTE: this is intended to check equality of data fields only, i.e. it does not check root or parent
        return type(self) == type(other) and self.base_terms == other.base_terms and eq_elems(self.composite_terms, other.composite_terms)

    @staticmethod
    def bracket_if_needed(kex):
        return str(kex)

    def simplify(self):
        # Bottom-up: simplify children first, then flatten/absorb at this level.
        self.composite_terms = [ct.simplify() for ct in self.composite_terms]
        return self.absorb_homogeneous_composites().absorb_singletons().simplify_base_terms()

    @abstractmethod
    def simplify_base_terms(self):
        pass

    def absorb_singletons(self):
        # Composites that reduced to a single base term become base terms here.
        (bts, self.composite_terms) = partition(lambda x: isinstance(x, str), [ct.extract_if_singleton() for ct in self.composite_terms])
        self.new_base(bts)
        return self

    def _is_singleton(self):
        return sum(self.base_terms.values()) + len(self.composite_terms) == 1

    def extract_if_singleton(self):  # This modifies the composite_child's parent if that kind of singleton
        if sum(self.base_terms.values()) == 1 and len(self.composite_terms) == 0: return list(self.base_terms.elements())[0]
        elif sum(self.base_terms.values()) == 0 and len(self.composite_terms) == 1: return self.composite_terms[0].set_parent(self.parent)
        else: return self

    def absorb_homogeneous_composites(self):
        # Children of the same operator type are flattened into this node.
        homogeneous_composites = [ct for ct in self.composite_terms if isinstance(ct, type(self))]  # Annoyingly less problematic than lambda-using-self filter
        for hc in homogeneous_composites:
            self.base_terms.update(hc.base_terms)
            self.simplify_base_terms()
            for hcct in hc.composite_terms: hcct.parent = self
            self.composite_terms += hc.composite_terms
            self.composite_terms.remove(hc)
        return self

    def traverse(self):
        return [self] + list(chain.from_iterable([ct.traverse() for ct in self.composite_terms]))

    def reduce(self, func, acc):
        # Pre-order fold over the subtree.
        return reduce(lambda acc2, ct: ct.reduce(func, acc2), self.composite_terms, func(self, acc))

    def set_root(self, new_root=None):
        if new_root is None: new_root = self
        self.root = new_root
        for ct in self.composite_terms: ct.set_root(new_root)
        return self

    def _set_all_parents(self):
        for ct in self.composite_terms:
            ct.parent = self
            ct._set_all_parents()
        return self

    def _check_all_parents(self):
        return all([ct.parent is self and ct._check_all_parents() for ct in self.composite_terms])

    def reassign_child(self, old_child, new_child):
        self.composite_terms.remove(old_child)
        self.composite_terms.append(new_child)  # NOT A deepcopy!
        return new_child  # NOTE THIS RETURN VALUE (used by new_tree_with_self_replaced)

    def new_base(self, new_base_terms):
        if isinstance(new_base_terms, str):
            self.base_terms[new_base_terms] += 1
        else:  # list or Counter
            self.base_terms.update(new_base_terms)
        self.simplify_base_terms()
        return self

    def new_composite(self, new_composite_terms):
        if isinstance(new_composite_terms, KernelExpression):
            self.composite_terms += [deepcopy(new_composite_terms).set_parent(self).set_root(self.root)]
        else:  # list
            self.composite_terms += [deepcopy(nct).set_parent(self).set_root(self.root) for nct in new_composite_terms]
        return self

    def term_count(self):
        return sum(self.base_terms.values()) + len(self.composite_terms)

    def contains_base(self, bts):
        if not isinstance(bts, list): bts = [bts]
        return any([bt in self.base_terms for bt in bts]) or any([ct.contains_base(bts) for ct in self.composite_terms])

    def is_stationary(self):
        return all([ns not in self.base_terms for ns in non_stationary_kerns]) and all([ct.is_stationary() for ct in self.composite_terms])

    @abstractmethod
    def to_kernel(self):
        pass

    # Methods for after fit

    def match_up_fit_parameters(self, param_dict, prefix=''):
        # Walk the tree matching GPy's flattened parameter names back onto terms.
        if self.is_root(): prefix += '' if self._is_singleton() else self.GPy_name + '.'
        elif prefix == '': raise ValueError('No prefix but not root node in match_up_fit_parameters')
        seen_terms = Counter([])
        for bt in list(self.base_terms.elements()):
            seen_terms.update([bt])
            # Repeated terms get a numeric suffix in GPy's naming scheme.
            postfix = '_' + str(seen_terms[bt] - 1) + '.' if seen_terms[bt] > 1 else '.'
            self.parameters[bt].append({ p: param_dict[p_full] for p in base_k_param_names[bt]['parameters']
                                         for p_full in [prefix + base_k_param_names[bt]['name'] + postfix + p]  # Clunky 'let' assignment; for Python 3.8+: 'if (p_full := ...)'
                                         if not (p == 'variance' and p_full not in param_dict) })  # I.e. skip variances if absent
        for ct in self.composite_terms:
            seen_terms.update([ct.GPy_name])
            postfix = '_' + str(seen_terms[ct.GPy_name] - 1) + '.' if seen_terms[ct.GPy_name] > 1 else '.'
            ct.match_up_fit_parameters(param_dict, prefix + ct.GPy_name + postfix)
        return self

    @abstractmethod
    def sum_of_prods_form(self):
        pass
/Cibyl-1.0.0.0rc1.tar.gz/Cibyl-1.0.0.0rc1/docs/source/sources/elasticsearch.rst | Elasticsearch
=============
The Elasticsearch source pulls data from the different indexes of the Elasticsearch database.
Usage
^^^^^
The following is a configuration sample showing how to configure the Elasticsearch source:
.. include:: ../config_samples/elasticsearch_configuration.rst
Fields
^^^^^^
Elasticsearch should include the following fields in order to be fully operational:
- job_name
- build_number
- build_result
- current_build_result
Plugin Support
^^^^^^^^^^^^^^
The Elasticsearch source is supported by the following built-in plugins:
* OpenStack
| PypiClean |
/BrewPi-Remix-0.0.0.1.tar.gz/BrewPi-Remix-0.0.0.1/utils/updater.py |
# Copyright (C) 2018, 2019 Lee C. Bussy (@LBussy)
# This file is part of LBussy's BrewPi Script Remix (BrewPi-Script-RMX).
#
# BrewPi Script RMX is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# BrewPi Script RMX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BrewPi Script RMX. If not, see <https://www.gnu.org/licenses/>.
# These scripts were originally a part of brewpi-script, an installer for
# the BrewPi project. Legacy support (for the very popular Arduino
# controller) seems to have been discontinued in favor of new hardware.
# All credit for the original brewpi-script goes to @elcojacobs,
# @m-mcgowan, @rbrady, @steersbob, @glibersat, @Niels-R and I'm sure
# many more contributors around the world. My apologies if I have
# missed anyone; those were the names listed as contributors on the
# Legacy branch.
# See: 'original-license.md' for notes about the original project's
# license and credits
############
### Init
############
# standard libraries
import argparse
import grp
import os
import pwd
import stat
import subprocess
import sys
from pprint import pprint as pp
from time import localtime, strftime
#import urllib2
# 3rd party packages
import git  # module itself needed for git.GitCommandError references below
from git import Repo
import requests
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..") # append parent directory to be able to import files
try:
import BrewPiUtil
from BrewPiUtil import addSlash, printStdErr, printStdOut, stopThisChamber, scriptPath, readCfgWithDefaults, removeDontRunFile
import brewpiVersion
except ImportError as e:
print("Not part of a BrewPi Git repository, error:\n{0}".format(e), file=sys.stderr)
# Configuration items
# THISBRANCH is substituted with the active branch name in updateMeAndRun()
rawurl = "https://raw.githubusercontent.com/brewpi-remix/brewpi-script-rmx/THISBRANCH/utils/updater.py"
tmpscriptname = "tmpUpdate.py" # Name of script running from GitHub
scriptname = "updater.py" # Name of core script
usedts = False # Use Date/Time stamps on log lines (set via --datetime)
#### ********************************************************************
####
#### IMPORTANT NOTE: I don't care if you play with the code, but if
#### you do, please comment out the next lines. Otherwise I will
#### receive a notice for every mistake you make.
####
#### ********************************************************************
# import sentry_sdk
# sentry_sdk.init("https://5644cfdc9bd24dfbaadea6bc867a8f5b@sentry.io/1803681")
def logMessage(*objs):
    """Write *objs* to stdout, prefixed with a timestamp when 'usedts' is set."""
    prefix = (strftime("%Y-%m-%d %H:%M:%S "),) if usedts else ()
    printStdOut(*(prefix + objs))
def logError(*objs):
    """Write *objs* to stderr, prefixed with a timestamp when 'usedts' is set."""
    prefix = (strftime("%Y-%m-%d %H:%M:%S "),) if usedts else ()
    printStdErr(*(prefix + objs))
def stopBrewPi(scriptPath, wwwPath):  # Quits all running instances of BrewPi
    """
    Stop any running BrewPi instances for this chamber.

    Returns the tri-state result of stopThisChamber:
      True  -> was running and has been stopped (restart after the update)
      False -> could not be stopped
      None  -> was (probably) not running (nothing to restart)
    """
    print("\nStopping running instances of BrewPi.")
    result = stopThisChamber(scriptPath, wwwPath)
    if result is True:
        return True
    if result is False:
        return False
    return None
def updateMeAndRun(scriptpath, args) -> bool:  # Pull down current version and run it instead
    """
    Download the current version of this script from GitHub (for the
    currently checked-out branch) and execute it in place of this one.

    Arguments:
    - scriptpath: directory of the local git repository
    - args: the original sys.argv; args[1:] are forwarded to the new script
    Returns True on success, False on any download/write/exec failure.
    """
    retval = True
    global rawurl
    global scriptname
    tmpscript = os.path.join(scriptpath, tmpscriptname)
    repo = Repo(scriptpath)
    branch = repo.active_branch
    # Fetch the raw script that matches the branch we are currently on
    url = rawurl.replace("THISBRANCH", str(branch))
    response = requests.get(url)
    if response.status_code == 200:
        logMessage("Downloading current version of this script.")
        try:
            owner = 'brewpi'
            group = 'brewpi'
            uid = pwd.getpwnam(owner).pw_uid  # Get UID
            gid = grp.getgrnam(group).gr_gid  # Get GID
            # 775 (bug fix: the original OR-ed S_IROTH|S_IXOTH twice)
            filemode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
            # Bug fix: use a context manager so the handle is closed even
            # when write() raises (the original leaked it on failure).
            with open(tmpscript, 'w') as file:
                file.write(response.text)
            os.chown(tmpscript, uid, gid)  # chown downloaded script
            os.chmod(tmpscript, filemode)  # chmod downloaded script
        except Exception as e:
            logError("Failed to write temp file, error: {0}".format(e))
            retval = False
    else:
        logError("Failed to download update script from GitHub.")
        retval = False
    if retval:
        logMessage("Executing online version of script.")
        arguments = []
        if args[1:]:
            arguments = args[1:]
        arguments.insert(0, tmpscript)
        print("DEBUG: Process args = {0}".format(arguments))
        try:
            # Relies on the downloaded script's shebang + exec bit set above
            pout = subprocess.run(arguments)
            if pout.returncode > 0:
                retval = False  # Error
        except Exception as e:
            logError("Failed to execute online file, error: {0}".format(e))
            retval = False
    return retval
def getRepoName(url: str) -> str:
    """Extract the repository name from a git URL: the text between the
    last '/' and a trailing '.git' (or end of string when absent)."""
    slash_idx = url.rfind("/")
    suffix_idx = url.rfind(".git")
    if suffix_idx < 0:
        suffix_idx = len(url)
    if slash_idx < 0 or suffix_idx <= slash_idx:
        logError("Badly formatted url: '{}'".format(url))
    return url[slash_idx + 1:suffix_idx]
def checkRoot():  # Determine if we are running as root or not
    """Return True when running with effective uid 0 (root), else False."""
    return os.geteuid() == 0
def deleteFile(file):  # Delete a file
    """
    Delete *file* if it exists.

    Returns True when the file was removed, False when it did not exist.
    Bug fix: uses EAFP (try/remove) instead of the original
    exists-then-remove sequence, which raced with concurrent deleters.
    """
    try:
        os.remove(file)
        return True
    except FileNotFoundError:
        return False
def doArgs(scriptpath) -> bool:
    """
    Parse command-line arguments.

    --version prints the newest tag of the local repo and exits,
    --ask makes this function return True (prompt for branch choices),
    --datetime is accepted here but acted on by the caller.
    Returns True when the user asked to choose branches, else False.
    """
    helptext = ("This script will update your current chamber to the latest version,\nor allow "
                "you to change your current branch. Be sure to run as root or with sudo.")
    parser = argparse.ArgumentParser(description=helptext)
    parser.add_argument("-v", "--version", help="show current version and exit", action="store_true")
    parser.add_argument("-a", "--ask", help="ask which branch to check out", action="store_true")
    parser.add_argument("-d", "--datetime", help="use date/time stamp on logs", action="store_true")
    args = parser.parse_args()
    if args.version:
        repo = Repo(scriptpath)
        tags = repo.tags
        latest_tag = tags[len(tags) - 1]
        url = ""
        for remote in repo.remotes:
            url = remote.url  # Assuming only one remote at this time
        print("Current version of '{0}': {1}.".format(getRepoName(url), latest_tag))
        exit(0)
    return bool(args.ask)
def refreshBranches() -> bool:
    """
    Fetch all remote branch refs into the current repository.

    Returns True on success, False when either git command fails.
    """
    logMessage("Refreshing branch information.")
    commands = (
        ["git", "config", "remote.origin.fetch", "+refs/heads/*:refs/remotes/origin/*"],
        ["git", "fetch", "--all"],
    )
    for cmd in commands:
        result = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if result.returncode > 0:
            return False  # git command failed
    return True  # Ok
def banner(thisscript, adj):
    """Log a start/complete banner line for *thisscript*."""
    logMessage("\n***Script %s %s.***" % (thisscript, adj))
def runAfterUpdate(scriptpath):  # Handle dependencies update and cleanup
    """
    Run utils/doDepends.sh to refresh dependencies after a code update.

    Returns True on success, False when the helper script fails.
    """
    logMessage("Updating dependencies as needed.")
    depends_script = os.path.join(scriptpath, "utils/doDepends.sh")
    result = subprocess.run(["bash", depends_script])
    if result.returncode > 0:
        logError("Updating dependencies failed.")
        return False  # Error
    return True
def check_repo(repo):  # Check most recent commit date on the repo passed to it
    """
    Compare the given git repo against its remote and offer to update it.

    Interactive: prompts the user (via input) for remote choice, branch
    choice and whether to merge when the remote has newer commits.
    Returns True when the repo was updated or a different branch was
    checked out, False otherwise.

    Bug fixes vs. original: Python-2 raw_input() replaced with input()
    (raw_input is a NameError under Python 3); a bare 'return' after a
    failed stash now returns False; a checkout that keeps failing after a
    stash no longer loops forever.

    NOTE(review): this function reads a module-global 'userInput' that is
    not defined anywhere in this file -- confirm it is set by the caller
    before the interactive paths are exercised.
    """
    updated = False
    localBranch = repo.active_branch.name
    newBranch = localBranch
    remoteRef = None
    print("You are on branch " + localBranch)
    # Nudge non-interactive runs back onto the stable branch
    if not localBranch in ["master", "legacy"] and not userInput:
        print("Your checked out branch is not master, our stable release branch.")
        print("It is highly recommended that you switch to the stable master branch.")
        choice = input("Would you like to do that? [Y/n]: ")
        if choice in ("", "yes", "Yes", "YES", "y", "Y"):
            print("Switching branch to master.")
            newBranch = "master"
    ### Get available remotes
    remote = repo.remotes[0]  # default to first found remote
    if userInput and len(repo.remotes) > 1:
        print("Multiple remotes found in " + repo.working_tree_dir)
        for i, rem in enumerate(repo.remotes):
            print("[%d] %s" % (i, rem.name))
        print("[" + str(len(repo.remotes)) + "] Skip updating this repository.")
        while 1:
            try:
                choice = input("From which remote do you want to update? [%s]: " % remote)
                if choice == "":
                    print("Updating from default remote %s." % remote)
                    break
                else:
                    selection = int(choice)
            except ValueError:
                print("Use the number!")
                continue
            if selection == len(repo.remotes):
                return False  # choice = skip updating
            try:
                remote = repo.remotes[selection]
            except IndexError:
                print("Not a valid selection. Try again.")
                continue
            break
    repo.git.fetch(remote.name, "--prune")
    ### Get available branches on the remote
    try:
        remoteBranches = remote.refs
    except AssertionError as e:
        print("Failed to get references from remote: " + repr(e))
        print("Aborting update of " + repo.working_tree_dir)
        return False
    if userInput:
        print("\nAvailable branches on the remote '%s' for %s: " % (remote.name, repo.working_tree_dir))
    # Bug fix: filter HEAD out with a comprehension instead of popping
    # from the list while iterating over it.
    remoteBranches = [ref for ref in remoteBranches if "/HEAD" not in ("%s" % ref)]
    for i, ref in enumerate(remoteBranches):
        remoteRefName = "%s" % ref
        remoteBranchName = remoteRefName.replace(remote.name + "/", "")
        if remoteBranchName == newBranch:
            remoteRef = ref
        if userInput:
            print("[%d] %s" % (i, remoteBranchName))
    if userInput:
        print("[" + str(len(remoteBranches)) + "] Skip updating this repository.")
        while 1:
            try:
                choice = input("Enter the number of the branch you wish to update [%s]: " % localBranch)
                if choice == "":
                    print("Keeping current branch %s" % localBranch)
                    break
                else:
                    selection = int(choice)
            except ValueError:
                print("Please make a valid choice.")
                continue
            if selection == len(remoteBranches):
                return False  # choice = skip updating
            try:
                remoteRef = remoteBranches[selection]
            except IndexError:
                print("Not a valid selection. Try again.")
                continue
            break
    if remoteRef is None:
        print("Could not find branch selected branch on remote. Aborting.")
        return False
    remoteBranch = ("%s" % remoteRef).replace(remote.name + "/", "")
    checkedOutDifferentBranch = False
    if localBranch != remoteBranch:
        print("The " + remoteBranch + " branch is not your currently active branch - ")
        choice = input("would you like me to check it out for you now? (Required to continue) [Y/n]: ")
        if choice in ("", "yes", "Yes", "YES", "y", "Y"):
            stashedForCheckout = False
            while True:
                try:
                    if remoteBranch in repo.branches:
                        print(repo.git.checkout(remoteBranch))
                    else:
                        print(repo.git.checkout(remoteRef, b=remoteBranch))
                    print("Successfully switched to " + remoteBranch)
                    checkedOutDifferentBranch = True
                    break
                except git.GitCommandError as e:
                    if not stashedForCheckout and \
                            "Your local changes to the following files would be overwritten by checkout" in str(e):
                        print("Local changes exist in your current files that need to be stashed to continue.")
                        if not stashChanges(repo):
                            return False
                        print("Trying to checkout again.")
                        stashedForCheckout = True  # keep track of stashing, so it is only tried once
                        continue  # retry after stash
                    # Non-stashable error, or checkout failed again after stashing
                    print(e)
                    print("I was unable to checkout. Please try it manually from the command line and\nre-run this tool.")
                    return False
        else:
            print("Skipping this branch.")
            return False
    if remoteRef is None:
        print("Error: Could not determine which remote reference to use, aborting.")
        return False
    localDate = repo.head.commit.committed_date
    localDateString = strftime("%a, %d %b %Y %H:%M:%S", localtime(localDate))
    localSha = repo.head.commit.hexsha
    localName = repo.working_tree_dir
    remoteDate = remoteRef.commit.committed_date
    remoteDateString = strftime("%a, %d %b %Y %H:%M:%S", localtime(remoteDate))
    remoteSha = remoteRef.commit.hexsha
    remoteName = remoteRef.name
    alignLength = max(len(localName), len(remoteName))
    print("The latest commit in " + localName.ljust(alignLength) + " is " + localSha + " on " + localDateString)
    print("The latest commit on " + remoteName.ljust(alignLength) + " is " + remoteSha + " on " + remoteDateString)
    if localDate < remoteDate:
        print("*** Updates are available ****")
        choice = input("Would you like to update " + localName + " from " + remoteName + " [Y/n]: ")
        if choice in ("", "yes", "Yes", "YES", "y", "Y"):
            updated = update_repo(repo, remote.name, remoteBranch)
    else:
        print("Your local version of " + localName + " is up to date.")
    return updated or checkedOutDifferentBranch
def stashChanges(repo):  # Stash any local repo changes
    """
    Offer to 'git stash' local changes that block a merge/checkout.

    Prompts the user; configures a global git user.name/user.email if
    missing (stashing requires them). Returns True when the stash
    succeeded, False when the user declined or the stash failed.

    Bug fix vs. original: Python-2 raw_input() replaced with input().
    """
    print("\nYou have local changes in this repository, that are prevent a successful merge.\n" +
          "These changes can be stashed to bring your repository back to its original\n" +
          "state so we can merge.\n" +
          "Your changes are not lost, but saved on the stash. You can (optionally) get\n" +
          "them back later with 'git stash pop'.")
    choice = input("Would you like to stash local changes? (Required to continue) [Y/n]: ")
    if choice not in ("", "yes", "Yes", "YES", "y", "Y"):
        print("Changes are not stashed, cannot continue without stashing. Aborting update.")
        return False
    print("Attempting to stash any changes.\n")
    # git refuses to stash without an identity configured
    try:
        repo.git.config('--get', 'user.name')
    except git.GitCommandError:
        print("Warning: No user name set for git, which is necessary to stash.")
        print("--> Please enter a global username for git on this system:")
        userName = input()
        repo.git.config('--global', 'user.name', userName)
    try:
        repo.git.config('--get', 'user.email')
    except git.GitCommandError:
        print("Warning: No user e-mail address set for git, which is necessary to stash.")
        print("--> Please enter a global user e-mail address for git on this system: ")
        userEmail = input()
        repo.git.config('--global', 'user.email', userEmail)
    try:
        resp = repo.git.stash()
        print("\n" + resp + "\n")
        print("Stash successful.")
        print("##################################################################")
        print("#Your local changes were in conflict with the last update of code.#")
        print("##################################################################")
        print("The conflict was:\n")
        print("-------------------------------------------------------")
        print(repo.git.stash("show", "--full-diff", "stash@{0}"))
        print("-------------------------------------------------------")
        print("\nTo make merging possible, these changes were stashed.\n" +
              "To merge the changes back in, you can use 'git stash pop'.\n" +
              "Only do this if you really know what you are doing. Your\n" +
              "changes might be incompatible with the update or could\n" +
              "cause a new merge conflict.")
        return True
    except git.GitCommandError as e:
        print(e)
        print("Unable to stash, don't want to overwrite your stuff, aborting this branch\nupdate.")
        return False
def update_repo(repo, remote, branch):  # Update a branch passed to it
    """
    Fetch and merge remote/branch into the current branch, offering to
    stash local changes when they block the merge.
    Returns True on a successful merge, False otherwise.

    Bug fix vs. original: a merge failure that was NOT a local-changes
    conflict fell through and reported '<branch> updated.' / True; it
    now aborts with False.
    """
    repo.git.fetch(remote, branch)
    try:
        print(repo.git.merge(remote + '/' + branch))
    except git.GitCommandError as e:
        print(e)
        if "Your local changes to the following files would be overwritten by merge" not in str(e):
            return False  # unrelated merge failure: do not claim success
        if not stashChanges(repo):
            return False
        print("Trying to merge again.")
        try:
            print(repo.git.merge(remote + '/' + branch))
        except git.GitCommandError as e:
            print(e)
            print("Sorry, cannot automatically stash/discard local changes. Aborting.")
            return False
    print(branch + " updated.")
    return True
def main(args):
    """
    Entry point: stop BrewPi, download the newest updater from GitHub and
    re-run it (which then performs the repository updates), restart
    BrewPi and refresh dependencies.

    Arguments:
    - args: sys.argv; forwarded to the downloaded script.
    Returns True on success, False on failure. Must run as root.
    """
    retval = True
    if not checkRoot():
        logError("Must run as root or with sudo.")
        retval = False
    else: # Running as root/sudo
        global tmpscriptname
        thisscript = os.path.basename(__file__)
        scriptpath = addSlash(scriptPath())
        configfile = os.path.join(scriptpath, "settings/config.cfg")
        config = readCfgWithDefaults(configfile)
        wwwpath = config['wwwPath']
        # Check command line arguments
        userinput = doArgs(scriptpath)
        # When running under the downloaded temp name, we ARE the online
        # version: perform the actual update work.
        if thisscript == tmpscriptname:
            # This is the online version, do update
            # Delete the temp script before we do the update
            deleteFile(os.path.join(scriptpath, thisscript))
            if userinput:
                refreshBranches() # Make sure all remote branches are present
                # TODO: Change branch
                logMessage("DEBUG: Running tempscript, userinput = True")
            else:
                logMessage("DEBUG: Running tempscript, userinput = False")
            # TODO: Loop through directories to do an update
            logMessage("DEBUG: Should be looping through and updating repositories here")
            #getrepos "$@" # Get list of repositories to update
            #if [ -d "$toolPath" ]; then process "$toolPath"; fi # Check and process updates
            #if [ -d "$SCRIPTPATH" ]; then process "$SCRIPTPATH"; fi # Check and process updates
            #if [ -d "$wwwPath" ]; then process "$wwwPath"; fi # Check and process updates
        else: # Download temp file and run it
            banner(scriptname, "starting")
            restart = stopBrewPi(scriptpath, wwwpath)
            if restart:
                # Get the latest update script and run it instead
                # NOTE(review): 'arg' is assigned but never used; presumably
                # it was meant to forward --ask to the downloaded script --
                # confirm and wire it into the updateMeAndRun() call.
                arg = None
                if userinput:
                    arg = "--ask"
                if not updateMeAndRun(scriptpath, args):
                    retval = False
                else:
                    logMessage("Refresh your browser with ctrl-F5 if open.")
                    removeDontRunFile(os.path.join(wwwpath, "do_not_run_brewpi"))
                    runAfterUpdate(scriptpath)
                    # flash # Offer to flash controller
                    banner(scriptname, "complete")
            else:
                logError("Unable to stop running BrewPi.")
                retval = False
    return retval
if __name__ == '__main__':
    # Translate main()'s boolean result into a process exit code.
    sys.exit(0 if main(sys.argv) else 1)
/DI_engine-0.4.9-py3-none-any.whl/ding/data/storage_loader.py | from dataclasses import dataclass
import os
import torch
import numpy as np
import uuid
import treetensor.torch as ttorch
from abc import ABC, abstractmethod
from ditk import logging
from time import sleep, time
from threading import Lock, Thread
from typing import Any, Callable, Dict, List, Optional, Union
from ding.data import FileStorage, Storage
from os import path
from ding.data.shm_buffer import ShmBuffer
from ding.framework.supervisor import RecvPayload, Supervisor, ChildType, SendPayload
@dataclass
class ShmObject:
    """Shared-memory handle pair used by StorageLoader worker processes."""
    # id_ is a ShmBuffer holding a random integer tag (see _random_id);
    # buf is the (possibly nested dict/list) payload of ShmBuffer objects.
    id_: ShmBuffer
    buf: Any
class StorageWorker:
    """Subprocess worker that loads payload data from a Storage object."""

    def load(self, storage: Storage) -> Any:
        """Load and return the object persisted in *storage*."""
        return storage.load()
class StorageLoader(Supervisor, ABC):
def __init__(self, worker_num: int = 3) -> None:
"""
Overview:
Save and send data synchronously and load them asynchronously.
Arguments:
- worker_num (:obj:`int`): Subprocess worker number.
"""
super().__init__(type_=ChildType.PROCESS)
self._load_lock = Lock() # Load (first meet) should be called one by one.
self._callback_map: Dict[str, Callable] = {}
self._shm_obj_map: Dict[int, ShmObject] = {}
self._worker_num = worker_num
self._req_count = 0
def shutdown(self, timeout: Optional[float] = None) -> None:
super().shutdown(timeout)
self._recv_loop = None
self._callback_map = {}
self._shm_obj_map = {}
self._req_count = 0
def start_link(self) -> None:
if not self._running:
super().start_link()
self._recv_loop = Thread(target=self._loop_recv, daemon=True)
self._recv_loop.start()
@property
def _next_proc_id(self):
return self._req_count % self._worker_num
@abstractmethod
def save(self, obj: Union[Dict, List]) -> Storage:
"""
Overview:
Save data with a storage object synchronously.
Arguments:
- obj (:obj:`Union[Dict, List]`): The data (traj or episodes), can be numpy, tensor or treetensor.
Returns:
- storage (:obj:`Storage`): The storage object.
"""
raise NotImplementedError
def load(self, storage: Storage, callback: Callable):
"""
Overview:
Load data from a storage object asynchronously. \
This function will analysis the data structure when first meet a new data, \
then alloc a shared memory buffer for each subprocess, these shared memory buffer \
will be responsible for asynchronously loading data into memory.
Arguments:
- storage (:obj:`Storage`): The storage object.
- callback (:obj:`Callable`): Callback function after data loaded.
"""
with self._load_lock:
if not self._running:
self._first_meet(storage, callback)
return
payload = SendPayload(proc_id=self._next_proc_id, method="load", args=[storage])
self._callback_map[payload.req_id] = callback
self.send(payload)
self._req_count += 1
def _first_meet(self, storage: Storage, callback: Callable):
"""
Overview:
When first meet an object type, we'll load this object directly and analysis the structure,
to allocate the shared memory object and create subprocess workers.
Arguments:
- storage (:obj:`Storage`): The storage object.
- callback (:obj:`Callable`): Callback function after data loaded.
"""
obj = storage.load()
# Create three workers for each usage type.
for i in range(self._worker_num):
shm_obj = self._create_shm_buffer(obj)
self._shm_obj_map[i] = shm_obj
self.register(StorageWorker, shm_buffer=shm_obj, shm_callback=self._shm_callback)
self.start_link()
callback(obj)
def _loop_recv(self):
    """Background thread: receive worker replies and dispatch pending callbacks."""
    while True:
        payload = self.recv(ignore_err=True)
        if payload.err:
            logging.warning("Got error when loading data: {}".format(payload.err))
            # Discard the pending callback; it will never be invoked.
            self._callback_map.pop(payload.req_id, None)
            continue
        # Restore the bulk data from the worker's shm buffer first.
        self._shm_putback(payload, self._shm_obj_map[payload.proc_id])
        callback = self._callback_map.pop(payload.req_id, None)
        if callback is not None:
            callback(payload.data)
def _create_shm_buffer(self, obj: Union[Dict, List]) -> Optional[ShmObject]:
    """
    Overview:
        Create shared object (buf and callback) by walk through the data structure.
    Arguments:
        - obj (:obj:`Union[Dict, List]`): The data (traj or episodes), can be numpy, tensor or treetensor.
    Returns:
        - shm_buf (:obj:`Optional[ShmObject]`): The shared memory buffer.
    """
    # Mirror the structure only down to this nesting depth; deeper values
    # are transferred by ordinary pickling instead of shared memory.
    max_level = 2

    def to_shm(obj: Dict, level: int):
        if level > max_level:
            return
        shm_buf = None
        if isinstance(obj, Dict) or isinstance(obj, ttorch.Tensor):
            shm_buf = {}
            for key, val in obj.items():
                # Only numpy array can fill into shm buffer
                if isinstance(val, np.ndarray):
                    shm_buf[key] = ShmBuffer(val.dtype, val.shape, copy_on_get=False)
                elif isinstance(val, torch.Tensor):
                    # Buffers store raw numpy data; tensors are converted on fill.
                    shm_buf[key] = ShmBuffer(
                        val.numpy().dtype, val.numpy().shape, copy_on_get=False, ctype=torch.Tensor
                    )
                # Recursive parsing structure
                elif isinstance(val, Dict) or isinstance(val, ttorch.Tensor) or isinstance(val, List):
                    buf = to_shm(val, level=level + 1)
                    if buf:
                        shm_buf[key] = buf
        elif isinstance(obj, List):
            # Double the size of buffer so later payloads may contain up to
            # twice as many items as the first object seen.
            # NOTE(review): `list * 2` repeats *references* -- slot i and slot
            # i + len(obj) share the same sub-buffer objects. Confirm payload
            # lists never actually exceed the original length, otherwise the
            # two halves would clobber each other.
            shm_buf = [to_shm(o, level=level) for o in obj] * 2
            if all(s is None for s in shm_buf):
                shm_buf = []
        return shm_buf

    shm_buf = to_shm(obj, level=0)
    if shm_buf is not None:
        # id_ doubles as a handshake token between main process and worker;
        # see _shm_callback / _shm_putback.
        random_id = self._random_id()
        shm_buf = ShmObject(id_=ShmBuffer(random_id.dtype, random_id.shape, copy_on_get=False), buf=shm_buf)
    return shm_buf
def _random_id(self) -> np.ndarray:
    """Return a 1-element array holding a random nonzero token in [1, 9e6)."""
    upper = int(9e6)
    return np.random.randint(1, upper, size=(1))
def _shm_callback(self, payload: RecvPayload, shm_obj: ShmObject):
    """
    Overview:
        Called in subprocess, put payload.data into buf.
    Arguments:
        - payload (:obj:`RecvPayload`): The recv payload with meta info of the data.
        - shm_obj (:obj:`ShmObject`): The shm buffer.
    """
    assert isinstance(payload.data, type(
        shm_obj.buf
    )), "Data type ({}) and buf type ({}) are not match!".format(type(payload.data), type(shm_obj.buf))
    # Sleep while shm object is not ready.
    # id_ == 0 means the main process has drained the previous payload
    # (_shm_putback resets it to 0); busy-wait until the buffer is free.
    while shm_obj.id_.get()[0] != 0:
        sleep(0.001)
    # Must match the depth limit used when the buffer was built in
    # _create_shm_buffer.
    max_level = 2

    def shm_callback(data: Union[Dict, List, ttorch.Tensor], buf: Union[Dict, List], level: int):
        if level > max_level:
            return
        if isinstance(buf, List):
            assert isinstance(data, List), "Data ({}) and buf ({}) type not match".format(type(data), type(buf))
        elif isinstance(buf, Dict):
            assert isinstance(data, ttorch.Tensor) or isinstance(
                data, Dict
            ), "Data ({}) and buf ({}) type not match".format(type(data), type(buf))
        if isinstance(data, Dict) or isinstance(data, ttorch.Tensor):
            for key, val in data.items():
                if isinstance(val, torch.Tensor):
                    val = val.numpy()
                buf_val = buf.get(key)
                if buf_val is None:
                    continue
                if isinstance(buf_val, ShmBuffer) and isinstance(val, np.ndarray):
                    # Copy the array into shared memory and null the entry so
                    # the bulk data is not *also* pickled through the pipe.
                    buf_val.fill(val)
                    data[key] = None
                else:
                    shm_callback(val, buf_val, level=level + 1)
        elif isinstance(data, List):
            for i, data_ in enumerate(data):
                # Lists do not consume a nesting level (mirrors _create_shm_buffer).
                shm_callback(data_, buf[i], level=level)

    shm_callback(payload.data, buf=shm_obj.buf, level=0)
    # Stamp a fresh token into the shm header and mirror it on the payload so
    # the main process can pair this buffer state with this exact payload.
    id_ = self._random_id()
    shm_obj.id_.fill(id_)
    payload.extra = id_
def _shm_putback(self, payload: RecvPayload, shm_obj: ShmObject):
    """
    Overview:
        Called in main process, put buf back into payload.data.
    Arguments:
        - payload (:obj:`RecvPayload`): The recv payload with meta info of the data.
        - shm_obj (:obj:`ShmObject`): The shm buffer.
    """
    assert isinstance(payload.data, type(
        shm_obj.buf
    )), "Data type ({}) and buf type ({}) are not match!".format(type(payload.data), type(shm_obj.buf))
    # The token was written by _shm_callback in the worker; a mismatch means
    # the buffer currently holds a different payload's data.
    assert shm_obj.id_.get()[0] == payload.extra[0], "Shm object and payload do not match ({} - {}).".format(
        shm_obj.id_.get()[0], payload.extra[0]
    )

    def shm_putback(data: Union[Dict, List], buf: Union[Dict, List]):
        if isinstance(data, Dict) or isinstance(data, ttorch.Tensor):
            for key, val in data.items():
                buf_val = buf.get(key)
                if buf_val is None:
                    continue
                # Entries nulled by the worker are restored from shared memory.
                if val is None and isinstance(buf_val, ShmBuffer):
                    data[key] = buf[key].get()
                else:
                    shm_putback(val, buf_val)
        elif isinstance(data, List):
            for i, data_ in enumerate(data):
                shm_putback(data_, buf[i])

    shm_putback(payload.data, buf=shm_obj.buf)
    # Reset the token to 0: signals the worker the buffer may be reused.
    shm_obj.id_.fill(np.array([0]))
class FileStorageLoader(StorageLoader):

    def __init__(self, dirname: str, ttl: int = 20, worker_num: int = 3) -> None:
        """
        Overview:
            Dump and load object with file storage.
        Arguments:
            - dirname (:obj:`str`): The directory to save files.
            - ttl (:obj:`int`): Maximum time in seconds to keep a file, after which it will be deleted.
            - worker_num (:obj:`int`): Number of subprocess worker loaders.
        """
        super().__init__(worker_num)
        self._dirname = dirname
        self._files = []  # FIFO of [creation_time, path], oldest first.
        self._cleanup_thread = None
        self._ttl = ttl  # Files older than ttl seconds are removed by the cleanup loop.

    def save(self, obj: Union[Dict, List]) -> FileStorage:
        """
        Overview:
            Pickle ``obj`` into a uniquely named file and schedule it for cleanup.
        Arguments:
            - obj (:obj:`Union[Dict, List]`): The data to persist.
        Returns:
            - storage (:obj:`FileStorage`): Storage handle pointing at the new file.
        """
        # makedirs(..., exist_ok=True) avoids the race where several processes
        # create the directory concurrently (os.mkdir would raise
        # FileExistsError) and also creates missing parent directories.
        os.makedirs(self._dirname, exist_ok=True)
        filename = "{}.pkl".format(uuid.uuid1())
        full_path = path.join(self._dirname, filename)
        f = FileStorage(full_path)
        f.save(obj)
        self._files.append([time(), f.path])
        self._start_cleanup()
        return f

    def _start_cleanup(self):
        """
        Overview:
            Start a cleanup thread (at most one) to delete files that have
            outlived their ttl on disk.
        """
        if self._cleanup_thread is None:
            self._cleanup_thread = Thread(target=self._loop_cleanup, daemon=True)
            self._cleanup_thread.start()

    def shutdown(self, timeout: Optional[float] = None) -> None:
        super().shutdown(timeout)
        # NOTE(review): the daemon cleanup thread keeps looping until process
        # exit; dropping the handle only allows a fresh thread to be started
        # by a later save().
        self._cleanup_thread = None

    def _loop_cleanup(self):
        """
        Overview:
            Poll the FIFO of saved files and delete each file ttl seconds
            after its creation time.
        """
        while True:
            # Oldest entry not yet expired (or nothing saved): sleep and retry.
            if len(self._files) == 0 or time() - self._files[0][0] < self._ttl:
                sleep(1)
                continue
            _, file_path = self._files.pop(0)
            if path.exists(file_path):
                os.remove(file_path)
/GSAS-II-WONDER_linux-1.0.1.tar.gz/GSAS-II-WONDER_linux-1.0.1/GSAS-II-WONDER/imports/G2img_Rigaku.py | from __future__ import division, print_function
import os
import GSASIIobj as G2obj
import GSASIIpath
import numpy as np
GSASIIpath.SetVersionNumber("$Revision: 3136 $")
class Rigaku_ReaderClass(G2obj.ImportImage):
    '''Routine to read a Rigaku R-Axis IV image file: a 6000-byte header
    followed by 2-byte unsigned pixels (1500x1500, 3000x3000 or 6000x6000).
    '''
    def __init__(self):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this reader is ever subclassed.
        super(Rigaku_ReaderClass, self).__init__(
            extensionlist=('.stl',),
            strictExtension=True,
            formatName = 'Rigaku image',
            longFormatName = 'Read Rigaku R-Axis IV image file'
            )

    def ContentsValidator(self, filename):
        '''Test by checking if the file size makes sense: the pixel section
        must contain an even number of bytes forming exactly 1500**2,
        3000**2 or 6000**2 two-byte pixels.
        '''
        fileSize = os.stat(filename).st_size
        # An odd pixel section cannot be a whole number of 2-byte pixels
        # (the old float division rejected these implicitly).
        if (fileSize - 6000) % 2 != 0:
            return False
        Npix = (fileSize - 6000) // 2
        if Npix in (9000000, 2250000, 36000000):
            return True
        return False  # not valid size

    def Reader(self, filename, ParentFrame=None, **unused):
        '''Read the image via GetRigaku and register it with the framework.'''
        self.Comments, self.Data, self.Npix, self.Image = GetRigaku(filename)
        if self.Npix == 0 or not self.Comments:
            return False
        self.LoadImage(ParentFrame, filename)
        return True
def GetRigaku(filename, imageOnly=False):
    '''Read a Rigaku R-Axis IV image file.

    :param str filename: path to the image file (6000-byte header, then
        2-byte unsigned-short pixels).
    :param bool imageOnly: if True return only the image array, otherwise
        return (head, data, Npix, image).
    '''
    import array as ar
    if not imageOnly:
        print ('Read Rigaku R-Axis IV file: '+filename)
    fileSize = os.stat(filename).st_size
    # Integer pixel count; '//' keeps Npix an int instead of a float.
    Npix = (fileSize-6000)//2
    # 'with' guarantees the handle is closed even if reading/conversion fails
    # (the original leaked the handle on any exception before File.close()).
    with open(filename, 'rb') as File:
        File.read(6000)   # skip the header
        head = ['Rigaku R-Axis IV detector data',]
        image = np.array(ar.array('H', File.read(fileSize-6000)), dtype=np.int32)
    print ('%s %s'%(fileSize,str(image.shape)))
    print (head)
    # Map pixel count to detector geometry (size in pixels, pixel pitch in microns).
    if Npix == 9000000:
        sizexy = [3000,3000]
        pixSize = [100.,100.]
    elif Npix == 2250000:
        sizexy = [1500,1500]
        pixSize = [200.,200.]
    else:
        sizexy = [6000,6000]
        pixSize = [50.,50.]
    image = np.reshape(image,(sizexy[1],sizexy[0]))
    data = {'pixelSize':pixSize,'wavelength':1.5428,'distance':250.0,'center':[150.,150.],'size':sizexy}
    if imageOnly:
        return image
    else:
        return head,data,Npix,image
/Chiplotle-0.4.1.tar.gz/Chiplotle-0.4.1/chiplotle/geometry/core/coordinatearray.py | from chiplotle.geometry.core.coordinate import Coordinate
from chiplotle.geometry.core.coordinatearraypropertiesmixin import \
CoordinateArrayPropertiesMixin
import numpy as np
class CoordinateArray(CoordinateArrayPropertiesMixin):
    '''An ordered, mutable sequence of Coordinate objects supporting
    elementwise arithmetic (+, -, *, /) with scalars and Coordinates.'''

    __slots__ = ('_data', )

    def __init__(self, coords=None):
        '''`coords` is a list of Coordinate objs or iterables.'''
        if coords is None:
            coords = [ ]
        self._data = [Coordinate(*list(p)) for p in coords]

    ## PUBLIC PROPERTIES ##

    @property
    def ndim(self):
        '''Dimensionality of the stored coordinates, or None when empty.'''
        return len(self._data[0]) if self._data else None

    @property
    def dtype(self):
        '''The numpy dtype of the coordinate values.'''
        coords = [list(c) for c in self._data]
        return np.array(coords).dtype

    @property
    def coords(self):
        '''Per-axis lists of values, e.g. [[x0, x1, ...], [y0, y1, ...]].'''
        return np.array(self._data).transpose().tolist()

    @property
    def x(self):
        '''Tuple of all first-axis values.'''
        return tuple(self.coords[0] if self.coords else ())

    @property
    def y(self):
        '''Tuple of all second-axis values.'''
        return tuple(self.coords[1] if self.coords else ())

    ## METHODS ##

    def append(self, arg):
        '''Append a single Coordinate; raises TypeError for anything else.'''
        if not isinstance(arg, Coordinate):
            raise TypeError('arg must be a Coordinate')
        self._data.append(arg)

    def extend(self, arg):
        '''Extend with a CoordinateArray, or a list/tuple of Coordinates.'''
        if isinstance(arg, CoordinateArray):
            self._data.extend(arg._data)
        elif isinstance(arg, (list, tuple)):
            for e in arg:
                self.append(e)
        else:
            raise TypeError('`arg` must be a list or CoordinateArray.')

    ## OVERRIDES ##

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        return 'CoordinateArray(%s)' % self._data

    def __str__(self):
        return 'CoordinateArray(%s)' % ', '.join([str(coord) for coord in self._data])

    ## accessors / modifiers ##

    def __iter__(self):
        for c in self._data:
            yield c

    def __delitem__(self, i):
        del(self._data[i])

    def __getitem__(self, arg):
        return self._data[arg]

    def __setitem__(self, i, arg):
        if isinstance(i, int):
            if not isinstance(arg, Coordinate):
                raise TypeError
            self._data[i] = arg
        else:
            # NOTE(review): slice assignment honors only start/stop and
            # ignores any step -- confirm this is intended.
            arg = [Coordinate(*list(coord)) for coord in arg]
            self._data[i.start : i.stop] = arg

    ## math ##

    ## addition ##

    def __add__(self, arg):
        '''Add a Coordinate (to every element) or another equal-length array.'''
        if isinstance(arg, Coordinate):
            return CoordinateArray([coord + arg for coord in self._data])
        if isinstance(arg, CoordinateArray):
            if len(self) != len(arg):
                raise ValueError("CoordinateArrays must have same length.")
            coords = [a + b for a, b in zip(self._data, arg._data)]
            return CoordinateArray(coords)
        raise TypeError('Unknown type for CoordinateArray addition')

    def __radd__(self, arg):
        return self + arg

    def __iadd__(self, arg):
        self._data = (self + arg)._data
        return self

    ## substraction ##

    def __sub__(self, arg):
        return self + (-arg)

    ## division ##

    def __truediv__(self, arg):
        # BUGFIX: this used to delegate to `self / arg`, which under
        # Python 3 resolves right back to __truediv__ and recurses
        # forever. Divide each element directly instead.
        return CoordinateArray([a / arg for a in self._data])

    # Python 2 classic-division operator: same semantics here.
    __div__ = __truediv__

    def __idiv__(self, arg):
        self._data = (self / arg)._data
        return self

    ## multiplication ##

    def __mul__(self, arg):
        return CoordinateArray([a * arg for a in self._data])

    def __rmul__(self, arg):
        return self * arg

    def __imul__(self, arg):
        self._data = (self * arg)._data
        return self

    ## ##

    def __eq__(self, arg):
        try:
            return self._data == arg._data
        except AttributeError:
            return False

    def __ne__(self, arg):
        return not (self == arg)

    def __neg__(self):
        return CoordinateArray([-c for c in self])

    def __invert__(self):
        '''Returns the perpendiculars of the Coordinates contained in self.'''
        if self.ndim != 2:
            raise ValueError('inversion only works on 2D currently.')
        return CoordinateArray([~v for v in self])
if __name__ == '__main__':
    # Smoke-test / usage example. print() calls work on both Python 2
    # (single argument) and Python 3; the previous `print ca` statements
    # were syntax errors under Python 3.
    ca = CoordinateArray([(1, 2), (3, 4)])
    print(ca)
    print(ca.coords)
    print(ca.x)
    print(ca.y)
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/SVG/fonts/TeX/Size1/Regular/Main.js | MathJax.OutputJax.SVG.FONTDATA.FONTS.MathJax_Size1={directory:"Size1/Regular",family:"MathJax_Size1",id:"MJSZ1",32:[0,0,250,0,0,""],40:[850,349,458,152,422,"152 251Q152 646 388 850H416Q422 844 422 841Q422 837 403 816T357 753T302 649T255 482T236 250Q236 124 255 19T301 -147T356 -251T403 -315T422 -340Q422 -343 416 -349H388Q359 -325 332 -296T271 -213T212 -97T170 56T152 251"],41:[850,349,458,35,305,"305 251Q305 -145 69 -349H56Q43 -349 39 -347T35 -338Q37 -333 60 -307T108 -239T160 -136T204 27T221 250T204 473T160 636T108 740T60 807T35 839Q35 850 50 850H56H69Q197 743 256 566Q305 425 305 251"],47:[850,349,578,55,522,"481 838Q489 850 500 850Q508 850 515 844T522 827Q521 824 311 248T96 -337Q90 -349 77 -349Q68 -349 62 -343T55 -326Q56 -323 266 253T481 838"],91:[850,349,417,202,394,"202 -349V850H394V810H242V-309H394V-349H202"],92:[850,349,578,54,522,"522 -326Q522 -337 515 -343T500 -349Q487 -349 481 -337Q477 -328 267 248T55 827Q54 835 60 842T76 850Q89 850 96 838Q100 829 310 253T522 -326"],93:[850,349,417,22,214,"22 810V850H214V-349H22V-309H174V810H22"],123:[851,349,583,105,477,"477 -343L471 -349H458Q432 -349 367 -325T273 -263Q258 -245 250 -212L249 -51Q249 -27 249 12Q248 118 244 128Q243 129 243 130Q220 189 121 228Q109 232 107 235T105 250Q105 256 105 257T105 261T107 265T111 268T118 272T128 276T142 283T162 291Q224 324 243 371Q243 372 244 373Q248 384 249 469Q249 475 249 489Q249 528 249 552L250 714Q253 728 256 736T271 761T299 789T347 816T422 843Q440 849 441 849H443Q445 849 447 849T452 850T457 850H471L477 844V830Q477 820 476 817T470 811T459 807T437 801T404 785Q353 760 338 724Q333 710 333 550Q333 526 333 492T334 447Q334 393 327 368T295 318Q257 280 181 255L169 251L184 245Q318 198 332 112Q333 106 333 -49Q333 -209 338 -223Q351 -255 391 -277T469 -309Q477 -311 477 -329V-343"],125:[850,349,583,105,477,"110 849L115 850Q120 850 125 850Q151 850 215 826T309 764Q324 747 332 
714L333 552Q333 528 333 489Q334 383 338 373Q339 372 339 371Q353 336 391 310T469 271Q477 268 477 251Q477 241 476 237T472 232T456 225T428 214Q357 179 339 130Q339 129 338 128Q334 117 333 32Q333 26 333 12Q333 -27 333 -51L332 -212Q328 -228 323 -240T302 -271T255 -307T175 -338Q139 -349 125 -349T108 -346T105 -329Q105 -314 107 -312T130 -304Q233 -271 248 -209Q249 -203 249 -49V57Q249 106 253 125T273 167Q307 213 398 245L413 251L401 255Q265 300 250 389Q249 395 249 550Q249 710 244 724Q224 774 112 811Q105 813 105 830Q105 845 110 849"],710:[744,-551,556,-8,564,"279 669Q273 669 142 610T9 551L0 569Q-8 585 -8 587Q-8 588 -7 588L12 598Q30 608 66 628T136 666L277 744L564 587L555 569Q549 556 547 554T544 552Q539 555 410 612T279 669"],732:[722,-597,556,1,554,"374 597Q337 597 269 627T160 658Q101 658 34 606L24 597L12 611Q1 624 1 626Q1 627 27 648T55 671Q120 722 182 722Q219 722 286 692T395 661Q454 661 521 713L531 722L543 708Q554 695 554 693Q554 692 528 671T500 648Q434 597 374 597"],770:[744,-551,0,-564,8,"-277 669Q-283 669 -414 610T-547 551L-556 569Q-564 585 -564 587Q-564 588 -563 588L-544 598Q-526 608 -490 628T-420 666L-279 744L8 587L-1 569Q-7 556 -9 554T-12 552Q-17 555 -146 612T-277 669"],771:[722,-597,0,-555,-2,"-182 597Q-219 597 -287 627T-396 658Q-455 658 -522 606L-532 597L-544 611Q-555 624 -555 626Q-555 627 -529 648T-501 671Q-436 722 -374 722Q-337 722 -270 692T-161 661Q-102 661 -35 713L-25 722L-13 708Q-2 695 -2 693Q-2 692 -28 671T-56 648Q-122 597 -182 597"],8214:[602,0,778,257,521,"257 0V602H300V0H257ZM478 0V602H521V0H478"],8593:[600,0,667,112,555,"112 421L120 424Q127 427 136 430T161 441T191 458T224 481T260 510T295 546T328 591L333 600L340 589Q380 527 431 489T555 421V377L543 381Q445 418 368 492L355 504V0H312V504L299 492Q222 418 124 381L112 377V421"],8595:[600,0,667,112,555,"312 96V600H355V96L368 108Q445 182 543 219L555 223V179L546 176Q538 173 529 169T505 158T475 141T442 119T407 90T372 53T339 9L334 0L327 11Q287 73 236 111T112 179V223L124 219Q222 182 299 108L312 
96"],8657:[599,0,778,57,721,"142 329Q300 419 389 599Q389 598 399 579T420 541T452 494T497 438T558 383T636 329T708 294L721 289V246Q718 246 694 256T623 293T532 356L522 364L521 182V0H478V405L466 417Q436 450 389 516Q388 515 378 500T352 463T312 417L300 405V0H257V364L247 356Q202 320 155 293T82 256L57 246V289L70 294Q101 305 142 329"],8659:[600,-1,778,57,721,"257 236V600H300V195L312 183Q342 150 389 84Q390 85 400 100T426 137T466 183L478 195V600H521V418L522 236L532 244Q576 280 623 307T696 344L721 354V311L708 306Q677 295 636 271Q478 181 389 1Q389 2 379 21T358 59T326 106T281 162T220 217T142 271T70 306L57 311V354Q60 354 83 345T154 308T247 244L257 236"],8719:[750,250,944,55,888,"158 656Q147 684 131 694Q110 707 69 710H55V750H888V710H874Q840 708 820 698T795 678T786 656V-155Q798 -206 874 -210H888V-250H570V-210H584Q618 -208 638 -197T663 -178T673 -155V710H270V277L271 -155Q283 -206 359 -210H373V-250H55V-210H69Q103 -208 123 -197T148 -178T158 -155V656"],8720:[750,250,944,55,888,"158 656Q147 684 131 694Q110 707 69 710H55V750H373V710H359Q325 708 305 698T280 678T271 656L270 223V-210H673V656Q666 672 663 679T639 697T584 710H570V750H888V710H874Q840 708 820 698T795 678T786 656V-155Q798 -206 874 -210H888V-250H55V-210H69Q103 -208 123 -197T148 -178T158 -155V656"],8721:[750,250,1056,56,999,"61 748Q64 750 489 750H913L954 640Q965 609 976 579T993 533T999 516H979L959 517Q936 579 886 621T777 682Q724 700 655 705T436 710H319Q183 710 183 709Q186 706 348 484T511 259Q517 250 513 244L490 216Q466 188 420 134T330 27L149 -187Q149 -188 362 -188Q388 -188 436 -188T506 -189Q679 -189 778 -162T936 -43Q946 -27 959 6H999L913 -249L489 -250Q65 -250 62 -248Q56 -246 56 -239Q56 -234 118 -161Q186 -81 245 -11L428 206Q428 207 242 462L57 717L56 728Q56 744 61 748"],8730:[850,350,1000,111,1020,"263 249Q264 249 315 130T417 -108T470 -228L725 302Q981 837 982 839Q989 850 1001 850Q1008 850 1013 844T1020 832V826L741 243Q645 43 540 -176Q479 -303 469 -324T453 -348Q449 -350 436 -350L424 -349L315 -96Q206 156 205 156L171 130Q138 104 137 
104L111 130L263 249"],8739:[627,15,333,144,188,"146 612Q151 627 166 627Q182 627 187 612Q188 610 188 306T187 0Q184 -15 166 -15Q149 -15 146 0V10Q146 19 146 35T146 73T146 122T145 179T145 241T145 306T145 370T145 433T145 489T146 538T146 576T146 602V612"],8741:[627,15,556,144,410,"146 612Q151 627 166 627Q182 627 187 612Q188 610 188 306T187 0Q184 -15 166 -15Q149 -15 146 0V10Q146 19 146 35T146 73T146 122T145 179T145 241T145 306T145 370T145 433T145 489T146 538T146 576T146 602V612ZM368 612Q373 627 388 627Q404 627 409 612Q410 610 410 306T409 0Q406 -15 389 -15Q371 -15 368 0V10Q368 19 368 35T368 73T368 122T367 179T367 241T367 306T367 370T367 433T367 489T368 538T368 576T368 602V612"],8747:[805,306,472,55,610,"113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q220 -247 232 -218T251 -133T262 -15T276 155T297 367Q300 390 305 438T314 512T325 580T340 647T361 703T390 751T428 784T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q475 768 460 756T434 716T418 652T407 559T398 444T387 300T369 133Q349 -38 337 -102T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244"],8748:[805,306,819,55,957,"113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q220 -247 232 -218T251 -133T262 -15T276 155T297 367Q300 390 305 438T314 512T325 580T340 647T361 703T390 751T428 784T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q475 768 460 756T434 716T418 652T407 559T398 444T387 300T369 133Q349 -38 337 -102T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244ZM460 -244Q460 -246 466 -251T486 -263T514 -269Q532 -269 546 
-260Q567 -247 579 -218T598 -133T609 -15T623 155T644 367Q647 390 652 438T661 512T672 580T687 647T708 703T737 751T775 784T826 804Q828 804 835 804T848 805Q899 802 928 769T957 695Q957 669 941 657T908 645Q889 645 874 658T859 694Q859 705 863 714T873 729T885 737T895 742L899 743Q899 745 892 751T872 762T845 768Q822 768 807 756T781 716T765 652T754 559T745 444T734 300T716 133Q696 -38 684 -102T650 -207Q603 -306 516 -306Q466 -306 434 -272T402 -196Q402 -170 418 -158T451 -146Q470 -146 485 -159T500 -195Q500 -206 496 -215T486 -230T474 -238T464 -242L460 -244"],8749:[805,306,1166,55,1304,"113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q220 -247 232 -218T251 -133T262 -15T276 155T297 367Q300 390 305 438T314 512T325 580T340 647T361 703T390 751T428 784T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q475 768 460 756T434 716T418 652T407 559T398 444T387 300T369 133Q349 -38 337 -102T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244ZM460 -244Q460 -246 466 -251T486 -263T514 -269Q532 -269 546 -260Q567 -247 579 -218T598 -133T609 -15T623 155T644 367Q647 390 652 438T661 512T672 580T687 647T708 703T737 751T775 784T826 804Q828 804 835 804T848 805Q899 802 928 769T957 695Q957 669 941 657T908 645Q889 645 874 658T859 694Q859 705 863 714T873 729T885 737T895 742L899 743Q899 745 892 751T872 762T845 768Q822 768 807 756T781 716T765 652T754 559T745 444T734 300T716 133Q696 -38 684 -102T650 -207Q603 -306 516 -306Q466 -306 434 -272T402 -196Q402 -170 418 -158T451 -146Q470 -146 485 -159T500 -195Q500 -206 496 -215T486 -230T474 -238T464 -242L460 -244ZM807 -244Q807 -246 813 -251T833 -263T861 -269Q880 -269 893 -260Q914 -247 926 -218T945 -133T956 -15T970 155T991 367Q994 390 999 438T1008 512T1019 580T1034 647T1055 703T1084 751T1122 784T1173 804Q1175 804 1182 
804T1195 805Q1246 802 1275 769T1304 695Q1304 669 1288 657T1255 645Q1236 645 1221 658T1206 694Q1206 705 1210 714T1220 729T1232 737T1242 742L1246 743Q1246 745 1239 751T1219 762T1192 768Q1169 768 1154 756T1128 716T1112 652T1101 559T1092 444T1081 300T1063 133Q1043 -38 1031 -102T997 -207Q950 -306 863 -306Q813 -306 781 -272T749 -196Q749 -170 765 -158T798 -146Q817 -146 832 -159T847 -195Q847 -206 843 -215T833 -230T821 -238T811 -242L807 -244"],8750:[805,306,472,55,610,"269 74L256 80Q244 85 227 97T191 128T161 179T148 250Q148 332 199 379T302 433L306 434L307 444Q309 456 313 495T321 553T331 607T345 664T365 712T393 756T431 785T479 804Q481 804 488 804T501 805Q552 802 581 769T610 695Q610 669 594 657T561 645Q542 645 527 658T512 694Q512 705 516 714T526 729T538 737T548 742L552 743Q552 745 545 751T525 762T498 768Q471 768 454 752T427 693T414 626T406 536Q405 530 405 527L397 425L404 422Q410 419 421 413T445 399T470 376T494 345T511 303T518 250Q518 205 502 169T460 112T410 80T364 66L360 65L359 55Q357 38 353 4T346 -43T340 -81T333 -118T326 -148T316 -179T303 -207Q256 -306 169 -306Q119 -306 87 -272T55 -196Q55 -170 71 -158T104 -146Q123 -146 138 -159T153 -195Q153 -206 149 -215T139 -230T127 -238T117 -242L113 -244Q113 -246 119 -251T139 -263T167 -269Q186 -269 199 -260Q231 -241 242 -183T266 33L269 74ZM272 122Q272 156 300 391Q300 392 299 392Q287 392 263 379T213 331T187 249Q187 211 205 180T239 137T272 116V122ZM366 107Q378 107 402 119T453 167T479 249Q479 340 394 383V377Q394 375 394 374T393 371T393 366T392 357T391 342T389 321T386 291T382 251T377 199T369 133Q366 112 366 107"],8896:[750,249,833,55,777,"119 -249T97 -249T65 -235T55 -207Q55 -201 56 -198Q58 -190 218 268T380 729Q392 750 416 750Q438 750 451 732Q453 728 534 498T695 36L775 -194Q777 -204 777 -208Q777 -222 767 -235T735 -249Q713 -249 700 -231Q696 -225 557 177L416 579L276 177Q136 -226 132 -231Q119 -249 97 -249"],8897:[750,249,833,55,777,"55 708Q55 729 68 739T96 750Q119 750 132 731Q136 726 276 323L416 -79L557 323Q696 725 700 731Q713 749 735 749Q756 749 
766 736T777 708Q777 700 696 466T533 1T451 -232Q436 -249 416 -249Q402 -249 391 -241Q384 -236 380 -226Q368 -198 219 230Q55 697 55 708"],8898:[750,249,833,54,777,"139 -217Q127 -241 114 -246Q106 -249 97 -249Q67 -249 57 -220Q55 -214 55 102Q55 152 55 221T54 312Q54 422 60 464T91 554Q120 612 165 654T257 714T337 741T392 749Q393 750 402 750Q414 750 422 749Q557 749 660 659T776 430Q777 422 777 102Q777 -214 775 -220Q765 -249 735 -249Q716 -249 708 -241T694 -217L692 428L690 441Q674 540 597 603T416 666H409Q388 666 364 662T294 638T212 581Q156 523 142 441L140 428L139 105V-217"],8899:[750,249,833,55,777,"96 750Q103 750 109 748T120 744T127 737T133 730T137 723T139 718V395L140 73L142 60Q159 -43 237 -104T416 -166Q521 -166 597 -103T690 60L692 73L694 718Q708 749 735 749Q765 749 775 720Q777 714 777 398Q777 78 776 71Q766 -51 680 -140Q571 -249 416 -249H411Q261 -249 152 -140Q66 -51 56 71Q55 78 55 398Q55 714 57 720Q60 734 70 740Q80 750 96 750"],8968:[850,349,472,202,449,"202 -349V850H449V810H242V-349H202"],8969:[850,349,472,22,269,"22 810V850H269V-349H229V810H22"],8970:[850,349,472,202,449,"202 -349V850H242V-309H449V-349H202"],8971:[850,349,472,22,269,"229 -309V850H269V-349H22V-309H229"],9168:[602,0,667,312,355,"312 0V602H355V0H312"],10216:[850,350,472,96,394,"373 850Q392 850 394 832Q394 825 267 538L139 250L267 -38Q394 -325 394 -332Q392 -350 375 -350Q361 -350 356 -338Q354 -331 289 -186T161 103T97 250T160 397T289 685T356 838Q362 850 373 850"],10217:[850,350,472,77,375,"77 832Q77 837 82 843T98 850Q110 849 115 838Q117 831 182 686T310 397T374 250T311 103T182 -185T115 -338Q110 -350 96 -350Q79 -350 77 -332Q77 -325 204 -38L332 250L204 538Q77 825 77 832"],10752:[750,250,1111,56,1054,"555 -250Q420 -250 306 -185T124 -4T56 250Q56 453 193 595T526 749Q528 750 539 750Q554 750 562 749Q688 749 800 687T983 508T1054 250Q1054 112 987 -3T806 -184T555 -250ZM555 -165Q672 -165 767 -108T916 44T970 250Q970 418 861 532T600 664Q591 665 548 665Q446 665 353 614T200 466T140 250V243Q140 88 248 -30Q262 -46 280 -62T338 
-105T434 -148T555 -165ZM478 250Q478 288 503 307T551 326Q586 326 609 305T632 250Q632 217 610 196T555 174T500 196T478 250"],10753:[750,250,1111,56,1054,"555 -250Q420 -250 306 -185T124 -4T56 250Q56 453 193 595T526 749Q528 750 539 750Q554 750 562 749Q688 749 800 687T983 508T1054 250Q1054 112 987 -3T806 -184T555 -250ZM513 478Q513 664 512 664Q504 664 481 660T406 637T313 588Q281 564 255 537T211 483T181 431T161 382T150 342T144 310T141 292H513V478ZM798 588Q758 616 711 634T639 658T602 663L597 664V292H969Q969 293 967 309T960 341T949 381T930 430T900 482T856 537T798 588ZM513 -164V208H141Q142 205 144 189T149 160T158 125T173 83T196 39T229 -9Q249 -34 273 -55T318 -92T363 -119T405 -138T444 -150T475 -158T499 -162T513 -164ZM775 -103Q801 -87 823 -68T863 -30T894 10T919 49T937 88T950 123T959 154T964 180T968 198L969 208H597V-164Q599 -163 616 -161T647 -155T683 -145T728 -128T775 -103"],10754:[750,250,1111,56,1054,"555 -250Q420 -250 306 -185T124 -4T56 250Q56 453 193 595T526 749Q528 750 539 750Q554 750 562 749Q688 749 800 687T983 508T1054 250Q1054 112 987 -3T806 -184T555 -250ZM600 664Q591 665 548 665Q414 665 306 583L292 573L423 441L555 310L687 441L818 573L804 583Q714 650 600 664ZM364 118L495 250L364 382L232 513L223 500Q140 391 140 250Q140 107 223 0L232 -13L364 118ZM970 250Q970 389 887 501L878 512Q878 513 861 496T812 447T746 381L615 250L746 118L878 -13L887 0Q970 109 970 250ZM687 59L555 190L423 59L292 -73L306 -83Q416 -166 555 -166T804 -83L818 -73L687 59"],10756:[750,249,833,55,777,"96 750Q103 750 109 748T120 744T127 737T133 730T137 723T139 718V395L140 73L142 60Q159 -43 237 -104T416 -166Q521 -166 597 -103T690 60L692 73L694 718Q708 749 735 749Q765 749 775 720Q777 714 777 398Q777 78 776 71Q766 -51 680 -140Q571 -249 416 -249H411Q261 -249 152 -140Q66 -51 56 71Q55 78 55 398Q55 714 57 720Q60 734 70 740Q80 750 96 750ZM223 276Q223 282 224 287T227 296T232 302T238 308T243 313T250 316L254 319H374V376V406Q374 438 382 454T418 470Q443 467 450 453T458 410V376V319H579Q580 319 583 317T589 313T594 308T600 302T604 
295T608 287T609 276Q609 253 587 241Q577 235 513 235H458V178Q458 176 458 166T459 148Q459 84 415 84Q401 84 390 93T375 117Q374 120 374 178V235H319Q317 235 307 235T290 234Q223 234 223 276"],10758:[750,249,833,55,777,"777 -217Q766 -244 745 -249H88Q64 -242 57 -220Q55 -214 55 250T57 720Q60 734 70 740Q80 750 96 750Q127 750 137 720Q139 714 139 274V-166H693V274Q693 714 695 720Q705 749 735 749Q766 749 775 719Q777 713 777 248V-217"]};MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Size1/Regular/Main.js"); | PypiClean |
/ConcurrentLogHandler-0.9.1.tar.gz/ConcurrentLogHandler-0.9.1/ez_setup.py | import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "0.7.7"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
    """Run the current Python interpreter with *args*; True iff exit code 0."""
    cmd = (sys.executable,) + args
    return subprocess.call(cmd) == 0
def _install(tarball, install_args=()):
    """Unpack *tarball* into a temp dir and run ``setup.py install`` there.

    Returns 2 when the install subprocess fails (so callers can use it as an
    exit code), otherwise None. The temp dir is removed in all cases, and the
    original working directory is restored.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        # The tarball contains a single top-level source dir; cd into it.
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball*, run ``setup.py bdist_egg`` and leave the egg in *to_dir*.

    Raises IOError if the expected *egg* file does not exist afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        # The tarball contains a single top-level source dir; cd into it.
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure a setuptools egg for *version* exists in *to_dir* and import it.

    Side effects: downloads the sdist and builds the egg if it is missing,
    prepends the egg to sys.path, and records it on
    ``setuptools.bootstrap_install_from`` for the later install step.
    """
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= *version* is importable, downloading it if needed.

    Exits the process with status 2 when an older setuptools is already
    imported and cannot be replaced in this interpreter.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Remember whether pkg_resources/setuptools were imported before we
    # started: replacing an already-imported copy is not safe.
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.VersionConflict:
        # sys.exc_info()[1] keeps this code valid on both Python 2 and 3
        # (no `except ... as e` syntax requirement).
        e = sys.exc_info()[1]
        if was_imported:
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U setuptools'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    NOTE(review): despite the docstring, `delay` is never used in this body;
    no pause is actually performed.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # urllib2 fallback keeps this working on Python 2.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "setuptools-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    NOTE(review): this looks like a backport of ``TarFile.extractall``
    for old interpreters; it is written to be called with a TarFile
    instance as ``self`` (hence the ``self`` parameter on a module-level
    function) — confirm how it is bound at the call site.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        # Iterating a TarFile yields its members.
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode; the real permissions
            # are applied afterwards, once all children are extracted.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories so children come before their parents and
    # metadata is applied bottom-up (parent mtimes are not clobbered).
    if sys.version_info < (2, 4):
        # list.sort() grew the `key` keyword in Python 2.4; fall back to
        # a comparison function on older interpreters.
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            # sys.exc_info() keeps this compatible with Python 2 and 3.
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
    """
    Build the arguments to 'python setup.py install' on the setuptools package

    Returns a list of extra command-line arguments; currently only
    ``--user`` when a user-site install was requested.
    """
    if not options.user_install:
        return []
    if sys.version_info < (2, 6):
        # --user relies on the per-user site-packages scheme (PEP 370).
        log.warn("--user requires Python 2.6 or later")
        raise SystemExit(1)
    return ['--user']
def _parse_args():
    """
    Parse the command line for options

    Returns the optparse options object; positional arguments are
    deliberately discarded.
    """
    opt_parser = optparse.OptionParser()
    opt_parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    opt_parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    options, _ = opt_parser.parse_args()
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall

    Downloads the setuptools tarball (honouring ``--download-base``) and
    hands it to the installer; returns the installer's exit status.
    """
    opts = _parse_args()
    return _install(
        download_setuptools(download_base=opts.download_base),
        _build_install_args(opts),
    )
# Script entry point: propagate the installer's status as the exit code.
if __name__ == '__main__':
    sys.exit(main())
/BigJob-0.64.5.tar.gz/BigJob-0.64.5/docs/source/tutorial/part3.rst | Chained Ensembles
##################
What if you had two different executables to run? What if this second set of executables had some dependencies on data from A? Can you use one BigJob to run both jobs? Yes!
The below example submits a set of echo jobs (set A) using BigJob, and for every successful job (with state Done), it submits another /bin/echo job (set B) to the same Pilot-Job.
We can think of this as A comprising subjobs {a1,a2,a3}, while B comprises subjobs {b1,b2,b3}. Rather than waiting for each subjob {a1},{a2},{a3} to complete, {b1} can run as soon as {a1} is complete, or as soon as a slot becomes available – i.e. {a2} could finish before {a1}.
The code below demonstrates this behavior. As soon as there is a slot available to run a job in B (i.e. a job in A has completed), it executes the job in B. This keeps the BigJob utilization high.
=======================
Chained Ensemble Code
=======================
Create a new file ``chained_ensembles.py`` and paste the following code:
.. literalinclude:: ../../../examples/tutorial/chained_ensembles.py
:language: python
------------------------
How to Edit The Examples
------------------------
You will need to make the necessary changes to ``chained_ensembles.py`` as you did in the previous example. In the case of this tutorial, you can actually copy the "REQUIRED PILOT JOB" section that you filled out from ``simple_ensembles.py`` and paste it over the corresponding section in ``chained_ensembles.py``. You may need to revisit the previous part of this tutorial to understand how to fill out this section if you skipped it.
The link to the table is reiterated here for your convenience:
`Valid Variable Parameters
<http://saga-project.github.io/BigJob/sphinxdoc/tutorial/table.html>`_.
The important difference between this file and the previous file is that there are two separate "USER DEFINED TASK DESCRIPTION" sections - numbered 1 and 2. Again, these two sections will not require any modifications for the purposes of this tutorial. We will not review every variable again, but instead, review the relationship between the 2 task descriptions.
Go to line 70, "BEGIN USER DEFINED TASK 1 DESCRIPTION." This looks a lot like the description we saw in the previous example. It is also contained in a for loop from 0 to the NUMBER_JOBS. We are running the same executable, with almost the same arguments, except that we append an 'A' as an additional TASK_SET variable. If we look at lines 90-93, we see that as soon as a task in the "A" set reaches the "Done" state, we start what is defined in "BEGIN USER DEFINED TASK 2 DESCRIPTION" as a "B" task. This shows us an important feature of BigJob. We can call get_state() on a task to find out if it is complete or not. The second task description is to run the same executable, /bin/echo, and print instead that it is a B task, with its task number.
To give you an idea of some sample values for NUMBER_OF_PROCESSES (for the Pilot) and NUMBER_JOBS for this Chained Ensembles tutorial example, reference the machine Lonestar in the table. Note that the valid NUMBER_OF_PROCESSES values are increments of 12. This tells us there are 12 cores per node on Lonestar. A sample job for this example might have NUMBER_JOBS=24 and PILOT_SIZE=24, which would reserve 2 nodes on Lonestar. Note that when running these examples locally, i.e. with SAGA_ADAPTOR set to either ssh or fork, you cannot request a Pilot larger than the number of cores on the machine in which you are running, e.g. a 2-core computer can run a 2-core Pilot, while a 4-core computer can run a 4-core Pilot.
-------------
Run the Code
-------------
Save the file and execute it **(make sure your virtualenv is activated):**
.. code-block:: bash
python chained_ensembles.py
The output should look something like this:
.. code-block:: none
* Submitted 'A' task '0' with id 'cu-27ab3846-e9a9-11e2-88eb-14109fd519a1'
* Submitted 'A' task '1' with id 'cu-27c2cca4-e9a9-11e2-88eb-14109fd519a1'
[...]
One 'A' task cu-27ab3846-e9a9-11e2-88eb-14109fd519a1 finished. Launching a 'B' task.
* Submitted 'B' task '31' with id 'cu-352139c6-e9a9-11e2-88eb-14109fd519a1'
[...]
Terminating BigJob...
-----------------
Check the Output
-----------------
Again, we will find all the relevant BigJob output in the directory that we defined as "WORKDIR" in the above example. Note that some of the sj-directories in their stdout.txt files will contain A task output while others will contain B task output.
| PypiClean |
/Lavapy-1.6.1-py3-none-any.whl/lavapy/filters.py | from __future__ import annotations
from typing import Any, Dict, List, Tuple, Union
from .exceptions import InvalidFilterArgument
# Public names exported by ``from lavapy.filters import *``: the filter
# base class plus every concrete filter implementation.
__all__ = ("LavapyFilter",
           "Equalizer",
           "Karaoke",
           "Timescale",
           "Tremolo",
           "Vibrato",
           "Rotation",
           "Distortion",
           "ChannelMix",
           "LowPass")
class LavapyFilter:
    """Base class shared by every Lavapy filter.

    Concrete subclasses fill ``_payload`` with the settings that are sent
    to Lavalink and set ``name`` to the filter's Lavalink identifier.
    """
    # Lavalink identifier of the filter; overridden by each subclass.
    name = ""

    def __init__(self) -> None:
        # Mapping (or list, for the equalizer) of settings sent to Lavalink.
        self._payload: Any = {}

    def __repr__(self) -> str:
        return "<Lavapy Filter (Payload={})>".format(self._payload)

    @property
    def payload(self) -> Any:
        """Returns the payload to be sent to Lavalink."""
        return self._payload
class Equalizer(LavapyFilter):
    """
    A class representing a usable equalizer.

    Parameters
    ---------
    levels: List[Tuple[int, float]]
        A list of tuple pairs containing a band int and gain float.
    name: str
        A string to name this equalizer.
    """
    name = "equalizer"

    def __init__(self, levels: List[Tuple[int, float]], name: str) -> None:
        super().__init__()
        self._levels: List[Tuple[int, float]] = levels
        self._equalizerName: str = name
        # Payload uses the Lavalink wire format: [{"band": .., "gain": ..}, ...]
        self._payload: List[Dict[str, Union[int, float]]] = self._setup(levels)

    def __repr__(self) -> str:
        return f"<Lavapy Equalizer (Name={self.name}) (Levels={self.levels})>"

    @property
    def levels(self) -> List[Tuple[int, float]]:
        """Returns a list of tuple pairs containing a band int and gain float."""
        return self._levels

    @property
    def equalizerName(self) -> str:
        """Returns the name of this equalizer."""
        return self._equalizerName

    @staticmethod
    def _setup(levels: List[Tuple[int, float]]) -> List[Dict[str, Union[int, float]]]:
        """
        A function to convert the levels into a dict for sending to Lavalink.

        Parameters
        ----------
        levels: List[Tuple[int, float]]
            A list of tuple pairs containing a band int and gain float.

        Returns
        -------
        List[Dict[str, Union[int, float]]]
            A list of {'band': int, 'gain': float} pairs.
        """
        return [{"band": band, "gain": gain} for band, gain in levels]

    @classmethod
    def build(cls, levels: List[Tuple[int, float]], name: str = "CustomEqualizer") -> Equalizer:
        """
        Build a custom equalizer with the given levels.

        Parameters
        ----------
        levels: List[Tuple[int, float]]
            A custom list of tuple pairs containing a band int and gain float.
            Lavalink exposes 15 bands (numbered 0-14), so at most 15 entries
            are allowed, with each gain being between -0.25 and 1.
        name: str
            An optional string to name this equalizer. If this is not supplied, it will be set to 'CustomEqualizer'.

        Returns
        -------
        Equalizer
            A custom equalizer object.

        Raises
        ------
        ValueError
            If too many bands are supplied or a gain is out of range.
        """
        # Bug fix: the limit used to be 14, which wrongly rejected the full
        # 15-band (0-14) preset that Equalizer.flat() itself produces.
        if not (0 <= len(levels) <= 15):
            raise ValueError("There should be between 0 and 15 bands.")
        if not all(-0.25 <= gain <= 1 for _, gain in levels):
            raise ValueError("Each gain should be between -0.25 and 1.")
        return cls(levels, name)

    @classmethod
    def flat(cls) -> Equalizer:
        """
        A flat equalizer. This will not provide a cut or boost to any frequency.

        Returns
        -------
        Equalizer
            A flat equalizer object.
        """
        # All 15 Lavalink bands (0-14) at zero gain.
        levels = [(band, 0.0) for band in range(15)]
        return cls(levels, "Flat")
class Karaoke(LavapyFilter):
    """
    Simulates an :class:`Equalizer` which specifically targets removing vocals.

    Attributes
    ----------
    level: float
        The level of the karaoke filter.
    monoLevel: float
        The monolevel of the karaoke filter.
    filterBand: float
        The filter band of the karaoke filter.
    filterWidth: float
        The filter width of the karaoke filter.
    """
    name = "karaoke"

    def __init__(self, level: float = 1.0, monoLevel: float = 1.0, filterBand: float = 220.0, filterWidth: float = 100.0) -> None:
        super().__init__()
        # Mirror every setting both as an attribute and in the Lavalink payload.
        self.level = level
        self.monoLevel = monoLevel
        self.filterBand = filterBand
        self.filterWidth = filterWidth
        self._payload.update(
            level=level,
            monoLevel=monoLevel,
            filterBand=filterBand,
            filterWidth=filterWidth,
        )

    def __repr__(self) -> str:
        return f"<Lavapy KaraokeFilter (Payload={self._payload})>"
class Timescale(LavapyFilter):
    """
    Changes the speed, pitch and rate of a track. You can make some very cool sound effects with this such as a vaporwave-esque filter which slows the track down a certain amount to produce said effect.

    Attributes
    ----------
    speed: float
        The speed of the timescale filter. This must be more than 0.
    pitch: float
        The pitch of the timescale filter. This must be more than 0.
    rate: float
        The rate of the timescale filter. This must be more than 0.

    Raises
    ------
    InvalidFilterArgument
        An invalid filter argument has been passed.
    """
    name = "timescale"

    def __init__(self, speed: float = 1.0, pitch: float = 1.0, rate: float = 1.0) -> None:
        super().__init__()
        # Zero (or negative) values are rejected: a track cannot play at
        # zero speed/pitch/rate. The messages previously read "must be
        # more or equal to than 0.", contradicting the actual check.
        if speed <= 0.0:
            raise InvalidFilterArgument("Speed must be more than 0.")
        if pitch <= 0.0:
            raise InvalidFilterArgument("Pitch must be more than 0.")
        if rate <= 0.0:
            raise InvalidFilterArgument("Rate must be more than 0.")
        self.speed = self._payload["speed"] = speed
        self.pitch = self._payload["pitch"] = pitch
        self.rate = self._payload["rate"] = rate

    def __repr__(self) -> str:
        return f"<Lavapy TimescaleFilter (Payload={self._payload})>"
class Tremolo(LavapyFilter):
    """
    Uses amplification to create a shuddering effect, where the volume quickly oscillates.

    Here is an `Example <https://en.wikipedia.org/wiki/File:Fuse_Electronics_Tremolo_MK-III_Quick_Demo.ogv>`_.

    Attributes
    ----------
    frequency: float
        Oscillation frequency of the tremolo filter. Must not be negative.
        NOTE(review): the check permits 0 although the error message says
        "more than 0" — confirm the intended lower bound.
    depth: float
        Oscillation depth of the tremolo filter. Must be between 0 and 1.

    Raises
    ------
    InvalidFilterArgument
        An invalid filter argument has been passed.
    """
    name = "tremolo"

    def __init__(self, frequency: float = 2.0, depth: float = 0.5) -> None:
        super().__init__()
        # Validate before storing anything so a bad filter is never half-built.
        if frequency < 0.0:
            raise InvalidFilterArgument("Frequency must be more than 0.")
        if depth < 0.0 or depth > 1.0:
            raise InvalidFilterArgument("Depth must be between 0 and 1.")
        self.frequency = frequency
        self.depth = depth
        self._payload["frequency"] = frequency
        self._payload["depth"] = depth

    def __repr__(self) -> str:
        return "<Lavapy TremoloFilter (Payload={})>".format(self._payload)
class Vibrato(LavapyFilter):
    """
    Similar to :class:`Tremolo` but oscillates the pitch instead of the volume.

    Attributes
    ----------
    frequency: float
        Oscillation frequency of the vibrato filter. Must be between 0 and 14.
    depth: float
        Oscillation depth of the vibrato filter. Must be between 0 and 1.

    Raises
    ------
    InvalidFilterArgument
        An invalid filter argument has been passed.
    """
    name = "vibrato"

    def __init__(self, frequency: float = 2.0, depth: float = 0.5) -> None:
        super().__init__()
        # Validate before storing anything so a bad filter is never half-built.
        if frequency < 0.0 or frequency > 14.0:
            raise InvalidFilterArgument("Frequency must be between 0 and 14.")
        if depth < 0.0 or depth > 1.0:
            raise InvalidFilterArgument("Depth must be between 0 and 1.")
        self.frequency = frequency
        self.depth = depth
        self._payload["frequency"] = frequency
        self._payload["depth"] = depth

    def __repr__(self) -> str:
        return "<Lavapy VibratoFilter (Payload={})>".format(self._payload)
class Rotation(LavapyFilter):
    """
    Rotates the sound around the stereo channels/user headphones aka Audio Panning.

    Here is an `Example <https://en.wikipedia.org/wiki/File:Fuse_Electronics_Tremolo_MK-III_Quick_Demo.ogv>`_ (
    without the reverb).

    Parameters
    ----------
    rotationHz: float
        The frequency of the audio rotating around the listener in hertz
        (0.2 is similar to the example above).
    """
    name = "rotation"

    def __init__(self, rotationHz: float = 0.0) -> None:
        super().__init__()
        self.rotationHz = rotationHz
        self._payload["rotationHz"] = rotationHz

    def __repr__(self) -> str:
        return "<Lavapy RotationFilter (Payload={})>".format(self._payload)
class Distortion(LavapyFilter):
    """
    Distorts the sound. This can generate some pretty unique audio effects.

    Attributes
    ----------
    sinOffset: float
        The sine offset of the distortion filter.
    sinScale: float
        The sine scale of the distortion filter.
    cosOffset: float
        The cosine offset of the distortion filter.
    cosScale: float
        The cosine scale of the distortion filter.
    tanOffset: float
        The tangent offset of the distortion filter.
    tanScale: float
        The tangent scale of the distortion filter.
    offset: float
        The offset of the distortion filter.
    scale: float
        The scale of the distortion filter.
    """
    name = "distortion"

    def __init__(self, sinOffset: float = 0.0, sinScale: float = 1.0, cosOffset: float = 0.0, cosScale: float = 1.0, tanOffset: float = 0.0, tanScale: float = 1.0, offset: float = 0.0, scale: float = 1.0) -> None:
        super().__init__()
        settings = {
            "sinOffset": sinOffset,
            "sinScale": sinScale,
            "cosOffset": cosOffset,
            "cosScale": cosScale,
            "tanOffset": tanOffset,
            "tanScale": tanScale,
            "offset": offset,
            "scale": scale,
        }
        # Expose every setting as an attribute and mirror it in the payload.
        for key, value in settings.items():
            setattr(self, key, value)
        self._payload.update(settings)

    def __repr__(self) -> str:
        return f"<Lavapy DistortionFilter (Payload={self._payload})>"
class ChannelMix(LavapyFilter):
    """
    Mixes both channels (left and right) with a configurable factor on how much
    each channel affects the other. By default, both channels are kept separate
    from each other. Setting all factors to 0.5 means both channels get the
    same audio.

    Attributes
    ----------
    leftToLeft: float
        The channel mix of left to left. This must be between 0 and 1.
    leftToRight: float
        The channel mix of left to right. This must be between 0 and 1.
    rightToLeft: float
        The channel mix of right to left. This must be between 0 and 1.
    rightToRight: float
        The channel mix of right to right. This must be between 0 and 1.

    Raises
    ------
    InvalidFilterArgument
        An invalid filter argument has been passed.
    """
    name = "channelMix"

    def __init__(self, leftToLeft: float = 1.0, leftToRight: float = 0.0, rightToLeft: float = 0.0, rightToRight: float = 1.0) -> None:
        super().__init__()
        mixes = (
            ("leftToLeft", "LeftToLeft", leftToLeft),
            ("leftToRight", "LeftToRight", leftToRight),
            ("rightToLeft", "RightToLeft", rightToLeft),
            ("rightToRight", "RightToRight", rightToRight),
        )
        # Each factor is a proportion and must lie in [0, 1]; validate all
        # of them before storing anything.
        for _, label, value in mixes:
            if value < 0.0 or value > 1.0:
                raise InvalidFilterArgument(f"{label} must be between 0 and 1.")
        for key, _, value in mixes:
            setattr(self, key, value)
            self._payload[key] = value

    def __repr__(self) -> str:
        return f"<Lavapy ChannelMixFilter (Payload={self._payload})>"
class LowPass(LavapyFilter):
    """
    Suppresses higher frequencies, while allowing lower frequencies to pass through.

    Attributes
    ----------
    smoothing: float
        The smoothing of the low pass filter.
    """
    name = "lowPass"

    def __init__(self, smoothing: float = 20.0) -> None:
        super().__init__()
        self.smoothing = smoothing
        self._payload["smoothing"] = smoothing

    def __repr__(self) -> str:
        return "<Lavapy LowPassFilter (Payload={})>".format(self._payload)
/LARRY_dataset-0.0.2rc0-py3-none-any.whl/larry/_analysis/_estimate_growth_rates.py | __module_name__ = "_estimate_growth_rates.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(["vinyard@g.harvard.edu",])
# import packages: ------------------------------------------------------------
import anndata
import numpy as np
import vinplots
import matplotlib.pyplot as plt
# supporting plot function: ---------------------------------------------------
def plot_growth_rates(
    adata: anndata.AnnData,
    colors: list = ["dodgerblue", "crimson"],  # NOTE(review): mutable default; never mutated here, so safe
    labels: list = ["d4", "d6"],
) -> None:
    """
    Plot observed clonal growth rates as two scatter panels.

    Left panel: per-lineage d2->d4 and d2->d6 growth rates, with lineages
    ordered by their combined day-4 + day-6 daughter counts. Right panel:
    d2->d4 vs. d2->d6 growth rate for each lineage.

    Parameters:
    -----------
    adata
        AnnData with ``uns["n_d2_daughters"]`` (per-timepoint daughter
        counts) and ``uns["GrowthRateDict"]`` (keys "d2_d4" / "d2_d6")
        already populated.
    colors
        Point colors for the d4 and d6 series, respectively.
    labels
        Legend labels for the d4 and d6 series, respectively.

    Returns:
    --------
    None
    """
    count_dict = adata.uns["n_d2_daughters"]
    # Order lineages by total observed daughters at day 4 + day 6.
    count_idx = (count_dict[4] + count_dict[6]).argsort()
    growth_rates = adata.uns["GrowthRateDict"]
    fig, axes = vinplots.quick_plot(nplots=2, ncols=2, wspace=0.2, figsize=0.6)
    for n, d_g in enumerate([growth_rates["d2_d4"], growth_rates["d2_d6"]]):
        axes[0].scatter(
            range(len(d_g)), d_g[count_idx], s=2, c=colors[n], label=labels[n]
        )
    axes[0].legend(edgecolor="w", markerscale=2)
    axl = axes[0].set_ylabel("Relative Growth Rate", fontsize=8)
    axl = axes[0].set_xlabel("Clonal Lineage", fontsize=8)
    axes[1].set_xlabel("log(d4/d2) growth rate", fontsize=8)
    axes[1].set_ylabel("log(d6/d2) growth rate", fontsize=8)
    axes[1].scatter(growth_rates["d2_d4"], growth_rates["d2_d6"], s=2, c="k")
    st = plt.suptitle("Observed (via counting) Growth Rates", fontsize=10)
# plot growth rate UMAP functions ---------------------------------------------
def _get_plot_vmin_vmax(adata):
    """Return global color-scale bounds over every stored growth-rate array.

    Stacks all arrays in ``adata.uns["GrowthRateDict"]`` and returns a dict
    ``{"vmin": minimum, "vmax": maximum}``.
    """
    stacked = np.hstack(list(adata.uns["GrowthRateDict"].values()))
    return {"vmin": stacked.min(), "vmax": stacked.max()}
def _background_scatter_UMAP(
    ax, X_umap, c="lightgrey", alpha=0.2, s=1, rasterized=True
):
    """Draw all cells as a faint background layer on a UMAP axis."""
    ax.scatter(
        X_umap[:, 0],
        X_umap[:, 1],
        c=c,
        alpha=alpha,
        s=s,
        rasterized=rasterized,
    )
def _d2_lineage_cell_idx(meta_df, d2_mask, t0=2, time_key="Time point"):
    """Integer index labels of the day-``t0`` cells selected by ``d2_mask``.

    ``d2_mask`` is applied to the rows whose ``time_key`` column equals
    ``t0``; the surviving index labels are cast to int.
    """
    rows_at_t0 = meta_df.loc[meta_df[time_key] == t0]
    return rows_at_t0[d2_mask].index.astype(int)
def _plot_continuous_highlight_UMAP(
    ax,
    X_umap,
    subset_idx,
    c="navy",
    s=5,
    vmin=None,
    vmax=None,
    cax=None,
    cbar_shrink=0.5,
):
    """Scatter a highlighted subset of cells on a UMAP axis.

    ``c`` may be a single color string or an array of per-cell values; in
    the array case points are drawn in ascending value order so high values
    land on top. When ``cax`` is given a colorbar is attached to it.

    Bug fixes vs. the original: a string ``c`` used to leave ``c_idx``
    unassigned (NameError on the default path), and the ``cbar_shrink``
    parameter was accepted but ``shrink=0.5`` was hardcoded.
    """
    if isinstance(c, str):
        # Single color: no value ordering needed.
        x, y = X_umap[subset_idx, 0], X_umap[subset_idx, 1]
        point_colors = c
    else:
        # Sort by value so the highest values are plotted last (on top).
        c_idx = np.argsort(c)
        x, y = X_umap[subset_idx][c_idx, 0], X_umap[subset_idx][c_idx, 1]
        point_colors = c[c_idx]
    img = ax.scatter(x, y, c=point_colors, s=s, vmin=vmin, vmax=vmax, cmap=plt.cm.plasma)
    if cax:
        plt.colorbar(mappable=img, ax=cax, shrink=cbar_shrink)
def plot_growth_rate_UMAPs(
    adata,
    plot_keys=["d2_d4", "d2_d6"],  # NOTE(review): mutable defaults; never mutated here, so safe
    titles=["log(d4/d2) observed growth rate", "log(d6/d2) observed growth rate"],
    vmin=None,
    vmax=None,
):
    """
    Plot observed growth rates on UMAP coordinates, one panel per key.

    Each panel shows all cells in grey with the day-2 lineage cells
    colored by the growth rate stored under the corresponding key of
    ``adata.uns["GrowthRateDict"]``. Color bounds are shared across
    panels; when not supplied they are derived from all stored rates.
    """
    # NOTE(review): truthiness check treats vmin=0.0 / vmax=0.0 as
    # "unset" — confirm whether explicit zero bounds should be honored.
    if not vmin and not vmax:
        v = _get_plot_vmin_vmax(adata)
        vmin, vmax = v["vmin"], v["vmax"]
    X_umap = adata.obsm["X_umap"]
    growth_rates = adata.uns["GrowthRateDict"]
    # Day-2 cells belonging to tracked lineages; only these get colored.
    plot_idx = _d2_lineage_cell_idx(
        adata.obs.copy(), adata.uns["d2_lin_mask"], t0=2, time_key="Time point"
    )
    fig, axes = vinplots.quick_plot(nplots=2, ncols=2, hspace=0.2, figsize_width=1.2, rm_ticks=True, spines_to_delete="all")
    for n, ax in enumerate(axes):
        _background_scatter_UMAP(ax, X_umap)
        _plot_continuous_highlight_UMAP(
            ax, X_umap, plot_idx, c=growth_rates[plot_keys[n]], cax=ax, vmin=vmin, vmax=vmax
        )
        ax.set_title(titles[n])
# supporting functions: -------------------------------------------------------
def _enumerate_time_pairs(t):
    """
    Get pairs of timepoints

    Parameters:
    -----------
    t
        unique time points for which there are counts
        type: list

    Returns:
    --------
    time_pairs
        Ordered (earlier, later) pairs, without duplicates.
        type: list
    """
    pairs = []
    for start in t:
        for end in t:
            # Keep only strictly increasing, previously unseen pairs.
            if start == end or end < start or (start, end) in pairs:
                continue
            pairs.append((start, end))
    return pairs
def _calculate_growth_rate_from_counts(
    t0_count: np.ndarray,
    tf_count: np.ndarray,
    t0: float,
    tf: float,
    pseudocount: float = 1,
) -> np.ndarray:
    """
    Estimate growth rate from clonal lineage counts at two timepoints.

    Computes ``log((tf_count + pseudocount) / (t0_count + pseudocount)) / (tf - t0)``
    element-wise; the pseudocount avoids division by zero and log(0) for
    lineages with zero observed cells.

    Parameters:
    -----------
    t0_count
        Counts per lineage at the earlier timepoint. type: np.ndarray
    tf_count
        Counts per lineage at the later timepoint. type: np.ndarray
    t0
        Earlier timepoint. type: float
    tf
        Later timepoint. type: float
    pseudocount
        Additive smoothing applied to both counts. type: float

    Returns:
    --------
    growth_rate
        type: np.ndarray

    Notes:
    ------
    (1) Source: https://github.com/gifford-lab/prescient-analysis/blob/master/notebooks/02b-weinreb2020-proliferation.ipynb
    """
    smoothed_final = tf_count + pseudocount
    smoothed_initial = t0_count + pseudocount
    elapsed = tf - t0
    return np.log(smoothed_final / smoothed_initial) / elapsed
# primary function: -----------------------------------------------------------
def estimate_growth_rates(
    adata: anndata.AnnData,
    pseudocount: int = 1,
    return_dict: bool = False,
    plot: bool = True,
    plot_colors: list = ["dodgerblue", "crimson"],  # NOTE(review): mutable defaults; never mutated here, so safe
    plot_labels: list = ["d4", "d6"],
) -> dict:
    """
    Estimate growth rate from counts of daughter cells relative to d2 progenitors, across multiple timepoints.

    For every ordered pair of timepoints found in
    ``adata.uns["n_d2_daughters"]``, computes a per-lineage growth rate via
    ``_calculate_growth_rate_from_counts`` and stores the results in
    ``adata.uns["GrowthRateDict"]`` under keys like ``"d2_d4"``.

    Parameters:
    -----------
    adata
        AnnData with ``uns["n_d2_daughters"]`` mapping timepoint -> per-lineage
        daughter-count array.
    pseudocount
        Additive smoothing forwarded to the growth-rate calculation.
    return_dict
        When True, also return the computed growth-rate dict.
    plot
        When True, draw the scatter and UMAP summary figures.
    plot_colors, plot_labels
        Forwarded to ``plot_growth_rates``.

    Returns:
    --------
    GrowthRateDict
        Only when ``return_dict`` is True; otherwise the result is available
        via ``adata.uns["GrowthRateDict"]``.
    """
    count_dict = adata.uns["n_d2_daughters"]
    GrowthRateDict = {}
    t = list(count_dict.keys())
    time_pairs = _enumerate_time_pairs(t)
    for t0, tf in time_pairs:
        # e.g. (2, 4) -> "d2_d4"
        key = "d{}_d{}".format(int(t0), int(tf))
        GrowthRateDict[key] = _calculate_growth_rate_from_counts(
            count_dict[t0], count_dict[tf], t0, tf, pseudocount=pseudocount
        )
    adata.uns["GrowthRateDict"] = GrowthRateDict
    if plot:
        plot_growth_rates(adata, colors=plot_colors, labels=plot_labels)
        plot_growth_rate_UMAPs(adata,
                               plot_keys=["d2_d4", "d2_d6"],
                               titles=["log(d4/d2) observed growth rate", "log(d6/d2) observed growth rate"],
                               vmin=None,
                               vmax=None,
                              )
    if return_dict:
        return GrowthRateDict
/FsQuota-0.1.0.tar.gz/FsQuota-0.1.0/README.md | # Python File-system Quota module
This repository contains the sources of the Python file-system quota module,
which has its official home at [PyPi](https://pypi.org/project/FsQuota/).
The quota module allows accessing file system quotas on UNIX platforms.
This works both for locally mounted file systems and network file systems (via
RPC, i.e. Remote Procedure Call) for all the operating systems listed below.
The interface is designed to be independent of UNIX flavours as well as file
system types.
The C implementation of this module is derived from the
[Quota module for Perl](https://github.com/tomzox/Perl-Quota)
(also at [CPAN](https://metacpan.org/pod/Quota)).
I started developing the Perl module 1995, while working as a UNIX system
administrator at university and kept maintaining it even after no longer
working in this capacity. Since its beginnings, the module was continuously
extended by porting to more UNIX platforms and file-systems. Numerous people
have contributed to this process; for a complete list of names please see the
CHANGES document in the repository. All this effort is now available also to
Python users.
## Module information
The following operating systems and file systems are supported transparently
through a common API.
Supported operating systems:
* Linux - kernel 2.0.30 - 4.15
* FreeBSD 3 - 12.1, OpenBSD 2.2 - 6.6 & NetBSD 5 - 9
* SunOS 4.1.3 (aka Solaris 1)
* Solaris 2.4 - 2.10
* HP-UX 9.0x & 10.10 & 10.20 & 11.00
* IRIX 5.2 & 5.3 & 6.2 - 6.5
* OSF/1 & Digital Unix 4
* AIX 4.1, 4.2 and 5.3
Supported file systems:
* Standard file systems of the platforms listed above
* NFS (Network file system) on all of the above
* XFS on Linux and IRIX 6
* AFS (Andrew File System) on many of the above (see INSTALL)
* VxFS (Veritas File System) on Solaris 2
## Documentation
For further information please refer to the following files:
* <A HREF="doc/FsQuota.rst">FsQuota.rst</A>: API documentation
* <A HREF="INSTALL">INSTALL</A>: Installation description
* <A HREF="CHANGES">CHANGES</A>: Change log & acknowledgements
* <A HREF="LICENSE">LICENSE</A>: GPL License
| PypiClean |
/Deliverance.Rewrite-0.6.3.tar.gz/Deliverance.Rewrite-0.6.3/deliverance/editor/editorapp.py | from webob import Request, Response, exc
from tempita import HTMLTemplate
import os
from paste.urlparser import StaticURLParser
import mimetypes
class Editor(object):
    """A small WSGI application that edits files in a web browser.

    Serves either a single file (``filename``) or a directory tree
    (``base_dir``); exactly one of the two must be given. The editor's own
    static assets are served under the ``/.media`` path.
    """

    def __init__(self, base_dir=None, filename=None,
                 title=None, force_syntax=None):
        """
        base_dir: root directory whose files may be edited (exclusive with
            ``filename``).
        filename: a single editable file (exclusive with ``base_dir``).
        title: optional page title; defaults to the file/directory path.
        force_syntax: when set, overrides extension-based syntax detection.
        """
        assert base_dir or filename
        assert not base_dir or not filename
        if base_dir:
            # Normalized/absolute so the prefix checks below are sound.
            self.base_dir = os.path.normcase(os.path.abspath(base_dir))
        else:
            self.base_dir = None
        self.filename = filename
        self.title = title
        self.force_syntax = force_syntax

    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to media, directory or file handlers."""
        req = Request(environ)
        if req.path_info_peek() == '.media':
            # Serve the editor's own static assets (CSS/JS).
            req.path_info_pop()
            app = StaticURLParser(os.path.join(os.path.dirname(__file__), 'media'))
            return app(environ, start_response)
        if self.base_dir:
            filename = os.path.join(self.base_dir, req.path_info.lstrip('/'))
            # The requested path must stay inside base_dir.
            assert filename.startswith(self.base_dir)
        else:
            filename = self.filename
        if req.method not in ('GET', 'POST'):
            resp = exc.HTTPMethodNotAllowed('Bad method: %s' % req.method,
                                            allow='GET,POST')
        elif os.path.isdir(filename):
            if req.method == 'POST':
                resp = self.save_create(req, filename)
            else:
                if not req.path.endswith('/'):
                    # Canonicalize directory URLs to a trailing slash.
                    resp = exc.HTTPMovedPermanently(add_slash=True)
                else:
                    resp = self.view_dir(req, filename)
        else:
            if req.method == 'POST':
                resp = self.save_file(req, filename)
            elif req.method == 'GET':
                resp = self.edit_file(req, filename)
        return resp(environ, start_response)

    def edit_url(self, req, filename):
        """Return the URL at which ``filename`` can be edited."""
        if self.filename:
            # Single-file mode: the application URL itself is the editor.
            assert self.filename == filename
            return req.application_url
        else:
            assert filename.startswith(self.base_dir)
            filename = filename[len(self.base_dir):].lstrip('/').lstrip('\\')
            return req.application_url + '/' + filename

    def save_file(self, req, filename):
        """Overwrite ``filename`` with the posted content, then redirect."""
        # Read the POST field first so a missing key cannot truncate the file.
        content = req.POST['content']
        with open(filename, 'wb') as f:
            f.write(content)
        return exc.HTTPFound(
            location=self.edit_url(req, filename))

    # Maps exact basenames and lowercased extensions to highlighting modes.
    syntax_map = {
        '.c': 'c',
        '.cf': 'coldfusion',
        '.cpp': 'cpp',
        '.c++': 'cpp',
        '.css': 'css',
        '.html': 'html',
        '.htm': 'html',
        '.xhtml': 'html',
        '.js': 'js',
        '.pas': 'pas',
        '.pl': 'perl',
        '.php': 'php',
        '.py': 'python',
        'robots.txt': 'robotstxt',
        '.rb': 'ruby',
        '.sql': 'sql',
        '.tsql': 'tsql',
        '.vb': 'vb',
        '.xml': 'xml',
    }

    def syntax_for_filename(self, filename):
        """Return the syntax-highlighting mode for ``filename``, or None.

        Precedence: ``force_syntax``, exact basename, file extension, then
        a mimetype fallback that maps ``application/*+xml`` types to 'xml'.
        """
        if self.force_syntax:
            return self.force_syntax
        basename = os.path.basename(filename)
        if basename in self.syntax_map:
            return self.syntax_map[basename]
        ext = os.path.splitext(filename)[1].lower()
        if ext in self.syntax_map:
            return self.syntax_map[ext]
        # Bug fix: guess_type() was previously called on the bare extension
        # (e.g. '.html'), which it treats as an extension-less dotfile and
        # returns (None, None) -- then .startswith() raised AttributeError.
        # Guess from the full filename and guard against None.
        mimetype, _ = mimetypes.guess_type(filename)
        if mimetype and mimetype.startswith('application/') and mimetype.endswith('+xml'):
            return 'xml'
        return None

    def edit_file(self, req, filename):
        """Render the edit form for ``filename``."""
        with open(filename, 'rb') as f:
            content = f.read()
        title = self.title or filename
        syntax = self.syntax_for_filename(filename)
        body = self.edit_template.substitute(
            content=content, filename=filename, title=title,
            req=req, edit_url=self.edit_url(req, filename),
            syntax=syntax)
        resp = Response(body=body)
        # File contents change between requests; never serve a cached form.
        resp.cache_expires()
        return resp

    edit_template = HTMLTemplate.from_filename(
        os.path.join(os.path.dirname(__file__), 'editor_template.html'))

    def save_create(self, req, dir):
        """Create a new file in ``dir`` from an upload or posted content.

        Refuses to overwrite an existing file.
        """
        file = req.POST.get('file')
        if file is None or file == '':
            content = req.POST['content']
            filename = req.POST['filename']
        else:
            content = file.value
            filename = req.POST.get('filename') or file.filename
        # Strip any client-supplied directory components (path-traversal guard).
        filename = filename.replace('\\', '/')
        filename = os.path.basename(os.path.normpath(filename))
        filename = os.path.join(dir, filename)
        if os.path.exists(filename):
            return exc.HTTPForbidden(
                "The file %s already exists, you cannot upload over it" % filename)
        with open(filename, 'wb') as f:
            f.write(content)
        return exc.HTTPFound(
            location=self.edit_url(req, filename))

    # Version-control bookkeeping directories hidden from listings.
    skip_files = ['.svn', 'CVS', '.hg']

    def view_dir(self, req, dir):
        """Render a directory listing with edit links."""
        dir = os.path.normpath(dir)
        # Only offer a parent link when we are below the editing root.
        show_parent = dir != self.base_dir
        children = [os.path.join(dir, name) for name in os.listdir(dir)
                    if name not in self.skip_files]

        def edit_url(filename):
            return self.edit_url(req, filename)
        title = self.title or dir
        body = self.view_dir_template.substitute(
            req=req,
            dir=dir,
            show_parent=show_parent,
            title=title,
            basename=os.path.basename,
            dirname=os.path.dirname,
            isdir=os.path.isdir,
            children=children,
            edit_url=edit_url,
        )
        resp = Response(body=body)
        # Listings change as files are added; don't let clients cache them.
        resp.cache_expires()
        return resp

    view_dir_template = HTMLTemplate.from_filename(
        os.path.join(os.path.dirname(__file__), 'view_dir_template.html'))
/NodeGraphQt_QuiltiX_fork-0.6.0.tar.gz/NodeGraphQt_QuiltiX_fork-0.6.0/NodeGraphQt/qgraphics/node_abstract.py | from Qt import QtCore, QtWidgets
from NodeGraphQt.constants import (
Z_VAL_NODE,
ITEM_CACHE_MODE,
LayoutDirectionEnum,
NodeEnum
)
class AbstractNodeItem(QtWidgets.QGraphicsItem):
    """
    Base qgraphics item that every node item subclasses.

    All serializable view attributes (name, colors, state flags, layout
    direction) live in a single ``_properties`` dict and are exposed
    through the properties below.
    """

    def __init__(self, name='node', parent=None):
        super(AbstractNodeItem, self).__init__(parent)
        self.setFlags(self.ItemIsSelectable | self.ItemIsMovable)
        self.setCacheMode(ITEM_CACHE_MODE)
        self.setZValue(Z_VAL_NODE)
        # serializable view attributes, surfaced via the properties below.
        self._properties = {
            'id': None,
            'name': name.strip(),
            'color': (13, 18, 23, 255),
            'border_color': (46, 57, 66, 255),
            'text_color': (255, 255, 255, 180),
            'type_': 'AbstractBaseNode',
            'selected': False,
            'disabled': False,
            'visible': False,
            'layout_direction': LayoutDirectionEnum.HORIZONTAL.value,
        }
        self._width = NodeEnum.WIDTH.value
        self._height = NodeEnum.HEIGHT.value

    def __repr__(self):
        return "{}.{}('{}')".format(
            self.__module__, self.__class__.__name__, self.name
        )

    def boundingRect(self):
        # item-space rect; the node's origin is its top-left corner.
        return QtCore.QRectF(0.0, 0.0, self._width, self._height)

    def mousePressEvent(self, event):
        """
        Re-implemented so the cached ``selected`` attribute tracks clicks.

        Args:
            event (QtWidgets.QGraphicsSceneMouseEvent): mouse event.
        """
        self._properties['selected'] = True
        super(AbstractNodeItem, self).mousePressEvent(event)

    def setSelected(self, selected):
        # keep the cached flag in sync before delegating to Qt.
        self._properties['selected'] = selected
        super(AbstractNodeItem, self).setSelected(selected)

    def pre_init(self, viewer, pos=None):
        """
        Hook invoked just before the node is added into the scene.

        Args:
            viewer (NodeGraphQt.widgets.viewer.NodeViewer): main viewer.
            pos (tuple): cursor position when spawned from tab search.
        """
        pass

    def post_init(self, viewer, pos=None):
        """
        Hook invoked right after the node was added into the scene.

        Args:
            viewer (NodeGraphQt.widgets.viewer.NodeViewer): main viewer.
            pos (tuple): cursor position when spawned from tab search.
        """
        pass

    @property
    def id(self):
        """unique identifier of the node."""
        return self._properties['id']

    @id.setter
    def id(self, value=''):
        self._properties['id'] = value

    @property
    def type_(self):
        """node type identifier string."""
        return self._properties['type_']

    @type_.setter
    def type_(self, value='NODE'):
        self._properties['type_'] = value

    @property
    def layout_direction(self):
        """port layout direction (see LayoutDirectionEnum)."""
        return self._properties['layout_direction']

    @layout_direction.setter
    def layout_direction(self, value=0):
        self._properties['layout_direction'] = value

    @property
    def size(self):
        """tuple of (width, height)."""
        return self._width, self._height

    @property
    def width(self):
        """node width in scene units."""
        return self._width

    @width.setter
    def width(self, value=0.0):
        self._width = value

    @property
    def height(self):
        """node height in scene units."""
        return self._height

    @height.setter
    def height(self, value=0.0):
        self._height = value

    @property
    def color(self):
        """background color as an (r, g, b, a) tuple."""
        return self._properties['color']

    @color.setter
    def color(self, value=(0, 0, 0, 255)):
        self._properties['color'] = value

    @property
    def text_color(self):
        """text color as an (r, g, b, a) tuple."""
        return self._properties['text_color']

    @text_color.setter
    def text_color(self, value=(100, 100, 100, 255)):
        self._properties['text_color'] = value

    @property
    def border_color(self):
        """border color as an (r, g, b, a) tuple."""
        return self._properties['border_color']

    @border_color.setter
    def border_color(self, value=(0, 0, 0, 255)):
        self._properties['border_color'] = value

    @property
    def disabled(self):
        """True when the node is flagged disabled."""
        return self._properties['disabled']

    @disabled.setter
    def disabled(self, value=False):
        self._properties['disabled'] = value

    @property
    def selected(self):
        # re-sync the cached flag with Qt's actual selection state.
        if self._properties['selected'] != self.isSelected():
            self._properties['selected'] = self.isSelected()
        return self._properties['selected']

    @selected.setter
    def selected(self, value=False):
        self.setSelected(value)

    @property
    def visible(self):
        """True when the item is shown in the scene."""
        return self._properties['visible']

    @visible.setter
    def visible(self, value=False):
        self._properties['visible'] = value
        self.setVisible(value)

    @property
    def xy_pos(self):
        """
        Return the item's scene position.

        ("node.pos" conflicted with "QGraphicsItem.pos()"
        so it was refactored to "xy_pos".)

        Returns:
            list[float]: x, y scene position.
        """
        scene_pos = self.scenePos()
        return [float(scene_pos.x()), float(scene_pos.y())]

    @xy_pos.setter
    def xy_pos(self, pos=None):
        """
        Set the item's scene position.

        ("node.pos" conflicted with "QGraphicsItem.pos()"
        so it was refactored to "xy_pos".)

        Args:
            pos (list[float]): x, y scene position.
        """
        pos = pos or [0.0, 0.0]
        self.setPos(pos[0], pos[1])

    @property
    def name(self):
        """display name of the node."""
        return self._properties['name']

    @name.setter
    def name(self, value=''):
        self._properties['name'] = value
        self.setToolTip('node: {}'.format(value))

    @property
    def properties(self):
        """
        Return the node view attributes.

        Returns:
            dict: {property_name: property_value}
        """
        attrs = {
            'width': self.width,
            'height': self.height,
            'pos': self.xy_pos,
        }
        attrs.update(self._properties)
        return attrs

    def viewer(self):
        """
        Return the main viewer.

        Returns:
            NodeGraphQt.widgets.viewer.NodeViewer: viewer object.
        """
        scene = self.scene()
        if scene:
            return scene.viewer()

    def delete(self):
        """
        Remove this node view from the scene.
        """
        scene = self.scene()
        if scene:
            scene.removeItem(self)

    def from_dict(self, node_dict):
        """
        Set the node view attributes from a serialized node dict.

        Args:
            node_dict (dict): serialized node dict.
        """
        settable = list(self._properties.keys()) + ['width', 'height', 'pos']
        for attr_name, attr_value in node_dict.items():
            if attr_name not in settable:
                continue
            # "node.pos" conflicted with "QGraphicsItem.pos()"
            # so it's stored on the view as "xy_pos".
            if attr_name == 'pos':
                attr_name = 'xy_pos'
            setattr(self, attr_name, attr_value)
/Blue-DiscordBot-3.2.0.tar.gz/Blue-DiscordBot-3.2.0/bluebot/launcher.py | import getpass
import os
import platform
import subprocess
import sys
import argparse
import asyncio
import aiohttp
import pkg_resources
from bluebot import MIN_PYTHON_VERSION
from bluebot.setup import (
basic_setup,
load_existing_config,
remove_instance,
remove_instance_interaction,
create_backup,
)
from bluebot.core import __version__, version_info as red_version_info, VersionInfo
from bluebot.core.cli import confirm
# "distro" is linux-only; it is used by debug_info() to report the
# distribution name and version.
if sys.platform == "linux":
    import distro

# Launched with no CLI arguments -> show the interactive menu.
INTERACTIVE_MODE = not len(sys.argv) > 1  # CLI flags = non-interactive
# Banner printed at the top of the interactive main menu.
INTRO = "==========================\nBlue Discord Bot - Launcher\n==========================\n"
# Platform flags used for console handling (cls/clear, window title, exe rename).
IS_WINDOWS = os.name == "nt"
IS_MAC = sys.platform == "darwin"
# Whether the running interpreter satisfies the bot's minimum python version.
PYTHON_OK = sys.version_info >= MIN_PYTHON_VERSION
def is_venv():
    """Return True if the process is in a venv or in a virtualenv."""
    # credit to @calebj
    in_virtualenv = hasattr(sys, "real_prefix")
    in_venv = hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix
    return in_virtualenv or in_venv
def parse_cli_args():
    """Parse the launcher's own CLI arguments.

    Returns:
        tuple: (parsed argparse namespace, list of unrecognised arguments
        that will be forwarded to the bot process).
    """
    instances = load_existing_config()
    parser = argparse.ArgumentParser(
        description="Blue - Discord Bot's launcher (V3)", allow_abbrev=False
    )
    # positional: which configured instance to operate on.
    parser.add_argument(
        "instancename",
        metavar="instancename",
        type=str,
        nargs="?",
        help="The instance to run",
        choices=list(instances.keys()),
    )
    parser.add_argument("--start", "-s", help="Starts Blue", action="store_true")
    parser.add_argument(
        "--auto-restart", help="Autorestarts Blue in case of issues", action="store_true"
    )
    parser.add_argument("--update", help="Updates Blue", action="store_true")
    parser.add_argument(
        "--update-dev", help="Updates Blue from the Github repo", action="store_true"
    )
    # optional pip extras to include when updating.
    parser.add_argument("--docs", help="Installs extra 'docs' when updating", action="store_true")
    parser.add_argument("--test", help="Installs extra 'test' when updating", action="store_true")
    parser.add_argument("--style", help="Installs extra 'style' when updating", action="store_true")
    parser.add_argument("--mongo", help="Installs extra 'mongo' when updating", action="store_true")
    parser.add_argument(
        "--debuginfo",
        help="Prints basic debug info that would be useful for support",
        action="store_true",
    )
    return parser.parse_known_args()
def update_red(dev=False, style=False, mongo=False, docs=False, test=False):
    """Update the bot through pip, optionally from git and with extras."""
    interpreter = sys.executable
    print("Updating Blue...")
    # If the user ran bluebot-launcher.exe, updating with pip will fail
    # on windows since the file is open and pip will try to overwrite it.
    # We have to rename bluebot-launcher.exe in this case.
    launcher_script = os.path.abspath(sys.argv[0])
    old_name = launcher_script + ".exe"
    new_name = launcher_script + ".old"
    renamed = False
    if IS_WINDOWS and "bluebot-launcher" in launcher_script:
        renamed = True
        print("Renaming {} to {}".format(old_name, new_name))
        if os.path.exists(new_name):
            os.remove(new_name)
        os.rename(old_name, new_name)
    # collect requested pip extras in a fixed order.
    egg_l = [
        extra
        for extra, wanted in (("style", style), ("mongo", mongo), ("docs", docs), ("test", test))
        if wanted
    ]
    if dev:
        package = "git+https://github.com/Cog-Creators/Blue-DiscordBot@V3/develop"
        if egg_l:
            package += "#egg=Blue-DiscordBot[{}]".format(", ".join(egg_l))
    else:
        package = "Blue-DiscordBot"
        if egg_l:
            package += "[{}]".format(", ".join(egg_l))
    arguments = [interpreter, "-m", "pip", "install", "-U", package]
    if not is_venv():
        # outside a virtualenv, install into the user site-packages.
        arguments.append("--user")
    code = subprocess.call(arguments)
    if code == 0:
        print("Blue has been updated")
    else:
        print("Something went wrong while updating!")
    # If bluebot wasn't updated, we renamed our .exe file and didn't replace it
    scripts = os.listdir(os.path.dirname(launcher_script))
    if renamed and "bluebot-launcher.exe" not in scripts:
        print("Renaming {} to {}".format(new_name, old_name))
        os.rename(new_name, old_name)
def run_red(selected_instance, autorestart: bool = False, cliflags=None):
    """Run an instance in a loop, restarting on exit code 26 when enabled."""
    interpreter = sys.executable
    while True:
        print("Starting {}...".format(selected_instance))
        cmd_list = [interpreter, "-m", "bluebot", selected_instance]
        if cliflags:
            cmd_list += cliflags
        status = subprocess.call(cmd_list)
        # exit code 26 is the bot's "please restart me" signal.
        if not (autorestart and status == 26):
            break
def cli_flag_getter():
    """Interactively build the list of cli flags to forward to the bot.

    Returns:
        list[str] or None: argv-ready tokens (each flag and its value as
        separate list items), or None if the user declines.
    """
    print("Would you like to enter any cli flags to pass to bluebot? (y/n)")
    resp = user_choice()
    if resp == "n":
        return None
    elif resp == "y":
        flags = []
        print("Ok, we will now walk through choosing cli flags")
        print("Would you like to specify an owner? (y/n)")
        print(
            "Please note that the owner is normally determined automatically from "
            "the bot's token, so you should only use that if you want to specify a "
            "user other than that one as the owner."
        )
        choice = user_choice()
        if choice == "y":
            print("Enter the user id for the owner")
            owner_id = user_choice()
            # fix: flag and value must be separate argv tokens; appending a
            # single "--owner <id>" string is not parseable by the bot's
            # argument parser once passed to subprocess.
            flags.extend(["--owner", owner_id])
        print("Would you like to specify any prefixes? (y/n)")
        choice = user_choice()
        if choice == "y":
            print(
                "Enter the prefixes, separated by a space (please note "
                "that prefixes containing a space will need to be added with [p]set prefix)"
            )
            prefixes = user_choice().split()
            for p in prefixes:
                # fix: same argv-token splitting as for --owner above.
                flags.extend(["-p", p])
        print("Would you like mentioning the bot to be a prefix? (y/n)")
        choice = user_choice()
        if choice == "y":
            flags.append("--mentionable")
        print(
            "Would you like to disable console input? Please note that features "
            "requiring console interaction may fail to work (y/n)"
        )
        choice = user_choice()
        if choice == "y":
            flags.append("--no-prompt")
        print("Would you like to start with no cogs loaded? (y/n)")
        choice = user_choice()
        if choice == "y":
            flags.append("--no-cogs")
        print("Do you want to do a dry run? (y/n)")
        choice = user_choice()
        if choice == "y":
            flags.append("--dry-run")
        print("Do you want to set the log level to debug? (y/n)")
        choice = user_choice()
        if choice == "y":
            flags.append("--debug")
        print(
            "Do you want the Dev cog loaded (thus enabling commands such as debug and repl)? (y/n)"
        )
        choice = user_choice()
        if choice == "y":
            flags.append("--dev")
        print("Do you want to enable RPC? (y/n)")
        choice = user_choice()
        if choice == "y":
            flags.append("--rpc")
        print("You have selected the following cli flags:\n\n")
        print("\n".join(flags))
        print("\nIf this looks good to you, type y. If you wish to start over, type n")
        choice = user_choice()
        if choice == "y":
            print("Done selecting cli flags")
            return flags
        else:
            # restart the whole wizard from scratch.
            print("Starting over")
            return cli_flag_getter()
    else:
        print("Invalid response! Let's try again")
        return cli_flag_getter()
def instance_menu():
    """Prompt the user to pick one of the configured instances.

    Returns:
        str or None: the chosen instance name, or None when none exist.
    """
    instances = load_existing_config()
    if not instances:
        print("No instances found!")
        return None
    print("Blue instance menu\n")
    name_num_map = {}
    counter = 0
    for counter, name in enumerate(instances.keys(), start=1):
        print("{}. {}\n".format(counter, name))
        name_num_map[str(counter)] = name
    while True:
        selection = user_choice()
        try:
            selection = int(selection)
        except ValueError:
            print("Invalid input! Please enter a number corresponding to an instance.")
            continue
        if 1 <= selection <= counter:
            return name_num_map[str(selection)]
        print("Invalid selection! Please try again")
async def reset_red():
    """Interactively remove every configured instance, with optional backups."""
    instances = load_existing_config()
    if not instances:
        print("No instance to delete.\n")
        return
    print("WARNING: You are about to remove ALL Blue instances on this computer.")
    print(
        "If you want to reset data of only one instance, "
        "please select option 5 in the launcher."
    )
    await asyncio.sleep(2)
    # fix: the original message misspelled "instances" as "instanes".
    print("\nIf you continue you will remove these instances.\n")
    for instance in list(instances.keys()):
        print(" - {}".format(instance))
    await asyncio.sleep(3)
    print('\nIf you want to reset all instances, type "I agree".')
    response = input("> ").strip()
    if response != "I agree":
        print("Cancelling...")
        return
    if confirm("\nDo you want to create a backup for an instance? (y/n) "):
        for index, instance in instances.items():
            print("\nRemoving {}...".format(index))
            await create_backup(index)
            await remove_instance(index)
    else:
        for index, instance in instances.items():
            await remove_instance(index)
    print("All instances have been removed.")
def clear_screen():
    """Clear the terminal window using the platform's shell command."""
    os.system("cls" if IS_WINDOWS else "clear")
def wait():
    """Pause on <enter> so the user can read the output (interactive mode only)."""
    if INTERACTIVE_MODE:
        input("Press enter to continue.")
def user_choice():
    """Read one line from the user, lowercased and stripped of whitespace."""
    response = input("> ")
    return response.strip().lower()
def extras_selector():
    """Ask which optional pip extras to install; return them as a list."""
    print("Enter any extra requirements you want installed\n")
    print("Options are: style, docs, test, mongo\n")
    return user_choice().split()
def development_choice(can_go_back=True):
    """Menu asking whether to install the stable or development version.

    Args:
        can_go_back (bool): when True, offer a "0. Go back" option.

    Returns:
        bool: False if the user backed out, True once an update ran.
    """
    while True:
        print("\n")
        print("Do you want to install stable or development version?")
        print("1. Stable version")
        print("2. Development version")
        if can_go_back:
            print("\n")
            print("0. Go back")
        choice = user_choice()
        print("\n")
        if choice in ("1", "2"):
            # both branches only differed in dev=...; merged, and the
            # redundant `True if x else False` expressions simplified.
            selected = extras_selector()
            update_red(
                dev=choice == "2",
                style="style" in selected,
                docs="docs" in selected,
                test="test" in selected,
                mongo="mongo" in selected,
            )
            break
        elif choice == "0" and can_go_back:
            return False
    clear_screen()
    return True
def debug_info():
    """Print environment details useful for support requests, then exit."""
    pyver = sys.version
    redver = pkg_resources.get_distribution("Blue-DiscordBot").version
    if IS_WINDOWS:
        os_info = platform.uname()
        osver = "{} {} (version {}) {}".format(
            os_info.system, os_info.release, os_info.version, os_info.machine
        )
    elif IS_MAC:
        os_info = platform.mac_ver()
        osver = "Mac OSX {} {}".format(os_info[0], os_info[2])
    else:
        # linux: "distro" is imported at module level on this platform.
        os_info = distro.linux_distribution()
        osver = "{} {}".format(os_info[0], os_info[1]).strip()
    user_who_ran = getpass.getuser()
    report_lines = [
        "Debug Info for Blue\n\n",
        "Python version: {}\n".format(pyver),
        "Blue version: {}\n".format(redver),
        "OS version: {}\n".format(osver),
        "System arch: {}\n".format(platform.machine()),
        "User: {}\n".format(user_who_ran),
    ]
    print("".join(report_lines))
    sys.exit(0)
async def is_outdated():
    """Query PyPI for the latest published release.

    Returns:
        tuple: (True when PyPI has a newer version than the running one,
        latest version string reported by PyPI).
    """
    red_pypi = "https://pypi.python.org/pypi/Blue-DiscordBot"
    async with aiohttp.ClientSession() as session:
        async with session.get("{}/json".format(red_pypi)) as r:
            data = await r.json()
    new_version = data["info"]["version"]
    return VersionInfo.from_str(new_version) > red_version_info, new_version
def main_menu():
    """Top-level interactive menu loop; blocks until the user chooses Exit."""
    if IS_WINDOWS:
        os.system("TITLE Blue - Discord Bot V3 Launcher")
    clear_screen()
    loop = asyncio.get_event_loop()
    # check PyPI once up front so the banner can advertise a newer release.
    outdated, new_version = loop.run_until_complete(is_outdated())
    while True:
        print(INTRO)
        print("\033[4mCurrent version:\033[0m {}".format(__version__))
        if outdated:
            print("Blue is outdated. {} is available.".format(new_version))
        print("")
        print("1. Run Blue w/ autorestart in case of issues")
        print("2. Run Blue")
        print("3. Update Blue")
        print("4. Create Instance")
        print("5. Remove Instance")
        print("6. Debug information (use this if having issues with the launcher or bot)")
        print("7. Reinstall Blue")
        print("0. Exit")
        choice = user_choice()
        if choice == "1":
            instance = instance_menu()
            if instance:
                cli_flags = cli_flag_getter()
                run_red(instance, autorestart=True, cliflags=cli_flags)
            wait()
        elif choice == "2":
            instance = instance_menu()
            if instance:
                cli_flags = cli_flag_getter()
                run_red(instance, autorestart=False, cliflags=cli_flags)
            wait()
        elif choice == "3":
            # development_choice() returns False when the user backs out.
            if development_choice():
                wait()
        elif choice == "4":
            basic_setup()
            wait()
        elif choice == "5":
            loop.run_until_complete(remove_instance_interaction())
            wait()
        elif choice == "6":
            # debug_info() prints the report and exits the process.
            debug_info()
        elif choice == "7":
            # nested reinstall sub-menu; loops until "0. Back".
            while True:
                clear_screen()
                print("==== Reinstall Blue ====")
                print(
                    "1. Reinstall Blue requirements (discard code changes, keep data and 3rd party cogs)"
                )
                print("2. Reset all data")
                print("3. Factory reset (discard code changes, reset all data)")
                print("\n")
                print("0. Back")
                choice = user_choice()
                if choice == "1":
                    if development_choice():
                        wait()
                elif choice == "2":
                    loop.run_until_complete(reset_red())
                    wait()
                elif choice == "3":
                    # wipe everything, then force a reinstall (no going back).
                    loop.run_until_complete(reset_red())
                    development_choice(can_go_back=False)
                    wait()
                elif choice == "0":
                    break
        elif choice == "0":
            break
        clear_screen()
def main():
    """Launcher entry point for both interactive and flag-driven use."""
    args, flags_to_pass = parse_cli_args()
    if not PYTHON_OK:
        print(
            f"Python {'.'.join(map(str, MIN_PYTHON_VERSION))} is required to run Blue, but you "
            f"have {sys.version}! Please update Python."
        )
        sys.exit(1)
    if args.debuginfo:  # Check first since the function triggers an exit
        debug_info()
    if args.update and args.update_dev:  # Conflicting args, so error out
        raise RuntimeError(
            "\nUpdate requested but conflicting arguments provided.\n\n"
            "Please try again using only one of --update or --update-dev"
        )
    if args.update:
        update_red(style=args.style, docs=args.docs, test=args.test, mongo=args.mongo)
    elif args.update_dev:
        update_red(dev=True, style=args.style, docs=args.docs, test=args.test, mongo=args.mongo)
    # no CLI arguments at all -> interactive menu; otherwise honour --start.
    if INTERACTIVE_MODE:
        main_menu()
    elif args.start:
        print("Starting Blue...")
        run_red(args.instancename, autorestart=args.auto_restart, cliflags=flags_to_pass)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # ctrl+c from any prompt exits the launcher cleanly.
        print("Exiting...")
/CProfileV-1.0.7.tar.gz/CProfileV-1.0.7/README.rst | =========
cprofilev
=========
An easier way to use `cProfile <https://docs.python.org/2/library/profile.html>`_.
______________________________
about
*****
cprofilev runs and profiles a given Python program and outputs a simple HTML view of the collected statistics.
See: http://ymichael.com/2014/03/08/profiling-python-with-cprofile.html on how to make sense of the profiled statistics.
installation
************
*on most UNIX-like systems, you'll probably need to run the following `install` command as root or by using sudo*
::
pip install cprofilev
quickstart
**********
1. Simply run your python program in with the **-m cprofilev** flag.
::
$ python -m cprofilev /path/to/python/program ...
2. Navigate to http://localhost:4000 to view profile statistics of your Python program (even while it's still running!)
Alternatively you can output view cprofile output using the **-f flag**:
::
# NOTE this is cProfile not cprofilev
$ python -m cProfile -o /path/to/save/output /path/to/python/program ...
$ cprofilev -f /path/to/save/output
usage
*****
::
usage: cprofilev.py [--version] [-a ADDRESS] [-p PORT] scriptfile [arg] ...
An easier way to use cProfile.
positional arguments:
scriptfile The python script file to run and profile.
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-a ADDRESS, --address ADDRESS
The address to listen on. (defaults to 127.0.0.1).
-p PORT, --port PORT The port to listen on. (defaults to 4000).
-f FILE, --file FILE cProfile output to view.
If specified, the scriptfile provided will be ignored.
Dependencies
************
`bottle <http://bottlepy.org>`_: used for serving the html page.
| PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/image/SlideShow.js | define(["dijit","dojo","dojox","dojo/require!dojo/string,dojo/fx,dijit/_Widget,dijit/_Templated"],function(_1,_2,_3){
_2.provide("dojox.image.SlideShow");
_2.require("dojo.string");
_2.require("dojo.fx");
_2.require("dijit._Widget");
_2.require("dijit._Templated");
_2.declare("dojox.image.SlideShow",[_1._Widget,_1._Templated],{imageHeight:375,imageWidth:500,title:"",titleTemplate:"${title} <span class=\"slideShowCounterText\">(${current} of ${total})</span>",noLink:false,loop:true,hasNav:true,images:[],pageSize:20,autoLoad:true,autoStart:false,fixedHeight:false,imageStore:null,linkAttr:"link",imageLargeAttr:"imageUrl",titleAttr:"title",slideshowInterval:3,templateString:_2.cache("dojox.image","resources/SlideShow.html","<div dojoAttachPoint=\"outerNode\" class=\"slideShowWrapper\">\n\t<div style=\"position:relative;\" dojoAttachPoint=\"innerWrapper\">\n\t\t<div class=\"slideShowNav\" dojoAttachEvent=\"onclick: _handleClick\">\n\t\t\t<div class=\"dijitInline slideShowTitle\" dojoAttachPoint=\"titleNode\">${title}</div>\n\t\t</div>\n\t\t<div dojoAttachPoint=\"navNode\" class=\"slideShowCtrl\" dojoAttachEvent=\"onclick: _handleClick\">\n\t\t\t<span dojoAttachPoint=\"navPrev\" class=\"slideShowCtrlPrev\"></span>\n\t\t\t<span dojoAttachPoint=\"navPlay\" class=\"slideShowCtrlPlay\"></span>\n\t\t\t<span dojoAttachPoint=\"navNext\" class=\"slideShowCtrlNext\"></span>\n\t\t</div>\n\t\t<div dojoAttachPoint=\"largeNode\" class=\"slideShowImageWrapper\"></div>\t\t\n\t\t<div dojoAttachPoint=\"hiddenNode\" class=\"slideShowHidden\"></div>\n\t</div>\n</div>"),_imageCounter:0,_tmpImage:null,_request:null,postCreate:function(){
this.inherited(arguments);
var _4=document.createElement("img");
_4.setAttribute("width",this.imageWidth);
_4.setAttribute("height",this.imageHeight);
if(this.hasNav){
_2.connect(this.outerNode,"onmouseover",this,function(_5){
try{
this._showNav();
}
catch(e){
}
});
_2.connect(this.outerNode,"onmouseout",this,function(_6){
try{
this._hideNav(_6);
}
catch(e){
}
});
}
this.outerNode.style.width=this.imageWidth+"px";
_4.setAttribute("src",this._blankGif);
var _7=this;
this.largeNode.appendChild(_4);
this._tmpImage=this._currentImage=_4;
this._fitSize(true);
this._loadImage(0,_2.hitch(this,"showImage",0));
this._calcNavDimensions();
_2.style(this.navNode,"opacity",0);
},setDataStore:function(_8,_9,_a){
this.reset();
var _b=this;
this._request={query:{},start:_9.start||0,count:_9.count||this.pageSize,onBegin:function(_c,_d){
_b.maxPhotos=_c;
}};
if(_9.query){
_2.mixin(this._request.query,_9.query);
}
if(_a){
_2.forEach(["imageLargeAttr","linkAttr","titleAttr"],function(_e){
if(_a[_e]){
this[_e]=_a[_e];
}
},this);
}
var _f=function(_10){
_b.maxPhotos=_10.length;
_b._request.onComplete=null;
if(_b.autoStart){
_b.imageIndex=-1;
_b.toggleSlideShow();
}else{
_b.showImage(0);
}
};
this.imageStore=_8;
this._request.onComplete=_f;
this._request.start=0;
this.imageStore.fetch(this._request);
},reset:function(){
_2.query("> *",this.largeNode).orphan();
this.largeNode.appendChild(this._tmpImage);
_2.query("> *",this.hiddenNode).orphan();
_2.forEach(this.images,function(img){
if(img&&img.parentNode){
img.parentNode.removeChild(img);
}
});
this.images=[];
this.isInitialized=false;
this._imageCounter=0;
},isImageLoaded:function(_11){
return this.images&&this.images.length>_11&&this.images[_11];
},moveImageLoadingPointer:function(_12){
this._imageCounter=_12;
},destroy:function(){
if(this._slideId){
this._stop();
}
this.inherited(arguments);
},showNextImage:function(_13,_14){
if(_13&&this._timerCancelled){
return false;
}
if(this.imageIndex+1>=this.maxPhotos){
if(_13&&(this.loop||_14)){
this.imageIndex=-1;
}else{
if(this._slideId){
this._stop();
}
return false;
}
}
this.showImage(this.imageIndex+1,_2.hitch(this,function(){
if(_13){
this._startTimer();
}
}));
return true;
},toggleSlideShow:function(){
if(this._slideId){
this._stop();
}else{
_2.toggleClass(this.domNode,"slideShowPaused");
this._timerCancelled=false;
var idx=this.imageIndex;
if(idx<0||(this.images[idx]&&this.images[idx]._img.complete)){
var _15=this.showNextImage(true,true);
if(!_15){
this._stop();
}
}else{
var _16=_2.subscribe(this.getShowTopicName(),_2.hitch(this,function(_17){
setTimeout(_2.hitch(this,function(){
if(_17.index==idx){
var _18=this.showNextImage(true,true);
if(!_18){
this._stop();
}
_2.unsubscribe(_16);
}
}),this.slideshowInterval*1000);
}));
_2.publish(this.getShowTopicName(),[{index:idx,title:"",url:""}]);
}
}
},getShowTopicName:function(){
return (this.widgetId||this.id)+"/imageShow";
},getLoadTopicName:function(){
return (this.widgetId?this.widgetId:this.id)+"/imageLoad";
},showImage:function(_19,_1a){
if(!_1a&&this._slideId){
this.toggleSlideShow();
}
var _1b=this;
var _1c=this.largeNode.getElementsByTagName("div");
this.imageIndex=_19;
var _1d=function(){
if(_1b.images[_19]){
while(_1b.largeNode.firstChild){
_1b.largeNode.removeChild(_1b.largeNode.firstChild);
}
_2.style(_1b.images[_19],"opacity",0);
_1b.largeNode.appendChild(_1b.images[_19]);
_1b._currentImage=_1b.images[_19]._img;
_1b._fitSize();
var _1e=function(a,b,c){
var img=_1b.images[_19].firstChild;
if(img.tagName.toLowerCase()!="img"){
img=img.firstChild;
}
var _1f=img.getAttribute("title")||"";
if(_1b._navShowing){
_1b._showNav(true);
}
_2.publish(_1b.getShowTopicName(),[{index:_19,title:_1f,url:img.getAttribute("src")}]);
if(_1a){
_1a(a,b,c);
}
_1b._setTitle(_1f);
};
_2.fadeIn({node:_1b.images[_19],duration:300,onEnd:_1e}).play();
}else{
_1b._loadImage(_19,function(){
_1b.showImage(_19,_1a);
});
}
};
if(_1c&&_1c.length>0){
_2.fadeOut({node:_1c[0],duration:300,onEnd:function(){
_1b.hiddenNode.appendChild(_1c[0]);
_1d();
}}).play();
}else{
_1d();
}
},_fitSize:function(_20){
if(!this.fixedHeight||_20){
var _21=(this._currentImage.height+(this.hasNav?20:0));
_2.style(this.innerWrapper,"height",_21+"px");
return;
}
_2.style(this.largeNode,"paddingTop",this._getTopPadding()+"px");
},_getTopPadding:function(){
if(!this.fixedHeight){
return 0;
}
return (this.imageHeight-this._currentImage.height)/2;
},_loadNextImage:function(){
if(!this.autoLoad){
return;
}
while(this.images.length>=this._imageCounter&&this.images[this._imageCounter]){
this._imageCounter++;
}
this._loadImage(this._imageCounter);
},_loadImage:function(_22,_23){
if(this.images[_22]||!this._request){
return;
}
var _24=_22-(_22%(this._request.count||this.pageSize));
this._request.start=_24;
this._request.onComplete=function(_25){
var _26=_22-_24;
if(_25&&_25.length>_26){
_27(_25[_26]);
}else{
}
};
var _28=this;
var _29=this.imageStore;
var _27=function(_2a){
var url=_28.imageStore.getValue(_2a,_28.imageLargeAttr);
var img=new Image();
var div=_2.create("div",{id:_28.id+"_imageDiv"+_22});
div._img=img;
var _2b=_28.imageStore.getValue(_2a,_28.linkAttr);
if(!_2b||_28.noLink){
div.appendChild(img);
}else{
var a=_2.create("a",{"href":_2b,"target":"_blank"},div);
a.appendChild(img);
}
_2.connect(img,"onload",function(){
if(_29!=_28.imageStore){
return;
}
_28._fitImage(img);
_2.attr(div,{"width":_28.imageWidth,"height":_28.imageHeight});
_2.publish(_28.getLoadTopicName(),[_22]);
setTimeout(function(){
_28._loadNextImage();
},1);
if(_23){
_23();
}
});
_28.hiddenNode.appendChild(div);
var _2c=_2.create("div",{className:"slideShowTitle"},div);
_28.images[_22]=div;
_2.attr(img,"src",url);
var _2d=_28.imageStore.getValue(_2a,_28.titleAttr);
if(_2d){
_2.attr(img,"title",_2d);
}
};
this.imageStore.fetch(this._request);
},_stop:function(){
if(this._slideId){
clearTimeout(this._slideId);
}
this._slideId=null;
this._timerCancelled=true;
_2.removeClass(this.domNode,"slideShowPaused");
},_prev:function(){
if(this.imageIndex<1){
return;
}
this.showImage(this.imageIndex-1);
},_next:function(){
this.showNextImage();
},_startTimer:function(){
var id=this.id;
this._slideId=setTimeout(function(){
_1.byId(id).showNextImage(true);
},this.slideshowInterval*1000);
},_calcNavDimensions:function(){
_2.style(this.navNode,"position","absolute");
_2.style(this.navNode,"top","-10000px");
_2.style(this.navPlay,"marginLeft",0);
this.navPlay._size=_2.marginBox(this.navPlay);
this.navPrev._size=_2.marginBox(this.navPrev);
this.navNext._size=_2.marginBox(this.navNext);
_2.style(this.navNode,{"position":"",top:""});
},_setTitle:function(_2e){
this.titleNode.innerHTML=_2.string.substitute(this.titleTemplate,{title:_2e,current:1+this.imageIndex,total:this.maxPhotos||""});
},_fitImage:function(img){
var _2f=img.width;
var _30=img.height;
if(_2f>this.imageWidth){
_30=Math.floor(_30*(this.imageWidth/_2f));
img.height=_30;
img.width=_2f=this.imageWidth;
}
if(_30>this.imageHeight){
_2f=Math.floor(_2f*(this.imageHeight/_30));
img.height=this.imageHeight;
img.width=_2f;
}
},_handleClick:function(e){
switch(e.target){
case this.navNext:
this._next();
break;
case this.navPrev:
this._prev();
break;
case this.navPlay:
this.toggleSlideShow();
break;
}
},_showNav:function(_31){
if(this._navShowing&&!_31){
return;
}
this._calcNavDimensions();
_2.style(this.navNode,"marginTop","0px");
var _32=_2.style(this.navNode,"width")/2-this.navPlay._size.w/2-this.navPrev._size.w;
_2.style(this.navPlay,"marginLeft",_32+"px");
var _33=_2.marginBox(this.outerNode);
var _34=this._currentImage.height-this.navPlay._size.h-10+this._getTopPadding();
if(_34>this._currentImage.height){
_34+=10;
}
_2[this.imageIndex<1?"addClass":"removeClass"](this.navPrev,"slideShowCtrlHide");
_2[this.imageIndex+1>=this.maxPhotos?"addClass":"removeClass"](this.navNext,"slideShowCtrlHide");
var _35=this;
if(this._navAnim){
this._navAnim.stop();
}
if(this._navShowing){
return;
}
this._navAnim=_2.fadeIn({node:this.navNode,duration:300,onEnd:function(){
_35._navAnim=null;
}});
this._navAnim.play();
this._navShowing=true;
},_hideNav:function(e){
if(!e||!this._overElement(this.outerNode,e)){
var _36=this;
if(this._navAnim){
this._navAnim.stop();
}
this._navAnim=_2.fadeOut({node:this.navNode,duration:300,onEnd:function(){
_36._navAnim=null;
}});
this._navAnim.play();
this._navShowing=false;
}
},_overElement:function(_37,e){
if(typeof (_2)=="undefined"){
return false;
}
_37=_2.byId(_37);
var m={x:e.pageX,y:e.pageY};
var bb=_2.position(_37,true);
return (m.x>=bb.x&&m.x<=(bb.x+bb.w)&&m.y>=bb.y&&m.y<=(top+bb.h));
}});
}); | PypiClean |
/AndroidTools-0.2.4.tar.gz/AndroidTools-0.2.4/Jce_b/writer.py | from typing import Union, List, Optional, Any, MutableMapping
from .buffer import ByteBuffer
from .struct import IJceStruct
DEFAULT_ENCODING = "utf-8"
class JceWriter:
"""
写入jce字节流
"""
def __init__(self, data: Optional[Union[bytes, bytearray, ByteBuffer]] = None):
if data is None:
self.buffer = ByteBuffer()
elif isinstance(data, (bytes, bytearray)):
self.buffer = ByteBuffer(data)
elif isinstance(data, ByteBuffer):
self.buffer = data
else:
raise TypeError(f"can't init JceWriter with data type {data.__class__.__name__}")
def write_head(self, type_: int, tag: int) -> None:
"""
:param type_:
:param tag:
:return:
"""
if tag < 15:
data = bytes([tag << 4 | type_]) # go的byte就是uint8
self.buffer.write_bytes(data)
elif tag < 256:
data = bytes([0xF0 | type_]) # 修改 0xF0 = 240
self.buffer.write_bytes(data)
self.buffer.write_bytes(bytes([tag]))
def write_byte(self, b: bytes, tag: int) -> "JceWriter":
"""
写入一个字节
:param b:
:param tag:
:return:
"""
if len(b) != 1:
raise ValueError("write_byte only accept single byte")
if b[0] == 0:
self.write_head(12, tag)
else:
self.write_head(0, tag)
self.buffer.write_bytes(b)
return self
def write_bool(self, b: bool, tag: int) -> None:
if b:
data: bytes = bytes([1])
else:
data: bytes = bytes([0])
self.write_byte(data, tag)
def write_int16(self, n: int, tag: int) -> None:
if -128 <= n <= 127:
self.write_byte(bytes([n]), tag)
return
self.write_head(1, tag)
self.buffer.write_int2(n)
def write_int32(self, n: int, tag: int) -> "JceWriter":
if -32768 <= n <= 32767:
self.write_int16(n, tag)
return self
self.write_head(2, tag)
self.buffer.write_int4(n)
return self
def write_int64(self, n: int, tag: int) -> "JceWriter":
if -2147483648 <= n <= 2147483647:
return self.write_int32(n, tag)
self.write_head(3, tag)
self.buffer.write_int8(n)
return self
def write_float32(self, n: float, tag: int):
self.write_head(4, tag)
self.buffer.write_float(n)
def write_float64(self, n: float, tag: int): # 就是double
self.write_head(5, tag)
self.buffer.write_double(n)
def write_string(self, s: str, tag: int) -> "JceWriter":
"""
type 6 or 7 >255就得7了
:param s:
:param tag:
:return:
"""
by: bytes = s.encode(DEFAULT_ENCODING)
if len(by) > 255:
self.write_head(7, tag)
self.buffer.write_bytes(len(by).to_bytes(4, "big")) # 4个字节的长度
self.buffer.write_bytes(by)
return self
self.write_head(6, tag)
self.buffer.write_bytes(bytes([len(by)])) # 1byte
self.buffer.write_bytes(by)
return self
def write_bytes(self, data: Union[bytes, bytearray], tag: int):
self.write_head(13, tag)
self.write_head(0, 0)
self.write_int32(len(data), 0)
self.buffer.write_bytes(data)
return self.buffer.bytes
def write_int64_list(self, data: List[int], tag: int):
"""
go: WriteInt64Slice
:param data:
:param tag:
:return:
"""
self.write_head(9, tag)
if len(data) == 0:
self.write_int32(0, 0)
return
self.write_int32(len(data), 0)
for i in data:
self.write_int64(i, 0)
def write_list(self, data: List[Any], tag: int):
if not isinstance(data, list):
return
self.write_head(9, tag)
if len(data) == 0:
self.write_int32(0, 0)
return
self.write_int32(len(data), 0)
for i in data:
self.write_object(i, 0)
def write_jce_struct_list(self, data: List[IJceStruct], tag: int):
self.write_head(9, tag)
if len(data) == 0:
self.write_int32(0, 0)
return
self.write_int32(len(data), 0)
for i in data:
self.write_jce_struct(i, 0)
def write_map(self, m: dict, tag: int):
if m is None:
self.write_head(8, tag)
self.write_int32(0, 0)
return
if not isinstance(m, MutableMapping):
return
self.write_head(8, tag)
self.write_int32(len(m), 0)
for k, v in m.items():
self.write_object(k, 0)
self.write_object(v, 1)
return self.buffer.bytes
def write_object(self, data: Any, tag: int):
if isinstance(data, MutableMapping):
self.write_map(data, tag)
return
if isinstance(data, list):
self.write_list(data, tag)
return
if isinstance(data, (bytes, bytearray)):
if len(data) == 1:
self.write_byte(data, tag)
else:
self.write_bytes(data, tag)
return
if isinstance(data, bool):
self.write_bool(data, tag)
elif isinstance(data, int):
self.write_int64(data, tag)
elif isinstance(data, float):
self.write_float64(data, tag)
elif isinstance(data, str):
self.write_string(data, tag)
elif isinstance(data, IJceStruct):
self.write_jce_struct(data, tag)
    def write_jce_struct_raw(self, data: IJceStruct):
        """Serialize every field of *data* (a pydantic model) without the
        surrounding struct head/tail markers.

        TODO: attach "jce_id" metadata (via pydantic Field extras) to all the
        models used earlier -- without it this cannot work.

        :param data: pydantic-based JCE struct whose fields carry a
            ``jce_id`` entry in their Field(...) extra metadata
        """
        # Each pydantic field's "jce_id" extra becomes the JCE tag for
        # that field's serialized value.
        for field_name, val in data.__fields__.items():
            jce_id: int = val.field_info.extra["jce_id"]
            field_val = getattr(data, field_name)
            self.write_object(field_val, jce_id)
def write_jce_struct(self, data: Union[bytes, bytearray], tag: int):
self.write_head(10, tag)
# 修改 原先的 write_jce_struct_raw 不知道是什么操作
self.buffer.write_bytes(data)
self.write_head(11, 0)
return self.buffer.bytes
def bytes(self) -> bytearray:
"""直接返回的数据对象"""
return self.buffer.bytes | PypiClean |
from engine.role import Role
from engine.priority import Priority
from static_risky_roles import STATIC_RISKY_ROLES
from engine.role_binding import RoleBinding
from kubernetes.stream import stream
from engine.pod import Pod
from engine.container import Container
import json
from api import api_client
from engine.subject import Subject
from misc.constants import *
from kubernetes.client.rest import ApiException
# region - Roles and ClusterRoles
def is_risky_resource_name_exist(source_rolename, source_resourcenames):
    """Return True if any referenced resource name resolves to a ClusterRole
    that is itself risky (used to detect privilege escalation via 'bind')."""
    for resource_name in source_resourcenames:
        # Skip self-references to prevent cycles.
        if resource_name == source_rolename:
            continue
        # TODO: Need to allow this check also for 'roles' resource_name, should consider namespace...
        role = get_role_by_name_and_kind(resource_name, CLUSTER_ROLE_KIND)
        if role is not None:
            risky, _priority = is_risky_role(role)
            if risky:
                return True
    return False
def is_rule_contains_risky_rule(source_role_name, source_rule, risky_rule):
    """Return True when *source_rule* grants at least everything described by
    *risky_rule* (a template loaded from risky_roles.yaml).

    Special case: when the template restricts ``resource_names``, the rule is
    only considered a match if it can 'bind' roles/clusterroles and one of the
    named resources resolves to a risky role (escalation via bind).

    :param source_role_name: name of the role being examined (used to avoid
        self-referential cycles in the resource-name check)
    :param source_rule: a rule from the examined role
    :param risky_rule: a rule from a static risky-role template
    """
    is_contains = True
    is_bind_verb_found = False
    is_role_resource_found = False
    # Optional: uncomment and shift everything below till the 'return' to add any rules that have "*" in their verbs or resources.
    # currently it is being handled in risky_roles.yaml partially
    # if (source_rule.verbs is not None and "*" not in source_rule.verbs) and (source_rule.resources is not None and "*" not in source_rule.resources):
    # Every verb required by the template must appear in the source rule.
    for verb in risky_rule.verbs:
        if verb not in source_rule.verbs:
            is_contains = False
            break
        if verb.lower() == "bind":
            is_bind_verb_found = True
    # Likewise every resource required by the template.
    if is_contains and source_rule.resources is not None:
        for resource in risky_rule.resources:
            if resource not in source_rule.resources:
                is_contains = False
                break
            if resource.lower() == "roles" or resource.lower() == "clusterroles":
                is_role_resource_found = True
    # Templates restricted by resource_names only match through the
    # bind-to-a-risky-role escalation path.
    if is_contains and risky_rule.resource_names is not None:
        is_contains = False
        if is_bind_verb_found and is_role_resource_found:
            is_risky = is_risky_resource_name_exist(source_role_name, source_rule.resource_names)
            if is_risky:
                is_contains = True
            else:
                is_contains = False
    return is_contains
def get_role_by_name_and_kind(name, kind, namespace=None):
    """Return the first role of *kind* whose metadata.name equals *name*,
    or None when no such role exists.

    NOTE(review): *namespace* is currently unused; it is kept for API
    compatibility -- confirm whether namespaced filtering was intended.
    """
    for role in get_roles_by_kind(kind).items:
        if role.metadata.name == name:
            return role
    return None
def are_rules_contain_other_rules(source_role_name, source_rules, target_rules):
    """Return True when every rule in *target_rules* is contained in at least
    one rule of *source_rules*.

    BUGFIX: the previous version incremented a counter for every
    (source, target) match and compared it to ``len(target_rules)``, so a
    single target rule matched by two source rules could satisfy the count
    while other target rules were never matched (and, symmetrically, extra
    matches could overshoot the exact ``==`` comparison). Each target rule is
    now counted at most once.

    :param source_role_name: name of the examined role (forwarded for
        cycle-avoidance in the resource-name check)
    :param source_rules: rules of the examined role (may be None)
    :param target_rules: rules of a risky-role template (may be None)
    """
    if not (target_rules and source_rules):
        return False
    matched_rules = 0
    for target_rule in target_rules:
        for source_rule in source_rules:
            if is_rule_contains_risky_rule(source_role_name, source_rule, target_rule):
                matched_rules += 1
                break  # count each target rule only once
    return matched_rules == len(target_rules)
def is_risky_role(role):
    """Compare *role* against the static risky-role templates.

    :return: ``(is_risky, priority)`` -- the priority of the first matching
        template, or ``Priority.LOW`` when nothing matches.
    """
    for risky_role in STATIC_RISKY_ROLES:
        if are_rules_contain_other_rules(role.metadata.name, role.rules, risky_role.rules):
            return True, risky_role.priority
    return False, Priority.LOW
def find_risky_roles(roles, kind):
    """Wrap every risky role in *roles* in a Role result object."""
    risky_roles = []
    for candidate in roles:
        risky, priority = is_risky_role(candidate)
        if not risky:
            continue
        risky_roles.append(Role(candidate.metadata.name, priority,
                                rules=candidate.rules,
                                namespace=candidate.metadata.namespace,
                                kind=kind,
                                time=candidate.metadata.creation_timestamp))
    return risky_roles
def get_roles_by_kind(kind):
    """List namespaced Roles or ClusterRoles depending on *kind*."""
    if kind == ROLE_KIND:
        return api_client.RbacAuthorizationV1Api.list_role_for_all_namespaces()
    # ClusterRoles go through the patched client; the official
    # RbacAuthorizationV1Api.list_cluster_role call is not used.
    return api_client.api_temp.list_cluster_role()
def get_risky_role_by_kind(kind):
    """Fetch all roles of *kind* and filter them down to the risky ones."""
    all_roles = get_roles_by_kind(kind)
    if all_roles is None:
        return []
    return find_risky_roles(all_roles.items, kind)
def get_risky_roles_and_clusterroles():
    """Return the risky Roles and ClusterRoles as one combined list."""
    return get_risky_roles() + get_risky_clusterroles()
def get_risky_roles():
    """Return the risky namespaced Roles.

    Consistency fix: use the ROLE_KIND constant (as the rest of the module
    does, e.g. get_roles_by_kind compares against it) instead of the magic
    string 'Role'.
    """
    return get_risky_role_by_kind(ROLE_KIND)
def get_risky_clusterroles():
    """Return the risky ClusterRoles.

    Consistency fix: use the CLUSTER_ROLE_KIND constant instead of the magic
    string 'ClusterRole', matching the comparisons elsewhere in this module.
    """
    return get_risky_role_by_kind(CLUSTER_ROLE_KIND)
# endregion - Roles and ClusterRoles
# region - RoleBindings and ClusterRoleBindings
def is_risky_rolebinding(risky_roles, rolebinding):
    """Return ``(is_risky, priority)``: the binding is risky iff it references
    a known risky role by name (the role kind could also be checked here)."""
    for risky_role in risky_roles:
        if rolebinding.role_ref.name == risky_role.name:
            return True, risky_role.priority
    return False, Priority.LOW
def find_risky_rolebindings_or_clusterrolebindings(risky_roles, rolebindings, kind):
    """Wrap every binding that references a risky role in a RoleBinding
    result object of the given *kind*."""
    found = []
    for binding in rolebindings:
        risky, priority = is_risky_rolebinding(risky_roles, binding)
        if risky:
            found.append(RoleBinding(binding.metadata.name,
                                     priority,
                                     namespace=binding.metadata.namespace,
                                     kind=kind, subjects=binding.subjects,
                                     time=binding.metadata.creation_timestamp))
    return found
def get_rolebinding_by_kind_all_namespaces(kind):
    """List RoleBindings across all namespaces.

    NOTE(review): for any *kind* other than ROLE_BINDING_KIND this returns an
    empty list -- the ClusterRoleBinding branch is commented out pending an
    upstream client fix (see TODO below); callers fetch cluster bindings via
    ``api_client.api_temp`` instead.
    """
    all_roles = []
    if kind == ROLE_BINDING_KIND:
        all_roles = api_client.RbacAuthorizationV1Api.list_role_binding_for_all_namespaces()
    # else:
    # TODO: check if it was fixed
    # all_roles = api_client.RbacAuthorizationV1Api.list_cluster_role_binding()
    return all_roles
def get_all_risky_rolebinding():
    """Return the risky ClusterRoleBindings followed by the risky
    RoleBindings, computed against one shared risky-role set."""
    risky_roles = get_risky_roles_and_clusterroles()
    return (get_risky_clusterrolebindings(risky_roles)
            + get_risky_rolebindings(risky_roles))
def get_risky_rolebindings(all_risky_roles=None):
    """Return the risky RoleBindings; the risky-role set is computed when the
    caller does not supply one."""
    if all_risky_roles is None:
        all_risky_roles = get_risky_roles_and_clusterroles()
    bindings = get_rolebinding_by_kind_all_namespaces(ROLE_BINDING_KIND)
    return find_risky_rolebindings_or_clusterrolebindings(all_risky_roles,
                                                          bindings.items,
                                                          "RoleBinding")
def get_risky_clusterrolebindings(all_risky_roles=None):
    """Return the risky ClusterRoleBindings; the risky-role set is computed
    when the caller does not supply one."""
    if all_risky_roles is None:
        all_risky_roles = get_risky_roles_and_clusterroles()
    # list_cluster_role_binding on the official client is broken
    # (https://github.com/kubernetes-client/python/issues/577 -- remove this
    # workaround once solved), so the patched api_temp client is used.
    bindings = api_client.api_temp.list_cluster_role_binding()
    return find_risky_rolebindings_or_clusterrolebindings(all_risky_roles,
                                                          bindings,
                                                          "ClusterRoleBinding")
# endregion - RoleBindings and ClusterRoleBindings
# region- Risky Users
def get_all_risky_subjects():
    """Return one Subject per distinct subject of any risky (cluster)
    rolebinding.

    Improvements: the deduplication key string was built twice per subject
    (now built once), and ``== None`` is replaced with the idiomatic
    ``is None``.
    """
    all_risky_users = []
    seen_keys = set()
    for risky_rolebinding in get_all_risky_rolebinding():
        # In case 'risky_rolebinding.subjects' is 'None', 'or []' prevents an exception.
        for user in risky_rolebinding.subjects or []:
            # Deduplicate subjects across bindings.
            key = ''.join((user.kind, user.name, str(user.namespace)))
            if key in seen_keys:
                continue
            seen_keys.add(key)
            # A ServiceAccount subject without an explicit namespace lives in
            # the binding's namespace.
            if user.namespace is None and user.kind.lower() == "serviceaccount":
                user.namespace = risky_rolebinding.namespace
            all_risky_users.append(Subject(user, risky_rolebinding.priority))
    return all_risky_users
# endregion - Risky Users
# region- Risky Pods
'''
Example of JWT token decoded:
{
'kubernetes.io/serviceaccount/service-account.uid': '11a8e2a1-6f07-11e8-8d52-000c2904e34b',
'iss': 'kubernetes/serviceaccount',
'sub': 'system:serviceaccount:default:myservice',
'kubernetes.io/serviceaccount/namespace': 'default',
'kubernetes.io/serviceaccount/secret.name': 'myservice-token-btwvr',
'kubernetes.io/serviceaccount/service-account.name': 'myservice'
}
'''
def pod_exec_read_token(pod, container_name, path):
    """Exec ``cat <path>`` inside the given container and return its stdout
    (empty string when the exec fails)."""
    exec_command = ['/bin/sh', '-c', 'cat ' + path]
    resp = ''
    try:
        resp = stream(api_client.CoreV1Api.connect_post_namespaced_pod_exec,
                      pod.metadata.name, pod.metadata.namespace,
                      command=exec_command, container=container_name,
                      stderr=False, stdin=False,
                      stdout=True, tty=False)
    except ApiException as e:
        print("Exception when calling api_client.CoreV1Api->connect_post_namespaced_pod_exec: %s\n" % e)
        print('{0}, {1}'.format(pod.metadata.name, pod.metadata.namespace))
    return resp
def pod_exec_read_token_two_paths(pod, container_name):
    """Read the service-account token, trying both well-known mount paths."""
    token = pod_exec_read_token(pod, container_name,
                                '/run/secrets/kubernetes.io/serviceaccount/token')
    if token == '':
        token = pod_exec_read_token(pod, container_name,
                                    '/var/run/secrets/kubernetes.io/serviceaccount/token')
    return token
def get_jwt_token_from_container(pod, container_name):
    """Read the mounted service-account JWT from a container.

    :return: ``(decoded_body_or_empty_string, raw_response)``; an
        'OCI'-prefixed response indicates the exec itself failed.
    """
    resp = pod_exec_read_token_two_paths(pod, container_name)
    token_body = ''
    if resp != '' and not resp.startswith('OCI'):
        from engine.jwt_token import decode_jwt_token_data
        token_body = json.loads(decode_jwt_token_data(resp))
    return token_body, resp
def is_same_user(a_username, a_namespace, b_username, b_namespace):
    """True when both the username and the namespace match."""
    return a_username == b_username and a_namespace == b_namespace
def get_risky_user_from_container(jwt_body, risky_users):
    """Return the risky ServiceAccount whose name/namespace match the
    token's claims, or None when nothing matches."""
    for risky_user in risky_users:
        if risky_user.user_info.kind != 'ServiceAccount':
            continue
        if is_same_user(jwt_body['kubernetes.io/serviceaccount/service-account.name'],
                        jwt_body['kubernetes.io/serviceaccount/namespace'],
                        risky_user.user_info.name, risky_user.user_info.namespace):
            return risky_user
    return None
def get_risky_containers(pod, risky_users, read_token_from_container=False):
    """Return Container entries for the pod's containers that run under a
    risky service account.

    Two strategies:

    * ``read_token_from_container=True`` ("deep"): exec into each *ready*
      container, read the mounted JWT and match it against *risky_users*.
    * ``False`` (default): statically inspect the container volume mounts and
      the pod volumes, without touching the containers.
    """
    risky_containers = []
    fetched_containers = []  # NOTE(review): never written or read -- dead variable.
    if read_token_from_container:
        # Skipping terminated and evicted pods
        # This will run only on the containers with the "ready" status
        if pod.status.container_statuses:
            for container in pod.status.container_statuses:
                if container.ready:
                    jwt_body, _ = get_jwt_token_from_container(pod, container.name)
                    if jwt_body:
                        risky_user = get_risky_user_from_container(jwt_body, risky_users)
                        if risky_user:
                            risky_containers.append(
                                Container(container.name, risky_user.user_info.name, risky_user.user_info.namespace,
                                          risky_user.priority))
    else:
        for container in pod.spec.containers:
            # Check for duplications: remember which risky accounts this
            # container was already matched to.
            fetched_service_accounts = []
            if container.volume_mounts is not None:
                for volume_mount in container.volume_mounts:
                    risky_user = check_name_in_volume(volume_mount, pod, risky_users)
                    if risky_user is not None:
                        if risky_user not in fetched_service_accounts:
                            # If the container is already listed, the account
                            # name is merged into the existing entry instead.
                            if not container_exists_in_risky_containers(risky_containers, container.name,
                                                                        risky_user.user_info.name):
                                risky_containers.append(
                                    Container(container.name, risky_user.user_info.name, risky_user.user_info.namespace,
                                              risky_user.priority))
                                fetched_service_accounts.append(risky_user)
    return risky_containers
def container_exists_in_risky_containers(risky_containers, container_name, user_name):
    """If *container_name* was already collected, append *user_name* to its
    service-account list (in-place mutation) and return True; otherwise
    return False."""
    for candidate in risky_containers:
        if candidate.name == container_name:
            candidate.service_account_name.append(user_name)
            return True
    return False
def check_name_in_volume(volume_mount, pod, risky_users):
    """Resolve the pod volume backing *volume_mount* and, when it carries a
    service-account token (projected source or secret), return the matching
    risky user (or None)."""
    risky_user = None
    for volume in pod.spec.volumes:
        if volume.name != volume_mount.name:
            continue
        if volume.projected is not None:
            # Projected token: match by the pod's own service account.
            for source in volume.projected.sources:
                if source.service_account_token is not None:
                    risky_user = is_user_risky(risky_users, pod.spec.service_account,
                                               pod.metadata.namespace)
        elif volume.secret is not None:
            # Secret-backed token: fetch and decode the secret itself.
            risky_user = get_jwt_and_decode(pod, risky_users, volume)
    return risky_user
def default_path_exists(volume_mounts):
    """True when any mount targets the default service-account token path."""
    return any(volume_mount.mount_path == "/var/run/secrets/kubernetes.io/serviceaccount"
               for volume_mount in volume_mounts)
def is_user_risky(risky_users, service_account, namespace):
    """Return the risky user matching (service_account, namespace), or None."""
    for candidate in risky_users:
        info = candidate.user_info
        if info.name == service_account and info.namespace == namespace:
            return candidate
    return None
def get_jwt_and_decode(pod, risky_users, volume):
    """Fetch the secret backing *volume*, decode its 'token' entry as a JWT
    and match it against the risky users.

    :return: the matching risky user, or None (missing secret, missing
        'token' key, or no match).
    """
    from engine.jwt_token import decode_base64_jwt_token
    try:
        secret = api_client.CoreV1Api.read_namespaced_secret(name=volume.secret.secret_name,
                                                             namespace=pod.metadata.namespace)
    except Exception:
        secret = None
    if secret is None or secret.data is None or 'token' not in secret.data:
        return None
    token_body = json.loads(decode_base64_jwt_token(secret.data['token']))
    if token_body:
        return get_risky_user_from_container(token_body, risky_users)
    return None
def get_risky_pods(namespace=None, deep_analysis=False):
    """Return a Pod wrapper for every pod that has at least one risky
    container (optionally restricted to *namespace*)."""
    risky_pods = []
    pods = list_pods_for_all_namespaces_or_one_namspace(namespace)
    risky_users = get_all_risky_subjects()
    for pod in pods.items:
        containers = get_risky_containers(pod, risky_users, deep_analysis)
        if containers:
            risky_pods.append(Pod(pod.metadata.name, pod.metadata.namespace, containers))
    return risky_pods
# endregion- Risky Pods
def get_rolebindings_all_namespaces_and_clusterrolebindings():
    """Return ``(namespaced RoleBindings result, ClusterRoleBindings list)``."""
    namespaced = api_client.RbacAuthorizationV1Api.list_role_binding_for_all_namespaces()
    # TODO: switch back to RbacAuthorizationV1Api.list_cluster_role_binding
    # once the upstream client bug is fixed.
    cluster = api_client.api_temp.list_cluster_role_binding()
    return namespaced, cluster
def get_rolebindings_and_clusterrolebindings_associated_to_subject(subject_name, kind, namespace):
    """Collect every RoleBinding and ClusterRoleBinding whose subject list
    mentions ``(subject_name, kind)``; ServiceAccount subjects are
    additionally filtered by *namespace*. Matching is case-insensitive.

    BUGFIX: in the ClusterRoleBinding loop, ``subject.name`` was compared
    case-sensitively against the lowercased *subject_name* (missing
    ``.lower()``), silently dropping matches whose stored name contains
    uppercase letters; the RoleBinding loop already lowered both sides.
    """
    rolebindings_all_namespaces, cluster_rolebindings = get_rolebindings_all_namespaces_and_clusterrolebindings()
    associated_rolebindings = []
    for rolebinding in rolebindings_all_namespaces.items:
        # In case 'rolebinding.subjects' is 'None', 'or []' prevents an exception.
        for subject in rolebinding.subjects or []:
            if subject.name.lower() == subject_name.lower() and subject.kind.lower() == kind.lower():
                if kind == SERVICEACCOUNT_KIND:
                    if subject.namespace.lower() == namespace.lower():
                        associated_rolebindings.append(rolebinding)
                else:
                    associated_rolebindings.append(rolebinding)
    associated_clusterrolebindings = []
    for clusterrolebinding in cluster_rolebindings:
        # In case 'clusterrolebinding.subjects' is 'None', 'or []' prevents an exception.
        for subject in clusterrolebinding.subjects or []:
            if subject.name.lower() == subject_name.lower() and subject.kind.lower() == kind.lower():
                if kind == SERVICEACCOUNT_KIND:
                    if subject.namespace.lower() == namespace.lower():
                        associated_clusterrolebindings.append(clusterrolebinding)
                else:
                    associated_clusterrolebindings.append(clusterrolebinding)
    return associated_rolebindings, associated_clusterrolebindings
# A Role can only be referenced from a RoleBinding (never a ClusterRoleBinding)
def get_rolebindings_associated_to_role(role_name, namespace):
    """Return the RoleBindings in *namespace* that reference the Role
    *role_name* (case-insensitive name/namespace match)."""
    matches = []
    all_bindings = api_client.RbacAuthorizationV1Api.list_role_binding_for_all_namespaces()
    for binding in all_bindings.items:
        if (binding.role_ref.name.lower() == role_name.lower()
                and binding.role_ref.kind == ROLE_KIND
                and binding.metadata.namespace.lower() == namespace.lower()):
            matches.append(binding)
    return matches
def get_rolebindings_and_clusterrolebindings_associated_to_clusterrole(role_name):
    """Collect the RoleBindings and ClusterRoleBindings that reference the
    ClusterRole *role_name* (case-insensitive name match).

    BUGFIX: matching ClusterRoleBindings were appended to
    ``associated_rolebindings`` instead of ``associated_clusterrolebindings``,
    so the second element of the returned tuple was always empty and the
    first mixed both kinds.
    """
    rolebindings_all_namespaces, cluster_rolebindings = get_rolebindings_all_namespaces_and_clusterrolebindings()
    associated_rolebindings = []
    for rolebinding in rolebindings_all_namespaces.items:
        if rolebinding.role_ref.name.lower() == role_name.lower() and rolebinding.role_ref.kind == CLUSTER_ROLE_KIND:
            associated_rolebindings.append(rolebinding)
    associated_clusterrolebindings = []
    for clusterrolebinding in cluster_rolebindings:
        if clusterrolebinding.role_ref.name.lower() == role_name.lower() and clusterrolebinding.role_ref.kind == CLUSTER_ROLE_KIND:
            associated_clusterrolebindings.append(clusterrolebinding)
    return associated_rolebindings, associated_clusterrolebindings
def dump_containers_tokens_by_pod(pod_name, namespace, read_token_from_container=False):
    """Return Container objects holding the decoded service-account tokens of
    *pod_name*, or None when the pod does not exist."""
    try:
        pod = api_client.CoreV1Api.read_namespaced_pod(name=pod_name, namespace=namespace)
    except ApiException:
        print(pod_name + " was not found in " + namespace + " namespace")
        return None
    containers_with_tokens = []
    if read_token_from_container:
        # Deep mode: exec into each ready container and read the mounted JWT.
        if pod.status.container_statuses:
            for container in pod.status.container_statuses:
                if container.ready:
                    jwt_body, raw_jwt_token = get_jwt_token_from_container(pod, container.name)
                    if jwt_body:
                        containers_with_tokens.append(
                            Container(container.name, token=jwt_body, raw_jwt_token=raw_jwt_token))
    else:
        # Static mode: resolve the tokens from the pod's secret volumes.
        fill_container_with_tokens_list(containers_with_tokens, pod)
    return containers_with_tokens
def fill_container_with_tokens_list(containers_with_tokens, pod):
    """Append a Container (with its decoded token) for every secret-backed
    volume mounted by the pod's containers. Mutates *containers_with_tokens*
    in place.

    BUGFIX: ``secret.data['token']`` raised an uncaught KeyError for secrets
    without a 'token' entry (only ApiException was handled); ``.get()`` now
    skips such secrets, consistent with get_jwt_and_decode.
    """
    from engine.jwt_token import decode_base64_jwt_token
    for container in pod.spec.containers:
        for volume_mount in container.volume_mounts or []:
            for volume in pod.spec.volumes or []:
                if volume.name == volume_mount.name and volume.secret:
                    try:
                        secret = api_client.CoreV1Api.read_namespaced_secret(volume.secret.secret_name,
                                                                             pod.metadata.namespace)
                        if secret and secret.data and secret.data.get('token'):
                            decoded_data = decode_base64_jwt_token(secret.data['token'])
                            token_body = json.loads(decoded_data)
                            containers_with_tokens.append(Container(container.name, token=token_body,
                                                                    raw_jwt_token=None))
                    except ApiException:
                        print("No secret found.")
def dump_all_pods_tokens_or_by_namespace(namespace=None, read_token_from_container=False):
    """Dump the container tokens of every pod, optionally restricted to
    *namespace*."""
    pods_with_tokens = []
    pods = list_pods_for_all_namespaces_or_one_namspace(namespace)
    for pod in pods.items:
        containers = dump_containers_tokens_by_pod(pod.metadata.name,
                                                   pod.metadata.namespace,
                                                   read_token_from_container)
        if containers is not None:
            pods_with_tokens.append(Pod(pod.metadata.name, pod.metadata.namespace, containers))
    return pods_with_tokens
def dump_pod_tokens(name, namespace, read_token_from_container=False):
    """Dump the tokens of a single pod, wrapped in a one-element list."""
    containers = dump_containers_tokens_by_pod(name, namespace, read_token_from_container)
    return [Pod(name, namespace, containers)]
def search_subject_in_subjects_by_kind(subjects, kind):
    """Return the subjects whose kind matches *kind* (case-insensitive)."""
    wanted = kind.lower()
    return [subject for subject in subjects if subject.kind.lower() == wanted]
# Gets the subjects of the given kind from all rolebindings and clusterrolebindings.
def get_subjects_by_kind(kind):
    """Collect the deduplicated subjects of *kind* across every RoleBinding
    and ClusterRoleBinding in the cluster."""
    subjects_found = []
    rolebindings = api_client.RbacAuthorizationV1Api.list_role_binding_for_all_namespaces()
    clusterrolebindings = api_client.api_temp.list_cluster_role_binding()
    for binding in rolebindings.items:
        if binding.subjects is not None:
            subjects_found += search_subject_in_subjects_by_kind(binding.subjects, kind)
    for binding in clusterrolebindings:
        if binding.subjects is not None:
            subjects_found += search_subject_in_subjects_by_kind(binding.subjects, kind)
    return remove_duplicated_subjects(subjects_found)
def remove_duplicated_subjects(subjects):
    """Return *subjects* with duplicates removed, keeping first occurrences.

    Two subjects are considered equal when name, namespace (when present) and
    kind all match. Improvements: ``== None`` replaced by the idiomatic
    ``is None``; the key is built once per subject.
    """
    seen_keys = set()
    unique_subjects = []
    for subject in subjects:
        if subject.namespace is None:
            key = ''.join([subject.name, subject.kind])
        else:
            key = ''.join([subject.name, subject.namespace, subject.kind])
        if key not in seen_keys:
            unique_subjects.append(subject)
            seen_keys.add(key)
    return unique_subjects
def get_rolebinding_role(rolebinding_name, namespace):
    """Resolve the Role or ClusterRole referenced by the named RoleBinding.

    Returns the role object, or None (after printing a diagnostic) when
    either the binding or the referenced role cannot be fetched.
    """
    rolebinding = None
    role = None
    try:
        rolebinding = api_client.RbacAuthorizationV1Api.read_namespaced_role_binding(rolebinding_name, namespace)
        # A RoleBinding may reference either a namespaced Role or a ClusterRole.
        if rolebinding.role_ref.kind == ROLE_KIND:
            role = api_client.RbacAuthorizationV1Api.read_namespaced_role(rolebinding.role_ref.name,
                                                                          rolebinding.metadata.namespace)
        else:
            role = api_client.RbacAuthorizationV1Api.read_cluster_role(rolebinding.role_ref.name)
        return role
    except ApiException:
        # Which lookup failed is inferred from what was already assigned.
        if rolebinding is None:
            print("Could not find " + rolebinding_name + " rolebinding in " + namespace + " namespace")
        elif role is None:
            # NOTE(review): this message prints role_ref.name twice; the
            # second occurrence was presumably meant to be the rolebinding
            # name -- confirm before changing the output.
            print(
                "Could not find " + rolebinding.role_ref.name + " role in " + rolebinding.role_ref.name + " rolebinding")
        return None
def get_clusterrolebinding_role(cluster_rolebinding_name):
    """Resolve the ClusterRole referenced by the named ClusterRoleBinding.

    NOTE(review): on any API error this prints the exception and calls
    ``exit()``, terminating the whole process -- harsh for library code;
    consider raising instead.
    """
    cluster_role = ''
    try:
        cluster_rolebinding = api_client.RbacAuthorizationV1Api.read_cluster_role_binding(cluster_rolebinding_name)
        cluster_role = api_client.RbacAuthorizationV1Api.read_cluster_role(cluster_rolebinding.role_ref.name)
    except ApiException as e:
        print(e)
        exit()
    return cluster_role
def get_roles_associated_to_subject(subject_name, kind, namespace):
    """Resolve every role granted to the subject through its associated
    RoleBindings and ClusterRoleBindings."""
    rolebindings, clusterrolebindings = get_rolebindings_and_clusterrolebindings_associated_to_subject(
        subject_name, kind, namespace)
    associated_roles = []
    for rolebinding in rolebindings:
        try:
            role = get_rolebinding_role(rolebinding.metadata.name, rolebinding.metadata.namespace)
            associated_roles.append(role)
        except ApiException:
            continue  # 404 not found
    for clusterrolebinding in clusterrolebindings:
        associated_roles.append(get_clusterrolebinding_role(clusterrolebinding.metadata.name))
    return associated_roles
def list_pods_for_all_namespaces_or_one_namspace(namespace=None):
    """List pods cluster-wide, or within *namespace* when given; returns
    None on API error. (The typo in the name is kept: it is public API.)"""
    try:
        if namespace is None:
            return api_client.CoreV1Api.list_pod_for_all_namespaces(watch=False)
        return api_client.CoreV1Api.list_namespaced_pod(namespace)
    except ApiException:
        return None
# https://<master_ip>:<port>/api/v1/namespaces/kube-system/secrets?fieldSelector=type=bootstrap.kubernetes.io/token
def list_boostrap_tokens_decoded():
    """Return the kube-system bootstrap tokens as decoded
    '<token-id>.<token-secret>' strings.

    Equivalent REST call:
    https://<master_ip>:<port>/api/v1/namespaces/kube-system/secrets?fieldSelector=type=bootstrap.kubernetes.io/token
    """
    import base64
    secrets = api_client.CoreV1Api.list_namespaced_secret(
        namespace='kube-system',
        field_selector='type=bootstrap.kubernetes.io/token')
    tokens = []
    for secret in secrets.items:
        token_id = base64.b64decode(secret.data['token-id']).decode('utf-8')
        token_secret = base64.b64decode(secret.data['token-secret']).decode('utf-8')
        tokens.append(token_id + '.' + token_secret)
    return tokens
from ...tl.tlobject import TLObject
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
if TYPE_CHECKING:
from ...tl.types import TypeInputDocument, TypeInputStickerSet, TypeInputStickerSetItem, TypeInputUser, TypeMaskCoords
class AddStickerToSetRequest(TLRequest):
    """Generated TL request ``stickers.addStickerToSet``: append a sticker
    to an existing sticker set."""
    # TL-schema CRC32 of this constructor and of its result type.
    CONSTRUCTOR_ID = 0x8653febe
    SUBCLASS_OF_ID = 0x9b704a5a

    def __init__(self, stickerset: 'TypeInputStickerSet', sticker: 'TypeInputStickerSetItem'):
        """
        :returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
        """
        self.stickerset = stickerset
        self.sticker = sticker

    def to_dict(self):
        # JSON-friendly view; nested TLObjects are converted recursively.
        return {
            '_': 'AddStickerToSetRequest',
            'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset,
            'sticker': self.sticker.to_dict() if isinstance(self.sticker, TLObject) else self.sticker
        }

    def _bytes(self):
        # Little-endian CONSTRUCTOR_ID followed by the serialized arguments.
        return b''.join((
            b'\xbe\xfeS\x86',
            self.stickerset._bytes(),
            self.sticker._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        _stickerset = reader.tgread_object()
        _sticker = reader.tgread_object()
        return cls(stickerset=_stickerset, sticker=_sticker)
class ChangeStickerRequest(TLRequest):
    """Generated TL request ``stickers.changeSticker``: update a sticker's
    emoji, mask coordinates and/or keywords."""
    # TL-schema CRC32 of this constructor and of its result type.
    CONSTRUCTOR_ID = 0xf5537ebc
    SUBCLASS_OF_ID = 0x9b704a5a

    def __init__(self, sticker: 'TypeInputDocument', emoji: Optional[str]=None, mask_coords: Optional['TypeMaskCoords']=None, keywords: Optional[str]=None):
        """
        :returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
        """
        self.sticker = sticker
        self.emoji = emoji
        self.mask_coords = mask_coords
        self.keywords = keywords

    async def resolve(self, client, utils):
        # Normalize the sticker reference into an InputDocument before sending.
        self.sticker = utils.get_input_document(self.sticker)

    def to_dict(self):
        return {
            '_': 'ChangeStickerRequest',
            'sticker': self.sticker.to_dict() if isinstance(self.sticker, TLObject) else self.sticker,
            'emoji': self.emoji,
            'mask_coords': self.mask_coords.to_dict() if isinstance(self.mask_coords, TLObject) else self.mask_coords,
            'keywords': self.keywords
        }

    def _bytes(self):
        # Flag bits: 1 = emoji, 2 = mask_coords, 4 = keywords.
        return b''.join((
            b'\xbc~S\xf5',
            struct.pack('<I', (0 if self.emoji is None or self.emoji is False else 1) | (0 if self.mask_coords is None or self.mask_coords is False else 2) | (0 if self.keywords is None or self.keywords is False else 4)),
            self.sticker._bytes(),
            b'' if self.emoji is None or self.emoji is False else (self.serialize_bytes(self.emoji)),
            b'' if self.mask_coords is None or self.mask_coords is False else (self.mask_coords._bytes()),
            b'' if self.keywords is None or self.keywords is False else (self.serialize_bytes(self.keywords)),
        ))

    @classmethod
    def from_reader(cls, reader):
        # Read the flags, then each optional field iff its bit is set.
        flags = reader.read_int()
        _sticker = reader.tgread_object()
        if flags & 1:
            _emoji = reader.tgread_string()
        else:
            _emoji = None
        if flags & 2:
            _mask_coords = reader.tgread_object()
        else:
            _mask_coords = None
        if flags & 4:
            _keywords = reader.tgread_string()
        else:
            _keywords = None
        return cls(sticker=_sticker, emoji=_emoji, mask_coords=_mask_coords, keywords=_keywords)
class ChangeStickerPositionRequest(TLRequest):
    """Generated TL request ``stickers.changeStickerPosition``: move a
    sticker to a new position within its set."""
    # TL-schema CRC32 of this constructor and of its result type.
    CONSTRUCTOR_ID = 0xffb6d4ca
    SUBCLASS_OF_ID = 0x9b704a5a

    def __init__(self, sticker: 'TypeInputDocument', position: int):
        """
        :returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
        """
        self.sticker = sticker
        self.position = position

    async def resolve(self, client, utils):
        # Normalize the sticker reference into an InputDocument before sending.
        self.sticker = utils.get_input_document(self.sticker)

    def to_dict(self):
        return {
            '_': 'ChangeStickerPositionRequest',
            'sticker': self.sticker.to_dict() if isinstance(self.sticker, TLObject) else self.sticker,
            'position': self.position
        }

    def _bytes(self):
        # Constructor id, serialized sticker, then the position as int32.
        return b''.join((
            b'\xca\xd4\xb6\xff',
            self.sticker._bytes(),
            struct.pack('<i', self.position),
        ))

    @classmethod
    def from_reader(cls, reader):
        _sticker = reader.tgread_object()
        _position = reader.read_int()
        return cls(sticker=_sticker, position=_position)
class CheckShortNameRequest(TLRequest):
    """Generated TL request ``stickers.checkShortName``: check whether a
    sticker-set short name is valid and available."""
    # TL-schema CRC32 of this constructor and of its result type.
    CONSTRUCTOR_ID = 0x284b3639
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, short_name: str):
        """
        :returns Bool: This type has no constructors.
        """
        self.short_name = short_name

    def to_dict(self):
        return {
            '_': 'CheckShortNameRequest',
            'short_name': self.short_name
        }

    def _bytes(self):
        # Constructor id followed by the TL-serialized string.
        return b''.join((
            b'96K(',
            self.serialize_bytes(self.short_name),
        ))

    @classmethod
    def from_reader(cls, reader):
        _short_name = reader.tgread_string()
        return cls(short_name=_short_name)
class CreateStickerSetRequest(TLRequest):
    """Generated TL request ``stickers.createStickerSet``: create a new
    sticker set owned by *user_id*."""
    # TL-schema CRC32 of this constructor and of its result type.
    CONSTRUCTOR_ID = 0x9021ab67
    SUBCLASS_OF_ID = 0x9b704a5a

    def __init__(self, user_id: 'TypeInputUser', title: str, short_name: str, stickers: List['TypeInputStickerSetItem'], masks: Optional[bool]=None, animated: Optional[bool]=None, videos: Optional[bool]=None, emojis: Optional[bool]=None, text_color: Optional[bool]=None, thumb: Optional['TypeInputDocument']=None, software: Optional[str]=None):
        """
        :returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
        """
        self.user_id = user_id
        self.title = title
        self.short_name = short_name
        self.stickers = stickers
        self.masks = masks
        self.animated = animated
        self.videos = videos
        self.emojis = emojis
        self.text_color = text_color
        self.thumb = thumb
        self.software = software

    async def resolve(self, client, utils):
        # Resolve the owner to an InputUser and normalize the optional thumb.
        self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
        if self.thumb:
            self.thumb = utils.get_input_document(self.thumb)

    def to_dict(self):
        return {
            '_': 'CreateStickerSetRequest',
            'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
            'title': self.title,
            'short_name': self.short_name,
            'stickers': [] if self.stickers is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.stickers],
            'masks': self.masks,
            'animated': self.animated,
            'videos': self.videos,
            'emojis': self.emojis,
            'text_color': self.text_color,
            'thumb': self.thumb.to_dict() if isinstance(self.thumb, TLObject) else self.thumb,
            'software': self.software
        }

    def _bytes(self):
        # Flag bits: 1 = masks, 2 = animated, 4 = thumb, 8 = software,
        # 16 = videos, 32 = emojis, 64 = text_color.
        # b'\x15\xc4\xb5\x1c' is the TL vector constructor preceding the
        # sticker count and items.
        return b''.join((
            b'g\xab!\x90',
            struct.pack('<I', (0 if self.masks is None or self.masks is False else 1) | (0 if self.animated is None or self.animated is False else 2) | (0 if self.videos is None or self.videos is False else 16) | (0 if self.emojis is None or self.emojis is False else 32) | (0 if self.text_color is None or self.text_color is False else 64) | (0 if self.thumb is None or self.thumb is False else 4) | (0 if self.software is None or self.software is False else 8)),
            self.user_id._bytes(),
            self.serialize_bytes(self.title),
            self.serialize_bytes(self.short_name),
            b'' if self.thumb is None or self.thumb is False else (self.thumb._bytes()),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(x._bytes() for x in self.stickers),
            b'' if self.software is None or self.software is False else (self.serialize_bytes(self.software)),
        ))

    @classmethod
    def from_reader(cls, reader):
        # Boolean flags are encoded purely in the flag bits (no payload).
        flags = reader.read_int()
        _masks = bool(flags & 1)
        _animated = bool(flags & 2)
        _videos = bool(flags & 16)
        _emojis = bool(flags & 32)
        _text_color = bool(flags & 64)
        _user_id = reader.tgread_object()
        _title = reader.tgread_string()
        _short_name = reader.tgread_string()
        if flags & 4:
            _thumb = reader.tgread_object()
        else:
            _thumb = None
        reader.read_int()  # skip the vector constructor id
        _stickers = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _stickers.append(_x)
        if flags & 8:
            _software = reader.tgread_string()
        else:
            _software = None
        return cls(user_id=_user_id, title=_title, short_name=_short_name, stickers=_stickers, masks=_masks, animated=_animated, videos=_videos, emojis=_emojis, text_color=_text_color, thumb=_thumb, software=_software)
class DeleteStickerSetRequest(TLRequest):
    """Generated TL request ``stickers.deleteStickerSet``: delete a whole
    sticker set."""
    # TL-schema CRC32 of this constructor and of its result type.
    CONSTRUCTOR_ID = 0x87704394
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, stickerset: 'TypeInputStickerSet'):
        """
        :returns Bool: This type has no constructors.
        """
        self.stickerset = stickerset

    def to_dict(self):
        return {
            '_': 'DeleteStickerSetRequest',
            'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset
        }

    def _bytes(self):
        # Constructor id followed by the serialized sticker set reference.
        return b''.join((
            b'\x94Cp\x87',
            self.stickerset._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        _stickerset = reader.tgread_object()
        return cls(stickerset=_stickerset)
class RemoveStickerFromSetRequest(TLRequest):
    """Generated TL request ``stickers.removeStickerFromSet``: remove a
    sticker from the set it belongs to."""
    # TL-schema CRC32 of this constructor and of its result type.
    CONSTRUCTOR_ID = 0xf7760f51
    SUBCLASS_OF_ID = 0x9b704a5a

    def __init__(self, sticker: 'TypeInputDocument'):
        """
        :returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
        """
        self.sticker = sticker

    async def resolve(self, client, utils):
        # Normalize the sticker reference into an InputDocument before sending.
        self.sticker = utils.get_input_document(self.sticker)

    def to_dict(self):
        return {
            '_': 'RemoveStickerFromSetRequest',
            'sticker': self.sticker.to_dict() if isinstance(self.sticker, TLObject) else self.sticker
        }

    def _bytes(self):
        # Constructor id followed by the serialized sticker reference.
        return b''.join((
            b'Q\x0fv\xf7',
            self.sticker._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        _sticker = reader.tgread_object()
        return cls(sticker=_sticker)
class RenameStickerSetRequest(TLRequest):
    # Auto-generated TL request wrapper for renaming an owned sticker set.
    CONSTRUCTOR_ID = 0x124b1c00  # TL constructor ID of this request
    SUBCLASS_OF_ID = 0x9b704a5a  # ID of the abstract result type
    def __init__(self, stickerset: 'TypeInputStickerSet', title: str):
        """
        :returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
        """
        self.stickerset = stickerset
        self.title = title
    def to_dict(self):
        # Recursively convert to a plain dict (for debugging/JSON display).
        return {
            '_': 'RenameStickerSetRequest',
            'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset,
            'title': self.title
        }
    def _bytes(self):
        # Serialize: little-endian constructor ID, then each argument in order.
        return b''.join((
            b'\x00\x1cK\x12',
            self.stickerset._bytes(),
            self.serialize_bytes(self.title),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor ID was already consumed.
        _stickerset = reader.tgread_object()
        _title = reader.tgread_string()
        return cls(stickerset=_stickerset, title=_title)
class SetStickerSetThumbRequest(TLRequest):
    # Auto-generated TL request wrapper: sets a sticker set's thumbnail,
    # either by document or by document ID (both optional, flag-gated).
    CONSTRUCTOR_ID = 0xa76a5392  # TL constructor ID of this request
    SUBCLASS_OF_ID = 0x9b704a5a  # ID of the abstract result type
    def __init__(self, stickerset: 'TypeInputStickerSet', thumb: Optional['TypeInputDocument']=None, thumb_document_id: Optional[int]=None):
        """
        :returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
        """
        self.stickerset = stickerset
        self.thumb = thumb
        self.thumb_document_id = thumb_document_id
    async def resolve(self, client, utils):
        # Normalise a user-supplied thumb into a proper input document.
        if self.thumb:
            self.thumb = utils.get_input_document(self.thumb)
    def to_dict(self):
        return {
            '_': 'SetStickerSetThumbRequest',
            'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset,
            'thumb': self.thumb.to_dict() if isinstance(self.thumb, TLObject) else self.thumb,
            'thumb_document_id': self.thumb_document_id
        }
    def _bytes(self):
        # Serialize: constructor ID, then a bit-flags word (bit 0 = thumb
        # present, bit 1 = thumb_document_id present), then each field.
        return b''.join((
            b'\x92Sj\xa7',
            struct.pack('<I', (0 if self.thumb is None or self.thumb is False else 1) | (0 if self.thumb_document_id is None or self.thumb_document_id is False else 2)),
            self.stickerset._bytes(),
            b'' if self.thumb is None or self.thumb is False else (self.thumb._bytes()),
            b'' if self.thumb_document_id is None or self.thumb_document_id is False else (struct.pack('<q', self.thumb_document_id)),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(): the flags word decides which optionals to read.
        flags = reader.read_int()
        _stickerset = reader.tgread_object()
        if flags & 1:
            _thumb = reader.tgread_object()
        else:
            _thumb = None
        if flags & 2:
            _thumb_document_id = reader.read_long()
        else:
            _thumb_document_id = None
        return cls(stickerset=_stickerset, thumb=_thumb, thumb_document_id=_thumb_document_id)
class SuggestShortNameRequest(TLRequest):
    # Auto-generated TL request wrapper: asks Telegram to suggest a short
    # name for a sticker set based on its title.
    CONSTRUCTOR_ID = 0x4dafc503
    SUBCLASS_OF_ID = 0xc44a4b21

    def __init__(self, title: str):
        """
        :returns stickers.SuggestedShortName: Instance of SuggestedShortName.
        """
        self.title = title

    def to_dict(self):
        return {'_': 'SuggestShortNameRequest', 'title': self.title}

    def _bytes(self):
        # Little-endian constructor ID 0x4dafc503, then the title string.
        return b'\x03\xc5\xafM' + self.serialize_bytes(self.title)

    @classmethod
    def from_reader(cls, reader):
        return cls(title=reader.tgread_string())
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/step.js | module.exports =
/******/ // webpack runtime bootstrap (auto-generated).  `modules` maps module
/******/ // IDs to factory functions; `__webpack_require__` instantiates and
/******/ // caches them, and the helper properties (.d/.r/.t/.n/.o/.p)
/******/ // implement webpack's export and namespace conventions.
/******/ (function (modules) { // webpackBootstrap
/******/     // The module cache
/******/     var installedModules = {};
/******/
/******/     // The require function
/******/     function __webpack_require__(moduleId) {
/******/         // Check if module is in cache
/******/         if (installedModules[moduleId]) {
/******/             return installedModules[moduleId].exports;
/******/         }
/******/         // Create a new module (and put it into the cache)
/******/         var module = installedModules[moduleId] = {
/******/             i: moduleId,  // module ID
/******/             l: false,     // "loaded" flag
/******/             exports: {}
/******/         };
/******/
/******/         // Execute the module function
/******/         modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/         // Flag the module as loaded
/******/         module.l = true;
/******/
/******/         // Return the exports of the module
/******/         return module.exports;
/******/     }
/******/
/******/     // expose the modules object (__webpack_modules__)
/******/     __webpack_require__.m = modules;
/******/
/******/     // expose the module cache
/******/     __webpack_require__.c = installedModules;
/******/
/******/     // define getter function for harmony exports
/******/     __webpack_require__.d = function (exports, name, getter) {
/******/         if (!__webpack_require__.o(exports, name)) {
/******/             Object.defineProperty(exports, name, {enumerable: true, get: getter});
/******/         }
/******/     };
/******/
/******/     // define __esModule on exports
/******/     __webpack_require__.r = function (exports) {
/******/         if (typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/             Object.defineProperty(exports, Symbol.toStringTag, {value: 'Module'});
/******/         }
/******/         Object.defineProperty(exports, '__esModule', {value: true});
/******/     };
/******/
/******/     // create a fake namespace object
/******/     // mode & 1: value is a module id, require it
/******/     // mode & 2: merge all properties of value into the ns
/******/     // mode & 4: return value when already ns object
/******/     // mode & 8|1: behave like require
/******/     __webpack_require__.t = function (value, mode) {
/******/         if (mode & 1) value = __webpack_require__(value);
/******/         if (mode & 8) return value;
/******/         if ((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/         var ns = Object.create(null);
/******/         __webpack_require__.r(ns);
/******/         Object.defineProperty(ns, 'default', {enumerable: true, value: value});
/******/         if (mode & 2 && typeof value != 'string') for (var key in value) __webpack_require__.d(ns, key, function (key) {
/******/             return value[key];
/******/         }.bind(null, key));
/******/         return ns;
/******/     };
/******/
/******/     // getDefaultExport function for compatibility with non-harmony modules
/******/     __webpack_require__.n = function (module) {
/******/         var getter = module && module.__esModule ?
/******/             function getDefault() {
/******/                 return module['default'];
/******/             } :
/******/             function getModuleExports() {
/******/                 return module;
/******/             };
/******/         __webpack_require__.d(getter, 'a', getter);
/******/         return getter;
/******/     };
/******/
/******/     // Object.prototype.hasOwnProperty.call
/******/     __webpack_require__.o = function (object, property) {
/******/         return Object.prototype.hasOwnProperty.call(object, property);
/******/     };
/******/
/******/     // __webpack_public_path__
/******/     __webpack_require__.p = "/dist/";
/******/
/******/     // Load entry module and return exports
/******/     return __webpack_require__(__webpack_require__.s = 105);
/******/ })
/************************************************************************/
/******/({
/***/ 0:
/***/ (function (module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */
__webpack_require__.d(__webpack_exports__, "a", function () {
return normalizeComponent;
});
/* globals __VUE_SSR_CONTEXT__ */
// IMPORTANT: Do NOT use ES2015 features in this file (except for modules).
// This module is a runtime utility for cleaner component module output and will
// be included in the final webpack user bundle.
/**
 * vue-loader runtime: merge a component's script exports with its compiled
 * template, scoped-CSS ID, style-injection hook and SSR registration.
 * Auto-generated support code; intentionally ES5-only (see note above).
 *
 * @param {Object|Function} scriptExports  component options or Vue.extend ctor
 * @param {Function} render                compiled render function
 * @param {Array} staticRenderFns          compiled static subtrees
 * @param {boolean} functionalTemplate     template was compiled as functional
 * @param {Function} injectStyles          style-injection callback (may be falsy)
 * @param {string} scopeId                 scoped-CSS attribute suffix
 * @param {string} moduleIdentifier        module ID (server builds only)
 * @param {boolean} shadowMode             vue-cli web-component mode
 * @returns {{exports: *, options: Object}} normalized component record
 */
function normalizeComponent(
    scriptExports,
    render,
    staticRenderFns,
    functionalTemplate,
    injectStyles,
    scopeId,
    moduleIdentifier, /* server only */
    shadowMode /* vue-cli only */
) {
    // Vue.extend constructor export interop
    var options = typeof scriptExports === 'function'
        ? scriptExports.options
        : scriptExports
    // render functions
    if (render) {
        options.render = render
        options.staticRenderFns = staticRenderFns
        options._compiled = true
    }
    // functional template
    if (functionalTemplate) {
        options.functional = true
    }
    // scopedId
    if (scopeId) {
        options._scopeId = 'data-v-' + scopeId
    }
    var hook
    if (moduleIdentifier) { // server build
        hook = function (context) {
            // 2.3 injection
            context =
                context || // cached call
                (this.$vnode && this.$vnode.ssrContext) || // stateful
                (this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional
            // 2.2 with runInNewContext: true
            if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') {
                context = __VUE_SSR_CONTEXT__
            }
            // inject component styles
            if (injectStyles) {
                injectStyles.call(this, context)
            }
            // register component module identifier for async chunk inferrence
            if (context && context._registeredComponents) {
                context._registeredComponents.add(moduleIdentifier)
            }
        }
        // used by ssr in case component is cached and beforeCreate
        // never gets called
        options._ssrRegister = hook
    } else if (injectStyles) {
        hook = shadowMode
            ? function () {
                injectStyles.call(this, this.$root.$options.shadowRoot)
            }
            : injectStyles
    }
    if (hook) {
        if (options.functional) {
            // for template-only hot-reload because in that case the render fn doesn't
            // go through the normalizer
            options._injectStyles = hook
            // register for functioal component in vue file
            var originalRender = options.render
            options.render = function renderWithStyleInjection(h, context) {
                hook.call(context)
                return originalRender(h, context)
            }
        } else {
            // inject component registration as beforeCreate hook
            var existing = options.beforeCreate
            options.beforeCreate = existing
                ? [].concat(existing, hook)
                : [hook]
        }
    }
    return {
        exports: scriptExports,
        options: options
    }
}
/***/
}),
/***/ 105:
/***/ (function (module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
// CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/steps/src/step.vue?vue&type=template&id=f414a87a&
// Compiled template render function for <el-step>, generated by vue-loader
// from packages/steps/src/step.vue -- do not edit by hand; edit the .vue
// template instead.  `_c` creates elements, `_t` renders slots, `_v`/_s`
// emit text nodes.
var render = function () {
    var _vm = this
    var _h = _vm.$createElement
    var _c = _vm._self._c || _h
    return _c(
        "div",
        {
            staticClass: "el-step",
            class: [
                !_vm.isSimple && "is-" + _vm.$parent.direction,
                _vm.isSimple && "is-simple",
                _vm.isLast && !_vm.space && !_vm.isCenter && "is-flex",
                _vm.isCenter && !_vm.isVertical && !_vm.isSimple && "is-center"
            ],
            style: _vm.style
        },
        [
            _c(
                "div",
                {staticClass: "el-step__head", class: "is-" + _vm.currentStatus},
                [
                    _c(
                        "div",
                        {
                            staticClass: "el-step__line",
                            style: _vm.isLast
                                ? ""
                                : {marginRight: _vm.$parent.stepOffset + "px"}
                        },
                        [
                            _c("i", {
                                staticClass: "el-step__line-inner",
                                style: _vm.lineStyle
                            })
                        ]
                    ),
                    _c(
                        "div",
                        {
                            staticClass: "el-step__icon",
                            class: "is-" + (_vm.icon ? "icon" : "text")
                        },
                        [
                            _vm.currentStatus !== "success" && _vm.currentStatus !== "error"
                                ? _vm._t("icon", [
                                    _vm.icon
                                        ? _c("i", {
                                            staticClass: "el-step__icon-inner",
                                            class: [_vm.icon]
                                        })
                                        : _vm._e(),
                                    !_vm.icon && !_vm.isSimple
                                        ? _c("div", {staticClass: "el-step__icon-inner"}, [
                                            _vm._v(_vm._s(_vm.index + 1))
                                        ])
                                        : _vm._e()
                                ])
                                : _c("i", {
                                    staticClass: "el-step__icon-inner is-status",
                                    class: [
                                        "el-icon-" +
                                        (_vm.currentStatus === "success" ? "check" : "close")
                                    ]
                                })
                        ],
                        2
                    )
                ]
            ),
            _c("div", {staticClass: "el-step__main"}, [
                _c(
                    "div",
                    {
                        ref: "title",
                        staticClass: "el-step__title",
                        class: ["is-" + _vm.currentStatus]
                    },
                    [_vm._t("title", [_vm._v(_vm._s(_vm.title))])],
                    2
                ),
                _vm.isSimple
                    ? _c("div", {staticClass: "el-step__arrow"})
                    : _c(
                        "div",
                        {
                            staticClass: "el-step__description",
                            class: ["is-" + _vm.currentStatus]
                        },
                        [_vm._t("description", [_vm._v(_vm._s(_vm.description))])],
                        2
                    )
            ])
        ]
    )
}
// No static subtrees were hoisted out of this template.
var staticRenderFns = []
render._withStripped = true
// CONCATENATED MODULE: ./packages/steps/src/step.vue?vue&type=template&id=f414a87a&
// CONCATENATED MODULE: ./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/steps/src/step.vue?vue&type=script&lang=js&
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
/* harmony default export */
// Script half of element-ui's <el-step>.  Each step registers itself with its
// parent <el-steps>, derives its display status from the parent's `active`
// index, and animates the connector line leading into the next step.
var stepvue_type_script_lang_js_ = ({
    name: 'ElStep',
    props: {
        // All props are optional display inputs; `status`, when set,
        // overrides the internally computed state.
        title: String,
        icon: String,
        description: String,
        status: String
    },
    data: function data() {
        return {
            index: -1,          // position within the parent; assigned by <el-steps>
            lineStyle: {},      // inline style of the connector line
            internalStatus: ''  // derived status when the `status` prop is absent
        };
    },
    beforeCreate: function beforeCreate() {
        // Register with the parent <el-steps> so it can index/space the steps.
        this.$parent.steps.push(this);
    },
    beforeDestroy: function beforeDestroy() {
        // Unregister from the parent on teardown.
        var steps = this.$parent.steps;
        var index = steps.indexOf(this);
        if (index >= 0) {
            steps.splice(index, 1);
        }
    },
    computed: {
        currentStatus: function currentStatus() {
            return this.status || this.internalStatus;
        },
        prevStatus: function prevStatus() {
            var prevStep = this.$parent.steps[this.index - 1];
            return prevStep ? prevStep.currentStatus : 'wait';
        },
        isCenter: function isCenter() {
            return this.$parent.alignCenter;
        },
        isVertical: function isVertical() {
            return this.$parent.direction === 'vertical';
        },
        isSimple: function isSimple() {
            return this.$parent.simple;
        },
        isLast: function isLast() {
            var parent = this.$parent;
            return parent.steps[parent.steps.length - 1] === this;
        },
        stepsCount: function stepsCount() {
            return this.$parent.steps.length;
        },
        space: function space() {
            var isSimple = this.isSimple,
                space = this.$parent.space;
            return isSimple ? '' : space;
        },
        style: function style() {
            // Flex sizing: an explicit `space` wins; otherwise divide the row evenly.
            var style = {};
            var parent = this.$parent;
            var len = parent.steps.length;
            var space = typeof this.space === 'number' ? this.space + 'px' : this.space ? this.space : 100 / (len - (this.isCenter ? 0 : 1)) + '%';
            style.flexBasis = space;
            if (this.isVertical) return style;
            if (this.isLast) {
                style.maxWidth = 100 / this.stepsCount + '%';
            } else {
                style.marginRight = -this.$parent.stepOffset + 'px';
            }
            return style;
        }
    },
    methods: {
        updateStatus: function updateStatus(val) {
            // `val` is the parent's active index; derive this step's status,
            // then let the previous step redraw its connector line.
            var prevChild = this.$parent.$children[this.index - 1];
            if (val > this.index) {
                this.internalStatus = this.$parent.finishStatus;
            } else if (val === this.index && this.prevStatus !== 'error') {
                this.internalStatus = this.$parent.processStatus;
            } else {
                this.internalStatus = 'wait';
            }
            if (prevChild) prevChild.calcProgress(this.internalStatus);
        },
        calcProgress: function calcProgress(status) {
            // Compute the inline style of the connector into the NEXT step.
            var step = 100;
            var style = {};
            style.transitionDelay = 150 * this.index + 'ms';
            if (status === this.$parent.processStatus) {
                // NOTE(review): both ternary branches yield 0 here; this appears
                // to mirror the upstream element-ui source, so it is left as-is
                // rather than "fixed" -- confirm against upstream before changing.
                step = this.currentStatus !== 'error' ? 0 : 0;
            } else if (status === 'wait') {
                step = 0;
                style.transitionDelay = -150 * this.index + 'ms';
            }
            style.borderWidth = step && !this.isSimple ? '1px' : 0;
            this.$parent.direction === 'vertical' ? style.height = step + '%' : style.width = step + '%';
            this.lineStyle = style;
        }
    },
    mounted: function mounted() {
        // Defer the status watchers until the parent has assigned our `index`;
        // the one-shot `index` watcher then tears itself down.
        var _this = this;
        var unwatch = this.$watch('index', function (val) {
            _this.$watch('$parent.active', _this.updateStatus, {immediate: true});
            _this.$watch('$parent.processStatus', function () {
                var activeIndex = _this.$parent.active;
                _this.updateStatus(activeIndex);
            }, {immediate: true});
            unwatch();
        });
    }
});
// CONCATENATED MODULE: ./packages/steps/src/step.vue?vue&type=script&lang=js&
/* harmony default export */
// Re-export the script part of step.vue under the name the normalizer expects.
var src_stepvue_type_script_lang_js_ = (stepvue_type_script_lang_js_);
// EXTERNAL MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/runtime/componentNormalizer.js
var componentNormalizer = __webpack_require__(0);
// CONCATENATED MODULE: ./packages/steps/src/step.vue
/* normalize component */
// Stitch the script and compiled template together into a registrable component.
var component = Object(componentNormalizer["a" /* default */])(
    src_stepvue_type_script_lang_js_,
    render,
    staticRenderFns,
    false,
    null,
    null,
    null
)
/* hot reload */
// Dead branch: hot-reload wiring is compiled out in production builds.
if (false) {
    var api;
}
component.options.__file = "packages/steps/src/step.vue"
/* harmony default export */
var step = (component.exports);
// CONCATENATED MODULE: ./packages/step/index.js
/* istanbul ignore next */
// install() lets `Vue.use(Step)` register the component globally.
step.install = function (Vue) {
    Vue.component(step.name, step);
};
/* harmony default export */
var packages_step = __webpack_exports__["default"] = (step);
/***/
})
/******/
});
/Apache-Local-Domain-1.1.2.tar.gz/Apache-Local-Domain-1.1.2/ApacheLocalDomain/app/lib/checkers.py | import os
from ApacheLocalDomain.app import configs
from ApacheLocalDomain.app.lib.log import error
from validators import domain as domainValidator, email as emailValidator
def __validUrl(url):
    """
    Validate a domain name.

    :param url: domain to validate, without "http", "https" or "www"
    :return: the domain with any leading "www." removed; on failure,
             ``error`` is invoked (which reports the problem first)
    """
    try:
        if not domainValidator(url):
            # BUG FIX: the log label previously said '__validEmail'; report
            # the correct function name so failures are traceable.
            error('__validUrl from helper file', "Correct Domain: example.com\n without 'http', 'https' , 'www' =)")
        return url.replace("www.", '') if url.startswith("www.") else url
    except Exception as e:
        # Broad catch: any unexpected failure is routed through error() too.
        error('__validUrl from helper file', e)
def __validEmail(email):
    """
    Validate an e-mail address.

    :param email: the e-mail address to validate
    :return: the address unchanged when valid; otherwise ``error`` is
             invoked (which is expected to report and abort)
    """
    try:
        if not emailValidator(email):
            error('__validEmail from helper file', "Correct Email: ex@example.com")
        return email
    except Exception as e:
        # Broad catch: any unexpected failure is routed through error() too.
        error('__validEmail from helper file', e)
def _checkWSGIEnabled():
    """Return True when an enabled Apache module name contains 'wsgi'."""
    try:
        module_names = os.listdir(configs.APACHE2_MODULES_PATH)
        if any("wsgi" in name for name in module_names):
            return True
        error('_checkWSGIEnabled from helper file', "'mode_wsgi' Module of Apache not found or Disable")
    except Exception as e:
        error('_checkWSGIEnabled from helper file', e)
def _checkHTTP2Enabled():
    """Return True when an enabled Apache module name contains 'http2'."""
    try:
        module_names = os.listdir(configs.APACHE2_MODULES_PATH)
        if any("http2" in name for name in module_names):
            return True
        error('_checkHTTP2Enabled from helper file', "'http2' Module of Apache not found or Disable")
    except Exception as e:
        error('_checkHTTP2Enabled from helper file', e)
def __wsgiAddressValidation(
        documentRoot,
        wsgiScript,
        virtualenv,
        StaticFolderName,
        enable_static
):
    """
    Validate the filesystem layout of a WSGI site before configuring it.

    :param documentRoot: site root directory
    :param wsgiScript: path of the WSGI entry-point script
    :param virtualenv: name of the virtualenv folder inside the document root
    :param StaticFolderName: name of the static-files folder inside the root
    :param enable_static: when truthy, the static folder is also validated
    :raises Exception: when any required path is missing or malformed
    """
    DOCROOT = os.path.abspath(documentRoot)
    # Validate DocumentRoot
    # NOTE(review): only values WITHOUT a "/" are existence-checked; paths
    # containing a separator skip the check entirely -- confirm intended.
    if ("/" not in documentRoot):
        if not os.path.exists(documentRoot):
            raise Exception("directory does not exist: {0}".format(documentRoot))
    # Validate wsgiScript (same "/"-free rule as above)
    if ("/" not in wsgiScript):
        if not os.path.exists(wsgiScript):
            raise Exception("directory does not exist: {0}".format(wsgiScript))
    # Validate Virtualenv Name: must be a bare folder name inside DOCROOT
    if ("/" in virtualenv):
        raise Exception("directory does not valid: {0}".format(virtualenv))
    VIRTUALENV = os.path.join(DOCROOT, virtualenv)
    if not os.path.exists(VIRTUALENV):
        raise Exception("directory does not exist: {0}".format(VIRTUALENV))
    # validate Static Folder Name (only when static serving is enabled)
    if enable_static:
        if ("/" in StaticFolderName):
            raise Exception("directory does not valid: {0}".format(StaticFolderName))
        STATIC_FOLDER_NAME = os.path.join(DOCROOT, StaticFolderName)
        if not os.path.exists(STATIC_FOLDER_NAME):
            raise Exception("directory does not exist: {0}".format(STATIC_FOLDER_NAME))
def __phpAddressValidation(
        documentRoot
):
    """Validate a PHP site's document root, raising when it does not exist."""
    DOCROOT = os.path.abspath(documentRoot)
    # Validate DocumentRoot.  Mirrors __wsgiAddressValidation: only bare names
    # (no "/") are existence-checked.
    if "/" not in documentRoot and not os.path.exists(documentRoot):
        raise Exception("directory does not exist: {0}".format(documentRoot))
/GoogleSheetPlot-0.1.10.tar.gz/GoogleSheetPlot-0.1.10/Example/Example.ipynb | ## Install the package
```
!pip install GoogleSheetPlot==0.1.0
```
## import GoogleSheetPlot
Initialize GoogleSheetPlot by giving your SPREADSHEET_ID (taken from your Google Sheets link in your Google Drive account) and the cell range of the X and Y data in your Google Sheet.
```
from GoogleSheetPlot import GoogleSheetPlot
SAMPLE_SPREADSHEET_ID = '1SrZfvr2ee54r7HR1jGtAE9zHIj_Y-UzK9ok8bdwkpqc'
SAMPLE_RANGE_NAME = 'A1:C50'
```
## Initialize the dataFrame
```
gsp = GoogleSheetPlot()
df = gsp.getDataFrame(SAMPLE_SPREADSHEET_ID, SAMPLE_RANGE_NAME)
df.head()
```
## Plot the graph
Give valid column names (X and Y axes) with the type of plot you want to get.<br>
Your plot gets saved with the format "col1_col2_category.png".
```
#gsp.getPlot("Column-1", "Column-2","Plot category")
gsp.getPlot("average_sales", "offer_price","line")
```
| PypiClean |
/COWBAT-0.5.0.23.tar.gz/COWBAT-0.5.0.23/cowbat/get/get_rmlst.py | from olctools.accessoryFunctions.accessoryFunctions import printtime, make_path
from cowbat.get import rest_auth_class
from Bio import SeqIO
from argparse import ArgumentParser
from glob import glob
import time
import os
__author__ = 'adamkoziol'
class Get(object):
    """Download the current rMLST profile/alleles and build a combined FASTA."""

    def getrmlsthelper(self):
        """
        Makes a system call to rest_auth.py, a Python script modified from
        https://github.com/kjolley/BIGSdb/tree/develop/scripts/test
        And downloads the most up-to-date rMLST profile and alleles
        """
        printtime('Downloading {} alleles'.format(self.analysistype), self.start)
        # Extract the path of the current script from the full path + file name
        homepath = os.path.split(os.path.abspath(__file__))[0]
        # Set the path/name of the folder to contain the new alleles and profile
        newfolder = os.path.join(self.path, self.analysistype)
        # Create the path
        make_path(newfolder)
        # Create arguments to feed into the rest_auth_class script.
        # NOTE(review): this assigns the ArgumentParser *class* and attaches
        # attributes to it; rest_auth_class.REST only reads those attributes,
        # so it works, but a namespace object would be cleaner -- left as-is
        # to avoid changing what REST receives.
        args = ArgumentParser
        args.secret_file = os.path.join(homepath, 'secret.txt')
        args.file_path = homepath
        args.output_path = newfolder
        args.start = self.start
        rmlst = rest_auth_class.REST(args)
        # Download the profile and alleles
        rmlst.main()
        # Get the new alleles into a list, and create the combinedAlleles file
        alleles = glob(os.path.join(newfolder, '*.tfa'))
        self.combinealleles(newfolder, alleles)

    def combinealleles(self, allelepath, alleles):
        """Merge every allele FASTA in *alleles* into rMLST_combined.fasta."""
        printtime('Creating combined rMLST allele file', self.start)
        with open(os.path.join(allelepath, 'rMLST_combined.fasta'), 'w') as combinedfile:
            # Open each allele file
            for allele in sorted(alleles):
                # BUG FIX: the handle was previously opened inline with mode
                # 'rU' (deprecated and removed in Python 3.11) and never
                # closed; use a context manager with plain text mode instead.
                with open(allele) as fasta:
                    for record in SeqIO.parse(fasta, "fasta"):
                        # Extract the sequence record from each entry in the multifasta
                        # Replace any dashes in the record.id with underscores
                        record.id = record.id.replace('-', '_')
                        # Remove any dashes or 'N's from the sequence data -
                        # makeblastdb can't handle sequences with gaps
                        # noinspection PyProtectedMember
                        record.seq._data = record.seq._data.replace('-', '').replace('N', '')
                        # Clear the name and description attributes of the record
                        record.name = ''
                        record.description = ''
                        # Write each record to the combined file
                        SeqIO.write(record, combinedfile, 'fasta')

    def __init__(self, args):
        """Store run options and immediately perform the download/merge."""
        self.path = os.path.join(args.path)
        self.start = args.start
        self.analysistype = 'rMLST'
        self.getrmlsthelper()
if __name__ == '__main__':
    # Argument parser for user-inputted values, and a nifty help menu
    parser = ArgumentParser(description='')
    parser.add_argument('path',
                        help='Specify input directory')
    # Get the arguments into an object
    arguments = parser.parse_args()
    # Define the start time
    arguments.start = time.time()
    # Run the script
    Get(arguments)
    # Print a bold, green exit statement.
    # BUG FIX: a trailing scrape artifact (" | PypiClean") fused onto this
    # line made the module unparseable; it has been removed.
    print('\033[92m' + '\033[1m' + "\nElapsed Time: %0.2f seconds" % (time.time() - arguments.start) + '\033[0m')
/GLManager-1.1.6.tar.gz/GLManager-1.1.6/bot/commands/helper.py | from telebot import types
from app.data.repositories import UserRepository
from app.domain.use_cases import UserUseCase
from .. import bot
from ..middleware import AdminPermission, DealerPermission, permission_required
from .helpers.dealer import (
find_dealer_by_id,
is_dealer,
get_all_users_of_dealer,
get_available_limit_creation_accounts,
)
def callback_query_menu(user_id: int = None) -> types.InlineKeyboardMarkup:
    """Build the main inline keyboard; non-dealers also see the resale entry."""
    labels = [
        ('CRIAR USUARIO', 'create_user'),
        ('DELETAR USUARIO', 'delete_user'),
        ('OBTER USUARIO', 'get_user'),
        ('OBTER TODOS OS USUARIOS', 'list_users'),
        ('MONITOR', 'monitor'),
    ]
    buttons = [
        [types.InlineKeyboardButton(text, callback_data=data)]
        for text, data in labels
    ]
    if not user_id or not is_dealer(user_id):
        buttons.append([types.InlineKeyboardButton('REVENDA', callback_data='revenue')])
    return types.InlineKeyboardMarkup(buttons)
def callback_query_back_menu(message: str = '🔙MENU') -> types.InlineKeyboardMarkup:
    """Single-button keyboard that navigates back to the main menu."""
    back_button = types.InlineKeyboardButton(message, callback_data='back_menu')
    return types.InlineKeyboardMarkup([[back_button]])
def callback_query_back(callback_data, message: str = '🔙VOLTAR') -> types.InlineKeyboardButton:
    """Create a generic "back" button that triggers *callback_data*."""
    button = types.InlineKeyboardButton(message, callback_data=callback_data)
    return button
def create_message_menu(user_id: int = None) -> str:
    """Build the HTML main-menu text; dealers get a quota/expiry summary."""
    text = '<b>🤖OLÁ, BEM VINDO AO BOT🤖</b>\n'
    if is_dealer(user_id):
        # Dealer view: show creation limit, remaining quota, expiry date and
        # total accounts created by this dealer.
        text += '<b>VOCÊ É UM REVENDEDOR</b>\n\n'
        text += '<b>LIMITE DE CRIAÇÃO DE CONTA:</b> <code>{}</code>\n'.format(
            find_dealer_by_id(user_id).account_creation_limit
        )
        text += '<b>LIMITE DISPONIVEL:</b> <code>{}</code>\n'.format(
            get_available_limit_creation_accounts(user_id)
        )
        text += '<b>SEU ACESSO EXPIRA EM:</b> <code>{}</code>\n'.format(
            find_dealer_by_id(user_id).expires_at
        )
        text += '<b>TOTAL CONTAS CRIADAS:</b> <code>{}</code>\n'.format(
            len(get_all_users_of_dealer(user_id, UserUseCase(UserRepository())))
        )
        text += '\n'
    else:
        # Anyone who is not a dealer is treated as an administrator here.
        text += '<b>VOCÊ É UM ADMINISTRADOR</b>\n'
        text += '\n'
    text += '<b>🖥COMANDOS DISPONIVEIS🖥</b>'
    return text
@bot.message_handler(commands=['help', 'start', 'menu'])
@permission_required([AdminPermission(), DealerPermission()])
def send_help(message: types.Message):
    """Handle /help, /start and /menu: reply with the HTML main menu."""
    bot.reply_to(
        message,
        create_message_menu(message.from_user.id),
        parse_mode='HTML',
        reply_markup=callback_query_menu(message.from_user.id),
    )
@bot.callback_query_handler(func=lambda call: call.data == 'back_menu')
@permission_required([AdminPermission(), DealerPermission()])
def back_menu(call: types.CallbackQuery):
    """Redraw the main menu in place when the user presses the back button."""
    bot.edit_message_text(
        chat_id=call.message.chat.id,
        message_id=call.message.message_id,
        text=create_message_menu(call.from_user.id),
        # BUG FIX: the menu text is HTML (see send_help); without parse_mode
        # the raw tags were displayed to the user.
        parse_mode='HTML',
        reply_markup=callback_query_menu(call.from_user.id),
    )
@bot.message_handler(commands=['id'])
def send_id(message: types.Message):
    """Reply with the sender's numeric Telegram ID (open to everyone)."""
    reply_text = '<b>🆔ID:</b> <code>{}</code>'.format(message.from_user.id)
    bot.reply_to(message=message, text=reply_text, parse_mode='HTML')
/BALISTICA-1.0.0.tar.gz/BALISTICA-1.0.0/balistica/GUI/NumericalV2.py | import numpy as np
import matplotlib
import tkinter as tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
from matplotlib.figure import Figure
from balistica.PhysicsEngine.NumericalVSqPhysicsHandler import NumericalVSqPhysicsHandler
from tkinter import filedialog
class NumericalV2GUI(tk.Frame):
    def __init__(self, master=None):
        """Build the whole Tk interface and draw the initial empty plot."""
        self.physicshandler = NumericalVSqPhysicsHandler(0, 0, 0)
        tk.Frame.__init__(self, master)
        self.grid()
        # Top level panel structure
        self.panels = tk.Frame(self)
        self.panels.pack(fill=tk.BOTH, expand=1)
        # Left and right panels
        self.leftpanel = tk.Frame(self.panels, relief=tk.GROOVE)
        self.leftpanel.pack(side=tk.LEFT)
        self.rightpanel = tk.Frame(self.panels)
        self.rightpanel.pack(side=tk.RIGHT)
        # Controls grid for upper left pannel
        self.ulpanel = tk.LabelFrame(self.leftpanel, text='Parameters')
        self.ulpanel.pack(side=tk.TOP)
        # Control for angle
        self.anglelable = tk.Label(self.ulpanel, text='Initial angle (degrees)')
        self.anglelable.grid(row=0, column=0)
        self.angleinput = tk.Scale(self.ulpanel, from_=0, to=90, resolution=1, length=170,orient=tk.HORIZONTAL)
        self.angleinput.grid(row=0, column=1)
        # Control for drag
        self.draglable = tk.Label(self.ulpanel, text='Drag [b/m] (m^-1)')
        self.draglable.grid(row=1, column=0)
        self.draginput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.draginput.grid(row=1, column=1)
        self.draginput.insert(0, '0.0001')
        # Control for velocity
        self.velocitylabel = tk.Label(self.ulpanel, text='Initial velocity (m/s)')
        self.velocitylabel.grid(row=2, column=0)
        self.velocityinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.velocityinput.grid(row=2, column=1)
        self.velocityinput.insert(0, '125')
        # Initial position: latitude / longitude / height entries
        self.latIlabel = tk.Label(self.ulpanel, text='I. Lat (m)')
        self.latIlabel.grid(row=3, column=0)
        self.lonIlabel = tk.Label(self.ulpanel, text='I. Lon (m)')
        self.lonIlabel.grid(row=3, column=1)
        self.heightIlabel = tk.Label(self.ulpanel, text='I. Height (m)')
        self.heightIlabel.grid(row=3, column=2)
        self.latIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.latIinput.grid(row=4, column=0)
        self.lonIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.lonIinput.grid(row=4, column=1)
        self.heightIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.heightIinput.grid(row=4, column=2)
        self.latIinput.insert(0, '0')
        self.lonIinput.insert(0, '0')
        self.heightIinput.insert(0, '0')
        # Blank spacer row between the initial and final position groups
        self.pblanklabel = tk.Label(self.ulpanel, text='')
        self.pblanklabel.grid(row=5, column=0, columnspan=2)
        # Final position: latitude / longitude / height entries
        self.latFlabel = tk.Label(self.ulpanel, text='F. Lat (m)')
        self.latFlabel.grid(row=6, column=0)
        self.lonFlabel = tk.Label(self.ulpanel, text='F. Lon (m)')
        self.lonFlabel.grid(row=6, column=1)
        self.heightFlabel = tk.Label(self.ulpanel, text='F. Height (m)')
        self.heightFlabel.grid(row=6, column=2)
        self.latFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.latFinput.grid(row=7, column=0)
        self.lonFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.lonFinput.grid(row=7, column=1)
        self.heightFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.heightFinput.grid(row=7, column=2)
        self.latFinput.insert(0, '100')
        self.lonFinput.insert(0, '100')
        self.heightFinput.insert(0, '0')
        # Toggle controlling whether the barrier is modelled (see compute()).
        self.barrierset = tk.BooleanVar()
        self.barriercheck = tk.Checkbutton(self.ulpanel, justify=tk.RIGHT, variable=self.barrierset, onvalue=True,
                                           offvalue=False, text='Show barrier')
        self.barriercheck.grid(row=8, column=0)
        # Controls grid for upper left pannel
        self.blpanel = tk.Frame(self.leftpanel)
        self.blpanel.pack(side=tk.BOTTOM)
        # Buttons for various functions
        # NOTE(review): every plot button below reuses self.computebutton, so
        # only the last assignment stays referenced; harmless for Tk (widgets
        # are owned by their parent), but renaming would aid readability.
        self.blanklabel= tk.Label(self.blpanel, text="")
        self.blanklabel.grid(row=0, column=0, columnspan=2)
        self.computebutton = tk.Button(self.blpanel, text="Compute", width=20, command=self.compute, default=tk.NORMAL)
        self.computebutton.grid(row=1, column=0, columnspan=3)
        self.computebutton = tk.Button(self.blpanel, text="x(t) vs. t", width=10, command=self.txGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=0)
        self.computebutton = tk.Button(self.blpanel, text="z(t) vs. t", width=10, command=self.tyGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=1)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. t", width=10, command=self.tvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=2)
        self.computebutton = tk.Button(self.blpanel, text="z(t) vs. x(t)", width=10, command=self.xyGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=0)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. x(t)", width=10, command=self.xvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=1)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. z(t)", width=10, command=self.yvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=2)
        # Red label used for user-facing validation messages
        self.userlabel = tk.Label(self.blpanel, text="", fg="red")
        self.userlabel.grid(row=4, column=0, columnspan=3)
        self.csvbutton= tk.Button(self.blpanel, text="Save to CSV", command=self.saveCSV, default=tk.NORMAL)
        self.csvbutton.grid(row=5, column=0)
        self.pngbutton = tk.Button(self.blpanel, text="Save to PNG", command=self.savePNG, default=tk.NORMAL)
        self.pngbutton.grid(row=5, column=1)
        self.quitbutton = tk.Button(self.blpanel, text="Quit", command=self.bye, default=tk.NORMAL)
        self.quitbutton.grid(row=5, column=2)
        # Physics defaults before the first Compute press
        self.physicshandler.v0 = 0
        self.physicshandler.theta = 0
        self.physicshandler.b = 1
        # Empty initial plot embedded in the right panel
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        axs.set_xlabel('Distance (m)')
        axs.set_ylabel('Height (m)')
        axs.set_xlim(0, 100)
        axs.set_ylim(0, 100)
        axs.set_title('Ballistics with constant drag (b) proportional to v^2')
        canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
        canvas.draw()
        canvas.get_tk_widget().grid(row=0, column=0)
        self.addStatistics()
        # Remember the last figure drawn so it can be exported later.
        self.mostrecentfig : Figure = fig
def geography(self):
latI = 0.0
try:
latI = float(self.latIinput.get())
except:
self.userlabel['text'] = "Initial latitude format incorrect"
latF = 0.0
try:
latF = float(self.latFinput.get())
except:
self.userlabel['text'] = "Final latitude format incorrect"
lonI = 0.0
try:
lonI = float(self.lonIinput.get())
except:
self.userlabel['text'] = "Initial longitude format incorrect"
lonF = 0.0
try:
lonF = float(self.lonFinput.get())
except:
self.userlabel['text'] = "Final longitude format incorrect"
heightI = 0.0
try:
heightI = float(self.heightIinput.get())
except:
self.userlabel['text'] = "Initial latitude format incorrect"
heightF = 0.0
try:
heightF = float(self.heightFinput.get())
except:
self.userlabel['text'] = "Initial latitude format incorrect"
distance = np.sqrt(np.power((latF - latI), 2) + np.power((lonF - lonI), 2))
height = heightF - heightI
return (distance, height)
def compute(self):
    """Read velocity/angle/drag inputs, run the physics model and plot z(x).

    Shows a message in the user label and aborts if any numeric field is
    malformed.
    """
    self.userlabel['text'] = ""
    vel0 = 0.0
    try:
        vel0 = float(self.velocityinput.get())
    except ValueError:
        self.userlabel['text'] = "Velocity format incorrect"
        return
    # Fixed: angle and drag conversions were previously unguarded, so a
    # malformed entry raised an uncaught ValueError instead of a message.
    try:
        theta = np.deg2rad(float(self.angleinput.get()))
    except ValueError:
        self.userlabel['text'] = "Angle format incorrect"
        return
    try:
        b = float(self.draginput.get())
    except ValueError:
        self.userlabel['text'] = "Drag format incorrect"
        return
    self.physicshandler.v0 = vel0
    self.physicshandler.theta = theta
    self.physicshandler.b = b
    distance, height = self.geography()
    self.physicshandler.distance = distance
    if self.barrierset.get():
        self.physicshandler.height = height
        self.physicshandler.barrier = True
    else:
        # -1 acts as the "no barrier" sentinel for the physics handler
        self.physicshandler.height = -1
        self.physicshandler.barrier = False
    self.physicshandler.compute()
    self.xyGraph()
def txGraph(self):
    """Plot distance x(t) versus time for the in-flight portion of the data."""
    for widget in self.rightpanel.grid_slaves():
        widget.destroy()
    handler = self.physicshandler
    fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
    # Restrict to samples up to the total time of flight.
    in_flight = handler.data[handler.data['t'] <= handler.totalT()]
    ax.plot(in_flight['t'], in_flight['x'], '-', linewidth=2, color='b')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Distance (m)')
    ax.set_title('Ballistics with constant drag (b) proportional to v^2')
    canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
    self.addStatistics()
    self.mostrecentfig = fig
def tyGraph(self):
    """Plot height z(t) versus time for the in-flight portion of the data."""
    for widget in self.rightpanel.grid_slaves():
        widget.destroy()
    handler = self.physicshandler
    fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
    # Restrict to samples up to the total time of flight.
    in_flight = handler.data[handler.data['t'] <= handler.totalT()]
    ax.plot(in_flight['t'], in_flight['z'], '-', linewidth=2, color='b')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Height (m)')
    ax.set_title('Ballistics with constant drag (b) proportional to v^2')
    canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
    self.addStatistics()
    self.mostrecentfig = fig
def tvGraph(self):
    """Plot speed v(t) versus time for the in-flight portion of the data."""
    for widget in self.rightpanel.grid_slaves():
        widget.destroy()
    handler = self.physicshandler
    fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
    # Restrict to samples up to the total time of flight.
    in_flight = handler.data[handler.data['t'] <= handler.totalT()]
    ax.plot(in_flight['t'], in_flight['v'], '-', linewidth=2, color='b')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Velocity (m/s)')
    ax.set_title('Ballistics with constant drag (b) proportional to v^2')
    canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
    self.addStatistics()
    self.mostrecentfig = fig
def xyGraph(self):
    """Plot the trajectory z(x); optionally mark the barrier/target point."""
    for widget in self.rightpanel.grid_slaves():
        widget.destroy()
    handler = self.physicshandler
    distance, height = self.geography()
    fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
    ax.plot(handler.data['x'], handler.data['z'], '-', linewidth=2, color='b')
    ax.set_xlabel('Distance (m)')
    ax.set_ylabel('Height (m)')
    barrier_on = bool(self.barrierset.get())
    # Pad the axis limits a few metres past the extreme values so the curve
    # is not clipped at the edges.
    if barrier_on:
        upper = np.max([handler.totalR() + 3, handler.maxH() + 3, distance + 3])
        lower = np.min([0, handler.height])
    else:
        upper = np.max([handler.totalR() + 3, handler.maxH() + 3])
        lower = 0
    ax.set_xlim(np.min([0, handler.totalR()]), upper)
    ax.set_ylim(lower, upper)
    ax.set_title('Ballistics with constant drag (b) proportional to v^2')
    if barrier_on:
        ax.axvline(x=distance, color='red', linestyle='--')
        ax.plot([distance], [height], marker='P', color='green')
    canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
    self.addStatistics()
    self.mostrecentfig = fig
def xvGraph(self):
    """Plot speed v versus distance x for the in-flight portion of the data."""
    for widget in self.rightpanel.grid_slaves():
        widget.destroy()
    handler = self.physicshandler
    fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
    # Restrict to samples up to the total range.
    in_flight = handler.data[handler.data['x'] <= handler.totalR()]
    ax.plot(in_flight['x'], in_flight['v'], '-', linewidth=2, color='b')
    ax.set_xlabel('Distance (m)')
    ax.set_ylabel('Velocity (m/s)')
    ax.set_title('Ballistics with constant drag (b) proportional to v^2')
    canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
    self.addStatistics()
    self.mostrecentfig = fig
def yvGraph(self):
    """Plot speed v versus height z (x-axis reversed so flight reads left-to-right)."""
    for widget in self.rightpanel.grid_slaves():
        widget.destroy()
    handler = self.physicshandler
    fig, ax = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
    # Keep samples at or above the configured final height.
    in_flight = handler.data[handler.data['z'] >= handler.height]
    ax.plot(in_flight['z'], in_flight['v'], '-', linewidth=2, color='b')
    ax.set_xlabel('Height (m)')
    ax.set_ylabel('Velocity (m/s)')
    ax.set_title('Ballistics with constant drag (b) proportional to v^2')
    ax.invert_xaxis()
    canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
    self.addStatistics()
    self.mostrecentfig = fig
def addStatistics(self):
    """Show a summary panel of the computed flight statistics below the plot."""
    handler = self.physicshandler
    stats = tk.LabelFrame(self.rightpanel, text='Results')
    stats.grid(row=1, column=0)
    # One label per statistic, stacked vertically in grid rows 0..5.
    rows = (
        f'Range: {handler.totalR():.1f} m',
        f'Max height: {handler.maxH():.1f} m',
        f'Time to max height: {handler.maxT():.1f} s',
        f'Time of flight: {handler.totalT():.1f} s',
        f'Velocity of impact: {handler.finalV():.1f} m/s',
        f'Angle of impact: {handler.finalTheta():.1f} degrees',
    )
    for row_index, text in enumerate(rows):
        tk.Label(stats, text=text).grid(row=row_index, column=0)
def saveCSV(self):
    """Prompt for a filename and save the computed trajectory data as CSV."""
    if self.physicshandler.data is None:
        self.userlabel['text'] = "No computed data exists"
        return
    fname = filedialog.asksaveasfilename(
        initialdir=".", title="Select file",
        filetypes=(("CSV files", "*.csv"), ("all files", "*.*")))
    if not fname:
        # Fixed: the dialog returns '' on cancel; previously this still
        # wrote a file literally named ".csv".
        return
    self.physicshandler.save_csv(fname + ".csv")
    self.userlabel['text'] = "File saved"
def savePNG(self):
    """Prompt for a filename and save the most recent figure as a PNG."""
    if self.physicshandler.data is None:
        self.userlabel['text'] = "No computed data exists"
        return
    fname = filedialog.asksaveasfilename(
        initialdir=".", title="Select file",
        filetypes=(("PNG files", "*.png"), ("all files", "*.*")))
    if not fname:
        # Fixed: the dialog returns '' on cancel; previously this still
        # wrote a file literally named ".png".
        return
    self.mostrecentfig.savefig(fname + ".png")
    self.userlabel['text'] = "File saved"
def bye(self):
    """Stop the Tk event loop and destroy the window, ending the application."""
    self.quit()
    self.destroy()
# Script entry point: launch the GUI event loop.
# Fixed: removed a corrupted trailing artifact fused onto the final line.
if __name__ == "__main__":
    app = NumericalV2GUI()
    app.mainloop()
# Source: fipper/pykeyboard/inline_keyboard.py (extraction artifact split from the import line)
from typing import List, Union
from fipper.emoji import *
from fipper.types import InlineKeyboardMarkup, InlineKeyboardButton
class InlineKeyboard(InlineKeyboardMarkup):
    """Helper subclass of :class:`InlineKeyboardMarkup` that builds button
    grids, numeric pagination rows, and language-selection keyboards.
    """

    # Templates for the five pagination button captions; ``{}`` is the page number.
    _SYMBOL_FIRST_PAGE = '« {}'
    _SYMBOL_PREVIOUS_PAGE = '‹ {}'
    _SYMBOL_CURRENT_PAGE = '· {} ·'
    _SYMBOL_NEXT_PAGE = '{} ›'
    _SYMBOL_LAST_PAGE = '{} »'
    # Locale code -> flag-emoji label shown on language buttons.
    _LOCALES = {
        'be_BY': f'{FLAG_BELARUS} Беларуская',  # Belarusian - Belarus
        'de_DE': f'{FLAG_GERMANY} Deutsch',  # German - Germany
        'zh_CN': f'{FLAG_CHINA} 中文',  # Chinese - China
        # English - United States
        'en_US': f'{FLAG_UNITED_KINGDOM} English',
        'fr_FR': f'{FLAG_FRANCE} Français',  # French - France
        # Indonesian - Indonesia
        'id_ID': f'{FLAG_INDONESIA} Bahasa Indonesia',
        'it_IT': f'{FLAG_ITALY} Italiano',  # Italian - Italy
        'ko_KR': f'{FLAG_SOUTH_KOREA} 한국어',  # Korean - Korea
        'tr_TR': f'{FLAG_TURKEY} Türkçe',  # Turkish - Turkey
        'ru_RU': f'{FLAG_RUSSIA} Русский',  # Russian - Russia
        'es_ES': f'{FLAG_SPAIN} Español',  # Spanish - Spain
        'uk_UA': f'{FLAG_UKRAINE} Українська',  # Ukrainian - Ukraine
        'uz_UZ': f'{FLAG_UZBEKISTAN} Oʻzbekcha',  # Uzbek - Uzbekistan
    }

    def __init__(self, row_width=3):
        """Create an empty keyboard; ``row_width`` buttons go on each row of add()."""
        self.inline_keyboard = list()
        super().__init__(inline_keyboard=self.inline_keyboard)
        self.row_width = row_width

    def add(self, *args):
        """Replace the keyboard with the given buttons, ``row_width`` per row.

        Note: this overwrites any rows previously built with add()/row().
        """
        self.inline_keyboard = [
            args[i:i + self.row_width]
            for i in range(0, len(args), self.row_width)
        ]

    def row(self, *args):
        """Append the given buttons as one new row."""
        self.inline_keyboard.append([button for button in args])

    def _add_button(
        self,
        text: str,
        callback_data: str = None,
        url: str = None,
        user_id: int = None,
    ):
        """Build an :class:`InlineKeyboardButton` for pagination.

        Exactly one of callback_data/url/user_id is expected; callback_data is
        formatted through ``self.callback_pattern`` (set by :meth:`paginate`).
        Returns None when all three are None.
        """
        if callback_data is not None:
            return InlineKeyboardButton(
                text=text,
                callback_data=self.callback_pattern.format(
                    number=callback_data)
            )
        if url is not None:
            return InlineKeyboardButton(
                text=text,
                url=url
            )
        if user_id is not None:
            return InlineKeyboardButton(
                text=text,
                user_id=user_id
            )

    @property
    def _left_pagination(self):
        # Pages 1..3 current: show [1 2 3 4› N»]; the chained conditionals
        # pick the symbol per slot (current, "next", "last", or plain).
        return [
            self._add_button(
                self._SYMBOL_CURRENT_PAGE.format(number), number)
            if number == self.current_page else self._add_button(
                self._SYMBOL_NEXT_PAGE.format(number), number)
            if number == 4 else self._add_button(
                self._SYMBOL_LAST_PAGE.format(self.count_pages),
                self.count_pages)
            if number == 5 else self._add_button(number, number)
            for number in range(1, 6)
        ]

    @property
    def _middle_pagination(self):
        # Somewhere in the middle: [«1 ‹p-1 ·p· p+1› N»].
        return [
            self._add_button(
                self._SYMBOL_FIRST_PAGE.format(1), 1),
            self._add_button(
                self._SYMBOL_PREVIOUS_PAGE.format(self.current_page - 1),
                self.current_page - 1),
            self._add_button(
                self._SYMBOL_CURRENT_PAGE.format(self.current_page),
                self.current_page),
            self._add_button(
                self._SYMBOL_NEXT_PAGE.format(self.current_page + 1),
                self.current_page + 1),
            self._add_button(
                self._SYMBOL_LAST_PAGE.format(self.count_pages),
                self.count_pages)
        ]

    @property
    def _right_pagination(self):
        # Near the end: [«1 ‹N-3 N-2 N-1 N], marking the current page.
        return [
            self._add_button(
                self._SYMBOL_FIRST_PAGE.format(1), 1),
            self._add_button(
                self._SYMBOL_PREVIOUS_PAGE.format(self.count_pages - 3),
                self.count_pages - 3)
        ] + [
            self._add_button(
                self._SYMBOL_CURRENT_PAGE.format(number), number)
            if number == self.current_page else self._add_button(number, number)
            for number in range(self.count_pages - 2, self.count_pages + 1)
        ]

    @property
    def _full_pagination(self):
        # 5 pages or fewer: one plain button per page, current one marked.
        return [
            self._add_button(number, number)
            if number != self.current_page else self._add_button(
                self._SYMBOL_CURRENT_PAGE.format(number), number)
            for number in range(1, self.count_pages + 1)
        ]

    @property
    def _build_pagination(self):
        """Select the pagination layout for the current page/count."""
        if self.count_pages <= 5:
            return self._full_pagination
        else:
            if self.current_page <= 3:
                return self._left_pagination
            elif self.current_page > self.count_pages - 3:
                return self._right_pagination
            else:
                return self._middle_pagination

    def paginate(self, count_pages: int, current_page: int,
                 callback_pattern: str):
        """Append a pagination row.

        ``callback_pattern`` must contain a ``{number}`` placeholder used for
        each button's callback data. Returns None (list.append's result).
        """
        self.count_pages = count_pages
        self.current_page = current_page
        self.callback_pattern = callback_pattern
        return self.inline_keyboard.append(self._build_pagination)

    def languages(self, callback_pattern: str, locales: Union[str, List[str]],
                  row_width: int = 2):
        """Replace the keyboard with language buttons for the given locales.

        Unknown locale codes get an 'Invalid locale' caption;
        ``callback_pattern`` must contain a ``{locale}`` placeholder.
        """
        locales = locales if isinstance(locales, list) else [locales]
        buttons = [
            InlineKeyboardButton(
                text=self._LOCALES.get(locales[i], 'Invalid locale'),
                callback_data=callback_pattern.format(locale=locales[i])
            )
            for i in range(0, len(locales))
        ]
        self.inline_keyboard = [
            buttons[i:i + row_width]
            for i in range(0, len(buttons), row_width)
        ]
class InlineButton(InlineKeyboardButton):
    """Thin convenience subclass of :class:`InlineKeyboardButton`.

    Accepts the same keyword arguments as the parent class and simply
    forwards them unchanged.
    """

    def __init__(self, text=None, callback_data=None, url=None,
                 login_url=None, user_id=None, switch_inline_query=None,
                 switch_inline_query_current_chat=None, callback_game=None):
        # Fixed: removed a corrupted trailing artifact fused onto the
        # closing parenthesis of the original super().__init__ call.
        super().__init__(
            text=text,
            callback_data=callback_data,
            url=url,
            login_url=login_url,
            user_id=user_id,
            switch_inline_query=switch_inline_query,
            switch_inline_query_current_chat=switch_inline_query_current_chat,
            callback_game=callback_game
        )
# Source: otsun/source.py (extraction artifact split from the import line)
import itertools
import Part
import numpy as np
from FreeCAD import Base
from .math import pick_random_from_cdf, myrandom, tabulated_function, two_orthogonal_vectors, area_of_triangle, random_point_of_triangle
from .optics import dispersion_from_main_direction, random_polarization, dispersion_polarization
from .ray import Ray
from random import choices
from scipy.spatial import ConvexHull
EPSILON = 1E-6
# Tolerance for considering equal to zero
class GeneralizedSunWindow(object):
    """Light-emitting window shaped as the convex hull of the scene's
    projection onto a plane orthogonal to ``main_direction``.

    The hull polygon is triangulated as a fan from its first vertex so that
    uniform random emission points can be drawn area-weighted per triangle.
    """

    def __init__(self, scene, main_direction):
        # Collect the bounding boxes of every solid and face in the scene.
        bbs = []
        for shape in itertools.chain(scene.solids, scene.faces):
            bbs.append(shape.BoundBox)
        # Plane origin: half a scene diagonal behind the scene center,
        # against the emission direction.
        origin = (scene.boundbox.Center -
                  main_direction * 0.5 * scene.boundbox.DiagonalLength)
        # (u, v): orthonormal in-plane axes for 2D hull coordinates.
        u, v = two_orthogonal_vectors(main_direction)
        points = []
        plane_points = []
        for bb in bbs:
            xs = [bb.XMin, bb.XMax]
            ys = [bb.YMin, bb.YMax]
            zs = [bb.ZMin, bb.ZMax]
            coords = itertools.product(xs, ys, zs)
            for c in coords:
                point = Base.Vector(c)
                points.append(point)
                # 2% scaling pads the projected footprint slightly so the
                # window fully covers the scene silhouette.
                pu = u.dot(point - origin) * 1.02
                pv = v.dot(point - origin) * 1.02
                plane_points.append([pu,pv])
        hull = ConvexHull(plane_points)
        # hull.vertices are in counterclockwise order, so the fan
        # triangulation below is valid (no self-intersections).
        plane_vertices = [plane_points[i] for i in hull.vertices]
        self.vertices = [origin + u*pu + v*pv for (pu,pv) in plane_vertices]
        # Triangle fan anchored at vertices[0].
        self.triangles = [[self.vertices[0], self.vertices[i], self.vertices[i+1]]
                          for i in range(1,len(self.vertices)-1)]
        self.triangle_areas = list(map(area_of_triangle, self.triangles))
        # Total emitting area.
        self.aperture = sum(self.triangle_areas)
        self.main_direction = main_direction

    def add_to_document(self, doc):
        """Add the window polygon to the FreeCAD document as a "SunWindow" feature."""
        sw = Part.makePolygon(self.vertices, True)
        doc.addObject("Part::Feature", "SunWindow").Shape = sw

    def random_point(self):
        """Return a uniform random point of the window (triangle picked
        area-weighted, then uniform within it)."""
        random_triangle = choices(self.triangles, self.triangle_areas)[0]
        return random_point_of_triangle(random_triangle)

    def random_direction(self):
        """
        Returns the main direction

        Maybe in the future will return some random vector

        Returns
        -------
        Base.Vector
        """
        return self.main_direction
class SunWindow(object):
    """
    Class that implements a Sun window (rectangle that emits rays)

    Source of light defined by a rectangle "at infinity" in space that emits
    rays perpendicular to it.

    Parameters
    ----------
    scene : Scene
        Scene that contains the sun window
    main_direction : Base.Vector
        Vector orthogonal to the emitting region

    Attributes
    ----------
    origin : Base.Vector
        Corner of the rectangle (minimum corner in the (v1, v2) frame)
    v1, v2 : Base.Vector
        Unit vectors parallel to the sides of the rectangle
    length1, length2 : float
        Lengths of the sides of the rectangle
    aperture : float
        Area of the rectangle
    """

    def __init__(self, scene, main_direction):
        bbs = []
        for shape in itertools.chain(scene.solids, scene.faces):
            bbs.append(shape.BoundBox)
        projected_points = []
        for bb in bbs:
            xs = [bb.XMin, bb.XMax]
            ys = [bb.YMin, bb.YMax]
            zs = [bb.ZMin, bb.ZMax]
            coords = itertools.product(xs, ys, zs)
            points = [Base.Vector(c) for c in coords]
            # Projection plane: half a scene diagonal behind the scene
            # center, against the emission direction.
            point_of_plane = (scene.boundbox.Center -
                              main_direction * 0.5 * scene.boundbox.DiagonalLength)
            projected_points.extend([p.projectToPlane(point_of_plane, main_direction)
                                     for p in points])
        (self.origin, self.v1, self.v2, self.length1, self.length2) = (
            SunWindow.find_min_rectangle(projected_points, main_direction))
        self.aperture = self.length1 * self.length2
        self.main_direction = main_direction

    @staticmethod
    def find_min_rectangle(points, normal):
        """
        Computes the minimum rectangle covering points in a direction

        Given a list of `points`, take its projection in a `normal` direction,
        and the rectangle with minimum area that encloses this projections

        Parameters
        ----------
        points : list of Base.Vector
            List of points to enclose
        normal : Base.Vector
            Vector orthogonal to the rectangle to be found

        Returns
        -------
        origin : Base.Vector
            Corner of the rectangle
        best_v1 : Base.Vector
            Unit vector parallel to one side of the rectangle
        best_v2 : Base.Vector
            Unit vector parallel to the other side of the rectangle
        length1 : float
            Length of side of the rectangle
        length2 : float
            Length of other side of the rectangle

        Raises
        ------
        ValueError
            If there are fewer than two distinct points (previously an
            UnboundLocalError).
        """
        min_area = None
        best_origin = best_v1 = best_v2 = None
        best_length1 = best_length2 = None
        for (p, q) in itertools.combinations(points, 2):
            v1 = q - p
            if v1.Length < EPSILON:  # skip (near-)coincident point pairs
                continue
            v1.normalize()
            v2 = v1.cross(normal)
            v2.normalize()
            # Extent of all points along this candidate edge direction.
            xs = [v1.dot(r - p) for r in points]
            ys = [v2.dot(r - p) for r in points]
            minx = min(xs)
            maxx = max(xs)
            miny = min(ys)
            maxy = max(ys)
            length1 = maxx - minx
            length2 = maxy - miny
            area = length1 * length2
            # Fixed: compare with "is None" instead of "not min_area", so a
            # legitimate zero/degenerate area is not mistaken for "no
            # candidate yet" (0 is falsy).
            if min_area is None or area < min_area:
                min_area = area
                best_origin = p + v1 * minx + v2 * miny
                best_v1 = v1
                best_v2 = v2
                best_length1 = length1
                best_length2 = length2
        if min_area is None:
            raise ValueError("Cannot find enclosing rectangle: "
                             "need at least two distinct points")
        # Expand the rectangle by 2% on each side so rays cover the edges.
        length1 = best_length1 * 1.04
        length2 = best_length2 * 1.04
        origin = best_origin - best_v1 * length1 * 0.02 - best_v2 * length2 * 0.02
        return origin, best_v1, best_v2, length1, length2

    def random_point(self):
        """
        Returns a random point on the rectangle

        Returns
        -------
        Base.Vector
        """
        return (self.origin + self.v1 * self.length1 * myrandom() +
                self.v2 * self.length2 * myrandom())

    def random_direction(self):
        """
        Returns the main direction

        Maybe in the future will return some random vector

        Returns
        -------
        Base.Vector
        """
        return self.main_direction

    def add_to_document(self, doc):
        """
        Adds the rectangle to the FreeCAD document

        Parameters
        ----------
        doc : App.Document
        """
        sw = Part.makePolygon([self.origin,
                               self.origin + self.v1 * self.length1,
                               self.origin + self.v1 * self.length1 +
                               self.v2 * self.length2,
                               self.origin + self.v2 * self.length2
                               ], True)
        doc.addObject("Part::Feature", "SunWindow").Shape = sw
class LightSource(object):
    """
    Sets up a light source with a given scene, a given emitting region and a given light spectrum.
    The emitting region must provide the main direction.
    Light spectrum could be: a constant value (for a single wavelength in nanometers), or a spectrum distribution.
    The distribution (dispersion) for the main direction is provided in "direction_distribution".
    The polarization_vector is a Base.Vector for polarized light. If is not given unpolarized light is generated.
    """

    def __init__(self, scene, emitting_region, light_spectrum, initial_energy, direction_distribution=None,
                 polarization_vector=None):
        self.scene = scene
        self.emitting_region = emitting_region
        self.light_spectrum = light_spectrum
        self.initial_energy = initial_energy
        self.direction_distribution = direction_distribution
        self.polarization_vector = polarization_vector
        # Populated elsewhere with the wavelengths of emitted rays.
        self.wavelengths = []

    def emit_ray(self):
        """
        Simulates the emission of a ray

        Returns
        -------
        Ray
            Ray emitted from a random point of the emitting region, with
            direction (optionally dispersed), wavelength and polarization.
        """
        point = self.emitting_region.random_point()
        main_direction = self.emitting_region.main_direction  # emitting main direction
        direction = main_direction
        theta = phi = None
        if self.direction_distribution is not None:  # main direction has a distribution
            theta = self.direction_distribution(myrandom())
            phi = 360.0 * myrandom()
            direction = dispersion_from_main_direction(main_direction, theta, phi)
        if self.polarization_vector is None:  # unpolarized light is active
            polarization_vector = random_polarization(direction)  # random polarization from light direction
        elif self.direction_distribution is not None:
            # Fixed: previously the dispersed polarization was computed and
            # then immediately overwritten by the undispersed
            # self.polarization_vector, discarding the dispersion.
            polarization_vector = dispersion_polarization(main_direction, self.polarization_vector, theta, phi)
        else:  # fixed polarization, no dispersion
            polarization_vector = self.polarization_vector
            polarization_vector.normalize()
        if np.isscalar(self.light_spectrum):
            wavelength = self.light_spectrum  # experiment with a single wavelength (nanometers)
        else:
            wavelength = pick_random_from_cdf(self.light_spectrum)  # light spectrum is active (nanometers)
        ray = Ray(self.scene, point, direction, wavelength, self.initial_energy, polarization_vector)
        return ray
# Auxiliary functions for buie_distribution
def _calculate_a1(CSR, SD):
""" Parameter a1 needed for the normalization of the probability distribution in the disk region
"""
th = np.arange(0.0, SD, 0.001)
values = [2.0 * np.pi * np.cos(0.326 * angle) / np.cos(0.305 * angle) * angle
for angle in th]
a1 = (1.0 - CSR) / np.trapz(values, dx=0.001)
return a1
def _circumsolar__density_distribution(angle, CSR):
gamma = 2.2 * np.log(0.52 * CSR) * CSR ** 0.43 - 0.1
kappa = 0.9 * np.log(13.5 * CSR) * CSR ** (-0.3)
return 2.0 * np.pi * np.exp(kappa) * angle ** (gamma + 1.0)
def _calculate_a2(CSR, SD, SS):
    """Normalisation constant for the circumsolar part of the Buie distribution.

    Integrates the circumsolar density over [SD, SS) mrad (trapezoid rule,
    step 0.001) and scales so that region carries probability CSR.
    """
    angles = np.arange(SD, SS, 0.001)
    density = np.vectorize(_circumsolar__density_distribution)(angles, CSR)
    return CSR / np.trapz(density, dx=0.001)
def _calculate_CDF_disk_region(a1, SD):
"""
Cumulative Distribution Function in the solar disk region
"""
th = np.arange(0.0, SD, 0.001)
values = [2.0 * np.pi * np.cos(0.326 * angle) / np.cos(0.305 * angle) * angle
for angle in th]
cumulative = np.cumsum(values)
CDF = (th, a1 * cumulative / 1000.0)
return CDF
def _th_solar_disk_region(u, CDF):
""" Random angle based on the probability distribution in the circumsolar region"""
# TODO: It is recomputed every time? @Ramon
# CDF = CDF_Disk_Region # calculate_CDF_disk_region(a1, SD)
idx = (np.abs(CDF[1] - u)).argmin()
return CDF[0][idx]
def _th_circumsolar_region(u, CSR, SD, a2):
""" Random angle based on the CDF in the circumsolar region"""
gamma = 2.2 * np.log(0.52 * CSR) * CSR ** (0.43) - 0.1
kappa = 0.9 * np.log(13.5 * CSR) * CSR ** (-0.3)
u_csr = (u - 1.0) + CSR # Since CSR-CDF starts at zero
f1 = u_csr * (gamma + 2.0) / (a2 * 2 * np.pi * np.exp(kappa))
f2 = SD ** (gamma + 2.0)
exp = (1.0 / (gamma + 2.0))
th_u = np.power(f1 + f2, exp)
return th_u
def buie_distribution(CircumSolarRatio):
    """
    Implementation of the Buie Distribution for Sun emission

    A distribution for the direct light from the sun according to the Buie model:

    Buie, D., 2005. Corrigendum to "The effective size of the solar cone for
    solar concentrating systems" [Solar Energy 74 (2003) 417-427].
    Solar Energy 79, 568-570. 5.2

    Parameters
    ----------
    CircumSolarRatio : float
        Fraction of the sun's energy contained in the circumsolar aureole.

    Returns
    -------
    angle distribution for random input: function
        Function that interpolates by straight line segments the tabulated
        inverse CDF, mapping u in [0, 1] to an emission angle in degrees.

    Notes
    -----
    Fixed: removed a corrupted trailing artifact fused onto the original
    final ``return`` line.
    """
    CSR = CircumSolarRatio
    SD = 4.65
    # Solar Disk in mrad
    SS = 43.6
    # Solar Size in mrad
    # Normalisation constants are computed lazily so each one is evaluated
    # only if its region is actually sampled.
    a2 = None
    CDF_Disk_Region = None
    u_values = np.arange(0.0, 1.001, 0.001)
    dist_values = []
    for u in u_values:
        if u <= 1.0 - CSR:
            # Solar disk region
            if CDF_Disk_Region is None:
                a1 = _calculate_a1(CSR, SD)
                # normalization constant for the disk region
                CDF_Disk_Region = _calculate_CDF_disk_region(a1, SD)
            dist_values.append(_th_solar_disk_region(u, CDF_Disk_Region) / 1000.0 * 180.0 / np.pi)
        else:
            # Circumsolar region
            if a2 is None:
                a2 = _calculate_a2(CSR, SD, SS)
                # normalization constant for the circumsolar region
            dist_values.append(_th_circumsolar_region(u, CSR, SD, a2) / 1000.0 * 180.0 / np.pi)
    f = tabulated_function(u_values, dist_values)
    return f
# Source: Ruikowa/Bootstrap/Token.py (extraction artifact split from the import line)
import re as re
from ..ObjectRegex.Tokenizer import (Tokenizer, str_matcher, regex_matcher,
char_matcher, unique_literal_cache_pool)
def _escape(*str_s):
return '|'.join([re.escape(string) for string in str_s])
class NameEnum:
    """Interned name constants for the bootstrap grammar.

    Every value is interned through ``unique_literal_cache_pool`` so that
    equal strings share one object throughout the parser.
    """

    # Grammar keywords (lowercase source-level literals)
    keyword_as = unique_literal_cache_pool['as']
    keyword_of = unique_literal_cache_pool['of']
    keyword_throw = unique_literal_cache_pool['throw']
    keyword_deftoken = unique_literal_cache_pool['deftoken']
    keyword_ignore = unique_literal_cache_pool['ignore']
    keyword_cast = unique_literal_cache_pool['cast']

    # Token/node category names (capitalized)
    Of = unique_literal_cache_pool['Of']
    Prefix = unique_literal_cache_pool['Prefix']
    Comments = unique_literal_cache_pool['Comments']
    Str = unique_literal_cache_pool['Str']
    Codes = unique_literal_cache_pool['Codes']
    Name = unique_literal_cache_pool['Name']
    Number = unique_literal_cache_pool['Number']
    Newline = unique_literal_cache_pool['Newline']
    TokenIgnore = unique_literal_cache_pool['TokenIgnore']
    Single = unique_literal_cache_pool['Single']
    Eq = unique_literal_cache_pool['Eq']
    TokenRelated = unique_literal_cache_pool['TokenRelated']
    TokenDef = unique_literal_cache_pool['TokenDef']
    Throw = unique_literal_cache_pool['Throw']
# Lexer specification: (token name, matcher) pairs, tried in order.
token_table = (
    # match by value
    ("auto_const", char_matcher(
        ('|',
         '{',
         '}',
         ';',
         '[',
         ']',
         '(',
         ')',
         '+',
         '*',
         '.')
    )),
    # match by value
    ("auto_const", str_matcher(
        ("::=", ":=")
    )),
    # match by name
    ('Comment', regex_matcher(re.compile(r'(#.*)|(((/\*)+?[\w\W]+?(\*/)+))'))),
    ("Str", regex_matcher(re.compile(r"[A-Z]'([^\\']+|\\.)*?'|'([^\\']+|\\.)*?'"))),
    ("Codes", regex_matcher(re.compile(r'{{[\w\W]+?\}\}'))),
    ("Name", regex_matcher("[a-zA-Z_\u4e00-\u9fa5][a-zA-Z0-9_\u4e00-\u9fa5\.]*")),
    ("Number", regex_matcher("\d+")),
    # do not match
    ("Space", regex_matcher('\s+|,')),
)
# Intern all token names.
token_table = tuple((unique_literal_cache_pool[k], v) for k, v in token_table)
keyword = unique_literal_cache_pool['keyword']
# Words that are re-categorized from Name to keyword during tokenization.
cast_map = {
    'as': keyword,
    'throw': keyword,
    'deftoken': keyword,
    'ignore': keyword,
    'for': keyword,
    'of': keyword,
    'cast': keyword
}


def token_func(_):
    """Tokenize raw source using this module's token table.

    Converted from a lambda assignment (PEP 8 E731); also removes a
    corrupted trailing artifact from the original line.
    """
    return Tokenizer.from_raw_strings(
        _, token_table, to_ignore=({'Space', 'Comment'}, {}), cast_map=cast_map)
# Source: apav/core/range.py (extraction artifact split from the import line)
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from apav.core.isotopic import Element
from collections import OrderedDict
from configparser import ConfigParser
import copy
from tabulate import tabulate
import numpy as n
from apav.utils import helpers, validate
import apav as ap
from apav.utils.logging import log
class Range:
    """
    A single mass spectrum range: an ion composition, a half-open mass/charge
    interval [lower, upper), a reconstruction volume, and a display color.
    """

    # Monotonically increasing id shared by all Range instances
    __next_id = 0

    def __init__(
        self,
        ion: Union["ap.Ion", str],
        minmax: Tuple[Number, Number],
        vol: Number = 1,
        color: Tuple[Number, Number, Number] = (0, 0, 0),
    ):
        """
        Define a singular mass spectrum range composed of a composition, interval, volume, and color. i.e.

        Created as:

        >>> cu = Range("Cu", (62, 66), color=(0.5, 1, 0.25))

        :param ion: the range composition
        :param minmax: (min, max) tuple of the mass spectrum range
        :param vol: the "volume" of the atom used during reconstruction
        :param color: the color as RGB fractions
        """
        super().__init__()
        if any(i < 0 for i in (minmax[0], minmax[1])):
            raise ValueError("Range limits cannot be negative")
        elif minmax[0] >= minmax[1]:
            raise ValueError("Range lower bound cannot be larger than range upper bound")

        if isinstance(ion, str):
            ion = ap.Ion(ion)
        elif not isinstance(ion, (ap.Ion, str)):
            raise TypeError(f"Range ion must be type Ion or string, not {type(ion)}")

        self._ion = ion
        self._lower = validate.positive_number(minmax[0])
        self._upper = validate.positive_nonzero_number(minmax[1])
        self._color = validate.color_as_rgb(color)
        # NOTE(review): the constructor accepts vol == 0 while the vol setter
        # requires vol > 0 -- confirm which is intended.
        self._vol = validate.positive_number(vol)

        self._id = Range.__next_id
        Range.__next_id += 1

    def __contains__(self, mass: float) -> bool:
        """
        Be able test if range contains a mass ratio
        """
        return self.contains_mass(mass)

    def __repr__(self):
        retn = f"Range: {self.hill_formula},"
        col = [round(i, 2) for i in self.color]
        retn += f" Min: {self.lower}, Max: {self.upper}, Vol: {self.vol}, Color: {col}"
        return retn

    def __eq__(self, other: "Range"):
        if not isinstance(other, Range):
            return NotImplemented
        # Fixed: previously returned NotImplemented (instead of False) for two
        # unequal Range instances, so inequality fell back to identity
        # comparison instead of being reported directly.
        return bool(
            other.ion == self.ion
            and n.isclose(other.lower, self.lower)
            and n.isclose(other.upper, self.upper)
        )

    @property
    def id(self) -> int:
        # Unique, creation-ordered identifier of this Range
        return self._id

    @property
    def lower(self) -> Number:
        """
        Get the lower (closed) boundary of the range
        """
        return self._lower

    @lower.setter
    def lower(self, new: Number):
        """
        Set the lower (closed) boundary of the range
        :param new: the new lower bound (must stay below the upper bound)
        """
        validate.positive_number(new)
        if new >= self._upper:
            raise ValueError(f"Lower bound for {self.ion} ({new}) cannot be >= upper bound ({self.upper})")
        self._lower = new

    @property
    def upper(self) -> Number:
        """
        Get the upper (open) boundary of the range
        """
        return self._upper

    @upper.setter
    def upper(self, new: Number):
        """
        Set the upper (open) boundary of the range
        """
        validate.positive_number(new)
        if new <= self._lower:
            raise ValueError(f"Upper bound for {self.ion} ({new}) cannot be <= lower bound ({self.lower})")
        self._upper = new

    @property
    def color(self) -> Tuple[Number, Number, Number]:
        """
        Get the color of the range as (R, G, B) tuple. Values range from 0-1
        """
        return self._color

    @color.setter
    def color(self, new: Tuple[Number, Number, Number]):
        """
        Set the color of the range. Color must be a Tuple(red, green, blue) where RGB values are between 0-1
        """
        self._color = validate.color_as_rgb(new)

    @property
    def interval(self) -> Tuple[Number, Number]:
        """
        Get the (min, max) interval defined the mass spectrum range
        """
        return self.lower, self.upper

    @property
    def vol(self) -> Number:
        """
        Get the volume of the range
        """
        return self._vol

    @vol.setter
    def vol(self, new: Number):
        """
        Set the volume of the range
        :param new: the new volume
        """
        self._vol = validate.positive_nonzero_number(new)

    def num_elems(self) -> int:
        """
        Get the number of unique elements of the range composition
        """
        return len(self.ion.elements)

    @property
    def ion(self) -> "ap.Ion":
        """
        Get the ion that composes this range
        """
        return self._ion

    @ion.setter
    def ion(self, new: Union["ap.Ion", str]):
        """
        Set the composition of the range
        :param new: the new composition
        """
        if not isinstance(new, (str, ap.Ion)):
            raise TypeError(f"Expected type Ion or string not {type(new)}")
        if isinstance(new, str):
            self._ion = ap.Ion(new)
        else:
            self._ion = new

    @property
    def hill_formula(self) -> str:
        """
        Get the range composition as a Hill-notation string
        """
        return self.ion.hill_formula

    @property
    def formula(self) -> str:
        """
        Get the range composition as a string (Hill notation without spaces)
        """
        return self.ion.hill_formula.replace(" ", "")

    def intersects(self, rng: "Range") -> bool:
        """
        Determine if the range intersects a given :class:`Range`.

        Fixed: the original pairwise bound checks missed the case where
        ``rng`` fully contains this range, which allowed overlapping ranges
        to be added to a collection. Two half-open intervals [a, b) and
        [c, d) overlap iff a < d and c < b.
        """
        return self.lower < rng.upper and rng.lower < self.upper

    def contains_mass(self, mass: Number) -> bool:
        """
        Test if the given mass/charge ratio is contained within range's bounds
        :param mass: mass/charge ratio
        """
        validate.positive_number(mass)
        return self.lower <= mass < self.upper
class RangeCollection:
"""
Operations on multiple ranges
"""
def __init__(self, ranges: Sequence[Range] = ()):
"""
Maintain and operate on a collection of ranges that describe the peaks in a mass spectrum. This is the principle
class used for mass spectrum range definitions. A collection may be created by manually supplying the Range
objects through the constructor, or 1 by 1 through :meth:`RangeCollection.add`. A :class:`RangeCollection` may also
be created using the alternate constructors :meth:`RangeCollection.from_rng` and
:meth:`RangeCollection.from_rrng` to import the ranges from the two common range file types.
A :class:`RangeCollection` can be created as:
>>> rng_lst = [Range("Cu", (62.5, 63.5)), Range("Cu", (63.5, 66))]
>>> rngs = RangeCollection(rng_list)
Or 1 by 1 as:
>>> rngs = RangeCollection()
>>> rngs.add(Range("Cu", (62.5, 63.5)))
>>> rngs.add(Range("Cu", (63.5, 66)))
:param ranges: sequence of Range objects
"""
if not all(isinstance(i, Range) for i in ranges):
raise TypeError("Cannot create RangeCollection from non-Range objects")
self._ranges = list(ranges)
self.__index = 0
self._filepath = ""
def __iter__(self):
self.__index = 0
return self
def __next__(self) -> Range:
if len(self._ranges) == 0:
raise StopIteration
elif self.__index == len(self._ranges):
self.__index = 0
raise StopIteration
else:
self.__index += 1
return self._ranges[self.__index - 1]
def __len__(self) -> int:
return len(self._ranges)
def __repr__(self):
retn = "RangeCollection\n"
retn += f"Number of ranges: {len(self)}\n"
ranges = self.sorted_ranges()
if len(self) > 0:
min, max = ranges[0].lower, ranges[-1].upper
else:
min = ""
max = ""
retn += f"Mass range: {min} - {max}\n"
retn += f"Number of unique elements: {len(self.elements())}\n"
retn += f"Elements: {', '.join(elem.symbol for elem in self.elements())}\n\n"
data = [(i.hill_formula, i.lower, i.upper, i.vol, [round(j, 2) for j in i.color]) for i in self.sorted_ranges()]
head = ("Composition", "Min (Da)", "Max (Da)", "Volume", "Color (RGB 0-1)")
table = tabulate(data, headers=head)
retn += table
return retn
@property
def filepath(self) -> str:
"""
Get the file path the :class:`RangeCollection` was created from, if it was imported from a file
"""
return self._filepath
@property
def ranges(self) -> List[Range]:
"""
Get a copy of the ranges in the RangeCollection. This returns a copy to prevent accidental modification
of the underlying ranges possibly resulting in overlapping ranges.
Instead, remove the old range with RangeCollection.remove_by_mass() and add the new one, or use
RangeCollection.replace()
"""
return copy.deepcopy(self._ranges)
@classmethod
def from_rrng(cls, fpath: str):
"""
Build RangeCollection from \*.rrng files
:param fpath: filepath
"""
retn = cls()
retn._filepath = validate.file_exists(fpath)
log.info("Reading RRNG file: {}".format(fpath))
conf = ConfigParser()
conf.read(fpath)
nions = int(conf["Ions"]["Number"])
nranges = int(conf["Ranges"]["number"])
elems = [conf["Ions"]["ion" + str(i)] for i in range(1, nions + 1)]
for i in range(1, nranges + 1):
line = conf["Ranges"]["Range" + str(i)].split()
# IVAS saves unknown elements with a name field and not composition, skip these
if any("Name" in i for i in line):
continue
rmin = float(line.pop(0))
rmax = float(line.pop(0))
# The rest can be converted to a dictionary easily
vars = OrderedDict([item.split(":") for item in line])
vol = float(vars.pop("Vol"))
col = helpers.hex2rgbF(vars.pop("Color"))
# Now the rest should be ions
assert all(i in elems for i in vars.keys())
# vars = OrderedDict([(i, int(j)) for i, j in vars.items()])
comp_str = "".join(i + str(j) for i, j in vars.items())
retn.add(Range(comp_str, (rmin, rmax), vol, col))
return retn
    @classmethod
    def from_rng(cls, filepath: str):
        """
        Build RangeCollection from a .rng file

        :param filepath: filepath
        :raises NotImplementedError: always; .rng parsing is not implemented yet
        """
        raise NotImplementedError()
def clear(self):
"""
Remove all Ranges from the RangeCollection
"""
self._ranges = []
def add(self, new: Range):
"""
Add a new :class:`Range` to the :class:`RangeCollection`
:param new: the new :class:`Range`
:return:
"""
if not isinstance(new, Range):
raise TypeError(f"Can only add Range types to RangeCollection not {type(new)}")
else:
for r in self.ranges:
if r.intersects(new):
raise ValueError("Mass ranges cannot coincide")
self._ranges.append(new)
return new
def remove_by_mass(self, mass: float):
"""
Remove a range overlapping the given mass ratio
"""
validate.positive_number(mass)
for i in self._ranges:
if i.lower <= mass < i.upper:
self._ranges.remove(i)
def replace(self, old_rng: Range, new_rng: Range):
"""
Replace an existing Range with a new one. Throws an error if the range is not found.
:param old_rng: Range to be replaced
:param new_rng: New range
"""
for i, rng in enumerate(self._ranges):
if rng == old_rng:
self._ranges[i] = new_rng
return
raise IndexError(f"RangeCollection does not contain {old_rng}")
def ions(self) -> Tuple["ap.Ion", ...]:
"""
Get a tuple of all ions
"""
return tuple(set([i.ion for i in self.ranges]))
def elements(self) -> Tuple[Element]:
"""
Get a tuple of all elements
"""
allelems = []
for rng in self:
elems = [i for i in rng.ion.elements]
allelems += elems
return tuple(set(allelems))
def sorted_ranges(self) -> list:
"""
Get the list of range objects sorted in ascending mass range
"""
return sorted(self._ranges, key=lambda x: x.lower)
def check_overlap(self) -> Union[Tuple, Tuple[float, float]]:
"""
Check if any ranges in the RangeCollection overlap. This returns the first overlap found, not all
overlaps. This is provided if Ranges are being directly accessed and modified
"""
for i, r1 in enumerate(self.ranges):
for j, r2 in enumerate(self.ranges):
if j <= i:
continue
else:
if r1.intersects(r2):
return r1, r2
return ()
def find_by_mass(self, mass: float) -> Range:
"""
Get the range that contains the given m/q
"""
retn = None
for range in self.ranges:
if mass in range:
retn = range
if retn is not None:
return retn
else:
raise ValueError(f"No range containing {mass} exists") | PypiClean |
/Flask-Pay-WX-1.0.5.tar.gz/Flask-Pay-WX-1.0.5/flask_pay_wx/v3/mini/PayHelper.py | import sys
import time
from typing import Dict
import requests
from flask import Flask
from flask_pay_wx import Const
from flask_pay_wx.MiniPayBean import MiniPayBean
from flask_pay_wx.utils.Util import Util
from flask_pay_wx.v3 import PayOrder
from flask_pay_wx.v3.Tools import Tools
class PayHelper(object):
    """WeChat Pay v3 helper for mini-program (JSAPI) payments.

    Creates signed payment orders and queries existing orders against the
    WeChat merchant API, signing every request with the merchant private key.
    """

    def __init__(self, app: Flask, app_id: str, mch_id: str, serial_no: str, api_client_key_path: str = None):
        """
        :param app: Flask application, used only for logging
        :param app_id: WeChat mini-program app id
        :param mch_id: merchant id
        :param serial_no: merchant certificate serial number
        :param api_client_key_path: path to the merchant private key file
        """
        self.app = app
        self.app_id = app_id
        self.mch_id = mch_id
        self.serial_no = serial_no
        self.api_client_key_path = api_client_key_path  # merchant private key
        self.BASE_URL = "https://api.mch.weixin.qq.com"

    # Create a payment order (JSAPI / mini-program flow).
    def handle_pay(self, open_id: str, total_fee: str, notify_url: str, product_describe: str) -> (bool, MiniPayBean):
        """
        Create a JSAPI payment order.

        :return: (success flag, MiniPayBean carrying either the signed payment
            fields or an error message in ``err_msg``)
        """
        result = True
        mini_pay_bean = MiniPayBean()
        # Always hand the bean back, even on failure: the original only set
        # result_obj on success, so a failed response left it None and the
        # logging below crashed on ``None.to_string()``.
        result_obj = mini_pay_bean
        try:
            nonce_str = Util.get_nonce_str()
            out_trade_no = Util.get_out_trade_no()
            pay_order = PayOrder(app_id=self.app_id, mch_id=self.mch_id,
                                 description=product_describe,
                                 out_trade_no=out_trade_no, total_fee=total_fee,
                                 notify_url=notify_url, open_id=open_id)
            sub_url = "/v3/pay/transactions/jsapi"
            url = "{0}{1}".format(self.BASE_URL, sub_url)
            cur_time_stamp = str(int(time.time()))
            content = pay_order.to_string()
            sign_str = Tools.handle_sign(self.mch_id, self.serial_no, self.api_client_key_path, "POST", sub_url,
                                         cur_time_stamp, nonce_str, content)
            self.app.logger.info("v3 handle_pay sign_str:%s", sign_str)
            self.app.logger.info("v3 handle_pay content:%s", content)
            headers = {
                'Content-Type': 'application/json',
                'Accept': 'application/json',
                'Authorization': sign_str
            }
            response = requests.post(url=url, data=content.encode('utf-8'), headers=headers, timeout=10)
            result = self._handle_pay_order_response(response, nonce_str, mini_pay_bean)
            if result:
                mini_pay_bean.sign_type = Const.SIGNTYPE_V3_RSA
                mini_pay_bean.sign_data(api_client_key_path=self.api_client_key_path)
                mini_pay_bean.out_trade_no = out_trade_no
        except requests.Timeout as exception:
            result = False
            mini_pay_bean.err_msg = '超时了'
            self.app.logger.error('v3 (%s.%s) request timeout exception: %s', self.__class__.__name__,
                                  sys._getframe().f_code.co_name,
                                  str(exception))
        except Exception as exception:
            result = False
            mini_pay_bean.err_msg = '请求异常'
            self.app.logger.error('v3 (%s.%s) other exception: %s', self.__class__.__name__,
                                  sys._getframe().f_code.co_name,
                                  str(exception))
        finally:
            self.app.logger.info("_handle_pay_v3 result:%s, result_msg:%s", result, result_obj.to_string())
        # The return no longer lives inside ``finally`` (which would swallow
        # exceptions the except clauses do not catch).
        return result, result_obj

    def _handle_pay_order_response(self, response, nonce_str: str, mini_pay_bean: MiniPayBean) -> bool:
        """Populate ``mini_pay_bean`` from the order-creation response.

        :return: True on HTTP success (200/204), False otherwise
        """
        status_code = response.status_code
        self.app.logger.info("_handle_pay_order_response code:%s", status_code)
        result_dict = self._handle_order_response_result(response.content)
        self.app.logger.info("_handle_pay_order_response content:%s", result_dict)
        if status_code == 200 or status_code == 204:
            mini_pay_bean.app_id = self.app_id
            mini_pay_bean.time_stamp = str(int(time.time()))
            mini_pay_bean.nonce_str = nonce_str
            mini_pay_bean.package = result_dict['prepay_id']
            return True
        # Treat every non-success status as a failure; the original only
        # flagged 401 and silently reported success for other 4xx/5xx codes.
        mini_pay_bean.err_msg = "请求失败"
        return False

    # Decode a raw response body into a dict.
    def _handle_order_response_result(self, content) -> Dict:
        result_str = Util.byte_to_str(content)
        result_dict = Tools.str_to_json(result_str)
        return result_dict

    # Query an order, either by WeChat transaction id or by merchant trade no.
    def query_order(self, transaction_id: str = None, out_trade_no: str = None) -> (bool, Dict):
        """
        :return: (success flag, decoded response dict); (False, {}) when the
            request fails or neither identifier is supplied
        """
        if transaction_id is not None:
            return self._query_order_request('/v3/pay/transactions/id/', transaction_id)
        if out_trade_no is not None:
            return self._query_order_request('/v3/pay/transactions/out-trade-no/', out_trade_no)
        return False, {}

    def _query_order_request(self, path_prefix: str, order_id: str) -> (bool, Dict):
        """Sign and issue one order-query GET request (shared by both lookups)."""
        sub_url = '{0}{1}'.format(path_prefix, order_id)
        # BUGFIX: the order id is already part of ``sub_url``; the original
        # appended it a second time, producing URLs like ``.../id/<id><id>``.
        url = "{0}{1}".format(self.BASE_URL, sub_url)
        content = {
            "mchid": self.mch_id
        }
        sign_str, headers = self._query_order_sign('GET', sub_url)
        response = requests.get(url=url, params=content, headers=headers, timeout=10)
        return self._handle_query_order_response(response)

    # Build the Authorization header for an order query.
    def _query_order_sign(self, request_type: str, request_url: str) -> (str, Dict):
        nonce_str = Util.get_nonce_str()
        cur_time_stamp = str(int(time.time()))
        # The mchid query parameter is part of the signed URL.
        sign_url = "{0}?mchid={1}".format(request_url, self.mch_id)
        sign_str = Tools.handle_sign(self.mch_id, self.serial_no, self.api_client_key_path, request_type, sign_url,
                                     cur_time_stamp, nonce_str)
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': sign_str
        }
        return sign_str, headers

    # Decode the order-query response.
    def _handle_query_order_response(self, response) -> (bool, Dict):
        query_result_obj = {}
        status_code = response.status_code
        response_dict = self._handle_order_response_result(response.content)
        self.app.logger.info("_handle_query_order_response content:%s", response_dict)
        if status_code == 200 or status_code == 204:
            result = True
            query_result_obj = response_dict
        else:
            result = False
        return result, query_result_obj
if __name__ == '__main__':
    # Ad-hoc manual smoke test: fetch the WeChat platform certificates with a
    # signed GET request. Performs live network I/O; not part of the library API.
    # helper = PayHelper(None, "1", "2", "3")
    # result = helper._handle_sign("GET", "/puch", "1655550999", "sdfdsfgree445t5t")
    # print(result)
    request_type = 'GET'
    request_url = '/v3/certificates'
    time_stamp = '1554208460'
    nonce_str = '593BEC0C930BF1AFEB40B4A08C8FB242'
    # NOTE(review): sort_str is built but never used below — presumably
    # superseded by Tools.handle_sign; confirm before deleting.
    sort_str = "{0}\n{1}\n{2}\n{3}\n".format(request_type, request_url, time_stamp, nonce_str)
    sort_str = "{0}\n".format(sort_str)
    auth_str = Tools.handle_sign("1","2","../../ach.key", request_type, request_url, time_stamp, nonce_str)
    # auth_str = Tools.sign_sha256(sort_str, "../../ach.key")
    print(auth_str)
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': auth_str
    }
    response = requests.get(url="https://api.mch.weixin.qq.com/v3/certificates", headers=headers, timeout=10)
    head_dict = response.headers
    # NOTE(review): response.json is a bound method reference, not its result —
    # this prints a method object; presumably response.json() was intended.
    status_code = response.json
    print(status_code)
    print(head_dict['Request-ID'])
/KratosMeshMovingApplication-9.4-cp311-cp311-win_amd64.whl/KratosMultiphysics/MeshMovingApplication/python_solvers_wrapper_mesh_motion.py | import KratosMultiphysics
from importlib import import_module
def CreateSolverByParameters(model, solver_settings, parallelism):
    """Create a mesh-motion solver from already-extracted settings.

    :param model: the Kratos Model
    :param solver_settings: Kratos Parameters with the solver settings
    :param parallelism: "OpenMP" or "MPI"
    :return: the constructed mesh-motion solver
    :raises Exception: for an unknown parallelism or solver type
    """
    solver_type = solver_settings["solver_type"].GetString()

    if solver_type.startswith("mesh_solver_"):
        solver_type = solver_type[12:]  # remove preceding "mesh_solver_"
        depr_msg = 'Using the old names to construct the MeshSolver\n'
        depr_msg += 'Please remove the preceding "mesh_solver_" from "solver_type"'
        KratosMultiphysics.Logger.PrintWarning("DEPRECATION", depr_msg)

    # One dispatch table instead of duplicated if/elif chains per parallelism.
    solver_modules = {
        "OpenMP": {
            "laplacian": "mesh_solver_laplacian",
            "structural_similarity": "mesh_solver_structural_similarity",
        },
        "MPI": {
            "laplacian": "trilinos_mesh_solver_laplacian",
            "structural_similarity": "trilinos_mesh_solver_structural_similarity",
        },
    }

    if parallelism not in solver_modules:
        err_msg = 'The requested parallel type "' + parallelism + '" is not available!\n'
        err_msg += 'Available options are: "OpenMP", "MPI"'
        raise Exception(err_msg)

    type_to_module = solver_modules[parallelism]
    if solver_type not in type_to_module:
        err_msg = 'The requested solver type "' + solver_type + '" is not in the python solvers wrapper\n'
        err_msg += 'Available options are: "laplacian", "structural_similarity"'
        raise Exception(err_msg)

    module_full = 'KratosMultiphysics.MeshMovingApplication.' + type_to_module[solver_type]
    solver = import_module(module_full).CreateSolver(model, solver_settings)

    return solver
def CreateSolver(model, custom_settings):
    """Validate the inputs and dispatch to CreateSolverByParameters.

    :param model: KratosMultiphysics.Model holding the model parts
    :param custom_settings: KratosMultiphysics.Parameters containing
        "solver_settings" and "problem_data"/"parallel_type"
    :return: the constructed mesh-motion solver
    """
    # isinstance instead of exact type() comparison: subclasses of Model and
    # Parameters are valid inputs too. Also removed a stray trailing '#'.
    if not isinstance(model, KratosMultiphysics.Model):
        raise Exception("input is expected to be provided as a Kratos Model object")

    if not isinstance(custom_settings, KratosMultiphysics.Parameters):
        raise Exception("input is expected to be provided as a Kratos Parameters object")

    solver_settings = custom_settings["solver_settings"]
    parallelism = custom_settings["problem_data"]["parallel_type"].GetString()

    return CreateSolverByParameters(model, solver_settings, parallelism)
/OMS_distributions-0.1.tar.gz/OMS_distributions-0.1/OMS_distributions/Gaussiandistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Calculate the mean of the data set.

        Returns:
            float: mean of the data set
        """
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n - 1 for a sample estimate.
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        variance = sum((d - mean) ** 2 for d in self.data) / n
        self.stdev = math.sqrt(variance)
        return self.stdev

    def plot_histogram(self):
        """Output a histogram of the instance variable data using
        matplotlib's pyplot library.
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized histogram of the data and the probability
        density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # Interval between consecutive x values.
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUGFIX: label the pdf subplot; the original set axes[0]'s ylabel twice
        # and left axes[1] unlabeled. Also removed unused mu/sigma locals.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: sum distribution (means add, variances add)
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Output the characteristics of the Gaussian instance.

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/modules/functions/combinatorial.rst | Combinatorial
=============
This module implements various combinatorial functions.
bell
----
.. autoclass:: diofant.functions.combinatorial.numbers.bell
:members:
bernoulli
---------
.. autoclass:: diofant.functions.combinatorial.numbers.bernoulli
:members:
binomial
--------
.. autoclass:: diofant.functions.combinatorial.factorials.binomial
:members:
catalan
-------
.. autoclass:: diofant.functions.combinatorial.numbers.catalan
:members:
euler
-----
.. autoclass:: diofant.functions.combinatorial.numbers.euler
:members:
factorial
---------
.. autoclass:: diofant.functions.combinatorial.factorials.factorial
:members:
subfactorial
------------
.. autoclass:: diofant.functions.combinatorial.factorials.subfactorial
:members:
factorial2 / double factorial
-----------------------------
.. autoclass:: diofant.functions.combinatorial.factorials.factorial2
:members:
FallingFactorial
----------------
.. autoclass:: diofant.functions.combinatorial.factorials.FallingFactorial
:members:
fibonacci
---------
.. autoclass:: diofant.functions.combinatorial.numbers.fibonacci
:members:
harmonic
--------
.. autoclass:: diofant.functions.combinatorial.numbers.harmonic
:members:
lucas
-----
.. autoclass:: diofant.functions.combinatorial.numbers.lucas
:members:
RisingFactorial
---------------
.. autoclass:: diofant.functions.combinatorial.factorials.RisingFactorial
:members:
stirling
--------
.. autofunction:: diofant.functions.combinatorial.numbers.stirling
Enumeration
===========
Three functions are available. Each of them attempts to efficiently compute
a given combinatorial quantity for a given set or multiset which can be
entered as an integer, sequence or multiset (dictionary with
elements as keys and multiplicities as values). The ``k`` parameter indicates
the number of elements to pick (or the number of partitions to make). When
``k`` is None, the sum of the enumeration for all ``k`` (from 0 through the
number of items represented by ``n``) is returned. A ``replacement`` parameter
is recognized for combinations and permutations; this indicates that any item
may appear with multiplicity as high as the number of items in the original
set.
.. autofunction:: diofant.functions.combinatorial.numbers.nC
.. autofunction:: diofant.functions.combinatorial.numbers.nP
.. autofunction:: diofant.functions.combinatorial.numbers.nT
Note that the integer for ``n`` indicates *identical* items for ``nT`` but
indicates ``n`` *different* items for ``nC`` and ``nP``.
| PypiClean |
/FairDynamicRec-0.0.123-py3-none-any.whl/fair_dynamic_rec/core/rankers/ea_hybrid_lsb_linucb.py | from .ea_linear_submodular_bandit import EALSB
import numpy as np
import math
class EAHybridLSBLinUCB(EALSB):
    """
    Exposure-aware hybrid of the linear submodular bandit (topical diversity)
    and hybrid LinUCB (latent-feature relevance).

    The first part of each item's feature vector holds the topical (diversity)
    features, the second part the latent (relevance) features.
    """
    def __init__(self, config, dataObj, parameters=None):
        super(EAHybridLSBLinUCB, self).__init__(config, dataObj, parameters)
        self.topic_dim = self.dataObj.feature_data['train_item_topical_features'].shape[1]
        self.latent_dim = self.dataObj.feature_data['train_item_latent_features'].shape[1]

        # Per-user parameters for the topical (z) part.
        self.beta = np.ones((self.dataObj.n_users, self.topic_dim))
        self.b_z = np.zeros((self.dataObj.n_users, self.topic_dim))  # d
        self.A_z = np.zeros((self.dataObj.n_users, self.topic_dim, self.topic_dim))  # d by d
        self.A_z_inv = np.zeros((self.dataObj.n_users, self.topic_dim, self.topic_dim))
        for i in range(self.dataObj.n_users):
            self.A_z[i] = np.eye(self.topic_dim)
            self.A_z_inv[i] = np.eye(self.topic_dim)

        # Per-user parameters for the latent relevance (x) part.
        self.theta = np.ones((self.dataObj.n_users, self.latent_dim))
        self.b_x = np.zeros((self.dataObj.n_users, self.latent_dim))
        self.A_x = np.zeros((self.dataObj.n_users, self.latent_dim, self.latent_dim))
        self.A_x_inv = np.zeros((self.dataObj.n_users, self.latent_dim, self.latent_dim))
        for i in range(self.dataObj.n_users):
            self.A_x[i] = np.eye(self.latent_dim)
            self.A_x_inv[i] = np.eye(self.latent_dim)
        # Shared interaction matrix between latent and topical parts.
        self.B = np.zeros((self.dataObj.n_users, self.latent_dim, self.topic_dim))

        self.gamma = float(parameters["gamma"]["value"])
        self.window = int(parameters['window']['value'])
        self.click_history_x = np.zeros((self.dataObj.n_users, self.latent_dim))
        self.click_history_z = np.zeros((self.dataObj.n_users, self.topic_dim))

    def update(self, batch_users, rankings, clicks, round=None, user_round=None):
        """Hybrid LinUCB parameter update (Algorithm 2, Li et al., WWW 2010)
        with exposure-aware position discounting of the click feedback.

        :param batch_users: user ids in this batch
        :param rankings: lists shown to the users (unused directly; features
            for the shown items are cached in ``self.batch_features``)
        :param clicks: per-user click vectors
        :param round: unused, kept for interface compatibility
        :param user_round: per-user interaction counters (used for windowing)
        """
        for i in range(len(batch_users)):
            user = batch_users[i]
            _clicks, _batch_features = self.__collect_feedback(clicks, i)

            # Position-discount weights: clicked items get a log-increasing
            # reward factor, examined-but-unclicked items a gamma-scaled
            # log-decreasing penalty. (Earlier experimental variants of these
            # weights were removed as dead code.)
            discount_coef_reward = [1 + math.log(j) for j in range(1, len(_clicks) + 1)]
            discount_coef_penalization = [self.gamma * 1 / (1 + math.log(j)) for j in range(1, len(_clicks) + 1)]

            # z = topical (shared) features, x = latent (user-specific) features.
            z = _batch_features[:, :self.topic_dim]
            x = _batch_features[:, self.topic_dim:]
            BA = np.matmul(self.B[user].T, self.A_x_inv[user])
            self.A_z[user] += np.matmul(BA, self.B[user])
            self.b_z[user] += np.dot(BA, self.b_x[user])
            self.A_x[user] += np.dot(x.T, x)
            self.B[user] += np.dot(x.T, z)
            if self.type == 'feature_weight':
                clicked_items_index = np.where(_clicks == 1)[0]
                _x = x
                if len(clicked_items_index) == 0:
                    _x = -np.multiply(np.array(discount_coef_penalization).reshape(len(_clicks), 1), _x)
                else:
                    previous_clicked_item_index = 0
                    for clicked_item_index in clicked_items_index:
                        current_clicked_item_index = clicked_item_index
                        # Reward the clicked row, penalize the unclicked rows above it.
                        _x[current_clicked_item_index, :] = discount_coef_reward[current_clicked_item_index] * _x[current_clicked_item_index, :]
                        _x[previous_clicked_item_index: current_clicked_item_index, :] = -np.multiply(np.array(discount_coef_penalization[previous_clicked_item_index: current_clicked_item_index]).reshape(current_clicked_item_index - previous_clicked_item_index, 1), _x[previous_clicked_item_index: current_clicked_item_index, :])
                        previous_clicked_item_index = current_clicked_item_index + 1
                self.click_history_x[user] += _x.sum(axis=0)
                self.b_x[user] = np.dot(_clicks, x)
                # Every ``window`` rounds, replace b_x with the windowed average.
                if user_round[user] % self.window == 0:
                    self.b_x[user] = self.click_history_x[user] / self.window
                    self.click_history_x[user] = np.zeros(self.latent_dim)
            else:
                self.b_x[user] = np.dot(_clicks, x)
            try:
                self.A_x_inv[user] = np.linalg.inv(self.A_x[user])
            except np.linalg.LinAlgError:
                # Singular matrix: fall back to the pseudo-inverse.
                self.A_x_inv[user] = np.linalg.pinv(self.A_x[user])
            BA = np.matmul(self.B[user].T, self.A_x_inv[user])
            self.A_z[user] += np.dot(z.T, z) - np.matmul(BA, self.B[user])
            if self.type == 'feature_weight':
                clicked_items_index = np.where(_clicks == 1)[0]
                _z = z
                if len(clicked_items_index) == 0:
                    _z = -np.multiply(np.array(discount_coef_penalization).reshape(len(_clicks), 1), _z)
                else:
                    previous_clicked_item_index = 0
                    for clicked_item_index in clicked_items_index:
                        current_clicked_item_index = clicked_item_index
                        _z[current_clicked_item_index, :] = discount_coef_reward[current_clicked_item_index] * _z[current_clicked_item_index, :]
                        _z[previous_clicked_item_index: current_clicked_item_index, :] = -np.multiply(np.array(discount_coef_penalization[previous_clicked_item_index: current_clicked_item_index]).reshape(current_clicked_item_index - previous_clicked_item_index, 1), _z[previous_clicked_item_index: current_clicked_item_index, :])
                        previous_clicked_item_index = current_clicked_item_index + 1
                self.click_history_z[user] += _z.sum(axis=0)
                self.b_z[user] = np.dot(_clicks, z) - np.dot(BA, self.b_x[user])
                if user_round[user] % self.window == 0:
                    self.b_z[user] = (self.click_history_z[user] / self.window) - np.dot(BA, self.b_x[user])
                    self.click_history_z[user] = np.zeros(self.topic_dim)
            else:
                self.b_z[user] = np.dot(_clicks, z) - np.dot(BA, self.b_x[user])
            try:
                self.A_z_inv[user] = np.linalg.inv(self.A_z[user])
            except np.linalg.LinAlgError:
                self.A_z_inv[user] = np.linalg.pinv(self.A_z[user])
            self.beta[user] = np.dot(self.A_z_inv[user], self.b_z[user])
            B_tmp = self.b_x[user] - np.dot(self.B[user], self.beta[user])
            self.theta[user] = np.dot(self.A_x_inv[user], B_tmp)

            self.n_samples[user] += len(_clicks)
            self.n_clicks[user] += sum(_clicks)

    def __collect_feedback(self, clicks, batch_user_id):
        """Truncate clicks and cached features at the last observed position
        according to the configured click model.

        :param clicks: per-user click arrays for the batch
        :param batch_user_id: index of the user inside the batch
        :return: (observed clicks, matching rows of ``self.batch_features``)
        """
        # Cascade: the user stops at the first click, so only positions up to
        # (and including) the first click were observed.
        if self.config.feedback_model == 'cascade':
            if np.sum(clicks[batch_user_id]) == 0:
                return clicks[batch_user_id], self.batch_features[batch_user_id]
            first_click = np.where(clicks[batch_user_id])[0][0]
            return clicks[batch_user_id][:first_click + 1], self.batch_features[batch_user_id][:first_click + 1]
        # DCM: everything up to the last click was observed.
        elif self.config.feedback_model == 'dcm':
            if np.sum(clicks[batch_user_id]) == 0:
                return clicks[batch_user_id], self.batch_features[batch_user_id]
            last_click = np.where(clicks[batch_user_id])[0][-1]
            return clicks[batch_user_id][:last_click + 1], self.batch_features[batch_user_id][:last_click + 1]
        # Otherwise assume all items were observed.
        else:
            return clicks[batch_user_id], self.batch_features[batch_user_id]

    def get_ranking(self, batch_users, sampled_item=None, round=None):
        """Greedily build one ranked list per user by maximizing the hybrid
        UCB: latent relevance + marginal topical coverage + confidence bound.

        :param batch_users: user ids to rank for
        :param sampled_item: unused, kept for interface compatibility
        :param round: unused, kept for interface compatibility
        :return: (len(batch_users), list_size) int array of ranked item ids
        """
        rankings = np.zeros((len(batch_users), self.config.list_size), dtype=int)
        self.batch_features = np.zeros((len(batch_users), self.config.list_size, self.topic_dim + self.latent_dim))
        tie_breaker = self.prng.rand(len(self.dataObj.feature_data['train_item_latent_features']))
        for i in range(len(batch_users)):
            user = batch_users[i]
            # Confidence-bound building blocks (hybrid LinUCB).
            BA_X = np.matmul(self.B[user].T, self.A_x_inv[user])  # B.T A_x^-1
            ABA = np.matmul(self.A_z_inv[user], BA_X)  # A_z^-1 B.T A_x^-1
            ABABA = np.matmul(BA_X.T, ABA)
            # Relevance score and its confidence contribution.
            score_x = np.dot(self.dataObj.feature_data['train_item_latent_features'], self.theta[user])
            XAX = np.multiply(np.dot(self.dataObj.feature_data['train_item_latent_features'], self.A_x_inv[user]), self.dataObj.feature_data['train_item_latent_features']).sum(axis=1)  # x^T A_X^-1 x
            cb_x = np.multiply(np.dot(self.dataObj.feature_data['train_item_latent_features'], ABABA), self.dataObj.feature_data['train_item_latent_features']).sum(axis=1) + XAX
            ucb_x = score_x + 1e-6 * tie_breaker  # tiny noise only breaks ties
            ABAX = np.dot(ABA, self.dataObj.feature_data['train_item_latent_features'].T).T
            batch_features = []
            coverage = np.zeros(self.topic_dim)
            ranking = []
            for j in range(self.config.list_size):
                # Marginal topical gain given what the list already covers
                # (lines 8-11 of the NIPS'11 linear submodular bandit).
                z_t = self.conditional_coverage(x=self.dataObj.feature_data['train_item_topical_features'], coverage=coverage)
                ZAZ = np.multiply(np.dot(z_t, self.A_z_inv[user]), z_t).sum(axis=1)  # z^T A_Z^-1 z
                cb_z = ZAZ - 2 * np.multiply(z_t, ABAX).sum(axis=1)
                cb = self.alpha * np.sqrt(cb_z + cb_x)
                # BUGFIX: index beta by the user id, not the batch position
                # (the original used self.beta[i], which only coincides with
                # the right user when batch_users == [0, 1, 2, ...]).
                score_z = np.dot(z_t, self.beta[user])
                ucb = ucb_x + score_z + cb
                winner = np.argmax(ucb)
                # Skip items already placed in this user's list.
                while winner in ranking:
                    ucb[winner] = -np.inf
                    winner = np.argmax(ucb)
                ranking.append(winner)
                batch_features.append(z_t[winner])
                coverage = self.ranking_coverage(self.dataObj.feature_data['train_item_topical_features'][ranking])
            rankings[i] = np.asarray(ranking)
            # Cache the shown features for the next update() call.
            self.batch_features[i][:, :self.topic_dim] = np.asarray(batch_features)
            self.batch_features[i][:, self.topic_dim:] = self.dataObj.feature_data['train_item_latent_features'][rankings[i]]
        return rankings
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/flatpages/templatetags/flatpages.py | from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
register = template.Library()
class FlatpageNode(template.Node):
    """Template node that stores the flatpages visible to a user (or the
    public ones) into the rendering context under ``context_name``."""

    def __init__(self, context_name, starts_with=None, user=None):
        self.context_name = context_name
        # Optional tag arguments are resolved lazily at render time.
        self.starts_with = template.Variable(starts_with) if starts_with else None
        self.user = template.Variable(user) if user else None

    def render(self, context):
        # Prefer the site attached to the current request; fall back to the
        # SITE_ID setting when rendering outside a request cycle.
        if "request" in context:
            site_pk = get_current_site(context["request"]).pk
        else:
            site_pk = settings.SITE_ID
        pages = FlatPage.objects.filter(sites__id=site_pk)
        # Narrow by URL prefix when one was supplied to the tag.
        if self.starts_with:
            prefix = self.starts_with.resolve(context)
            pages = pages.filter(url__startswith=prefix)
        # Only an authenticated supplied user may see registration-required
        # pages; anonymous or absent users get public flatpages only.
        show_private = False
        if self.user:
            show_private = self.user.resolve(context).is_authenticated
        if not show_private:
            pages = pages.filter(registration_required=False)
        context[self.context_name] = pages
        return ""
@register.tag
def get_flatpages(parser, token):
    """
    Retrieve all flatpage objects available for the current site and
    visible to the specific user (or visible to all users if no user is
    specified). Populate the template context with them in a variable
    whose name is defined by the ``as`` clause.

    An optional ``for`` clause controls the user whose permissions are used in
    determining which flatpages are visible.

    An optional argument, ``starts_with``, limits the returned flatpages to
    those beginning with a particular base URL. This argument can be a variable
    or a string, as it resolves from the template context.

    Syntax::

        {% get_flatpages ['url_starts_with'] [for user] as context_name %}

    Example usage::

        {% get_flatpages as flatpages %}
        {% get_flatpages for someuser as flatpages %}
        {% get_flatpages '/about/' as about_pages %}
        {% get_flatpages prefix as about_pages %}
        {% get_flatpages '/about/' for someuser as about_pages %}
    """
    bits = token.split_contents()
    syntax_message = (
        "%(tag_name)s expects a syntax of %(tag_name)s "
        "['url_starts_with'] [for user] as context_name" % {"tag_name": bits[0]}
    )
    # Valid invocations have between 3 and 6 space-separated bits.
    if not 3 <= len(bits) <= 6:
        raise template.TemplateSyntaxError(syntax_message)
    # An even bit count means the optional URL prefix argument is present.
    prefix = bits[1] if len(bits) % 2 == 0 else None
    # The tag must end with "as <context_name>".
    if bits[-2] != "as":
        raise template.TemplateSyntaxError(syntax_message)
    context_name = bits[-1]
    # Five or six bits means a "for <user>" clause was given.
    user = None
    if len(bits) >= 5:
        if bits[-4] != "for":
            raise template.TemplateSyntaxError(syntax_message)
        user = bits[-3]
    return FlatpageNode(context_name, starts_with=prefix, user=user)
/Dozer-0.8.tar.gz/Dozer-0.8/README.rst | Dozer
=====
.. image:: https://travis-ci.com/mgedmin/dozer.svg?branch=master
:target: https://travis-ci.com/mgedmin/dozer
.. image:: https://ci.appveyor.com/api/projects/status/github/mgedmin/dozer?branch=master&svg=true
:target: https://ci.appveyor.com/project/mgedmin/dozer
.. image:: https://coveralls.io/repos/mgedmin/dozer/badge.svg?branch=master
:target: https://coveralls.io/r/mgedmin/dozer
Dozer was originally a WSGI middleware version of Robert Brewer's
`Dowser CherryPy tool <http://www.aminus.net/wiki/Dowser>`_ that
displays information as collected by the gc module to assist in
tracking down memory leaks. It now also has middleware for profiling
and for looking at logged messages.
Tracking down memory leaks
--------------------------
Usage::
from dozer import Dozer
# my_wsgi_app is a WSGI application
wsgi_app = Dozer(my_wsgi_app)
Assuming you're serving your application on the localhost at port 5000,
you can then load up ``http://localhost:5000/_dozer/index`` to view the
gc info.
Profiling requests
------------------
Usage::
from dozer import Profiler
# my_wsgi_app is a WSGI application
wsgi_app = Profiler(my_wsgi_app)
Assuming you're serving your application on the localhost at port 5000,
you can then load up ``http://localhost:5000/_profiler`` to view the
list of recorded request profiles.
Here's a blog post by Marius Gedminas that contains `a longer description
of Dozer's profiler <https://mg.pov.lt/blog/profiling-with-dozer.html>`_.
Inspecting log messages
-----------------------
Usage::
from dozer import Logview
# my_wsgi_app is a WSGI application
wsgi_app = Logview(my_wsgi_app)
Every text/html page served by your application will get some HTML and
Javascript injected into the response body listing all logging messages
produced by the thread that generated this response.
Here's a blog post by Marius Gedminas that contains `a longer description
of Dozer's logview <https://mg.pov.lt/blog/capturing-logs-with-dozer.html>`_.
| PypiClean |
/BuildSimHubAPI-2.0.0-py3-none-any.whl/buildsimdata/missing_data_imputation.py | import pandas as pd
from datetime import datetime
from feature_selector import FeatureSelector
import math
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
# ---- Load the building sensor data and drop uninformative columns ----
# NOTE(review): hard-coded absolute path — this only runs on the author's
# machine; consider a command-line argument.
df = pd.read_csv('/Users/weilixu/Desktop/Bldg101_2014_10m_NA10.csv')
# test_missing = pd.read_excel('../missingdata/PI_Bldg101_Webctrl_Points.xlsx')
# Feature Selection - remove single value and high correlation variable
fs = FeatureSelector(data = df)
# Columns with a single unique value carry no information for imputation.
fs.identify_single_unique()
single_unique = fs.ops['single_unique']
df = df.drop(columns = single_unique)
# Drop one column of every pair whose correlation exceeds 0.975.
fs.identify_collinear(correlation_threshold = 0.975)
correlated_features = fs.ops['collinear']
df = df.drop(columns = correlated_features)
# dataframe handling
#df_copy = df
# ---- Derive calendar features from the 'timestamp' column ----
dateTime = df['timestamp'].tolist()
# print(df_copy['timestamp'].head(500))
# NOTE(review): dates_list is computed but never used afterwards.
dates_list = [datetime.strptime(index, '%m/%d/%y %H:%M').strftime('%m/%d/%y') for index in dateTime]
# print(dates_list)
month = [datetime.strptime(index, '%m/%d/%y %H:%M').strftime('%m') for index in dateTime]
# print(month)
weekofday =[datetime.strptime(index, '%m/%d/%y %H:%M').strftime('%A') for index in dateTime]
# print(weekofday)
hour =[datetime.strptime(index, '%m/%d/%y %H:%M').strftime('%H') for index in dateTime]
# print(hour)
# Insert the derived features at fixed positions: month=0, weekofday=1,
# hour=2; the original columns (starting with 'timestamp') shift to 3+.
# Later code relies on this layout via row_fields[1]/[2] and list(row)[4:].
df.insert(loc=0, column='month', value=month)
df.insert(loc=1, column='weekofday', value=weekofday)
df.insert(loc=2, column='hour', value=hour)
print(len(df))
# na_df rows have been removed from df so that we can add the row after filling missing value back to df
# NOTE(review): .any(1) uses the positional axis argument, which newer pandas
# removed — confirm the target pandas version (any(axis=1) is the safe form).
na_df = df[df.isnull().any(1)] # To find out which rows have NaNs
ready_before = pd.concat([df, na_df]).drop_duplicates(keep=False)
print (len(ready_before))
def nPrevHour(timeDict, n, na_df):
'''
Return a dictionary
key: current hour
value: previous n hour data
'''
row_fields = df_copy.keys()
ans = {}
for index, row in na_df.iterrows():
weekday = row[row_fields[1]]
currTime = row[row_fields[1]] + ' ' + row[row_fields[2]]
# print (currTime)
targetTime = findTargetTime(n, int(row[row_fields[2]]), weekday)
if currTime not in ans:
ans[currTime] = timeDict[targetTime]
# print("Here:")
return ans
def findTargetTime(n, hour, weekday):
    """Return the "Weekday HH" key for the slot n hours before the given time.

    Parameters
    ----------
    n : int
        Hours to step backwards (n >= 0).
    hour : int
        Current hour of day, 0..23.
    weekday : str
        Current English weekday name, e.g. 'Monday'.

    Returns
    -------
    str
        Target weekday and zero-padded hour, e.g. 'Sunday 23'.
    """
    weekday_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    # divmod generalizes the original single-day lookback: days_back is the
    # (possibly negative) number of whole days stepped over, res the hour of
    # day.  The original hand-rolled +24 correction broke for n > hour + 24
    # (it could return a negative hour); this form is exact for any n >= 0.
    days_back, res = divmod(hour - n, 24)
    target_weekday = weekday_list[(weekday_list.index(weekday) + days_back) % 7]
    # Zero-pad to two digits to match the keys built elsewhere ('%H' format).
    return '%s %02d' % (target_weekday, res)
# The part is used to generate t1 and t2 data
# timeDict maps "Weekday HH" -> list of measurement rows (columns 4 onward,
# i.e. everything after month/weekofday/hour/timestamp) observed at that
# weekly time slot, collected over the whole frame.
df_copy = df
row_fields = df_copy.keys()
timeDict = {}
for index, row in df_copy.iterrows():
    current_time = row[row_fields[1]] + ' ' + row[row_fields[2]]
    if current_time not in timeDict:
        timeDict[current_time] = []
    timeDict[current_time].append(list(row)[4:])
t0 = {} # current time (placeholder, never populated)
t1 = {}
t2 = {}
# For every NaN row, gather the rows observed 1 and 2 hours earlier.
t1 = nPrevHour(timeDict, 1, na_df)
t2 = nPrevHour(timeDict, 2, na_df)
# The part is to put t1 and t2 data to the current time's training set
for key, value in t1.items():
    t1[key].extend(t2[key])
# remove training date which has nan value in the list
# Reason: the list having nan cannot be used as the training data
# NOTE(review): deleting NaN entries shortens those rows, so cleaned rows no
# longer line up column-wise; knnTrain later discards rows whose length
# differs from the prediction row — confirm this loss is acceptable.
for key, value in t1.items():
    for i in range(len(value)):
        remove = []
        for j in range(len(value[i])):
            if isinstance(value[i][j], float) and math.isnan(value[i][j]):
                remove.append(j)
        # remove.reverse()
        value[i] = [value[i][j] for j in range(len(value[i])) if j not in remove]
        # for j in range(len(remove)):
        #     value[i].pop(remove[j])
# seperate to training data and predict data
# here, training data is the dataset containing t1 and t2; predict data is the data having missing value
train_data = []
predict_data = []
idxToTime = {}
# Map each NaN row's frame index to its "Weekday HH" key.
for index, row in na_df.iterrows():
    weekday = row[row_fields[1]]
    currTime = row[row_fields[1]] + ' ' + row[row_fields[2]]
    idxToTime[index] = currTime
for index, row in na_df.iterrows():
    predict_data.append(list(row)[4:])
    train_data.append(t1[idxToTime[index]])
# If 85% of one row is nan, the row will be removed
# NOTE(review): the threshold actually coded below is 0.8 (80%), not 85% —
# align the comment or the constant.
delete_position = []
for i in range(len(predict_data)):
    count = 0
    for j in range(len(predict_data[i])):
        if isinstance(predict_data[i][j], float) and math.isnan(predict_data[i][j]):
            count = count + 1
    if float(count) / len(predict_data[i]) > 0.8:
        delete_position.append(i)
train_data = [ train_data[i] for i in range(len(train_data)) if i not in delete_position]
predict_data = [ predict_data[i] for i in range(len(predict_data)) if i not in delete_position]
def knnTrain(predict_data, train_data):
    """Impute the NaN entries of one observation row via 3-NN regression.

    The historical rows in train_data are split column-wise: columns that
    are present in predict_data become the regression features and columns
    that are NaN in predict_data become the (multi-output) labels.  A
    KNeighborsRegressor is fit on that split and its prediction replaces the
    missing entries.

    Fixes two defects of the original implementation:
      * the feature filter `isinstance(v, float) and math.isnan(v) == False`
        silently dropped every non-float column from the training features
        while keeping it in the prediction row (dimension mismatch);
      * the fill loop assigned the whole prediction vector to every missing
        slot instead of pairing each predicted value with its own column.

    Parameters
    ----------
    predict_data : list
        One observation row; missing entries are float('nan').
    train_data : list of list
        Historical rows for the same weekly time slot (t-1 and t-2 data).

    Returns
    -------
    list
        Copy of predict_data with the NaN entries replaced by predictions.
        The filled row is also printed, preserving the script's output.
    """
    n_cols = len(predict_data)
    # Column indices that are missing vs. observed in the target row.
    missing = [j for j, v in enumerate(predict_data)
               if isinstance(v, float) and math.isnan(v)]
    observed = [j for j in range(n_cols) if j not in missing]
    # Only historical rows with the full column count are usable; rows that
    # lost entries during the caller's NaN clean-up are skipped (same length
    # guard as the original).
    train_features = []
    train_labels = []
    for row in train_data:
        if len(row) == n_cols:
            train_features.append([row[j] for j in observed])
            train_labels.append([row[j] for j in missing])
    X = np.array(train_features)
    y = np.array(train_labels)
    neigh = KNeighborsRegressor(n_neighbors=3)
    neigh.fit(X, y)
    # predict() on one sample returns shape (1, len(missing)).
    res = neigh.predict([[predict_data[j] for j in observed]])
    new_predict_data = list(predict_data)
    for k, j in enumerate(missing):
        new_predict_data[j] = res[0][k]
    print(new_predict_data)
    return new_predict_data
# ---- Run the per-row KNN imputation over every NaN row ----
# `problem` is declared but never populated; kept as in the original script.
problem = []
# NOTE(review): range starts at 1, so predict_data[0] is never imputed —
# confirm whether skipping the first row is intentional.
for i in range(1,len(predict_data)):
    # print(i)
    knnTrain(predict_data[i], train_data[i])
/CoinMarketCapAPI-0.5-py3-none-any.whl/coinmarketcap/endpoints/tools.py |
from .parser import args
from typing import Union
from os.path import join as urljoin
class Price:
    """ Convert an amount of one currency into multiple cryptocurrencies
    or fiat currencies at the same time using the latest market averages.
    """

    def __init__(self, request, endpoint):
        # Bind the shared request callable to the "price-conversion" path
        # once; both convert_* methods funnel their parameters through it.
        self.request = lambda x: request(
            urljoin(endpoint, "price-conversion"), args(**x)
        )

    def convert_id(
        self, amount: Union[int, float, str], id: Union[str, int], time=None, convert="USD"
    ):
        """ Convert an amount of one currency into multiple cryptocurrencies
        or fiat currencies at the same time using the latest market averages.

        Parameters
        ----------
        amount : `int`, `float` or `str`
            An amount of currency to convert. Example: 10.43
        id : `int` or `str`
            The CoinMarketCap currency ID of the base cryptocurrency or fiat
            to convert from. Example: "1"
        time : `datetime.datetime` or `float`, optional
            Timestamp (datetime obj or Unix) to reference historical pricing
            during conversion. If None is passed, the current time will be
            used. If passed, the closest historic values available will be
            used for this conversion.
        convert : `str`, optional
            Calculate market quotes in up to 40 currencies at once. Each
            additional convert option beyond the first requires an additional
            call credit. Each conversion is returned in its own "quote"
            object. A list of supported fiat options can be found here.
            https://coinmarketcap.com/api/documentation/v1/#section/Standards-and-Conventions

        Returns
        -------
        `json obj`
            Schema - https://coinmarketcap.com/api/documentation/v1/#operation/getV1ToolsPriceconversion

        Raises
        ------
        ValueError
            If arguments is not parseable.
        requests.exceptions.HTTPError
            If status code is not 200
        """
        params = locals()
        # 'self' must not be forwarded as a query parameter; the original
        # passed locals() straight through, leaking it into args(**x).
        del params["self"]
        if time is None:
            del params["time"]
        return self.request(params)

    def convert_symbol(
        self, amount: Union[int, float], symbol: str, time=None, convert="USD"
    ):
        """ Convert an amount of one currency into multiple cryptocurrencies
        or fiat currencies at the same time using the latest market averages.

        Parameters
        ----------
        amount : `int`, `float` or `str`
            An amount of currency to convert. Example: 10.43
        symbol : `str`
            Currency symbol of the base cryptocurrency or fiat to convert
            from. Example: "BTC".
        time : `datetime.datetime` or `float`, optional
            Timestamp (datetime obj or Unix) to reference historical pricing
            during conversion. If None is passed, the current time will be
            used. If passed, the closest historic values available will be
            used for this conversion.
        convert : `str`, optional
            Calculate market quotes in up to 40 currencies at once. Each
            additional convert option beyond the first requires an additional
            call credit. Each conversion is returned in its own "quote"
            object. A list of supported fiat options can be found here.
            https://coinmarketcap.com/api/documentation/v1/#section/Standards-and-Conventions

        Returns
        -------
        `json obj`
            Schema - https://coinmarketcap.com/api/documentation/v1/#operation/getV1ToolsPriceconversion

        Raises
        ------
        ValueError
            If arguments is not parseable.
        requests.exceptions.HTTPError
            If status code is not 200
        """
        params = locals()
        del params["self"]  # keep only real query parameters
        if time is None:
            del params["time"]
        return self.request(params)
class Tools:
    """ API for convenience utilities. """

    def __init__(self, request):
        # Expose the price-conversion endpoints under the "tools" path.
        self.price = Price(request, "tools")
/MdNotes_ROPC-1.0-py3-none-any.whl/md_notes_ropc/models/note.py | class Note(object):
"""Implementation of the 'Note' model.
TODO: type model description here.
Attributes:
id (long|int): TODO: type description here.
title (string): TODO: type description here.
body (string): TODO: type description here.
user_id (long|int): TODO: type description here.
created_at (string): TODO: type description here.
updated_at (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"id":'id',
"title":'title',
"body":'body',
"user_id":'user_id',
"created_at":'created_at',
"updated_at":'updated_at'
}
def __init__(self,
id=None,
title=None,
body=None,
user_id=None,
created_at=None,
updated_at=None,
additional_properties = {}):
"""Constructor for the Note class"""
# Initialize members of the class
self.id = id
self.title = title
self.body = body
self.user_id = user_id
self.created_at = created_at
self.updated_at = updated_at
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = dictionary.get('id')
title = dictionary.get('title')
body = dictionary.get('body')
user_id = dictionary.get('user_id')
created_at = dictionary.get('created_at')
updated_at = dictionary.get('updated_at')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(id,
title,
body,
user_id,
created_at,
updated_at,
dictionary) | PypiClean |
/HBT_IP_Test-1.0.1-py3-none-any.whl/HBT_IP_Test/libs/isom/python/IsomWiFiInterfaces_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import IsomStdDef_pb2 as IsomStdDef__pb2
import IsomInterfaces_pb2 as IsomInterfaces__pb2
import IsomDevices_pb2 as IsomDevices__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='IsomWiFiInterfaces.proto',
package='Honeywell.Security.ISOM.WiFiInterfaces',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x18IsomWiFiInterfaces.proto\x12&Honeywell.Security.ISOM.WiFiInterfaces\x1a\x10IsomStdDef.proto\x1a\x14IsomInterfaces.proto\x1a\x11IsomDevices.proto\"i\n\x17WiFiInterfaceOperations\x12\x44\n\tresources\x18\x0b \x03(\x0e\x32\x31.Honeywell.Security.ISOM.WiFiInterfaces.Resources*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"q\n\x1fWiFiInterfaceSupportedRelations\x12\x44\n\trelations\x18\x0b \x03(\x0e\x32\x31.Honeywell.Security.ISOM.WiFiInterfaces.Relations*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"_\n\x13WiFiInterfaceEvents\x12>\n\x06\x65vents\x18\x0b \x03(\x0e\x32..Honeywell.Security.ISOM.WiFiInterfaces.Events*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xe0\x01\n\x16WiFiInterfaceCommState\x12\n\n\x02id\x18\x0b \x01(\t\x12\x31\n\x05state\x18\x15 \x03(\x0e\x32\".Honeywell.Security.ISOM.CommState\x12\x13\n\x0blinkQuality\x18\x16 \x01(\x04\x12L\n\x17timeSinceLastLQMeasured\x18\x17 \x01(\x0b\x32%.Honeywell.Security.ISOM.IsomDurationB\x04\x90\xb5\x18\x11\x12\x0c\n\x04rssi\x18\x18 \x01(\x03\x12\x0c\n\x03wps\x18\xe9\x07 \x01(\x08*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"y\n\x1aWiFiInterfaceCommStateList\x12Q\n\tcommState\x18\x0b \x03(\x0b\x32>.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"d\n\x14WiFiAPModeTimerState\x12\x42\n\rtimeRemaining\x18\x15 \x01(\x0b\x32%.Honeywell.Security.ISOM.IsomDurationB\x04\x90\xb5\x18\x11*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\x9b\x01\n\x0bWiFiAPState\x12.\n\x06\x61pMode\x18\x0b \x01(\x0e\x32\x1e.Honeywell.Security.ISOM.State\x12R\n\x0c\x61pTimerState\x18\x0c \x01(\x0b\x32<.Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPModeTimerState*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xf6\x03\n\x12WiFiInterfaceState\x12\n\n\x02id\x18\x0b \x01(\t\x12O\n\x0ctroubleState\x18\x19 \x01(\x0b\x32\x39.Honeywell.Security.ISOM.Interfaces.InterfaceTroubleState\x12\x43\n\tomitState\x18\x1a \x01(\x0b\x32\x30.Honeywell.Security.ISOM.Devices.DeviceOmitState\x12G\n\x0b\x62ypassState\x18\x1b 
\x01(\x0b\x32\x32.Honeywell.Security.ISOM.Devices.DeviceBypassState\x12Q\n\tcommState\x18\x15 \x01(\x0b\x32>.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState\x12I\n\ttestState\x18\x16 \x01(\x0b\x32\x36.Honeywell.Security.ISOM.Interfaces.InterfaceTestState\x12M\n\x10\x61\x63\x63\x65ssPointState\x18\x1c \x01(\x0b\x32\x33.Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPState*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"m\n\x16WiFiInterfaceStateList\x12I\n\x05state\x18\x0b \x03(\x0b\x32:.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\x88\x01\n\x18WiFiInterfaceIdentifiers\x12\n\n\x02id\x18\x0b \x01(\t\x12\x0c\n\x04guid\x18\x0c \x01(\t\x12\x0c\n\x04name\x18\r \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x0e \x01(\t\x12\x11\n\tnetworkId\x18\x0f \x01(\t\x12\x12\n\nmacAddress\x18\x10 \x01(\t*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\x80\x01\n\x15WiFiInterfaceRelation\x12\n\n\x02id\x18\x0b \x01(\t\x12?\n\x04name\x18\x0c \x01(\x0e\x32\x31.Honeywell.Security.ISOM.WiFiInterfaces.Relations\x12\x10\n\x08\x65ntityId\x18\x0e \x01(\t*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"v\n\x19WiFiInterfaceRelationList\x12O\n\x08relation\x18\x0b \x03(\x0b\x32=.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelation*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xf0\x03\n\x13WiFiInterfaceConfig\x12U\n\x0bidentifiers\x18\x0b \x01(\x0b\x32@.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers\x12O\n\x08relation\x18\n \x03(\x0b\x32=.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelation\x12=\n\x04omit\x18\x15 \x01(\x0e\x32/.Honeywell.Security.ISOM.Devices.DeviceOmitType\x12:\n\x07subType\x18\x1d \x01(\x0b\x32#.Honeywell.Security.ISOM.IsomStringB\x04\x90\xb5\x18\x13\x12U\n\tsecConfig\x18\x19 \x01(\x0b\x32\x42.Honeywell.Security.ISOM.Interfaces.InterfaceSecurityConfiguration\x12U\n\x0bnetworkType\x18\x18 
\x01(\x0e\x32@.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceNetworkType*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"p\n\x17WiFiInterfaceConfigList\x12K\n\x06\x63onfig\x18\x0b \x03(\x0b\x32;.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xb7\x01\n\x13WiFiInterfaceEntity\x12K\n\x06\x63onfig\x18\x15 \x01(\x0b\x32;.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig\x12I\n\x05state\x18\x1f \x01(\x0b\x32:.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState*\x08\x08\xa0\xf7\x36\x10\xe0\x91\x43\"p\n\x17WiFiInterfaceEntityList\x12K\n\x06\x65ntity\x18\x0b \x03(\x0b\x32;.Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntity*\x08\x08\xc0\x84=\x10\xe0\x91\x43*\xbf\x05\n\tResources\x12\x18\n\x13supportedOperations\x10\xf2\x07\x12\x17\n\x12supportedRelations\x10\xf3\x07\x12\x14\n\x0fsupportedEvents\x10\xf4\x07\x12\x1a\n\x15supportedCapabilities\x10\xf5\x07\x12\x0f\n\nfullEntity\x10\xc2N\x12\x0b\n\x06\x63onfig\x10\xd7N\x12\x10\n\x0bidentifiers\x10\xebN\x12\x0e\n\trelations\x10\xffN\x12\n\n\x05state\x10\x92O\x12\x0e\n\tomitState\x10\x9cO\x12\x17\n\x12omitState_s_unOmit\x10\x9dO\x12\x15\n\x10omitState_s_omit\x10\x9eO\x12\x12\n\rtests_s_state\x10\xc4O\x12\x12\n\rtests_s_start\x10\xc5O\x12\x11\n\x0ctests_s_stop\x10\xc6O\x12\x1a\n\x15tests_s_comms_s_start\x10\xc7O\x12\x19\n\x14tests_s_comms_s_stop\x10\xc8O\x12\x14\n\x0f\x61ptimer_s_state\x10\x83Q\x12\x14\n\x0f\x61ptimer_s_start\x10\x84Q\x12\x13\n\x0e\x61ptimer_s_stop\x10\x85Q\x12\x10\n\x0b\x65nableState\x10\xcfO\x12\x1a\n\x15\x65nableState_s_enabled\x10\xd0O\x12\x1b\n\x16\x65nableState_s_disabled\x10\xd1O\x12 \n\x1b\x65nableState_s_wps_s_enabled\x10\xd2O\x12!\n\x1c\x65nableState_s_wps_s_disabled\x10\xd3O\x12\r\n\tdiscovery\x10P\x12\x15\n\x11\x64iscovery_s_state\x10Q\x12\x1f\n\x1b\x64iscovery_s_state_s_enabled\x10R\x12 
\n\x1c\x64iscovery_s_state_s_disabled\x10S\x12\x15\n\rMax_Resources\x10\x80\x80\x80\x80\x04*\x85\x01\n\tRelations\x12\x1f\n\x1aInterfaceOwnedByPeripheral\x10\xc3N\x12!\n\x1cInterfaceConnectedPeripheral\x10\xc4N\x12\x1d\n\x18InterfaceConnectedDevice\x10\xc5N\x12\x15\n\rMax_Relations\x10\x80\x80\x80\x80\x04*w\n\x06\x45vents\x12\x17\n\x12\x63ommState_p_normal\x10\xf4N\x12\x15\n\x10\x63ommState_p_fail\x10\xf5N\x12\x14\n\x0f\x61ptimer_p_start\x10\xf6N\x12\x13\n\x0e\x61ptimer_p_stop\x10\xf7N\x12\x12\n\nMax_Events\x10\x80\x80\x80\x80\x04*\x86\x01\n\x18WiFiInterfaceNetworkType\x12\x12\n\x0eInfrastructure\x10\x0b\x12\n\n\x06Office\x10\x0c\x12\x08\n\x04Home\x10\r\x12\t\n\x05\x41\x64hoc\x10\x0e\x12\x0f\n\x0b\x41\x63\x63\x65ssPoint\x10\x0f\x12$\n\x1cMax_WiFiInterfaceNetworkType\x10\x80\x80\x80\x80\x04')
,
dependencies=[IsomStdDef__pb2.DESCRIPTOR,IsomInterfaces__pb2.DESCRIPTOR,IsomDevices__pb2.DESCRIPTOR,])
_RESOURCES = _descriptor.EnumDescriptor(
name='Resources',
full_name='Honeywell.Security.ISOM.WiFiInterfaces.Resources',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='supportedOperations', index=0, number=1010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='supportedRelations', index=1, number=1011,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='supportedEvents', index=2, number=1012,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='supportedCapabilities', index=3, number=1013,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fullEntity', index=4, number=10050,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='config', index=5, number=10071,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='identifiers', index=6, number=10091,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='relations', index=7, number=10111,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='state', index=8, number=10130,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='omitState', index=9, number=10140,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='omitState_s_unOmit', index=10, number=10141,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='omitState_s_omit', index=11, number=10142,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tests_s_state', index=12, number=10180,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tests_s_start', index=13, number=10181,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tests_s_stop', index=14, number=10182,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tests_s_comms_s_start', index=15, number=10183,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tests_s_comms_s_stop', index=16, number=10184,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='aptimer_s_state', index=17, number=10371,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='aptimer_s_start', index=18, number=10372,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='aptimer_s_stop', index=19, number=10373,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='enableState', index=20, number=10191,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='enableState_s_enabled', index=21, number=10192,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='enableState_s_disabled', index=22, number=10193,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='enableState_s_wps_s_enabled', index=23, number=10194,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='enableState_s_wps_s_disabled', index=24, number=10195,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='discovery', index=25, number=80,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='discovery_s_state', index=26, number=81,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='discovery_s_state_s_enabled', index=27, number=82,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='discovery_s_state_s_disabled', index=28, number=83,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Max_Resources', index=29, number=1073741824,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2976,
serialized_end=3679,
)
_sym_db.RegisterEnumDescriptor(_RESOURCES)
Resources = enum_type_wrapper.EnumTypeWrapper(_RESOURCES)
_RELATIONS = _descriptor.EnumDescriptor(
name='Relations',
full_name='Honeywell.Security.ISOM.WiFiInterfaces.Relations',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='InterfaceOwnedByPeripheral', index=0, number=10051,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='InterfaceConnectedPeripheral', index=1, number=10052,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='InterfaceConnectedDevice', index=2, number=10053,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Max_Relations', index=3, number=1073741824,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3682,
serialized_end=3815,
)
_sym_db.RegisterEnumDescriptor(_RELATIONS)
Relations = enum_type_wrapper.EnumTypeWrapper(_RELATIONS)
_EVENTS = _descriptor.EnumDescriptor(
name='Events',
full_name='Honeywell.Security.ISOM.WiFiInterfaces.Events',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='commState_p_normal', index=0, number=10100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='commState_p_fail', index=1, number=10101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='aptimer_p_start', index=2, number=10102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='aptimer_p_stop', index=3, number=10103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Max_Events', index=4, number=1073741824,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3817,
serialized_end=3936,
)
_sym_db.RegisterEnumDescriptor(_EVENTS)
Events = enum_type_wrapper.EnumTypeWrapper(_EVENTS)
_WIFIINTERFACENETWORKTYPE = _descriptor.EnumDescriptor(
name='WiFiInterfaceNetworkType',
full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceNetworkType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Infrastructure', index=0, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Office', index=1, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Home', index=2, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Adhoc', index=3, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AccessPoint', index=4, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Max_WiFiInterfaceNetworkType', index=5, number=1073741824,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3939,
serialized_end=4073,
)
_sym_db.RegisterEnumDescriptor(_WIFIINTERFACENETWORKTYPE)
WiFiInterfaceNetworkType = enum_type_wrapper.EnumTypeWrapper(_WIFIINTERFACENETWORKTYPE)
# Module-level aliases for the enum values declared in the descriptors
# above (emitted by the protocol buffer compiler).
# Resources enum values.
supportedOperations = 1010
supportedRelations = 1011
supportedEvents = 1012
supportedCapabilities = 1013
fullEntity = 10050
config = 10071
identifiers = 10091
relations = 10111
state = 10130
omitState = 10140
omitState_s_unOmit = 10141
omitState_s_omit = 10142
tests_s_state = 10180
tests_s_start = 10181
tests_s_stop = 10182
tests_s_comms_s_start = 10183
tests_s_comms_s_stop = 10184
aptimer_s_state = 10371
aptimer_s_start = 10372
aptimer_s_stop = 10373
enableState = 10191
enableState_s_enabled = 10192
enableState_s_disabled = 10193
enableState_s_wps_s_enabled = 10194
enableState_s_wps_s_disabled = 10195
discovery = 80
discovery_s_state = 81
discovery_s_state_s_enabled = 82
discovery_s_state_s_disabled = 83
Max_Resources = 1073741824
# Relations enum values.
InterfaceOwnedByPeripheral = 10051
InterfaceConnectedPeripheral = 10052
InterfaceConnectedDevice = 10053
Max_Relations = 1073741824
# Events enum values.
commState_p_normal = 10100
commState_p_fail = 10101
aptimer_p_start = 10102
aptimer_p_stop = 10103
Max_Events = 1073741824
# WiFiInterfaceNetworkType enum values.
Infrastructure = 11
Office = 12
Home = 13
Adhoc = 14
AccessPoint = 15
Max_WiFiInterfaceNetworkType = 1073741824
# --- Message descriptors (generated by protoc; do not edit by hand) ---
# Each Descriptor below mirrors one message of the
# Honeywell.Security.ISOM.WiFiInterfaces .proto file.  serialized_start/end
# are byte offsets into the file descriptor's serialized proto and must stay
# in sync with the DESCRIPTOR definition earlier in this module.

# WiFiInterfaceOperations: repeated 'resources' enum field (field number 11).
# The enum_type is linked to _RESOURCES after all descriptors are defined.
_WIFIINTERFACEOPERATIONS = _descriptor.Descriptor(
  name='WiFiInterfaceOperations',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceOperations',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='resources', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceOperations.resources', index=0,
      number=11, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=127,
  serialized_end=232,
)
# WiFiInterfaceSupportedRelations: repeated 'relations' enum field
# (linked to _RELATIONS below).
_WIFIINTERFACESUPPORTEDRELATIONS = _descriptor.Descriptor(
  name='WiFiInterfaceSupportedRelations',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceSupportedRelations',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='relations', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceSupportedRelations.relations', index=0,
      number=11, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=234,
  serialized_end=347,
)
# WiFiInterfaceEvents: repeated 'events' enum field (linked to _EVENTS below).
_WIFIINTERFACEEVENTS = _descriptor.Descriptor(
  name='WiFiInterfaceEvents',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEvents',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='events', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEvents.events', index=0,
      number=11, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=349,
  serialized_end=444,
)
# WiFiInterfaceCommState: communication state of one Wi-Fi interface.
# Fields: string id, repeated enum state (linked to IsomStdDef COMMSTATE),
# uint64 linkQuality, message timeSinceLastLQMeasured (IsomDuration),
# int64 rssi, and bool wps (field number 1001).
_WIFIINTERFACECOMMSTATE = _descriptor.Descriptor(
  name='WiFiInterfaceCommState',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState.id', index=0,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='state', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState.state', index=1,
      number=21, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='linkQuality', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState.linkQuality', index=2,
      number=22, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # serialized_options here is a custom field option emitted by protoc as a
    # raw byte string; it is cleared again at module end (_options = None).
    _descriptor.FieldDescriptor(
      name='timeSinceLastLQMeasured', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState.timeSinceLastLQMeasured', index=3,
      number=23, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\220\265\030\021'), file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='rssi', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState.rssi', index=4,
      number=24, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='wps', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState.wps', index=5,
      number=1001, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=447,
  serialized_end=671,
)
# WiFiInterfaceCommStateList: repeated WiFiInterfaceCommState wrapper.
_WIFIINTERFACECOMMSTATELIST = _descriptor.Descriptor(
  name='WiFiInterfaceCommStateList',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommStateList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='commState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommStateList.commState', index=0,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=673,
  serialized_end=794,
)
# WiFiAPModeTimerState: single message field 'timeRemaining' (IsomDuration).
_WIFIAPMODETIMERSTATE = _descriptor.Descriptor(
  name='WiFiAPModeTimerState',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPModeTimerState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='timeRemaining', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPModeTimerState.timeRemaining', index=0,
      number=21, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\220\265\030\021'), file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=796,
  serialized_end=896,
)
# WiFiAPState: access-point mode state; 'apMode' enum (default value 11,
# linked to IsomStdDef STATE below) plus an apTimerState sub-message.
_WIFIAPSTATE = _descriptor.Descriptor(
  name='WiFiAPState',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='apMode', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPState.apMode', index=0,
      number=11, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=11,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='apTimerState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPState.apTimerState', index=1,
      number=12, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=899,
  serialized_end=1054,
)
# WiFiInterfaceState: aggregate runtime state of one interface; all
# sub-states are messages linked after the descriptor definitions
# (trouble/omit/bypass/comm/test/accessPoint states).
_WIFIINTERFACESTATE = _descriptor.Descriptor(
  name='WiFiInterfaceState',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState.id', index=0,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='troubleState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState.troubleState', index=1,
      number=25, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='omitState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState.omitState', index=2,
      number=26, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='bypassState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState.bypassState', index=3,
      number=27, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='commState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState.commState', index=4,
      number=21, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='testState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState.testState', index=5,
      number=22, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='accessPointState', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState.accessPointState', index=6,
      number=28, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=1057,
  serialized_end=1559,
)
# WiFiInterfaceStateList: repeated WiFiInterfaceState wrapper.
_WIFIINTERFACESTATELIST = _descriptor.Descriptor(
  name='WiFiInterfaceStateList',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceStateList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='state', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceStateList.state', index=0,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=1561,
  serialized_end=1670,
)
# WiFiInterfaceIdentifiers: naming/identity fields of an interface, all
# optional proto2 strings (id, guid, name, description, networkId, macAddress).
_WIFIINTERFACEIDENTIFIERS = _descriptor.Descriptor(
  name='WiFiInterfaceIdentifiers',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers.id', index=0,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='guid', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers.guid', index=1,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers.name', index=2,
      number=13, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='description', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers.description', index=3,
      number=14, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='networkId', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers.networkId', index=4,
      number=15, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='macAddress', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers.macAddress', index=5,
      number=16, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=1673,
  serialized_end=1809,
)
# WiFiInterfaceRelation: a single relation entry; 'name' is an enum field
# whose default (10051) is the InterfaceOwnedByPeripheral value.
_WIFIINTERFACERELATION = _descriptor.Descriptor(
  name='WiFiInterfaceRelation',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelation.id', index=0,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelation.name', index=1,
      number=12, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=10051,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='entityId', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelation.entityId', index=2,
      number=14, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=1812,
  serialized_end=1940,
)
# WiFiInterfaceRelationList: repeated WiFiInterfaceRelation wrapper.
_WIFIINTERFACERELATIONLIST = _descriptor.Descriptor(
  name='WiFiInterfaceRelationList',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelationList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='relation', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelationList.relation', index=0,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=1942,
  serialized_end=2060,
)
# WiFiInterfaceConfig: configuration of one interface (identifiers, repeated
# relations, omit type, subType string wrapper, security config, networkType).
# Enum fields 'omit' and 'networkType' both default to the numeric value 11.
_WIFIINTERFACECONFIG = _descriptor.Descriptor(
  name='WiFiInterfaceConfig',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='identifiers', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig.identifiers', index=0,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='relation', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig.relation', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='omit', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig.omit', index=2,
      number=21, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=11,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Custom field option bytes emitted by protoc; cleared at module end.
    _descriptor.FieldDescriptor(
      name='subType', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig.subType', index=3,
      number=29, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\220\265\030\023'), file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='secConfig', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig.secConfig', index=4,
      number=25, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='networkType', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig.networkType', index=5,
      number=24, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=11,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=2063,
  serialized_end=2559,
)
# WiFiInterfaceConfigList: repeated WiFiInterfaceConfig wrapper.
_WIFIINTERFACECONFIGLIST = _descriptor.Descriptor(
  name='WiFiInterfaceConfigList',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfigList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='config', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfigList.config', index=0,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=2561,
  serialized_end=2673,
)
# WiFiInterfaceEntity: pairs a config with a state.  Note the wider extension
# range (900000, 1100000) compared with the other messages in this file.
_WIFIINTERFACEENTITY = _descriptor.Descriptor(
  name='WiFiInterfaceEntity',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntity',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='config', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntity.config', index=0,
      number=21, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='state', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntity.state', index=1,
      number=31, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(900000, 1100000), ],
  oneofs=[
  ],
  serialized_start=2676,
  serialized_end=2859,
)
# WiFiInterfaceEntityList: repeated WiFiInterfaceEntity wrapper.
_WIFIINTERFACEENTITYLIST = _descriptor.Descriptor(
  name='WiFiInterfaceEntityList',
  full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntityList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='entity', full_name='Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntityList.entity', index=0,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=2861,
  serialized_end=2973,
)
# --- Cross-link field types (generated code) ---
# Descriptors above were created with enum_type/message_type=None; protoc
# resolves those references here, after all descriptors exist.  Types with an
# *__pb2 prefix come from the imported generated modules (IsomStdDef,
# IsomInterfaces, IsomDevices).
_WIFIINTERFACEOPERATIONS.fields_by_name['resources'].enum_type = _RESOURCES
_WIFIINTERFACESUPPORTEDRELATIONS.fields_by_name['relations'].enum_type = _RELATIONS
_WIFIINTERFACEEVENTS.fields_by_name['events'].enum_type = _EVENTS
_WIFIINTERFACECOMMSTATE.fields_by_name['state'].enum_type = IsomStdDef__pb2._COMMSTATE
_WIFIINTERFACECOMMSTATE.fields_by_name['timeSinceLastLQMeasured'].message_type = IsomStdDef__pb2._ISOMDURATION
_WIFIINTERFACECOMMSTATELIST.fields_by_name['commState'].message_type = _WIFIINTERFACECOMMSTATE
_WIFIAPMODETIMERSTATE.fields_by_name['timeRemaining'].message_type = IsomStdDef__pb2._ISOMDURATION
_WIFIAPSTATE.fields_by_name['apMode'].enum_type = IsomStdDef__pb2._STATE
_WIFIAPSTATE.fields_by_name['apTimerState'].message_type = _WIFIAPMODETIMERSTATE
_WIFIINTERFACESTATE.fields_by_name['troubleState'].message_type = IsomInterfaces__pb2._INTERFACETROUBLESTATE
_WIFIINTERFACESTATE.fields_by_name['omitState'].message_type = IsomDevices__pb2._DEVICEOMITSTATE
_WIFIINTERFACESTATE.fields_by_name['bypassState'].message_type = IsomDevices__pb2._DEVICEBYPASSSTATE
_WIFIINTERFACESTATE.fields_by_name['commState'].message_type = _WIFIINTERFACECOMMSTATE
_WIFIINTERFACESTATE.fields_by_name['testState'].message_type = IsomInterfaces__pb2._INTERFACETESTSTATE
_WIFIINTERFACESTATE.fields_by_name['accessPointState'].message_type = _WIFIAPSTATE
_WIFIINTERFACESTATELIST.fields_by_name['state'].message_type = _WIFIINTERFACESTATE
_WIFIINTERFACERELATION.fields_by_name['name'].enum_type = _RELATIONS
_WIFIINTERFACERELATIONLIST.fields_by_name['relation'].message_type = _WIFIINTERFACERELATION
_WIFIINTERFACECONFIG.fields_by_name['identifiers'].message_type = _WIFIINTERFACEIDENTIFIERS
_WIFIINTERFACECONFIG.fields_by_name['relation'].message_type = _WIFIINTERFACERELATION
_WIFIINTERFACECONFIG.fields_by_name['omit'].enum_type = IsomDevices__pb2._DEVICEOMITTYPE
_WIFIINTERFACECONFIG.fields_by_name['subType'].message_type = IsomStdDef__pb2._ISOMSTRING
_WIFIINTERFACECONFIG.fields_by_name['secConfig'].message_type = IsomInterfaces__pb2._INTERFACESECURITYCONFIGURATION
_WIFIINTERFACECONFIG.fields_by_name['networkType'].enum_type = _WIFIINTERFACENETWORKTYPE
_WIFIINTERFACECONFIGLIST.fields_by_name['config'].message_type = _WIFIINTERFACECONFIG
_WIFIINTERFACEENTITY.fields_by_name['config'].message_type = _WIFIINTERFACECONFIG
_WIFIINTERFACEENTITY.fields_by_name['state'].message_type = _WIFIINTERFACESTATE
_WIFIINTERFACEENTITYLIST.fields_by_name['entity'].message_type = _WIFIINTERFACEENTITY
# Register every message and file-level enum with the file descriptor, then
# register the file descriptor itself with the default symbol database.
DESCRIPTOR.message_types_by_name['WiFiInterfaceOperations'] = _WIFIINTERFACEOPERATIONS
DESCRIPTOR.message_types_by_name['WiFiInterfaceSupportedRelations'] = _WIFIINTERFACESUPPORTEDRELATIONS
DESCRIPTOR.message_types_by_name['WiFiInterfaceEvents'] = _WIFIINTERFACEEVENTS
DESCRIPTOR.message_types_by_name['WiFiInterfaceCommState'] = _WIFIINTERFACECOMMSTATE
DESCRIPTOR.message_types_by_name['WiFiInterfaceCommStateList'] = _WIFIINTERFACECOMMSTATELIST
DESCRIPTOR.message_types_by_name['WiFiAPModeTimerState'] = _WIFIAPMODETIMERSTATE
DESCRIPTOR.message_types_by_name['WiFiAPState'] = _WIFIAPSTATE
DESCRIPTOR.message_types_by_name['WiFiInterfaceState'] = _WIFIINTERFACESTATE
DESCRIPTOR.message_types_by_name['WiFiInterfaceStateList'] = _WIFIINTERFACESTATELIST
DESCRIPTOR.message_types_by_name['WiFiInterfaceIdentifiers'] = _WIFIINTERFACEIDENTIFIERS
DESCRIPTOR.message_types_by_name['WiFiInterfaceRelation'] = _WIFIINTERFACERELATION
DESCRIPTOR.message_types_by_name['WiFiInterfaceRelationList'] = _WIFIINTERFACERELATIONLIST
DESCRIPTOR.message_types_by_name['WiFiInterfaceConfig'] = _WIFIINTERFACECONFIG
DESCRIPTOR.message_types_by_name['WiFiInterfaceConfigList'] = _WIFIINTERFACECONFIGLIST
DESCRIPTOR.message_types_by_name['WiFiInterfaceEntity'] = _WIFIINTERFACEENTITY
DESCRIPTOR.message_types_by_name['WiFiInterfaceEntityList'] = _WIFIINTERFACEENTITYLIST
DESCRIPTOR.enum_types_by_name['Resources'] = _RESOURCES
DESCRIPTOR.enum_types_by_name['Relations'] = _RELATIONS
DESCRIPTOR.enum_types_by_name['Events'] = _EVENTS
DESCRIPTOR.enum_types_by_name['WiFiInterfaceNetworkType'] = _WIFIINTERFACENETWORKTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Concrete message classes (generated code) ---
# For each descriptor, build the usable Python message class via the
# reflection metaclass and register it with the symbol database.  The
# @@protoc_insertion_point comments are markers used by protoc plugins and
# must not be removed.
WiFiInterfaceOperations = _reflection.GeneratedProtocolMessageType('WiFiInterfaceOperations', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACEOPERATIONS,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceOperations)
  })
_sym_db.RegisterMessage(WiFiInterfaceOperations)
WiFiInterfaceSupportedRelations = _reflection.GeneratedProtocolMessageType('WiFiInterfaceSupportedRelations', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACESUPPORTEDRELATIONS,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceSupportedRelations)
  })
_sym_db.RegisterMessage(WiFiInterfaceSupportedRelations)
WiFiInterfaceEvents = _reflection.GeneratedProtocolMessageType('WiFiInterfaceEvents', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACEEVENTS,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEvents)
  })
_sym_db.RegisterMessage(WiFiInterfaceEvents)
WiFiInterfaceCommState = _reflection.GeneratedProtocolMessageType('WiFiInterfaceCommState', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACECOMMSTATE,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommState)
  })
_sym_db.RegisterMessage(WiFiInterfaceCommState)
WiFiInterfaceCommStateList = _reflection.GeneratedProtocolMessageType('WiFiInterfaceCommStateList', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACECOMMSTATELIST,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceCommStateList)
  })
_sym_db.RegisterMessage(WiFiInterfaceCommStateList)
WiFiAPModeTimerState = _reflection.GeneratedProtocolMessageType('WiFiAPModeTimerState', (_message.Message,), {
  'DESCRIPTOR' : _WIFIAPMODETIMERSTATE,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPModeTimerState)
  })
_sym_db.RegisterMessage(WiFiAPModeTimerState)
WiFiAPState = _reflection.GeneratedProtocolMessageType('WiFiAPState', (_message.Message,), {
  'DESCRIPTOR' : _WIFIAPSTATE,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiAPState)
  })
_sym_db.RegisterMessage(WiFiAPState)
WiFiInterfaceState = _reflection.GeneratedProtocolMessageType('WiFiInterfaceState', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACESTATE,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceState)
  })
_sym_db.RegisterMessage(WiFiInterfaceState)
WiFiInterfaceStateList = _reflection.GeneratedProtocolMessageType('WiFiInterfaceStateList', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACESTATELIST,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceStateList)
  })
_sym_db.RegisterMessage(WiFiInterfaceStateList)
WiFiInterfaceIdentifiers = _reflection.GeneratedProtocolMessageType('WiFiInterfaceIdentifiers', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACEIDENTIFIERS,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceIdentifiers)
  })
_sym_db.RegisterMessage(WiFiInterfaceIdentifiers)
WiFiInterfaceRelation = _reflection.GeneratedProtocolMessageType('WiFiInterfaceRelation', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACERELATION,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelation)
  })
_sym_db.RegisterMessage(WiFiInterfaceRelation)
WiFiInterfaceRelationList = _reflection.GeneratedProtocolMessageType('WiFiInterfaceRelationList', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACERELATIONLIST,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceRelationList)
  })
_sym_db.RegisterMessage(WiFiInterfaceRelationList)
WiFiInterfaceConfig = _reflection.GeneratedProtocolMessageType('WiFiInterfaceConfig', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACECONFIG,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfig)
  })
_sym_db.RegisterMessage(WiFiInterfaceConfig)
WiFiInterfaceConfigList = _reflection.GeneratedProtocolMessageType('WiFiInterfaceConfigList', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACECONFIGLIST,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceConfigList)
  })
_sym_db.RegisterMessage(WiFiInterfaceConfigList)
WiFiInterfaceEntity = _reflection.GeneratedProtocolMessageType('WiFiInterfaceEntity', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACEENTITY,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntity)
  })
_sym_db.RegisterMessage(WiFiInterfaceEntity)
WiFiInterfaceEntityList = _reflection.GeneratedProtocolMessageType('WiFiInterfaceEntityList', (_message.Message,), {
  'DESCRIPTOR' : _WIFIINTERFACEENTITYLIST,
  '__module__' : 'IsomWiFiInterfaces_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.WiFiInterfaces.WiFiInterfaceEntityList)
  })
_sym_db.RegisterMessage(WiFiInterfaceEntityList)
# Standard protoc-generated cleanup for the three fields that carried custom
# serialized options above.  NOTE(review): presumably this drops the raw
# option bytes once the descriptors are built, leaving options to be resolved
# lazily from serialized_options — confirm against the protobuf runtime
# version this file was generated for.
_WIFIINTERFACECOMMSTATE.fields_by_name['timeSinceLastLQMeasured']._options = None
_WIFIAPMODETIMERSTATE.fields_by_name['timeRemaining']._options = None
_WIFIINTERFACECONFIG.fields_by_name['subType']._options = None
# @@protoc_insertion_point(module_scope)
/Multi-Template-Matching-1.6.6.tar.gz/Multi-Template-Matching-1.6.6/README.md | [](https://mybinder.org/v2/gh/multi-template-matching/MultiTemplateMatching-Python/master?filepath=tutorials)

[](https://saythanks.io/to/laurent132.thomas@laposte.net)
# Multi-Template-Matching
Multi-Template-Matching is a python package to perform object-recognition in images using one or several smaller template images.
The main function `MTM.matchTemplates` returns the best predicted locations provided either a score_threshold and/or the expected number of objects in the image.
The branch opencl contains some test using the UMat object to run on GPU, but it is actually slow, which can be expected for small dataset as the transfer of the data between the CPU and GPU is slow.
# News
- 03/03/2023 : Version 1.6.4 contributed by @bartleboeuf comes with speed enhancement thanks to parallelizing of the individual template searches.
Thanks for this first PR !!
- 10/11/2021 : You might be interested in the newer Python implementation, which is more object-oriented and relies only on scikit-image and shapely:
https://github.com/multi-template-matching/mtm-python-oop
# Installation
Using pip in a python environment, `pip install Multi-Template-Matching`
Once installed, `import MTM` should work.
Example Jupyter notebooks can be downloaded from the tutorial folder of the GitHub repository and executed in the newly configured Python environment.
## Install in dev mode
If you want to contribute or experiment with the source code, you can install the package "from source", by first downloading or cloning the repo.
Then opening a command prompt in the repo's root directory (the one containing this README) and calling `pip install -e .` (mind the final dot).
- the `-e` flag stands for editable and make sure that any change to the source code will be directly reflected when you import the package in your script
- the `.` just tells pip to look for the package to install in the current directory
# Documentation
The [wiki](https://github.com/multi-template-matching/MultiTemplateMatching-Python/wiki) section of the repo contains a mini API documentation with description of the key functions of the package.
The [website](https://multi-template-matching.github.io/Multi-Template-Matching/) of the project contains some more general documentation.
# Examples
Check out the [jupyter notebook tutorial](https://github.com/multi-template-matching/MultiTemplateMatching-Python/tree/master/tutorials) for some example of how to use the package.
You can run the tutorials online using Binder, no configuration needed ! (click the Binder banner on top of this page).
To run the tutorials locally, install the package using pip as described above, then clone the repository and unzip it.
Finally open a jupyter-notebook session in the unzipped folder to be able to open and execute the notebook.
The [wiki](https://github.com/multi-template-matching/MultiTemplateMatching-Fiji/wiki) section of this related repository also provides some information about the implementation.
# Citation
If you use this implementation for your research, please cite:
Thomas, L.S.V., Gehrig, J. Multi-template matching: a versatile tool for object-localization in microscopy images.
BMC Bioinformatics 21, 44 (2020). https://doi.org/10.1186/s12859-020-3363-7
Download the citation as a .ris file from the journal website, [here](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-020-3363-7.ris).
# Releases
Previous github releases were archived to Zenodo, but the best is to use pip to install specific versions.
[](https://zenodo.org/badge/latestdoi/197186256)
# Related projects
See this [repo](https://github.com/multi-template-matching/MultiTemplateMatching-Fiji) for the implementation as a Fiji plugin.
[Here](https://nodepit.com/workflow/com.nodepit.space%2Flthomas%2Fpublic%2FMulti-Template%20Matching.knwf) for a KNIME workflow using Multi-Template-Matching.
# Origin of the work
This work has been part of the PhD project of **Laurent Thomas** under supervision of **Dr. Jochen Gehrig** at ACQUIFER.
<img src="https://github.com/multi-template-matching/MultiTemplateMatching-Python/blob/master/images/Acquifer_Logo_60k_cmyk_300dpi.png" alt="ACQUIFER" width="400" height="80">
# Funding
This project has received funding from the European Union’s Horizon 2020 research and innovation program under the Marie Sklodowska-Curie grant agreement No 721537 ImageInLife.
<p float="left">
<img src="https://github.com/multi-template-matching/MultiTemplateMatching-Python/blob/master/images/ImageInlife.png" alt="ImageInLife" width="130" height="100">
<img src="https://github.com/multi-template-matching/MultiTemplateMatching-Python/blob/master/images/MarieCurie.jpg" alt="MarieCurie" width="130" height="130">
</p>
| PypiClean |
/FishFishJump-0.2.3.tar.gz/FishFishJump-0.2.3/fish_dashboard/static/datatables/dataTables.bootstrap4.js | * DataTables integration for Bootstrap 3. This requires Bootstrap 3 and
* DataTables 1.10 or newer.
*
* This file sets the defaults and adds options to DataTables to style its
* controls using Bootstrap. See http://datatables.net/manual/styling/bootstrap
* for further information.
*/
// UMD wrapper: registers with an AMD or CommonJS loader when one is present,
// otherwise applies the integration directly against the browser globals.
(function( factory ){
	if ( typeof define === 'function' && define.amd ) {
		// AMD
		define( ['jquery', 'datatables.net'], function ( $ ) {
			return factory( $, window, document );
		} );
	}
	else if ( typeof exports === 'object' ) {
		// CommonJS
		module.exports = function (root, $) {
			if ( ! root ) {
				root = window;
			}
			if ( ! $ || ! $.fn.dataTable ) {
				// Require DataTables, which attaches to jQuery, including
				// jQuery if needed and have a $ property so we can access the
				// jQuery object that is used
				$ = require('datatables.net')(root, $).$;
			}
			return factory( $, root, root.document );
		};
	}
	else {
		// Browser
		factory( jQuery, window, document );
	}
}(function( $, window, document, undefined ) {
'use strict';
var DataTable = $.fn.dataTable;

/* Set the defaults for DataTables initialisation */
// `dom` lays out the table controls on a Bootstrap grid
// (length + filter / table / info + pagination).
$.extend( true, DataTable.defaults, {
	dom:
		"<'row'<'col-sm-12 col-md-6'l><'col-sm-12 col-md-6'f>>" +
		"<'row'<'col-sm-12'tr>>" +
		"<'row'<'col-sm-12 col-md-5'i><'col-sm-12 col-md-7'p>>",
	renderer: 'bootstrap'
} );

/* Default class modification */
// Map DataTables element classes onto Bootstrap 4 equivalents.
$.extend( DataTable.ext.classes, {
	sWrapper: "dataTables_wrapper container-fluid dt-bootstrap4",
	sFilterInput: "form-control form-control-sm",
	sLengthSelect: "form-control form-control-sm",
	sProcessing: "dataTables_processing card",
	sPageButton: "paginate_button page-item"
} );

/* Bootstrap paging button renderer */
DataTable.ext.renderer.pageButton.bootstrap = function ( settings, host, idx, buttons, page, pages ) {
	var api = new DataTable.Api( settings );
	var classes = settings.oClasses;
	var lang = settings.oLanguage.oPaginate;
	var aria = settings.oLanguage.oAria.paginate || {};
	var btnDisplay, btnClass, counter=0;

	// Builds one <li><a> pair per paging button and appends it to `container`;
	// recurses when a button entry is itself an array of buttons.
	var attach = function( container, buttons ) {
		var i, ien, node, button;
		var clickHandler = function ( e ) {
			e.preventDefault();
			if ( !$(e.currentTarget).hasClass('disabled') && api.page() != e.data.action ) {
				api.page( e.data.action ).draw( 'page' );
			}
		};

		for ( i=0, ien=buttons.length ; i<ien ; i++ ) {
			button = buttons[i];

			if ( $.isArray( button ) ) {
				attach( container, button );
			}
			else {
				btnDisplay = '';
				btnClass = '';

				switch ( button ) {
					case 'ellipsis':
						btnDisplay = '…';
						btnClass = 'disabled';
						break;

					case 'first':
						btnDisplay = lang.sFirst;
						btnClass = button + (page > 0 ?
							'' : ' disabled');
						break;

					case 'previous':
						btnDisplay = lang.sPrevious;
						btnClass = button + (page > 0 ?
							'' : ' disabled');
						break;

					case 'next':
						btnDisplay = lang.sNext;
						btnClass = button + (page < pages-1 ?
							'' : ' disabled');
						break;

					case 'last':
						btnDisplay = lang.sLast;
						btnClass = button + (page < pages-1 ?
							'' : ' disabled');
						break;

					default:
						// Numeric page button (0-based internally, shown 1-based).
						btnDisplay = button + 1;
						btnClass = page === button ?
							'active' : '';
						break;
				}

				if ( btnDisplay ) {
					node = $('<li>', {
							'class': classes.sPageButton+' '+btnClass,
							'id': idx === 0 && typeof button === 'string' ?
								settings.sTableId +'_'+ button :
								null
						} )
						.append( $('<a>', {
								'href': '#',
								'aria-controls': settings.sTableId,
								'aria-label': aria[ button ],
								'data-dt-idx': counter,
								'tabindex': settings.iTabIndex,
								'class': 'page-link'
							} )
							.html( btnDisplay )
						)
						.appendTo( container );

					settings.oApi._fnBindAction(
						node, {action: button}, clickHandler
					);

					counter++;
				}
			}
		}
	};

	// IE9 throws an 'unknown error' if document.activeElement is used
	// inside an iframe or frame.
	var activeEl;

	try {
		// Because this approach is destroying and recreating the paging
		// elements, focus is lost on the select button which is bad for
		// accessibility. So we want to restore focus once the draw has
		// completed
		activeEl = $(host).find(document.activeElement).data('dt-idx');
	}
	catch (e) {}

	attach(
		$(host).empty().html('<ul class="pagination"/>').children('ul'),
		buttons
	);

	if ( activeEl !== undefined ) {
		$(host).find( '[data-dt-idx='+activeEl+']' ).focus();
	}
};

return DataTable;
}));
/DBSP_DRP-1.0.0-py3-none-any.whl/dbsp_drp/quicklook.py | import argparse
import os
import time
import sys
from typing import List, Optional
import glob
from multiprocessing import Process
import numpy as np
from astropy.io import fits
from pkg_resources import resource_filename
from pypeit.pypeitsetup import PypeItSetup
from pypeit.core import framematch
from pypeit import pypeit
from pypeit import fluxcalibrate
from pypeit.scripts import show_2dspec
from dbsp_drp import show_spectrum
def entrypoint():
    """Console-script entry point: parse CLI arguments, then run the quicklook."""
    cli_args = parse()
    main(cli_args)
def get_cfg_lines(spectrograph: str) -> List[str]:
    """
    Get standard quicklook PypeIt configuration for ``spectrograph``.

    Args:
        spectrograph (str): PypeIt name of spectrograph.

    Returns:
        List[str]: Standard PypeIt quicklook configuration for ``spectrograph``.
    """
    # The arm name ("red"/"blue") is the last token of the spectrograph name.
    arm = spectrograph.split('_')[-1]
    return [
        "[rdx]",
        f"spectrograph = {spectrograph}",
        "[calibrations]",
        f"master_dir = Master_{arm}",
        "raise_chk_error = False",
        "[scienceframe]",
        "[[process]]",
        "mask_cr = False",
        "[baseprocess]",
        "use_biasimage = False",
        "[reduce]",
        "[[extraction]]",
        "skip_optimal = True",
        "[[findobj]]",
        "skip_second_find = True",
    ]
def parse(options: Optional[List[str]] = None) -> argparse.Namespace:
    """Build the CLI parser and parse ``options`` (``sys.argv`` when None)."""
    parser = argparse.ArgumentParser(
        description="Quicklook for P200 DBSP",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("fname", type=str, nargs="+",
                        help="file to take a quick look at, or else red/blue\n"
                             "to just perform rough calibrations")
    parser.add_argument("--no-show", default=False, action="store_true",
                        help="Set this flag to suppress opening of plots")
    if options is None:
        return parser.parse_args()
    return parser.parse_args(options)
def main(args: argparse.Namespace):
    """Run the DBSP quicklook pipeline on ``args.fname``.

    Existing files are reduced as science frames; if any requested name is not
    an existing file, the run is calibration-only and an arc + flat frame are
    searched for next to the first name.  Results are fluxed (when a matching
    sensitivity function ships with the package) and optionally displayed.
    """
    t = time.perf_counter()
    # need an arc frame and a flat frame
    # The arm is inferred purely from the file names ('red'/'blue' substring).
    if all('red' in os.path.basename(fname) for fname in args.fname):
        spectrograph = 'p200_dbsp_red'
    elif all('blue' in os.path.basename(fname) for fname in args.fname):
        spectrograph = 'p200_dbsp_blue'
    else:
        sys.exit(f"Raw files must be from same spectrograph: {args.fname}.")
    arm = spectrograph.split('_')[-1]
    CFG_LINES = get_cfg_lines(spectrograph)
    flatimg = ""
    arcimg = ""
    sciimg = args.fname
    # Any non-existent path switches the whole run to calibration-only mode.
    calib_only = any(not os.path.isfile(fname) for fname in args.fname)
    if calib_only:
        # NOTE(review): str.rstrip strips any of these CHARACTERS, not the
        # literal ".fits" suffix — a stem ending in 'f', 'i', 't', 's' or a
        # digit gets over-stripped.  TODO confirm intended for DBSP naming.
        root = args.fname[0].rstrip('0123456789.fits')
        paths = glob.glob(f'{root}*.fits')
        # Pick the first frame whose header marks it as a flat and the first
        # marked as an arc ("arcs" object or "cal" image type).
        for path in paths:
            with fits.open(path) as hdul:
                if not flatimg:
                    if hdul[0].header['OBJECT'] == 'flat' or hdul[0].header['IMGTYPE'] == 'flat':
                        flatimg = path
                if not arcimg:
                    if hdul[0].header['OBJECT'] == 'arcs' or hdul[0].header['IMGTYPE'] == 'cal':
                        arcimg = path
                if flatimg and arcimg:
                    break
        if not (flatimg and arcimg):
            raise Exception(f"Could not find a flat and an arc frame in the same directory as {root}!")
        files = [arcimg, flatimg]
    else:
        files = args.fname
    # Set up a minimal PypeIt run over just these files.
    ps = PypeItSetup(files, path="./", spectrograph_name=spectrograph,
                     cfg_lines=CFG_LINES)
    ps.build_fitstbl(strict=False)
    bm = framematch.FrameTypeBitMask()
    file_bits = np.zeros(len(files), dtype=bm.minimum_dtype())
    if calib_only:
        # files == [arc, flat] by construction above.
        file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'tilt'])
        file_bits[1] = bm.turn_on(file_bits[1], ['pixelflat', 'trace', 'illumflat'])
    else:
        for i in range(len(files)):
            file_bits[i] = bm.turn_on(file_bits[i], 'science')
    ps.fitstbl['calib_id'] = 1
    # Map our file order onto the (possibly re-ordered) fits table rows.
    asrt = np.array([ps.fitstbl['filename'].data.tolist().index(os.path.basename(fname)) for fname in files])
    ps.fitstbl.set_frame_types(file_bits[asrt])
    ps.fitstbl.set_combination_groups()
    ps.fitstbl['setup'] = 'A'
    ## Hacky but needed to workaround PypeIt
    # pypeit.PypeIt crashes if all ra/dec data is missing, so we fake some.
    # NOTE: `!= None` is an element-wise comparison over the table column;
    # do not "fix" it to `is not None`.
    if sum(ps.fitstbl['ra'] != None) == 0:
        ps.fitstbl['ra'][0] = 0
    if sum(ps.fitstbl['dec'] != None) == 0:
        ps.fitstbl['dec'][0] = 0
    ofiles = ps.fitstbl.write_pypeit(configs='A', cfg_lines=CFG_LINES)
    pypeIt = pypeit.PypeIt(ofiles[0], verbosity=0,
                           reuse_masters=True, overwrite=True,
                           logname='dbsp_ql.log', show=False, calib_only=calib_only)
    if calib_only:
        pypeIt.calib_all()
    else:
        pypeIt.reduce_all()
    pypeIt.build_qa()
    # Collect the spec2d/spec1d products that were actually written to disk.
    output_spec2ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
        pypeIt.spec_output_file(i, True)
        for i in range(len(pypeIt.fitstbl.table))
        if pypeIt.fitstbl.table[i]['frametype'] in ['science']
    ]))
    output_spec1ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
        pypeIt.spec_output_file(i)
        for i in range(len(pypeIt.fitstbl.table))
        if pypeIt.fitstbl.table[i]['frametype'] in ['science']
    ]))
    if output_spec1ds and not calib_only:
        # Flux-calibrate using the packaged sensitivity function matching
        # arm + grating + dichroic of the first science frame.
        spec = ps.spectrograph
        config = '_'.join([
            arm,
            spec.get_meta_value(sciimg[0], 'dispname').replace('/', '_'),
            spec.get_meta_value(sciimg[0], 'dichroic').lower()
        ])
        sensfiles = [resource_filename("dbsp_drp", f"data/sens_{config}.fits")]
        FxCalib = fluxcalibrate.FluxCalibrate.get_instance(output_spec1ds, sensfiles, par=ps.par['fluxcalib'])
    print(f"Time elapsed: {time.perf_counter() - t}s.")
    if not calib_only and not args.no_show:
        # Spawn viewers in subprocesses so they do not block this process.
        p1 = Process(target=show_spec2d_helper, args=(output_spec2ds[0],))
        p1.start()
        if output_spec1ds:
            with fits.open(output_spec1ds[0]) as hdul:
                specs = len(hdul) - 2  # extensions minus primary HDU and trailer
            parr = [None] * specs
            for i in range(specs):
                parr[i] = Process(target=show_spec1d_helper,
                                  args=(str(i + 1), output_spec1ds[0]))
                parr[i].start()
def show_spec2d_helper(file):
    """Open the PypeIt 2D-spectrum viewer on *file* (used as a subprocess target)."""
    parsed = show_2dspec.Show2DSpec.parse_args([file])
    return show_2dspec.Show2DSpec.main(parsed)
def show_spec1d_helper(exten, file):
    """Open the 1D-spectrum viewer on extension *exten* of *file* (subprocess target)."""
    viewer_args = show_spectrum.parser(['--BOX', '--exten', exten, file])
    return show_spectrum.main(viewer_args)
/APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/utils/futils.py | import re
import shutil
import os
from struct import pack
# A function that takes an integer in the 8-bit range and returns
# a single-character byte object in py3 / a single-character string
# in py2.
#
# Bytes considered "text": printable ASCII plus common whitespace controls.
# Built via bytearray so the expression works on both Python 2 and 3
# (the original b''.join(chr(i) ...) only works on Python 2, where chr()
# returns a byte string).
_text_characters = bytes(bytearray(range(32, 127))) + b'\n\r\t\f\b'


def istextfile(filename, blocksize=512):
    """Guess whether ``filename`` is a text file or a binary file.

    Reads a single block of up to ``blocksize`` bytes from the start of the
    file.  If the block contains any NUL (``'\\x00'``) bytes, or more than 30%
    of its bytes fall outside the printable-ASCII/whitespace set, the file is
    assumed to be binary.  An empty file is considered a valid text file.

    Args:
        filename: Path of the file to inspect.
        blocksize: Maximum number of bytes to sample (default 512).

    Returns:
        True if the sampled block looks like text, False otherwise.
    """
    with open(filename, 'rb') as fileobj:
        block = fileobj.read(blocksize)
        fileobj.seek(0)
    if b'\x00' in block:
        # Files with null bytes are binary
        return False
    if not block:
        # An empty file is considered a valid text file
        return True
    # Use translate's 'deletechars' argument to efficiently remove all
    # occurrences of _text_characters from the block
    nontext = block.translate(None, _text_characters)
    return float(len(nontext)) / len(block) <= 0.30
def is_little_endian():
    """Checks whether the current architecture is little-endian or not"""
    # Native byte order matches an explicit little-endian pack only on
    # little-endian machines.
    native = pack('@h', 1)
    little = pack('<h', 1)
    return native == little
def read_in_chunks(file_object, chunk_size=1024):
    """Lazy generator yielding successive chunks read from *file_object*.

    Default chunk size: 1k.  Stops as soon as a read returns no data.
    """
    size = int(chunk_size)
    chunk = file_object.read(size)
    while chunk:
        yield chunk
        chunk = file_object.read(size)
def read_txt_in_chunks(file_object, n=1024, comments='#'):
    """Lazy generator reading numeric tokens from a text file in chunks.

    Scans *file_object* line by line (text after *comments* on each line is
    discarded), collecting numeric tokens as strings, and yields lists of at
    most *n* tokens.  The final yield holds the remaining tokens and may be
    shorter than *n* (or empty, preserved for backward compatibility).

    Args:
        file_object: An open text-file-like object.
        n: Maximum number of tokens per yielded chunk (default 1024).
        comments: Character marking the start of a comment (default '#').
    """
    numeric_pattern = r'[+-]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?'
    data = []
    # Iterate the file object directly: xreadlines() was Python-2-only and
    # has been removed in Python 3.
    for line in file_object:
        line, _, _ = line.partition(comments)  # Eliminate comments
        data.extend(re.findall(numeric_pattern, line))
        if len(data) >= n:
            yield data[:n]
            data = data[n:]
    yield data
def getSize(f):
    """Return the size of file-like *f* in bytes (leaves *f* rewound to the start)."""
    f.seek(0, 2)  # jump to the end of the stream
    total = f.tell()
    f.seek(0)  # rewind, matching the documented contract
    return total
def get_delimiter(fileobject, lines=16):
    """Infers the delimiter used in a text file containing a list of numbers.

    The text file must contain on each line a list of numbers separated
    by a delimiter character, e.g.::

        # Example comment
        12.5,10.0,12.0
        30.0,5.0,3.0

    In this case the function will return ',' as delimiter.

    Args:
        fileobject: A text file like object (rewound to the start on return).
        lines: The maximum number of lines to be read from the beginning
            of the file in order to detect the delimiter.

    Returns:
        The most frequently seen delimiter character(s).
        An empty string if nothing was found.
    """
    comment = r'\s*#.*'
    integer = r'[+-]?\d+'
    decimal = r'\d+(e[+-]\d+)?'
    number = r'{integer}\.{decimal}'.format(integer=integer, decimal=decimal)
    pattern = (r'{comment}|({number}((?P<sep>[\W]+){number})*({comment})?)'.
               format(number=number, comment=comment))
    delimiters = {}
    # range() replaces the Python-2-only xrange().
    for _ in range(lines):
        line = fileobject.readline()
        if line == '':
            break
        m = re.match(pattern, line)
        if m:
            delimiter = m.groupdict()['sep']
            if delimiter:
                delimiters[delimiter] = delimiters.get(delimiter, 0) + 1
    fileobject.seek(0)
    if delimiters:
        # Most frequently observed separator wins.
        return max(delimiters, key=lambda x: delimiters[x])
    return ''
def get_sample_rate(filename, max_header_lines=64, comments='#'):
    """Search for a sample rate value in the header of a text file containing
    a seismic signal.

    Args:
        filename: Name of a text file containing a seismic signal.
        max_header_lines: Maximum number of lines to be read from the beginning
            of the file in order to get the sample rate value.
        comments: Character used to indicate the start of a comment.

    Returns:
        Sample rate, in Hz, as an int.
        None if no sample rate value is found in the header.
    """
    units = {'hz': 10.0 ** 0, 'khz': 10.0 ** 3,
             'mhz': 10.0 ** 6, 'ghz': 10.0 ** 9}
    pattern = r'sample\s+(rate|frequency).*?(?P<fs>\d+(\.\d*)?(e[+-]?\d+)?).*?(?P<unit>(ghz|mhz|khz|hz))'
    with open(filename, 'r') as f:
        # range() replaces the Python-2-only xrange().
        for _ in range(max_header_lines):
            line = f.readline()
            _, _, comment = line.partition(comments)  # Select comments
            if comment != '':
                m = re.search(pattern, comment.lower())
                if m:
                    fs = m.groupdict()['fs']
                    if fs:
                        # Scale by the detected unit before truncating to int.
                        return int(float(fs) * units[m.groupdict()['unit']])
    return None
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy the directory tree rooted at *src* into *dst*.

    *dst* is created when it does not exist yet; regular files are copied
    with metadata via ``shutil.copy2``.  *symlinks* and *ignore* are accepted
    for API compatibility and forwarded unchanged to recursive calls.
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
/ImSwitch-2.0.0.tar.gz/ImSwitch-2.0.0/imswitch/imscripting/model/ScriptExecutor.py | import sys
import traceback
from io import StringIO
from imswitch.imcommon.framework import Signal, SignalInterface, Thread, Worker
from imswitch.imcommon.model import initLogger
from .actions import getActionsScope
class ScriptExecutor(SignalInterface):
    """ Handles execution and state of scripts.

    Owns a worker object moved to a dedicated thread; script execution is
    requested through the private ``_sigExecute`` signal so that the code
    runs on the worker thread rather than the caller's thread.
    """

    sigOutputAppended = Signal(str)  # (outputText) — forwarded from the worker
    _sigExecute = Signal(str, str)  # (scriptPath, code) — internal dispatch
    sigExecutionFinished = Signal()

    def __init__(self, scriptScope):
        """Create the worker/thread pair; ``scriptScope`` is the base namespace
        made available to executed scripts."""
        super().__init__()
        self.__logger = initLogger(self)
        self._executionWorker = ExecutionThread(scriptScope)
        self._executionWorker.sigOutputAppended.connect(self.sigOutputAppended)
        self._executionThread = Thread()
        self._executionWorker.moveToThread(self._executionThread)
        self._executionWorker.sigExecutionFinished.connect(self.sigExecutionFinished)
        # Signal-based dispatch so execute() runs on the worker's thread.
        self._sigExecute.connect(self._executionWorker.execute)

    def __del__(self):
        # Shut the worker thread down cleanly before destruction.
        self._executionThread.quit()
        self._executionThread.wait()
        if hasattr(super(), '__del__'):
            super().__del__()

    def execute(self, scriptPath, code):
        """ Executes the specified script code. scriptPath is the path to the
        script file if it exists, or None if the script has not been saved to a
        file. """
        # Any currently running script is terminated first.
        self.terminate()
        self._executionThread.start()
        self._sigExecute.emit(scriptPath, code)

    def terminate(self):
        """ Terminates the currently running script. Does nothing if no script
        is running. """
        if self.isExecuting():
            print()  # Blank line
            self.__logger.info('Terminated script')
            self._executionThread.terminate()

    def isExecuting(self):
        """ Returns whether a script is currently being executed. """
        return self._executionThread.isRunning() and self._executionWorker.isWorking()
class ExecutionThread(Worker):
    """Worker that executes script code and mirrors its stdout/stderr output
    through ``sigOutputAppended``."""

    sigOutputAppended = Signal(str)  # (outputText)
    sigExecutionFinished = Signal()

    def __init__(self, scriptScope):
        super().__init__()
        self.__logger = initLogger(self, tryInheritParent=True)
        self._scriptScope = scriptScope  # base namespace for executed scripts
        self._isWorking = False  # True only while execute() is running

    def execute(self, scriptPath, code):
        """Run ``code`` with a scope built from the base scope plus the
        script actions bound to ``scriptPath``."""
        scriptScope = {}
        scriptScope.update(self._scriptScope)
        scriptScope.update(getActionsScope(self._scriptScope, scriptPath))

        self._isWorking = True
        # Swap stdout/stderr for a signal-emitting buffer for the duration of
        # the script; the finally block guarantees they are restored even if
        # logging or exec itself fails.
        oldStdout = sys.stdout
        oldStderr = sys.stderr
        try:
            outputIO = SignaledStringIO(self.sigOutputAppended)
            sys.stdout = outputIO
            sys.stderr = outputIO
            self.__logger.info('Started script')
            print()  # Blank line
            try:
                exec(code, scriptScope)
            except Exception:
                # Script errors are reported in the output, not raised.
                self.__logger.error(traceback.format_exc())
            print()  # Blank line
            self.__logger.info('Finished script')
        finally:
            sys.stdout = oldStdout
            sys.stderr = oldStderr
            self.sigExecutionFinished.emit()
            self._isWorking = False

    def isWorking(self):
        """Return whether a script is currently being executed."""
        return self._isWorking
class SignaledStringIO(StringIO):
    """A StringIO buffer that additionally emits every written chunk on a signal."""

    def __init__(self, signal):
        super().__init__()
        self._outputSignal = signal

    def write(self, text):
        # Buffer the text first, then mirror it out through the signal.
        StringIO.write(self, text)
        self._outputSignal.emit(text)
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. | PypiClean |
/ODMExifRead-3.0.4.tar.gz/ODMExifRead-3.0.4/exifread/__init__.py | import struct
from typing import BinaryIO
from exifread.exif_log import get_logger
from exifread.classes import ExifHeader
from exifread.tags import DEFAULT_STOP_TAG
from exifread.utils import ord_, make_string
from exifread.heic import HEICExifFinder
from exifread.jpeg import find_jpeg_exif
from exifread.exceptions import InvalidExif, ExifNotFound
__version__ = '3.0.4'
logger = get_logger()
def _find_tiff_exif(fh: BinaryIO) -> tuple:
    """Rewind *fh*, read the first byte-order byte, and return (0, endian byte);
    in a bare TIFF the EXIF structure starts at offset 0."""
    logger.debug("TIFF format recognized in data[0:2]")
    fh.seek(0)
    endian = fh.read(1)
    fh.read(1)  # skip the second byte of the byte-order mark
    return 0, endian
def _find_webp_exif(fh: BinaryIO) -> tuple:
    """Walk the RIFF chunks of an extended WebP file to locate its EXIF chunk.

    File specification: https://developers.google.com/speed/webp/docs/riff_container
    """
    logger.debug("WebP format recognized in data[0:4], data[8:12]")
    header = fh.read(5)
    # Only the extended format (VP8X with the EXIF flag, bit 3) carries EXIF:
    # https://developers.google.com/speed/webp/docs/riff_container#extended_file_format
    if header[0:4] == b'VP8X' and header[4] & 8:
        fh.seek(13, 1)
        while True:
            chunk_header = fh.read(8)  # Chunk FourCC (32 bits) and Chunk Size (32 bits)
            if len(chunk_header) != 8:
                raise InvalidExif("Invalid webp file chunk header.")
            if chunk_header[0:4] == b'EXIF':
                offset = fh.tell()
                return offset, fh.read(1)
            chunk_size = struct.unpack('<L', chunk_header[4:8])[0]
            fh.seek(chunk_size, 1)
    raise ExifNotFound("Webp file does not have exif data.")
def _find_png_exif(fh: BinaryIO, data: bytes) -> tuple:
    """Scan PNG chunks for an ``eXIf`` chunk, returning (offset, first endian byte)."""
    logger.debug("PNG format recognized in data[0:8]=%s", data[:8].hex())
    fh.seek(8)  # skip the 8-byte PNG signature
    while True:
        chunk_header = fh.read(8)  # length (4 bytes, big-endian) + chunk type (4 bytes)
        chunk_type = chunk_header[4:8]
        logger.debug("PNG found chunk %s", chunk_type.decode("ascii"))
        if chunk_type in (b'', b'IEND'):
            # EOF or explicit end-of-image: no EXIF chunk present.
            break
        if chunk_type == b'eXIf':
            return fh.tell(), fh.read(1)
        length = int.from_bytes(chunk_header[:4], "big")
        fh.seek(fh.tell() + length + 4)  # skip chunk payload plus 4-byte CRC
    raise ExifNotFound("PNG file does not have exif data.")
def _get_xmp(fh: BinaryIO) -> bytes:
    """Scan *fh* line by line for an ``<x:xmpmeta>…</x:xmpmeta>`` block and
    return its raw bytes (empty bytes when no block is found)."""
    xmp_bytes = b''
    logger.debug('XMP not in Exif, searching file for XMP info...')
    in_xml = False
    done = False
    for line in fh:
        start_idx = line.find(b'<x:xmpmeta')
        end_idx = line.find(b'</x:xmpmeta>')
        if start_idx != -1:
            in_xml = True
            line = line[start_idx:]  # drop anything before the opening tag
            logger.debug('XMP found opening tag at line position %s', start_idx)
        if end_idx != -1:
            logger.debug('XMP found closing tag at line position %s', end_idx)
            # Keep the closing tag (12 bytes) and drop any trailing bytes;
            # the index must be re-based when the line was trimmed above.
            line_offset = start_idx if start_idx != -1 else 0
            line = line[:(end_idx - line_offset) + 12]
            done = True
        if in_xml:
            xmp_bytes += line
        if done:
            break
    logger.debug('XMP Finished searching for info')
    return xmp_bytes
def _determine_type(fh: BinaryIO) -> tuple:
    """Sniff the container format of *fh* from its first 12 bytes and return
    ``(exif offset, endian byte, fake_exif flag)``."""
    fake_exif = 0  # by default do not fake an EXIF beginning
    head = fh.read(12)
    if head[0:2] in [b'II', b'MM']:
        # TIFF file
        offset, endian = _find_tiff_exif(fh)
    elif head[4:12] == b'ftypheic':
        # HEIC container
        fh.seek(0)
        heic = HEICExifFinder(fh)
        offset, endian = heic.find_exif()
    elif head[0:4] == b'RIFF' and head[8:12] == b'WEBP':
        offset, endian = _find_webp_exif(fh)
    elif head[0:2] == b'\xFF\xD8':
        # JPEG file
        offset, endian, fake_exif = find_jpeg_exif(fh, head, fake_exif)
    elif head[0:8] == b'\x89PNG\r\n\x1a\n':
        offset, endian = _find_png_exif(fh, head)
    else:
        # file format not recognized
        raise ExifNotFound("File format not recognized.")
    return offset, endian, fake_exif
def process_file(fh: BinaryIO, stop_tag=DEFAULT_STOP_TAG,
                 details=True, strict=False, debug=False,
                 truncate_tags=True, auto_seek=True, extract_thumbnail=True):
    """
    Process an image file (expects an open file object).

    This is the function that has to deal with all the arbitrary nasty bits
    of the EXIF standard.

    Args:
        fh: Open binary file object positioned anywhere (rewound if ``auto_seek``).
        stop_tag: Tag name at which IFD dumping stops early.
        details: Also process MakerNotes and thumbnails.
        strict: Forwarded to ExifHeader; raise on malformed structures.
        debug: Enables extra logging and experimental XMP parsing.
        truncate_tags: Forwarded to ExifHeader; shorten long tag values.
        auto_seek: Seek to the start of ``fh`` before sniffing the format.
        extract_thumbnail: Extract TIFF/JPEG thumbnails when present.

    Returns:
        Dict of tag names to values; empty dict when no EXIF data is found.
    """
    if auto_seek:
        fh.seek(0)

    try:
        offset, endian, fake_exif = _determine_type(fh)
    except ExifNotFound as err:
        logger.warning(err)
        return {}
    except InvalidExif as err:
        logger.debug(err)
        return {}

    endian = chr(ord_(endian[0]))
    # deal with the EXIF info we found
    logger.debug("Endian format is %s (%s)", endian, {
        'I': 'Intel',
        'M': 'Motorola',
        '\x01': 'Adobe Ducky',
        'd': 'XMP/Adobe unknown'
    }[endian])

    hdr = ExifHeader(fh, endian, offset, fake_exif, strict, debug, details, truncate_tags)
    ifd_list = hdr.list_ifd()
    thumb_ifd = 0
    # First IFD is the main image, second (if any) the thumbnail.
    for ctr, ifd in enumerate(ifd_list):
        if ctr == 0:
            ifd_name = 'Image'
        elif ctr == 1:
            ifd_name = 'Thumbnail'
            thumb_ifd = ifd
        else:
            ifd_name = 'IFD %d' % ctr
        logger.debug('IFD %d (%s) at offset %s:', ctr, ifd_name, ifd)
        hdr.dump_ifd(ifd, ifd_name, stop_tag=stop_tag)

    # EXIF IFD
    exif_off = hdr.tags.get('Image ExifOffset')
    if exif_off:
        logger.debug('Exif SubIFD at offset %s:', exif_off.values[0])
        hdr.dump_ifd(exif_off.values[0], 'EXIF', stop_tag=stop_tag)

    # deal with MakerNote contained in EXIF IFD
    # (Some apps use MakerNote tags but do not use a format for which we
    # have a description, do not process these).
    if details and 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags:
        try:
            hdr.decode_maker_note()
        except Exception as err:
            # MakerNote parsing is best-effort: vendor blobs are frequently
            # malformed, so failures are logged instead of raised.
            # (Narrowed from a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid all diagnostics.)
            logger.debug("MakerNote decoding failed: %s", err)

    # extract thumbnails
    if details and thumb_ifd and extract_thumbnail:
        hdr.extract_tiff_thumbnail(thumb_ifd)
        hdr.extract_jpeg_thumbnail()

    # parse XMP tags (experimental)
    if debug and details:
        # Easy we already have them
        xmp_tag = hdr.tags.get('Image ApplicationNotes')
        if xmp_tag:
            logger.debug('XMP present in Exif')
            xmp_bytes = bytes(xmp_tag.values)
        # We need to look in the entire file for the XML
        else:
            xmp_bytes = _get_xmp(fh)
        if xmp_bytes:
            hdr.parse_xmp(xmp_bytes)
    return hdr.tags
/OASYS1-XRayServer-1.0.36.tar.gz/OASYS1-XRayServer-1.0.36/orangecontrib/xrayserver/widgets/xrayserver/ow_ter_sl.py | __author__ = "Luca Rebuffi"
import numpy
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QImage,QPixmap, QPainter, QPalette, QFont, QColor
from PyQt5.QtWidgets import QLabel, QWidget
from PyQt5 import QtGui, QtWidgets
import orangecanvas.resources as resources
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets.exchange import DataExchangeObject
from oasys.util.oasys_util import ShowTextDialog
import urllib
from http import server
from orangecontrib.xrayserver.util.xrayserver_util import HttpManager, XRayServerGui, XRayServerPhysics, ShowHtmlDialog
from orangecontrib.xrayserver.widgets.xrayserver.list_utility import ListUtility
from orangecontrib.xrayserver.widgets.gui.ow_xrayserver_widget import XrayServerWidget, XrayServerException
APPLICATION = "/cgi/ter_form.pl"
class TER_SL(XrayServerWidget):
    """OASYS widget wrapping the remote X-ray Server ``TER_SL`` CGI application.

    Builds a two-tab request form (template presets + detailed input options),
    submits the query via HTTP GET to ``/cgi/ter_form.pl`` and plots the
    returned specular-reflectivity curve and, optionally, the standing-waves
    image.  All form fields are persisted between sessions through Orange
    ``Setting`` descriptors.
    """

    # --- Orange widget metadata ------------------------------------------
    name = "TER_SL"
    description = "TER_SL"
    icon = "icons/tersl.png"
    maintainer = "Luca Rebuffi"
    maintainer_email = "luca.rebuffi(@at@)elettra.eu"
    priority = 4
    category = "TER_SL"
    keywords = ["data", "file", "load", "read"]
    want_main_area = 1
    outputs = [{"name": "xrayserver_data",
                "type": DataExchangeObject,
                "doc": "xrayserver_data",
                "id": "xrayserver_data"}, ]

    # --- persisted form state (mirrors the TER_SL web form fields) -------
    ter_sl_form = Setting(0)      # template preset index (0-3)
    xway = Setting(0)             # how the X-ray energy is specified
    wave = Setting(1.540562)      # wavelength/energy/angle value (meaning depends on xway)
    line = Setting("Cu-Ka1")      # characteristic line name when xway == 3
    ipol = Setting(0)             # polarization index (Sigma/Pi/Mixed)
    subway = Setting(0)           # substrate specification mode (crystal/formula/susceptibility)
    code = Setting("Silicon")     # crystal code for subway == 0
    df1df2 = Setting(0)           # anomalous-scattering database selector
    chem = Setting("")            # chemical formula for subway == 1
    rho = Setting(0.0)            # density (g/cm3) for subway == 1
    x0 = Setting(0.0)             # susceptibility for subway == 2
    w0 = Setting(1.0)             # susceptibility correction factor
    sigma = Setting(4.0)          # surface roughness [Angstrom]
    tr = Setting(0.0)             # transition-layer thickness [Angstrom]
    scanmin = Setting(0.0)        # incidence-angle scan start
    scanmax = Setting(2.0)        # incidence-angle scan end
    unis = Setting(0)             # angular units selector (see decode_um)
    nscan = Setting(401)          # number of scan points
    swflag = Setting(0)           # standing-waves computation on/off
    swref=Setting(0)              # standing-waves reference interface (0 = surface)
    swmin=Setting(0.0)            # standing-waves start offset [Angstrom]
    swmax=Setting(1000.0)         # standing-waves end offset [Angstrom]
    swpts=Setting(101)            # number of standing-waves offsets (max 401)
    profile = Setting("")         # optional top-layer profile text

    def __init__(self):
        """Build the whole request form GUI and initialize the result tabs."""
        super().__init__()

        left_box_1 = oasysgui.widgetBox(self.controlArea, "TER_SL Request Form", addSpace=False, orientation="vertical",
                                         width=500, height=680)

        self.central_tabs = oasysgui.tabWidget(left_box_1)
        tab_template = oasysgui.createTabPage(self.central_tabs, "Template Options")
        tab_input = oasysgui.createTabPage(self.central_tabs, "Input Options")

        left_box_1_1 = oasysgui.widgetBox(tab_template, "", addSpace=False, orientation="vertical", width=480, height=670)

        gui.separator(left_box_1_1)

        # Template selector: choosing a preset rewrites the input form defaults.
        ter_sl_box = oasysgui.widgetBox(left_box_1_1, "", addSpace=False, orientation="horizontal", width=470, height=220)

        gui.radioButtons(ter_sl_box, self, "ter_sl_form",
                         ["Specular reflection from perfect reflectors",
                          "Specular reflection from multilayers",
                          "Specular reflection from perfect reflectors + standing waves",
                          "Specular reflection from multilayers + standing waves"],
                         callback=self.set_TerSLForm)

        # -------------------------------------------------------------
        # -------------------------------------------------------------
        # -------------------------------------------------------------

        gui.separator(tab_input)

        # X-ray specification (wavelength / energy / Bragg angle / line).
        left_box_2 = oasysgui.widgetBox(tab_input, "", addSpace=False, orientation="vertical", width=470)

        left_box_2_1 = oasysgui.widgetBox(left_box_2, "", addSpace=False, orientation="horizontal", width=470)

        gui.comboBox(left_box_2_1, self, "xway", label="X-rays specified by",
                     items=["Wavelength (Å)", "Energy (keV)", "Bragg angle (deg)", "X-ray line"],
                     callback=self.set_xway, sendSelectedValue=False, orientation="horizontal")

        self.box_wave = oasysgui.widgetBox(left_box_2_1, "", addSpace=False, orientation="horizontal", width=100)
        oasysgui.lineEdit(self.box_wave, self, "wave", label="", labelWidth=0, addSpace=False, valueType=float, orientation="horizontal")

        self.box_line = oasysgui.widgetBox(left_box_2_1, "", addSpace=False, orientation="horizontal", width=100)
        XRayServerGui.combobox_text(self.box_line, self, "line", label="", labelWidth=0,
                                    items=self.get_lines(),
                                    sendSelectedValue=True, orientation="horizontal", selectedValue=self.line)

        button = gui.button(self.box_line, self, "?", callback=self.help_lines)
        button.setFixedWidth(15)

        self.set_xway()

        gui.comboBox(left_box_2_1, self, "ipol", label="Polarization",
                     items=["Sigma", "Pi", "Mixed"], sendSelectedValue=False, orientation="horizontal")

        # -------------------------------------------------------------

        # Substrate specification: one of three mutually-exclusive modes,
        # selected through the radio buttons bound to ``subway``.
        left_box_3 = oasysgui.widgetBox(tab_input, "", addSpace=True, orientation="vertical", width=470)

        gui.separator(left_box_3, height=4)

        left_box_3_top = oasysgui.widgetBox(left_box_3, "", addSpace=False, orientation="horizontal", width=470)

        gui.label(left_box_3_top, self, "Substrate:")

        left_box_3_content = oasysgui.widgetBox(left_box_3, "", addSpace=False, orientation="horizontal", width=470)

        left_box_3_left = oasysgui.widgetBox(left_box_3_content, "", addSpace=False, orientation="vertical", width=20)
        left_box_3_right = oasysgui.widgetBox(left_box_3_content, "", addSpace=False, orientation="vertical", width=445)

        gui.radioButtons(left_box_3_left, self, "subway", [" ", " ", " "], callback=self.set_subway)

        # Mode 0: substrate chosen by crystal code.
        self.left_box_3_1 = oasysgui.widgetBox(left_box_3_right, "", addSpace=False, orientation="horizontal", width=445)

        XRayServerGui.combobox_text(self.left_box_3_1, self, "code", label="Crystal", labelWidth=45,
                                    items=self.get_crystals(),
                                    sendSelectedValue=True, orientation="horizontal", selectedValue=self.code)

        button = gui.button(self.left_box_3_1, self, "?", callback=self.help_crystals)
        button.setFixedWidth(15)

        gui.comboBox(self.left_box_3_1, self, "df1df2", label=" ", labelWidth=1,
                     items=["Auto DB for f\', f\'\'",
                            "X0h data (0.5-2.5 A)",
                            "Henke (0.4-1200 A)",
                            "Brennan (0.02-400 A)"],
                     sendSelectedValue=False, orientation="horizontal")

        # Mode 1: substrate given as chemical formula + density.
        self.left_box_3_2 = oasysgui.widgetBox(left_box_3_right, "", addSpace=False, orientation="horizontal", width=445)

        oasysgui.lineEdit(self.left_box_3_2, self, "chem", label="Chemical Formula", labelWidth=110, addSpace=False, valueType=str, orientation="horizontal", callback=self.set_rho)
        oasysgui.lineEdit(self.left_box_3_2, self, "rho", label=u"\u03C1" + " (g/cm3)", labelWidth=60, addSpace=False, valueType=float, orientation="horizontal")

        # Mode 2: substrate given directly as a susceptibility value.
        self.left_box_3_3 = oasysgui.widgetBox(left_box_3_right, "", addSpace=False, orientation="vertical", width=445)

        left_box_3_3_1 = oasysgui.widgetBox(self.left_box_3_3, "", addSpace=False, orientation="horizontal", width=445)
        oasysgui.lineEdit(left_box_3_3_1, self, "x0", label="Susceptibility x0 (", labelWidth=130, addSpace=False, valueType=float, orientation="horizontal")
        gui.label(left_box_3_3_1, self, " ) format: x0=(Re(x0), Im(x0))", labelWidth=230 )

        left_box_3_3_2 = oasysgui.widgetBox(self.left_box_3_3, "", addSpace=False, orientation="horizontal", width=445)
        oasysgui.lineEdit(left_box_3_3_2, self, "w0", label="x0 correction: w0", labelWidth=130, addSpace=False, valueType=float, orientation="horizontal")
        gui.label(left_box_3_3_2, self, " this is used as: x0 = w0 * x0", labelWidth=230)

        left_box_3_3_3 = oasysgui.widgetBox(self.left_box_3_3, "", addSpace=False, orientation="horizontal", width=445)
        oasysgui.lineEdit(left_box_3_3_3, self, "sigma", label="Roughness: sigma [Å]", labelWidth=130, addSpace=False, valueType=float, orientation="horizontal")
        gui.label(left_box_3_3_3, self, " OR ")
        oasysgui.lineEdit(left_box_3_3_3, self, "tr", label="Transition layer tr [Å]", labelWidth=120, addSpace=False, valueType=float, orientation="horizontal")

        self.set_subway()

        # -------------------------------------------------------------

        # Incidence-angle scan range, units and number of points.
        left_box_4 = oasysgui.widgetBox(tab_input, "", addSpace=False, orientation="horizontal", width=470)

        left_box_4_1 = oasysgui.widgetBox(left_box_4, "", addSpace=False, orientation="vertical", width=470)

        gui.label(left_box_4_1, self, "Incidence angle limits:")

        left_box_4_1_1 = oasysgui.widgetBox(left_box_4_1, "", addSpace=False, orientation="horizontal", width=470)

        oasysgui.lineEdit(left_box_4_1_1, self, "scanmin", label="  From", labelWidth=70, addSpace=False, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(left_box_4_1_1, self, "scanmax", label="To", labelWidth=15, addSpace=False, valueType=float, orientation="horizontal")

        gui.comboBox(left_box_4_1_1, self, "unis", label=" ", labelWidth=1,
                     items=["degr.",
                            "min.",
                            "mrad.",
                            "sec.",
                            "urad"],
                     sendSelectedValue=False, orientation="horizontal")

        oasysgui.lineEdit(left_box_4_1_1, self, "nscan", label="Points", labelWidth=40, addSpace=False, valueType=int, orientation="horizontal")

        # -------------------------------------------------------------
        # -------------------------------------------------------------

        # Standing-waves options; the "hidden" sibling box keeps the layout
        # height constant when this section is not shown.
        self.standing_waves_box = oasysgui.widgetBox(tab_input, "", addSpace=False, orientation="vertical", width=470, height=140)
        self.standing_waves_box_hidden = oasysgui.widgetBox(tab_input, "", addSpace=False, orientation="horizontal", width=470, height=140)

        gui.separator(self.standing_waves_box, 10)

        standing_waves_box_1 = oasysgui.widgetBox(self.standing_waves_box, "", addSpace=False, orientation="horizontal", width=120)

        gui.checkBox(standing_waves_box_1, self, "swflag", "", callback=self.set_swflag)
        gui.label(standing_waves_box_1, self, "Standing waves:")

        self.standing_waves_box_2 = oasysgui.widgetBox(self.standing_waves_box, "", addSpace=False, orientation="vertical", width=470)

        standing_waves_box_2_1 = oasysgui.widgetBox(self.standing_waves_box_2, "", addSpace=False, orientation="horizontal", width=270)
        oasysgui.lineEdit(standing_waves_box_2_1, self, "swref", label="Reference interface", labelWidth=130, addSpace=False, valueType=int, orientation="horizontal")
        gui.label(standing_waves_box_2_1, self, " (0=surface)")

        standing_waves_box_2_2 = oasysgui.widgetBox(self.standing_waves_box_2, "", addSpace=False, orientation="horizontal", width=270)
        oasysgui.lineEdit(standing_waves_box_2_2, self, "swmin", label="Start offset", labelWidth=130, addSpace=False, valueType=float, orientation="horizontal")
        gui.label(standing_waves_box_2_2, self, " [Å]", labelWidth=70)

        standing_waves_box_2_3 = oasysgui.widgetBox(self.standing_waves_box_2, "", addSpace=False, orientation="horizontal", width=270)
        oasysgui.lineEdit(standing_waves_box_2_3, self, "swmax", label="End offset", labelWidth=130, addSpace=False, valueType=float, orientation="horizontal")
        gui.label(standing_waves_box_2_3, self, " [Å]", labelWidth=70)

        standing_waves_box_2_4 = oasysgui.widgetBox(self.standing_waves_box_2, "", addSpace=False, orientation="horizontal", width=270)
        # NOTE(review): swpts is an integer count (Setting(101), "max = 401") but the
        # editor uses valueType=float — looks like it should be int; confirm.
        oasysgui.lineEdit(standing_waves_box_2_4, self, "swpts", label="Number of offsets", labelWidth=130, addSpace=False, valueType=float, orientation="horizontal")
        gui.label(standing_waves_box_2_4, self, " (max = 401)")

        self.set_swflag()

        gui.separator(tab_input)

        # -------------------------------------------------------------

        # Optional free-text top-layer profile plus a read-only list of the
        # crystal codes usable inside the profile.
        box_top = oasysgui.widgetBox(tab_input, "", addSpace=False, orientation="vertical", width=470)

        box_top_0 = oasysgui.widgetBox(box_top, "", addSpace=False, orientation="horizontal", width=250)

        gui.label(box_top_0, self, "Top layer profile (optional):")

        button = gui.button(box_top_0, self, "? (sintax)", callback=self.help_profile)
        button.setFixedWidth(90)

        box_top_1 = oasysgui.widgetBox(box_top, "", addSpace=False, orientation="horizontal", width=470)

        self.profile_area = QtWidgets.QTextEdit()
        self.profile_area.setStyleSheet("background-color: white;")
        self.profile_area.setMaximumHeight(240)
        self.profile_area.setMaximumWidth(335)
        self.profile_area.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)

        box_top_1.layout().addWidget(self.profile_area)

        gui.separator(box_top_1)

        box_top_labels = oasysgui.widgetBox(box_top_1, "", addSpace=False, orientation="vertical", width=130)

        gui.label(box_top_labels, self, "Available Codes:")

        crystals_area = QtWidgets.QTextEdit()
        crystals_area.setStyleSheet("background-color: white;")
        crystals_area.setMaximumHeight(295)
        crystals_area.setMaximumWidth(130)
        crystals_area.setText("\n".join(ListUtility.get_list("crystals")))
        crystals_area.setReadOnly(True)

        box_top_labels.layout().addWidget(crystals_area)

        gui.separator(box_top)

        # -----------------------------------------------------------

        button = gui.button(tab_input, self, "Submit Query!", callback=self.submit)
        button.setFixedHeight(30)

        gui.rubber(self.controlArea)

        self.tabs = []
        self.tabs_widget = oasysgui.tabWidget(self.mainArea)

        self.initializeTabs()

        self.set_TerSLForm(change_values=False, switch_page=False)

        # Keep the persisted ``profile`` Setting in sync with the text area.
        self.profile_area.textChanged.connect(self.set_profile)

    def set_profile(self):
        """Mirror the profile text area into the persisted ``profile`` Setting."""
        self.profile = self.profile_area.toPlainText()

    def set_TerSLForm(self, change_values=True, switch_page=True):
        """Apply the selected template preset (``ter_sl_form``).

        :param change_values: when True, reset all form fields to the preset
            defaults; when False only adjust widget visibility.
        :param switch_page: when True, jump to the "Input Options" tab.
        """
        # Standing-waves section is only relevant for presets 2 and 3.
        self.standing_waves_box.setVisible(self.ter_sl_form == 2 or self.ter_sl_form==3)
        self.standing_waves_box_hidden.setVisible(self.ter_sl_form < 2)

        if change_values:
            # Common defaults shared by all presets.
            self.xway=0
            self.wave=1.540562
            self.ipol=0
            self.subway = 0
            self.code = "Silicon"
            self.df1df2 = 0
            self.chem = ""
            self.rho = 0.0
            self.x0 = 0.0
            self.w0 = 1.0
            self.sigma = 4.0
            self.tr = 0.0
            self.unis = 0
            self.swflag = 0
            self.swref=0
            self.swmin=0.0
            self.swmax=1000.0
            self.swpts=101
            self.profile_area.setText("")

        # Preset-specific overrides (multilayer presets also fill in a
        # sample GaAs/AlAs superlattice profile).
        if self.ter_sl_form==0:
            if change_values:
                self.scanmin = 0.0
                self.scanmax = 2.0
                self.nscan = 401
        elif self.ter_sl_form==1:
            if change_values:
                self.code = "GaAs"
                self.scanmin = 0.0
                self.scanmax = 3.0
                self.nscan = 601
                self.profile_area.setText("   t=20 w0=0.5 sigma=5 ;surface oxide, organic contamination or dust\n" + \
                                          "   period=20\n"+ \
                                          "   t=100 code=GaAs sigma=4\n"+ \
                                          "   t=70 code=AlAs sigma=4\n"+ \
                                          "   end period")
        elif self.ter_sl_form==2:
            if change_values:
                self.scanmin = 0.0
                self.scanmax = 2.0
                self.nscan = 401
                self.swflag = 1
        elif self.ter_sl_form==3:
            if change_values:
                self.code = "GaAs"
                self.scanmin = 0.0
                self.scanmax = 3.0
                self.nscan = 601
                self.swflag = 1
                self.profile_area.setText("   t=20 w0=0.5 sigma=5 ;surface oxide, organic contamination or dust\n" + \
                                          "   period=20\n"+ \
                                          "   t=100 code=GaAs sigma=4\n"+ \
                                          "   t=70 code=AlAs sigma=4\n"+ \
                                          "   end period")

        self.set_subway()
        self.set_xway()
        self.set_swflag()

        if switch_page: self.central_tabs.setCurrentIndex(1)

    def set_xway(self):
        """Show the numeric editor or the line selector depending on ``xway``."""
        self.box_wave.setVisible(self.xway!=3)
        self.box_line.setVisible(self.xway==3)

    def set_subway(self):
        """Enable only the substrate-input row matching the ``subway`` mode."""
        self.left_box_3_1.setEnabled(self.subway == 0)
        self.left_box_3_2.setEnabled(self.subway == 1)
        self.left_box_3_3.setEnabled(self.subway == 2)

    def set_rho(self):
        """Auto-fill the density field from the chemical formula, if any."""
        if not self.chem is None:
            if not self.chem.strip() == "":
                self.chem = self.chem.strip()
                self.rho = XRayServerPhysics.getMaterialDensity(self.chem)

    def set_swflag(self):
        """Enable the standing-waves parameter editors only when swflag is set."""
        self.standing_waves_box_2.setEnabled(self.swflag==1)

    def help_profile(self):
        """Pop up a dialog describing the top-layer profile syntax."""
        ShowTextDialog.show_text("Top Layer Profile Sintax",
                                 "period=\nt= sigma= da/a= code= x= code2= x2= code3= x3= code4= x0= xh= xhdf= w0= wh=\nend period",
                                 height=150, parent=self)

    def initializeTabs(self):
        """(Re)create the two fixed-size result tabs, keeping the current selection."""
        current_tab = self.tabs_widget.currentIndex()

        size = len(self.tabs)

        # Remove from the end to keep indices valid while deleting.
        for index in range(0, size):
            self.tabs_widget.removeTab(size-1-index)

        self.tabs = [gui.createTabPage(self.tabs_widget, "Reflectivity Plot"),
                     gui.createTabPage(self.tabs_widget, "Standing Waves Plot")]

        for tab in self.tabs:
            tab.setFixedHeight(650)
            tab.setFixedWidth(650)

        self.plot_canvas = [None]

        self.tabs_widget.setCurrentIndex(current_tab)

    def submit(self):
        """Collect the form values, query the X-ray Server and emit the result.

        On success sends a ``DataExchangeObject`` on the ``xrayserver_data``
        channel; on failure shows an error dialog instead of raising.
        """
        self.progressBarInit()

        self.setStatusMessage("Submitting Request")

        self.checkFields()

        parameters = {}

        # Map widget state onto the CGI form fields; several indices are
        # re-encoded because the server uses different numbering.
        parameters.update({"xway" : self.decode_xway()})
        parameters.update({"wave" : str(self.wave)})
        parameters.update({"line" : self.line})
        parameters.update({"ipol" : str(self.ipol + 1)})

        parameters.update({"subway" : str(self.subway + 1)})

        parameters.update({"code" : self.code})
        parameters.update({"df1df2" : self.decode_df1df2()})
        parameters.update({"chem" : self.chem})
        parameters.update({"rho" : str(self.rho)})
        parameters.update({"x0" : str(self.x0)})
        parameters.update({"w0" : str(self.w0)})
        parameters.update({"sigma" : str(self.sigma)})
        parameters.update({"tr" : str(self.tr)})

        parameters.update({"scanmin" : str(self.scanmin)})
        parameters.update({"scanmax" : str(self.scanmax)})
        parameters.update({"unis" : str(self.unis)})
        parameters.update({"nscan" : str(self.nscan)})

        parameters.update({"swflag" : self.decode_swflag()})

        if self.swflag == True or self.swflag == 1:
            parameters.update({"swref" : str(self.swref)})
            parameters.update({"swmin" : str(self.swmin)})
            parameters.update({"swmax" : str(self.swmax)})
            parameters.update({"swpts" : str(self.swpts)})

        parameters.update({"profile" : self.profile})

        try:
            self.progressBarSet(10)

            response = HttpManager.send_xray_server_request_GET(APPLICATION, parameters)

            self.progressBarSet(50)

            data = self.extract_plots(response)

            exchange_data = DataExchangeObject("XRAYSERVER", "TER_SL")
            exchange_data.add_content("ter_sl_result", data)
            exchange_data.add_content("ter_sl_result_units_to_degrees", self.get_units_to_degrees())

            self.send("xrayserver_data", exchange_data)

        except urllib.error.HTTPError as e:
            ShowTextDialog.show_text("Error", 'The server couldn\'t fulfill the request.\nError Code: '
                                     + str(e.code) + "\n\n" +
                                     server.BaseHTTPRequestHandler.responses[e.code][1], parent=self)
        except urllib.error.URLError as e:
            # NOTE(review): e.reason may be an exception object rather than a
            # str, in which case this concatenation raises TypeError — confirm.
            ShowTextDialog.show_text("Error", 'We failed to reach a server.\nReason: ' + e.reason, parent=self)
        except XrayServerException as e:
            ShowHtmlDialog.show_html("X-ray Server Error", e.response, width=750, height=500, parent=self)
        except Exception as e:
            ShowTextDialog.show_text("Error", 'Error Occurred.\nReason: ' + str(e), parent=self)

        self.setStatusMessage("")
        self.progressBarFinished()

    def checkFields(self):
        """Hook for input validation before submit (currently a no-op)."""
        pass

    def decode_xway(self):
        """Translate the ``xway`` combo index to the server's field value."""
        if self.xway == 0: return "1"
        elif self.xway == 1: return "2"
        elif self.xway == 2: return "4"
        elif self.xway == 3: return "3"

    def decode_df1df2(self):
        """Translate the ``df1df2`` combo index to the server's field value."""
        if self.df1df2 == 0: return "-1"
        elif self.df1df2 == 1: return "0"
        elif self.df1df2 == 2: return "2"
        elif self.df1df2 == 3: return "4"

    def decode_um(self):
        """Return the human-readable name of the selected angular unit."""
        if self.unis == 0: return "degrees"
        elif self.unis == 1: return "arcmin"
        elif self.unis == 2: return "mrad"
        elif self.unis == 3: return "arcsec"
        elif self.unis == 4: return "urad"

    def decode_swflag(self):
        """Return "1"/"0" for the standing-waves checkbox state."""
        if self.swflag == True or self.swflag == 1:
            return "1"
        else:
            return "0"

    def get_units_to_degrees(self):
        """Conversion factor from the selected angular unit to degrees."""
        if self.unis == 0: # degrees
            return 1.0
        elif self.unis == 1: #arcmin
            return 0.0166667
        elif self.unis == 2: #mrad
            return 57.2957795e-3
        elif self.unis == 3: # ARCSEC
            return 0.000277777805
        elif self.unis == 4: #urad
            return 57.2957795e-6
        else:
            return numpy.nan

    def extract_plots(self, response):
        """Parse the server response, plot the results and return [x, y] data."""
        self.setStatusMessage("Plotting Results")

        x_1, y_1 = self.get_data_file_from_response(response)
        self.plot_histo(x_1, y_1, 85, 0, 0, "Reflectivity", "Choosen Scan Variable", "Reflectivity")

        self.tabs_widget.setCurrentIndex(0)

        figure_url = self.extract_2D_image_from_response(response)
        self.plot_image(figure_url, 95, 1)

        # Focus the standing-waves tab only if an image was actually produced.
        if not figure_url is None: self.tabs_widget.setCurrentIndex(1)

        return [x_1, y_1]

    def extract_2D_image_from_response(self, response):
        """Return the URL of the standing-waves PNG, or None if absent.

        Scans the HTML response for the job id and then for the matching
        ``<jobid>_sw.png`` image tag.  Raises if no job id can be found.
        """
        rows = response.split("\n")

        job_id = None
        for row in rows:
            if "Job ID" in row:
                job_id = (row.split("<b>"))[1].split("</b>")[0]

            if not job_id is None:
                if job_id+"_sw.png" in row:
                    return (row.split("src=\"")[1]).split("\"")[0]

        if job_id is None:
            raise Exception("Job ID not present")

        return None

    def plot_image(self, image_url, progressBarValue, tabs_canvas_index):
        """Replace the content of a result tab with the downloaded figure.

        When *image_url* is None a placeholder image is shown instead.
        """
        layout = self.tabs[tabs_canvas_index].layout()

        # Clear the previous content of the tab before re-populating it.
        for i in reversed(range(layout.count())):
            widgetToRemove = layout.itemAt( i ).widget()
            layout.removeWidget(widgetToRemove)
            widgetToRemove.setParent( None )

        if image_url is None:
            label = QLabel(self.tabs[tabs_canvas_index])
            label.setPixmap(QPixmap(QImage(resources.package_dirname("orangecontrib.xrayserver.widgets.xrayserver") +
                                           "/icons/no_standing_waves_result.png")))

            layout.addWidget(label)
        else:
            layout.addWidget(FigureWidget(image_url))

        self.tabs[tabs_canvas_index].setLayout(layout)
class FigureWidget(QWidget):
    """Fixed-size widget showing the standing-waves PNG with axis captions.

    The image is downloaded synchronously from the X-ray Server at
    construction time (no caching, no error recovery here — failures
    propagate to the caller).
    """

    def __init__(self, image_url):
        """Build the widget and load the PNG at *image_url* (server-relative path)."""
        super(FigureWidget, self).__init__()

        self.setFixedWidth(600)
        self.setFixedHeight(600)

        box_general = oasysgui.widgetBox(self, "", addSpace=False, orientation="vertical", width=600, height=600)

        gui.separator(box_general, height=30)

        # Title row.
        box_top = oasysgui.widgetBox(box_general, "", addSpace=False, orientation="vertical", width=600, height=50)

        title = gui.label(box_top, self, "                                     Standing Waves plot")

        font = QFont(title.font())
        font.setBold(True)
        font.setPointSize(36)
        palette = QPalette(title.palette())
        palette.setColor(QPalette.Foreground, QColor('blue'))
        title.setFont(font)
        title.setPalette(palette)

        gui.separator(box_general, height=10)

        # Central row: rotated y-axis caption + the downloaded image.
        box_center = oasysgui.widgetBox(box_general, "", addSpace=False, orientation="horizontal", width=600)

        box_label = oasysgui.widgetBox(box_center, "", addSpace=False, orientation="vertical", width=50)
        oasysgui.widgetBox(box_label, "", addSpace=False, orientation="vertical", height=50)

        label_y_axis = VerticalLabel("Incidence Angle", 200, 50)
        font = QFont(label_y_axis.font())
        font.setBold(True)
        font.setPointSize(24)
        label_y_axis.setFont(font)
        #label_y_axis.setFixedHeight(200)
        #label_y_axis.setFixedWidth(50)

        box_label.layout().addWidget(label_y_axis)

        image_label = QLabel(box_center)
        image = QImage()
        # Blocking HTTP fetch of the raw PNG bytes.
        image.loadFromData(HttpManager.send_xray_server_direct_request("/" + image_url, decode=False))
        image_label.setPixmap(QPixmap(image))

        box_center.layout().addWidget(image_label)

        # Bottom row: x-axis caption.
        box_bottom = oasysgui.widgetBox(box_general, "", addSpace=False, orientation="horizontal", width=600)

        label_x_axis = gui.label(box_bottom, self, "                                                     Offset [Å]")

        font = QFont(label_x_axis.font())
        font.setBold(True)
        font.setPointSize(24)

        label_x_axis.setFont(font)
class VerticalLabel(QLabel):
    """QLabel that paints its text rotated 90° counter-clockwise.

    Used as a vertical y-axis caption; width and height are intentionally
    swapped relative to the text's natural orientation.
    """

    def __init__(self, text, length, height):
        QLabel.__init__(self)

        # NOTE(review): these attributes shadow QLabel.text()/QWidget.height();
        # works here because the methods are not called on instances — confirm.
        self.text = text
        self.length = length
        self.height = height

        # Swapped on purpose: the label is drawn rotated.
        self.setFixedHeight(length)
        self.setFixedWidth(height)

    def paintEvent(self, event):
        """Draw the text rotated -90° (bottom-to-top), in black."""
        painter = QPainter(self)
        painter.setPen(Qt.black)
        # Order matters: translate to the bottom-left corner first, then rotate.
        painter.translate(20, self.length-1)
        painter.rotate(-90)
        if self.text:
            painter.drawText(0, 0, self.text)
        painter.end()

    def minimumSizeHint(self):
        # Swap width/height of the base hint to account for the rotation.
        size = QLabel.minimumSizeHint(self)
        return QSize(size.height(), size.width())

    def sizeHint(self):
        # Swap width/height of the base hint to account for the rotation.
        size = QLabel.sizeHint(self)
        return QSize(size.height(), size.width())
if __name__ == "__main__":
    # Stand-alone launcher for manually testing the widget outside OASYS.
    # Fix: the original last line carried stray " | PypiClean" text, which
    # raised NameError after the event loop exited.
    import sys

    app = QtWidgets.QApplication(sys.argv)
    w = TER_SL()
    w.show()
    app.exec()
    # Persist the widget's Setting fields once the event loop returns.
    w.saveSettings()
/FastFlask-1.2.32-py3-none-any.whl/colorama/win32.py |
# from winbase.h
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    # ctypes.WinDLL only exists on Windows; elsewhere this raises
    # AttributeError and the stubs below are installed instead.
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Non-Windows (or broken ctypes): expose no-op stubs so callers can
    # still import this module.  Note winapi_test() then returns None (falsy).
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER

    # Console coordinate struct (X/Y SHORTs), as defined by the Win32 API.
    COORD = wintypes._COORD
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h.

        ctypes mirror of the Win32 CONSOLE_SCREEN_BUFFER_INFO structure;
        field order and types must match the C ABI exactly.
        """
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Compact debugging representation of all fields.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
_SetConsoleTitleW.argtypes = [
wintypes.LPCWSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
def _winapi_test(handle):
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def winapi_test():
return any(_winapi_test(h) for h in
(_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = _GetStdHandle(stream_id)
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = _GetStdHandle(stream_id)
return _SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        """Move the console cursor.

        *position* is an ANSI-style, 1-based (row, column) pair; it is
        converted to the 0-based (x, y) convention the Win32 API expects.
        When *adjust* is True the position is shifted by the viewport's
        scroll offset so ANSI coordinates map onto the visible window.
        """
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = _GetStdHandle(stream_id)
        return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = _GetStdHandle(stream_id)
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = _GetStdHandle(stream_id)
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
def SetConsoleTitle(title):
return _SetConsoleTitleW(title) | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ticketing/model/ticketing_attachment_endpoint_request.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
    """Import AttachmentRequest lazily and publish it in module globals.

    Deferred to call time to avoid a circular import at module load;
    openapi_types() invokes this before referencing the class.
    """
    from MergePythonSDK.ticketing.model.attachment_request import AttachmentRequest
    globals()['AttachmentRequest'] = AttachmentRequest
class TicketingAttachmentEndpointRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Fix applied: the final assignment in ``__init__`` was corrupted by stray
    " | PypiClean" text, which raised NameError on every instantiation.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted or range-validated fields for this model.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()

        defined_types = {
            'model': (AttachmentRequest,),  # noqa: E501
        }
        return defined_types

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'model': 'model',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, model, *args, **kwargs):  # noqa: E501
        """TicketingAttachmentEndpointRequest - a model defined in OpenAPI

        Args:
            model (AttachmentRequest):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.model = model
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, model, *args, **kwargs):  # noqa: E501
        """TicketingAttachmentEndpointRequest - a model defined in OpenAPI

        Args:
            model (AttachmentRequest):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.model: Union["AttachmentRequest"] = model
/InowasFlopyAdapter-1.5.0.tar.gz/InowasFlopyAdapter-1.5.0/FlopyAdapter/Statistics/HobStatistics.py | import json
import numpy as np
import os
from scipy import stats
from sklearn.metrics import r2_score
class HobStatistics:
    """Compute goodness-of-fit statistics from a MODFLOW HOB output file.

    Reads ``<name>.hob.out`` inside the model workspace *model_ws* and
    writes the computed statistics as JSON to ``<name>.hob.stat``.
    """

    def __init__(self, model_ws, name):
        # model_ws: model workspace directory; name: model file stem
        self._model_ws = model_ws
        self._name = name
        self._input_file = os.path.join(model_ws, name) + '.hob.out'
        self._output_file = os.path.join(model_ws, name) + '.hob.stat'

    def write_files(self):
        """Calculate the statistics and dump them as JSON to the .hob.stat file.

        Calculation errors are reported inside the JSON payload instead of
        being raised, so a broken HOB file never crashes the caller.
        """
        with open(self._output_file, 'w') as outfile:
            try:
                result = self.calculate()
            except Exception:
                # was a bare 'except:' - keep the best-effort behaviour but
                # no longer swallow SystemExit/KeyboardInterrupt
                result = {"error": 'Error in Hob-Calculation.'}
            finally:
                json.dump(result, outfile)

    @staticmethod
    def calculate_npf(x, n):
        """Normal probability plotting position for rank *x* of *n* samples.

        Uses Blom's constant a = 3/8 for small ranks and a = 0.5 otherwise.
        NOTE(review): the switch tests the rank ``x < 11``; plotting-position
        formulas conventionally switch on the sample size *n* - confirm intent.
        """
        a = 3 / 8 if x < 11 else 0.5
        return stats.norm.ppf((x - a) / (n + 1 - 2 * a))

    def calculate(self):
        """Parse the HOB output file and return a dict of fit statistics.

        Returns ``{"error": ...}`` when the input file does not exist.
        """
        if not os.path.isfile(self._input_file):
            return {"error": 'File ' + self._input_file + ' not found.'}

        names = []
        observed = []
        simulated = []
        header = False
        # use a context manager so the file is closed deterministically
        # (the original left the handle open)
        with open(self._input_file) as f:
            for line in f:
                if line.startswith('#'):
                    continue
                if not header:
                    # first non-comment line holds the quoted column names
                    header = line.split('"')[1::2]
                    continue
                values = line.split()
                simulated.append(float(values[0]))
                observed.append(float(values[1]))
                # strip the trailing "_<n>" suffix from the observation name
                names.append('_'.join(values[2].split('_')[:-1]))

        simulated = np.array(simulated)
        observed = np.array(observed)
        # residuals are used by almost every statistic below - compute once
        residuals = simulated - observed

        # Write to statistics object
        statistics = dict(
            observed=list(observed),
            simulated=list(simulated),
            n=len(observed),
            rMax=np.max(np.abs(residuals)),
            rMin=np.min(np.abs(residuals)),
            rMean=np.mean(residuals),
            absRMean=np.mean(np.abs(residuals)),
            sse=stats.sem(residuals),
            rmse=np.sqrt((residuals ** 2).mean()),
            R=stats.pearsonr(observed, simulated)[0],
            R2=r2_score(observed, simulated)
        )
        statistics["nrmse"] = statistics["rmse"] / (np.max(observed) - np.min(observed))

        # Plot simulated vs. observed values
        statistics["Z"] = 1.96  # 95% confidence interval multiplier
        statistics["stdObserved"] = np.std(observed)
        statistics["deltaStd"] = statistics["Z"] * statistics["stdObserved"] / np.sqrt(statistics["n"])

        # Plot (weighted) residuals vs. simulated heads
        statistics["weightedResiduals"] = list(residuals)
        statistics["linRegressSW"] = stats.linregress(simulated, statistics["weightedResiduals"])

        # Check for normal distribution of the residuals
        statistics["rankedResiduals"] = list(np.sort(residuals))
        n = statistics["n"]
        npf = np.linspace(1, n, num=n)
        npf = list(map(lambda x, size: self.calculate_npf(x, size), npf, np.ones(n) * n))
        statistics["npf"] = npf
        statistics["linRegressRN"] = stats.linregress(statistics["rankedResiduals"], statistics["npf"])

        return statistics
/Mesa_Adapted-0.8.7.3-py3-none-any.whl/mesa/visualization/templates/js/GridDraw.js | var GridVisualization = function(width, height, gridWidth, gridHeight, context, interactionHandler) {
// Find cell size:
var cellWidth = Math.floor(width / gridWidth);
var cellHeight = Math.floor(height / gridHeight);
// Find max radius of the circle that can be inscribed (fit) into the
// cell of the grid.
var maxR = Math.min(cellHeight, cellWidth)/2 - 1;
// Calls the appropriate shape(agent)
this.drawLayer = function(portrayalLayer) {
// Re-initialize the lookup table
(interactionHandler) ? interactionHandler.mouseoverLookupTable.init() : null
for (var i in portrayalLayer) {
var p = portrayalLayer[i];
// If p.Color is a string scalar, cast it to an array.
// This is done to maintain backwards compatibility
if (!Array.isArray(p.Color))
p.Color = [p.Color];
// Does the inversion of y positioning because of html5
// canvas y direction is from top to bottom. But we
// normally keep y-axis in plots from bottom to top.
p.y = gridHeight - p.y - 1;
// if a handler exists, add coordinates for the portrayalLayer index
(interactionHandler) ? interactionHandler.mouseoverLookupTable.set(p.x, p.y, i) : null;
// If the stroke color is not defined, then the first color in the colors array is the stroke color.
if (!p.stroke_color)
p.stroke_color = p.Color[0]
if (p.Shape == "rect")
this.drawRectangle(p.x, p.y, p.w, p.h, p.Color, p.stroke_color, p.Filled, p.text, p.text_color);
else if (p.Shape == "circle")
this.drawCircle(p.x, p.y, p.r, p.Color, p.stroke_color, p.Filled, p.text, p.text_color);
else if (p.Shape == "arrowHead")
this.drawArrowHead(p.x, p.y, p.heading_x, p.heading_y, p.scale, p.Color, p.stroke_color, p.Filled, p.text, p.text_color);
else
this.drawCustomImage(p.Shape, p.x, p.y, p.scale, p.text, p.text_color)
}
// if a handler exists, update its mouse listeners with the new data
(interactionHandler) ? interactionHandler.updateMouseListeners(portrayalLayer): null;
};
// DRAWING METHODS
// =====================================================================
/**
Draw a circle in the specified grid cell.
x, y: Grid coords
r: Radius, as a multiple of cell size
colors: List of colors for the gradient. Providing only one color will fill the shape with only that color, not gradient.
stroke_color: Color to stroke the shape
fill: Boolean for whether or not to fill the circle.
text: Inscribed text in rectangle.
text_color: Color of the inscribed text.
*/
this.drawCircle = function(x, y, radius, colors, stroke_color, fill, text, text_color) {
var cx = (x + 0.5) * cellWidth;
var cy = (y + 0.5) * cellHeight;
var r = radius * maxR;
context.beginPath();
context.arc(cx, cy, r, 0, Math.PI * 2, false);
context.closePath();
context.strokeStyle = stroke_color;
context.stroke();
if (fill) {
var gradient = context.createRadialGradient(cx, cy, r, cx, cy, 0);
for (i = 0; i < colors.length; i++) {
gradient.addColorStop(i/colors.length, colors[i]);
}
context.fillStyle = gradient;
context.fill();
}
// This part draws the text inside the Circle
if (text !== undefined) {
context.fillStyle = text_color;
context.textAlign = 'center';
context.textBaseline= 'middle';
context.fillText(text, cx, cy);
}
};
/**
Draw a rectangle in the specified grid cell.
x, y: Grid coords
w, h: Width and height, [0, 1]
colors: List of colors for the gradient. Providing only one color will fill the shape with only that color, not gradient.
stroke_color: Color to stroke the shape
fill: Boolean, whether to fill or not.
text: Inscribed text in rectangle.
text_color: Color of the inscribed text.
*/
this.drawRectangle = function(x, y, w, h, colors, stroke_color, fill, text, text_color) {
context.beginPath();
var dx = w * cellWidth;
var dy = h * cellHeight;
// Keep in the center of the cell:
var x0 = (x + 0.5) * cellWidth - dx/2;
var y0 = (y + 0.5) * cellHeight - dy/2;
context.strokeStyle = stroke_color;
context.strokeRect(x0, y0, dx, dy);
if (fill) {
var gradient = context.createLinearGradient(x0, y0, x0 + cellWidth, y0 + cellHeight);
for (i = 0; i < colors.length; i++) {
gradient.addColorStop(i/colors.length, colors[i]);
}
// Fill with gradient
context.fillStyle = gradient;
context.fillRect(x0,y0,dx,dy);
}
else {
context.fillStyle = color;
context.strokeRect(x0, y0, dx, dy);
}
// This part draws the text inside the Rectangle
if (text !== undefined) {
var cx = (x + 0.5) * cellWidth;
var cy = (y + 0.5) * cellHeight;
context.fillStyle = text_color;
context.textAlign = 'center';
context.textBaseline= 'middle';
context.fillText(text, cx, cy);
}
};
/**
Draw an arrow head in the specified grid cell.
x, y: Grid coords
s: Scaling of the arrowHead with respect to cell size[0, 1]
colors: List of colors for the gradient. Providing only one color will fill the shape with only that color, not gradient.
stroke_color: Color to stroke the shape
fill: Boolean, whether to fill or not.
text: Inscribed text in shape.
text_color: Color of the inscribed text.
*/
this.drawArrowHead = function(x, y, heading_x, heading_y, scale, colors, stroke_color, fill, text, text_color) {
var arrowR = maxR * scale;
var cx = (x + 0.5) * cellWidth;
var cy = (y + 0.5) * cellHeight;
if (heading_x === 0 && heading_y === 1) {
p1_x = cx;
p1_y = cy - arrowR;
p2_x = cx - arrowR;
p2_y = cy + arrowR;
p3_x = cx;
p3_y = cy + 0.8*arrowR;
p4_x = cx + arrowR;
p4_y = cy + arrowR;
}
else if (heading_x === 1 && heading_y === 0) {
p1_x = cx + arrowR;
p1_y = cy;
p2_x = cx - arrowR;
p2_y = cy - arrowR;
p3_x = cx - 0.8*arrowR;
p3_y = cy;
p4_x = cx - arrowR;
p4_y = cy + arrowR;
}
else if (heading_x === 0 && heading_y === (-1)) {
p1_x = cx;
p1_y = cy + arrowR;
p2_x = cx - arrowR;
p2_y = cy - arrowR;
p3_x = cx;
p3_y = cy - 0.8*arrowR;
p4_x = cx + arrowR;
p4_y = cy - arrowR;
}
else if (heading_x === (-1) && heading_y === 0) {
p1_x = cx - arrowR;
p1_y = cy;
p2_x = cx + arrowR;
p2_y = cy - arrowR;
p3_x = cx + 0.8*arrowR;
p3_y = cy;
p4_x = cx + arrowR;
p4_y = cy + arrowR;
}
context.beginPath();
context.moveTo(p1_x, p1_y);
context.lineTo(p2_x, p2_y);
context.lineTo(p3_x, p3_y);
context.lineTo(p4_x, p4_y);
context.closePath();
context.strokeStyle = stroke_color;
context.stroke();
if (fill) {
var gradient = context.createLinearGradient(p1_x, p1_y, p3_x, p3_y);
for (i = 0; i < colors.length; i++) {
gradient.addColorStop(i/colors.length, colors[i]);
}
// Fill with gradient
context.fillStyle = gradient;
context.fill();
}
// This part draws the text inside the ArrowHead
if (text !== undefined) {
var cx = (x + 0.5) * cellWidth;
var cy = (y + 0.5) * cellHeight;
context.fillStyle = text_color
context.textAlign = 'center';
context.textBaseline= 'middle';
context.fillText(text, cx, cy);
}
};
this.drawCustomImage = function (shape, x, y, scale, text, text_color_) {
var img = new Image();
img.src = "local/".concat(shape);
if (scale === undefined) {
var scale = 1
}
// Calculate coordinates so the image is always centered
var dWidth = cellWidth * scale;
var dHeight = cellHeight * scale;
var cx = x * cellWidth + cellWidth / 2 - dWidth / 2;
var cy = y * cellHeight + cellHeight / 2 - dHeight / 2;
// Coordinates for the text
var tx = (x + 0.5) * cellWidth;
var ty = (y + 0.5) * cellHeight;
img.onload = function() {
context.drawImage(img, cx, cy, dWidth, dHeight);
// This part draws the text on the image
if (text !== undefined) {
// ToDo: Fix fillStyle
// context.fillStyle = text_color;
context.textAlign = 'center';
context.textBaseline= 'middle';
context.fillText(text, tx, ty);
}
}
}
/**
Draw Grid lines in the full gird
*/
this.drawGridLines = function() {
context.beginPath();
context.strokeStyle = "#eee";
maxX = cellWidth * gridWidth;
maxY = cellHeight * gridHeight;
// Draw horizontal grid lines:
for(var y=0; y<=maxY; y+=cellHeight) {
context.moveTo(0, y+0.5);
context.lineTo(maxX, y+0.5);
}
for(var x=0; x<=maxX; x+= cellWidth) {
context.moveTo(x+0.5, 0);
context.lineTo(x+0.5, maxY);
}
context.stroke();
};
this.resetCanvas = function() {
context.clearRect(0, 0, width, height);
context.beginPath();
};
}; | PypiClean |
/DoorPi-2.4.1.8.tar.gz/DoorPi-2.4.1.8/doorpi/action/handler.py |
import logging
logger = logging.getLogger(__name__)
logger.debug("%s loaded", __name__)
import threading
import time # used by: fire_event_synchron
from inspect import isfunction, ismethod # used by: register_action
import string, random # used by event_id
import sqlite3
import os
from base import SingleAction
import doorpi
class EnumWaitSignalsClass():
    """Pseudo-enum of accepted 'wait' flags for firing events.

    Truthy members mean "block until all actions have finished"; falsy
    members mean "fire and return immediately".

    NOTE(review): the attribute name ``async`` became a reserved keyword in
    Python 3.7, so this class is Python-2-only as written.
    """
    WaitToFinish = True
    WaitToEnd = True
    sync = True
    syncron = True

    DontWaitToFinish = False
    DontWaitToEnd = False
    async = False
    asyncron = False
# Module-level singleton used by callers instead of the class itself.
EnumWaitSignals = EnumWaitSignalsClass()

# Marker substring: events whose name contains it are periodic timer ticks
# and are handled/logged silently throughout this module.
ONTIME = 'OnTime'
def id_generator(size = 6, chars = string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
class EventLog(object):
    """Persists fired events and executed actions into a SQLite database.

    If the database cannot be created/opened, ``_db`` stays ``False`` and
    every method degrades to a silent no-op, so logging problems never
    break event processing.
    """

    # sqlite3.Connection once __init__ succeeded, else False (logging disabled)
    _db = False

    #doorpi.DoorPi().conf.get_string_parsed('DoorPi', 'eventlog', '!BASEPATH!/conf/eventlog.db')

    def __init__(self, file_name):
        """Open the event log database *file_name*, creating path and tables.

        A falsy *file_name* disables event logging entirely.
        """
        if not file_name: return
        try:
            if not os.path.exists(os.path.dirname(file_name)):
                logger.info('Path %s does not exist - creating it now', os.path.dirname(file_name))
                os.makedirs(os.path.dirname(file_name))
            #https://docs.python.org/2/library/sqlite3.html#sqlite3.connect
            self._db = sqlite3.connect(
                database = file_name,
                timeout = 1,
                check_same_thread = False  # connection is shared across event threads
            )
            self.execute_sql('''
                CREATE TABLE IF NOT EXISTS event_log (
                    event_id TEXT,
                    fired_by TEXT,
                    event_name TEXT,
                    start_time REAL,
                    additional_infos TEXT
                );'''
            )
            self.execute_sql('''
                CREATE TABLE IF NOT EXISTS action_log (
                    event_id TEXT,
                    action_name TEXT,
                    start_time REAL,
                    action_result TEXT
                );'''
            )
        except Exception:
            # was a bare 'except:' - keep best-effort behaviour, but no
            # longer swallow SystemExit/KeyboardInterrupt
            logger.exception('error to create event_db')

    def get_event_log_entries_count(self, filter = ''):
        """Return how many event_log rows match *filter*, or -1 on error."""
        logger.debug('request event logs count with filter %s', filter)
        try:
            # bound parameters replace the old str.format interpolation,
            # which allowed SQL injection through the filter value
            pattern = '%' + filter + '%'
            return self.execute_sql('''
                SELECT COUNT(*)
                FROM event_log
                WHERE event_id LIKE ?
                OR fired_by LIKE ?
                OR event_name LIKE ?
                OR start_time LIKE ?
                ''', (pattern, pattern, pattern, pattern)).fetchone()[0]
        except Exception as exp:
            logger.exception(exp)
            return -1

    def get_event_log_entries(self, max_count = 100, filter = ''):
        """Return up to *max_count* newest event_log rows matching *filter*.

        Each row is a dict keyed by the event_log column names.
        """
        logger.debug('request last %s event logs with filter %s', max_count, filter)
        return_object = []
        pattern = '%' + filter + '%'
        sql_statement = '''
            SELECT
                event_id,
                fired_by,
                event_name,
                start_time,
                additional_infos
            FROM event_log
            WHERE event_id LIKE ?
            OR fired_by LIKE ?
            OR event_name LIKE ?
            OR start_time LIKE ?
            ORDER BY start_time DESC
            LIMIT ?'''
        for single_row in self.execute_sql(sql_statement, (pattern, pattern, pattern, pattern, max_count)):
            return_object.append({
                'event_id': single_row[0],
                'fired_by': single_row[1],
                'event_name': single_row[2],
                'start_time': single_row[3],
                'additional_infos': single_row[4]
            })
        return return_object

    def execute_sql(self, sql, params = ()):
        """Execute *sql* with optional bound *params*; no-op when disabled.

        The *params* argument defaults to an empty tuple, so existing
        callers that pass only a statement keep working.
        """
        if not self._db: return
        #logger.trace('fire sql: %s', sql)
        return self._db.execute(sql, params)

    def insert_event_log(self, event_id, fired_by, event_name, start_time, additional_infos):
        """Record one fired event."""
        # parameterized insert: values are stored verbatim (the old code
        # mangled double quotes into single quotes to survive formatting)
        self.execute_sql(
            'INSERT INTO event_log VALUES (?, ?, ?, ?, ?);',
            (event_id, str(fired_by), str(event_name), start_time, str(additional_infos))
        )

    def insert_action_log(self, event_id, action_name, start_time, action_result):
        """Record the result of one executed action."""
        self.execute_sql(
            'INSERT INTO action_log VALUES (?, ?, ?, ?);',
            (event_id, str(action_name), start_time, str(action_result))
        )

    def update_event_log(self):
        # placeholder - kept for API compatibility
        pass

    def destroy(self):
        """Close the database connection; safe to call repeatedly."""
        try: self._db.close()
        except Exception: pass

    __del__ = destroy
class EventHandler:
    """Global registry wiring event sources -> events -> actions, and firing
    the registered actions when an event occurs (sync or in worker threads).

    NOTE: the registries below are class attributes, i.e. shared by every
    EventHandler instance - the application treats this class as a singleton.
    """

    __Sources = []  # registered event sources
    __Events = {}   # event name -> sources that may fire it (1 : n)
    __Actions = {}  # event name -> actions executed on fire (1 : n)

    __additional_informations = {}  # event name -> kwargs of the last firing

    @property
    def event_history(self): return self.db.get_event_log_entries()

    @property
    def sources(self): return self.__Sources

    @property
    def events(self): return self.__Events

    @property
    def events_by_source(self):
        """Invert the event registry: {source: [event names]}."""
        events_by_source = {}
        for event in self.events:
            for source in self.events[event]:
                if source in events_by_source:
                    events_by_source[source].append(event)
                else:
                    events_by_source[source] = [event]
        return events_by_source

    @property
    def actions(self): return self.__Actions

    @property
    def threads(self): return threading.enumerate()

    @property
    def idle(self):
        # idle == only the main thread is alive; the original used 'is 0',
        # which only worked because CPython caches small integers
        return len(self.threads) - 1 == 0

    @property
    def additional_informations(self): return self.__additional_informations

    def __init__(self):
        db_path = doorpi.DoorPi().config.get_string_parsed('DoorPi', 'eventlog', '!BASEPATH!/conf/eventlog.db')
        self.db = EventLog(db_path)

    __destroy = False  # once True, no further (non-timer) events are fired

    def destroy(self, force_destroy = False):
        """Stop firing new events and close the event log database."""
        self.__destroy = True
        self.db.destroy()

    def register_source(self, event_source):
        """Add *event_source* to the known sources (idempotent)."""
        if event_source not in self.__Sources:
            self.__Sources.append(event_source)
            logger.debug("event_source %s was added", event_source)

    def register_event(self, event_name, event_source):
        """Register that *event_source* may fire *event_name*."""
        silent = ONTIME in event_name  # timer ticks are not logged
        if not silent: logger.trace("register Event %s from %s ", event_name, event_source)
        self.register_source(event_source)
        if event_name not in self.__Events:
            self.__Events[event_name] = [event_source]
            if not silent: logger.trace("added event_name %s and registered source %s", event_name, event_source)
        elif event_source not in self.__Events[event_name]:
            self.__Events[event_name].append(event_source)
            if not silent: logger.trace("added event_source %s to existing event %s", event_source, event_name)
        else:
            if not silent: logger.trace("nothing to do - event %s from source %s is already known", event_name, event_source)

    def fire_event(self, event_name, event_source, syncron = False, kwargs = None):
        """Fire *event_name*; blocks only when *syncron* is truthy."""
        if syncron is False: return self.fire_event_asynchron(event_name, event_source, kwargs)
        else: return self.fire_event_synchron(event_name, event_source, kwargs)

    def fire_event_asynchron(self, event_name, event_source, kwargs = None):
        """Fire *event_name* in a new worker thread and return immediately."""
        silent = ONTIME in event_name
        if self.__destroy and not silent: return False
        if not silent: logger.trace("fire Event %s from %s asyncron", event_name, event_source)
        return threading.Thread(
            target = self.fire_event_synchron,
            args = (event_name, event_source, kwargs),
            name = "%s from %s" % (event_name, event_source)
        ).start()

    def fire_event_asynchron_daemon(self, event_name, event_source, kwargs = None):
        """Like fire_event_asynchron, but the worker is a daemon thread."""
        logger.trace("fire Event %s from %s asyncron and as daemons", event_name, event_source)
        t = threading.Thread(
            target = self.fire_event_synchron,
            args = (event_name, event_source, kwargs),
            name = "daemon %s from %s" % (event_name, event_source)
        )
        t.daemon = True
        t.start()

    def fire_event_synchron(self, event_name, event_source, kwargs = None):
        """Run all actions registered for *event_name* in this thread.

        Returns True on success, False while shutting down, or a short
        string describing why the event was skipped.
        """
        silent = ONTIME in event_name
        if self.__destroy and not silent: return False

        event_fire_id = id_generator()
        start_time = time.time()
        if not silent: self.db.insert_event_log(event_fire_id, event_source, event_name, start_time, kwargs)

        if event_source not in self.__Sources:
            logger.warning('source %s unknown - skip fire_event %s', event_source, event_name)
            return "source unknown"
        if event_name not in self.__Events:
            logger.warning('event %s unknown - skip fire_event %s from %s', event_name, event_name, event_source)
            return "event unknown"
        if event_source not in self.__Events[event_name]:
            logger.warning('source %s unknown for this event - skip fire_event %s from %s', event_name, event_name, event_source)
            return "source unknown for this event"
        if event_name not in self.__Actions:
            if not silent: logger.debug('no actions for event %s - skip fire_event %s from %s', event_name, event_name, event_source)
            return "no actions for this event"

        if kwargs is None: kwargs = {}
        kwargs.update({
            'last_fired': str(start_time),
            'last_fired_from': event_source,
            'event_fire_id': event_fire_id
        })
        self.__additional_informations[event_name] = kwargs
        if 'last_finished' not in self.__additional_informations[event_name]:
            self.__additional_informations[event_name]['last_finished'] = None
        if 'last_duration' not in self.__additional_informations[event_name]:
            self.__additional_informations[event_name]['last_duration'] = None

        if not silent: logger.debug("[%s] fire for event %s this actions %s ", event_fire_id, event_name, self.__Actions[event_name])
        single_fire_actions = []
        for action in self.__Actions[event_name]:
            if not silent: logger.trace("[%s] try to fire action %s", event_fire_id, action)
            try:
                result = action.run(silent)
                if not silent: self.db.insert_action_log(event_fire_id, action.name, start_time, result)
                if action.single_fire_action is True:
                    # the old code did 'del action', which only unbound the
                    # loop variable - remember it and really deregister below
                    single_fire_actions.append(action)
            except SystemExit as exp:
                logger.info('[%s] Detected SystemExit and shutdown DoorPi (Message: %s)', event_fire_id, exp)
                doorpi.DoorPi().destroy()
            except KeyboardInterrupt as exp:
                logger.info("[%s] Detected KeyboardInterrupt and shutdown DoorPi (Message: %s)", event_fire_id, exp)
                doorpi.DoorPi().destroy()
            except Exception:
                logger.exception("[%s] error while fire action %s for event_name %s", event_fire_id, action, event_name)
        # deregister one-shot actions after the loop (not while iterating)
        for action in single_fire_actions:
            self.__Actions[event_name].remove(action)
        if not silent: logger.trace("[%s] finished fire_event for event_name %s", event_fire_id, event_name)
        self.__additional_informations[event_name]['last_finished'] = str(time.time())
        self.__additional_informations[event_name]['last_duration'] = str(time.time() - start_time)
        return True

    def unregister_event(self, event_name, event_source, delete_source_when_empty = True):
        """Detach *event_source* from *event_name*; drop the event when empty."""
        try:
            logger.trace("unregister Event %s from %s ", event_name, event_source)
            if event_name not in self.__Events: return "event unknown"
            if event_source not in self.__Events[event_name]: return "source not know for this event"
            self.__Events[event_name].remove(event_source)
            if len(self.__Events[event_name]) == 0:  # was 'is 0' (identity on int)
                del self.__Events[event_name]
                logger.debug("no more sources for event %s - remove event too", event_name)
                if delete_source_when_empty: self.unregister_source(event_source)
            logger.trace("event_source %s was removed for event %s", event_source, event_name)
            return True
        except Exception as exp:
            logger.error('failed to unregister event %s with error message %s', event_name, exp)
            return False

    def unregister_source(self, event_source, force_unregister = False):
        """Remove *event_source*; with force, also detach it from its events."""
        try:
            logger.trace("unregister Eventsource %s and force_unregister is %s", event_source, force_unregister)
            if event_source not in self.__Sources: return "event_source %s unknown" % (event_source)
            # iterate over a snapshot - unregister_event may delete keys,
            # which would break iteration over the live dict in Python 3
            for event_name in list(self.__Events.keys()):
                if event_source in self.__Events[event_name] and force_unregister:
                    self.unregister_event(event_name, event_source, False)
                elif event_source in self.__Events[event_name] and not force_unregister:
                    return "couldn't unregister event_source %s because it is used for event %s" % (event_source, event_name)
            if event_source in self.__Sources:
                # should be unnecessary (a source is removed together with its
                # last event) - kept as a safety net
                self.__Sources.remove(event_source)
            logger.trace("event_source %s was removed", event_source)
            return True
        except Exception as exp:
            logger.exception('failed to unregister source %s with error message %s', event_source, exp)
            return False

    def register_action(self, event_name, action_object, *args, **kwargs):
        """Attach an action (callable, SingleAction or action string) to *event_name*.

        Returns the registered SingleAction, or False on failure.
        """
        if ismethod(action_object) and callable(action_object):
            action_object = SingleAction(action_object, *args, **kwargs)
        elif isfunction(action_object) and callable(action_object):
            action_object = SingleAction(action_object, *args, **kwargs)
        elif not isinstance(action_object, SingleAction):
            action_object = SingleAction.from_string(action_object)

        if action_object is None:
            logger.error('action_object is None')
            return False

        if kwargs.get('single_fire_action') is True:
            action_object.single_fire_action = True
            del kwargs['single_fire_action']

        if event_name in self.__Actions:
            self.__Actions[event_name].append(action_object)
            logger.trace("action %s was added to event %s", action_object, event_name)
        else:
            self.__Actions[event_name] = [action_object]
            logger.trace("action %s was added to new evententry %s", action_object, event_name)
        return action_object

    __call__ = fire_event_asynchron
/OBITools-1.2.13.tar.gz/OBITools-1.2.13/distutils.ext/obidistutils/serenity/pip/_vendor/distlib/locators.py |
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError:
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
# Raw strings for the regex literals: '\w' / '\s' in plain strings are
# invalid escape sequences (DeprecationWarning since Python 3.6).
# Matches fragment hashes such as 'md5=0123abcd...' -> (algo, hexdigest).
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
# Extracts the charset parameter from a Content-Type header value.
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
# Content types treated as scrapeable HTML/XHTML pages.
HTML_CONTENT_TYPE = re.compile(r'text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'http://python.org/pypi'
def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.
    :param url: The URL of the index. Defaults to ``DEFAULT_INDEX``.
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    # XML-RPC call against the index; uses a short timeout so a dead
    # index does not hang the caller.
    client = ServerProxy(index_url, timeout=3.0)
    return client.list_packages()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use the first header found.
        target = None
        for header_name in ('location', 'uri'):
            if header_name in headers:
                target = headers[header_name]
                break
        if target is None:
            return
        if urlparse(target).scheme == '':
            # Relative redirect: resolve against the request URL and patch
            # the header so the base implementation sees an absolute URL.
            target = urljoin(req.get_full_url(), target)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(header_name, target)
            else:
                headers[header_name] = target
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
posixpath.basename(t.path))
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implement favours http:// URLs over https://, archives
from PyPI over those from other locations and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
Attempt to split a filename in project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
name1, name2 = name1.lower(), name2.lower()
if name1 == name2:
result = True
else:
# distribute replaces '-' by '_' in project names, so it
# can tell where the version starts in a filename.
result = name1.replace('_', '-') == name2.replace('_', '-')
return result
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e:
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a dictionary for a
specific version, whih typically holds information gleaned from a filename or URL for an
archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = self._get_digest(info)
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, info['url'])
dist.locator = self
result[version] = dist
    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.
        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        :raises DistlibException: If *requirement* cannot be parsed.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        # The matcher is published on self so that cooperating code (e.g.
        # AggregatingLocator._get_project) can see the constraints in force
        # while get_project runs; it is cleared again before returning.
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if versions:
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        # Pre-releases are only kept when explicitly allowed.
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:
                    # Unparseable/invalid version strings are skipped, not fatal.
                    logger.warning('error matching %s with %r', matcher, k)
                    pass # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                # Highest (most recent) matching version wins.
                result = versions[slist[-1]]
        if result and r.extras:
            result.extras = r.extras
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """

    def __init__(self, url, **kwargs):
        """
        Initialise an instance.
        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        # Build one Distribution per released version, using the first
        # release URL (if any) as the source URL.
        result = {}
        for version in self.client.package_releases(name, True):
            release_urls = self.client.release_urls(name, version)
            release_data = self.client.release_data(name, version)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = release_data['name']
            metadata.version = release_data['version']
            metadata.license = release_data.get('license')
            metadata.keywords = release_data.get('keywords', [])
            metadata.summary = release_data.get('summary')
            dist = Distribution(metadata)
            if release_urls:
                first = release_urls[0]
                metadata.source_url = first['url']
                dist.digest = self._get_digest(first)
            dist.locator = self
            result[version] = dist
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in
    functionality and probably not worth using.
    """

    def __init__(self, url, **kwargs):
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        # Fetch <base>/<name>/json and turn the 'info' section into a single
        # Distribution (the JSON API only describes the latest version here).
        result = {}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            response = self.opener.open(url)
            payload = json.loads(response.read().decode())  # for now
            md = Metadata(scheme=self.scheme)
            info = payload['info']
            md.name = info['name']
            md.version = info['version']
            md.license = info.get('license')
            md.keywords = info.get('keywords', [])
            md.summary = info.get('summary')
            dist = Distribution(md)
            urls = payload['urls']
            if urls:
                first = urls[0]
                md.source_url = first['url']
                dist.digest = self._get_digest(first)
            dist.locator = self
            result[md.version] = dist
        except Exception as e:
            # Best-effort: a failed fetch yields an empty result, not a crash.
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    # FIX: the patterns are now raw strings, so sequences such as \s reach the
    # regex engine verbatim instead of being (invalid) string escapes, which
    # raise DeprecationWarning/SyntaxWarning on modern Python versions.
    _href = re.compile(r"""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)

    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        :param data: The decoded (text) HTML of the page.
        :param url: The URL the page was fetched from; used to resolve
                    relative links unless a <base href> tag overrides it.
        """
        self.data = data
        self.base_url = self.url = url
        m = self._base.search(self.data)
        if m:
            # A <base href="..."> tag overrides the fetch URL for resolving
            # relative links.
            self.base_url = m.group(1)

    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        :return: A list of (url, rel) tuples, sorted by URL in reverse order.
        """
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))

        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # Percent-encode characters outside the allowed set.
            # NOTE(review): '%2x' space-pads values below 0x10 (e.g. '% 9'),
            # which is not valid percent-encoding -- confirm whether '%02x'
            # was intended before changing long-standing scraping behaviour.
            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """

    # These are used to deal with various Content-Encoding schemes.
    # BUG FIX: the 'gzip' decoder previously referenced an undefined name 'd'
    # instead of the lambda's parameter 'b', so any response served with
    # Content-Encoding: gzip raised NameError. It now uses the parameter.
    decoders = {
        'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }

    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()

    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            # Daemon threads so a hung fetch can't block interpreter exit.
            # Attribute assignment is used because Thread.setDaemon() is
            # deprecated (removed-in-future API).
            t.daemon = True
            t.start()
            self._threads.append(t)

    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def _get_project(self, name):
        result = {}
        # _gplock serialises concurrent get_project calls from user threads;
        # self.result / self.project_name are shared with the worker threads
        # for the duration of this call.
        with self._gplock:
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result

    platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
                                    r'win(32|-amd64)|macosx-?\d+)\b', re.I)

    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)

    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.
        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.
        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info

    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            # Archives are downloads, not pages to scrape further.
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result

    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.
        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            self._seen.add(link)
                            if (not self._process_download(link) and
                                self._should_queue(link, url, rel)):
                                logger.debug('Queueing %s from %s', link, url)
                                self._to_fetch.put(link)
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                # A falsy url is the sentinel from _wait_threads.
                break

    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.

        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:
                            data = data.decode('latin-1')    # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result   # even if None (failure)
        return result

    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """

    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        root = os.path.abspath(path)
        if not os.path.isdir(root):
            raise DistlibException('Not a directory: %r' % root)
        self.base_dir = root

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _file_url(self, parent, filename):
        """Return a file:// URL for *filename* inside directory *parent*."""
        full_path = os.path.abspath(os.path.join(parent, filename))
        return urlunparse(('file', '', pathname2url(full_path), '', '', ''))

    def _get_project(self, name):
        # Walk the tree (or just the top level when recursive is False) and
        # collect version data for every candidate archive matching *name*.
        result = {}
        for parent, _subdirs, filenames in os.walk(self.base_dir):
            for filename in filenames:
                if self.should_include(filename, parent):
                    info = self.convert_url_to_download_info(
                        self._file_url(parent, filename), name)
                    if info:
                        self._update_version_data(result, info)
            if not self.recursive:
                break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        names = set()
        for parent, _subdirs, filenames in os.walk(self.base_dir):
            for filename in filenames:
                if self.should_include(filename, parent):
                    info = self.convert_url_to_download_info(
                        self._file_url(parent, filename), None)
                    if info:
                        names.add(info['name'])
            if not self.recursive:
                break
        return names
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        result = {}
        data = get_project_data(name)
        # Only source distributions carry the extended metadata we need.
        for info in (data or {}).get('files', []):
            if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                continue
            # We don't store summary in project metadata as it makes
            # the data bigger for no benefit during dependency
            # resolution
            dist = make_dist(data['name'], info['version'],
                             summary=data.get('summary',
                                              'Placeholder for summary'),
                             scheme=self.scheme)
            md = dist.metadata
            md.source_url = info['url']
            # TODO SHA256 digest
            if info.get('digest'):
                dist.digest = ('md5', info['digest'])
            md.dependencies = info.get('requirements', {})
            dist.exports = info.get('exports', {})
            result[dist.version] = dist
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """

    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.
        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        # An installed distribution has exactly one version (or none).
        dist = self.distpath.get_distribution(name)
        return {} if dist is None else {dist.version: dist}
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """

    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.
        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)

    def clear_cache(self):
        super(AggregatingLocator, self).clear_cache()
        for sub_locator in self.locators:
            sub_locator.clear_cache()

    def _set_scheme(self, value):
        # Propagate the scheme to every chained locator.
        self._scheme = value
        for sub_locator in self.locators:
            sub_locator.scheme = value

    scheme = property(Locator.scheme.fget, _set_scheme)

    def _get_project(self, name):
        result = {}
        for sub_locator in self.locators:
            found = sub_locator.get_project(name)
            if not found:
                continue
            if self.merge:
                result.update(found)
                continue
            # See issue #18. If any dists are found and we're looking
            # for specific constraints, we only return something if
            # a match is found. For example, if a DirectoryLocator
            # returns just foo (1.0) while we're looking for
            # foo (>= 2.0), we'll pretend there was nothing there so
            # that subsequent locators can be queried. Otherwise we
            # would just return foo (1.0) which would then lead to a
            # failure to find foo (>= 2.0), because other locators
            # weren't searched. Note that this only matters when
            # merge=False.
            if self.matcher is None:
                usable = True
            else:
                usable = any(self.matcher.match(version) for version in found)
            if usable:
                result = found
                break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        names = set()
        for sub_locator in self.locators:
            try:
                names |= sub_locator.get_distribution_names()
            except NotImplementedError:
                # Some locators (e.g. JSON-based ones) can't enumerate names.
                pass
        return names
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
                    JSONLocator(),
                    SimpleScrapingLocator('https://pypi.python.org/simple/',
                                          timeout=3.0),
                    scheme='legacy')
# Module-level convenience: ``locate(...)`` delegates to the shared
# default locator instance defined above.
locate = default_locator.locate
# Matches strings such as "name (1.0)" or "name (== 1.0)", capturing the
# project name and the version expression inside the parentheses.
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """

    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)

    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))

    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            if not s:
                # No remaining providers for this name; drop the entry.
                del self.provided[name]

    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError:
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher

    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.
        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key  # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False
                if match:
                    result.add(provider)
                    # Stop at the first matching provider.
                    break
        return result

    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).
        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.
        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other`` and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other, unmatched))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result

    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.
        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.
        Return a set of :class:`Distribution` instances and a set of
        problems.
        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.
        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        # Per-call state used by add/remove_distribution and try_to_replace.
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}

        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])

        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        todo = set([dist])
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                # A distribution with this name was already processed; see
                # whether the new one can replace it.
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)

            # Install-time requirements, build-time requirements, and any
            # extra requirement categories requested via meta_extras.
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)

        dists = set(self.dists.values())
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        # NOTE: the original line was corrupted by an extraction artifact
        # ('... | PypiClean'), which would raise NameError; restored to the
        # plain 2-tuple return.
        return dists, problems
from flask import current_app, request, url_for
from .const import PERMISSION_PREFIX
def app_template_filter(filter_name=""):
    """Decorator factory tagging a method as a Jinja template filter.

    The returned decorator stores *filter_name* on the function as
    ``_filter`` (only if no tag is present yet) so that ``TemplateFilters``
    can discover and register it. The function itself is returned unchanged.
    """
    def _mark(func):
        # Tag only once: a callable that already carries a _filter
        # attribute keeps its original name.
        if not hasattr(func, "_filter"):
            func._filter = filter_name
        return func
    return _mark
class TemplateFilters(object):
    """Collection of Jinja template filters used by Flask-AppBuilder views.
    On construction, every method tagged with ``@app_template_filter`` is
    registered on the application's Jinja environment under its declared
    filter name.
    """
    # Set in __init__; used by the permission-related filters below.
    security_manager = None
    def __init__(self, app, security_manager):
        self.security_manager = security_manager
        # Register every attribute tagged by @app_template_filter as a
        # Jinja filter on the app's environment.
        for attr_name in dir(self):
            if hasattr(getattr(self, attr_name), "_filter"):
                attr = getattr(self, attr_name)
                app.jinja_env.filters[attr._filter] = attr
    @app_template_filter("get_actions_on_list")
    def get_actions_on_list(self, actions, modelview_name):
        """Return only the actions visible to the user that support
        multiple selected items (list views)."""
        res_actions = dict()
        for action_key in actions:
            action = actions[action_key]
            if self.is_item_visible(action.name, modelview_name) and action.multiple:
                res_actions[action_key] = action
        return res_actions
    @app_template_filter("get_actions_on_show")
    def get_actions_on_show(self, actions, modelview_name):
        """Return only the actions visible to the user that apply to a
        single item (show views)."""
        res_actions = dict()
        for action_key in actions:
            action = actions[action_key]
            if self.is_item_visible(action.name, modelview_name) and action.single:
                res_actions[action_key] = action
        return res_actions
    @app_template_filter("link_order")
    def link_order_filter(self, column, modelview_name):
        """
        Build a URL for ordering the list view by *column*.
        Arguments are passed like:
        _oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
        """
        new_args = request.view_args.copy()
        args = request.args.copy()
        if ("_oc_" + modelview_name) in args:
            # Already ordered: set the column and flip the direction.
            args["_oc_" + modelview_name] = column
            if args.get("_od_" + modelview_name) == "asc":
                args["_od_" + modelview_name] = "desc"
            else:
                args["_od_" + modelview_name] = "asc"
        else:
            # Not ordered yet: start ascending on this column.
            args["_oc_" + modelview_name] = column
            args["_od_" + modelview_name] = "asc"
        return url_for(
            request.endpoint,
            **dict(list(new_args.items()) + list(args.to_dict().items()))
        )
    @app_template_filter("link_page")
    def link_page_filter(self, page, modelview_name):
        """
        Build a URL for the given list page.
        Arguments are passed like: page_<VIEW_NAME>=<PAGE_NUMBER>
        """
        new_args = request.view_args.copy()
        args = request.args.copy()
        args["page_" + modelview_name] = page
        return url_for(
            request.endpoint,
            **dict(list(new_args.items()) + list(args.to_dict().items()))
        )
    @app_template_filter("link_page_size")
    def link_page_size_filter(self, page_size, modelview_name):
        """
        Build a URL for the given page size.
        Arguments are passed like: psize_<VIEW_NAME>=<PAGE_NUMBER>
        """
        new_args = request.view_args.copy()
        args = request.args.copy()
        args["psize_" + modelview_name] = page_size
        return url_for(
            request.endpoint,
            **dict(list(new_args.items()) + list(args.to_dict().items()))
        )
    @app_template_filter("get_link_next")
    def get_link_next_filter(self, s):
        # The filter input *s* is ignored; the value comes from the request.
        return request.args.get("next")
    @app_template_filter("get_link_back")
    def get_link_back_filter(self, request):
        # Prefer an explicit ?next=... parameter, fall back to the referrer.
        return request.args.get("next") or request.referrer
    # TODO improve this
    @app_template_filter("set_link_filters")
    def set_link_filters_filter(self, path, filters):
        """Append related-view filter values to *path* as _flt_0_<col>
        query-string parameters."""
        lnkstr = path
        for flt, value in filters.get_filters_values():
            if flt.is_related_view:
                if "?" in lnkstr:
                    lnkstr = lnkstr + "&_flt_0_" + flt.column_name + "=" + str(value)
                else:
                    lnkstr = lnkstr + "?_flt_0_" + flt.column_name + "=" + str(value)
        return lnkstr
    @app_template_filter("get_link_order")
    def get_link_order_filter(self, column, modelview_name):
        # Returns 2 if ordered ascending by *column*, 1 if descending,
        # 0 if not ordered by it at all.
        if request.args.get("_oc_" + modelview_name) == column:
            if request.args.get("_od_" + modelview_name) == "asc":
                return 2
            else:
                return 1
        else:
            return 0
    @app_template_filter("get_attr")
    def get_attr_filter(self, obj, item):
        # Generic attribute access for templates: {{ obj | get_attr('name') }}
        return getattr(obj, item)
    @app_template_filter("is_menu_visible")
    def is_menu_visible(self, item):
        return self.security_manager.has_access("menu_access", item.name)
    @staticmethod
    def find_views_by_name(view_name):
        # Linear scan of the registered views; returns None when not found.
        for view in current_app.appbuilder.baseviews:
            if view.__class__.__name__ == view_name:
                return view
    @app_template_filter("is_item_visible")
    def is_item_visible(self, permission: str, item: str) -> bool:
        """
        Check if an item is visible on the template
        this changed with permission mapping feature.
        This is a best effort to deliver the feature
        and not break compatibility
        permission is:
        - 'can_' + <METHOD_NAME>: On normal routes
        - <METHOD_NAME>: when it's an action
        """
        _view = self.find_views_by_name(item)
        item = _view.class_permission_name
        if PERMISSION_PREFIX in permission:
            # Normal route permission such as 'can_list'.
            method = permission.split(PERMISSION_PREFIX)[1]
        else:
            if hasattr(_view, 'actions') and _view.actions.get(permission):
                # Action permission: map through the view's action mapping.
                permission_name = _view.get_action_permission_name(permission)
                if permission_name not in _view.base_permissions:
                    return False
                return self.security_manager.has_access(permission_name, item)
            else:
                method = permission
        permission_name = PERMISSION_PREFIX + _view.get_method_permission(method)
        if permission_name not in _view.base_permissions:
            return False
        return self.security_manager.has_access(permission_name, item)
from collections import defaultdict, deque
from kronbot.core import checks, commands, i18n
from kronbot.core.utils.chat_formatting import box
from .abc import MixinMeta
_ = i18n.Translator("Mod", __file__)
class ModSettings(MixinMeta):
"""
This is a mixin for the mod cog containing all settings commands.
"""
    @commands.group()
    @commands.guild_only()
    @checks.guildowner_or_permissions(administrator=True)
    async def modset(self, ctx: commands.Context):
        """Manage server administration settings."""
        # When invoked without a subcommand, show a summary of the guild's
        # current moderation settings instead.
        if ctx.invoked_subcommand is None:
            guild = ctx.guild
            # Display current settings
            data = await self.settings.guild(guild).all()
            delete_repeats = data["delete_repeats"]
            ban_mention_spam = data["ban_mention_spam"]
            respect_hierarchy = data["respect_hierarchy"]
            delete_delay = data["delete_delay"]
            reinvite_on_unban = data["reinvite_on_unban"]
            dm_on_kickban = data["dm_on_kickban"]
            default_days = data["default_days"]
            msg = ""
            # -1 is the stored sentinel meaning "feature disabled" for
            # delete_repeats and delete_delay below.
            msg += _("Delete repeats: {num_repeats}\n").format(
                num_repeats=_("after {num} repeats").format(num=delete_repeats)
                if delete_repeats != -1
                else _("No")
            )
            msg += _("Ban mention spam: {num_mentions}\n").format(
                num_mentions=_("{num} mentions").format(num=ban_mention_spam)
                if ban_mention_spam
                else _("No")
            )
            msg += _("Respects hierarchy: {yes_or_no}\n").format(
                yes_or_no=_("Yes") if respect_hierarchy else _("No")
            )
            msg += _("Delete delay: {num_seconds}\n").format(
                num_seconds=_("{num} seconds").format(num=delete_delay)
                if delete_delay != -1
                else _("None")
            )
            msg += _("Reinvite on unban: {yes_or_no}\n").format(
                yes_or_no=_("Yes") if reinvite_on_unban else _("No")
            )
            msg += _("Send message to users on kick/ban: {yes_or_no}\n").format(
                yes_or_no=_("Yes") if dm_on_kickban else _("No")
            )
            # 0 days means "don't delete any message history on ban".
            if default_days:
                msg += _(
                    "Default message history delete on ban: Previous {num_days} days\n"
                ).format(num_days=default_days)
            else:
                msg += _("Default message history delete on ban: Don't delete any\n")
            await ctx.send(box(msg))
@modset.command()
@commands.guild_only()
async def hierarchy(self, ctx: commands.Context):
"""Toggle role hierarchy check for mods and admins.
**WARNING**: Disabling this setting will allow mods to take
actions on users above them in the role hierarchy!
This is enabled by default.
"""
guild = ctx.guild
toggled = await self.settings.guild(guild).respect_hierarchy()
if not toggled:
await self.settings.guild(guild).respect_hierarchy.set(True)
await ctx.send(
_("Role hierarchy will be checked when moderation commands are issued.")
)
else:
await self.settings.guild(guild).respect_hierarchy.set(False)
await ctx.send(
_("Role hierarchy will be ignored when moderation commands are issued.")
)
@modset.command()
@commands.guild_only()
async def banmentionspam(self, ctx: commands.Context, max_mentions: int = 0):
"""Set the autoban conditions for mention spam.
Users will be banned if they send any message which contains more than
`<max_mentions>` mentions.
`<max_mentions>` must be at least 5. Set to 0 to disable.
"""
guild = ctx.guild
if max_mentions:
if max_mentions < 5:
max_mentions = 5
await self.settings.guild(guild).ban_mention_spam.set(max_mentions)
await ctx.send(
_(
"Autoban for mention spam enabled. "
"Anyone mentioning {max_mentions} or more different people "
"in a single message will be autobanned."
).format(max_mentions=max_mentions)
)
else:
cur_setting = await self.settings.guild(guild).ban_mention_spam()
if not cur_setting:
await ctx.send_help()
return
await self.settings.guild(guild).ban_mention_spam.set(False)
await ctx.send(_("Autoban for mention spam disabled."))
@modset.command()
@commands.guild_only()
async def deleterepeats(self, ctx: commands.Context, repeats: int = None):
"""Enable auto-deletion of repeated messages.
Must be between 2 and 20.
Set to -1 to disable this feature.
"""
guild = ctx.guild
if repeats is not None:
if repeats == -1:
await self.settings.guild(guild).delete_repeats.set(repeats)
self.cache.pop(guild.id, None) # remove cache with old repeat limits
await ctx.send(_("Repeated messages will be ignored."))
elif 2 <= repeats <= 20:
await self.settings.guild(guild).delete_repeats.set(repeats)
# purge and update cache to new repeat limits
self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats))
await ctx.send(
_("Messages repeated up to {num} times will be deleted.").format(num=repeats)
)
else:
await ctx.send(
_(
"Number of repeats must be between 2 and 20"
" or equal to -1 if you want to disable this feature!"
)
)
else:
repeats = await self.settings.guild(guild).delete_repeats()
if repeats != -1:
await ctx.send(
_(
"Bot will delete repeated messages after"
" {num} repeats. Set this value to -1 to"
" ignore repeated messages"
).format(num=repeats)
)
else:
await ctx.send(_("Repeated messages will be ignored."))
@modset.command()
@commands.guild_only()
async def reinvite(self, ctx: commands.Context):
"""Toggle whether an invite will be sent to a user when unbanned.
If this is True, the bot will attempt to create and send a single-use invite
to the newly-unbanned user.
"""
guild = ctx.guild
cur_setting = await self.settings.guild(guild).reinvite_on_unban()
if not cur_setting:
await self.settings.guild(guild).reinvite_on_unban.set(True)
await ctx.send(
_("Users unbanned with `{command}` will be reinvited.").format(
command=f"{ctx.clean_prefix}unban"
)
)
else:
await self.settings.guild(guild).reinvite_on_unban.set(False)
await ctx.send(
_("Users unbanned with `{command}` will not be reinvited.").format(
command=f"{ctx.clean_prefix}unban"
)
)
@modset.command()
@commands.guild_only()
async def dm(self, ctx: commands.Context, enabled: bool = None):
"""Toggle whether a message should be sent to a user when they are kicked/banned.
If this option is enabled, the bot will attempt to DM the user with the guild name
and reason as to why they were kicked/banned.
"""
guild = ctx.guild
if enabled is None:
setting = await self.settings.guild(guild).dm_on_kickban()
await ctx.send(
_("DM when kicked/banned is currently set to: {setting}").format(setting=setting)
)
return
await self.settings.guild(guild).dm_on_kickban.set(enabled)
if enabled:
await ctx.send(_("Bot will now attempt to send a DM to user before kick and ban."))
else:
await ctx.send(
_("Bot will no longer attempt to send a DM to user before kick and ban.")
)
@modset.command()
@commands.guild_only()
async def defaultdays(self, ctx: commands.Context, days: int = 0):
"""Set the default number of days worth of messages to be deleted when a user is banned.
The number of days must be between 0 and 7.
"""
guild = ctx.guild
if not (0 <= days <= 7):
return await ctx.send(_("Invalid number of days. Must be between 0 and 7."))
await self.settings.guild(guild).default_days.set(days)
await ctx.send(
_("{days} days worth of messages will be deleted when a user is banned.").format(
days=days
)
) | PypiClean |
/HuTao%20agent-1.0.3.tar.gz/HuTao agent-1.0.3/walnut_agent/common/do_monkeylog.py | import re
from walnut_agent.common.http_request import HttpRequest
from loguru import logger
from walnut_agent.common.do_db import DataOp, dbname
def listToStr(source_list: list) -> str:
    """Join the items of *source_list* into a comma-separated string.

    Used to flatten e.g. a list of Jira bug keys into a single DB column
    value: ``["CRAS-1", "CRAS-2"] -> "CRAS-1,CRAS-2"``.

    Fix: the original round-tripped through ``str(list)`` and then stripped
    every quote, space and bracket character, which also corrupted any
    element that itself contained a quote, space or bracket. Joining the
    items directly only inserts the separators.
    """
    return ",".join(str(item) for item in source_list)
def collect_ios_monkey_log(record_id):
    """Manually collect iOS monkey-test crash logs for one test record.

    Looks up the record, and -- if its run status allows manual collection --
    pulls crash logs from MAT, deduplicates them against Jira, files bugs,
    and writes the outcome (counts, bug keys, new run status) back to the DB.
    Returns a human-readable status message in all cases.

    Fixes vs. the original:
      * the SELECT omitted ``run_status`` and ``end_time``, both read from
        ``result`` below (KeyError at runtime);
      * the format arguments of the ineligible-status message were swapped
        (``record_id`` landed in the ip/device slot);
      * the DB update ran unconditionally, crashing on an undefined
        ``run_status`` in the ineligible branch, and after the ``with``
        block had already released the connection.
    """
    kv = {}
    sql = "SELECT appid,tester,begin_time,end_time,run_status,version_name,phone_name,yppno " \
          "FROM {0}.test_record_ios WHERE record_id={1}".format(dbname, record_id)
    with DataOp() as db:
        result = db.fetchOne(sql)
        # Referenced several times below, so bind it once.
        phone_name = result["phone_name"]
        # Only failed / running / manually-stopped / reconnect-failed /
        # stop-failed runs may be collected manually.
        if result["run_status"] not in [-1, 2, 3, 4, 6]:
            msg = "本次测试(ip:{0} record_id:{1})的运行状态不是运行失败、运行中、手动终止、重连失败、停止失败中的一种,无法手动收集日志!". \
                format(phone_name, record_id)
            logger.warning(msg)
        else:
            appid = result["appid"]
            version_name = result["version_name"]
            jira_op = JiraOp(appid, domain="mat")
            # Collect crash log details for this tester's time window.
            bug_infos = jira_op.collectLog(yppno=result["yppno"], begin_time=result["begin_time"],
                                           end_time=result["end_time"])
            bug_infos = jira_op.bugDistinct(bug_infos, version_name)
            report_info = jira_op.batchReport(bug_infos, version_name, result["tester"])
            # batchReport returns (ANR_count, crash_count, bug_numbers) on
            # success and an error-message string on failure.
            if isinstance(report_info, tuple):
                kv["ANR_count"] = report_info[0]
                kv["crash_count"] = report_info[1]
                bug_numbers = report_info[2]
                if len(bug_numbers) > 0:
                    # Flatten the list into a comma-separated DB column value.
                    kv["bug_numbers"] = listToStr(bug_numbers)
                    msg = "收集日志完毕,本次测试(设备[{0}] record_id:{1})提交的BUG编号如下:{2}".format(phone_name, record_id, bug_numbers)
                else:
                    msg = "收集日志完毕,本次测试(设备[{0}] record_id:{1})发现重复或无效的log但未发现新的log".format(phone_name, record_id)
                logger.info(msg)
                run_status = 5
            else:
                msg = "设备[{0}]提交BUG失败!{1}".format(phone_name, report_info)
                run_status = 7
                logger.error(msg)
            kv["run_status"] = run_status
            # NOTE(review): the SELECT reads test_record_ios but this updates
            # test_record (kept as-is) -- confirm which table is intended.
            db.update("test_record", kv, "record_id={0}".format(record_id))
    return msg
class JiraOp:
    """Thin client for the company SSO / MAT / Jira services.

    Performs an SSO login on construction and caches the resulting cookies;
    then exposes helpers to pull crash logs from MAT (``collectLog``), to
    deduplicate them against existing Jira issues (``bugDistinct``) and to
    file them as bugs (``batchReport`` / ``bugReport`` / ``createVersion``).
    """

    def __init__(self, appid, domain="jira"):
        # SSO login: fetch the login form to scrape the one-time "execution"
        # token, then POST the credentials.
        # NOTE(review): credentials are hard-coded below -- they should live
        # in configuration or a secret store, not in source control.
        self.hr = HttpRequest()
        url_execution = "https://sso.yupaopao.com/login?service=http://{0}.yupaopao.com/".format(domain)
        resp = self.hr.request("GET", url_execution).text
        execution = re.findall('<input type="hidden" name="execution"\n value="(.*?)"/>', resp)[0]
        header = {"content-type": "application/x-www-form-urlencoded"}
        sso_body = {"username": "huzhiming", "password": "Aa578231407", "_eventId": "submit", "geolocation": None,
                    "execution": execution}
        resp = self.hr.request("post", "https://sso.yupaopao.com/login", headers=header, data=sso_body)
        # Merge the cookies of the two login redirects into one jar.
        resp.history[0].cookies.update(resp.history[1].cookies)
        self.login_cookies = resp.history[0].cookies
        # Look up the app's Jira assignee / MAT app id once, for later calls.
        with DataOp() as db:
            self.app_info = db.fetchOne(
                "SELECT appid, assignee_ios, pkg_name, appid_mat FROM {0}.app_info WHERE appid={1};".format(
                    dbname, appid))

    def collectLog(self, yppno=None, begin_time=None, end_time=None, record_id=None) -> dict:
        """Fetch crash logs from MAT for a user/time window.

        Either pass ``record_id`` (the window and user are read from the DB)
        or all of ``yppno``/``begin_time``/``end_time`` explicitly.
        Returns ``{crash_reason: origin_log_text}``; on bad arguments an
        error-message string is returned instead (kept for caller
        compatibility, though callers should check the type).
        """
        import time
        bug_infos = {}
        appid_mat = self.app_info["appid_mat"]
        if record_id:
            with DataOp() as db:
                record_info = db.fetchOne("SELECT begin_time, end_time, yppno FROM {0}.test_record WHERE record_id={1}"
                                          ";".format(dbname, record_id))
            begin_time = record_info["begin_time"]
            end_time = record_info["end_time"]
            yppno = record_info["yppno"]
        elif not (yppno and begin_time and end_time):
            return "收集Crash日志时入参错误!"
        # MAT expects millisecond epoch timestamps.
        startDay = int(time.mktime(time.strptime(begin_time, "%Y-%m-%d %H:%M:%S")) * 1000)
        endDay = int(time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S")) * 1000)
        param = "particle=DAY&startDay={0}&endDay={1}&type=crash&idType=userId&appId={2}&platform=ios&userId={3}&articleNumber=2000". \
            format(startDay, endDay, appid_mat, yppno)
        crash_infos = self.hr.request("GET", "https://mat.yupaopao.com/userLog/list?{0}".format(param)).json()["result"]
        for crash_info in crash_infos:
            crash_reason = crash_info["reason"]
            param = "particle=DAY&startDay={0}&endDay={1}&type=crash&idType=userId&userId={2}&appId={3}&platform=ios&reason={4}&articleNumber=2000".format(
                startDay, endDay, yppno, appid_mat, crash_reason)
            # One uuid per crash reason is enough to fetch the full log.
            uuid = self.hr.request("GET", "https://mat.yupaopao.com/userLog/detail?{0}".format(param)).json()["result"][
                "result"][0]["uuid"]
            param = "startDay={0}&endDay={1}&uuid={2}".format(startDay, endDay, uuid)
            bug_infos[crash_reason] = \
                self.hr.request("GET", "https://mat.yupaopao.com/crashlog/detailById?{0}".format(param)).json()["result"][
                    "originLog"]
        return bug_infos

    def createVersion(self, version_name):
        """Create a Jira version named *version_name* in project CRAS."""
        data = {"name": version_name, "project": "CRAS", "expand": "operations"}
        resp = self.hr.request("POST", "http://jira.yupaopao.com/rest/api/2/version", cookies=self.login_cookies,
                               json=data).text
        return resp

    def batchReport(self, distinct_info: dict, version_name: str, tester: str):
        """File one Jira bug per entry of *distinct_info* ({title: log}).

        Returns ``(ANR_count, crash_count, bug_number)`` on success, or an
        error-message string on the first failed submission.
        NOTE(review): ANR_count is never incremented -- every entry is
        counted as a crash; confirm whether ANRs should be classified.
        """
        ANR_count = 0
        crash_count = 0
        bug_number = []
        # A string here means the SSO login failed earlier; bail out with it.
        if type(self.login_cookies) == str:
            return self.login_cookies
        for bug_title in distinct_info.keys():
            # Submit one bug per distinct crash title.
            assignee = self.app_info["assignee_ios"]
            report_info = self.bugReport(bug_title, distinct_info[bug_title], version_name, tester, assignee,
                                         issuetype="测试BUG-IOS")
            try:
                bug_number.append(re.findall('"key":"(.+?)"', report_info)[0])
            except IndexError:
                # No issue key in the response: map known Jira error texts to
                # friendlier messages, otherwise surface the raw response.
                if report_info.find("指定的的报告人不是用户") != -1:
                    msg = "提交BUG失败,请检查测试人名称!"
                elif report_info.find("不存在") != -1:
                    msg = "提交BUG失败,经办人{0}不存在,请检查经办人配置!".format(assignee)
                else:
                    msg = report_info
                logger.warning(msg)
                return msg
            else:
                crash_count += 1
        return ANR_count, crash_count, bug_number

    def bugReport(self, summary, description, versions, reporter, assignee, components="BUG", project_key="CRAS",
                  priority="P0", issuetype="测试BUG-Android"):
        """Create a single Jira issue; returns the raw response text.

        If the affected version does not exist yet, it is created and the
        submission is retried once.
        """
        # Jira caps the summary field length.
        if len(summary) >= 255:
            summary = summary[:255]
        jira_body = {"fields": {"summary": summary, "issuetype": {"name": issuetype}, "project": {"key": project_key},
                                "description": description, "assignee": {"name": assignee},
                                "priority": {"name": priority},
                                "components": [{"name": components}], "versions": [{"name": versions}],
                                "reporter": {"name": reporter}}}
        resp = self.hr.request("POST", "http://jira.yupaopao.com/rest/api/2/issue/", json=jira_body,
                               cookies=self.login_cookies).text
        # Unknown version: create it, then retry the submission once.
        if resp.find('"版本名 “{0}”无效"'.format(versions)) != -1:
            self.createVersion(versions)
            resp = self.hr.request("POST", "http://jira.yupaopao.com/rest/api/2/issue/", json=jira_body,
                                   cookies=self.login_cookies).text
        return resp

    def bugDistinct(self, bug_infos: dict, version_name):
        """Remove from *bug_infos* every title already filed in Jira for
        *version_name* (project CRAS); returns the filtered dict (mutated
        in place).

        Fix: the original indexed ``bug_infos.keys()`` by position, which
        raises TypeError in Python 3 (dict views are not subscriptable).
        We snapshot the titles into a list, which also makes mutating
        ``bug_infos`` during the loop safe.
        """
        bug_titles = list(bug_infos.keys())
        # Dedup key: bug title within the project + affected version.
        url = "http://jira.yupaopao.com/rest/api/2/search/?jql=project%20%3D%20CRAS%20AND%20affectedVersion%20%3D%20" \
              "{0}".format(version_name)
        resp = self.hr.request("GET", url, cookies=self.login_cookies).text
        jira_bug_titles = re.findall("\"summary\":\"(.+?)\"", resp)
        for bug_title in bug_titles:
            if bug_title in jira_bug_titles:
                bug_infos.pop(bug_title)
        return bug_infos
if __name__ == '__main__':
    # Ad-hoc manual smoke test: performs a real SSO login and live MAT API
    # calls for appid 1 over a fixed time window (not runnable offline).
    JiraOp(1, "mat").collectLog(111791024, "2022-03-02 00:00:00", "2022-03-03 16:00:00")
/IndiAvesBirdIdentification_ASaha-1.2-py3-none-any.whl/IndiAvesBirdIdentification/preprocess.py | from IndiAvesBirdIdentification.exim import Exim
import re
import demoji
import preprocessor as p
import spacy
nlp = spacy.load("en_core_web_sm")
p.set_options(p.OPT.EMOJI, p.OPT.MENTION, p.OPT.URL, p.OPT.SMILEY, p.OPT.NUMBER, p.OPT.HASHTAG)
class UserSentence:
    """Holds one user-supplied sentence (e.g. a tweet) plus its fully
    preprocessed form, produced at construction time by ``preprocess``.
    """

    # Class-level defaults; instance values are rebound in __init__.
    text = ""
    preprocessed_text = ""
    # Normalizations applied verbatim via substring replacement in
    # basic_preprocess (misspellings, regional spellings, spacing variants).
    # NOTE: this dict is shared by all instances (class attribute).
    spelling_corrections = {"grey": "gray", "pegion": "pigeon", "brested": "breasted", "serpant": "serpent",
                            "avedavat": "avadavat", "open billed stork": "asian openbill",
                            "secretary bird": "Secretarybird", "dollar bird": "dollarbird", "silver bill": "silverbill",
                            "eyes": "eye"}

    def get_bird_name_from_hashtag_4levels(self, hashtag_, all_birds_list):
        """Try to recover a known bird name from a concatenated hashtag
        (e.g. ``asianopenbill`` -> ``asian openbill``) by inserting up to
        four spaces at every possible position combination.

        Returns the matched name from *all_birds_list*, or None.
        """
        hashtag_ = hashtag_.lower()
        # Hashtags that map to a differently-worded canonical bird name.
        special_cases = {"greateradjutantstork": "greateradjutant"}
        for key in special_cases:
            if hashtag_ == key: hashtag_ = special_cases[key]
        # Candidate names share the first two and last two characters with
        # the hashtag -- cheap pre-filter before the expensive search.
        rel_birdnames = []
        for bird in all_birds_list:
            if bird[-2:] == hashtag_[-2:] and hashtag_[:2] == bird[:2]:
                rel_birdnames.append(bird)
        if len(rel_birdnames) > 0:
            # Exact single-word match needs no segmentation.
            if hashtag_ in rel_birdnames:
                return hashtag_
            # Brute-force up to 4 split points (5 segments); empty segments
            # are collapsed by the whitespace re.sub below, so fewer-word
            # names are covered too.
            segments = [0, 1, 2, 3]
            m_ = 2
            while m_ < len(hashtag_) - 2:
                segments[0] = hashtag_[:m_]
                n_ = 0
                while n_ < len(hashtag_[m_:]):
                    segments[1] = hashtag_[m_:][:n_]
                    part3 = hashtag_[m_:][n_:]
                    o_ = 0
                    while o_ < len(hashtag_[m_:][n_:]):
                        segments[2] = hashtag_[m_:][n_:][:o_]
                        p_ = 0
                        while p_ < len(hashtag_[m_:][n_:][o_:]):
                            segments[3] = hashtag_[m_:][n_:][o_:][:p_]
                            part4 = hashtag_[m_:][n_:][o_:][p_:]
                            prob_birdname = segments[0] + " " + segments[1] + " " + segments[2] + " " + segments[3] + " " + part4
                            # Collapse runs of spaces left by empty segments.
                            prob_birdname = re.sub(r' +', ' ', prob_birdname)
                            if prob_birdname in rel_birdnames:
                                return prob_birdname
                            p_ += 1
                        o_ += 1
                    n_ += 1
                m_ += 1
        return None

    def replace_underscores(self, sentence):
        """Lowercase *sentence* and turn underscores into spaces."""
        sentence = sentence.lower()
        sentence = sentence.replace("_", " ")
        return sentence

    def try_replacing_hashtags_mit_birdname(self, text, all_birds_list):
        """Replace each ``#hashtag`` whose body segments into a known bird
        name with that name; other hashtags are left untouched."""
        status = False
        hashtags = re.findall(r"#(\w+)", text)
        for hashtag in hashtags:
            segmented_ = self.get_bird_name_from_hashtag_4levels(hashtag, all_birds_list)
            if segmented_ is not None: text = text.replace("#" + hashtag, segmented_)
        return text

    def basic_preprocess(self, sentence):
        """Strip tweet artifacts (via the ``preprocessor`` package options
        set at module level), punctuation and byte-string prefixes; then
        apply the spelling_corrections substring replacements."""
        sentence = p.clean(sentence)
        # Drop a leading b' left over from a str()-ified bytes literal.
        if sentence[:2] == "b'":
            sentence = sentence[1:]
        sentence = re.sub(r'[^\w\s]', ' ', sentence)
        sentence = re.sub(r' +', ' ', sentence)
        sentence = sentence.strip()
        for key in self.spelling_corrections:
            if sentence.find(key) > -1:
                sentence = sentence.replace(key, self.spelling_corrections[key])
        return sentence

    def plural_nn_to_singular(self, sentence, birdnames_words):
        """Naively singularize plural nouns (trailing 's') that are not part
        of a bird name, using spaCy POS tags.

        NOTE(review): ``sentence.replace`` substitutes every occurrence of
        the token text, which can touch substrings of other words -- confirm
        this is acceptable for the expected inputs.
        """
        doc = nlp(sentence)
        for token in doc:
            if token.pos_ == "NOUN":
                if token.text[-1:] == "s" and token.text not in birdnames_words:
                    sentence = sentence.replace(token.text, token.text[:-1])
        return sentence

    def preprocess(self, text):
        """Run the full pipeline: emoji removal, underscore/lowercase
        normalization, hashtag-to-bird-name expansion, cleanup and
        singularization. Returns the processed text."""
        exim = Exim()
        text = self.remove_emojis(text)  # Removes Emojis
        text = self.replace_underscores(text)  # Replaces underscores
        text = self.try_replacing_hashtags_mit_birdname(text, exim.all_birds_list)
        text = self.basic_preprocess(text)
        text = self.plural_nn_to_singular(text, exim.birdnames_words)
        return text

    def __init__(self, text_):
        # Keep the raw input alongside its preprocessed form.
        self.text = text_
        self.preprocessed_text = self.preprocess(text_)
/FEADRE_AI-1.0.7.tar.gz/FEADRE_AI-1.0.7/FEADRE_AI/GLOBAL_LOG.py | import datetime
import logging
import logging.config
import os
from logging import handlers
from colorlog import colorlog
from FEADRE_AI.f_general import get_path_root
'''
在项目根目录建立 logs 文件夹
args=(os.path.abspath(os.getcwd() + "/info.log"),"midnight", 1, 6,'utf-8')
每一天午夜12点将当天的日志转存到一份新的日志文件中,并且加上时间戳后缀,最多保存6个文件
'''
def Singleton(cls):
    """Class decorator: cache and reuse a single instance of *cls*.

    The first call constructs the instance with whatever arguments were
    supplied; every subsequent call returns that same cached object and
    ignores its arguments.
    """
    _cache = {}

    def _get_instance(*args, **kwargs):
        try:
            # Fast path: instance already built.
            return _cache[cls]
        except KeyError:
            _cache[cls] = cls(*args, **kwargs)
            return _cache[cls]

    return _get_instance
def get_logger(name='root'):
    """Return a DEBUG-level logger configured with a colorized console
    handler and a daily-rotating file handler under <project root>/logs.

    Fix: ``logging.getLogger`` returns the same object for the same name,
    and the original re-attached both handlers on every call, so a second
    call duplicated every log line. We now return early if the logger is
    already configured.
    """
    logger = logging.getLogger(name)
    if logger.handlers:
        # Already configured on a previous call -- reuse as-is.
        return logger
    logger.setLevel(logging.DEBUG)
    # Console colors per level. colorlog accepts black, red, green, yellow,
    # blue, purple, cyan, white, with fg_/bg_ prefixes for fore/background.
    log_colors_config = {
        'DEBUG': 'blue',
        'INFO': 'cyan',  # cyan instead of green for readability
        'WARNING': 'yellow',
        'ERROR': 'red',
        'CRITICAL': 'red,bg_white',
    }
    formatter = colorlog.ColoredFormatter(
        '%(log_color)s[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s',
        log_colors=log_colors_config)  # log line format
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    # TimedRotatingFileHandler rotates per `when` unit and keeps backupCount
    # old files: S=seconds, M=minutes, H=hours, D=days, W=weekly
    # (interval==0 means Monday), midnight=daily at 00:00.
    path_root = os.path.join(get_path_root(), 'logs')
    if not os.path.exists(path_root):
        os.makedirs(path_root)
    th = handlers.TimedRotatingFileHandler(
        filename=os.path.join(path_root, datetime.datetime.now().strftime('%Y_%m_%d') + '.log'),
        when='D',
        backupCount=5, encoding='utf-8')
    # NOTE(review): the file handler reuses the colorized formatter, so the
    # log file contains ANSI escape codes -- confirm this is intended.
    th.setFormatter(formatter)
    logger.addHandler(th)
    return logger
# Module-level shared logger instance; importing modules use `flog` directly.
flog = get_logger(__name__)  # returns the logger registered under __name__ with the default level, handlers and formatter applied
# flog=Log()
# flog1=Log()
# print(id(flog))
# print(id(flog1))
if __name__ == '__main__':
    # Manual smoke test: exercises every log level and lazy %-style argument
    # formatting; output goes to both the console and the rotating log file.
    # print(Colored.red('I am red!'))
    # print(Colored.green('I am gree!'))
    # print(Colored.yellow('I am yellow!'))
    # print(Colored.blue('I am blue!'))
    # print(Colored.magenta('I am magenta!'))
    # print(Colored.cyan('I am cyan!'))
    # print(Colored.white('I am white!'))
    # print(Colored.white_green('I am white green!'))
    flog.debug('一个连接只需一个 %s', get_path_root)
    flog.debug('多个连接无需 %s%s', [1, 2, {123}], get_path_root())
    flog.info(123)
    flog.warning('多个连接无需 %s%s', [1, 2, {123}], get_path_root())
    flog.error(123)
    flog.critical(123)
    pass
/GaussianBinomial-0.1.tar.gz/GaussianBinomial-0.1/distributions/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """

    def __init__(self, prob: float = .5, size: int = 20):
        self.n = size
        self.p = prob
        # Base class stores mean/stdev computed from the initial p and n.
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self) -> float:
        """Calculate the mean (n * p) from the current p and n,
        store it on self.mean and return it.

        Args:
            None

        Returns:
            float: mean of the distribution
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self) -> float:
        """Calculate the standard deviation sqrt(n * p * (1 - p)) from the
        current p and n, store it on self.stdev and return it.

        Args:
            None

        Returns:
            float: standard deviation of the distribution
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev

    def replace_stats_with_data(self) -> None:
        """Re-estimate p and n from self.data (a list of 0/1 outcomes,
        loaded by the base class) and refresh mean and stdev in place.

        Args:
            None

        Returns:
            None: updates self.n, self.p, self.mean and self.stdev
        """
        self.n = len(self.data)
        # Fraction of successes (1s) among the trials.
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()

    def plot_bar(self) -> None:
        """Produce a two-bar chart of the expected counts of outcome 0
        vs outcome 1 using matplotlib.

        Args:
            None

        Returns:
            None
        """
        plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k: int) -> float:
        """Probability mass function of the binomial distribution:
        C(n, k) * p^k * (1-p)^(n-k).

        Args:
            k (int): number of successes for which to evaluate the pmf

        Returns:
            float: probability of exactly k successes in n trials
        """
        # Binomial coefficient C(n, k).
        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)
        return a * b

    def plot_bar_pdf(self):
        """Plot the pmf over k = 0..n as a bar chart and show it.

        Args:
            None

        Returns:
            list: x values (k) for the pdf plot
            list: y values (pmf) for the pdf plot
        """
        x = []
        y = []

        # calculate the x values to visualize
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))

        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()

        return x, y

    def __add__(self, other: 'Binomial') -> 'Binomial':
        """Add together two Binomial distributions with equal p
        (sum of trial counts; same success probability).

        Args:
            other (Binomial): Binomial instance

        Returns:
            Binomial: Binomial distribution with n = self.n + other.n

        Raises:
            AssertionError: if the two distributions have different p
        """
        try:
            assert self.p == other.p, 'p values are not equal'
        except AssertionError as error:
            raise

        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()

        return result

    def __repr__(self) -> str:
        """Output the characteristics of the Binomial instance.

        Args:
            None

        Returns:
            string: characteristics of the Binomial
        """
        return "mean {}, standard deviation {}, p {}, n {}".\
        format(self.mean, self.stdev, self.p, self.n)
/DTSR-0.2.0.tar.gz/DTSR-0.2.0/dtsr/dtsrbayes.py | import os
from collections import defaultdict
import time
import pandas as pd
pd.options.mode.chained_assignment = None
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
from .formula import *
from .util import *
from .dtsrbase import DTSR
from .kwargs import DTSRBAYES_INITIALIZATION_KWARGS
import edward as ed
from edward.models import Empirical, Exponential, Gamma, MultivariateNormalTriL, Normal, SinhArcsinh
######################################################
#
# BAYESIAN IMPLEMENTATION OF DTSR
#
######################################################
class DTSRBayes(DTSR):
_INITIALIZATION_KWARGS = DTSRBAYES_INITIALIZATION_KWARGS
_doc_header = """
A DTSR implementation fitted using Bayesian inference.
"""
_doc_args = DTSR._doc_args
_doc_kwargs = DTSR._doc_kwargs
_doc_kwargs += '\n' + '\n'.join([' ' * 8 + ':param %s' % x.key + ': ' + '; '.join([x.dtypes_str(), x.descr]) + ' **Default**: ``%s``.' % (x.default_value if not isinstance(x.default_value, str) else "'%s'" % x.default_value) for x in _INITIALIZATION_KWARGS])
__doc__ = _doc_header + _doc_args + _doc_kwargs
#####################################################
#
# Native methods
#
#####################################################
    def __init__(self, form_str, X, y, **kwargs):
        """Initialize a Bayesian DTSR model.

        ``form_str``, ``X`` and ``y`` are forwarded to the ``DTSR`` base
        initializer together with ``**kwargs``, which may mix base-class and
        ``DTSRBayes``-specific keyword arguments.
        """
        super(DTSRBayes, self).__init__(
            form_str,
            X,
            y,
            **kwargs
        )

        # Pop this subclass's own kwargs; whatever remains must be a valid
        # base-class (DTSR) kwarg, otherwise it is a typo by the caller.
        for kwarg in DTSRBayes._INITIALIZATION_KWARGS:
            setattr(self, kwarg.key, kwargs.pop(kwarg.key, kwarg.default_value))

        kwarg_keys = [x.key for x in DTSR._INITIALIZATION_KWARGS]
        for kwarg_key in kwargs:
            if kwarg_key not in kwarg_keys:
                raise TypeError('__init__() got an unexpected keyword argument %s' %kwarg_key)

        # Sampling-based (non-variational) inference draws one sample per
        # training step, so n_samples is forced to n_iter * minibatch count.
        if not self.variational():
            if self.n_samples is not None:
                sys.stderr.write('Parameter n_samples being overridden for sampling optimization\n')
            self.n_samples = self.n_iter*self.n_train_minibatch

        # Fitting without any declared priors amounts to MLE, which only the
        # variational path supports.
        if not (self.declare_priors_fixef or self.declare_priors_ranef):
            assert self.variational(), 'Only variational inference can be used to fit parameters without declaring priors'

        self._initialize_metadata()

        self.build()
    def _initialize_metadata(self):
        """Extend base-class metadata with Bayesian-specific state: prior /
        posterior scale defaults derived from the training response, the
        prior->posterior inference map, and the precomputed TF constants for
        every prior and posterior-initialization standard deviation.
        """
        super(DTSRBayes, self)._initialize_metadata()

        # Columns reported by the parameter summary table (posterior mean
        # plus the central 95% credible interval bounds).
        self.parameter_table_columns = ['Mean', '2.5%', '97.5%']

        # Maps each prior RV to its posterior RV (consumed by ed.inference),
        # and each prior RV to its point summary for MAP-style extraction.
        self.inference_map = {}
        self.MAP_map = {}
        # Data-derived defaults: center the intercept on the training mean
        # and scale prior widths by the training response SD.
        if self.intercept_init is None:
            self.intercept_init = self.y_train_mean
        if self.intercept_prior_sd is None:
            self.intercept_prior_sd = self.y_train_sd * self.prior_sd_scaling_coefficient
        if self.coef_prior_sd is None:
            self.coef_prior_sd = self.y_train_sd * self.prior_sd_scaling_coefficient
        if self.y_sd_init is None:
            self.y_sd_init = self.y_train_sd
        if self.y_sd_prior_sd is None:
            self.y_sd_prior_sd = self.y_train_sd * self.y_sd_prior_sd_scaling_coefficient

        if self.inference_name == 'MetropolisHastings':
            # MH additionally needs a proposal distribution per latent RV.
            self.proposal_map = {}
            if self.mh_proposal_sd is None:
                self.mh_proposal_sd = self.y_train_sd * self.prior_sd_scaling_coefficient

        with self.sess.as_default():
            with self.sess.graph.as_default():
                # For each family of parameters, precompute: the prior SD as
                # a TF constant, its softplus-inverse (the unconstrained
                # space in which scale variables are trained), and the
                # posterior initialization SD (prior SD shrunk by
                # posterior_to_prior_sd_ratio) in both spaces.
                self.intercept_prior_sd_tf = tf.constant(float(self.intercept_prior_sd), dtype=self.FLOAT_TF)
                self.intercept_prior_sd_unconstrained = tf.contrib.distributions.softplus_inverse(self.intercept_prior_sd_tf)
                self.intercept_posterior_sd_init = self.intercept_prior_sd_tf * self.posterior_to_prior_sd_ratio
                self.intercept_posterior_sd_init_unconstrained = tf.contrib.distributions.softplus_inverse(self.intercept_posterior_sd_init)

                self.coef_prior_sd_tf = tf.constant(float(self.coef_prior_sd), dtype=self.FLOAT_TF)
                self.coef_prior_sd_unconstrained = tf.contrib.distributions.softplus_inverse(self.coef_prior_sd_tf)
                self.coef_posterior_sd_init = self.coef_prior_sd_tf * self.posterior_to_prior_sd_ratio
                self.coef_posterior_sd_init_unconstrained = tf.contrib.distributions.softplus_inverse(self.coef_posterior_sd_init)

                self.irf_param_prior_sd_tf = tf.constant(float(self.irf_param_prior_sd), dtype=self.FLOAT_TF)
                self.irf_param_prior_sd_unconstrained = tf.contrib.distributions.softplus_inverse(self.irf_param_prior_sd_tf)
                self.irf_param_posterior_sd_init = self.irf_param_prior_sd_tf * self.posterior_to_prior_sd_ratio
                self.irf_param_posterior_sd_init_unconstrained = tf.contrib.distributions.softplus_inverse(self.irf_param_posterior_sd_init)

                self.y_sd_init_tf = tf.constant(float(self.y_sd_init), dtype=self.FLOAT_TF)
                self.y_sd_init_unconstrained = tf.contrib.distributions.softplus_inverse(self.y_sd_init_tf)
                self.y_sd_prior_sd_tf = tf.constant(float(self.y_sd_prior_sd), dtype=self.FLOAT_TF)
                self.y_sd_prior_sd_unconstrained = tf.contrib.distributions.softplus_inverse(self.y_sd_prior_sd_tf)
                self.y_sd_posterior_sd_init = self.y_sd_prior_sd_tf * self.posterior_to_prior_sd_ratio
                self.y_sd_posterior_sd_init_unconstrained = tf.contrib.distributions.softplus_inverse(self.y_sd_posterior_sd_init)

                self.y_skewness_prior_sd_tf = tf.constant(float(self.y_skewness_prior_sd), dtype=self.FLOAT_TF)
                self.y_skewness_prior_sd_unconstrained = tf.contrib.distributions.softplus_inverse(self.y_skewness_prior_sd_tf)
                self.y_skewness_posterior_sd_init = self.y_skewness_prior_sd_tf * self.posterior_to_prior_sd_ratio
                self.y_skewness_posterior_sd_init_unconstrained = tf.contrib.distributions.softplus_inverse(self.y_skewness_posterior_sd_init)

                self.y_tailweight_prior_sd_tf = tf.constant(float(self.y_tailweight_prior_sd), dtype=self.FLOAT_TF)
                self.y_tailweight_prior_sd_unconstrained = tf.contrib.distributions.softplus_inverse(self.y_tailweight_prior_sd_tf)
                self.y_tailweight_posterior_sd_init = self.y_tailweight_prior_sd_tf * self.posterior_to_prior_sd_ratio
                self.y_tailweight_posterior_sd_init_unconstrained = tf.contrib.distributions.softplus_inverse(self.y_tailweight_posterior_sd_init)

        # Alias prior widths for use in multivariate mode
        self.intercept_joint_sd = self.intercept_prior_sd
        self.coef_joint_sd = self.coef_prior_sd
        self.irf_param_joint_sd = self.irf_param_prior_sd
        self.ranef_to_fixef_joint_sd_ratio = self.ranef_to_fixef_prior_sd_ratio
def _pack_metadata(self):
md = super(DTSRBayes, self)._pack_metadata()
for kwarg in DTSRBayes._INITIALIZATION_KWARGS:
md[kwarg.key] = getattr(self, kwarg.key)
return md
def _unpack_metadata(self, md):
super(DTSRBayes, self)._unpack_metadata(md)
for kwarg in DTSRBayes._INITIALIZATION_KWARGS:
setattr(self, kwarg.key, md.pop(kwarg.key, kwarg.default_value))
if len(md) > 0:
sys.stderr.write('Saved model contained unrecognized attributes %s which are being ignored\n' %sorted(list(md.keys())))
######################################################
#
# Network Initialization
#
######################################################
    def initialize_intercept(self, ran_gf=None):
        """Construct the intercept random variable(s).

        With ``ran_gf is None`` a single fixed intercept is built, centered
        on ``intercept_init``; otherwise one zero-centered offset per level
        of the random grouping factor *ran_gf* is built (one level dropped,
        presumably as the reference level -- TODO confirm).

        Variational mode builds a Normal posterior with trainable loc/scale
        (scale kept positive via softplus); sampling mode builds an
        Empirical posterior over ``n_samples`` draws, plus a Normal proposal
        when the inference is Metropolis-Hastings. Priors are only declared
        when the corresponding ``declare_priors_*`` flag is set; otherwise
        the posterior itself is used directly.

        Returns:
            (intercept RV, point-summary tensor); the summary is also
            recorded in ``self.MAP_map``.
        """
        with self.sess.as_default():
            with self.sess.graph.as_default():
                if ran_gf is None:
                    if self.variational():
                        # Posterior distribution
                        intercept_q_loc = tf.Variable(
                            tf.random_normal(
                                [],
                                mean=self.intercept_init_tf,
                                stddev=self.init_sd,
                                dtype=self.FLOAT_TF
                            ),
                            name='intercept_q_loc'
                        )

                        # Scale variable lives in unconstrained (softplus-
                        # inverse) space; softplus below maps it positive.
                        intercept_q_scale = tf.Variable(
                            tf.random_normal(
                                [],
                                mean=self.intercept_posterior_sd_init_unconstrained,
                                stddev=self.init_sd,
                                dtype=self.FLOAT_TF
                            ),
                            name='intercept_q_scale'
                        )

                        intercept_q = Normal(
                            loc=intercept_q_loc,
                            scale=tf.nn.softplus(intercept_q_scale),
                            name='intercept_q'
                        )

                        intercept_summary = intercept_q.mean()

                        if self.declare_priors_fixef:
                            # Prior distribution
                            intercept = Normal(
                                sample_shape=[],
                                loc=self.intercept_init_tf,
                                scale=self.intercept_prior_sd_tf,
                                name='intercept'
                            )
                            self.inference_map[intercept] = intercept_q
                        else:
                            # No prior: optimize the posterior directly.
                            intercept = intercept_q
                    else:
                        # Prior distribution
                        intercept = Normal(
                            sample_shape=[],
                            loc=self.intercept_init_tf,
                            scale=self.intercept_prior_sd_tf,
                            name='intercept'
                        )

                        # Posterior distribution
                        intercept_q_samples = tf.Variable(
                            tf.ones((self.n_samples), dtype=self.FLOAT_TF) * self.intercept_init_tf,
                            name='intercept_q_samples'
                        )

                        intercept_q = Empirical(
                            params=intercept_q_samples,
                            name='intercept_q'
                        )

                        if self.inference_name == 'MetropolisHastings':
                            # Proposal distribution
                            intercept_proposal = Normal(
                                loc=intercept,
                                scale=self.mh_proposal_sd,
                                name='intercept_proposal'
                            )
                            self.proposal_map[intercept] = intercept_proposal

                        # Summary = most recent sample in the chain.
                        intercept_summary = intercept_q.params[self.global_batch_step - 1]

                        self.inference_map[intercept] = intercept_q
                else:
                    # Random intercepts: one offset per (non-reference)
                    # level of the grouping factor.
                    rangf_n_levels = self.rangf_n_levels[self.rangf.index(ran_gf)] - 1
                    if self.variational():
                        # Posterior distribution
                        intercept_q_loc = tf.Variable(
                            tf.random_normal(
                                [rangf_n_levels],
                                stddev=self.init_sd,
                                dtype=self.FLOAT_TF
                            ),
                            name='intercept_q_loc_by_%s' % ran_gf
                        )

                        intercept_q_scale = tf.Variable(
                            tf.random_normal(
                                [rangf_n_levels],
                                mean=self.intercept_posterior_sd_init_unconstrained,
                                stddev=self.init_sd,
                                dtype=self.FLOAT_TF
                            ),
                            name='intercept_q_scale_by_%s' % ran_gf
                        )

                        # Random-effect scales are shrunk relative to the
                        # fixed-effect scale by ranef_to_fixef_prior_sd_ratio.
                        intercept_q = Normal(
                            loc=intercept_q_loc,
                            scale=tf.nn.softplus(intercept_q_scale) * self.ranef_to_fixef_prior_sd_ratio,
                            name='intercept_q_by_%s' % ran_gf
                        )

                        intercept_summary = intercept_q.mean()

                        if self.declare_priors_ranef:
                            # Prior distribution
                            intercept = Normal(
                                sample_shape=[rangf_n_levels],
                                loc=0.,
                                scale=self.intercept_prior_sd_tf * self.ranef_to_fixef_prior_sd_ratio,
                                name='intercept_by_%s' % ran_gf
                            )
                            self.inference_map[intercept] = intercept_q
                        else:
                            intercept = intercept_q
                    else:
                        # Prior distribution
                        intercept = Normal(
                            sample_shape=[rangf_n_levels],
                            loc=0.,
                            scale=self.intercept_prior_sd_tf * self.ranef_to_fixef_prior_sd_ratio,
                            name='intercept_by_%s' % ran_gf
                        )

                        # Posterior distribution
                        intercept_q_ran_samples = tf.Variable(
                            tf.zeros((self.n_samples, rangf_n_levels), dtype=self.FLOAT_TF),
                            name='intercept_q_by_%s_samples' % ran_gf
                        )
                        intercept_q = Empirical(
                            params=intercept_q_ran_samples,
                            name='intercept_q_by_%s' % ran_gf
                        )

                        if self.inference_name == 'MetropolisHastings':
                            # Proposal distribution
                            intercept_proposal = Normal(
                                loc=intercept,
                                scale=self.mh_proposal_sd,
                                name='intercept_proposal_by_%s' % ran_gf
                            )
                            self.proposal_map[intercept] = intercept_proposal

                        intercept_summary = intercept_q.params[self.global_batch_step - 1]

                        self.inference_map[intercept] = intercept_q

                self.MAP_map[intercept] = intercept_summary

                return intercept, intercept_summary
def initialize_coefficient(self, coef_ids=None, ran_gf=None):
    """
    Build prior and posterior graph nodes for regression coefficients.

    For variational inference, the posterior is a Normal with trainable
    loc/scale (scale kept positive via softplus); for MCMC, the posterior is
    an Empirical distribution over ``self.n_samples`` retained samples.

    :param coef_ids: list of coefficient names; ``None`` means all fixed
        coefficient names (``self.coef_names``).
    :param ran_gf: name of a random grouping factor, or ``None`` for the
        population-level (fixed) coefficients.
    :return: 2-tuple ``(coefficient, coefficient_summary)``; the random
        variable used in the model graph and a deterministic summary node
        (posterior mean for VI, most recent sample for MCMC).
    """
    if coef_ids is None:
        coef_ids = self.coef_names
    with self.sess.as_default():
        with self.sess.graph.as_default():
            if ran_gf is None:
                if self.variational():
                    # Posterior distribution
                    coefficient_q_loc = tf.Variable(
                        tf.random_normal(
                            [len(coef_ids)],
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='coefficient_q_loc'
                    )
                    coefficient_q_scale = tf.Variable(
                        tf.random_normal(
                            [len(coef_ids)],
                            mean=self.coef_posterior_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='coefficient_q_scale'
                    )
                    # softplus keeps the posterior scale strictly positive
                    coefficient_q = Normal(
                        loc=coefficient_q_loc,
                        scale=tf.nn.softplus(coefficient_q_scale),
                        name='coefficient_q'
                    )
                    coefficient_summary = coefficient_q.mean()
                    if self.declare_priors_fixef:
                        # Prior distribution
                        coefficient = Normal(
                            sample_shape=[len(coef_ids)],
                            loc=0.,
                            scale=self.coef_prior_sd_tf,
                            name='coefficient'
                        )
                        self.inference_map[coefficient] = coefficient_q
                    else:
                        # No explicit prior: model uses the posterior node directly
                        coefficient = coefficient_q
                else:
                    # MCMC inference
                    # Prior distribution
                    coefficient = Normal(
                        sample_shape=[len(coef_ids)],
                        loc=0.,
                        scale=self.coef_prior_sd_tf,
                        name='coefficient'
                    )
                    # Posterior distribution
                    coefficient_q_samples = tf.Variable(
                        tf.zeros((self.n_samples, len(coef_ids)), dtype=self.FLOAT_TF),
                        name='coefficient_q_samples'
                    )
                    coefficient_q = Empirical(
                        params=coefficient_q_samples,
                        name='coefficient_q'
                    )
                    if self.inference_name == 'MetropolisHastings':
                        # Proposal distribution
                        coefficient_proposal = Normal(
                            loc=coefficient,
                            scale=self.mh_proposal_sd,
                            name='coefficient_proposal'
                        )
                        self.proposal_map[coefficient] = coefficient_proposal
                    # Summary: the sample stored at the current batch step
                    coefficient_summary = coefficient_q.params[self.global_batch_step - 1]
                    self.inference_map[coefficient] = coefficient_q
            else:
                # Random (by-group) coefficients; one level is dropped for identifiability
                rangf_n_levels = self.rangf_n_levels[self.rangf.index(ran_gf)] - 1
                if self.variational():
                    # Posterior distribution
                    coefficient_q_loc = tf.Variable(
                        tf.random_normal(
                            [rangf_n_levels, len(coef_ids)],
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='coefficient_q_loc_by_%s' % ran_gf
                    )
                    coefficient_q_scale = tf.Variable(
                        tf.random_normal(
                            [rangf_n_levels, len(coef_ids)],
                            mean=self.coef_posterior_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='coefficient_q_scale_by_%s' % ran_gf
                    )
                    # Random-effect scales are shrunk relative to the fixed effects
                    coefficient_q = Normal(
                        loc=coefficient_q_loc,
                        scale=tf.nn.softplus(coefficient_q_scale) * self.ranef_to_fixef_prior_sd_ratio,
                        name='coefficient_q_by_%s' % ran_gf
                    )
                    coefficient_summary = coefficient_q.mean()
                    if self.declare_priors_ranef:
                        # Prior distribution
                        coefficient = Normal(
                            sample_shape=[rangf_n_levels, len(coef_ids)],
                            loc=0.,
                            scale=self.coef_prior_sd_tf * self.ranef_to_fixef_prior_sd_ratio,
                            name='coefficient_by_%s' % ran_gf
                        )
                        self.inference_map[coefficient] = coefficient_q
                    else:
                        coefficient = coefficient_q
                else:
                    # MCMC inference
                    # Prior distribution
                    coefficient = Normal(
                        sample_shape=[rangf_n_levels, len(coef_ids)],
                        loc=0.,
                        scale=self.coef_prior_sd_tf * self.ranef_to_fixef_prior_sd_ratio,
                        name='coefficient_by_%s' % ran_gf
                    )
                    # Posterior distribution
                    coefficient_q = Empirical(
                        params=tf.Variable(
                            tf.zeros(
                                (self.n_samples, rangf_n_levels, len(coef_ids)),
                                dtype=self.FLOAT_TF
                            ),
                            name='coefficient_q_by_%s_samples' % ran_gf
                        ),
                        name='coefficient_q_by_%s' % ran_gf
                    )
                    if self.inference_name == 'MetropolisHastings':
                        # Proposal distribution
                        coefficient_proposal = Normal(
                            loc=coefficient,
                            scale=self.mh_proposal_sd,
                            name='coefficient_proposal_by_%s' % ran_gf
                        )
                        self.proposal_map[coefficient] = coefficient_proposal
                    coefficient_summary = coefficient_q.params[self.global_batch_step - 1]
                    self.inference_map[coefficient] = coefficient_q
            # Register the summary so MAP graphs can substitute it for the RV
            self.MAP_map[coefficient] = coefficient_summary
            return coefficient, coefficient_summary
def initialize_irf_param_unconstrained(self, param_name, ids, mean=0., ran_gf=None):
    """
    Build prior and posterior nodes for an unconstrained IRF parameter.

    :param param_name: name of the IRF parameter (used in TF node names).
    :param ids: list of IRF identifiers sharing this parameter.
    :param mean: prior/initialization mean for the fixed-effect version.
        NOTE(review): the random-effect branch below initializes at 0.
        rather than ``mean`` — presumably because random offsets are
        centered on the fixed effect; confirm against callers.
    :param ran_gf: name of a random grouping factor, or ``None`` for the
        fixed (population-level) parameter.
    :return: 2-tuple ``(param, param_summary)``; the random variable and a
        deterministic summary node (posterior mean for VI, current MCMC
        sample otherwise).
    """
    with self.sess.as_default():
        with self.sess.graph.as_default():
            if ran_gf is None:
                if self.variational():
                    # Posterior distribution
                    param_q_loc = tf.Variable(
                        tf.random_normal(
                            [1, len(ids)],
                            mean=mean,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name=sn('%s_q_loc_%s' % (param_name, '-'.join(ids)))
                    )
                    param_q_scale = tf.Variable(
                        tf.random_normal(
                            [1, len(ids)],
                            mean=self.irf_param_posterior_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name=sn('%s_q_scale_%s' % (param_name, '-'.join(ids)))
                    )
                    param_q = Normal(
                        loc=param_q_loc,
                        scale=tf.nn.softplus(param_q_scale),
                        name=sn('%s_q_%s' % (param_name, '-'.join(ids)))
                    )
                    param_summary = param_q.mean()
                    if self.declare_priors_fixef:
                        # Prior distribution
                        param = Normal(
                            loc=mean,
                            scale=self.irf_param_prior_sd,
                            name=sn('%s_%s' % (param_name, '-'.join(ids)))
                        )
                        self.inference_map[param] = param_q
                    else:
                        param = param_q
                else:
                    # MCMC inference
                    # Prior distribution
                    param = Normal(
                        loc=mean,
                        scale=self.irf_param_prior_sd,
                        name=sn('%s_%s' % (param_name, '-'.join(ids)))
                    )
                    # Posterior distribution
                    params_q_samples = tf.Variable(
                        tf.zeros((self.n_samples, 1, len(ids)), dtype=self.FLOAT_TF),
                        name=sn('%s_q_%s_samples' % (param_name, '-'.join(ids)))
                    )
                    # NOTE(review): the Empirical node reuses the "_samples"
                    # suffix in its name — looks like a copy-paste; verify
                    # node-name uniqueness requirements before changing.
                    param_q = Empirical(
                        params=params_q_samples,
                        name=sn('%s_q_%s_samples' % (param_name, '-'.join(ids)))
                    )
                    if self.inference_name == 'MetropolisHastings':
                        # Proposal distribution
                        L_proposal = Normal(
                            loc=param,
                            scale=self.mh_proposal_sd,
                            name=sn('%s_proposal_%s' % (param_name, '-'.join(ids)))
                        )
                        self.proposal_map[param] = L_proposal
                    param_summary = param_q.params[self.global_batch_step - 1]
                    self.inference_map[param] = param_q
            else:
                # Random (by-group) offsets; one level dropped for identifiability
                rangf_n_levels = self.rangf_n_levels[self.rangf.index(ran_gf)] - 1
                if self.variational():
                    # Posterior distribution
                    param_q_loc = tf.Variable(
                        tf.random_normal(
                            [rangf_n_levels, len(ids)],
                            mean=0.,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name=sn('%s_q_loc_%s_by_%s' % (param_name, '-'.join(ids), ran_gf))
                    )
                    param_q_scale = tf.Variable(
                        tf.random_normal(
                            [rangf_n_levels, len(ids)],
                            mean=self.irf_param_posterior_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name=sn('%s_q_scale_%s_by_%s' % (param_name, '-'.join(ids), ran_gf))
                    )
                    param_q = Normal(
                        loc=param_q_loc,
                        scale=tf.nn.softplus(param_q_scale) * self.ranef_to_fixef_prior_sd_ratio,
                        name=sn('%s_q_%s_by_%s' % (param_name, '-'.join(ids), ran_gf))
                    )
                    param_summary = param_q.mean()
                    if self.declare_priors_ranef:
                        # Prior distribution
                        param = Normal(
                            sample_shape=[rangf_n_levels, len(ids)],
                            loc=0.,
                            scale=self.irf_param_prior_sd * self.ranef_to_fixef_prior_sd_ratio,
                            name='%s_by_%s' % (param_name, ran_gf)
                        )
                        self.inference_map[param] = param_q
                    else:
                        param = param_q
                else:
                    # MCMC inference
                    # Prior distribution
                    param = Normal(
                        sample_shape=[rangf_n_levels, len(ids)],
                        loc=0.,
                        scale=self.irf_param_prior_sd * self.ranef_to_fixef_prior_sd_ratio,
                        name='%s_by_%s' % (param_name, ran_gf)
                    )
                    # Posterior distribution
                    param_q_samples = tf.Variable(
                        tf.zeros((self.n_samples, rangf_n_levels, len(ids)), dtype=self.FLOAT_TF),
                        name=sn('%s_q_%s_by_%s_samples' % (param_name, '-'.join(ids), ran_gf))
                    )
                    param_q = Empirical(
                        params=param_q_samples,
                        name=sn('%s_q_%s_by_%s' % (param_name, '-'.join(ids), ran_gf))
                    )
                    param_summary = param_q.params[self.global_batch_step - 1]
                    if self.inference_name == 'MetropolisHastings':
                        # Proposal distribution
                        param_proposal = Normal(
                            loc=param,
                            scale=self.mh_proposal_sd,
                            name=sn('%s_proposal_%s_by_%s' % (param_name, '-'.join(ids), ran_gf))
                        )
                        self.proposal_map[param] = param_proposal
                    self.inference_map[param] = param_q
            # Register the summary so MAP graphs can substitute it for the RV
            self.MAP_map[param] = param_summary
            return param, param_summary
def initialize_joint_distribution(self, means, sds, ran_gf=None):
    """
    Build a joint (multivariate normal) prior/posterior over a parameter
    vector, allowing correlated parameters.

    :param means: 1-D tensor/array of prior means; its length fixes the
        dimensionality of the joint.
    :param sds: 1-D tensor of prior standard deviations (used as the
        diagonal of the initial Cholesky factor).
        NOTE(review): in the variational branch ``sds`` is scaled in place
        by ``self.ranef_to_fixef_prior_sd_ratio`` for random effects, but
        the MCMC branch applies no such scaling — confirm intended.
    :param ran_gf: name of a random grouping factor, or ``None`` for the
        fixed-effect joint.
    :return: 2-tuple ``(joint, joint_summary)``.
    """
    with self.sess.as_default():
        with self.sess.graph.as_default():
            dim = int(means.shape[0])
            if self.variational():
                # Posterior distribution
                joint_q_loc = tf.Variable(
                    tf.random_normal(
                        [dim],
                        mean=means,
                        stddev=self.init_sd,
                        dtype=self.FLOAT_TF
                    ),
                    name='joint_q_loc' if ran_gf is None else 'joint_q_loc_by_%s' %ran_gf
                )
                # Construct cholesky decomposition of initial covariance using sds, then use for initialization
                n_scale = int(dim * (dim + 1) / 2)
                if ran_gf is not None:
                    sds *= self.ranef_to_fixef_prior_sd_ratio
                cholesky = tf.diag(sds)
                # Flat indices of the lower triangle of a (dim, dim) matrix
                tril_ix = np.ravel_multi_index(
                    np.tril_indices(dim),
                    (dim, dim)
                )
                scale_init = tf.gather(tf.reshape(cholesky, [dim * dim]), tril_ix)
                scale_posterior_init = scale_init * self.posterior_to_prior_sd_ratio
                joint_q_scale = tf.Variable(
                    tf.random_normal(
                        [n_scale],
                        mean=scale_posterior_init,
                        stddev=self.init_sd,
                        dtype=self.FLOAT_TF
                    ),
                    name='joint_q_scale' if ran_gf is None else 'joint_q_scale_by_%s' %ran_gf
                )
                joint_q = MultivariateNormalTriL(
                    loc=joint_q_loc,
                    scale_tril=tf.contrib.distributions.fill_triangular(joint_q_scale),
                    name='joint_q' if ran_gf is None else 'joint_q_by_%s' %ran_gf
                )
                joint_summary = joint_q.mean()
                if (ran_gf is None and self.declare_priors_fixef) or (ran_gf is not None and self.declare_priors_ranef):
                    # Prior distribution
                    joint = MultivariateNormalTriL(
                        loc=means,
                        scale_tril=tf.contrib.distributions.fill_triangular(scale_init),
                        name='joint' if ran_gf is None else 'joint_by_%s' %ran_gf
                    )
                    self.inference_map[joint] = joint_q
                else:
                    joint = joint_q
            else:
                # MCMC inference
                # Construct cholesky decomposition of initial covariance using sds, then use for initialization
                cholesky = tf.diag(sds)
                tril_ix = np.ravel_multi_index(
                    np.tril_indices(dim),
                    (dim, dim)
                )
                scale_init = tf.gather(tf.reshape(cholesky, [dim * dim]), tril_ix)
                # Prior distribution
                joint = MultivariateNormalTriL(
                    loc=means,
                    scale_tril=tf.contrib.distributions.fill_triangular(scale_init),
                    name='joint' if ran_gf is None else 'joint_by_%s' %ran_gf
                )
                # Posterior distribution: samples initialized at the means
                joint_q_samples = tf.Variable(
                    tf.ones((self.n_samples), dtype=self.FLOAT_TF) * means,
                    name='joint_q_samples' if ran_gf is None else 'joint_q_samples_by_%s' %ran_gf
                )
                joint_q = Empirical(
                    params=joint_q_samples,
                    name='joint_q' if ran_gf is None else 'joint_q_by_%s' %ran_gf
                )
                if self.inference_name == 'MetropolisHastings':
                    # Proposal distribution
                    joint_proposal = Normal(
                        loc=joint,
                        scale=self.mh_proposal_sd,
                        name='joint_proposal' if ran_gf is None else 'joint_q_proposal_by_%s' %ran_gf
                    )
                    self.proposal_map[joint] = joint_proposal
                joint_summary = joint_q.params[self.global_batch_step - 1]
                self.inference_map[joint] = joint_q
            # Register the summary so MAP graphs can substitute it for the RV
            self.MAP_map[joint] = joint_summary
            return joint, joint_summary
def _initialize_output_model(self):
    """
    Construct the observation model over the response ``self.y``.

    Builds a trainable (or fixed) error scale ``y_sd`` and, when
    ``self.asymmetric_error`` is on, skewness/tailweight parameters for a
    SinhArcsinh error distribution; otherwise the error is Normal.
    Sets ``self.out`` (likelihood node), ``self.err_dist`` /
    ``self.err_dist_plot`` (for plotting), and the summary attributes.
    All scale-like quantities are parameterized unconstrained and passed
    through softplus to enforce positivity.
    """
    with self.sess.as_default():
        with self.sess.graph.as_default():
            if self.y_sd_trainable:
                y_sd_init_unconstrained = self.y_sd_init_unconstrained
                if self.variational():
                    # Posterior distribution
                    y_sd_loc_q = tf.Variable(
                        tf.random_normal(
                            [],
                            mean=y_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='y_sd_loc_q'
                    )
                    y_sd_scale_q = tf.Variable(
                        tf.random_normal(
                            [],
                            mean=self.y_sd_posterior_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='y_sd_scale_q'
                    )
                    y_sd_q = Normal(
                        loc=y_sd_loc_q,
                        scale=tf.nn.softplus(y_sd_scale_q),
                        name='y_sd_q'
                    )
                    y_sd_summary = y_sd_q.mean()
                    if self.declare_priors_fixef:
                        # Prior distribution
                        y_sd = Normal(
                            loc=y_sd_init_unconstrained,
                            scale=self.y_sd_prior_sd_tf,
                            name='y_sd'
                        )
                        self.inference_map[y_sd] = y_sd_q
                    else:
                        y_sd = y_sd_q
                else:
                    # MCMC inference
                    # Prior distribution
                    y_sd = Normal(
                        loc=y_sd_init_unconstrained,
                        scale=self.y_sd_prior_sd_tf,
                        name='y_sd'
                    )
                    # Posterior distribution
                    y_sd_q_samples = tf.Variable(
                        tf.zeros([self.n_samples], dtype=self.FLOAT_TF),
                        name=sn('y_sd_q_samples')
                    )
                    y_sd_q = Empirical(
                        params=y_sd_q_samples,
                        name=sn('y_sd_q')
                    )
                    if self.inference_name == 'MetropolisHastings':
                        # Proposal distribution
                        y_sd_proposal = Normal(
                            loc=y_sd,
                            scale=self.mh_proposal_sd,
                            name=sn('y_sd_proposal')
                        )
                        self.proposal_map[y_sd] = y_sd_proposal
                    y_sd_summary = y_sd_q.params[self.global_batch_step - 1]
                    self.inference_map[y_sd] = y_sd_q
                # Register the (unconstrained) summary for MAP substitution,
                # then map both through softplus to the positive scale.
                self.MAP_map[y_sd] = y_sd_summary
                y_sd = tf.nn.softplus(y_sd)
                y_sd_summary = tf.nn.softplus(y_sd_summary)
                tf.summary.scalar(
                    'y_sd',
                    y_sd_summary,
                    collections=['params']
                )
            else:
                # Error scale held fixed at its initialization value
                sys.stderr.write('Fixed y scale: %s\n' % self.y_sd_init)
                y_sd = self.y_sd_init_tf
                y_sd_summary = y_sd
            self.y_sd = y_sd
            self.y_sd_summary = y_sd_summary
            if self.asymmetric_error:
                if self.variational():
                    # Posterior distributions
                    y_skewness_loc_q = tf.Variable(
                        tf.random_normal(
                            [],
                            mean=0.,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='y_skewness_q_loc'
                    )
                    # NOTE(review): this scale variable reuses the name
                    # 'y_skewness_q_loc' — looks like a copy-paste; TF will
                    # uniquify the node name, but confirm before relying on it.
                    y_skewness_scale_q = tf.Variable(
                        tf.random_normal(
                            [],
                            mean=self.y_skewness_posterior_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='y_skewness_q_loc'
                    )
                    self.y_skewness_q = Normal(
                        loc=y_skewness_loc_q,
                        scale=tf.nn.softplus(y_skewness_scale_q),
                        name='y_skewness_q'
                    )
                    self.y_skewness_summary = self.y_skewness_q.mean()
                    tf.summary.scalar(
                        'y_skewness',
                        self.y_skewness_summary,
                        collections=['params']
                    )
                    # Tailweight is initialized so softplus(init) == 1 (Gaussian tails)
                    y_tailweight_loc_q = tf.Variable(
                        tf.random_normal(
                            [],
                            mean=tf.contrib.distributions.softplus_inverse(1.),
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='y_tailweight_q_loc'
                    )
                    y_tailweight_scale_q = tf.Variable(
                        tf.random_normal(
                            [],
                            mean=self.y_tailweight_posterior_sd_init_unconstrained,
                            stddev=self.init_sd,
                            dtype=self.FLOAT_TF
                        ),
                        name='y_tailweight_q_scale'
                    )
                    self.y_tailweight_q = Normal(
                        loc=y_tailweight_loc_q,
                        scale=tf.nn.softplus(y_tailweight_scale_q),
                        name='y_tailweight_q'
                    )
                    self.y_tailweight_summary = self.y_tailweight_q.mean()
                    tf.summary.scalar(
                        'y_tailweight',
                        tf.nn.softplus(self.y_tailweight_summary),
                        collections=['params']
                    )
                    if self.declare_priors_fixef:
                        # Prior distributions
                        self.y_skewness = Normal(
                            loc=0.,
                            scale=self.y_skewness_prior_sd,
                            name='y_skewness'
                        )
                        self.y_tailweight = Normal(
                            loc=tf.contrib.distributions.softplus_inverse(1.),
                            scale=self.y_tailweight_prior_sd,
                            name='y_tailweight'
                        )
                        self.inference_map[self.y_skewness] = self.y_skewness_q
                        self.inference_map[self.y_tailweight] = self.y_tailweight_q
                    else:
                        self.y_skewness = self.y_skewness_q
                        self.y_tailweight = self.y_tailweight_q
                else:
                    # MCMC inference
                    # Prior distributions
                    self.y_skewness = Normal(
                        loc=0.,
                        scale=self.y_skewness_prior_sd,
                        name='y_skewness'
                    )
                    self.y_tailweight = Normal(
                        loc=tf.contrib.distributions.softplus_inverse(1.),
                        scale=self.y_tailweight_prior_sd,
                        name='y_tailweight'
                    )
                    # Posterior distributions
                    y_skewness_q_samples = tf.Variable(
                        tf.zeros([self.n_samples], dtype=self.FLOAT_TF),
                        name=sn('y_skewness_q_samples')
                    )
                    self.y_skewness_q = Empirical(
                        params=y_skewness_q_samples,
                        name=sn('y_skewness_q')
                    )
                    y_tailweight_q_samples = tf.Variable(
                        tf.zeros([self.n_samples], dtype=self.FLOAT_TF),
                        name=sn('y_tailweight_q_samples')
                    )
                    self.y_tailweight_q = Empirical(
                        params=y_tailweight_q_samples,
                        name=sn('y_tailweight_q')
                    )
                    if self.inference_name == 'MetropolisHastings':
                        # Proposal distributions
                        y_skewness_proposal = Normal(
                            loc=self.y_skewness,
                            scale=self.mh_proposal_sd,
                            name=sn('y_skewness_proposal')
                        )
                        y_tailweight_proposal = Normal(
                            loc=self.y_tailweight,
                            scale=self.mh_proposal_sd,
                            name=sn('y_tailweight_proposal')
                        )
                        self.proposal_map[self.y_skewness] = y_skewness_proposal
                        self.proposal_map[self.y_tailweight] = y_tailweight_proposal
                    self.y_skewness_summary = self.y_skewness_q.params[self.global_batch_step - 1]
                    self.y_tailweight_summary = self.y_tailweight_q.params[self.global_batch_step - 1]
                    tf.summary.scalar(
                        'y_skewness',
                        self.y_skewness_summary,
                        collections=['params']
                    )
                    tf.summary.scalar(
                        'y_tailweight',
                        tf.nn.softplus(self.y_tailweight_summary),
                        collections=['params']
                    )
                    self.inference_map[self.y_skewness] = self.y_skewness_q
                    self.inference_map[self.y_tailweight] = self.y_tailweight_q
                self.MAP_map[self.y_skewness] = self.y_skewness_summary
                self.MAP_map[self.y_tailweight] = self.y_tailweight_summary
                # Asymmetric error: sinh-arcsinh transform of a Normal
                self.out = SinhArcsinh(
                    loc=self.out,
                    scale=y_sd,
                    skewness=self.y_skewness,
                    tailweight=tf.nn.softplus(self.y_tailweight),
                    name='output'
                )
                self.err_dist = SinhArcsinh(
                    loc=0.,
                    scale=y_sd_summary,
                    skewness=self.y_skewness_summary,
                    tailweight=tf.nn.softplus(self.y_tailweight_summary),
                    name='err_dist'
                )
                self.err_dist_plot = tf.exp(self.err_dist.log_prob(self.support))
            else:
                # Symmetric (Gaussian) error
                self.out = Normal(
                    loc=self.out,
                    scale=self.y_sd,
                    name='output'
                )
                self.err_dist = Normal(
                    loc=0.,
                    scale=self.y_sd_summary,
                    name='err_dist'
                )
                self.err_dist_plot = tf.exp(self.err_dist.log_prob(self.support))
            # Plotting bounds for the error distribution (+/- 3 sd)
            self.err_dist_lb = -3 * y_sd_summary
            self.err_dist_ub = 3 * y_sd_summary
def initialize_objective(self):
    """
    Finalize the model graph and set up the Edward inference object.

    Chooses the inference class by ``self.inference_name`` (variational,
    Metropolis-Hastings, or another sampler), then builds posterior/MAP
    copies of the convolution, output, and IRF tensors via ``ed.copy`` so
    they can be evaluated under the fitted posterior.
    """
    with self.sess.as_default():
        with self.sess.graph.as_default():
            self._initialize_output_model()
            self.opt = self._initialize_optimizer(self.optim_name)
            if self.variational():
                self.inference = getattr(ed,self.inference_name)(self.inference_map, data={self.out: self.y})
                self.inference.initialize(
                    n_samples=self.n_samples,
                    n_iter=self.n_iter,
                    n_print=self.n_train_minibatch * self.log_freq,
                    logdir=self.outdir + '/tensorboard/edward',
                    log_timestamp=False,
                    scale={self.out: self.minibatch_scale},
                    optimizer=self.opt
                )
            elif self.inference_name == 'MetropolisHastings':
                # MH additionally needs the proposal distributions
                self.inference = getattr(ed, self.inference_name)(self.inference_map, self.proposal_map, data={self.out: self.y})
                self.inference.initialize(
                    n_print=self.n_train_minibatch * self.log_freq,
                    logdir=self.outdir + '/tensorboard/edward',
                    log_timestamp=False,
                    scale={self.out: self.minibatch_scale}
                )
            else:
                # Other MCMC samplers (e.g. HMC/SGLD) take a step size
                self.inference = getattr(ed,self.inference_name)(self.inference_map, data={self.out: self.y})
                self.inference.initialize(
                    step_size=self.lr,
                    n_print=self.n_train_minibatch * self.log_freq,
                    logdir=self.outdir + '/tensorboard/edward',
                    log_timestamp=False,
                    scale={self.out: self.minibatch_scale}
                )
            ## Set up posteriors for MC sampling
            self.X_conv_prior = self.X_conv
            # NOTE(review): scaled_prior aliases the unscaled X_conv —
            # possibly intended to be self.X_conv_scaled; confirm.
            self.X_conv_scaled_prior = self.X_conv
            self.X_conv_post = ed.copy(self.X_conv, self.inference_map)
            self.X_conv_scaled_post = ed.copy(self.X_conv_scaled, self.inference_map)
            self.X_conv_MAP = ed.copy(self.X_conv, self.MAP_map, scope='MAP')
            self.X_conv_scaled_MAP = ed.copy(self.X_conv_scaled, self.MAP_map, scope='MAP')
            self.out_prior = self.out
            self.out_post = ed.copy(self.out, self.inference_map)
            # MAP prediction: substitute the output RV with its mean
            self.MAP_map[self.out] = self.out.mean()
            self.out_MAP = tf.identity(self.MAP_map[self.out])
            self.out_MAP = ed.copy(self.out_MAP, self.MAP_map, scope='MAP')
            self.ll_prior = self.out_prior.log_prob(self.y)
            self.ll_post = self.out_post.log_prob(self.y)
            self.ll_MAP = ed.copy(self.out.log_prob(self.y), self.MAP_map, scope='MAP')
            # Rewire all IRF plotting/integral tensors to the posterior
            for x in self.irf_mc:
                for a in self.irf_mc[x]:
                    for b in self.irf_mc[x][a]:
                        self.irf_mc[x][a][b] = ed.copy(self.irf_mc[x][a][b], self.inference_map)
            for x in self.irf_integral_tensors:
                self.irf_integral_tensors[x] = ed.copy(self.irf_integral_tensors[x], self.inference_map)
            if self.pc:
                # Principal-components mode: same rewiring for source-space tensors
                for x in self.src_irf_mc:
                    for a in self.src_irf_mc[x]:
                        for b in self.src_irf_mc[x][a]:
                            self.src_irf_mc[x][a][b] = ed.copy(self.src_irf_mc[x][a][b], self.inference_map)
                for x in self.src_irf_integral_tensors:
                    self.src_irf_integral_tensors[x] = ed.copy(self.src_irf_integral_tensors[x], self.inference_map)
# Overload this method to perform parameter sampling and compute credible intervals
def _extract_parameter_values(self, fixed=True, level=95, n_samples=None):
    """
    Sample the parameter table from the posterior and compute credible bounds.

    :param fixed: ``bool``; sample the fixed-effect table (True) or the
        random-effect table (False).
    :param level: ``float``; credible-interval level in percent.
    :param n_samples: ``int`` or ``None``; number of posterior draws
        (defaults to ``self.n_samples_eval``).
    :return: ``numpy`` array with columns [mean, lower, upper] per parameter.
    """
    if n_samples is None:
        n_samples = self.n_samples_eval
    # Two-sided interval: each tail gets half of (100 - level) percent
    tail = (100 - float(level)) / 2
    with self.sess.as_default():
        with self.sess.graph.as_default():
            table = (
                self.parameter_table_fixed_values
                if fixed
                else self.parameter_table_random_values
            )
            draws = np.stack(
                [table.eval(session=self.sess) for _ in range(n_samples)],
                axis=1
            )
            return np.stack(
                [
                    draws.mean(axis=1),
                    np.percentile(draws, tail, axis=1),
                    np.percentile(draws, 100 - tail, axis=1)
                ],
                axis=1
            )
# Overload this method to perform parameter sampling and compute credible intervals
def _extract_irf_integral(self, terminal_name, level=95, n_samples=None, n_time_units=None, n_time_points=1000):
    """
    Monte-Carlo estimate of an IRF integral with credible bounds.

    :param terminal_name: name of the IRF terminal whose integral to sample.
    :param level: ``float``; credible-interval level in percent.
    :param n_samples: ``int`` or ``None``; number of posterior draws
        (defaults to ``self.n_samples_eval``).
    :param n_time_units: ``float`` or ``None``; integration horizon
        (defaults to ``self.max_tdelta``).
    :param n_time_points: ``int``; number of evaluation points.
    :return: ``(mean, lower, upper)`` tuple of ``numpy`` arrays.
    """
    if n_samples is None:
        n_samples = self.n_samples_eval
    if n_time_units is None:
        n_time_units = self.max_tdelta
    # Two-sided interval: each tail gets half of (100 - level) percent
    tail = (100 - float(level)) / 2
    with self.sess.as_default():
        with self.sess.graph.as_default():
            fd = {
                self.support_start: 0.,
                self.n_time_units: n_time_units,
                self.n_time_points: n_time_points,
                self.gf_y: np.expand_dims(np.array(self.rangf_n_levels, dtype=self.INT_NP), 0) - 1,
                self.time_y: [n_time_units],
                self.time_X: np.zeros((1, self.history_length))
            }
            try:
                target = self.irf_integral_tensors[terminal_name]
            except KeyError:
                # Fall back to the source-space (PC mode) integral tensors
                target = self.src_irf_integral_tensors[terminal_name]
            draws = np.stack(
                [
                    np.squeeze(self.sess.run(target, feed_dict=fd)[0])
                    for _ in range(n_samples)
                ],
                axis=0
            )
            return (
                draws.mean(axis=0),
                np.percentile(draws, tail, axis=0),
                np.percentile(draws, 100 - tail, axis=0)
            )
# Overload this method to use posterior distribution
def _initialize_parameter_tables(self):
    """
    Build the parameter tables, then rewire them to the posterior via
    ``ed.copy`` so table values are drawn from the fitted distribution.
    """
    with self.sess.as_default():
        with self.sess.graph.as_default():
            super(DTSRBayes, self)._initialize_parameter_tables()
            self.parameter_table_fixed_values = ed.copy(
                self.parameter_table_fixed_values,
                self.inference_map
            )
            # Random-effect table exists only when grouping factors are present
            if len(self.rangf) > 0:
                self.parameter_table_random_values = ed.copy(
                    self.parameter_table_random_values,
                    self.inference_map
                )
#####################################################
#
# Public methods
#
######################################################
def variational(self):
    """
    Report whether the DTSR model uses variational Bayes.

    :return: ``bool``; ``True`` if the model is variational, ``False`` otherwise.
    """
    # Edward's variational inference classes; anything else is MCMC-style.
    variational_inference_names = frozenset((
        'KLpq',
        'KLqp',
        'ImplicitKLqp',
        'ReparameterizationEntropyKLqp',
        'ReparameterizationKLKLqp',
        'ReparameterizationKLqp',
        'ScoreEntropyKLqp',
        'ScoreKLKLqp',
        'ScoreKLqp',
        'ScoreRBKLqp',
        'WakeSleep'
    ))
    return self.inference_name in variational_inference_names
def ci_curve(
        self,
        posterior,
        level=95,
        n_samples=None,
        n_time_units=None,
        n_time_points=1000
):
    """
    Extract an IRF with Monte Carlo credible intervals for plotting.

    :param posterior: the IRF node in the model's graph.
    :param level: ``float``; level of the credible interval (percent).
    :param n_samples: ``int`` or ``None``; number of posterior samples to draw. If ``None``, use model defaults.
    :param n_time_units: ``float``; number of time units over which to plot the curve. If ``None``, use ``self.max_tdelta``.
    :param n_time_points: ``float``; number of points to use in the plot.
    :return: ``tuple`` of 3 ``numpy`` vectors; mean, lower bound, and upper bound at the desired level for each plot point.
    """
    if n_samples is None:
        n_samples = self.n_samples_eval
    if n_time_units is None:
        n_time_units = self.max_tdelta
    with self.sess.as_default():
        with self.sess.graph.as_default():
            # Feed a single dummy response row; gf_y selects the reference
            # (dropped) level of each grouping factor so only fixed effects plot
            fd = {
                self.support_start: 0.,
                self.n_time_units: n_time_units,
                self.n_time_points: n_time_points,
                self.gf_y: np.expand_dims(np.array(self.rangf_n_levels, dtype=self.INT_NP), 0) - 1,
                self.time_y: np.ones((1,)) * n_time_units,
                self.time_X: np.zeros((1, self.history_length))
            }
            # Two-sided interval: alpha/2 percent in each tail
            alpha = 100-float(level)
            samples = [self.sess.run(posterior, feed_dict=fd) for _ in range(n_samples)]
            samples = np.concatenate(samples, axis=1)
            mean = samples.mean(axis=1)
            lower = np.percentile(samples, alpha/2, axis=1)
            upper = np.percentile(samples, 100-(alpha/2), axis=1)
            return (mean, lower, upper)
def report_settings(self, indent=0):
    """
    Return a formatted settings report, extending the superclass report
    with the Bayes-specific initialization keyword arguments.

    :param indent: ``int``; number of leading spaces per line.
    :return: ``str``; the settings report.
    """
    out = super(DTSRBayes, self).report_settings(indent=indent)
    pad = ' ' * indent
    for kwarg in DTSRBAYES_INITIALIZATION_KWARGS:
        val = getattr(self, kwarg.key)
        if isinstance(val, str):
            # Quote string values so they read unambiguously in the report
            val = '"%s"' % val
        out += '%s %s: %s\n' % (pad, kwarg.key, val)
    out += '\n'
    return out
def run_train_step(self, feed_dict):
    """
    Run a single inference update and advance the batch counter.

    :param feed_dict: TF feed dict for the minibatch.
    :return: ``dict``; under key ``'loss'``, the variational loss (VI) or
        the acceptance rate (MCMC) reported by Edward.
    """
    with self.sess.as_default():
        with self.sess.graph.as_default():
            info_dict = self.inference.update(feed_dict)
            self.sess.run(self.incr_global_batch_step)
            # Edward reports 'loss' for VI and 'accept_rate' for samplers
            metric = 'loss' if self.variational() else 'accept_rate'
            return {'loss': info_dict[metric]}
def run_predict_op(self, feed_dict, n_samples=None, algorithm='MAP', verbose=True):
    """
    Predict the response for a batch of data.

    :param feed_dict: TF feed dict for the batch.
    :param n_samples: ``int`` or ``None``; posterior draws to average when
        not in MAP mode (defaults to ``self.n_samples_eval``).
    :param algorithm: ``'MAP'``/``'map'`` uses the MAP graph (variational
        models only); anything else averages posterior samples.
    :param verbose: ``bool``; show a progress bar while sampling.
    :return: ``numpy`` vector of predictions.
    """
    # MAP shortcut is only available for variational models
    use_map = algorithm in ('map', 'MAP') and self.variational()
    with self.sess.as_default():
        with self.sess.graph.as_default():
            if use_map:
                return self.sess.run(self.out_MAP, feed_dict=feed_dict)
            if n_samples is None:
                n_samples = self.n_samples_eval
            pb = tf.contrib.keras.utils.Progbar(n_samples) if verbose else None
            preds = np.zeros((len(feed_dict[self.time_y]), n_samples))
            for i in range(n_samples):
                preds[:, i] = self.sess.run(self.out_post, feed_dict=feed_dict)
                if pb is not None:
                    pb.update(i + 1, force=True)
            return preds.mean(axis=1)
def run_loglik_op(self, feed_dict, n_samples=None, algorithm='MAP', verbose=True):
    """
    Compute per-datum log likelihood for a batch of data.

    :param feed_dict: TF feed dict for the batch.
    :param n_samples: ``int`` or ``None``; posterior draws to average when
        not in MAP mode (defaults to ``self.n_samples_eval``).
    :param algorithm: ``'MAP'``/``'map'`` uses the MAP graph (variational
        models only); anything else averages posterior samples.
    :param verbose: ``bool``; show a progress bar while sampling.
    :return: ``numpy`` vector of log likelihoods.
    """
    # MAP shortcut is only available for variational models
    use_map = algorithm in ('map', 'MAP') and self.variational()
    with self.sess.as_default():
        with self.sess.graph.as_default():
            if use_map:
                return self.sess.run(self.ll_MAP, feed_dict=feed_dict)
            if n_samples is None:
                n_samples = self.n_samples_eval
            pb = tf.contrib.keras.utils.Progbar(n_samples) if verbose else None
            log_lik = np.zeros((len(feed_dict[self.time_y]), n_samples))
            for i in range(n_samples):
                log_lik[:, i] = self.sess.run(self.ll_post, feed_dict=feed_dict)
                if pb is not None:
                    pb.update(i + 1, force=True)
            return log_lik.mean(axis=1)
def run_conv_op(self, feed_dict, scaled=False, n_samples=None, algorithm='MAP', verbose=True):
    """
    Compute the convolved design matrix for a batch of data.

    :param feed_dict: TF feed dict for the batch.
    :param scaled: ``bool``; use the coefficient-scaled convolution tensors.
    :param n_samples: ``int`` or ``None``; posterior draws to average when
        not in MAP mode (defaults to ``self.n_samples_eval``).
    :param algorithm: ``'MAP'``/``'map'`` uses the MAP graph (variational
        models only); anything else averages posterior samples.
    :param verbose: ``bool``; show a progress bar while sampling.
    :return: ``numpy`` array of convolved predictors.
    """
    # MAP shortcut is only available for variational models
    use_map = algorithm in ('map', 'MAP') and self.variational()
    with self.sess.as_default():
        with self.sess.graph.as_default():
            if use_map:
                target = self.X_conv_scaled_MAP if scaled else self.X_conv_MAP
                return self.sess.run(target, feed_dict=feed_dict)
            if n_samples is None:
                n_samples = self.n_samples_eval
            pb = tf.contrib.keras.utils.Progbar(n_samples) if verbose else None
            target = self.X_conv_scaled_post if scaled else self.X_conv_post
            X_conv = np.zeros((len(feed_dict[self.X]), self.X_conv.shape[-1], n_samples))
            for i in range(n_samples):
                X_conv[..., i] = self.sess.run(target, feed_dict=feed_dict)
                if pb is not None:
                    pb.update(i + 1, force=True)
            return X_conv.mean(axis=2)
def finalize(self):
    """
    Finalize the model: run superclass teardown, then finalize the Edward
    inference object (flushes its loggers and releases resources).
    """
    # Fix: stripped a fused "| PypiClean" packaging artifact that broke
    # the final statement's syntax.
    super(DTSRBayes, self).finalize()
    self.inference.finalize()
from ..basecontrollers import ImConWidgetController
from imswitch.imcommon.view.guitools.FileWatcher import FileWatcher
import os
from time import perf_counter
class WatcherController(ImConWidgetController):
    """ Linked to WatcherWidget.

    Watches a directory for Python script files and executes them one at a
    time, logging the execution time of each and deleting it when done.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.t0 = None  # perf_counter timestamp when the current script started
        self._widget.sigWatchChanged.connect(self.toggleWatch)
        self._commChannel.sigScriptExecutionFinished.connect(self.executionFinished)
        self.execution = False  # True while a script is running
        self.toExecute = []     # queue of file names awaiting execution
        self.current = []       # path of the script currently running

    def toggleWatch(self, checked):
        """Start or stop watching the widget's directory for .py files."""
        if checked:
            self.watcher = FileWatcher(self._widget.path, 'py', 1)
            self._widget.updateFileList()
            files = self.watcher.filesInDirectory()
            self.toExecute = files
            self.watcher.sigNewFiles.connect(self.newFiles)
            self.watcher.start()
            self.runNextFile()
        else:
            self.watcher.stop()
            self.watcher.quit()
            self.toExecute = []

    def newFiles(self, files):
        """Queue newly detected files and kick off execution if idle."""
        self._widget.updateFileList()
        self.toExecute.extend(files)
        self.runNextFile()

    def runNextFile(self):
        """Run the next queued script, if any and not already executing."""
        if len(self.toExecute) and not self.execution:
            self.current = self._widget.path + '/' + self.toExecute.pop()
            # Fix: use a context manager so the file handle is closed even
            # if reading raises.
            with open(self.current, "r") as file:
                text = file.read()
            self.t0 = perf_counter()
            self._commChannel.runScript(text)
            self.execution = True

    def executionFinished(self):
        """Log timing for the finished script, delete it, and continue."""
        self.execution = False
        diff = perf_counter() - self.t0
        self.watcher.addToLog(self.current, [str(self.t0), str(diff)])
        os.remove(self.current)
        self._widget.updateFileList()
        self.runNextFile()
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from builtins import zip
from builtins import str
from builtins import range
import sys
import random
import math
import tkinter as tk
import tkinter.messagebox
from decida.ItclObjectx import ItclObjectx
class Calc(ItclObjectx) :
"""
**synopsis**:
Scientific calculator.
*Calc* is an algebraic-mode calculator with 50 buttons, a small
menu of additional functions, a menu of constants, and a help display.
*Calc* instances can be embedded in other tk windows. The calculators
can be operated with mouse or keyboard entry.
The DeCiDa application *calc* simply instantiates one *Calc* object.
**constructor arguments**:
**parent** (tk handle, default=None)
handle of frame or other widget to pack plot in.
if this is not specified, top-level is created.
**\*\*kwargs** (dict)
keyword=value specifications:
options or configuration-options
**options**:
**wait** (bool, default=True)
If True, wait in main-loop until window is destroyed.
**configuration options**:
**verbose** (bool, default=False)
Enable/disable verbose mode. (Not currently used.)
**example**:
>>> from decida.Calc import Calc
>>> Calc()
**buttons**:
**OFF**
Dismiss the calculator. Shuts down the main-loop.
**AC**
All clear: clear display accumulator, memory accumulator,
and calculator stack.
**CE**
Clear entry: clear display accumulator.
**HELP**
Display help window: button / keystroke map.
**MENU**
Menu of additional functions. Currently these are:
* random number:
Enter a random number between 0 and 1 into the
display accumulator.
* random seed:
Use the display accumulator value as a random seed.
* list stack:
Display the calculator stack.
* set precision:
Use the display accumulator value as a number of digits
of precision in calculator evaluations.
* logging:
Open a file to log the calculator evaluations.
**const**
Display constant dialog. Double click on a constant to enter it
into the display accumulator.
**put**
Put the current display accumulator number into constant memory.
The const button can then be used to access its value.
**pi**
Enter pi into the display accumulator.
**e**
Enter e into the display accumulator.
**DRG**
Change from degrees to radians to grads for trigonometric functions.
**HYP**
Toggle hyperbolic mode.
If sin, cos, or tan buttons are then
invoked, the evaluation is of sinh, cosh, or tanh.
If arc and hyperbolic mode are both on, then
evaluation is of asinh, acosh, or atanh.
**ARC**
Toggle arc mode.
If sin, cos, or tan buttons are then
invoked, the evaluation is of asin, acos, or atan.
If arc and hyperbolic mode are both on, then
evaluation is of asinh, acosh, or atanh.
**sin cos tan**
Trigonometric, inverse-trigonometric (arc mode on),
hyperbolic (hyperbolic mode on), or inverse-hyperbolic
(arc and hyperbolic modes both on).
**sqrt**
Square root of the display accumulator.
**x!**
Factorial of the (integer value of the) display accumulator.
**10^x**
10 to the power of the display accumulator.
**exp**
e to the power of the display accumulator.
**x^2**
Square of the display accumulator.
**1/x**
Inverse of the display accumulator.
**log**
Logarithm to the base ten of the display accumulator.
**ln**
Natural logarithm of the display accumulator.
**y^x**
Push the current display accumulator value onto the stack,
evaluate the stack value raised to the power of the display
accumulator value.
**M+**
Add the display accumulator to the memory accumulator.
**M-**
Subtract the display accumulator from the memory accumulator.
**STO**
Store the display accumulator to the memory accumulator.
**RCL**
Recall the memory accumulator to the display accumulator.
**XCH**
Exchange the memory accumulator with the display accumulator.
**CLR**
Clear the memory accumulator.
**(** **)**
Algebraic parentheses.
**0 1 2 3 4 5 6 7 8 9 EE +/-**
Numerical entry.
**+ - * /**
Binary arithmetic operations.
**=**
Evaluate the calculator stack.
"""
# Button grid geometry: 10 rows x 5 columns
nrows = 10
ncols = 5
# Button labels, listed row-major; whitespace-split into a flat list
buttons = """
OFF HELP const pi AC
DRG MENU put e CE
HYP sin cos tan sqrt
ARC x! 10^x exp x^2
M+ 1/x log ln y^x
M- EE ( ) /
STO 7 8 9 *
RCL 4 5 6 -
XCH 1 2 3 +
CLR 0 . +/- =
""".split()
# Command name for each button, positionally parallel to `buttons`
buttoncommands = """
OFF HELP const pi AC
DRG MENU put e CE
HYP trig trig trig sqrt
ARC fact tenx exp sqr
Mp inv log ln ytox
Mm EE lpar rpar mdiv
STO num num num mdiv
RCL num num num pmin
XCH num num num pmin
CLR num pt porm eq
""".split()
# Tk keysym bound to each button, positionally parallel to `buttons`
buttonbindings = """
Control-Key-q H less numbersign A
d question greater at C
h s c t r
a exclam X x dollar
Control-Key-p percent L l asciicircum
Control-Key-m e parenleft parenright slash
Control-Key-s Key-7 Key-8 Key-9 asterisk
Control-Key-r Key-4 Key-5 Key-6 minus
Control-Key-x Key-1 Key-2 Key-3 plus
Control-Key-c Key-0 period asciitilde equal
""".split()
# MENU button entries: [display label, internal command tag]
menuentries = [
    ["random number", "RAND"],
    ["random seed", "SEED"],
    ["list stack", "STKQ"],
    ["set precision", "PREC"],
    ["logging", "LOG" ],
]
# Built-in physical constants shown by the `const` button:
# "<value> <symbol> (<units>) <description>" per entry
constants = [
    "273.16 Tabs T(O deg C) deg K",
    "1.380622e-23 kB (J/K) boltzmann constant",
    "8.61708e-05 kB (eV/K) boltzmann constant",
    "6.58218e-16 hbar (eV-s) planck constant",
    "2.99792e+10 co (cm/s) speed of light in vacuum",
    "1.602192e-19 qe (C) unit charge",
    "8.854215e-14 eo (F/cm) permittivity of free space",
    "9.10956e-31 mo (kg) electron rest mass",
    "11.7 ksi relative permittivity (Si)",
    "3.9 kox relative permittivity (SiO2)",
    "1.03594315e-12 esi (F/cm) permittivity (Si)",
    "3.45314385e-13 eox (F/cm) permittivity (SiO2)",
]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Calc main
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#==========================================================================
# METHOD : __init__
# PURPOSE : constructor
#==========================================================================
    def __init__(self, parent=None, **kwargs) :
        """Construct the calculator widget.

        parent : optional Tk parent widget; when None a new Toplevel
            window is created by __gui.
        kwargs : configuration options (e.g. verbose) handled by the
            ItclObjectx option protocol, plus the special "wait" flag
            which makes the constructor block until the window closes.
        """
        ItclObjectx.__init__(self)
        #----------------------------------------------------------------------
        # private variables:
        #----------------------------------------------------------------------
        self.__parent = parent
        self.__Component = {}
        self.__log = False
        self.__saved_constants = []
        self.__wait = True
        #----------------------------------------------------------------------
        # configuration options:
        #----------------------------------------------------------------------
        self._add_options({
            "verbose" : [False, None],
        })
        #----------------------------------------------------------------------
        # keyword arguments are *not* all configuration options
        #----------------------------------------------------------------------
        # "wait" is consumed here; everything else is routed through the
        # ItclObjectx configuration protocol via self[key] = value.
        for key, value in list(kwargs.items()) :
            if key == "wait" :
                self.__wait = value
            else :
                self[key] = value
        #----------------------------------------------------------------------
        # class variables
        #----------------------------------------------------------------------
        # mem/m: memory flag and value (GTM initializes m on first use);
        # mode: entry-automaton state (">" idle, "I" integer, "F" fraction,
        # "E"/"X" exponent); x: accumulator as a list of characters;
        # STK: token stack for PSH/POP/PEX; drg: 0=deg, 1=rad, 2=grad.
        self.logfile = ""
        self.__logfid = None
        self.__constant_results = ""
        self.mem = 0
        self.m = ""
        self.hyp = 0
        self.arc = 0
        self.mode = ">"
        self.x = []
        self.STK = []
        self.drg = 0
        #----------------------------------------------------------------------
        # build gui:
        #----------------------------------------------------------------------
        self.__gui()
#==========================================================================
# METHOD : __del__
# PURPOSE : destructor
#==========================================================================
def __del__(self) :
top = self.__Component["top"]
try:
top.destroy()
except:
pass
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Calc configuration option callback methods
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Calc GUI
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def __gui(self) :
        """Build the calculator GUI: display entry, state strip, button
        grid, and all key bindings.  Called once from __init__."""
        #---------------------------------------------------------------------
        # top-level:
        #---------------------------------------------------------------------
        # ensure a (hidden) Tk root exists before creating child widgets
        if not tk._default_root :
            root = tk.Tk()
            root.wm_state("withdrawn")
            tk._default_root = root
        if not self.__parent :
            self.__toplevel = True
            top = tk.Toplevel()
        else:
            self.__toplevel = False
            top = tk.Frame(self.__parent)
            top.pack(side="top", fill="both", expand=True)
        self.__Component["top"] = top
        #---------------------------------------------------------------------
        # bitmaps
        #---------------------------------------------------------------------
        # X11-bitmap images for the pi and sqrt button faces
        self._bm_pi = tk.Image("bitmap")
        self._bm_pi["data"] = """
            #define bm_pi_width 16
            #define bm_pi_height 16
            static char bm_pi_bits[] = {
               0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
               0xfe, 0x3f, 0x12, 0x04, 0x10, 0x04, 0x10, 0x04,
               0x10, 0x04, 0x10, 0x04, 0x10, 0x04, 0x10, 0x04,
               0x10, 0x04, 0x10, 0x04, 0x1c, 0x04, 0x00, 0x00
            };
        """
        self._bm_sqrt = tk.Image("bitmap")
        self._bm_sqrt["data"] = """
            #define bm_sqrt_width 16
            #define bm_sqrt_height 16
            static char bm_sqrt_bits[] = {
               0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x02,
               0x00, 0x02, 0x00, 0x03, 0x00, 0x01, 0x00, 0x01,
               0x87, 0x01, 0x8c, 0x00, 0x98, 0x00, 0xf0, 0x00,
               0x60, 0x00, 0x40, 0x00, 0x40, 0x00, 0x00, 0x00
            };
        """
        #---------------------------------------------------------------------
        # option database:
        #---------------------------------------------------------------------
        #---------------------------------------------------------------------
        # main layout
        #---------------------------------------------------------------------
        display = tk.Frame(top)
        states = tk.Frame(top)
        buttons = tk.Frame(top)
        display.pack(side="top", fill="x", expand=True)
        states.pack(side="top", fill="x", expand=True)
        buttons.pack(side="top", fill="both", expand=True)
        #---------------------------------------------------------------------
        # calculator display
        #---------------------------------------------------------------------
        # disabled Entry: read-only display, repainted by __dispx__
        disp = tk.Entry(display,
            bd=2, relief="sunken", state="disabled",
            background="GhostWhite", foreground="black")
        disp.pack(side="top", padx=1, pady=1, expand=True, fill="both")
        disp["disabledforeground"] = "black"
        self.__Component["disp"] = disp
        #---------------------------------------------------------------------
        # calculator states
        #---------------------------------------------------------------------
        # indicator labels: a=ARC, h=HYP, d=DRG unit, m=MEM, s=entry mode
        for state in ["a", "h", "d", "m", "s"] :
            lst = tk.Label(states, background="steel blue", foreground="white")
            lst.pack(side="left", fill="x", expand=True)
            self.__Component["state_" + state] = lst
        #---------------------------------------------------------------------
        # calculator buttons
        #---------------------------------------------------------------------
        def focuscmd(event) :
            event.widget.focus_set()
        def unfocuscmd(event) :
            tk._default_root.focus_set()
        def eqcmd(event, self=self) :
            self.eq("=")
        def bscmd(event, self=self) :
            self.BS()
        def selcmd(event, self=self) :
            x=self.selection_get()
            self.STX(x)
            self.eq("=")
        for i, button in enumerate(Calc.buttons) :
            x = i % Calc.ncols
            y = i // Calc.ncols
            if button == "MENU":
                b = tk.Menubutton(buttons,
                    text=button,
                    relief="raised", bd=2, pady=2, padx=2, highlightthickness=0,
                    background="dark khaki", foreground="black")
                m = tk.Menu(b)
                b["menu"] = m
                for label, c in Calc.menuentries :
                    # command=c default-binds c per iteration (avoids the
                    # late-binding-closure pitfall)
                    def cmd(self=self, command = c) :
                        cx = compile("self.%s()" % (command), "string", "single")
                        eval(cx)
                    m.add_command(label=label, command=cmd)
            else :
                c=Calc.buttoncommands[i]
                e=Calc.buttonbindings[i]
                def cmd(self=self, command=c, button=button) :
                    cx = compile(
                        "self.%s(\"%s\")" % (command, button), "string", "single"
                    )
                    eval(cx)
                b = tk.Button(buttons,
                    command=cmd,
                    relief="raised", bd=2, pady=1, padx=1, highlightthickness=0,
                    background="dark khaki", foreground="black")
                if button == "pi" :
                    b["image"] = self._bm_pi
                elif button == "sqrt" :
                    b["image"] = self._bm_sqrt
                else :
                    b["text"] = button
                if sys.platform == "darwin" :
                    b["width"] = 7
                else :
                    b["width"] = 5
                def cmde(event, self=self, command=c, button=button) :
                    cx = compile(
                        "self.%s(\"%s\")" % (command, button), "string", "single"
                    )
                    eval(cx)
                top.bind("<" + e + ">" , cmde)
            b.bind("<Enter>", focuscmd)
            b.bind("<Leave>", unfocuscmd)
            # prepend top to the button's bindtags so top-level key
            # bindings fire while the pointer is over a button
            bx = list(b.bindtags())
            bx.insert(0, top)
            b.bindtags(tuple(bx))
            b.grid(column=x, row=y, sticky="nsew")
        #---------------------------------------------------------------------
        # calculator bindings
        #---------------------------------------------------------------------
        for c in ["disp", "state_a", "state_h", "state_d", "state_m", "state_s"] :
            w = self.__Component[c]
            w.bind("<Enter>", focuscmd)
            w.bind("<Leave>", unfocuscmd)
            wx = list(w.bindtags())
            wx.insert(0, top)
            w.bindtags(tuple(wx))
        top.bind("<Return>", eqcmd)
        top.bind("<space>", eqcmd)
        top.bind("<BackSpace>", bscmd)
        top.bind("<Delete>", bscmd)
        top.bind("<Double-Button-2>", selcmd)
        self.AC("AC")
        #---------------------------------------------------------------------
        # update / mainloop
        #---------------------------------------------------------------------
        if self.__toplevel :
            top.wm_title("calc")
        if self.__wait :
            top.wait_window()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Calc buttons callback methods
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#==========================================================================
# METHOD : num
# PURPOSE : enter one digit of a number
#==========================================================================
def num(self, button) :
if (self.mode == ">") :
self.x = []
self.x.append(button)
self.mode = "I"
elif (self.mode in ["I", "F"]) :
self.x.append(button)
elif (self.mode in ["E", "X"]) :
e=self.x.pop()
self.x.pop()
self.x.append(e)
self.x.append(button)
self.mode = "X"
self.__dispx__()
#==========================================================================
# METHOD : pt
# PURPOSE : decimal point
#==========================================================================
def pt(self, button) :
if (self.mode == ">") :
self.x = []
self.x.append("0")
self.x.append(".")
self.mode = "F"
elif (self.mode == "I") :
self.x.append(".")
self.mode = "F"
self.__dispx__()
#==========================================================================
# METHOD : EE
# PURPOSE : enter exponent
#==========================================================================
def EE(self, button) :
if (self.mode in ["I", "F"]) :
self.x.append("E")
self.x.append("+")
self.x.append("0")
self.x.append("0")
self.mode = "E"
self.__dispx__()
#==========================================================================
# METHOD : porm
# PURPOSE : enter +/-
#==========================================================================
def porm(self, button) :
if (self.mode in [">", "I", "F"]) :
if (self.x[0] == "-") :
self.x.pop(0)
else :
self.x.insert(0, "-")
elif (self.mode in ["E", "X"]) :
if (self.x[-3] == "+") :
self.x[-3] = "-"
elif (self.x[-3] == "-") :
self.x[-3] = "+"
self.__dispx__()
#==========================================================================
# METHOD : OFF
# PURPOSE : shut down
#==========================================================================
def OFF(self, button) :
top = self.__Component["top"]
if top :
top.quit()
top.destroy()
#==========================================================================
# METHOD : CE
# PURPOSE : clear entry
#==========================================================================
def CE(self, button) :
self.CLX()
#==========================================================================
# METHOD : AC
# PURPOSE : clear all
#==========================================================================
def AC(self, button) :
self.CLX()
self.CLM()
self.CLA()
self.STK = []
#==========================================================================
# METHOD : pi
# PURPOSE : enter pi
#==========================================================================
def pi(self, button) :
self.STX(math.pi)
#==========================================================================
# METHOD : e
# PURPOSE : enter e
#==========================================================================
def e(self, button) :
self.STX(math.exp(1))
#==========================================================================
# METHOD : const
# PURPOSE : display constant dialog
#==========================================================================
def const(self, button) :
u = self.constant()
if u is not None :
self.STX(u)
#==========================================================================
# METHOD : put
# PURPOSE : put accumulator into constant database
#==========================================================================
def put(self, button) :
x = self.GTX()
self.__saved_constants.append(
"%-16s saved result" % (str(x)))
#==========================================================================
# METHOD : sqr
# PURPOSE : x^2
#==========================================================================
def sqr(self, button) :
x = self.GTX()
self.STX(x*x)
#==========================================================================
# METHOD : sqrt
# PURPOSE : sqrt(x)
#==========================================================================
def sqrt(self, button) :
x = self.GTX()
if (x < 0) :
self.messagebox("error: x<0")
else :
self.STX(math.sqrt(x))
#==========================================================================
# METHOD : inv
# PURPOSE : 1/x
#==========================================================================
def inv(self, button) :
x = self.GTX()
if (x == 0) :
self.messagebox("error: x=0")
else :
self.STX(1.0/x)
#==========================================================================
# METHOD : tenx
# PURPOSE : 10^x
#==========================================================================
def tenx(self, button) :
x = self.GTX()
self.STX(10.0**x)
#==========================================================================
# METHOD : exp
# PURPOSE : e^x
#==========================================================================
def exp(self, button) :
x = self.GTX()
self.STX(math.exp(x))
#==========================================================================
# METHOD : log
# PURPOSE : log10(x)
#==========================================================================
def log(self, button) :
x = self.GTX()
if (x <= 0) :
self.messagebox("error: x<=0")
else :
self.STX(math.log10(x))
#==========================================================================
# METHOD : ln
# PURPOSE : ln(x)
#==========================================================================
def ln(self, button) :
x = self.GTX()
if (x <= 0) :
self.messagebox("error: x<=0")
else :
self.STX(math.log(x))
#==========================================================================
# METHOD : ytox
# PURPOSE : y^x
#==========================================================================
def ytox(self, button) :
x = self.GTX()
self.PSH(str(x))
self.PSH("**")
self.mode = ">"
self.__dispx__()
#==========================================================================
# METHOD : fact
# PURPOSE : x!
#==========================================================================
def fact(self, button) :
x = self.GTX()
if ((x < 0) or (x > 170)) :
self.messagebox("error: x<0 or x>170")
else :
u = 1.0
for i in range(1, int(x+1)) :
u *= i
self.STX(u)
#==========================================================================
# METHOD : trig
# PURPOSE : sin/cos/tan, asin/acos/atan, sinh/cosh/tanh, asinh/acosh/atanh
#==========================================================================
def trig(self, button) :
x = self.GTX()
if (not(self.hyp)) :
if (self.drg == 0) :
f = math.pi/180.0
elif (self.drg == 1) :
f = 1.0
elif (self.drg == 2) :
f = math.pi/200.0
if (not(self.arc)) :
x = eval("math." + button + "(" + str(f) + "*" + str(x) + ")")
self.STX(x)
else :
x = eval("math.a" + button + "(" + str(x) + ")" + "/" + str(f))
self.STX(x)
else :
if (not(self.arc)) :
x = eval("math." + button + "h" + "(" + str(x) + ")")
self.STX(x)
else :
x = eval(compile("self.a" + button + "h" + "(" + str(x) + ")",
"string", "single"))
self.STX(x)
self.arc = 0
self.hyp = 0
#==========================================================================
# METHOD : pmin
# PURPOSE : +,-
#==========================================================================
def pmin(self, button) :
if (self.mode == "E") :
self.x[-3] = button
self.__dispx__()
else :
x = self.GTX()
self.PSH(str(x))
x = self.PEX(("("))
self.STX(x)
x = self.GTX()
self.PSH(str(x))
self.PSH(button)
#==========================================================================
# METHOD : mdiv
# PURPOSE : *,/
#==========================================================================
def mdiv(self, button) :
x = self.GTX()
self.PSH(str(x))
x = self.PEX(("-", "+", "("))
self.STX(x)
x = self.GTX()
self.PSH(str(x))
self.PSH(button)
#==========================================================================
# METHOD : lpar
# PURPOSE : (
#==========================================================================
def lpar(self, button) :
if (self.mode == ">") :
self.PSH(button)
#==========================================================================
# METHOD : rpar
# PURPOSE : )
#==========================================================================
def rpar(self, button) :
x = self.GTX()
self.PSH(str(x))
x = self.PEX(("("))
self.STX(x)
self.POP()
#==========================================================================
# METHOD : eq
# PURPOSE : =
#==========================================================================
def eq(self, button) :
x = self.GTX()
self.PSH(str(x))
x = self.PEX()
self.STX(x)
#==========================================================================
# METHOD : HELP
# PURPOSE : display help dialog
#==========================================================================
def HELP(self, button) :
text = []
text.append("Button:\tKey-binding:\n")
text.append("----------------------------\n")
for button1, binding in zip(Calc.buttons, Calc.buttonbindings) :
text.append(button1 + "\t" + binding + "\n")
text.append("=\t<Return>\n")
text.append("=\t<space>\n")
self.messagebox("".join(text))
#==========================================================================
# METHOD : DRG
# PURPOSE : degree/radian/grad
#==========================================================================
def DRG(self, button) :
self.drg = (self.drg + 1) % 3
self.__dispa__()
#==========================================================================
# METHOD : ARC
# PURPOSE : arc
#==========================================================================
def ARC(self, button) :
self.arc = not(self.arc)
self.__dispa__()
#==========================================================================
# METHOD : HYP
# PURPOSE : hy
#==========================================================================
def HYP(self, button) :
self.hyp = not(self.hyp)
self.__dispa__()
#==========================================================================
# METHOD : Mp
# PURPOSE : add to memory
#==========================================================================
def Mp(self, button) :
x = self.GTX()
m = self.GTM()
m = m + x
self.STM(m)
#==========================================================================
# METHOD : Mm
# PURPOSE : subtract from memory
#==========================================================================
def Mm(self, button) :
x = self.GTX()
m = self.GTM()
m = m - x
self.STM(m)
#==========================================================================
# METHOD : STO
# PURPOSE : store in memory
#==========================================================================
def STO(self, button) :
x = self.GTX()
self.STM(x)
#==========================================================================
# METHOD : RCL
# PURPOSE : recall from memory
#==========================================================================
def RCL(self, button) :
m = self.GTM()
self.STX(m)
#==========================================================================
# METHOD : XCH
# PURPOSE : exchange accumulator and memory
#==========================================================================
def XCH(self, button) :
x = self.GTX()
m = self.GTM()
self.STX(m)
self.STM(x)
#==========================================================================
# METHOD : CLR
# PURPOSE : clear memory
#==========================================================================
def CLR(self, button) :
self.CLM()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# binding and menu commands
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#==========================================================================
# METHOD : BS
# PURPOSE : back-space
#==========================================================================
    def BS(self) :
        """Back-space: remove the last character typed, honoring the
        entry-automaton mode.

        In "X"/"E" (exponent) mode the whole 4-character "E±dd" suffix
        is removed, and the mode is restored from whether the remaining
        mantissa contains a decimal point.
        """
        # work on the joined string; self.x may mix single characters
        # and longer string fragments
        x = "".join(self.x)
        if (len(x) == 1) :
            self.CLX()
            return
        if (self.mode == "I") :
            c = x[-2]
            # only a sign would remain -> clear instead
            if (c == "+" or c == "-") :
                self.CLX()
            else :
                u = x[:-1]
                self.x = [u]
        elif (self.mode == "F") :
            if x[-1] == "." :
                # deleting the decimal point drops back to integer mode
                u = x[:-1]
                self.x = [u]
                self.mode = "I"
            else :
                u = x[:-1]
                self.x = [u]
        elif (self.mode == "X" or self.mode == "E") :
            # strip the entire "E±dd" exponent suffix
            u = x[:-4]
            self.x = [u]
            if (u.find(".") >= 0) :
                self.mode = "F"
            else :
                self.mode = "I"
        self.__dispx__()
        return
#==========================================================================
# METHOD : RAND
# PURPOSE : enter a random-number
#==========================================================================
def RAND(self) :
self.STX(random.random())
#==========================================================================
# METHOD : SEED
# PURPOSE : use accumulator as a random seed
#==========================================================================
def SEED(self) :
x=self.GTX()
random.seed(x)
#==========================================================================
# METHOD : STKQ
# PURPOSE : print out stack
#==========================================================================
def STKQ(self) :
text = []
text.append("LOC:\tVALUE:\nX:\t" + str(self.x) + "\n")
text.append("----------------------------\n")
for i, entry in enumerate(self.STK) :
text.append(str(i) + ":\t" + entry)
self.messagebox("".join(text))
#==========================================================================
# METHOD : PREC
# PURPOSE : use accumulator to set precision
#==========================================================================
def PREC(self) :
x = self.GTX()
if ((x < 1) or (x >= 18)) :
self.messagebox("error: x < 1 or x >= 18")
else :
p = int(x)
tcl_precision = p
self.messagebox("PRECISION = " + str(p))
#==========================================================================
# METHOD : LOG
# PURPOSE : toggle log file mode
#==========================================================================
    def LOG(self) :
        """Toggle logging of results to "calc.py.log" in the current
        directory: first call opens (and truncates) the file, the next
        call closes it."""
        self.logfile = "calc.py.log"
        if not self.__log :
            self.__log = True
            # "w" truncates any previous log each time logging is re-enabled
            self.__logfid = open(self.logfile, "w")
            self.messagebox("log file \"" + self.logfile + "\" opened")
        else :
            self.__log = False
            self.__logfid.close()
            self.messagebox("log file \"" + self.logfile + "\" closed")
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# basic calculator functions
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#==========================================================================
# METHOD : PSH
# PURPOSE : push token onto stack
#==========================================================================
def PSH(self, token) :
self.STK.append(token)
#==========================================================================
# METHOD : POP
# PURPOSE : pop token from stack
#==========================================================================
def POP(self) :
return self.STK.pop()
#==========================================================================
# METHOD : PEX
# PURPOSE : pop expression from stack (stop before Stop)
#==========================================================================
def PEX(self, Stop=("")) :
e = []
while self.STK :
t = self.POP()
if (t in Stop) :
self.PSH(t)
break
if (t != "NOP") :
e.insert(0, t)
e = "".join(e)
if e :
x = eval(e)
return x
return 0.0
#==========================================================================
# METHOD : STX
# PURPOSE : set accumulator
#==========================================================================
def STX(self, x) :
self.x = []
self.x.append(str(x))
self.mode = ">"
self.__dispx__()
if (self.__log) :
u = "".join(self.x)
self.__logfid.write("expression:" + u)
self.__logfid.flush()
#==========================================================================
# METHOD : STM
# PURPOSE : set memory
#==========================================================================
def STM(self, m) :
self.m = m
self.mem = 1
self.__dispm__()
self.mode = ">"
self.__dispx__()
#==========================================================================
# METHOD : CLX
# PURPOSE : clear accumulator
#==========================================================================
def CLX(self) :
self.x = ["0"]
self.mode = ">"
self.__dispx__()
#==========================================================================
# METHOD : CLM
# PURPOSE : clear memory
#==========================================================================
def CLM(self) :
self.m = 0
self.mem = 0
self.__dispm__()
#==========================================================================
# METHOD : CLA
# PURPOSE : clear states
#==========================================================================
def CLA(self) :
self.drg = 0
self.hyp = 0
self.arc = 0
self.__dispa__()
#==========================================================================
# METHOD : GTX
# PURPOSE : return accumulator
# NOTES :
# * put decimal point on end of x if still in integer mode
# before further processing
#==========================================================================
def GTX(self) :
if (self.mode == "I") :
self.x.append(".")
self.mode = "F"
self.__dispx__()
x = eval("".join(self.x))
return x
#==========================================================================
# METHOD : GTM
# PURPOSE : return memory
#==========================================================================
def GTM(self) :
if (not(self.mem)) :
self.m = 0
self.mem = True
self.__dispm__()
return self.m
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# extra math functions
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#==========================================================================
# METHOD : asinh
# PURPOSE : asinh
#==========================================================================
def asinh(self, x) :
return math.log(x + math.sqrt(x*x + 1.0))
#==========================================================================
# METHOD : acosh
# PURPOSE : acosh
#==========================================================================
def acosh(self, x) :
return math.log(x + math.sqrt(x*x - 1.0))
#==========================================================================
# METHOD : atanh
# PURPOSE : atanh
#==========================================================================
def atanh(self, x) :
if (x == 1.0) :
return 100.0
return math.log(math.sqrt((1.0 + x)/(1.0 - x)))
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# windows
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#==========================================================================
# METHOD : __dispx__
# PURPOSE : re-display accumulator, mode
#==========================================================================
def __dispx__(self) :
x = "".join(self.x)
disp = self.__Component["disp"]
stat_s = self.__Component["state_s"]
disp["state"] = "normal"
disp.delete(0, "end")
disp.insert(0, x)
disp["state"] = "disabled"
stat_s["text"] = self.mode
#==========================================================================
# METHOD : __dispm__
# PURPOSE : re-display memory
#==========================================================================
def __dispm__(self) :
stat_m = self.__Component["state_m"]
if (self.mem) :
stat_m["text"] = "MEM"
else :
stat_m["text"] = ""
#==========================================================================
# METHOD : __dispa__
# PURPOSE : re-display drg, hyp, arc
#==========================================================================
def __dispa__(self) :
stat_d = self.__Component["state_d"]
stat_a = self.__Component["state_a"]
stat_h = self.__Component["state_h"]
if (self.drg == 0) :
stat_d["text"] = "DEG"
elif (self.drg == 1) :
stat_d["text"] = "RAD"
else :
stat_d["text"] = "GRD"
if (self.arc) :
stat_a["text"] = "ARC"
else :
stat_a["text"] = ""
if (self.hyp) :
stat_h["text"] = "HYP"
else :
stat_h["text"] = ""
#==========================================================================
# METHOD : messagebox
# PURPOSE : display error or info
#==========================================================================
    def messagebox(self, message) :
        """Pop up a tkinter info dialog, parented to the calculator, to
        show an error or informational message."""
        top = self.__Component["top"]
        tkinter.messagebox.showinfo(parent=top, message=message)
#==========================================================================
# METHOD : constant
# PURPOSE : constant toplevel
#==========================================================================
    def constant(self) :
        """Modal constant-selection dialog.

        Builds a Toplevel with a scrolled listbox of the built-in
        physical constants plus any user-saved results, runs a nested
        mainloop until accept/cancel, and returns the selected value
        string (first field of the chosen line) or None on cancel.
        """
        #---------------------------------------------------------------------
        # toplevel
        #---------------------------------------------------------------------
        top = tk.Toplevel()
        # window-manager close behaves like the cancel button
        top.protocol('WM_DELETE_WINDOW', self.__constant_cancel)
        self.__Component["constant_top"] = top
        #---------------------------------------------------------------------
        # basic frames
        #---------------------------------------------------------------------
        tf = tk.Frame(top, bd=3, relief="groove")
        tf.pack(side="top", fill="x", expand=True)
        mf = tk.Frame(top, bd=3, relief="flat")
        mf.pack(side="top", fill="both", expand=True)
        bf = tk.Frame(top, bd=3, relief="flat", bg="cadet blue")
        bf.pack(side="top", fill="x", expand=True)
        #---------------------------------------------------------------------
        # title
        #---------------------------------------------------------------------
        tf_l = tk.Label(tf, bitmap="question")
        tf_l.pack(side="left", fill="both", expand=True)
        tf_m = tk.Label(tf, justify="left")
        tf_m.pack(side="right", fill="both", expand=True)
        tf_m["text"] = "select constant with <Double-Button-1>"
        #---------------------------------------------------------------------
        # listbox/scrollbar
        #---------------------------------------------------------------------
        yscr= tk.Scrollbar(mf, orient="vertical")
        xscr= tk.Scrollbar(mf, orient="horizontal")
        yscr.pack(side="right", expand=True, fill="y")
        xscr.pack(side="bottom", expand=True, fill="x")
        lbox = tk.Listbox(mf, relief="raised", height=15, width=60)
        lbox.pack(side="left", expand=True, fill="both")
        yscr.set(0, 0)
        xscr.set(0, 0)
        yscr["command"] = lbox.yview
        xscr["command"] = lbox.xview
        lbox["yscrollcommand"] = yscr.set
        lbox["xscrollcommand"] = xscr.set
        # fixed-width font keeps the constant columns aligned
        lbox["font"] = "Courier"
        self.__Component["constant_lbox"] = lbox
        #---------------------------------------------------------------------
        # accept and cancel buttons
        #---------------------------------------------------------------------
        bf_cancel = tk.Frame(bf, bd=2, relief="sunken")
        bf_cancel.pack(side="left", expand=True, padx=3, pady=2)
        bf_cancel_button = tk.Button(bf_cancel, text="cancel")
        bf_cancel_button.pack(anchor="c", expand=True, padx=3, pady=2)
        bf_accept = tk.Frame(bf, bd=2, relief="sunken")
        bf_accept.pack(side="left", expand=True, padx=3, pady=2)
        bf_accept_button = tk.Button(bf_accept, text="accept")
        bf_accept_button.pack(anchor="c", expand=True, padx=3, pady=2)
        bf_cancel_button["command"] = self.__constant_cancel
        bf_accept_button["command"] = self.__constant_accept
        #---------------------------------------------------------------------
        # bindings
        #---------------------------------------------------------------------
        def cmd(event, self=self) :
            self.__constant_accept()
        lbox.bind("<Double-Button-1>", cmd)
        self.__constant_results = None
        for constant in Calc.constants :
            lbox.insert("end", constant)
        for constant in self.__saved_constants :
            lbox.insert("end", constant)
        lbox.activate(0)
        lbox.selection_set(0)
        # modal: grab events and spin a nested mainloop until a callback
        # calls top.quit()
        top.grab_set()
        top.mainloop()
        top.grab_release()
        top.destroy()
        del self.__Component["constant_top"]
        del self.__Component["constant_lbox"]
        return self.__constant_results
#==========================================================================
# METHOD : __constant_accept
# PURPOSE : constant dialog accept button callback
#==========================================================================
def __constant_accept(self) :
top = self.__Component["constant_top"]
lbox = self.__Component["constant_lbox"]
results = lbox.get("active")
results = results.split()[0]
self.__constant_results = results
top.quit()
#==========================================================================
# METHOD : __constant_cancel
# PURPOSE : constant dialog cancel button callback
#==========================================================================
def __constant_cancel(self) :
top = self.__Component["constant_top"]
self.__constant_results = None
top.quit() | PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/gis/geos/polygon.py | from ctypes import byref, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.linestring import LinearRing
class Polygon(GEOSGeometry):
    # Minimum ring count for list-like operations -- presumably the
    # exterior shell must always remain; TODO confirm against ListMixin.
    _minlength = 1
def __init__(self, *args, **kwargs):
"""
Initialize on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> from django.contrib.gis.geos import LinearRing, Polygon
>>> shell = hole1 = hole2 = LinearRing()
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
>>> # Example where a tuple parameters are used:
>>> poly = Polygon(((0, 0), (0, 10), (10, 10), (0, 10), (0, 0)),
... ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
"""
if not args:
super().__init__(self._create_polygon(0, None), **kwargs)
return
# Getting the ext_ring and init_holes parameters from the argument list
ext_ring, *init_holes = args
n_holes = len(init_holes)
# If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility]
if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
if not init_holes[0]:
init_holes = ()
n_holes = 0
elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0]
n_holes = len(init_holes)
polygon = self._create_polygon(n_holes + 1, [ext_ring, *init_holes])
super().__init__(polygon, **kwargs)
def __iter__(self):
"Iterate over each ring in the polygon."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Return the number of rings in this Polygon."
return self.num_interior_rings + 1
@classmethod
def from_bbox(cls, bbox):
"Construct a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
for z in bbox:
if not isinstance(z, (float, int)):
return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' %
(x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))
# ### These routines are needed for list-like operation w/ListMixin ###
def _create_polygon(self, length, items):
# Instantiate LinearRing objects if necessary, but don't clone them yet
# _construct_ring will throw a TypeError if a parameter isn't a valid ring
# If we cloned the pointers here, we wouldn't be able to clean up
# in case of error.
if not length:
return capi.create_empty_polygon()
rings = []
for r in items:
if isinstance(r, GEOM_PTR):
rings.append(r)
else:
rings.append(self._construct_ring(r))
shell = self._clone(rings.pop(0))
n_holes = length - 1
if n_holes:
holes = (GEOM_PTR * n_holes)(*[self._clone(r) for r in rings])
holes_param = byref(holes)
else:
holes_param = None
return capi.create_polygon(shell, holes_param, c_uint(n_holes))
def _clone(self, g):
if isinstance(g, GEOM_PTR):
return capi.geom_clone(g)
else:
return capi.geom_clone(g.ptr)
def _construct_ring(self, param, msg=(
'Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings')):
"Try to construct a ring from the given parameter."
if isinstance(param, LinearRing):
return param
try:
ring = LinearRing(param)
return ring
except TypeError:
raise TypeError(msg)
def _set_list(self, length, items):
# Getting the current pointer, replacing with the newly constructed
# geometry, and destroying the old geometry.
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_polygon(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
def _get_single_internal(self, index):
"""
Return the ring at the specified index. The first index, 0, will
always return the exterior ring. Indices > 0 will return the
interior ring at the given index (e.g., poly[1] and poly[2] would
return the first and second interior ring, respectively).
CAREFUL: Internal/External are not the same as Interior/Exterior!
Return a pointer from the existing geometries for use internally by the
object's methods. _get_single_external() returns a clone of the same
geometry for use by external code.
"""
if index == 0:
return capi.get_extring(self.ptr)
else:
# Getting the interior ring, have to subtract 1 from the index.
return capi.get_intring(self.ptr, index - 1)
def _get_single_external(self, index):
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
# #### Polygon Properties ####
@property
def num_interior_rings(self):
"Return the number of interior rings."
# Getting the number of rings
return capi.get_nrings(self.ptr)
def _get_ext_ring(self):
"Get the exterior ring of the Polygon."
return self[0]
def _set_ext_ring(self, ring):
"Set the exterior ring of the Polygon."
self[0] = ring
# Properties for the exterior ring/shell.
exterior_ring = property(_get_ext_ring, _set_ext_ring)
shell = exterior_ring
@property
def tuple(self):
"Get the tuple for each ring in this Polygon."
return tuple(self[i].tuple for i in range(len(self)))
coords = tuple
@property
def kml(self):
"Return the KML representation of this Polygon."
inner_kml = ''.join(
"<innerBoundaryIs>%s</innerBoundaryIs>" % self[i + 1].kml
for i in range(self.num_interior_rings)
)
return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml) | PypiClean |
from .Battleships import Battleships
from . import References
from os import system, name
import time
# DONE move algorithm to set ship locations into cli, and remove from player class
# TODO (low) change coord system in cli to use letters and then numbers
# DONE move function to get shot coords from player into cli.
# class References:
# # The ships : length of ship
# ships = { 'Aircraft Carrier' : 5, \
# 'Battleship' : 4, \
# 'Cruiser' : 3, \
# 'Submarine' : 3, \
# 'Destroyer' : 2 \
# }
# # ship symbol translation
# symbols = {'Aircraft Carrier' : 'A', \
# 'Battleship' : 'B', \
# 'Cruiser' : 'C', \
# 'Submarine': 'S', \
# 'Destroyer' : 'D', \
# 'Hit' : 'X', \
# 'Miss' : 'o', \
# 'Empty' : ' ',
# 'Sunk' : '#' \
# }
# # the delay used to display errors before clearing the screen
# displayDelay = 2
# # The size of the gameboard
# #TODO update classes to use this variable, frontend will need to set this?
# sizeOfBoard = 10
# def getShips():
# return ships
# def getSymbols():
# return symbols
# def setSizeOfBoard(size):
# sizeOfBoard = size
# def getSizeOfBoard():
# return sizeOfBoard
# validInputs = ['y', 'yes', 'Y' 'Yes', 'YES', 'ok', 'Ok', 'OK', 'o']
# ansiColours = {\
# 'black' : '\033[30m', \
# 'boldBlack' : '\033[30;1m', \
# 'red': '\033[31m', \
# 'boldRed': '\033[31;1m', \
# 'green': '\033[32m', \
# 'boldGreen': '\033[32;1m', \
# 'yellow' : '\033[33m', \
# 'boldYellow' : '\033[33;1m', \
# 'blue': '\033[34m', \
# 'boldBlue': '\033[34;1m', \
# 'magenta' : '\033[35m', \
# 'boldMagenta' : '\033[35;1m', \
# 'cyan' : '\033[36m', \
# 'boldCyan' : '\033[36;1m', \
# 'white' : '\033[37m', \
# 'boldWhite' : '\033[37;1m', \
# 'reset': '\033[0m' \
# }
# resetColour = ansiColours['reset']
# boardColour = ansiColours['blue']
# yLabelColour = ansiColours['boldWhite']
# xLabelColour = ansiColours['boldWhite']
# shipColour = ansiColours['yellow']
# missColour = ansiColours['cyan']
# hitColour = ansiColours['boldRed']
# sunkColour = ansiColours['red']
# highlightColour = ansiColours['boldMagenta']
# instantiate the game object to avoid warnings in methods that variable has not been declared.
# not strictly necessary as game object is in scope when called by the helper methods.
# this just seems good practice.
# game = None
def printBoard(board, latestShot=False):
    """Render one game board to stdout with ANSI colours.

    board: 2D list of symbol strings; row 0 is drawn at the bottom.
    latestShot: (x, y) of the most recent shot, which is highlighted;
    pass False (the default) to highlight nothing.
    """
    # TODO allow printing of boards side by side
    # TODO move cursor to print only changing information (low)
    # TODO allow dynamic resizing of board
    string = References.boardColour+' _______________________________________\n'+References.resetColour
    # Rows print top-down, so iterate indices from high to low; the row
    # index doubles as the y-axis label (fixed: a redundant yLabel counter
    # that always mirrored i has been removed).
    for i in range(len(board)-1, -1, -1):
        string += References.yLabelColour+str(i)+References.resetColour
        for j in range(len(board[i])):
            string += References.boardColour+' | '
            # highlight latest shot, otherwise colour by cell symbol
            if (j, i) == latestShot:
                string += References.highlightColour + board[i][j] + References.resetColour
            elif board[i][j] == References.symbols['Hit']:
                string += References.hitColour + board[i][j] + References.resetColour
            elif board[i][j] == References.symbols['Miss']:
                string += References.missColour + board[i][j] + References.resetColour
            elif board[i][j] == References.symbols['Sunk']:
                string += References.sunkColour + board[i][j] + References.resetColour
            else:
                string += References.shipColour + board[i][j] + References.resetColour
        string += References.boardColour + ' |\n' + References.resetColour
    # x-axis labels; still hard-coded to a 10-wide board (see TODO above)
    string += References.xLabelColour + '    0   1   2   3   4   5   6   7   8   9' + References.resetColour
    print(string)
def printWinner(state, game):
    """Clear the screen, announce the outcome and show player 1's boards.

    state: 'win' or 'lose' (anything else prints no banner).
    game: the Battleships game object to read boards from.
    """
    clear()
    banners = {'lose': '\nYou Lose\n\n', 'win': '\nYou Win!\n\n'}
    if state in banners:
        print(banners[state])
    print("Player 1 fleet")
    printBoard(game.getPlayerBoard('P1'))
    print("\nPlayer 1 tracking")
    printBoard(game.getPlayerBoard('P1', tracking=True))
    print('')
def run():
    """Entry point: ask which game mode to play and launch it."""
    clear()
    print()
    #References.setSizeOfBoard(getBoardSize()) #TODO printBoard needs to be dynamic before this is used.
    single = input('Single player game? ') in References.validInputs
    if single:
        humanVcomp()
    else:
        compVcomp()
def setBoard(gameInstance, player):
    # TODO
    ''' Prompt a human player to place their fleet (random or manual).

    @param gameInstance: the Battleships game object
    @param player: player key, 'P1' or 'P2'
    Returns True once the fleet is placed, False for auto (AI) players or
    when a scripted test placement fails.
    NOTE(review): if the disabled `test` prompt were re-enabled, `randomise`
    could be unbound in the branches below -- confirm before toggling.
    '''
    # AI players place their own ships; nothing to do here for them.
    auto = gameInstance.getAutoPlayer(player)
    # TODO handle errors, and widen valid inputs
    test = False
    #test = True if input('Do you want a test placement? ') in References.validInputs else False
    if not test:
        randomise = True if input('\nDo you want to place the ships at random? ') in References.validInputs else False
    if not auto:
        if test:
            # places ships in the bottom left corner for shot testing.
            x, y, direction = 0, 0, 0
            for eachShip in References.getShips():
                # in case the ships can't be placed for whatever reason
                placed = gameInstance.setFleetLocation(player, [[eachShip, (x,y), direction]])
                if not placed:
                    print('failed to place')
                    return False
                y += 1
            result = True
        elif randomise:
            # places ships at random
            result = gameInstance.setFleetLocation(player, [], randomise=True)
        else:
            # goes through the defined ships, asks for intended location
            # checks if valid location, and places if so.
            for eachShip in References.getShips():
                print(eachShip)
                placed = False
                index = 0
                # keep prompting until the backend accepts the placement
                while not placed:
                    clear()
                    print('\n')
                    printBoard(gameInstance.getPlayerBoard(player))
                    print('Place your '+eachShip)
                    xCoord, yCoord, direction = getCoords(placing=True)
                    # could check the placement in cli, but to avoid repeating myself, using functions in backend.
                    placed = gameInstance.setFleetLocation(player, [[eachShip, (xCoord, yCoord), direction]])
                    if not placed:
                        print("Sorry, you can't place it there cli")
                        time.sleep(References.displayDelay)
                    index += 1
            result = True
    elif auto or randomise:
        result = False
    return result
def checkPlacement(shipName, xCoord, yCoord, direction):
    """Check that a ship placement stays inside the grid.

    direction: 0 = horizontal (to the right), 1 = vertical (up).
    Returns False when the ship would run off the board, True otherwise.
    TODO check for overlap (may involve passing all coords of ships to placeship)
    NOTE(review): the board size is hard-coded to 10 here; presumably this
    should track References.sizeOfBoard -- confirm before changing.
    """
    length = References.getShips()[shipName]
    # fixed: removed a leftover debug print and a dead triple-quoted code
    # string that used to sit inside this function
    if (xCoord + length > 10 and direction == 0) \
            or (yCoord + length > 10 and direction == 1):
        return False
    return True
def takeShotAt(gameInstance, activePlayer, target):
    """Fire one shot from activePlayer at target and return the backend result.

    AI players pick their own square; human players are prompted until they
    choose a square they have not already shot at (the backend signals a
    repeat square by returning -1).
    """
    if gameInstance.getAutoPlayer(activePlayer):
        return gameInstance.takeShot(activePlayer, target)
    while True:
        x, y, _ = getCoords()
        outcome = gameInstance.takeShot(activePlayer, target, xCoord=x, yCoord=y)
        if outcome != -1:
            return outcome
        print("You've already shot there, try again")
def getCoords(placing=False):
    """Prompt for board coordinates (and, when placing, an orientation).

    Returns (xCoord, yCoord, direction); direction is False when not
    placing, otherwise 0 for rightward or 1 for upward placement.
    Re-prompts until every input is valid.
    """
    while True:
        try:
            xCoord = int(input('X-coordinate (0-9): '))
            if xCoord < 0 or xCoord > 9:
                raise ValueError
            yCoord = int(input('y-coordinate (0-9): '))
            if yCoord < 0 or yCoord > 9:
                raise ValueError
            direction = False  # default when not placing
            if placing:
                direction = input('To the right, or up? (r/u): ')
                if direction == 'r':
                    direction = 0
                elif direction == 'u':
                    direction = 1
                else:
                    raise ValueError
            # fixed: two identical returns behind a pointless
            # `if placing == False` collapsed into one
            return xCoord, yCoord, direction
        except ValueError:
            print('Sorry, your input was not recognised, please try again')
def getBoardSize():
    """Prompt for a board size between 10 and 20 (inclusive) and return it."""
    while True:
        try:
            # fixed: C-style `(int)(...)` cast replaced with a plain int() call
            size = int(input('How big would you like the board to be? (default is 10) '))
            if size < 10 or size > 20:
                raise ValueError
            return size
        except ValueError:
            # fixed: grammar of the error message
            print("Sorry, that's not valid, please enter a number between 10 and 20")
def clear():
    """Clear the terminal ('cls' on Windows, 'clear' on POSIX) and reprint the banner."""
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
    print("Battleships - Shoot to win!")
def humanVcomp():
    """Run a human (P1) versus computer (P2) game loop until someone wins."""
    humanVcomp = True
    while humanVcomp:
        clear()
        game = Battleships(p1auto=False, p2auto=True, aiLevelP2=1, boardSize=References.sizeOfBoard)
        playerFirst = 0  # 0 only on the very first turn: the computer skips its shot
        setBoard(game, 'P1')
        while not game.getWinner():
            clear()
            if playerFirst != 0:
                # NOTE(review): takeShotAt returns a single value elsewhere in
                # this file, yet is unpacked into two here; and the winner
                # sentinel is 'P' rather than 'P2' -- confirm the backend
                # contract before changing either.
                result, location = takeShotAt(game, "P2", "P1")
                if result == 'P':
                    printWinner('lose', game)
                    break
                # when a ship is sunk, all squares from that ship are returned, not in hit order
                if result in References.ships:  # ship name only returned on sink event
                    print(f"\nComputer fired at {game.getLatestShot('P2')} and sunk your {result}\n")
                else:
                    print('\nComputer fired at {loc} and it was a {res}\n'.format(loc=location[0], res=result))
            else:
                print('\n\n')
            print("Player 1 fleet")
            printBoard(game.getPlayerBoard('P1'))
            print("\nPlayer 1 tracking")
            printBoard(game.getPlayerBoard('P1', tracking=True))
            print('')
            # fixed: `cheat` is only defined under the __main__ guard, so a
            # bare reference raised NameError when this module was imported
            # and run() called; look it up safely instead
            if globals().get('cheat', False):
                print('COMPUTER BOARD')
                print(f"Computer fleet size = {game.player2.fleetSize['shipsRemaining']}")
                printBoard(game.getPlayerBoard('P2'))
                print()
            result = takeShotAt(game, 'P1', 'P2')
            if result == 'P1':
                printWinner('win', game)
                break
            playerFirst = 1
        humanVcomp = False
def compVcomp():
    """Run a fully automatic computer-versus-computer game, animating each
    turn at two frames per second and printing a summary at the end."""
    winner = 0
    compVcomp = True
    while compVcomp:
        clear()
        print()
        # both players are AI; ships are placed by the backend
        game = Battleships(p1auto=True, p2auto=True, aiLevelP2=1, aiLevelP1=1)
        while not game.getWinner():
            # each player fires once per pass; takeShotAt returns the winner
            # key when that shot ends the game
            if takeShotAt(game, "P1", "P2") == "P1":
                winner = 'Player 1 wins'
                break
            if takeShotAt(game, "P2", "P1") == "P2":
                winner = 'Player 2 wins'
                break
            clear()
            print('\n\n\n\n')
            #print('p2 has taken a shot\n')
            #print(f"\nplayer 1 moves = {game.player1.movesMade}")
            #print(f"p1 shipsRemaining = {game.player1.fleetSize['shipsRemaining']}")
            #print(f"player 2 moves = {game.player2.movesMade}")
            #print(f"p2 shipsRemaining = {game.player2.fleetSize['shipsRemaining']}")
            print("Player 1 fleet")
            printBoard(game.getPlayerBoard('P1'), latestShot = game.getLatestShot('P2'))
            print("\n\nPlayer 2 fleet")
            printBoard(game.getPlayerBoard('P2'), latestShot = game.getLatestShot('P1'))
            # brief pause so the animation is watchable
            time.sleep(0.5)
        # final summary screen
        clear()
        print('\n--', winner, '--\n')
        print(f"player 1 moves = {game.player1.movesMade}")
        print(f"player 2 moves = {game.player2.movesMade}")
        print(f"P1 latest shot is = {game.getLatestShot('P1')}")
        print(f"P2 latest shot is = {game.getLatestShot('P2')}")
        print("Player 1 fleet")
        printBoard(game.getPlayerBoard('P1'), latestShot = game.getLatestShot('P2'))
        print("\n\nPlayer 2 fleet")
        printBoard(game.getPlayerBoard('P2'), latestShot = game.getLatestShot('P1'))
        print()
        compVcomp = False
if __name__ == '__main__':
    # ask once whether to reveal the computer's board during play
    cheat = input('Cheat mode? ') in References.validInputs
    run()
import weakref, functools
import numpy as np
import sklearn.preprocessing as preprocessing
from abc import ABCMeta, abstractmethod
from ultron.ump.core.fixes import six
from ultron.kdutils.file import load_pickle
class BuyUmpMixin(object):
    """
    Mixin marking an ump (judge) as a *buy* interceptor. Unlike
    BuyFeatureMixin, every concrete ump belongs to exactly one category:
    it is either a BuyUmpMixin or a SellUmpMixin.
    """
    # prefix used when composing buy-side ump identifiers/dump names
    _ump_type_prefix = 'buy_'
class SellUmpMixin(object):
    """
    Mixin marking an ump (judge) as a *sell* interceptor. Unlike
    SellFeatureMixin, every concrete ump belongs to exactly one category:
    it is either a BuyUmpMixin or a SellUmpMixin.
    """
    # prefix used when composing sell-side ump identifiers/dump names
    _ump_type_prefix = 'sell_'
class UmpDict(dict):
    """Weak-referenceable dict.

    Built-in types such as list and dict do not directly support weak
    references, but a trivial subclass does, so values stored in a
    weakref.WeakValueDictionary are wrapped in this type.
    """
    pass
class CachedUmpManager:
    """Cache of ump interception decision data; an instance lives on both
    the main-judge and edge-judge base classes."""
    """Internal-only switch (change requires editing the source): default is
    a plain dict rather than a WeakValueDictionary."""
    s_use_weak = False

    def __init__(self):
        # Create the backing _cache: a WeakValueDictionary or a dict,
        # depending on s_use_weak.
        self._cache = weakref.WeakValueDictionary(
        ) if CachedUmpManager.s_use_weak else dict()

    def get_ump(self, ump):
        """
        Return the decision data for *ump*, used mainly inside a concrete
        judge's predict method. On a cache miss the data is loaded with
        load_pickle; on a hit the cached object is returned directly.

        :param ump: concrete judge object (an UltronUmpBase instance)
        :return: the decision data that judge needs; its shape and usage
                 differ per judge category, e.g. in a main judge::

                     def predict(self, x, need_hit_cnt=1):
                         dump_clf_with_ind = UltronUmpMainBase.dump_clf_manager.get_ump(self)
                         count_hit = 0
                         for clf, ind in dump_clf_with_ind.values():
                             if clf.predict(x) == ind:
                                 count_hit += 1
                             if need_hit_cnt == count_hit:
                                 return 1
                         return 0
        """
        # dump_file_fn is overridden by every concrete judge to declare
        # where its cached decision data is stored on disk
        name = ump.dump_file_fn() #fix me
        if name not in self._cache:
            # not cached yet: load the pickled data from disk
            dump_clf = load_pickle(name)
            if dump_clf is None:
                # interception requested before the ump was ever fitted
                raise RuntimeError(
                    '{}: you must first fit orders, or {} is not exist!!'.
                    format(ump.__class__.__name__, name))
            if CachedUmpManager.s_use_weak:
                # WeakValueDictionary mode: wrap in UmpDict so the value
                # can be weakly referenced
                dump_clf = UmpDict(**dump_clf)
            self._cache[name] = dump_clf
        else:
            # cache hit: reuse the stored object
            dump_clf = self._cache[name]
        return dump_clf

    def clear(self):
        """Drop every cached ump."""
        self._cache.clear()
def ump_main_make_xy(func):
    """
    Decorator for make_xy in a main judge's fiter class. It prepares
    self.order_has_ret from kwargs['orders_pd'], lets the wrapped make_xy
    select the feature columns, then splits the result into x/y matrices.

    Usage (see UltronUmpMainDeg or other main-judge subclasses)::

        class UltronUmpMainDeg(UltronUmpMainBase, BuyUmpMixin):
            class UmpDegFiter(UltronMLPd):
                @ump_main_make_xy
                def make_xy(self, **kwarg):
                    regex = 'result|{}'.format(
                        '|'.join(UltronFeatureDeg().get_feature_ump_keys(ump_cls=UltronUmpMainDeg)))
                    deg_df = self.order_has_ret.filter(regex=regex)
                    return deg_df
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        """
        Required kwarg: kwargs['orders_pd'] -- set in UltronUmpMainBase.__init__
        via ``self.fiter = self.fiter_cls(orders_pd=self.orders_pd, **kwarg)``.
        Optional kwarg: kwargs['scaler'] (bool, default absent/False) --
        whether the feature matrix built by make_xy is standardized.
        NOTE(review): wrapper returns None; callers rely on the attributes
        it sets (self.x, self.y, self.df, self.np).
        """
        if kwargs is None or 'orders_pd' not in kwargs:
            raise ValueError('kwarg is None or not kwarg.has_key orders_pd')
        orders_pd = kwargs['orders_pd']
        # keep only orders that have a trade result
        order_has_ret = orders_pd[orders_pd['result'] != 0]
        # results are -1 loss / 1 win / 0 keep; keep-state rows are gone, so
        # remap loss -> 0 and win -> 1 for classification
        # noinspection PyTypeChecker
        order_has_ret['result'] = np.where(order_has_ret['result'] == -1, 0, 1)
        self.order_has_ret = order_has_ret
        # the wrapped make_xy picks the features this judge needs
        ump_df = func(self, *args, **kwargs)
        if 'scaler' in kwargs and kwargs['scaler'] is True:
            # optionally standardize every feature column (column 0 is y)
            scaler = preprocessing.StandardScaler()
            for col in ump_df.columns[1:]:
                ump_df[col] = scaler.fit_transform(ump_df[col].values.reshape(
                    -1, 1))
        # convert to a matrix and split into x / y
        ump_np = ump_df.values #ump_df.as_matrix()
        self.y = ump_np[:, 0]
        self.x = ump_np[:, 1:]
        # also keep the pd.DataFrame form around
        self.df = ump_df
        self.np = ump_np
    return wrapper
def ump_edge_make_xy(func):
    """
    Decorator for make_xy in an edge judge's fiter class. It prepares
    self.order_has_ret from kwargs['orders_pd'], lets the wrapped make_xy
    select the feature columns, then splits the result into x/y matrices.

    Usage (see UltronUmpEdgeDeg or other edge-judge subclasses)::

        class UltronUmpEdgeDeg(UltronUmpEdgeBase, BuyUmpMixin):
            class UmpDegFiter(UltronMLPd):
                @ump_edge_make_xy
                def make_xy(self, **kwarg):
                    filter_list = ['profit', 'profit_cg']
                    cols = UltronFeatureDeg().get_feature_ump_keys(ump_cls=UltronUmpEdgeDeg)
                    filter_list.extend(cols)
                    deg_df = self.order_has_ret.filter(filter_list)
                    return deg_df
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        """
        Required kwarg: kwargs['orders_pd'] -- set in UltronUmpEdgeBase.__init__
        via ``self.fiter = self.fiter_cls(orders_pd=orders_pd, **kwarg)``.
        NOTE(review): wrapper returns None; callers rely on the attributes
        it sets (self.x, self.y, self.df, self.np).
        """
        if kwargs is None or 'orders_pd' not in kwargs:
            raise ValueError('kwarg is None or not kwarg.has_key orders_pd')
        orders_pd = kwargs['orders_pd']
        # keep only orders that have a trade result
        orders_pd_tmp = orders_pd[orders_pd['result'] != 0]
        # second filter: profit must be non-zero as well
        order_has_ret = orders_pd_tmp[orders_pd_tmp['profit'] != 0]
        self.order_has_ret = order_has_ret
        # the wrapped make_xy picks the features this judge needs
        ump_df = func(self, *args, **kwargs)
        # convert to a matrix and split into x / y
        ump_np = ump_df.values #ump_df.as_matrix()
        # the edge feature list starts with ['profit', 'profit_cg'], both of
        # which form y
        self.y = ump_np[:, :2]
        self.x = ump_np[:, 2:]
        # also keep the pd.DataFrame form around
        self.df = ump_df
        self.np = ump_np
    return wrapper
class Base(six.with_metaclass(ABCMeta, object)):
    """Abstract base class shared by all judge (ump) classes."""
    # shared interception-data cache (see CachedUmpManager); lives on the
    # class so all judges of a kind reuse loaded data
    dump_clf_manager = CachedUmpManager()

    @abstractmethod
    def get_fiter_class(self):
        """Subclasses must declare the class used to build this judge's feature set."""
        pass

    @abstractmethod
    def get_predict_col(self):
        """Subclasses must return the feature keys this judge needs."""
        pass

    @classmethod
    def is_buy_ump(cls):
        """
        Return whether this judge intercepts buy orders (class method).
        :return: bool, True when this is a buy-side interceptor
        """
        return getattr(cls, "_ump_type_prefix") == 'buy_'

    @classmethod
    @abstractmethod
    def class_unique_id(cls):
        """
        Unique keyword name for the concrete ump class (class method,
        abstract). Mainly for user-defined external umps; the user must
        guarantee uniqueness -- it is not checked internally.
        See UltronUmpManager.extend_ump_block for usage.
        """
        pass

    def __str__(self):
        """Display the class name, is_buy_ump and predict_col."""
        return '{}: is_buy_ump:{} predict_col:{}'.format(
            self.__class__.__name__, self.__class__.is_buy_ump(),
            self.get_predict_col())

    __repr__ = __str__
/AutoDataCleaner-1.1.3.tar.gz/AutoDataCleaner-1.1.3/README.md | # AutoDataCleaner
[](https://github.com/sinkingtitanic/AutoDataCleaner)
[](https://pypi.org/project/AutoDataCleaner/)
[](https://www.python.org/downloads/)
[](https://pypi.org/project/AutoDataCleaner/)

Simple and automatic data cleaning in one line of code! It performs **one-hot encoding**, **converts columns to numeric dtype**, **cleaning dirty/empty values**, **normalizes values** and **removes unwanted columns** all in one line of code.
Get your data ready for model training and fitting quickly.
# Features
0. **Uses Pandas DataFrames** (no need to learn new syntax)
1. **One-hot encoding**: encodes non-numeric values to one-hot encoding columns
2. **Converts columns to numeric dtypes**: converts text numbers to numeric dtypes **see [1] below**
3. **Auto detects binary columns**: any column that has two unique values, these values will be replaced with 0 and 1 (e.g.: `['looser', 'winner'] => [0,1]`)
4. **Normalization**: performs normalization to columns (excludes binary [1/0] columns)
5. **Cleans Dirty/None/NA/Empty values**: replace None values with mean or mode of a column, delete row that has None cell or substitute None values with pre-defined value
6. **Delete Unwanted Columns**: drop and remove unwanted columns (usually this will be the 'id' column)
7. **Converts date, time or datetime columns to datetime dtype**
# Installation
#### Using pip
`pip install AutoDataCleaner`
#### Cloning repo:
Clone repository and run `pip install -e .` inside the repository directory
#### Install from repo directly
Install from repository directly using `pip install git+git://github.com/sinkingtitanic/AutoDataCleaner.git#egg=AutoDataCleaner`
# Quick One-line Usage:
```
import AutoDataCleaner.AutoDataCleaner as adc
adc.clean_me(dataframe,
detect_binary=True,
numeric_dtype=True,
one_hot=True,
na_cleaner_mode="mean",
normalize=True,
datetime_columns=[],
remove_columns=[],
verbose=True)
```
# Example
```
>>> import pandas as pd
>>> import AutoDataCleaner.AutoDataCleaner as adc
>>> df = pd.DataFrame([
... [1, "Male", "white", 3, "2018/11/20"],
... [2, "Female", "blue", "4", "2014/01/12"],
... [3, "Male", "white", 15, "2020/09/02"],
... [4, "Male", "blue", "5", "2020/09/02"],
... [5, "Male", "green", None, "2020/12/30"]
... ], columns=['id', 'gender', 'color', 'weight', 'created_on'])
>>>
>>> adc.clean_me(df,
... detect_binary=True,
... numeric_dtype=True,
... one_hot=True,
... na_cleaner_mode="mode",
... normalize=True,
... datetime_columns=["created_on"],
... remove_columns=["id"],
... verbose=True)
+++++++++++++++ AUTO DATA CLEANING STARTED ++++++++++++++++
= AutoDataCleaner: Casting datetime columns to datetime dtype...
+ converted column created_on to datetime dtype
= AutoDataCleaner: Performing removal of unwanted columns...
+ removed 1 columns successfully.
= AutoDataCleaner: Performing One-Hot encoding...
+ detected 1 binary columns [['gender']], cells cleaned: 5 cells
= AutoDataCleaner: Converting columns to numeric dtypes when possible...
+ 1 minority (minority means < %25 of 'weight' entries) values that cannot be converted to numeric dtype in column 'weight' have been set to NaN, nan cleaner function will deal with them
+ converted 5 cells to numeric dtypes
= AutoDataCleaner: Performing One-Hot encoding...
+ one-hot encoding done, added 2 new columns
= AutoDataCleaner: Performing None/NA/Empty values cleaning...
+ cleaned the following NaN values: {'weight NaN Values': 1}
= AutoDataCleaner: Performing dataset normalization...
+ normalized 5 cells
+++++++++++++++ AUTO DATA CLEANING FINISHED +++++++++++++++
gender weight created_on color_blue color_green color_white
0 1 -0.588348 2018-11-20 0 0 1
1 0 -0.392232 2014-01-12 1 0 0
2 1 1.765045 2020-09-02 0 0 1
3 1 -0.196116 2020-09-02 1 0 0
4 1 -0.588348 2020-12-30 0 1 0
```
**If you want to pick and choose with more customization, please go to `AutoDataCleaner.py` (the code is highly documented for your convenience)**
# Explaining Parameters
`adc.clean_me(dataframe, detect_binary=True, one_hot=True, na_cleaner_mode="mean", normalize=True, remove_columns=[], verbose=True)`
Parameters & what do they mean
_Call the help function `adc.help()` to output the below instructions_
* `dataframe`: input Pandas DataFrame on which the cleaning will be performed <br />
* `detect_binary`: if True, any column that has two unique values, these values will be replaced with 0 and 1 (e.g.: ['looser', 'winner'] => [0,1]) <br />
* `numeric_dtype`: if True, columns will be converted to numeric dtypes when possible **see [1] below**
* `one_hot`: if True, all non-numeric columns will be encoded to one-hot columns <br />
* `na_cleaner_mode`: what technique to use when dealing with None/NA/Empty values. Modes: <br />
* `False`: do not consider cleaning na values <br />
* `'remove row'`: removes rows with a cell that has NA value<br />
    * `'mean'`: substitutes empty NA cells with the mean of that column <br />
    * `'mode'`: substitutes empty NA cells with the mode of that column<br />
* `'*'`: any other value will substitute empty NA cells with that particular value passed here <br />
* `normalize`: if True, all non-binary (columns with values 0 or 1 are excluded) columns will be normalized. <br />
* `datetime_columns`: a list of columns which contains date or time or datetime entries (important to be announced in this list, otherwise `normalize_df` and `convert_numeric_df` functions will mess up these columns)
* `remove_columns`: list of columns to remove; these are usually unrelated features such as the ID column <br />
* `verbose`: print progress in terminal/cmd<br />
* `returns`: processed and clean Pandas DataFrame <br />
[1] When `numeric_dtype` is set to True, columns that have strings of numbers (e.g.: "123" instead of 123) will be converted to numeric dtype.
if in a particular column, the values that cannot be converted to numeric dtypes are minority in that column (< 25% of total entries in that column), these
minority non-numeric values in that column will be converted to NaN; then, the NaN cleaner function will handle them according to your settings. See `convert_numeric_df()` function in `AutoDataCleaner.py` file for more documentation.
# Prediction
In prediction phase, put the examples to be predicted in Pandas DataFrame and run them through `adc.clean_me()` function **with the same parameters you
used during training**.
# Contribution & Maintenance
This repository is thoroughly commented for your convenience; please feel free to send me feedback at "ofcourse7878@gmail.com", submit an issue, or make a pull request!
| PypiClean |
/HADeploy-0.6.1.tar.gz/HADeploy-0.6.1/lib/hadeploy/plugins/storm/roles/storm_modules/README.md | # storm_modules
This Ansible role hosts a module aimed at managing the topology lifecycle using the Storm UI REST API.
[Parameters reference here](./docs/storm_topology.txt)
## Requirements
These modules need the `python-requests` package to be present on the remote node.
To be able to access kerberos protected cluster, `python-requests-kerberos` is also required
# Example Playbook
The following example kill all topologies defined in a list.
- hosts: en1
vars:
topologies:
- { name: "storm1", wt: 10 }
- { name: "storm2", wt: 12 }
- { name: "storm3", wt: 10 }
roles:
- storm_modules
tasks:
- name: Kill
storm_topology:
ui_url: http://en1.mycluster.com:8744/
name: "{{item.name}}"
state: killed
wait_time_secs: "{{item.wt}}"
timeout_secs: 20
with_items: "{{ topologies }}"
- name: And wait
storm_topology:
ui_url: http://en1.mycluster.com:8744/
name: "{{item.name}}"
state: nonexistent
wait_time_secs: "{{item.wt}}"
timeout_secs: 20
with_items: "{{ topologies }}"
Note we first set all topologies in `killed` state, then we wait for the kill to be completed (state `nonexistent`).
Doing this way allow to shutdown all topologies in parallel, thus reducing drastically shutdown duration.
# License
GNU GPL
Click on the [Link](COPYING) to see the full text.
| PypiClean |
/FitBenchmarking-1.0.0.tar.gz/FitBenchmarking-1.0.0/docs/source/extending/cost_function.rst | .. _cost_function:
#########################
Adding new cost functions
#########################
*This section describes how to add cost functions to benchmarking in FitBenchmarking*
In order to add a new cost function, ``<cost_func>``,
you will need to:
1. Create ``fitbenchmarking/cost_func/<cost_func>_cost_func.py``,
which contains a new subclass of
:class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`.
Then implement the methods:
- .. automethod:: fitbenchmarking.cost_func.base_cost_func.CostFunc.eval_cost()
:noindex:
- .. automethod:: fitbenchmarking.cost_func.base_cost_func.CostFunc.jac_res()
:noindex:
- .. automethod:: fitbenchmarking.cost_func.base_cost_func.CostFunc.jac_cost()
:noindex:
- .. automethod:: fitbenchmarking.cost_func.base_cost_func.CostFunc.hes_res()
:noindex:
- .. automethod:: fitbenchmarking.cost_func.base_cost_func.CostFunc.hes_cost()
:noindex:
2. Document the available cost functions by:
* adding ``<cost_func>`` to the ``cost_func_type`` option in :ref:`fitting_option`.
* updating any example files in the ``examples`` directory
* adding the new cost function to the :ref:`cost_func` user docs.
3. Create tests for the cost function in
``fitbenchmarking/cost_func/tests/test_cost_func.py``.
The :class:`~fitbenchmarking.parsing.fitting_problem.FittingProblem` and :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc` classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When adding new cost functions, you will find it helpful to make use of the
following members of the :class:`~fitbenchmarking.parsing.fitting_problem.FittingProblem`
class.
.. currentmodule:: fitbenchmarking.parsing.fitting_problem
.. autoclass:: fitbenchmarking.parsing.fitting_problem.FittingProblem
:members: eval_model, data_x, data_y, data_e
:noindex:
You will also find it useful to implement the subclass members of
:class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`,
:class:`~fitbenchmarking.jacobian.base_jacobian.Jacobian` and
:class:`~fitbenchmarking.hessian.base_hessian.Hessian`.
.. currentmodule:: fitbenchmarking.cost_func.base_cost_func
.. autoclass:: fitbenchmarking.cost_func.base_cost_func.CostFunc
:members: eval_cost, jac_res, jac_cost, hes_res, hes_cost
:noindex:
.. currentmodule:: fitbenchmarking.jacobian.base_jacobian
.. autoclass:: fitbenchmarking.jacobian.base_jacobian.Jacobian
:members: eval
:noindex:
.. currentmodule:: fitbenchmarking.hessian.base_hessian
.. autoclass:: fitbenchmarking.hessian.base_hessian.Hessian
:members: eval
:noindex:
| PypiClean |
/Flask-AppBuilder-redirect-2.1.13.tar.gz/Flask-AppBuilder-redirect-2.1.13/flask_appbuilder/api/schemas.py | from ..const import (
API_ADD_COLUMNS_RIS_KEY,
API_ADD_TITLE_RIS_KEY,
API_DESCRIPTION_COLUMNS_RIS_KEY,
API_EDIT_COLUMNS_RIS_KEY,
API_EDIT_TITLE_RIS_KEY,
API_FILTERS_RIS_KEY,
API_LABEL_COLUMNS_RIS_KEY,
API_LIST_COLUMNS_RIS_KEY,
API_LIST_TITLE_RIS_KEY,
API_ORDER_COLUMN_RIS_KEY,
API_ORDER_COLUMNS_RIS_KEY,
API_ORDER_DIRECTION_RIS_KEY,
API_PAGE_INDEX_RIS_KEY,
API_PAGE_SIZE_RIS_KEY,
API_PERMISSIONS_RIS_KEY,
API_SELECT_COLUMNS_RIS_KEY,
API_SELECT_KEYS_RIS_KEY,
API_SHOW_COLUMNS_RIS_KEY,
API_SHOW_TITLE_RIS_KEY,
)
# JSON schema validating the Rison query argument of the list (GET collection)
# endpoint: metadata key selection, column selection, ordering, pagination
# and filters.
get_list_schema = {
    "type": "object",
    "properties": {
        # which metadata sections to render in the response
        API_SELECT_KEYS_RIS_KEY: {
            "type": "array",
            "items": {
                "type": "string",
                "enum": [
                    API_LIST_COLUMNS_RIS_KEY,
                    API_ORDER_COLUMNS_RIS_KEY,
                    API_LABEL_COLUMNS_RIS_KEY,
                    API_DESCRIPTION_COLUMNS_RIS_KEY,
                    API_LIST_TITLE_RIS_KEY,
                    "none",
                ],
            },
        },
        # subset of model columns to return for each item
        API_SELECT_COLUMNS_RIS_KEY: {"type": "array", "items": {"type": "string"}},
        # ordering column and direction
        API_ORDER_COLUMN_RIS_KEY: {"type": "string"},
        API_ORDER_DIRECTION_RIS_KEY: {"type": "string", "enum": ["asc", "desc"]},
        # pagination
        API_PAGE_INDEX_RIS_KEY: {"type": "integer"},
        API_PAGE_SIZE_RIS_KEY: {"type": "integer"},
        # list of {col, opr, value} filter triples
        API_FILTERS_RIS_KEY: {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "col": {"type": "string"},
                    "opr": {"type": "string"},
                    "value": {
                        "anyOf": [
                            {"type": "number"},
                            {"type": "string"},
                            {"type": "boolean"},
                        ]
                    },
                },
            },
        },
    },
}
# JSON schema validating the Rison query argument of the single-item (GET
# by primary key) endpoint: metadata key selection and column selection.
get_item_schema = {
    "type": "object",
    "properties": {
        # which metadata sections to render in the response
        API_SELECT_KEYS_RIS_KEY: {
            "type": "array",
            "items": {
                "type": "string",
                "enum": [
                    API_SHOW_COLUMNS_RIS_KEY,
                    API_DESCRIPTION_COLUMNS_RIS_KEY,
                    API_LABEL_COLUMNS_RIS_KEY,
                    API_SHOW_TITLE_RIS_KEY,
                    "none",
                ],
            },
        },
        # subset of model columns to return for the item
        API_SELECT_COLUMNS_RIS_KEY: {"type": "array", "items": {"type": "string"}},
    },
}
# JSON schema validating the Rison query argument of the _info endpoint:
# metadata key selection plus per-column add-form options (pagination of
# related-field choices). Fixed: closing brace restored after a dataset
# join artifact corrupted the final line.
get_info_schema = {
    "type": "object",
    "properties": {
        # which metadata sections to render in the response
        API_SELECT_KEYS_RIS_KEY: {
            "type": "array",
            "items": {
                "type": "string",
                "enum": [
                    API_ADD_COLUMNS_RIS_KEY,
                    API_EDIT_COLUMNS_RIS_KEY,
                    API_FILTERS_RIS_KEY,
                    API_PERMISSIONS_RIS_KEY,
                    API_ADD_TITLE_RIS_KEY,
                    API_EDIT_TITLE_RIS_KEY,
                    "none",
                ],
            },
        },
        # per-column pagination options for add-form related choices
        API_ADD_COLUMNS_RIS_KEY: {
            "type": "object",
            "additionalProperties": {
                "type": "object",
                "properties": {
                    API_PAGE_SIZE_RIS_KEY: {"type": "integer"},
                    API_PAGE_INDEX_RIS_KEY: {"type": "integer"},
                },
            },
        },
    },
}
__docformat__ = 'restructuredtext'
import zlib, re, urllib, urllib2, socket, time, shutil
from anki.utils import json as simplejson
import os, base64, httplib, sys, tempfile, httplib, types
from datetime import date
import oldanki, oldanki.deck, oldanki.cards
from oldanki.db import sqlite
from oldanki.errors import *
from oldanki.models import Model, FieldModel, CardModel
from oldanki.facts import Fact, Field
from oldanki.cards import Card
from oldanki.stats import Stats, globalStats
from oldanki.history import CardHistoryEntry
from oldanki.stats import globalStats
from oldanki.utils import ids2str, hexifyID, checksum
from oldanki.media import mediaFiles
from oldanki.lang import _
from hooks import runHook
# The sync wire format depends on simplejson features added in 1.7.3.
# NOTE(review): lexicographic string comparison of version numbers is fragile
# (e.g. "1.10.0" < "1.7.3") — matches the upstream check, confirm before changing.
if simplejson.__version__ < "1.7.3":
    raise Exception("SimpleJSON must be 1.7.3 or later.")
# Size of the pieces used when streaming full-sync uploads/downloads.
CHUNK_SIZE = 32768
# MIME multipart boundary used for the full-sync upload POST body.
MIME_BOUNDARY = "Anki-sync-boundary"
# live
SYNC_URL = "http://ankiweb.net/sync/"
SYNC_HOST = "ankiweb.net"; SYNC_PORT = 80
# testing
#SYNC_URL = "http://localhost:8001/sync/"
#SYNC_HOST = "localhost"; SYNC_PORT = 8001
# Object types exchanged during a two-way sync, in processing order.
KEYS = ("models", "facts", "cards", "media")
##########################################################################
# Monkey-patch httplib to incrementally send instead of chewing up large
# amounts of memory, and track progress.
sendProgressHook = None
def incrementalSend(self, strOrFile):
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print "send:", repr(str)
try:
if (isinstance(strOrFile, str) or
isinstance(strOrFile, unicode)):
self.sock.sendall(strOrFile)
else:
cnt = 0
t = time.time()
while 1:
if sendProgressHook and time.time() - t > 1:
sendProgressHook(cnt)
t = time.time()
data = strOrFile.read(CHUNK_SIZE)
cnt += len(data)
if not data:
break
self.sock.sendall(data)
except socket.error, v:
if v[0] == 32: # Broken pipe
self.close()
raise
httplib.HTTPConnection.send = incrementalSend
def fullSyncProgressHook(cnt):
    # Bridge from the raw send-progress callback to the hook system so the
    # frontend can display upload progress during a full sync.
    runHook("fullSyncProgress", "fromLocal", cnt)
##########################################################################
class SyncTools(object):
    def __init__(self, deck=None):
        # Deck being synced; may be None for proxy subclasses that connect
        # before a deck is selected.
        self.deck = deck
        # Scratch space for diff results.
        self.diffs = {}
        # Tags whose cards should be suspended after applying the payload
        # (used as the `suspend` argument to rebuildPriorities on the server).
        self.serverExcludedTags = []
        # Clock difference between the two ends, in seconds.
        self.timediff = 0
# Control
##########################################################################
    def setServer(self, server):
        # Remember the peer (a local SyncServer or an HTTP proxy) to sync with.
        self.server = server
    def sync(self):
        "Sync two decks locally. Reimplement this for finer control."
        if not self.prepareSync(0):
            # Modification times match: nothing changed on either side.
            return
        sums = self.summaries()
        payload = self.genPayload(sums)
        # Exchange payloads with the server, then apply its reply locally.
        res = self.server.applyPayload(payload)
        self.applyPayloadReply(res)
        self.deck.reset()
    def prepareSync(self, timediff):
        "Sync setup. True if sync needed."
        # Cache both ends' modification times for later comparisons.
        self.localTime = self.modified()
        self.remoteTime = self.server.modified()
        if self.localTime == self.remoteTime:
            return False
        l = self._lastSync(); r = self.server._lastSync()
        # set lastSync to the lower of the two sides, and account for slow
        # clocks & assume it took up to 10 seconds for the reply to arrive
        self.deck.lastSync = min(l, r) - timediff - 10
        return True
    def summaries(self):
        # Collect (local, remote) modification summaries since the last sync.
        return (self.summary(self.deck.lastSync),
                self.server.summary(self.deck.lastSync))
    def genPayload(self, summaries):
        """Build the client->server payload from (localSummary, remoteSummary).

        Per object type the payload carries the locally edited objects
        ("added-*"), the locally deleted ids ("deleted-*") and the ids we
        want the server to send back ("missing-*"); objects the server
        deleted are removed from the local deck as a side effect.
        """
        (lsum, rsum) = summaries
        self.preSyncRefresh()
        payload = {}
        # first, handle models, facts and cards
        for key in KEYS:
            # diff = (locallyEdited, locallyDeleted, remotelyEdited, remotelyDeleted)
            diff = self.diffSummary(lsum, rsum, key)
            payload["added-" + key] = self.getObjsFromKey(diff[0], key)
            payload["deleted-" + key] = diff[1]
            payload["missing-" + key] = diff[2]
            self.deleteObjsFromKey(diff[3], key)
        # handle the remainder
        if self.localTime > self.remoteTime:
            payload['stats'] = self.bundleStats()
            payload['history'] = self.bundleHistory()
            payload['sources'] = self.bundleSources()
        # finally, set new lastSync and bundle the deck info
        payload['deck'] = self.bundleDeck()
        return payload
def applyPayload(self, payload):
reply = {}
self.preSyncRefresh()
# model, facts and cards
for key in KEYS:
k = 'added-' + key
# send back any requested
if k in payload:
reply[k] = self.getObjsFromKey(
payload['missing-' + key], key)
self.updateObjsFromKey(payload['added-' + key], key)
self.deleteObjsFromKey(payload['deleted-' + key], key)
# send back deck-related stuff if it wasn't sent to us
if not 'deck' in payload:
reply['stats'] = self.bundleStats()
reply['history'] = self.bundleHistory()
reply['sources'] = self.bundleSources()
# finally, set new lastSync and bundle the deck info
reply['deck'] = self.bundleDeck()
else:
self.updateDeck(payload['deck'])
self.updateStats(payload['stats'])
self.updateHistory(payload['history'])
if 'sources' in payload:
self.updateSources(payload['sources'])
self.postSyncRefresh()
cardIds = [x[0] for x in payload['added-cards']]
self.deck.updateCardTags(cardIds)
# rebuild priorities on server
self.rebuildPriorities(cardIds, self.serverExcludedTags)
return reply
def applyPayloadReply(self, reply):
# model, facts and cards
for key in KEYS:
k = 'added-' + key
# old version may not send media
if k in reply:
self.updateObjsFromKey(reply['added-' + key], key)
# deck
if 'deck' in reply:
self.updateDeck(reply['deck'])
self.updateStats(reply['stats'])
self.updateHistory(reply['history'])
if 'sources' in reply:
self.updateSources(reply['sources'])
self.postSyncRefresh()
# rebuild priorities on client
cardIds = [x[0] for x in reply['added-cards']]
self.deck.updateCardTags(cardIds)
self.rebuildPriorities(cardIds)
if self.missingFacts() != 0:
raise Exception(
"Facts missing after sync. Please run Tools>Advanced>Check DB.")
def missingFacts(self):
return self.deck.s.scalar(
"select count() from cards where factId "+
"not in (select id from facts)");
def rebuildPriorities(self, cardIds, suspend=[]):
self.deck.updateAllPriorities(partial=True, dirty=False)
self.deck.updatePriorities(cardIds, suspend=suspend, dirty=False)
def postSyncRefresh(self):
"Flush changes to DB, and reload object associations."
self.deck.s.flush()
self.deck.s.refresh(self.deck)
self.deck.currentModel
def preSyncRefresh(self):
# ensure global stats are available (queue may not be built)
self.deck._globalStats = globalStats(self.deck)
def payloadChanges(self, payload):
h = {
'lf': len(payload['added-facts']['facts']),
'rf': len(payload['missing-facts']),
'lc': len(payload['added-cards']),
'rc': len(payload['missing-cards']),
'lm': len(payload['added-models']),
'rm': len(payload['missing-models']),
}
if self.localTime > self.remoteTime:
h['ls'] = _('all')
h['rs'] = 0
else:
h['ls'] = 0
h['rs'] = _('all')
return h
def payloadChangeReport(self, payload):
p = self.payloadChanges(payload)
return _("""\
<table>
<tr><td><b>Added/Changed </b></td>
<td><b>Here </b></td><td><b>Server</b></td></tr>
<tr><td>Cards</td><td>%(lc)d</td><td>%(rc)d</td></tr>
<tr><td>Facts</td><td>%(lf)d</td><td>%(rf)d</td></tr>
<tr><td>Models</td><td>%(lm)d</td><td>%(rm)d</td></tr>
<tr><td>Stats</td><td>%(ls)s</td><td>%(rs)s</td></tr>
</table>""") % p
# Summaries
##########################################################################
def summary(self, lastSync):
"Generate a full summary of modtimes for two-way syncing."
# client may have selected an earlier sync time
self.deck.lastSync = lastSync
# ensure we're flushed first
self.deck.s.flush()
return {
# cards
"cards": self.realLists(self.deck.s.all(
"select id, modified from cards where modified > :mod",
mod=lastSync)),
"delcards": self.realLists(self.deck.s.all(
"select cardId, deletedTime from cardsDeleted "
"where deletedTime > :mod", mod=lastSync)),
# facts
"facts": self.realLists(self.deck.s.all(
"select id, modified from facts where modified > :mod",
mod=lastSync)),
"delfacts": self.realLists(self.deck.s.all(
"select factId, deletedTime from factsDeleted "
"where deletedTime > :mod", mod=lastSync)),
# models
"models": self.realLists(self.deck.s.all(
"select id, modified from models where modified > :mod",
mod=lastSync)),
"delmodels": self.realLists(self.deck.s.all(
"select modelId, deletedTime from modelsDeleted "
"where deletedTime > :mod", mod=lastSync)),
# media
"media": self.realLists(self.deck.s.all(
"select id, created from media where created > :mod",
mod=lastSync)),
"delmedia": self.realLists(self.deck.s.all(
"select mediaId, deletedTime from mediaDeleted "
"where deletedTime > :mod", mod=lastSync)),
}
# Diffing
##########################################################################
    def diffSummary(self, localSummary, remoteSummary, key):
        """Classify ids of type KEY into edit/delete sets for each side.

        Returns (locallyEdited, locallyDeleted, remotelyEdited,
        remotelyDeleted): ids to upload, ids to report as deleted, ids to
        request from the server, and ids to delete locally.
        """
        # list of ids on both ends
        lexists = localSummary[key]
        ldeleted = localSummary["del"+key]
        rexists = remoteSummary[key]
        rdeleted = remoteSummary["del"+key]
        # id -> deletion time, for tie-breaking against modification times
        ldeletedIds = dict(ldeleted)
        rdeletedIds = dict(rdeleted)
        # to store the results
        locallyEdited = []
        locallyDeleted = []
        remotelyEdited = []
        remotelyDeleted = []
        # build a hash of all ids, with value (localMod, remoteMod).
        # deleted/nonexisting cards are marked with a modtime of None.
        ids = {}
        for (id, mod) in rexists:
            ids[id] = [None, mod]
        for (id, mod) in rdeleted:
            ids[id] = [None, None]
        for (id, mod) in lexists:
            if id in ids:
                ids[id][0] = mod
            else:
                ids[id] = [mod, None]
        for (id, mod) in ldeleted:
            if id in ids:
                ids[id][0] = None
            else:
                ids[id] = [None, None]
        # loop through the hash, determining differences
        for (id, (localMod, remoteMod)) in ids.items():
            if localMod and remoteMod:
                # changed/existing on both sides: newer modtime wins
                if localMod < remoteMod:
                    remotelyEdited.append(id)
                elif localMod > remoteMod:
                    locallyEdited.append(id)
            elif localMod and not remoteMod:
                # if it's missing on server or newer here, sync
                if (id not in rdeletedIds or
                    rdeletedIds[id] < localMod):
                    locallyEdited.append(id)
                else:
                    remotelyDeleted.append(id)
            elif remoteMod and not localMod:
                # if it's missing locally or newer there, sync
                if (id not in ldeletedIds or
                    ldeletedIds[id] < remoteMod):
                    remotelyEdited.append(id)
                else:
                    locallyDeleted.append(id)
            else:
                # deleted (or absent) on both ends: propagate the deletion
                # to whichever side has not recorded it yet
                if id in ldeletedIds and id not in rdeletedIds:
                    locallyDeleted.append(id)
                elif id in rdeletedIds and id not in ldeletedIds:
                    remotelyDeleted.append(id)
        return (locallyEdited, locallyDeleted,
                remotelyEdited, remotelyDeleted)
# Models
##########################################################################
def getModels(self, ids, updateModified=False):
return [self.bundleModel(id, updateModified) for id in ids]
def bundleModel(self, id, updateModified):
"Return a model representation suitable for transport."
mod = self.deck.s.query(Model).get(id)
# force load of lazy attributes
mod.fieldModels; mod.cardModels
m = self.dictFromObj(mod)
m['fieldModels'] = [self.bundleFieldModel(fm) for fm in m['fieldModels']]
m['cardModels'] = [self.bundleCardModel(fm) for fm in m['cardModels']]
if updateModified:
m['modified'] = time.time()
return m
def bundleFieldModel(self, fm):
d = self.dictFromObj(fm)
if 'model' in d: del d['model']
return d
def bundleCardModel(self, cm):
d = self.dictFromObj(cm)
if 'model' in d: del d['model']
return d
def updateModels(self, models):
for model in models:
local = self.getModel(model['id'])
# avoid overwriting any existing card/field models
fms = model['fieldModels']; del model['fieldModels']
cms = model['cardModels']; del model['cardModels']
self.applyDict(local, model)
self.mergeFieldModels(local, fms)
self.mergeCardModels(local, cms)
self.deck.s.statement(
"delete from modelsDeleted where modelId in %s" %
ids2str([m['id'] for m in models]))
def getModel(self, id, create=True):
"Return a local model with same ID, or create."
id = int(id)
for l in self.deck.models:
if l.id == id:
return l
if not create:
return
m = Model()
self.deck.models.append(m)
return m
def mergeFieldModels(self, model, fms):
ids = []
for fm in fms:
local = self.getFieldModel(model, fm)
self.applyDict(local, fm)
ids.append(fm['id'])
for fm in model.fieldModels:
if fm.id not in ids:
self.deck.deleteFieldModel(model, fm)
def getFieldModel(self, model, remote):
id = int(remote['id'])
for fm in model.fieldModels:
if fm.id == id:
return fm
fm = FieldModel()
model.addFieldModel(fm)
return fm
def mergeCardModels(self, model, cms):
ids = []
for cm in cms:
local = self.getCardModel(model, cm)
if not 'allowEmptyAnswer' in cm or cm['allowEmptyAnswer'] is None:
cm['allowEmptyAnswer'] = True
self.applyDict(local, cm)
ids.append(cm['id'])
for cm in model.cardModels:
if cm.id not in ids:
self.deck.deleteCardModel(model, cm)
def getCardModel(self, model, remote):
id = int(remote['id'])
for cm in model.cardModels:
if cm.id == id:
return cm
cm = CardModel()
model.addCardModel(cm)
return cm
def deleteModels(self, ids):
for id in ids:
model = self.getModel(id, create=False)
if model:
self.deck.deleteModel(model)
# Facts
##########################################################################
def getFacts(self, ids, updateModified=False):
if updateModified:
modified = time.time()
else:
modified = "modified"
factIds = ids2str(ids)
return {
'facts': self.realLists(self.deck.s.all("""
select id, modelId, created, %s, tags, spaceUntil, lastCardId from facts
where id in %s""" % (modified, factIds))),
'fields': self.realLists(self.deck.s.all("""
select id, factId, fieldModelId, ordinal, value from fields
where factId in %s""" % factIds))
}
def updateFacts(self, factsdict):
facts = factsdict['facts']
fields = factsdict['fields']
if not facts:
return
# update facts first
dlist = [{
'id': f[0],
'modelId': f[1],
'created': f[2],
'modified': f[3],
'tags': f[4],
'spaceUntil': f[5] or "",
'lastCardId': f[6]
} for f in facts]
self.deck.s.execute("""
insert or replace into facts
(id, modelId, created, modified, tags, spaceUntil, lastCardId)
values
(:id, :modelId, :created, :modified, :tags, :spaceUntil, :lastCardId)""", dlist)
# now fields
dlist = [{
'id': f[0],
'factId': f[1],
'fieldModelId': f[2],
'ordinal': f[3],
'value': f[4]
} for f in fields]
# delete local fields since ids may have changed
self.deck.s.execute(
"delete from fields where factId in %s" %
ids2str([f[0] for f in facts]))
# then update
self.deck.s.execute("""
insert into fields
(id, factId, fieldModelId, ordinal, value)
values
(:id, :factId, :fieldModelId, :ordinal, :value)""", dlist)
self.deck.s.statement(
"delete from factsDeleted where factId in %s" %
ids2str([f[0] for f in facts]))
def deleteFacts(self, ids):
self.deck.deleteFacts(ids)
# Cards
##########################################################################
def getCards(self, ids):
return self.realLists(self.deck.s.all("""
select id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
firstAnswered, reps, successive, averageTime, reviewTime, youngEase0,
youngEase1, youngEase2, youngEase3, youngEase4, matureEase0,
matureEase1, matureEase2, matureEase3, matureEase4, yesCount, noCount,
question, answer, lastFactor, spaceUntil, type, combinedDue, relativeDelay
from cards where id in %s""" % ids2str(ids)))
def updateCards(self, cards):
if not cards:
return
# FIXME: older clients won't send this, so this is temp compat code
def getType(row):
if len(row) > 36:
return row[36]
if row[15]:
return 1
elif row[14]:
return 0
return 2
dlist = [{'id': c[0],
'factId': c[1],
'cardModelId': c[2],
'created': c[3],
'modified': c[4],
'tags': c[5],
'ordinal': c[6],
'priority': c[7],
'interval': c[8],
'lastInterval': c[9],
'due': c[10],
'lastDue': c[11],
'factor': c[12],
'firstAnswered': c[13],
'reps': c[14],
'successive': c[15],
'averageTime': c[16],
'reviewTime': c[17],
'youngEase0': c[18],
'youngEase1': c[19],
'youngEase2': c[20],
'youngEase3': c[21],
'youngEase4': c[22],
'matureEase0': c[23],
'matureEase1': c[24],
'matureEase2': c[25],
'matureEase3': c[26],
'matureEase4': c[27],
'yesCount': c[28],
'noCount': c[29],
'question': c[30],
'answer': c[31],
'lastFactor': c[32],
'spaceUntil': c[33],
'type': c[34],
'combinedDue': c[35],
'rd': getType(c)
} for c in cards]
self.deck.s.execute("""
insert or replace into cards
(id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
firstAnswered, reps, successive, averageTime, reviewTime, youngEase0,
youngEase1, youngEase2, youngEase3, youngEase4, matureEase0,
matureEase1, matureEase2, matureEase3, matureEase4, yesCount, noCount,
question, answer, lastFactor, spaceUntil, type, combinedDue,
relativeDelay, isDue)
values
(:id, :factId, :cardModelId, :created, :modified, :tags, :ordinal,
:priority, :interval, :lastInterval, :due, :lastDue, :factor,
:firstAnswered, :reps, :successive, :averageTime, :reviewTime, :youngEase0,
:youngEase1, :youngEase2, :youngEase3, :youngEase4, :matureEase0,
:matureEase1, :matureEase2, :matureEase3, :matureEase4, :yesCount,
:noCount, :question, :answer, :lastFactor, :spaceUntil,
:type, :combinedDue, :rd, 0)""", dlist)
self.deck.s.statement(
"delete from cardsDeleted where cardId in %s" %
ids2str([c[0] for c in cards]))
def deleteCards(self, ids):
self.deck.deleteCards(ids)
# Deck/stats/history
##########################################################################
def bundleDeck(self):
# ensure modified is not greater than server time
if getattr(self, "server", None) and getattr(
self.server, "timestamp", None):
self.deck.modified = min(self.deck.modified,self.server.timestamp)
# and ensure lastSync is greater than modified
self.deck.lastSync = max(time.time(), self.deck.modified+1)
d = self.dictFromObj(self.deck)
del d['Session']
del d['engine']
del d['s']
del d['path']
del d['syncName']
del d['version']
if 'newQueue' in d:
del d['newQueue']
del d['failedQueue']
del d['revQueue']
# these may be deleted before bundling
if 'css' in d: del d['css']
if 'models' in d: del d['models']
if 'currentModel' in d: del d['currentModel']
keys = d.keys()
for k in keys:
if isinstance(d[k], types.MethodType):
del d[k]
d['meta'] = self.realLists(self.deck.s.all("select * from deckVars"))
return d
def updateDeck(self, deck):
if 'meta' in deck:
meta = deck['meta']
for (k,v) in meta:
self.deck.s.statement("""
insert or replace into deckVars
(key, value) values (:k, :v)""", k=k, v=v)
del deck['meta']
self.applyDict(self.deck, deck)
def bundleStats(self):
def bundleStat(stat):
s = self.dictFromObj(stat)
s['day'] = s['day'].toordinal()
del s['id']
return s
lastDay = date.fromtimestamp(max(0, self.deck.lastSync - 60*60*24))
ids = self.deck.s.column0(
"select id from stats where type = 1 and day >= :day", day=lastDay)
stat = Stats()
def statFromId(id):
stat.fromDB(self.deck.s, id)
return stat
stats = {
'global': bundleStat(self.deck._globalStats),
'daily': [bundleStat(statFromId(id)) for id in ids],
}
return stats
def updateStats(self, stats):
stats['global']['day'] = date.fromordinal(stats['global']['day'])
self.applyDict(self.deck._globalStats, stats['global'])
self.deck._globalStats.toDB(self.deck.s)
for record in stats['daily']:
record['day'] = date.fromordinal(record['day'])
stat = Stats()
id = self.deck.s.scalar("select id from stats where "
"type = :type and day = :day",
type=1, day=record['day'])
if id:
stat.fromDB(self.deck.s, id)
else:
stat.create(self.deck.s, 1, record['day'])
self.applyDict(stat, record)
stat.toDB(self.deck.s)
def bundleHistory(self):
return self.realLists(self.deck.s.all("""
select cardId, time, lastInterval, nextInterval, ease, delay,
lastFactor, nextFactor, reps, thinkingTime, yesCount, noCount
from reviewHistory where time > :ls""",
ls=self.deck.lastSync))
def updateHistory(self, history):
dlist = [{'cardId': h[0],
'time': h[1],
'lastInterval': h[2],
'nextInterval': h[3],
'ease': h[4],
'delay': h[5],
'lastFactor': h[6],
'nextFactor': h[7],
'reps': h[8],
'thinkingTime': h[9],
'yesCount': h[10],
'noCount': h[11]} for h in history]
if not dlist:
return
self.deck.s.statements("""
insert or ignore into reviewHistory
(cardId, time, lastInterval, nextInterval, ease, delay,
lastFactor, nextFactor, reps, thinkingTime, yesCount, noCount)
values
(:cardId, :time, :lastInterval, :nextInterval, :ease, :delay,
:lastFactor, :nextFactor, :reps, :thinkingTime, :yesCount, :noCount)""",
dlist)
def bundleSources(self):
return self.realLists(self.deck.s.all("select * from sources"))
def updateSources(self, sources):
for s in sources:
self.deck.s.statement("""
insert or replace into sources values
(:id, :name, :created, :lastSync, :syncPeriod)""",
id=s[0],
name=s[1],
created=s[2],
lastSync=s[3],
syncPeriod=s[4])
# Media metadata
##########################################################################
def getMedia(self, ids):
return [tuple(row) for row in self.deck.s.all("""
select id, filename, size, created, originalPath, description
from media where id in %s""" % ids2str(ids))]
def updateMedia(self, media):
meta = []
for m in media:
# build meta
meta.append({
'id': m[0],
'filename': m[1],
'size': m[2],
'created': m[3],
'originalPath': m[4],
'description': m[5]})
# apply metadata
if meta:
self.deck.s.statements("""
insert or replace into media (id, filename, size, created,
originalPath, description)
values (:id, :filename, :size, :created, :originalPath,
:description)""", meta)
self.deck.s.statement(
"delete from mediaDeleted where mediaId in %s" %
ids2str([m[0] for m in media]))
def deleteMedia(self, ids):
sids = ids2str(ids)
files = self.deck.s.column0(
"select filename from media where id in %s" % sids)
self.deck.s.statement("""
insert into mediaDeleted
select id, :now from media
where media.id in %s""" % sids, now=time.time())
self.deck.s.execute(
"delete from media where id in %s" % sids)
# One-way syncing (sharing)
##########################################################################
def syncOneWay(self, lastSync):
"Sync two decks one way."
payload = self.server.genOneWayPayload(lastSync)
self.applyOneWayPayload(payload)
self.deck.reset()
def syncOneWayDeckName(self):
return (self.deck.s.scalar("select name from sources where id = :id",
id=self.server.deckName) or
hexifyID(int(self.server.deckName)))
def prepareOneWaySync(self):
"Sync setup. True if sync needed. Not used for local sync."
srcID = self.server.deckName
(lastSync, syncPeriod) = self.deck.s.first(
"select lastSync, syncPeriod from sources where id = :id", id=srcID)
if self.server.modified() <= lastSync:
return
self.deck.lastSync = lastSync
return True
def genOneWayPayload(self, lastSync):
"Bundle all added or changed objects since the last sync."
p = {}
# facts
factIds = self.deck.s.column0(
"select id from facts where modified > :l", l=lastSync)
p['facts'] = self.getFacts(factIds, updateModified=True)
# models
modelIds = self.deck.s.column0(
"select id from models where modified > :l", l=lastSync)
p['models'] = self.getModels(modelIds, updateModified=True)
# media
mediaIds = self.deck.s.column0(
"select id from media where created > :l", l=lastSync)
p['media'] = self.getMedia(mediaIds)
# cards
cardIds = self.deck.s.column0(
"select id from cards where modified > :l", l=lastSync)
p['cards'] = self.realLists(self.getOneWayCards(cardIds))
return p
def applyOneWayPayload(self, payload):
keys = [k for k in KEYS if k != "cards"]
# model, facts, media
for key in keys:
self.updateObjsFromKey(payload[key], key)
# models need their source tagged
for m in payload["models"]:
self.deck.s.statement("update models set source = :s "
"where id = :id",
s=self.server.deckName,
id=m['id'])
# cards last, handled differently
t = time.time()
try:
self.updateOneWayCards(payload['cards'])
except KeyError:
sys.stderr.write("Subscribed to a broken deck. "
"Try removing your deck subscriptions.")
t = 0
# update sync time
self.deck.s.statement(
"update sources set lastSync = :t where id = :id",
id=self.server.deckName, t=t)
self.deck.modified = time.time()
def getOneWayCards(self, ids):
"The minimum information necessary to generate one way cards."
return self.deck.s.all(
"select id, factId, cardModelId, ordinal, created from cards "
"where id in %s" % ids2str(ids))
def updateOneWayCards(self, cards):
if not cards:
return
t = time.time()
dlist = [{'id': c[0], 'factId': c[1], 'cardModelId': c[2],
'ordinal': c[3], 'created': c[4], 't': t} for c in cards]
# add any missing cards
self.deck.s.statements("""
insert or ignore into cards
(id, factId, cardModelId, created, modified, tags, ordinal,
priority, interval, lastInterval, due, lastDue, factor,
firstAnswered, reps, successive, averageTime, reviewTime, youngEase0,
youngEase1, youngEase2, youngEase3, youngEase4, matureEase0,
matureEase1, matureEase2, matureEase3, matureEase4, yesCount, noCount,
question, answer, lastFactor, spaceUntil, isDue, type, combinedDue,
relativeDelay)
values
(:id, :factId, :cardModelId, :created, :t, "", :ordinal,
1, 0, 0, :created, 0, 2.5,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, "", "", 2.5, 0, 0, 2, :t, 2)""", dlist)
# update q/as
models = dict(self.deck.s.all("""
select cards.id, models.id
from cards, facts, models
where cards.factId = facts.id
and facts.modelId = models.id
and cards.id in %s""" % ids2str([c[0] for c in cards])))
self.deck.s.flush()
self.deck.updateCardQACache(
[(c[0], c[2], c[1], models[c[0]]) for c in cards])
# rebuild priorities on client
cardIds = [c[0] for c in cards]
self.deck.updateCardTags(cardIds)
self.rebuildPriorities(cardIds)
# Tools
##########################################################################
    def modified(self):
        # Modification time of the local deck; overridden by the HTTP proxy.
        return self.deck.modified
    def _lastSync(self):
        # Time of the last successful sync; overridden by the HTTP proxy.
        return self.deck.lastSync
    def unstuff(self, data):
        "Uncompress and convert to unicode."
        # zlib-compressed UTF-8 JSON is the wire format for sync payloads.
        return simplejson.loads(unicode(zlib.decompress(data), "utf8"))
    def stuff(self, data):
        "Convert into UTF-8 and compress."
        # Inverse of unstuff(): JSON-encode, then zlib-compress for transport.
        return zlib.compress(simplejson.dumps(data))
def dictFromObj(self, obj):
"Return a dict representing OBJ without any hidden db fields."
return dict([(k,v) for (k,v) in obj.__dict__.items()
if not k.startswith("_")])
def applyDict(self, obj, dict):
"Apply each element in DICT to OBJ in a way the ORM notices."
for (k,v) in dict.items():
setattr(obj, k, v)
def realLists(self, result):
"Convert an SQLAlchemy response into a list of real lists."
return [list(x) for x in result]
def getObjsFromKey(self, ids, key):
return getattr(self, "get" + key.capitalize())(ids)
def deleteObjsFromKey(self, ids, key):
return getattr(self, "delete" + key.capitalize())(ids)
def updateObjsFromKey(self, ids, key):
return getattr(self, "update" + key.capitalize())(ids)
# Full sync
##########################################################################
def needFullSync(self, sums):
if self.deck.lastSync <= 0:
return True
for sum in sums:
for l in sum.values():
if len(l) > 1000:
return True
if self.deck.s.scalar(
"select count() from reviewHistory where time > :ls",
ls=self.deck.lastSync) > 1000:
return True
lastDay = date.fromtimestamp(max(0, self.deck.lastSync - 60*60*24))
if self.deck.s.scalar(
"select count() from stats where day >= :day",
day=lastDay) > 100:
return True
return False
def prepareFullSync(self):
t = time.time()
# ensure modified is not greater than server time
self.deck.modified = min(self.deck.modified, self.server.timestamp)
self.deck.s.commit()
self.deck.close()
fields = {
"p": self.server.password,
"u": self.server.username,
"d": self.server.deckName.encode("utf-8"),
}
if self.localTime > self.remoteTime:
return ("fromLocal", fields, self.deck.path)
else:
return ("fromServer", fields, self.deck.path)
def fullSync(self):
ret = self.prepareFullSync()
if ret[0] == "fromLocal":
self.fullSyncFromLocal(ret[1], ret[2])
else:
self.fullSyncFromServer(ret[1], ret[2])
def fullSyncFromLocal(self, fields, path):
global sendProgressHook
try:
# write into a temporary file, since POST needs content-length
src = open(path, "rb")
(fd, name) = tempfile.mkstemp(prefix="oldanki")
tmp = open(name, "w+b")
# post vars
for (key, value) in fields.items():
tmp.write('--' + MIME_BOUNDARY + "\r\n")
tmp.write('Content-Disposition: form-data; name="%s"\r\n' % key)
tmp.write('\r\n')
tmp.write(value)
tmp.write('\r\n')
# file header
tmp.write('--' + MIME_BOUNDARY + "\r\n")
tmp.write(
'Content-Disposition: form-data; name="deck"; filename="deck"\r\n')
tmp.write('Content-Type: application/octet-stream\r\n')
tmp.write('\r\n')
# data
comp = zlib.compressobj()
while 1:
data = src.read(CHUNK_SIZE)
if not data:
tmp.write(comp.flush())
break
tmp.write(comp.compress(data))
src.close()
tmp.write('\r\n--' + MIME_BOUNDARY + '--\r\n\r\n')
size = tmp.tell()
tmp.seek(0)
# open http connection
runHook("fullSyncStarted", size)
headers = {
'Content-type': 'multipart/form-data; boundary=%s' %
MIME_BOUNDARY,
'Content-length': str(size),
'Host': SYNC_HOST,
}
req = urllib2.Request(SYNC_URL + "fullup?v=2", tmp, headers)
try:
sendProgressHook = fullSyncProgressHook
res = urllib2.urlopen(req).read()
assert res.startswith("OK")
# update lastSync
c = sqlite.connect(path)
c.execute("update decks set lastSync = ?",
(res[3:],))
c.commit()
c.close()
finally:
sendProgressHook = None
tmp.close()
os.close(fd)
os.unlink(name)
finally:
runHook("fullSyncFinished")
def fullSyncFromServer(self, fields, path):
try:
runHook("fullSyncStarted", 0)
fields = urllib.urlencode(fields)
src = urllib.urlopen(SYNC_URL + "fulldown", fields)
(fd, tmpname) = tempfile.mkstemp(dir=os.path.dirname(path),
prefix="fullsync")
tmp = open(tmpname, "wb")
decomp = zlib.decompressobj()
cnt = 0
while 1:
data = src.read(CHUNK_SIZE)
if not data:
tmp.write(decomp.flush())
break
tmp.write(decomp.decompress(data))
cnt += CHUNK_SIZE
runHook("fullSyncProgress", "fromServer", cnt)
src.close()
tmp.close()
os.close(fd)
# if we were successful, overwrite old deck
os.unlink(path)
os.rename(tmpname, path)
# reset the deck name
c = sqlite.connect(path)
c.execute("update decks set syncName = ?",
[checksum(path.encode("utf-8"))])
c.commit()
c.close()
finally:
runHook("fullSyncFinished")
# Local syncing
##########################################################################
class SyncServer(SyncTools):
    # Server half of an in-process sync; all real work lives in SyncTools.
    def __init__(self, deck=None):
        SyncTools.__init__(self, deck)
class SyncClient(SyncTools):
    # Client half of a sync; inherits all behaviour from SyncTools.
    pass
# HTTP proxy: act as a server and direct requests to the real server
##########################################################################
class HttpSyncServerProxy(SyncServer):
    # Client-side stand-in for the remote server: presents the SyncServer
    # interface but forwards every operation over HTTP to SYNC_URL.
    def __init__(self, user, passwd):
        SyncServer.__init__(self)
        # deck name -> [modified, lastSync]; fetched lazily by connect()
        self.decks = None
        self.deckName = None
        self.username = user
        self.password = passwd
        self.protocolVersion = 5
        self.sourcesToCheck = []
    def connect(self, clientVersion=""):
        "Check auth, protocol & grab deck list."
        if not self.decks:
            import socket
            # bound the handshake; restored to blocking below
            socket.setdefaulttimeout(30)
            d = self.runCmd("getDecks",
                            libanki=oldanki.version,
                            client=clientVersion,
                            sources=simplejson.dumps(self.sourcesToCheck),
                            pversion=self.protocolVersion)
            socket.setdefaulttimeout(None)
            if d['status'] != "OK":
                raise SyncError(type="authFailed", status=d['status'])
            self.decks = d['decks']
            self.timestamp = d['timestamp']
            # clock skew between client and server, in seconds
            self.timediff = abs(self.timestamp - time.time())
    def hasDeck(self, deckName):
        self.connect()
        return deckName in self.decks.keys()
    def availableDecks(self):
        self.connect()
        return self.decks.keys()
    def createDeck(self, deckName):
        ret = self.runCmd("createDeck", name=deckName.encode("utf-8"))
        if not ret or ret['status'] != "OK":
            raise SyncError(type="createFailed")
        # register locally with zeroed (modified, lastSync)
        self.decks[deckName] = [0, 0]
    def summary(self, lastSync):
        return self.runCmd("summary",
                           lastSync=self.stuff(lastSync))
    def genOneWayPayload(self, lastSync):
        return self.runCmd("genOneWayPayload",
                           lastSync=self.stuff(lastSync))
    def modified(self):
        # remote deck's modification time, from the cached deck list
        self.connect()
        return self.decks[self.deckName][0]
    def _lastSync(self):
        # remote deck's last-sync time, from the cached deck list
        self.connect()
        return self.decks[self.deckName][1]
    def applyPayload(self, payload):
        return self.runCmd("applyPayload",
                           payload=self.stuff(payload))
    def finish(self):
        assert self.runCmd("finish") == "OK"
    def runCmd(self, action, **args):
        # POST the credentials, deck name and command arguments to the
        # sync server, and return the decoded (unstuffed) response.
        data = {"p": self.password,
                "u": self.username,
                "v": 2}
        if self.deckName:
            data['d'] = self.deckName.encode("utf-8")
        else:
            data['d'] = None
        data.update(args)
        data = urllib.urlencode(data)
        try:
            f = urllib2.urlopen(SYNC_URL + action, data)
        except (urllib2.URLError, socket.error, socket.timeout,
                httplib.BadStatusLine), e:
            raise SyncError(type="connectionError",
                            exc=`e`)
        ret = f.read()
        if not ret:
            raise SyncError(type="noResponse")
        try:
            return self.unstuff(ret)
        except Exception, e:
            # garbled reply is reported as a connection problem
            raise SyncError(type="connectionError",
                            exc=`e`)
# HTTP server: respond to proxy requests and return data
##########################################################################
class HttpSyncServer(SyncServer):
    # Server-side endpoint: wraps SyncServer so that all arguments and return
    # values cross the wire stuffed (zlib-compressed JSON).
    def __init__(self):
        SyncServer.__init__(self)
        # deck name -> [modified, lastSync], advertised via getDecks()
        self.decks = {}
        self.deck = None
    def summary(self, lastSync):
        return self.stuff(SyncServer.summary(
            self, float(zlib.decompress(lastSync))))
    def applyPayload(self, payload):
        return self.stuff(SyncServer.applyPayload(self,
            self.unstuff(payload)))
    def genOneWayPayload(self, lastSync):
        return self.stuff(SyncServer.genOneWayPayload(
            self, float(zlib.decompress(lastSync))))
    def getDecks(self, libanki, client, sources, pversion):
        # Handshake reply: deck list plus the server clock so the client can
        # compute skew.  libanki/client/sources/pversion are accepted for
        # protocol compatibility but unused here.
        return self.stuff({
            "status": "OK",
            "decks": self.decks,
            "timestamp": time.time(),
            })
    def createDeck(self, name):
        "Create a deck on the server. Not implemented."
        return self.stuff("OK")
# Local media copying
##########################################################################
def copyLocalMedia(src, dst):
    """Copy media files referenced by dst's cards from src's media directory.

    Only files actually referenced by an <img> or [sound:] tag in dst's
    question/answer fields are copied, and existing destination files are
    never overwritten.  Copy errors are deliberately ignored (best effort).
    """
    srcDir = src.mediaDir()
    if not srcDir:
        return
    dstDir = dst.mediaDir(create=True)
    files = os.listdir(srcDir)
    # find media references
    used = {}
    for col in ("question", "answer"):
        txt = dst.s.column0("""
select %(c)s from cards where
%(c)s like '%%<img %%'
or %(c)s like '%%[sound:%%'""" % {'c': col})
        for entry in txt:
            for fname in mediaFiles(entry):
                used[fname] = True
    # copy only used media ('name' instead of 'file' to avoid shadowing the builtin)
    for name in files:
        if name not in used:
            continue
        srcfile = os.path.join(srcDir, name)
        dstfile = os.path.join(dstDir, name)
        if not os.path.exists(dstfile):
            try:
                shutil.copy2(srcfile, dstfile)
            except (IOError, OSError):
                # BUG FIX: the original 'except IOError, OSError:' (Py2 syntax)
                # caught only IOError and rebound the name OSError.
                pass
# pyrogram/errors/exceptions/not_acceptable_406.py
from ..rpc_error import RPCError
class NotAcceptable(RPCError):
    """Not Acceptable"""
    # Base class for all Telegram RPC errors with HTTP-style code 406.
    # NOTE: the class docstring doubles as NAME, so it must stay unchanged.
    CODE = 406
    """``int``: RPC Error Code"""
    NAME = __doc__
class AuthKeyDuplicated(NotAcceptable):
    """The same authorization key (session file) was used in more than one place simultaneously. You must delete your session file and log in again with your phone number or bot token"""
    # Telegram RPC error AUTH_KEY_DUPLICATED (406); docstring doubles as MESSAGE.
    ID = "AUTH_KEY_DUPLICATED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ChannelPrivate(NotAcceptable):
    """The channel/supergroup is not accessible"""
    # Telegram RPC error CHANNEL_PRIVATE (406); docstring doubles as MESSAGE.
    ID = "CHANNEL_PRIVATE"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilerefUpgradeNeeded(NotAcceptable):
    """The file reference has expired and you must use a refreshed one by obtaining the original media message"""
    # Telegram RPC error FILEREF_UPGRADE_NEEDED (406); docstring doubles as MESSAGE.
    ID = "FILEREF_UPGRADE_NEEDED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FreshChangeAdminsForbidden(NotAcceptable):
    """You were just elected admin, you can't add or modify other admins yet"""
    # Telegram RPC error FRESH_CHANGE_ADMINS_FORBIDDEN (406); docstring doubles as MESSAGE.
    ID = "FRESH_CHANGE_ADMINS_FORBIDDEN"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FreshChangePhoneForbidden(NotAcceptable):
    """You can't change your phone number because your session was logged-in recently"""
    # Telegram RPC error FRESH_CHANGE_PHONE_FORBIDDEN (406); docstring doubles as MESSAGE.
    ID = "FRESH_CHANGE_PHONE_FORBIDDEN"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FreshResetAuthorisationForbidden(NotAcceptable):
    """You can't terminate other authorized sessions because the current was logged-in recently"""
    # Telegram RPC error FRESH_RESET_AUTHORISATION_FORBIDDEN (406); docstring doubles as MESSAGE.
    ID = "FRESH_RESET_AUTHORISATION_FORBIDDEN"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneNumberInvalid(NotAcceptable):
    """The phone number is invalid"""
    # Telegram RPC error PHONE_NUMBER_INVALID (406); docstring doubles as MESSAGE.
    ID = "PHONE_NUMBER_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhonePasswordFlood(NotAcceptable):
    """You have tried to log-in too many times"""
    # Telegram RPC error PHONE_PASSWORD_FLOOD (406); docstring doubles as MESSAGE.
    ID = "PHONE_PASSWORD_FLOOD"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class StickersetInvalid(NotAcceptable):
    """The sticker set is invalid"""
    # Telegram RPC error STICKERSET_INVALID (406); docstring doubles as MESSAGE.
    ID = "STICKERSET_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class StickersetOwnerAnonymous(NotAcceptable):
    """This sticker set can't be used as the group's sticker set because it was created by one of its anonymous admins"""
    # Telegram RPC error STICKERSET_OWNER_ANONYMOUS (406); docstring doubles as MESSAGE.
    ID = "STICKERSET_OWNER_ANONYMOUS"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserpicUploadRequired(NotAcceptable):
    """You must have a profile picture to publish your geolocation"""
    # Telegram RPC error USERPIC_UPLOAD_REQUIRED (406); docstring doubles as MESSAGE.
    ID = "USERPIC_UPLOAD_REQUIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserRestricted(NotAcceptable):
    """You are limited/restricted. You can't perform this action"""
    # Telegram RPC error USER_RESTRICTED (406); docstring doubles as MESSAGE.
    ID = "USER_RESTRICTED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UpdateAppToLogin(NotAcceptable):
    """You must update your Telegram app to log in"""
    # Telegram RPC error UPDATE_APP_TO_LOGIN (406); docstring doubles as MESSAGE.
    ID = "UPDATE_APP_TO_LOGIN"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class SendCodeUnavailable(NotAcceptable):
    """The phone number is not available for authorization"""
    # Telegram RPC error SEND_CODE_UNAVAILABLE (406); docstring doubles as MESSAGE.
    ID = "SEND_CODE_UNAVAILABLE"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PremiumCurrentlyUnavailable(NotAcceptable):
    """The premium account is currently unavailable"""
    # Telegram RPC error PREMIUM_CURRENTLY_UNAVAILABLE (406); docstring doubles as MESSAGE.
    ID = "PREMIUM_CURRENTLY_UNAVAILABLE"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
import logging
from dataclasses import dataclass
from nlu import Licenses
from nlu.pipe.component_resolution import resolve_feature
from nlu.pipe.nlu_component import NluComponent
from nlu.pipe.pipeline import NLUPipeline
from nlu.pipe.utils.component_utils import ComponentUtils
from nlu.pipe.utils.pipe_utils import PipeUtils
from nlu.pipe.utils.resolution.storage_ref_utils import StorageRefUtils
from nlu.universe.component_universes import jsl_id_to_empty_component
from nlu.universe.feature_node_ids import NLP_NODE_IDS, NLP_HC_NODE_IDS
from nlu.universe.feature_universes import NLP_FEATURES
from nlu.universe.logic_universes import AnnoTypes
logger = logging.getLogger('nlu')
@dataclass
class StorageRefConversionResolutionData:
    """Hold information that can be used to resolve to a NLU component_to_resolve, which satisfies the storage ref demands."""
    storage_ref: str  # storage ref a resolver component_to_resolve should have
    component_candidate: NluComponent  # from which NLU component_to_resolve should the converter feed (may be None)
    type: str  # what kind of conversion: 'word2chunk', 'word2sentence' or 'no_conversion'
class PipelineCompleter:
'''
Pass a list of NLU components to the pipeline (or a NLU pipeline)
For every component_to_resolve, it checks if all requirements are met.
It checks and fixes the following issues for a list of components:
1. Missing Features / component_to_resolve requirements
2. Bad order of components (which will cause missing features exception)
3. Check Feature names in the output
    4. Check whether pipeline needs to be fitted
'''
    @staticmethod
    def check_if_storage_ref_is_satisfied_or_get_conversion_candidate(component_to_check: NluComponent,
                                                                      pipe: NLUPipeline,
                                                                      storage_ref_to_find: str):
        """Check if any other component_to_resolve in the pipeline has same storage ref as the input component_to_resolve.

        Returns a tuple (conversion_needed, resolution_data):
        - (False, None) when another component with a matching storage ref
          directly satisfies the requirement (word- or sentence/document-level).
        - (True, StorageRefConversionResolutionData) otherwise; the data's
          component_candidate is the token-embedding producer to convert
          (word2sentence / word2chunk), or None if nothing matched at all.
        """
        # If there is just 1 component_to_resolve, there is nothing to check
        if len(pipe.components) == 1:
            return False, None
        conversion_candidate = None
        conversion_type = "no_conversion"
        logger.info(f'checking for storage={storage_ref_to_find} is available in component_list..')
        for c in pipe.components:
            if component_to_check.name != c.name:
                if StorageRefUtils.has_storage_ref(c):
                    if StorageRefUtils.extract_storage_ref(c) == storage_ref_to_find:
                        # Both components have Different Names AND their Storage Ref Matches up AND they both take in tokens -> Match
                        if NLP_FEATURES.TOKEN in component_to_check.in_types and c.type == AnnoTypes.TOKEN_EMBEDDING:
                            logger.info(f'Word Embedding Match found = {c.name}')
                            return False, None
                        # Since document and be substituted for sentence
                        # and vice versa if either of them matches up we have a match
                        if NLP_FEATURES.SENTENCE_EMBEDDINGS in component_to_check.in_types and \
                                c.type == AnnoTypes.DOCUMENT_EMBEDDING:
                            logger.info(f'Sentence Embedding Match found = {c.name}')
                            return False, None
                        # component_to_check requires Sentence_embedding
                        # but the Matching Storage_ref component_to_resolve takes in Token
                        # -> Convert the Output of the Match to SentenceLevel
                        # and feed the component_to_check to the new component_to_resolve
                        if NLP_FEATURES.SENTENCE_EMBEDDINGS in component_to_check.in_types \
                                and c.type == AnnoTypes.TOKEN_EMBEDDING:
                            logger.info(f'Sentence Embedding Conversion Candidate found={c.name}')
                            conversion_type = 'word2sentence'
                            conversion_candidate = c
                        # analogous case as above for chunk
                        if NLP_FEATURES.CHUNK_EMBEDDINGS in component_to_check.in_types and c.type == AnnoTypes.TOKEN_EMBEDDING:
                            logger.info(f'Sentence Embedding Conversion Candidate found={c.name}')
                            conversion_type = 'word2chunk'
                            conversion_candidate = c
        logger.info(f'No matching storage ref found')
        return True, StorageRefConversionResolutionData(storage_ref_to_find, conversion_candidate, conversion_type)
@staticmethod
def extract_required_features_refless_from_pipe(pipe: NLUPipeline):
"""Extract provided features from component_list, which have no storage ref"""
provided_features_no_ref = []
for c in pipe.components:
if c.loaded_from_pretrained_pipe:
continue
for feat in c.in_types:
if 'embed' not in feat: provided_features_no_ref.append(feat)
return ComponentUtils.clean_irrelevant_features(provided_features_no_ref)
@staticmethod
def extract_provided_features_refless_from_pipe(pipe: NLUPipeline):
"""Extract provided features from component_list, which have no storage ref"""
provided_features_no_ref = []
for c in pipe.components:
for feat in c.out_types:
if 'embed' not in feat: provided_features_no_ref.append(feat)
return ComponentUtils.clean_irrelevant_features(provided_features_no_ref)
@staticmethod
def extract_provided_features_ref_from_pipe(pipe: NLUPipeline):
"""Extract provided features from component_list, which have storage ref.
"""
provided_features_ref = []
for c in pipe.components:
for feat in c.out_types:
if 'embed' in feat:
if '@' not in feat:
provided_features_ref.append(feat + "@" + StorageRefUtils.extract_storage_ref(c))
else:
provided_features_ref.append(feat)
return ComponentUtils.clean_irrelevant_features(provided_features_ref)
@staticmethod
def extract_required_features_ref_from_pipe(pipe: NLUPipeline):
"""Extract provided features from component_list, which have storage ref"""
provided_features_ref = []
for c in pipe.components:
if c.loaded_from_pretrained_pipe:
continue
for feat in c.in_types:
if 'embed' in feat:
# if StorageRefUtils.extract_storage_ref(os_components) !='': # special edge case, some components might not have a storage ref set
if '@' not in feat:
provided_features_ref.append(feat + "@" + StorageRefUtils.extract_storage_ref(c))
else:
provided_features_ref.append(feat)
return ComponentUtils.clean_irrelevant_features(provided_features_ref)
@staticmethod
def extract_sentence_embedding_conversion_candidates(pipe: NLUPipeline):
"""Extract information about embedding conversion candidates"""
conversion_candidates_data = []
for c in pipe.components:
if ComponentUtils.component_has_embeddings_requirement(c) and not PipeUtils.is_trainable_pipe(pipe):
storage_ref = StorageRefUtils.extract_storage_ref(c)
conversion_applicable, conversion_data = PipelineCompleter.check_if_storage_ref_is_satisfied_or_get_conversion_candidate(
c, pipe, storage_ref)
if conversion_applicable: conversion_candidates_data.append(conversion_data)
return conversion_candidates_data
    @staticmethod
    def get_missing_required_features(pipe: NLUPipeline):
        """Compute which features the pipeline still requires.

        Returns (missing_features_no_ref, missing_features_ref,
        conversion_candidates): storage-ref-free missing features, missing
        embedding features in '<feat>@<ref>' notation, and pending embedding
        conversions.  Also sets pipe.has_trainable_components as a side
        effect, and for trainable pipes clears required_features_ref and
        assigns the trainable consumer's storage ref from the first provided
        embedding.
        """
        provided_features_no_ref = ComponentUtils.clean_irrelevant_features(
            PipelineCompleter.extract_provided_features_refless_from_pipe(pipe))
        required_features_no_ref = ComponentUtils.clean_irrelevant_features(
            PipelineCompleter.extract_required_features_refless_from_pipe(pipe))
        provided_features_ref = ComponentUtils.clean_irrelevant_features(
            PipelineCompleter.extract_provided_features_ref_from_pipe(pipe))
        required_features_ref = ComponentUtils.clean_irrelevant_features(
            PipelineCompleter.extract_required_features_ref_from_pipe(pipe))
        is_trainable = PipeUtils.is_trainable_pipe(pipe)
        conversion_candidates = PipelineCompleter.extract_sentence_embedding_conversion_candidates(
            pipe)
        pipe.has_trainable_components = is_trainable
        required_features_ref, conversion_candidates = PipeUtils.remove_convertable_storage_refs(required_features_ref,
                                                                                                 conversion_candidates,
                                                                                                 provided_features_ref)
        provided_features_ref, required_features_ref = PipeUtils.update_converter_storage_refs_and_cols(pipe,
                                                                                                        provided_features_ref,
                                                                                                        required_features_ref)
        if is_trainable:
            trainable_index, embed_type = PipeUtils.find_trainable_embed_consumer(pipe)
            required_features_ref = []
            if embed_type is not None:
                # After resolve for a word embedding ,we must fix all NONES and set their storage refs !
                # embed consuming trainable annotators get their storage ref set here
                if len(provided_features_ref) == 0:
                    required_features_no_ref.append(embed_type)
                    if embed_type == NLP_FEATURES.CHUNK_EMBEDDINGS:
                        # chunk embeddings are derived from word embeddings
                        required_features_no_ref.append(NLP_FEATURES.WORD_EMBEDDINGS)
                if len(provided_features_ref) >= 1 and embed_type == NLP_FEATURES.CHUNK_EMBEDDINGS:
                    # This case is for when 1 Embed is preloaded and we still need to load the converter
                    if any(NLP_FEATURES.WORD_EMBEDDINGS in c for c in provided_features_ref):
                        required_features_no_ref.append(embed_type)
                if len(provided_features_ref) >= 1:
                    # TODO Appraoches / Trainable models have no setStorageRef, we must set it after fitting
                    pipe.components[trainable_index].storage_ref = provided_features_ref[0].split('@')[-1]
        missing_features_no_ref = set(required_features_no_ref) - set(
            provided_features_no_ref)  # - set(['text','label'])
        missing_features_ref = set(required_features_ref) - set(provided_features_ref)
        PipelineCompleter.log_resolution_status(provided_features_no_ref, required_features_no_ref,
                                                provided_features_ref, required_features_ref, is_trainable,
                                                conversion_candidates, missing_features_no_ref,
                                                missing_features_ref, )
        return missing_features_no_ref, missing_features_ref, conversion_candidates
    @staticmethod
    def add_sentence_embedding_converter(resolution_data: StorageRefConversionResolutionData) -> NluComponent:
        """ Return a Word to Sentence Embedding converter for a given Component. The input cols with match the Sentence Embedder ones
        The converter is a NLU Component Embelishement of the Spark NLP Sentence Embeddings Annotator
        """
        logger.info(f'Adding Sentence embedding conversion for Embedding Provider={resolution_data}')
        word_embedding_provider = resolution_data.component_candidate
        c = jsl_id_to_empty_component(NLP_NODE_IDS.SENTENCE_EMBEDDINGS_CONVERTER)
        # Converter inherits the word-embedding provider's storage ref so
        # downstream consumers can match it up.
        storage_ref = StorageRefUtils.extract_storage_ref(word_embedding_provider)
        c.set_metadata(c.get_default_model(), 'sentence_embedding_converter',
                       NLP_NODE_IDS.SENTENCE_EMBEDDINGS_CONVERTER, 'xx', False, Licenses.open_source, storage_ref)
        c.model.setStorageRef(storage_ref)
        # set output cols using '<feature>@<storage_ref>' AT-notation
        embed_AT_out = NLP_FEATURES.SENTENCE_EMBEDDINGS + '@' + storage_ref
        c.model.setOutputCol(embed_AT_out)
        c.spark_output_column_names = [embed_AT_out]
        c.spark_input_column_names = [NLP_FEATURES.DOCUMENT, NLP_FEATURES.WORD_EMBEDDINGS + '@' + storage_ref]
        c.model.setInputCols(c.spark_input_column_names)
        return c
    @staticmethod
    def add_chunk_embedding_converter(
            resolution_data: StorageRefConversionResolutionData) -> NluComponent:
        """ Return a Word to CHUNK Embedding converter for a given Component. The input cols with match the Sentence Embedder ones
        The converter is a NLU Component Embelishement of the Spark NLP Sentence Embeddings Annotator
        The CHUNK embedder requires entities and also embeddings to generate data from. Since there could be multiple entities generators, we neeed to pass the correct one
        """
        # TODO REFACTOR
        logger.info(f'Adding Chunk embedding conversion  Provider={resolution_data} and NER Converter provider = ')
        word_embedding_provider = resolution_data.component_candidate
        entities_col = 'entities'
        embed_provider_col = word_embedding_provider.info.spark_output_column_names[0]
        c = jsl_id_to_empty_component(NLP_NODE_IDS.CHUNK_EMBEDDINGS_CONVERTER)
        c.set_metadata(c.get_default_model(),
                       NLP_NODE_IDS.CHUNK_EMBEDDINGS_CONVERTER, NLP_NODE_IDS.CHUNK_EMBEDDINGS_CONVERTER,
                       'xx',
                       False, Licenses.open_source)
        # c = nlu.embeddings_chunker.EmbeddingsChunker(annotator_class='chunk_embedder')
        storage_ref = StorageRefUtils.extract_storage_ref(word_embedding_provider)
        c.model.setStorageRef(storage_ref)
        c.info.storage_ref = storage_ref
        # NOTE(review): this method reads/writes column metadata through
        # c.info.* while add_sentence_embedding_converter sets the attributes
        # directly on c — confirm which access path is current before refactoring.
        c.model.setInputCols(entities_col, embed_provider_col)
        c.model.setOutputCol('chunk_embeddings@' + storage_ref)
        c.info.spark_input_column_names = [entities_col, embed_provider_col]
        c.info.input_column_names = [entities_col, embed_provider_col]
        c.info.spark_output_column_names = ['chunk_embeddings@' + storage_ref]
        c.info.output_column_names = ['chunk_embeddings@' + storage_ref]
        return c
@staticmethod
def check_if_all_conversions_satisfied(components_for_embedding_conversion):
"""Check if all dependencies are satisfied."""
for conversion in components_for_embedding_conversion:
if conversion.component_candidate is not None: return False
return True
@staticmethod
def check_dependencies_satisfied(missing_components, missing_storage_refs,
components_for_embedding_conversion):
"""Check if all dependencies are satisfied."""
return len(missing_components) == 0 and len(
missing_storage_refs) == 0 and PipelineCompleter.check_if_all_conversions_satisfied(
components_for_embedding_conversion)
@staticmethod
def has_licensed_components(pipe: NLUPipeline) -> bool:
"""Check if any licensed components in component_list"""
for c in pipe.components:
if c.license == Licenses.hc or c.license == Licenses.ocr:
return True
return False
@staticmethod
def check_same_as_last_iteration(last_missing_components, last_missing_storage_refs,
last_components_for_embedding_conversion, missing_components, missing_storage_refs,
components_for_embedding_conversion):
return last_missing_components == missing_components and last_missing_storage_refs == missing_storage_refs and last_components_for_embedding_conversion == components_for_embedding_conversion
@staticmethod
def except_infinity_loop(reason):
raise Exception(f"Sorry, nlu has problems building this spell, please report this issue. Problem={reason}")
@staticmethod
def satisfy_dependencies(pipe: NLUPipeline):
"""Feature Dependency Resolution Algorithm.
For a given pipeline with N components, builds a DAG in reverse and satisfy each of their dependencies and child dependencies
with a BFS approach and returns the resulting pipeline
:param pipe: Nlu Pipe containing components for which dependencies should be satisfied
:return: Nlu pipe with dependencies satisfied
"""
all_features_provided = False
is_licensed = PipelineCompleter.has_licensed_components(pipe)
pipe.has_licensed_components = is_licensed
is_trainable = PipeUtils.is_trainable_pipe(pipe)
loop_count = 0
max_loop_count = 5
while all_features_provided == False:
# After new components have been added, check again for the new components if requirements are met
components_to_add = []
missing_components, missing_storage_refs, components_for_embedding_conversion = \
PipelineCompleter.get_missing_required_features(pipe)
if PipelineCompleter.check_dependencies_satisfied(missing_components, missing_storage_refs,
components_for_embedding_conversion):
# Now all features are provided
break
# Update last iteration variables
last_missing_components, last_missing_storage_refs, last_components_for_embedding_conversion = missing_components, missing_storage_refs, components_for_embedding_conversion
# Create missing base storage ref producers, i.e. embeddings
for missing_component in missing_storage_refs:
component = resolve_feature(missing_component, language=pipe.lang,
is_licensed=is_licensed, is_trainable_pipe=is_trainable)
if component is None:
continue
if 'chunk_emb' in missing_component:
components_to_add.append(ComponentUtils.config_chunk_embed_converter(component))
else:
components_to_add.append(component)
# Create missing base components, storage refs are fetched in previous loop
for missing_component in missing_components:
components_to_add.append(
resolve_feature(missing_component, language=pipe.lang, is_licensed=is_licensed,
is_trainable_pipe=is_trainable))
# Create embedding converters
for resolution_info in components_for_embedding_conversion:
converter = None
if 'word2chunk' == resolution_info.type:
converter = PipelineCompleter.add_chunk_embedding_converter(resolution_info)
elif 'word2sentence' == resolution_info.type:
converter = PipelineCompleter.add_sentence_embedding_converter(resolution_info)
if converter is not None:
components_to_add.append(converter)
logger.info(f'Resolved for missing components the following NLU components : {components_to_add}')
# Add missing components
for new_component in components_to_add:
if new_component:
logger.info(f'adding {new_component.name}')
pipe.add(new_component)
# For some models we update storage ref to the resovling models storageref.
# We need to update them so dependencies can properly be deducted as satisfied
pipe = PipeUtils.update_bad_storage_refs(pipe)
# Check if we are in an infinity loop
if PipelineCompleter.check_same_as_last_iteration(last_missing_components, last_missing_storage_refs,
last_components_for_embedding_conversion,
missing_components, missing_storage_refs,
components_for_embedding_conversion):
loop_count += 1
else:
loop_count = 0
if loop_count > max_loop_count:
PipelineCompleter.except_infinity_loop('Failure resolving feature dependencies')
logger.info(f"{'!*'*20} ALL DEPENDENCIES SATISFIED {'!*'*20}")
return pipe
    @staticmethod
    def check_and_fix_component_output_column_name_satisfaction(pipe: NLUPipeline):
        '''
        This function verifies that every input and output column name of a component_to_resolve is satisfied.
        If some output names are missing, it will be added by this method.
        Usually classifiers need to change their input column name, so that it matches one of the previous embeddings because they have dynamic output names
        This function performs the following steps :
        1. For each component_to_resolve we verify that all input column names are satisfied by checking all other components output names
        2. When a input column is missing we do the following :
        2.1 Figure out the type of the missing input column. The name of the missing column should be equal to the type
        2.2 Check if there is already a component_to_resolve in the component_list, which provides this input (It should)
        2.3. When A providing component_to_resolve is found, check if storage ref matches up.
        2.4 If True for all, update provider component_to_resolve output name, or update the original component_to_resolve input name
        :return: NLU pipeline where the output and input column names of the models have been adjusted to each other
        '''
        logger.info("Fixing input and output column names")
        for component_to_check in pipe.components:
            # pretrained-pipeline components keep their original column wiring
            if component_to_check.loaded_from_pretrained_pipe: continue
            input_columns = set(component_to_check.spark_input_column_names)
            # a component_to_resolve either has '' storage ref or at most 1
            logger.info(
                f'Checking for component_to_resolve {component_to_check.name} wether inputs {input_columns} is satisfied by another component_to_resolve in the component_list ', )
            for other_component in pipe.components:
                if component_to_check.name == other_component.name: continue
                output_columns = set(other_component.spark_output_column_names)
                input_columns -= output_columns  # remove provided columns
                input_columns = ComponentUtils.clean_irrelevant_features(input_columns)
            # Resolve basic mismatches, usually storage refs
            if len(input_columns) != 0 and not pipe.has_trainable_components or ComponentUtils.is_embedding_consumer(
                    component_to_check):  # fix missing column name
                # We must not only check if input satisfied, but if storage refs match! and Match Storage_refs accordingly
                logger.info(f"Fixing bad input col for C={component_to_check} untrainable component_list")
                resolved_storage_ref_cols = []
                for missing_column in input_columns:
                    for other_component in pipe.components:
                        if component_to_check.name == other_component.name: continue
                        if other_component.type == missing_column:
                            # We update the output name for the component_to_resolve which consumes our feature
                            if StorageRefUtils.has_storage_ref(
                                    other_component) and ComponentUtils.is_embedding_provider(component_to_check):
                                if ComponentUtils.are_producer_consumer_matches(component_to_check, other_component):
                                    resolved_storage_ref_cols.append(
                                        (other_component.spark_output_column_names[0], missing_column))
                            component_to_check.spark_output_column_names = [missing_column]
                            logger.info(
                                f'Resolved requirement for missing_column={missing_column} with inputs from provider={other_component.name} by col={missing_column} ')
                            other_component.model.setOutputCol(missing_column)
                # swap the unsatisfied input names for the resolved provider columns
                for resolution, unsatisfied in resolved_storage_ref_cols:
                    component_to_check.spark_input_column_names.remove(unsatisfied)
                    component_to_check.spark_input_column_names.append(resolution)
            # Resolve training missmatches
            elif len(input_columns) != 0 and pipe.has_trainable_components:  # fix missing column name
                logger.info(f"Fixing bad input col for C={component_to_check} trainable component_list")
                # for trainable components, we change their input columns and leave other components outputs unchanged
                for missing_column in input_columns:
                    for other_component in pipe.components:
                        if component_to_check.name == other_component.name: continue
                        if other_component.type == missing_column:
                            # We update the input col name for the componenet that has missing cols
                            component_to_check.spark_input_column_names.remove(missing_column)
                            component_to_check.spark_input_column_names.append(
                                other_component.spark_output_column_names[0])
                            component_to_check.model.setInputCols(
                                component_to_check.spark_input_column_names)
                            logger.info(
                                f'Setting input col columns for component_to_resolve {component_to_check.name} to {other_component.spark_output_column_names[0]} ')
        return pipe
    @staticmethod
    def check_and_fix_nlu_pipeline(pipe: NLUPipeline):
        """Check if the NLU pipeline is ready to transform data and return it.
        If all dependencies not satisfied, returns a new NLU pipeline where dependencies and sub-dependencies are satisfied.
        Checks and resolves in the following order :
        1. Get a reference list of input features missing for the current component_list
        2. Resolve the list of missing features by adding new Annotators to component_list
        3. Add NER Converter if required (When there is a NER model_anno_obj)
        4. Fix order and output column names
        5. Rename duplicate columns and re-apply column enforcement
        :param pipe: pipeline to validate and complete
        :return: completed, ordered pipeline ready for transform
        """
        # main entry point for Model stacking without pretrained pipelines
        # requirements and provided features will be lists of lists
        # 0. Clean old @AT storage ref from all columns
        # logger.info('Cleaning old AT refs')
        # pipe = PipeUtils.clean_AT_storage_refs(pipe)
        # 1. Resolve dependencies, builds a DAG in reverse and satisfies dependencies with a Breadth-First-Search approach
        # 0. Write additional metadata to the pipe pre pipe construction
        pipe = PipeUtils.add_metadata_to_pipe(pipe)
        logger.info('Satisfying dependencies')
        pipe = PipelineCompleter.satisfy_dependencies(pipe)
        # 2. Enforce naming schema <col_name>@<storage_ref> for storage_ref consumers and producers and <entity@nlu_ref> and <ner@nlu_ref> for NER and NER-Converters
        # and add NER-IOB to NER-Pretty converters for every NER model_anno_obj that is not already feeding a NER converter
        pipe = PipeUtils.enforce_AT_schema_on_pipeline_and_add_NER_converter(pipe)
        # 2.1 If Sentence Resolvers are in pipeline, all Sentence-Embeddings must feed from Chunk2Doc which stems from the entities column to resolve
        pipe = PipelineCompleter.enforce_chunk2doc_on_sentence_embeddings(pipe)
        # 3. Validate naming of output columns is correct and no error will be thrown in spark
        logger.info('Fixing column names')
        pipe = PipelineCompleter.check_and_fix_component_output_column_name_satisfaction(pipe)
        # 4. Set on every NLP Annotator the output columns
        pipe = PipeUtils.enforce_NLU_columns_to_NLP_columns(pipe)
        # 5. fix order
        logger.info('Optimizing component_list component_to_resolve order')
        pipe = PipelineCompleter.check_and_fix_component_order(pipe)
        # 6. Rename overlapping/duplicate leaf columns in the DAG
        logger.info('Renaming duplicates cols')
        pipe = PipeUtils.rename_duplicate_cols(pipe)
        # 7. enfore again because trainable pipes might mutate component_list cols
        pipe = PipeUtils.enforce_NLU_columns_to_NLP_columns(pipe)
        # 8. Write additional metadata to the pipe post pipe construction
        pipe = PipeUtils.add_metadata_to_pipe(pipe)
        # 9. For Table-QA based Pipes, we Inject a SetenceDetector for tokenizing the questions
        pipe = PipeUtils.add_sentence_detector_to_pipe_if_required(pipe)
        logger.info('Done with component_list optimizing')
        return pipe
    @staticmethod
    def check_and_fix_component_order(pipe: NLUPipeline):
        '''
        This method takes care that the order of components is the correct in such a way,that the pipeline can be iteratively processed by spark NLP.
        Column Names will not be touched. DAG Task Sort basically.

        A component is appended to the sorted result only once all of its
        (storage-ref-stripped) input columns are already provided by
        previously sorted components. Two special cases re-wire the first
        remaining trainable component onto the first provided sentence/word
        embedding column. Aborts via except_infinity_loop when no progress
        is made for max_loop_count rounds.
        '''
        logger.info("Starting to optimize component_to_resolve order ")
        correct_order_component_pipeline = []
        provided_features = []
        all_components_ordered = False
        unsorted_components = pipe.components
        update_last_type = False
        last_type_sorted = None
        trainable_updated = False
        pipe.components = sorted(pipe.components, key=lambda x: x.type)
        if not pipe.contains_ocr_components:
            # if OCR we must take text sorting into account. Non-OCR pipes get text provided externalyl
            provided_features.append('text')
        if pipe.contains_audio_components:
            provided_features.append(NLP_FEATURES.RAW_AUDIO)
        loop_count = 0
        max_loop_count = 10 * len(pipe.components)
        last_correct_order_component_pipeline = []
        last_provided_features = []
        while not all_components_ordered:
            if update_last_type:
                last_type_sorted = None
            else:
                update_last_type = True
            for component in unsorted_components:
                logger.info(f"Optimizing order for component_to_resolve {component.name}")
                input_columns = ComponentUtils.remove_storage_ref_from_features(
                    ComponentUtils.clean_irrelevant_features(component.spark_input_column_names.copy(), False, False))
                if last_type_sorted is None or component.type == last_type_sorted:
                    if set(input_columns).issubset(provided_features):
                        correct_order_component_pipeline.append(component)
                        # Leave pretrained component_list components untouched
                        if component.loaded_from_pretrained_pipe:
                            unsorted_components.remove(component)
                        # NOTE(review): this second remove makes the first one
                        # redundant (it re-checks membership); safe because we
                        # break out of the for-loop right after mutating.
                        if component in unsorted_components:
                            unsorted_components.remove(component)
                        # TODO remove storage ref from provided features ?
                        provided_features += ComponentUtils.remove_storage_ref_from_features(
                            ComponentUtils.clean_irrelevant_features(component.spark_output_column_names.copy(), False,
                                                                     False))
                        last_type_sorted = component.type
                        update_last_type = False
                        break
            if len(unsorted_components) == 0:
                all_components_ordered = True
            if not all_components_ordered and len(
                    unsorted_components) <= 2 and pipe.has_trainable_components and not trainable_updated and \
                    unsorted_components[0].trainable and 'sentence_embeddings@' in unsorted_components[
                0].spark_input_column_names:
                # special case, if trainable then we feed embed consumers on the first sentence embed provider
                # 1. Find first sent embed provider
                # 2. substitute any 'sent_embed@' consumer inputs for the provider col
                for f in provided_features:
                    if 'sentence_embeddings' in f and not trainable_updated:
                        unsorted_components[0].spark_input_column_names.remove('sentence_embeddings@')
                        if 'sentence_embeddings@' in unsorted_components[0].spark_input_column_names:
                            unsorted_components[0].spark_input_column_names.remove('sentence_embeddings@')
                        unsorted_components[0].spark_input_column_names.append(f)
                        if f not in unsorted_components[0].spark_input_column_names: unsorted_components[
                            0].spark_input_column_names.append(f)
                        trainable_updated = True
            if not all_components_ordered and len(
                    unsorted_components) <= 2 and pipe.has_trainable_components and not trainable_updated and \
                    unsorted_components[0].trainable and 'word_embeddings@' in unsorted_components[
                0].spark_input_column_names:
                # special case, if trainable then we feed embed consumers on the first sentence embed provider
                # 1. Find first sent embed provider
                # 2. substitute any 'sent_embed@' consumer inputs for the provider col
                for f in provided_features:
                    if 'word_embeddings' in f and not trainable_updated:
                        unsorted_components[0].spark_input_column_names.remove('word_embeddings@')
                        if 'word_embeddings@' in unsorted_components[0].spark_input_column_names: unsorted_components[
                            0].spark_input_column_names.remove(
                            'word_embeddings@')
                        unsorted_components[0].spark_input_column_names.append(f)
                        if f not in unsorted_components[0].spark_input_column_names: unsorted_components[
                            0].spark_input_column_names.append(f)
                        trainable_updated = True
            # detect endless loop
            if last_correct_order_component_pipeline == correct_order_component_pipeline and last_provided_features == provided_features:
                loop_count += 1
            else:
                loop_count = 0
            if loop_count > max_loop_count:
                PipelineCompleter.except_infinity_loop('Failure sorting dependencies')
            last_provided_features = provided_features.copy()
            # correct_order_component_pipeline = last_correct_order_component_pipeline.copy()
            last_correct_order_component_pipeline = correct_order_component_pipeline.copy()
        pipe.components = correct_order_component_pipeline
        return pipe
@staticmethod
def is_storage_ref_match(embedding_consumer, embedding_provider, pipe: NLUPipeline):
"""Check for 2 components, if one provides the embeddings for the other. Makes sure that pipe_prediction_output_level matches up (chunk/sent/tok/embeds)"""
consumer_AT_ref = ComponentUtils.extract_storage_ref_AT_notation_for_embeds(embedding_consumer, 'input')
provider_AT_rev = ComponentUtils.extract_storage_ref_AT_notation_for_embeds(embedding_provider, 'output')
consum_level = ComponentUtils.extract_embed_level_identity(embedding_consumer, 'input')
provide_level = ComponentUtils.extract_embed_level_identity(embedding_provider, 'output')
consumer_ref = StorageRefUtils.extract_storage_ref(embedding_consumer)
provider_rev = StorageRefUtils.extract_storage_ref(embedding_provider)
# input/output levels must match
if consum_level != provide_level: return False
# If storage ref dont match up, we must consult the storage_ref_2_embed mapping if it still maybe is a match, otherwise it is not.
if consumer_ref == provider_rev: return True
# Embed Components have have been resolved via@ have a nlu_resolution_ref_source will match up with the consumer ref if correct embedding.
if hasattr(embedding_provider.info, 'nlu_ref'):
if consumer_ref == StorageRefUtils.extract_storage_ref(embedding_provider.info.nlu_ref): return True
# If it is either sentence_embedding_converter or chunk_embedding_converter then we gotta check what the storage ref of the inpot of those is.
# If storage ref matches up, the providers output will match the consumer
# if embedding_provider
if embedding_provider.info.name in ["chunk_embedding_converter",
'sentence_embedding_converter']: # TODO FOR RESOLUTION
nlu_ref, conv_prov_storage_ref = PipelineCompleter.get_converters_provider_info(embedding_provider,
pipe)
return False
    @staticmethod
    def is_matching_level(embedding_consumer, embedding_provider):
        """Check for the embedding consumer whether its input level matches the
        output level of the embedding provider.

        NOTE(review): stub — no implementation yet, so it always returns None.
        """
    @staticmethod
    def get_converters_provider_info(embedding_provider, pipe: NLUPipeline):
        """For a component_to_resolve and a component_list, find its storage_ref
        and the nlu_ref of the converter's provider.

        NOTE(review): stub — currently returns None, yet the caller in
        is_storage_ref_match unpacks the result into two values, which would
        raise TypeError if that branch were ever reached. TODO confirm intent.
        """
@staticmethod
def enforce_chunk2doc_on_sentence_embeddings(pipe: NLUPipeline):
"""
#If Sentence Resolvers are in pipeline, all Sentence-Embeddings must feed from Chunk2Doc which stems from
the entities column to resolve We need to update input/output types of sentence Resolver, to the component_to_resolve
so sorting does not get confused
"""
if not pipe.has_licensed_components:
return pipe
resolvers = []
ner_converters = []
sentence_embeddings = []
# Find Resolver
for i, c in enumerate(pipe.components):
if c.loaded_from_pretrained_pipe: continue
# if isinstance(c.model_anno_obj, SentenceEntityResolverModel): resolvers.append(c)
# if isinstance(c.model_anno_obj, (NerConverter, NerConverterInternal)): ner_converters.append(c)
# if 'sentence_embeddings' == c.info.type: sentence_embeddings.append(c)
if c.name == NLP_HC_NODE_IDS.SENTENCE_ENTITY_RESOLVER:
resolvers.append(c)
if c.name in [NLP_NODE_IDS.NER_CONVERTER, NLP_HC_NODE_IDS.NER_CONVERTER_INTERNAL]:
ner_converters.append(c)
if c.type == AnnoTypes.DOCUMENT_EMBEDDING or c.type == AnnoTypes.SENTENCE_EMBEDDING:
sentence_embeddings.append(c)
# No resolvers, nothing to update
if len(resolvers) == 0:
return pipe
# Update Resolver
# TODO this does not work in multi resolver scenarios reliably
if NLP_FEATURES.DOCUMENT in sentence_embeddings[0].in_types:
sentence_embeddings[0].in_types.remove(NLP_FEATURES.DOCUMENT)
if NLP_FEATURES.SENTENCE in sentence_embeddings[0].in_types:
sentence_embeddings[0].in_types.remove(NLP_FEATURES.SENTENCE)
if NLP_FEATURES.DOCUMENT in sentence_embeddings[0].spark_input_column_names:
sentence_embeddings[0].spark_input_column_names.remove(NLP_FEATURES.DOCUMENT)
if NLP_FEATURES.SENTENCE in sentence_embeddings[0].spark_input_column_names:
sentence_embeddings[0].spark_input_column_names.remove(NLP_FEATURES.SENTENCE)
sentence_embeddings[0].in_types.append(NLP_FEATURES.DOCUMENT_FROM_CHUNK)
sentence_embeddings[0].spark_input_column_names.append(NLP_FEATURES.DOCUMENT_FROM_CHUNK)
# sentence_embeddings[0].info.inputs = ['chunk2doc']
# sentence_embeddings[0].info.spark_input_column_names = ['chunk2doc']
# sentence_embeddings[0].model_anno_obj.setInputCols('chunk2doc') # shouldb e handled by enforcing
# chunk2doc.model_anno_obj.setOutputCol("chunk2doc")
# chunk2doc.info.inputs = ner_converters[0].spark_output_column_names
# TODO this will not be resolved by the resolution Algo!!
chunk2doc = resolve_feature(NLP_FEATURES.DOCUMENT_FROM_CHUNK, 'xx')
chunk2doc.model.setInputCols(ner_converters[0].spark_output_column_names)
chunk2doc.spark_input_column_names = ner_converters[0].spark_output_column_names
pipe.components.append(chunk2doc)
# this will add a entity converter and a NER model_anno_obj if none provided
pipe = PipelineCompleter.satisfy_dependencies(pipe)
return pipe
@staticmethod
def log_resolution_status(provided_features_no_ref, required_features_no_ref, provided_features_ref,
required_features_ref, is_trainable, conversion_candidates, missing_features_no_ref,
missing_features_ref, ):
logger.info(f"========================================================================")
logger.info(f"Resolution Status provided_features_no_ref = {set(provided_features_no_ref)}")
logger.info(f"Resolution Status required_features_no_ref = {set(required_features_no_ref)}")
logger.info(f"Resolution Status provided_features_ref = {set(provided_features_ref)}")
logger.info(f"Resolution Status required_features_ref = {set(required_features_ref)}")
logger.info(f"Resolution Status is_trainable = {is_trainable}")
logger.info(f"Resolution Status conversion_candidates = {conversion_candidates}")
logger.info(f"Resolution Status missing_features_no_ref = {set(missing_features_no_ref)}")
logger.info(f"Resolution Status conversion_candidates = {set(missing_features_ref)}")
logger.info(f"========================================================================") | PypiClean |
/EntropyEncoding-0.0.4.tar.gz/EntropyEncoding-0.0.4/EntropyEncoding.py |
###################
# This package implements an encoding to bypass entropy antivirus check.
# Copyright (C) 2023 EntropyEncoding
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
###################
"""
This package implements an encoding to bypass entropy antivirus check.
https://www.schellman.com/blog/cybersecurity/penetration-testing-methods-entropy
https://vanmieghem.io/blueprint-for-evading-edr-in-2022/
https://pentest.blog/art-of-anti-detection-1-introduction-to-av-detection-techniques/
Information based on a blog post (a red teamer testing an unnamed EDR):
 - the antivirus flags potentially malicious entropy when the entropy score is greater than 7.2
~# python3 EntropyEncoding.py
Entropy for non-encoded secrets: 4.521591372417719
Entropy for non-encoded encrypted secrets: 7.945422222752084
Entropy for entropy-encoded encrypted secrets: 5.762166896848745
Entropy for entropy-encoded2 encrypted secrets: 5.748670434218312
Entropy for non-encoded exe: 5.22055339277441
Entropy for non-encoded encrypted exe: 7.923900258907012
Entropy for entropy-encoded encrypted exe: 5.756072685391074
Entropy for entropy-encoded2 encrypted exe: 5.799741821347019
~#
"""
__version__ = "0.0.4"
__author__ = "Maurice Lambert"
__author_email__ = "mauricelambert434@gmail.com"
__maintainer__ = "Maurice Lambert"
__maintainer_email__ = "mauricelambert434@gmail.com"
__description__ = """
This package implements an encoding to bypass entropy antivirus check.
"""
__url__ = "https://github.com/mauricelambert/EntropyEncoding"
__all__ = [
"entropy_encode2",
"entropy_decode2",
"entropy_encode",
"entropy_decode",
"shannon_entropy",
]
__license__ = "GPL-3.0 License"
__copyright__ = """
EntropyEncoding Copyright (C) 2023 Maurice Lambert
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
"""
copyright = __copyright__
license = __license__
print(copyright)
from math import log
from typing import Tuple, Dict
from random import choice, randint
from base64 import b32encode, b32decode
from collections import Counter, defaultdict
dll = (
b'MZ\x90\x00\x03\x00\x00\x00\x04\x00\x00\x00\xff\xff\x00\x00\xb8\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x0e\x1f\xba\x0e\x00\xb4\t\xcd!\xb8\x01L\xcd!This program cannot be run in DOS mode.\r\r\n$\x00\x00\x00\x00\x00\x00\x00PE\x00\x00L\x01\x03\x00\xcf\x1e^\x92\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x00\x02\x01\x0b\x010\x00\x00*\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x9eI\x00\x00\x00 \x00\x00\x00`\x00\x00\x00\x00@\x00\x00 \x00\x00\x00\x02\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x00\x00\x00\x02\x00\x00\x85\xd9\x00\x00\x03\x00`\x85\x00\x00\x10\x00\x00\x10\x00\x00\x00\x00\x10\x00\x00\x10\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LI\x00\x00O\x00\x00\x00\x00`\x00\x00P\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x0c\x00\x00\x00\x80H\x00\x008\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08 \x00\x00H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00.text\x00\x00\x00\xa4)\x00\x00\x00 \x00\x00\x00*\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 
\x00\x00`.rsrc\x00\x00\x00P\x07\x00\x00\x00`\x00\x00\x00\x08\x00\x00\x00,\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00@.reloc\x00\x00\x0c\x00\x00\x00\x00\x80\x00\x00\x00\x02\x00\x00\x004\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80I\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x02\x00\x05\x00\x04)\x00\x00\x84\x19\x00\x00\x03\x00\x00\x00\x05\x00\x00\x06\x88B\x00\x00\xf8\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfa\x02\x03(\x01\x00\x00+/ \x03\x02o\x15\x00\x00\no\x16\x00\x00\n\x171%\x03\x02o\x15\x00\x00\n\x16o\x17\x00\x00\n\x1f-3\x14\x03\x02\x17Yo\x15\x00\x00\n(\x0e\x00\x00\x06s\x18\x00\x00\nz*\x00\x130\x04\x00\x17\x03\x00\x00\x01\x00\x00\x11\x02-\x15r\x01\x00\x00p(\x19\x00\x00\no\x1a\x00\x00\ns\x1b\x00\x00\nzr\x0b\x00\x00p\x80\x02\x00\x00\x04r\x0b\x00\x00p\x80\x01\x00\x00\x04r\x0b\x00\x00p\x80\x04\x00\x00\x04r\x0b\x00\x00p\x80\x05\x00\x00\x04r\x0b\x00\x00p\x80\x07\x00\x00\x04r\x0b\x00\x00p\x80\x06\x00\x00\x04r\x0b\x00\x00p\x80\x03\x00\x00\x04r\x0b\x00\x00p\x80\x08\x00\x00\x04\x16\x80\n\x00\x00\x04\x02(\x01\x00\x00+\n\x16\x0b\x06:\x92\x02\x00\x00r\x0b\x00\x00ps\x1c\x00\x00\nz\x02\x07%\x17X\x0bo\x15\x00\x00\n(\x1d\x00\x00\no\x1e\x00\x00\n\x0c\x08o\x16\x00\x00\n\x161!\x08\x16o\x17\x00\x00\n\x1f-.\x16(\r\x00\x00\x06r\r\x00\x00p\x08(\x1f\x00\x00\ns\x1c\x00\x00\nz\x08(\x14\x00\x00\x06\r\t >\xa8kA58\t \n\x86\x8b\x135\x15\t c\xacN\x11.k\t \n\x86\x8b\x13.x8\x05\x02\x00\x00\t c\xfdf\x17;\x92\x00\x00\x00\t >\xa8kA;\xb1\x00\x00\x008\xea\x01\x00\x00\t \xf1\xf3\xd3V5\x18\t \xe7>\x16S;\xae\x00\x00\x00\t \xf1\xf3\xd3V.|8\xca\x01\x00\x00\t o7K\x8c;\xc0\x00\x00\x00\t \xb7\xc3X\xa0;\xa0\x00\x00\x00\t \x8c\x97\x7f\xa2./8\xa7\x01\x00\x00\x08r\x15\x00\x00p( \x00\x00\n:\xad\x00\x00\x008\x92\x01\x00\x00\x08r!\x00\x00p( 
\x00\x00\n:\xb4\x00\x00\x008}\x01\x00\x00\x08rA\x00\x00p( \x00\x00\n:\xbb\x00\x00\x008h\x01\x00\x00\x08r]\x00\x00p( \x00\x00\n:\xc2\x00\x00\x008S\x01\x00\x00\x08r\x87\x00\x00p( \x00\x00\n:\xc9\x00\x00\x008>\x01\x00\x00\x08r\x9b\x00\x00p( \x00\x00\n:\xcd\x00\x00\x008)\x01\x00\x00\x08r\xb3\x00\x00p( \x00\x00\n:\xd1\x00\x00\x008\x14\x01\x00\x00\x08r\xcd\x00\x00p( \x00\x00\n:\xda\x00\x00\x008\xff\x00\x00\x00\x08r\xe5\x00\x00p( \x00\x00\n:\xe3\x00\x00\x008\xea\x00\x00\x00\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n\x80\x02\x00\x00\x048\xe4\x00\x00\x00\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n\x80\x01\x00\x00\x048\xc8\x00\x00\x00\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n\x80\x04\x00\x00\x048\xac\x00\x00\x00\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n\x80\x05\x00\x00\x048\x90\x00\x00\x00\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n\x80\x07\x00\x00\x04+w\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n\x80\x06\x00\x00\x04+^\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n(!\x00\x00\n\x80\x03\x00\x00\x04+@\x07\x02(\x01\x00\x00\x06\x02\x07%\x17X\x0bo\x15\x00\x00\n(!\x00\x00\n\x80\x08\x00\x00\x04+"\x07\x17X\x0b\x17\x80\n\x00\x00\x04+\x16(\r\x00\x00\x06r\r\x00\x00p\x08(\x1f\x00\x00\ns\x1c\x00\x00\nz\x07\x06?r\xfd\xff\xff*\x00\x130\x02\x00\x99\x00\x00\x00\x00\x00\x00\x00~\x01\x00\x00\x04("\x00\x00\n,\x10r\xf3\x00\x00p(\x0f\x00\x00\x06s\x18\x00\x00\nz~\x01\x00\x00\x04(#\x00\x00\n-\x10r\xf3\x00\x00p(\x0b\x00\x00\x06s\x18\x00\x00\nz~\x02\x00\x00\x04("\x00\x00\n,\x10r\x11\x01\x00p(\x0f\x00\x00\x06s\x18\x00\x00\nz~\x03\x00\x00\x04("\x00\x00\n,\x10r\x1b\x01\x00p(\x0f\x00\x00\x06s\x18\x00\x00\nz~\x07\x00\x00\x04("\x00\x00\n,\x1c~\x06\x00\x00\x04("\x00\x00\n,\x10r3\x01\x00p(\x0f\x00\x00\x06s\x18\x00\x00\nz*\x00\x00\x00\x1b0\x07\x00\xfd\x01\x00\x00\x02\x00\x00\x11~\x07\x00\x00\x04("\x00\x00\n-E~\x07\x00\x00\x04\x17\x8d+\x00\x00\x01%\x16\x1f,\x9do$\x00\x00\n~\x0e\x00\x00\x04%-\x17&~\r\x00\x00\x04\xf
e\x06\x17\x00\x00\x06s%\x00\x00\n%\x80\x0e\x00\x00\x04(\x02\x00\x00+(\x03\x00\x00+s(\x00\x00\n+\x05s)\x00\x00\n\n~\x06\x00\x00\x04("\x00\x00\n-E~\x06\x00\x00\x04\x17\x8d+\x00\x00\x01%\x16\x1f,\x9do$\x00\x00\n~\x0f\x00\x00\x04%-\x17&~\r\x00\x00\x04\xfe\x06\x18\x00\x00\x06s%\x00\x00\n%\x80\x0f\x00\x00\x04(\x02\x00\x00+(\x03\x00\x00+s(\x00\x00\n+\x05s)\x00\x00\n\x0b\x06\x07(\x04\x00\x00+(\x05\x00\x00+\x0c\x08(\x06\x00\x00+,)(\x1d\x00\x00\n(\x12\x00\x00\x06\x17\x8d\x11\x00\x00\x01%\x16r[\x01\x00p\x08(-\x00\x00\n\xa2(.\x00\x00\ns/\x00\x00\nz~\x01\x00\x00\x04(!\x00\x00\n\x80\x01\x00\x00\x04s0\x00\x00\n\r\x06o1\x00\x00\n\x13\x04+Z\x12\x04(2\x00\x00\n\x13\x05~\x01\x00\x00\x04\x11\x05(3\x00\x00\n(4\x00\x00\n- (\x1d\x00\x00\n(\x11\x00\x00\x06\x17\x8d\x11\x00\x00\x01%\x16\x11\x05\xa2(.\x00\x00\ns/\x00\x00\nz\t~\x02\x00\x00\x04~\x01\x00\x00\x04\x11\x05(3\x00\x00\n\x14\x16s5\x00\x00\no6\x00\x00\n\x12\x04(7\x00\x00\n-\x9d\xde\x0e\x12\x04\xfe\x16\x05\x00\x00\x1bo8\x00\x00\n\xdc\x07o1\x00\x00\n\x13\x06+Z\x12\x06(2\x00\x00\n\x13\x07~\x01\x00\x00\x04\x11\x07(3\x00\x00\n(4\x00\x00\n- 
(\x1d\x00\x00\n(\x11\x00\x00\x06\x17\x8d\x11\x00\x00\x01%\x16\x11\x07\xa2(.\x00\x00\ns/\x00\x00\nz\t~\x02\x00\x00\x04~\x01\x00\x00\x04\x11\x07(3\x00\x00\n\x14\x17s5\x00\x00\no6\x00\x00\n\x12\x06(7\x00\x00\n-\x9d\xde\x0e\x12\x06\xfe\x16\x05\x00\x00\x1bo8\x00\x00\n\xdc\t*\x00\x00\x00\x01\x1c\x00\x00\x02\x00\t\x01gp\x01\x0e\x00\x00\x00\x00\x02\x00\x86\x01g\xed\x01\x0e\x00\x00\x00\x00\x1b0\x05\x00\xb8\x00\x00\x00\x03\x00\x00\x11\x02(\x02\x00\x00\x06(\x03\x00\x00\x06(\x04\x00\x00\x06\n~\x08\x00\x00\x04("\x00\x00\n-\'~\x08\x00\x00\x04(9\x00\x00\ns:\x00\x00\n\x0c\x08o;\x00\x00\n\x80\t\x00\x00\x04\xde\n\x08,\x06\x08o8\x00\x00\n\xdc~\x02\x00\x00\x04\x06~\t\x00\x00\x04~\x04\x00\x00\x04~\x05\x00\x00\x04s<\x00\x00\n\x0b\x07~\x03\x00\x00\x04o=\x00\x00\n\xdeK\r\to>\x00\x00\n(?\x00\x00\n(\x13\x00\x00\x06(?\x00\x00\n\xde3\x13\x04\x11\x04o>\x00\x00\n(?\x00\x00\n(\x13\x00\x00\x06(?\x00\x00\n\xde\x19\x13\x05~\n\x00\x00\x04,\x02\xfe\x1a\x11\x05o>\x00\x00\n(?\x00\x00\n\xde\x00*\x014\x00\x00\x02\x00-\x00\r:\x00\n\x00\x00\x00\x00\x00\x00\x00\x00ll\x00\x18\x19\x00\x00\x01\x00\x00\x00\x00l\x84\x00\x1a\x1a\x00\x00\x01\x00\x00\x00\x00l\x9e\x00\x19\x1b\x00\x00\x01\x1e\x02(@\x00\x00\n*\x130\x02\x00-\x00\x00\x00\x04\x00\x00\x11~\x0b\x00\x00\x04- 
r_\x01\x00p\xd0\x03\x00\x00\x02(A\x00\x00\noB\x00\x00\nsC\x00\x00\n\n\x06\x80\x0b\x00\x00\x04~\x0b\x00\x00\x04*\x1a~\x0c\x00\x00\x04*\x1e\x02\x80\x0c\x00\x00\x04*V(\x07\x00\x00\x06r\xdf\x01\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06r%\x02\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06rO\x02\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06r\x89\x02\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06r\xc7\x02\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06r\x01\x03\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06r7\x03\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06rw\x03\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06r\xa9\x03\x00p~\x0c\x00\x00\x04oD\x00\x00\n*V(\x07\x00\x00\x06r\x01\x04\x00p~\x0c\x00\x00\x04oD\x00\x00\n*\x130\x02\x00,\x00\x00\x00\x05\x00\x00\x11\x02,\' \xc5\x9d\x1c\x81\n\x16\x0b+\x14\x02\x07o\x17\x00\x00\n\x06a \x93\x01\x00\x01Z\n\x07\x17X\x0b\x07\x02o\x16\x00\x00\n2\xe3\x06*.s\x16\x00\x00\x06\x80\r\x00\x00\x04*\x1e\x02(@\x00\x00\n*\x1e\x03oE\x00\x00\n*\x1e\x03oE\x00\x00\n*BSJB\x01\x00\x01\x00\x00\x00\x00\x00\x0c\x00\x00\x00v4.0.30319\x00\x00\x00\x00\x05\x00l\x00\x00\x000\x07\x00\x00#~\x00\x00\x9c\x07\x00\x00\xe4\t\x00\x00#Strings\x00\x00\x00\x00\x80\x11\x00\x00\x18\x04\x00\x00#US\x00\x98\x15\x00\x00\x10\x00\x00\x00#GUID\x00\x00\x00\xa8\x15\x00\x00\xdc\x03\x00\x00#Blob\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x01W\x15\xa2\t\t\x0b\x00\x00\x00\xfa\x013\x00\x16\x00\x00\x01\x00\x00\x006\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x18\x00\x00\x00\x08\x00\x00\x00E\x00\x00\x00\x16\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x0c\x00\x00\x00\r\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x00\x00l\x05\x01\x00\x00\x00\x00\x00\x06\x00\xbc\x04\x1a\x08\x06\x00?\x05\x1a\x08\x06\x00\xef\x03\xe8\x07\x0f\x00\x95\x08\x00\x00\x06\x00\x83\x04\xca\x06\x06\x00d\x04\xca\x06\x06\x000\x04\xca\x06\x06\x00\xcd\x03:\x08\x06\x00\xa0\x04\x1a\x08\x06\x00\x8b\x03\xfb\x07\x06\x00\x1
c\x04\xfb\x07\x06\x00\x10\x05\x7f\x06\x06\x00\xdc\x04\xca\x06\x06\x00\xf5\x04\xca\x06\x06\x00&\x05\xca\x06\x06\x00K\x04\x96\x05\x06\x00J\t\x7f\x06\x06\x00{\x00#\x02\x06\x00|\x00#\x02\n\x00m\x06?\x06\x0e\x00q\x00#\x02W\x00\xa5\x07\x00\x00\n\x00`\x06?\x06\x06\x007\x07\xb2\x01\x06\x00\x08\x07\x7f\x06\n\x00\xdc\x06?\x06\x06\x00\x10\x07\x7f\x06\x06\x00p\x03\x1a\x08\x06\x00\x83\x00\x7f\x06\x12\x00\x99\x03\x82\x07\x06\x00\xb0\x03\xe8\x07\x06\x00r\x07:\x08\x06\x00\x1a\x07\xa1\x06\x12\x00[\x03\x14\x06\x12\x00\x03\x04\x14\x06\x0e\x00\x7f\x02&\x07\x06\x00c\x00#\x02\x06\x00\xbc\x05\x7f\x06\x06\x002\x03\xca\x06\x06\x00\xf2\x06\x7f\x06\x06\x00\x04\x06\xb2\x01\x06\x00\xbf\t\xb2\x01\x06\x002\x07\x7f\x06\x06\x00O\x07\x7f\x06\x06\x00\xba\x02\xb2\x01\n\x00\xb6\x06?\x06\x06\x00\x8a\x02\x7f\x06\x06\x00\x8d\x05\x91\t\x06\x00D\x07\xb2\x01\n\x00=\x03?\x06\x06\x00\xbf\x02\x7f\x06\x06\x00\xe6\x02\x7f\x06\x06\x00\x96\x02\x7f\x06\x06\x00\xb2\t\xca\x06\x00\x00\x00\x00\x97\x00\x00\x00\x00\x00\x01\x00\x01\x00\x81\x01\x10\x00\xc9\x07\xb0\x07E\x00\x01\x00\x01\x00\x00\x00\x10\x00A\x08\xa4\x08E\x00\x0b\x00\x06\x00\x00\x01\x00\x00\xa0\x00\x00\x00E\x00\r\x00\x14\x00\x03!\x10\x00\x1f\x02\x00\x00E\x00\r\x00\x15\x00\x11\x00_\x07\xde\x01\x11\x00j\x02\xde\x01\x11\x00\xe6\x05\xde\x01\x11\x00\xc7\x02\xde\x01\x11\x00>\x02\xde\x01\x11\x00\xf9\x08\xde\x01\x11\x00\xfb\x08\xde\x01\x11\x00\xfe\x05\xde\x01\x11\x00z\t\xde\x01\x11\x00\xce\x05\xe1\x01\x11\x00\x8b\x06\xe4\x01\x11\x00\x0f\x03\xe9\x016\x00\x93\x00\xee\x01\x16\x00\x01\x00\xf2\x01\x16\x002\x00\xf2\x01P \x00\x00\x00\x00\x91\x00]\x05\xfa\x01\x01\x00\x90 \x00\x00\x00\x00\x93\x00\xef\x08\x04\x02\x03\x00\xb4#\x00\x00\x00\x00\x93\x00\xda\x08\r\x02\x04\x00\\$\x00\x00\x00\x00\x93\x00\r\t\x11\x02\x04\x00\x84&\x00\x00\x00\x00\x96\x00\x97\x06\x1a\x02\x04\x00|\'\x00\x00\x00\x00\x83\x18\xdb\x07\x06\x00\x05\x00\x84\'\x00\x00\x00\x00\x93\x08n\x07 
\x02\x05\x00\xbd\'\x00\x00\x00\x00\x93\x08\xf7\x02]\x00\x05\x00\xc4\'\x00\x00\x00\x00\x93\x08\x03\x03&\x02\x05\x00\xcc\'\x00\x00\x00\x00\x93\x08\xbf\x00-\x02\x06\x00\xe2\'\x00\x00\x00\x00\x93\x08\xe6\x00-\x02\x06\x00\xf8\'\x00\x00\x00\x00\x93\x08\xec\x01-\x02\x06\x00\x0e(\x00\x00\x00\x00\x93\x08\x8f\x01-\x02\x06\x00$(\x00\x00\x00\x00\x93\x08O\x01-\x02\x06\x00:(\x00\x00\x00\x00\x93\x08p\x01-\x02\x06\x00P(\x00\x00\x00\x00\x93\x08\x1c\x01-\x02\x06\x00f(\x00\x00\x00\x00\x93\x08\xff\x00-\x02\x06\x00|(\x00\x00\x00\x00\x93\x08\xbc\x01-\x02\x06\x00\x92(\x00\x00\x00\x00\x93\x08@\x01-\x02\x06\x00\xa8(\x00\x00\x00\x00\x93\x00\xd4\x051\x02\x06\x00\xe0(\x00\x00\x00\x00\x91\x18\xe1\x07\r\x02\x07\x00\xec(\x00\x00\x00\x00\x86\x18\xdb\x07\x06\x00\x07\x00\xf4(\x00\x00\x00\x00\x83\x00\x0b\x006\x02\x07\x00\xfc(\x00\x00\x00\x00\x83\x00<\x006\x02\x08\x00\x00\x00\x01\x00\x9d\t\x00\x00\x02\x00\x08\t\x00\x00\x01\x00\x08\t\x00\x00\x01\x00\x08\t\x00\x00\x01\x00f\x05\x00\x00\x01\x00:\t\x00\x00\x01\x00\xc3\x05\x00\x00\x01\x00\xc3\x05\t\x00\xdb\x07\x01\x00\x11\x00\xdb\x07\x06\x00\x19\x00\xdb\x07\n\x00)\x00\xdb\x07\x10\x001\x00\xdb\x07\x10\x009\x00\xdb\x07\x10\x00A\x00\xdb\x07\x10\x00I\x00\xdb\x07\x10\x00Q\x00\xdb\x07\x10\x00Y\x00\xdb\x07\x15\x00a\x00\xdb\x07\x15\x00i\x00\xdb\x07\x10\x00q\x00\xdb\x07\x10\x00y\x00\xdb\x07\x10\x00\x81\x00\xdb\x07\x10\x00\xe1\x00\xdb\x07\x06\x00\xf1\x00\xdb\x07\x1a\x00\xf9\x00\xdb\x07\x06\x00\x19\x01\xdb\x07 
\x00!\x01t\t\'\x00\x0c\x00W\x06=\x001\x01\t\x06C\x001\x01+\tG\x00\xd1\x00\xdb\x07\x1a\x009\x01Y\x02S\x00\x89\x00\xb0\x05Y\x00A\x01\xdb\x07\x1a\x00\xd1\x00\xdb\x07\x10\x00\t\x01\x1f\x03]\x001\x01\x9a\x07c\x001\x01<\tj\x001\x01\xc9\tq\x00I\x01\xf2\x05w\x001\x01\xd5\t|\x00Q\x015\t|\x001\x01b\t\xa5\x00\x14\x00\xdb\x07\xb3\x00!\x01Q\t\xb9\x00!\x01\xa6\t\xd8\x00\x1c\x00\xdb\x07\xec\x00\x1c\x00\xdb\x07\x06\x00!\x01X\t\xf7\x00!\x01\x81\t\x10\x01!\x01\xbb\t!\x011\x01\x9c\x06-\x011\x01C\t8\x01\xc9\x00\xdb\x07\x10\x00$\x00\xdb\x07\x06\x00\x1c\x00\xa2\x07I\x01,\x00h\tX\x01I\x01\xde\x02]\x01i\x015\t|\x00\xa1\x00\xdb\x07c\x01$\x00K\x02m\x01,\x00\x88\ts\x01y\x01S\x03\x06\x00\x81\x01\x8a\x00\x8a\x01\xc1\x00\xdb\x07\x90\x01\x89\x01O\x02Y\x00\xb9\x00\xdb\x07\x98\x01\x91\x01\r\x02\x10\x00\xd9\x00s\x02Y\x00\x99\x01\xd4\x02\xa6\x01\x89\x00\xdb\x07\x06\x00\xa1\x01\xa8\x02\xb1\x01\xa1\x01\xae\t\xba\x01\x01\x01\xdb\x07\xc0\x01\x01\x01\xb9\x05\xc8\x011\x01\x86\x06Y\x00)\x00\x9b\x00s\x02.\x00\x0b\x00K\x02.\x00\x13\x00T\x02.\x00\x1b\x00s\x02.\x00#\x00|\x02.\x00+\x00|\x02.\x003\x00|\x02.\x00;\x00\x82\x02.\x00C\x00\x8d\x02.\x00K\x00\xa3\x02.\x00S\x00|\x02.\x00[\x00\xcd\x02.\x00c\x00\xd3\x02.\x00k\x00\x03\x03.\x00s\x00B\x03.\x00{\x00]\x03I\x00\x9b\x00s\x02c\x00\x8b\x00\x95\x03c\x00\x93\x00\xd6\x03c\x00\x83\x00\xd6\x03\x83\x00\x83\x00\xd6\x03\xa3\x00\x83\x00\xd6\x03L\x00\x81\x00w\x01\xab\x01\xd0\x01\x03\x00\x01\x00\x00\x00r\x07;\x02\x00\x00*\x03A\x02\x00\x00\xc3\x00G\x02\x00\x00\xea\x00G\x02\x00\x00\xf0\x01G\x02\x00\x00\x93\x01G\x02\x00\x00S\x01G\x02\x00\x00t\x01G\x02\x00\x00 
\x01G\x02\x00\x00\x03\x01G\x02\x00\x00\xc0\x01G\x02\x00\x00D\x01G\x02\x02\x00\x07\x00\x03\x00\x02\x00\x08\x00\x05\x00\x01\x00\t\x00\x05\x00\x02\x00\n\x00\x07\x00\x02\x00\x0b\x00\t\x00\x02\x00\x0c\x00\x0b\x00\x02\x00\r\x00\r\x00\x02\x00\x0e\x00\x0f\x00\x02\x00\x0f\x00\x11\x00\x02\x00\x10\x00\x13\x00\x02\x00\x11\x00\x15\x00\x02\x00\x12\x00\x17\x00\x02\x00\x13\x00\x19\x007\x00\xac\x00\xe6\x00B\x01R\x01\x04\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xbe\x07\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd5\x01\x16\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00*\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd5\x01\xeb\x02\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd5\x01\x7f\x06\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00K\x08\x00\x00\x05\x00\x02\x00)\x003\x00M\x00\xd3\x00O\x003\x00U\x003\x00W\x003\x00Y\x003\x00\x00\x00\x00\x00\x00<>9__13_0\x00<GetTemplateConfigurationItems>b__13_0\x00<>9__13_1\x00<GetTemplateConfigurationItems>b__13_1\x00IEnumerable`1\x00HashSet`1\x00IList`1\x00Func`2\x00get_UTF8\x00<>9\x00<Module>\x00<PrivateImplementationDetails>\x00get_ERROR_AT_LEAST_ONE_ACTION_REQUIRED\x00get_ERROR_FILE_NOT_FOUND\x00get_ERROR_TEMPLATE_NOT_FOUND\x00get_ERROR_TEMPLATE_FOLDER_NOT_FOUND\x00get_TEXT_USAGE\x00get_ERROR_MISSING_ARGUMENT_VALUE\x00get_ERROR_MISSING_REQUIRED_ARG\x00get_ERROR_INVALID_COMMAND_LINE_ARG\x00System.IO\x00get_ERROR_TEMPLATE_REGISTER_UNREGISTER_CONFLICT\x00get_ERROR_INVALID_ARGUMENT_COUNT\x00WriteCab\x00mscorlib\x00<>c\x00System.Collections.Generic\x00baselineDesc\x00Add\x00ReadToEnd\x00GetCurrentMethod\x00siteCode\x00get_Message\x00Enumerable\x00IDisposable\x00RuntimeTypeHandle\x00GetTypeFromHandle\x00File\x00Console\x00baselineName\x00WriteLine\x00Combine\x00Type\x00System.Core\x00get_Culture\x00set_Culture\x00resourceCulture\x00get_CurrentCulture\x00MethodBase\x00ConfigurationItemBase\x00Dispose\x00EditorBrowsableState\x00CompilerGenera
tedAttribute\x00GuidAttribute\x00GeneratedCodeAttribute\x00DebuggerNonUserCodeAttribute\x00NeutralResourcesLanguageAttribute\x00DebuggableAttribute\x00EditorBrowsableAttribute\x00ComVisibleAttribute\x00AssemblyTrademarkAttribute\x00TargetFrameworkAttribute\x00AssemblyConfigurationAttribute\x00AssemblyDescriptionAttribute\x00InternalsVisibleToAttribute\x00CompilationRelaxationsAttribute\x00AssemblyProductAttribute\x00AssemblyCopyrightAttribute\x00CLSCompliantAttribute\x00AssemblyCompanyAttribute\x00RuntimeCompatibilityAttribute\x00HasValue\x00value\x00UevTemplateBaselineGenerator.exe\x00Encoding\x00System.Runtime.Versioning\x00ToString\x00GetString\x00trimmedArg\x00debug\x00ComputeStringHash\x00cabFilePath\x00GetFullPath\x00digestPath\x00get_Length\x00System.ComponentModel\x00Microsoft.Uev.CmUtil\x00Microsoft.Uev.UevCmUtil\x00get_Item\x00BaselineItem\x00ConfigurationItem\x00System\x00Trim\x00resourceMan\x00Main\x00Join\x00System.Globalization\x00ConfigurationAction\x00System.Reflection\x00CmCmdLineArgException\x00ArgumentNullException\x00ArgumentException\x00CultureInfo\x00System.Linq\x00Char\x00StreamReader\x00TextReader\x00IFormatProvider\x00templateFolder\x00get_ResourceManager\x00System.CodeDom.Compiler\x00ToLower\x00GetEnumerator\x00Microsoft.Uev.UevTemplateBaselineGenerator\x00.ctor\x00.cctor\x00System.Diagnostics\x00System.Runtime.InteropServices\x00System.Runtime.CompilerServices\x00System.Resources\x00Microsoft.Uev.UevTemplateBaselineGenerator.Properties.Resources.resources\x00DebuggingModes\x00Microsoft.Uev.UevTemplateBaselineGenerator.Properties\x00ValidateRequiredArgs\x00ParseArgs\x00unregisterArgs\x00args\x00GetTemplateConfigurationItems\x00get_Chars\x00Exists\x00Concat\x00Format\x00Object\x00Select\x00Intersect\x00Split\x00get_Current\x00Count\x00digest\x00ToList\x00MoveNext\x00System.Text\x00argIndex\x00ToArray\x00get_Assembly\x00Any\x00Directory\x00op_Equality\x00IsNullOrEmpty\x00\x00\x00\ta\x00r\x00g\x00s\x00\x00\x01\x00\x07 \x00:\x00 
\x00\x00\x0b-\x00s\x00i\x00t\x00e\x00\x01\x1f-\x00t\x00e\x00m\x00p\x00l\x00a\x00t\x00e\x00f\x00o\x00l\x00d\x00e\x00r\x00\x01\x1b-\x00b\x00a\x00s\x00e\x00l\x00i\x00n\x00e\x00n\x00a\x00m\x00e\x00\x01)-\x00b\x00a\x00s\x00e\x00l\x00i\x00n\x00e\x00d\x00e\x00s\x00c\x00r\x00i\x00p\x00t\x00i\x00o\x00n\x00\x01\x13-\x00r\x00e\x00g\x00i\x00s\x00t\x00e\x00r\x00\x01\x17-\x00u\x00n\x00r\x00e\x00g\x00i\x00s\x00t\x00e\x00r\x00\x01\x19-\x00c\x00a\x00b\x00f\x00i\x00l\x00e\x00p\x00a\x00t\x00h\x00\x01\x17-\x00d\x00i\x00g\x00e\x00s\x00t\x00p\x00a\x00t\x00h\x00\x01\r-\x00d\x00e\x00b\x00u\x00g\x00\x01\x1dT\x00e\x00m\x00p\x00l\x00a\x00t\x00e\x00F\x00o\x00l\x00d\x00e\x00r\x00\x00\tS\x00i\x00t\x00e\x00\x00\x17C\x00a\x00b\x00F\x00i\x00l\x00e\x00P\x00a\x00t\x00h\x00\x00\'R\x00e\x00g\x00i\x00s\x00t\x00e\x00r\x00|\x00U\x00n\x00r\x00e\x00g\x00i\x00s\x00t\x00e\x00r\x00\x00\x03,\x00\x00\x7fM\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00.\x00U\x00e\x00v\x00.\x00U\x00e\x00v\x00T\x00e\x00m\x00p\x00l\x00a\x00t\x00e\x00B\x00a\x00s\x00e\x00l\x00i\x00n\x00e\x00G\x00e\x00n\x00e\x00r\x00a\x00t\x00o\x00r\x00.\x00P\x00r\x00o\x00p\x00e\x00r\x00t\x00i\x00e\x00s\x00.\x00R\x00e\x00s\x00o\x00u\x00r\x00c\x00e\x00s\x00\x00EE\x00R\x00R\x00O\x00R\x00_\x00A\x00T\x00_\x00L\x00E\x00A\x00S\x00T\x00_\x00O\x00N\x00E\x00_\x00A\x00C\x00T\x00I\x00O\x00N\x00_\x00R\x00E\x00Q\x00U\x00I\x00R\x00E\x00D\x00\x00)E\x00R\x00R\x00O\x00R\x00_\x00F\x00I\x00L\x00E\x00_\x00N\x00O\x00T\x00_\x00F\x00O\x00U\x00N\x00D\x00\x009E\x00R\x00R\x00O\x00R\x00_\x00I\x00N\x00V\x00A\x00L\x00I\x00D\x00_\x00A\x00R\x00G\x00U\x00M\x00E\x00N\x00T\x00_\x00C\x00O\x00U\x00N\x00T\x00\x00=E\x00R\x00R\x00O\x00R\x00_\x00I\x00N\x00V\x00A\x00L\x00I\x00D\x00_\x00C\x00O\x00M\x00M\x00A\x00N\x00D\x00_\x00L\x00I\x00N\x00E\x00_\x00A\x00R\x00G\x00\x009E\x00R\x00R\x00O\x00R\x00_\x00M\x00I\x00S\x00S\x00I\x00N\x00G\x00_\x00A\x00R\x00G\x00U\x00M\x00E\x00N\x00T\x00_\x00V\x00A\x00L\x00U\x00E\x00\x005E\x00R\x00R\x00O\x00R\x00_\x00M\x00I\x00S\x00S\x00I\x00N\x00G\x00_\x00R\x00E\x00Q\x00
U\x00I\x00R\x00E\x00D\x00_\x00A\x00R\x00G\x00\x00?E\x00R\x00R\x00O\x00R\x00_\x00T\x00E\x00M\x00P\x00L\x00A\x00T\x00E\x00_\x00F\x00O\x00L\x00D\x00E\x00R\x00_\x00N\x00O\x00T\x00_\x00F\x00O\x00U\x00N\x00D\x00\x001E\x00R\x00R\x00O\x00R\x00_\x00T\x00E\x00M\x00P\x00L\x00A\x00T\x00E\x00_\x00N\x00O\x00T\x00_\x00F\x00O\x00U\x00N\x00D\x00\x00WE\x00R\x00R\x00O\x00R\x00_\x00T\x00E\x00M\x00P\x00L\x00A\x00T\x00E\x00_\x00R\x00E\x00G\x00I\x00S\x00T\x00E\x00R\x00_\x00U\x00N\x00R\x00E\x00G\x00I\x00S\x00T\x00E\x00R\x00_\x00C\x00O\x00N\x00F\x00L\x00I\x00C\x00T\x00\x00\x15T\x00E\x00X\x00T\x00_\x00U\x00S\x00A\x00G\x00E\x00\x00\x00\x98\t\xc2b*\x80\xf5@\x96\x1f\xe7\x1b\xa6\x93q\xbe\x00\x04 \x01\x01\x08\x03 \x00\x01\x05 \x01\x01\x11\x11\x04 \x01\x01\x0e\x04 \x01\x01\x02\x05 \x02\x01\x0e\x0e\x06 \x01\x01\x11\x80\x89\x0b\x10\x01\x01\x08\x15\x12\x80\x95\x01\x1e\x00\x03\n\x01\x0e\x05\x15\x12I\x01\x0e\x05 \x01\x13\x00\x08\x03 \x00\x08\x04 \x01\x03\x08\x06\x07\x04\x08\x08\x0e\t\x05\x00\x00\x12\x80\x9d\x03 \x00\x0e\x05\x00\x00\x12\x80\x85\x06 \x01\x0e\x12\x80\x85\x06\x00\x03\x0e\x0e\x0e\x0e\x05\x00\x02\x02\x0e\x0e\x04\x00\x01\x0e\x0e\x04\x00\x01\x02\x0e#\x07\x08\x15\x12U\x01\x0e\x15\x12U\x01\x0e\x15\x12M\x01\x0e\x15\x12M\x01\x12Q\x15\x11Y\x01\x0e\x0e\x15\x11Y\x01\x0e\x0e\x06 \x01\x1d\x0e\x1d\x03\x06\x15\x12u\x02\x0e\x0e\x05 \x02\x01\x1c\x18\x19\x10\x02\x02\x15\x12\x80\x95\x01\x1e\x01\x15\x12\x80\x95\x01\x1e\x00\x15\x12u\x02\x1e\x00\x1e\x01\x04\n\x02\x0e\x0e\r\x10\x01\x01\x1d\x1e\x00\x15\x12\x80\x95\x01\x1e\x00\x05\x15\x12U\x01\x0e\n \x01\x01\x15\x12\x80\x95\x01\x13\x00\x18\x10\x01\x02\x15\x12\x80\x95\x01\x1e\x00\x15\x12\x80\x95\x01\x1e\x00\x15\x12\x80\x95\x01\x1e\x00\x10\x10\x01\x01\x15\x12M\x01\x1e\x00\x15\x12\x80\x95\x01\x1e\x00\x0b\x10\x01\x01\x02\x15\x12\x80\x95\x01\x1e\x00\n\x00\x02\x0e\x0e\x15\x12\x80\x95\x01\x0e\t\x00\x03\x0e\x12\x80\xb1\x0e\x1d\x1c\x06\x15\x12M\x01\x12Q\x08 \x00\x15\x11Y\x01\x13\x00\x05\x15\x11Y\x01\x0e\x04 \x00\x13\x00\x05\x00\x02\x0e\x0e\x0e\t 
\x04\x01\x0e\x0e\x0e\x11\x80\xb9\x05 \x01\x01\x13\x00\x03 \x00\x02\x12\x07\x06\x15\x12M\x01\x12Q\x12]\x12a\x12e\x12i\x12m\x05\x00\x00\x12\x80\xc1\x07 \x02\x01\x0e\x12\x80\xc1\r \x05\x01\x0e\x15\x12M\x01\x12Q\x0e\x0e\x0e\x04\x00\x01\x01\x0e\x05\x07\x01\x12\x80\x81\x08\x00\x01\x12\x80\xd1\x11\x80\xd5\x05 \x00\x12\x80\xd9\x07 \x02\x01\x0e\x12\x80\xd9\x07 \x02\x0e\x0e\x12\x80\x85\x04\x07\x02\t\x08\x08\xb7z\\V\x194\xe0\x89\x02\x06\x0e\x02\x06\x02\x04\x06\x12\x80\x81\x04\x06\x12\x80\x85\x03\x06\x12\x14\x07\x06\x15\x12u\x02\x0e\x0e\t\x00\x02\x01\x08\x15\x12I\x01\x0e\x08\x00\x01\x01\x15\x12I\x01\x0e\x03\x00\x00\x01\x08\x00\x00\x15\x12M\x01\x12Q\x05\x00\x01\x01\x1d\x0e\x05\x00\x00\x12\x80\x81\x06\x00\x01\x01\x12\x80\x85\x03\x00\x00\x0e\x04\x00\x01\t\x0e\x04 \x01\x0e\x0e\x05\x08\x00\x12\x80\x81\x05\x08\x00\x12\x80\x85\x03\x08\x00\x0e\x08\x01\x00\x08\x00\x00\x00\x00\x00\x1e\x01\x00\x01\x00T\x02\x16WrapNonExceptionThrows\x01\x08\x01\x00\x02\x00\x00\x00\x00\x00\x05\x01\x00\x00\x00\x00\n\x01\x00\x05en-US\x00\x00\x15\x01\x00\x10UevCmUtil.uev.ut\x00\x00)\x01\x00$44e0f318-5faa-47f9-8ad3-1775aca4c017\x00\x00\x05\x01\x00\x01\x00\x00/\x01\x00*Microsoft (R) Windows (R) Operating System\x00\x00>\x01\x009Copyright (c) Microsoft Corporation. 
All rights reserved.\x00\x00\x1a\x01\x00\x15Microsoft Corporation\x00\x007\x01\x00\x1a.NETFramework,Version=v4.5\x01\x00T\x0e\x14FrameworkDisplayName\x00@\x01\x003System.Resources.Tools.StronglyTypedResourceBuilder\x074.0.0.0\x00\x00\x04\x01\x00\x00\x00\x00\xef\x05\x00\x00\xce\xca\xef\xbe\x01\x00\x00\x00\x91\x00\x00\x00lSystem.Resources.ResourceReader, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089#System.Resources.RuntimeResourceSet\x02\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x00PADPADP#v\x9f\x89\xe2NJ\x8c8\x92\xa4\x8e\xe7\xfcY\xa7\xfb\xab\xdf\xfd\xd1nP9Q\x92o<\x80\x10DE\xfe\x19\x82]K\xe8Vsv\x00\x00\x00=\x02\x00\x00j\x01\x00\x00I\x00\x00\x001\x01\x00\x00\xb3\x00\x00\x00\xad\x01\x00\x00\xe2\x01\x00\x00\xf4\x00\x00\x00\x00\x00\x00\x00Z\x03\x00\x00DE\x00R\x00R\x00O\x00R\x00_\x00A\x00T\x00_\x00L\x00E\x00A\x00S\x00T\x00_\x00O\x00N\x00E\x00_\x00A\x00C\x00T\x00I\x00O\x00N\x00_\x00R\x00E\x00Q\x00U\x00I\x00R\x00E\x00D\x00\x00\x00\x00\x00(E\x00R\x00R\x00O\x00R\x00_\x00F\x00I\x00L\x00E\x00_\x00N\x00O\x00T\x00_\x00F\x00O\x00U\x00N\x00D\x00;\x00\x00\x008E\x00R\x00R\x00O\x00R\x00_\x00I\x00N\x00V\x00A\x00L\x00I\x00D\x00_\x00A\x00R\x00G\x00U\x00M\x00E\x00N\x00T\x00_\x00C\x00O\x00U\x00N\x00T\x00K\x00\x00\x00<E\x00R\x00R\x00O\x00R\x00_\x00I\x00N\x00V\x00A\x00L\x00I\x00D\x00_\x00C\x00O\x00M\x00M\x00A\x00N\x00D\x00_\x00L\x00I\x00N\x00E\x00_\x00A\x00R\x00G\x00\x88\x00\x00\x008E\x00R\x00R\x00O\x00R\x00_\x00M\x00I\x00S\x00S\x00I\x00N\x00G\x00_\x00A\x00R\x00G\x00U\x00M\x00E\x00N\x00T\x00_\x00V\x00A\x00L\x00U\x00E\x00\x9a\x00\x00\x004E\x00R\x00R\x00O\x00R\x00_\x00M\x00I\x00S\x00S\x00I\x00N\x00G\x00_\x00R\x00E\x00Q\x00U\x00I\x00R\x00E\x00D\x00_\x00A\x00R\x00G\x00\xd1\x00\x00\x00>E\x00R\x00R\x00O\x00R\x00_\x00T\x00E\x00M\x00P\x00L\x00A\x00T\x00E\x00_\x00F\x00O\x00L\x00D\x00E\x00R\x00_\x00N\x00O\x00T\x00_\x00F\x00O\x00U\x00N\x00D\x00\xf0\x00\x00\x000E\x00R\x00R\x00O\x00R\x00_\x00T\x00E\x00M\x00P\x00L\x00A\x00T\x00E\x00_\x00N\x00O\x00T\x00_\x00F\x00O\x00U\x00N\x00D\x
00\x15\x01\x00\x00VE\x00R\x00R\x00O\x00R\x00_\x00T\x00E\x00M\x00P\x00L\x00A\x00T\x00E\x00_\x00R\x00E\x00G\x00I\x00S\x00T\x00E\x00R\x00_\x00U\x00N\x00R\x00E\x00G\x00I\x00S\x00T\x00E\x00R\x00_\x00C\x00O\x00N\x00F\x00L\x00I\x00C\x00T\x008\x01\x00\x00\x14T\x00E\x00X\x00T\x00_\x00U\x00S\x00A\x00G\x00E\x00\x82\x01\x00\x00\x019At least one template must be registered or unregistered.\x01\x0eFile not found\x01;Invalid argument count. Possibly missing an agrument value.\x01\x10Invalid argument\x015Command line argument is missing the argument value\r\n\x01\x1dRequired argument not found\r\n\x01#UE-V Template folder not found: {0}\x01!UE-V template file not found: {0}\x01HOne or more templates are marked for both register and unregister: {0}\r\n\x01\x90\x02Usage: UevTemplateBaselineGenerator -Site <CM Site Code> [-BaselineName <name>] [-BaselineDescription <description>] -TemplateFolder <UE-V Template Folder Path> [-Register <template1,template2,...>] [-Unregister <template1,template2,...>] -CabFilePath <full path\\file.cab>\x00\x00\x00\x00\x00\x00\x00\x00\x00\x92t\xa8\xb0\x00\x00\x00\x00\x02\x00\x00\x009\x00\x00\x00\xb8H\x00\x00\xb8*\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00RSDS\x15\xb4T\xc7\x1b\xd8\x8fC\xab\xd9A\xe4\xaf\x89n\xa2\x01\x00\x00\x00UevTemplateBaselineGenerator.pdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8eI\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80I\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00_CorExeMain\x00mscoree.dll\x00\x00\x00\x00\x00\xff%\x00 
@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x10\x00\x00\x00 \x00\x00\x80\x18\x00\x00\x00P\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x008\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00h\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\xa8\x04\x00\x00\x90`\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x044\x00\x00\x00V\x00S\x00_\x00V\x00E\x00R\x00S\x00I\x00O\x00N\x00_\x00I\x00N\x00F\x00O\x00\x00\x00\x00\x00\xbd\x04\xef\xfe\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00D\x00\x00\x00\x01\x00V\x00a\x00r\x00F\x00i\x00l\x00e\x00I\x00n\x00f\x00o\x00\x00\x00\x00\x00$\x00\x04\x00\x00\x00T\x00r\x00a\x00n\x00s\x00l\x00a\x00t\x00i\x00o\x00n\x00\x00\x00\x00\x00\x00\x00\xb0\x04x\x03\x00\x00\x01\x00S\x00t\x00r\x00i\x00n\x00g\x00F\x00i\x00l\x00e\x00I\x00n\x00f\x00o\x00\x00\x00T\x03\x00\x00\x01\x000\x000\x000\x000\x000\x004\x00b\x000\x00\x00\x00\x1a\x00\x01\x00\x01\x00C\x00o\x00m\x00m\x00e\x00n\x00t\x00s\x00\x00\x00\x00\x00\x00\x00L\x00\x16\x00\x01\x00C\x00o\x00m\x00p\x00a\x00n\x00y\x00N\x00a\x00m\x00e\x00\x00\x00\x00\x00M\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00 
\x00C\x00o\x00r\x00p\x00o\x00r\x00a\x00t\x00i\x00o\x00n\x00\x00\x00,\x00\x02\x00\x01\x00F\x00i\x00l\x00e\x00D\x00e\x00s\x00c\x00r\x00i\x00p\x00t\x00i\x00o\x00n\x00\x00\x00\x00\x00 \x00\x00\x000\x00\x08\x00\x01\x00F\x00i\x00l\x00e\x00V\x00e\x00r\x00s\x00i\x00o\x00n\x00\x00\x00\x00\x000\x00.\x000\x00.\x000\x00.\x000\x00\x00\x00b\x00!\x00\x01\x00I\x00n\x00t\x00e\x00r\x00n\x00a\x00l\x00N\x00a\x00m\x00e\x00\x00\x00U\x00e\x00v\x00T\x00e\x00m\x00p\x00l\x00a\x00t\x00e\x00B\x00a\x00s\x00e\x00l\x00i\x00n\x00e\x00G\x00e\x00n\x00e\x00r\x00a\x00t\x00o\x00r\x00.\x00e\x00x\x00e\x00\x00\x00\x00\x00\x98\x00:\x00\x01\x00L\x00e\x00g\x00a\x00l\x00C\x00o\x00p\x00y\x00r\x00i\x00g\x00h\x00t\x00\x00\x00C\x00o\x00p\x00y\x00r\x00i\x00g\x00h\x00t\x00 \x00(\x00c\x00)\x00 \x00M\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00 \x00C\x00o\x00r\x00p\x00o\x00r\x00a\x00t\x00i\x00o\x00n\x00.\x00 \x00A\x00l\x00l\x00 \x00r\x00i\x00g\x00h\x00t\x00s\x00 \x00r\x00e\x00s\x00e\x00r\x00v\x00e\x00d\x00.\x00\x00\x00*\x00\x01\x00\x01\x00L\x00e\x00g\x00a\x00l\x00T\x00r\x00a\x00d\x00e\x00m\x00a\x00r\x00k\x00s\x00\x00\x00\x00\x00\x00\x00\x00\x00j\x00!\x00\x01\x00O\x00r\x00i\x00g\x00i\x00n\x00a\x00l\x00F\x00i\x00l\x00e\x00n\x00a\x00m\x00e\x00\x00\x00U\x00e\x00v\x00T\x00e\x00m\x00p\x00l\x00a\x00t\x00e\x00B\x00a\x00s\x00e\x00l\x00i\x00n\x00e\x00G\x00e\x00n\x00e\x00r\x00a\x00t\x00o\x00r\x00.\x00e\x00x\x00e\x00\x00\x00\x00\x00v\x00+\x00\x01\x00P\x00r\x00o\x00d\x00u\x00c\x00t\x00N\x00a\x00m\x00e\x00\x00\x00\x00\x00M\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00 \x00(\x00R\x00)\x00 \x00W\x00i\x00n\x00d\x00o\x00w\x00s\x00 \x00(\x00R\x00)\x00 \x00O\x00p\x00e\x00r\x00a\x00t\x00i\x00n\x00g\x00 \x00S\x00y\x00s\x00t\x00e\x00m\x00\x00\x00\x00\x004\x00\x08\x00\x01\x00P\x00r\x00o\x00d\x00u\x00c\x00t\x00V\x00e\x00r\x00s\x00i\x00o\x00n\x00\x00\x000\x00.\x000\x00.\x000\x00.\x000\x00\x00\x008\x00\x08\x00\x01\x00A\x00s\x00s\x00e\x00m\x00b\x00l\x00y\x00 
\x00V\x00e\x00r\x00s\x00i\x00o\x00n\x00\x00\x000\x00.\x000\x00.\x000\x00.\x000\x00\x00\x00\xb8d\x00\x00\x93\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\xbb\xbf<?xml version="1.0" encoding="utf-8"?>\r\n<asmv1:assembly manifestVersion="1.0" xmlns="urn:schemas-microsoft-com:asm.v1" xmlns:asmv1="urn:schemas-microsoft-com:asm.v1" xmlns:asmv2="urn:schemas-microsoft-com:asm.v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\r\n <assemblyIdentity version="1.0.0.0" name="UevTemplateBaselineGenerator.app"/>\r\n <trustInfo xmlns="urn:schemas-microsoft-com:asm.v2">\r\n <security>\r\n <requestedPrivileges xmlns="urn:schemas-microsoft-com:asm.v3">\r\n <requestedExecutionLevel level="requireAdministrator" uiAccess="false" />\r\n </requestedPrivileges>\r\n </security>\r\n </trustInfo>\r\n</asmv1:assembly>\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x0c\x00\x00\x00\xa09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
* 2
)
# Demo plaintext "secrets": the full text of "The Raven" repeated five
# times. Used by test() below as a large, low-entropy payload to compare
# Shannon-entropy scores before/after RC4 encryption and entropy-encoding.
secrets = (
    """
The Raven
By Edgar Allan Poe
Once upon a midnight dreary, while I pondered, weak and weary,
Over many a quaint and curious volume of forgotten lore—
While I nodded, nearly napping, suddenly there came a tapping,
As of some one gently rapping, rapping at my chamber door.
“’Tis some visitor,” I muttered, “tapping at my chamber door—
Only this and nothing more.”
Ah, distinctly I remember it was in the bleak December;
And each separate dying ember wrought its ghost upon the floor.
Eagerly I wished the morrow;—vainly I had sought to borrow
From my books surcease of sorrow—sorrow for the lost Lenore—
For the rare and radiant maiden whom the angels name Lenore—
Nameless here for evermore.
And the silken, sad, uncertain rustling of each purple curtain
Thrilled me—filled me with fantastic terrors never felt before;
So that now, to still the beating of my heart, I stood repeating
“’Tis some visitor entreating entrance at my chamber door—
Some late visitor entreating entrance at my chamber door;—
This it is and nothing more.”
Presently my soul grew stronger; hesitating then no longer,
“Sir,” said I, “or Madam, truly your forgiveness I implore;
But the fact is I was napping, and so gently you came rapping,
And so faintly you came tapping, tapping at my chamber door,
That I scarce was sure I heard you”—here I opened wide the door;—
Darkness there and nothing more.
Deep into that darkness peering, long I stood there wondering, fearing,
Doubting, dreaming dreams no mortal ever dared to dream before;
But the silence was unbroken, and the stillness gave no token,
And the only word there spoken was the whispered word, “Lenore?”
This I whispered, and an echo murmured back the word, “Lenore!”—
Merely this and nothing more.
Back into the chamber turning, all my soul within me burning,
Soon again I heard a tapping somewhat louder than before.
“Surely,” said I, “surely that is something at my window lattice;
Let me see, then, what thereat is, and this mystery explore—
Let my heart be still a moment and this mystery explore;—
’Tis the wind and nothing more!”
Open here I flung the shutter, when, with many a flirt and flutter,
In there stepped a stately Raven of the saintly days of yore;
Not the least obeisance made he; not a minute stopped or stayed he;
But, with mien of lord or lady, perched above my chamber door—
Perched upon a bust of Pallas just above my chamber door—
Perched, and sat, and nothing more.
Then this ebony bird beguiling my sad fancy into smiling,
By the grave and stern decorum of the countenance it wore,
“Though thy crest be shorn and shaven, thou,” I said, “art sure no craven,
Ghastly grim and ancient Raven wandering from the Nightly shore—
Tell me what thy lordly name is on the Night’s Plutonian shore!”
Quoth the Raven “Nevermore.”
Much I marvelled this ungainly fowl to hear discourse so plainly,
Though its answer little meaning—little relevancy bore;
For we cannot help agreeing that no living human being
Ever yet was blessed with seeing bird above his chamber door—
Bird or beast upon the sculptured bust above his chamber door,
With such name as “Nevermore.”
But the Raven, sitting lonely on the placid bust, spoke only
That one word, as if his soul in that one word he did outpour.
Nothing farther then he uttered—not a feather then he fluttered—
Till I scarcely more than muttered “Other friends have flown before—
On the morrow he will leave me, as my Hopes have flown before.”
Then the bird said “Nevermore.”
Startled at the stillness broken by reply so aptly spoken,
“Doubtless,” said I, “what it utters is its only stock and store
Caught from some unhappy master whom unmerciful Disaster
Followed fast and followed faster till his songs one burden bore—
Till the dirges of his Hope that melancholy burden bore
Of ‘Never—nevermore’.”
But the Raven still beguiling all my fancy into smiling,
Straight I wheeled a cushioned seat in front of bird, and bust and door;
Then, upon the velvet sinking, I betook myself to linking
Fancy unto fancy, thinking what this ominous bird of yore—
What this grim, ungainly, ghastly, gaunt, and ominous bird of yore
Meant in croaking “Nevermore.”
This I sat engaged in guessing, but no syllable expressing
To the fowl whose fiery eyes now burned into my bosom’s core;
This and more I sat divining, with my head at ease reclining
On the cushion’s velvet lining that the lamp-light gloated o’er,
But whose velvet-violet lining with the lamp-light gloating o’er,
She shall press, ah, nevermore!
Then, methought, the air grew denser, perfumed from an unseen censer
Swung by Seraphim whose foot-falls tinkled on the tufted floor.
“Wretch,” I cried, “thy God hath lent thee—by these angels he hath sent thee
Respite—respite and nepenthe from thy memories of Lenore;
Quaff, oh quaff this kind nepenthe and forget this lost Lenore!”
Quoth the Raven “Nevermore.”
“Prophet!” said I, “thing of evil!—prophet still, if bird or devil!—
Whether Tempter sent, or whether tempest tossed thee here ashore,
Desolate yet all undaunted, on this desert land enchanted—
On this home by Horror haunted—tell me truly, I implore—
Is there—is there balm in Gilead?—tell me—tell me, I implore!”
Quoth the Raven “Nevermore.”
“Prophet!” said I, “thing of evil!—prophet still, if bird or devil!
By that Heaven that bends above us—by that God we both adore—
Tell this soul with sorrow laden if, within the distant Aidenn,
It shall clasp a sainted maiden whom the angels name Lenore—
Clasp a rare and radiant maiden whom the angels name Lenore.”
Quoth the Raven “Nevermore.”
“Be that word our sign of parting, bird or fiend!” I shrieked, upstarting—
“Get thee back into the tempest and the Night’s Plutonian shore!
Leave no black plume as a token of that lie thy soul hath spoken!
Leave my loneliness unbroken!—quit the bust above my door!
Take thy beak from out my heart, and take thy form from off my door!”
Quoth the Raven “Nevermore.”
And the Raven, never flitting, still is sitting, still is sitting
On the pallid bust of Pallas just above my chamber door;
And his eyes have all the seeming of a demon’s that is dreaming,
And the lamp-light o’er him streaming throws his shadow on the floor;
And my soul from out that shadow that lies floating on the floor
Shall be lifted—nevermore!
""".encode()
    * 5
)
def generate_entropy_encoding() -> Tuple[bytearray, Dict[int, bytearray]]:
    """
    Build a random substitution table that lowers the Shannon entropy
    of Base32 payloads.

    Returns a tuple of:
      - a header (bytearray): each Base32 symbol in random order, followed
        by 2-5 repeats of a randomly chosen "noise" byte, with the unused
        noise bytes appended at the end;
      - the encoding table (dict): Base32 symbol -> bytearray of the bytes
        that may stand for it (the symbol itself plus the repeated noise
        byte, weighted by its repeat count).
    """
    pool_base32 = bytearray(b"01234567ABCDEFGHIJKLMNOPQRSTUVWXYZ=")
    pool_noise = bytearray(
        b"89abcdefghijklmnopqrstuvwxyz!\"#$%&'()*+,-./:;<>?@[\\]^_`{|}~ \t\n\r"
    )
    header = bytearray()
    table = defaultdict(bytearray)
    while pool_base32:
        # Draw the next Base32 symbol without replacement.
        symbol = choice(pool_base32)
        pool_base32.remove(symbol)
        header.append(symbol)
        table[symbol].append(symbol)
        # Pair it with a unique noise byte, repeated 2-5 times so the
        # decoder can recognise the pairing.
        filler = choice(pool_noise)
        pool_noise.remove(filler)
        repeats = randint(2, 5)
        padding = bytes([filler]) * repeats
        header.extend(padding)
        table[symbol].extend(padding)
    # The 28 never-used noise bytes trail the header; the decoder skips them.
    return header + pool_noise, table
def generate_entropy_encoding2() -> Tuple[bytearray, Dict[int, bytearray]]:
    """
    This function generates an encoding to bypass entropy checks.

    + Very difficult to identify
    - Entropy score is higher
    - Longer to encode/decode

    Returns a tuple of (header bytearray, encoding dict). Each header
    entry is: Base32 symbol, then two distinct substitute bytes — one
    appearing once and one repeated 2-5 times, in a random order.
    Both substitutes map back to the symbol in the encoding dict.
    """
    base32_characters = bytearray(b"01234567ABCDEFGHIJKLMNOPQRSTUVWXYZ=")
    random_characters = bytearray(
        b"89abcdefghijklmnopqrstuvwxyz!\"#$%&'()*+,-./:;<>?@[\\]^_`{|}~ \t\n\r"
    )
    data = bytearray()
    encoding = defaultdict(bytearray)
    while base32_characters:
        # Draw a Base32 symbol without replacement.
        character = choice(base32_characters)
        base32_characters.remove(character)
        data.append(character)
        # First substitute: appears exactly once in the header entry.
        choice1 = choice(random_characters)
        random_characters.remove(choice1)
        encoding[character].append(choice1)
        # Second substitute: repeated 2-5 times below.
        choice2 = choice(random_characters)
        random_characters.remove(choice2)
        # Randomly place the single substitute before or after the run,
        # which is what makes this variant harder to fingerprint.
        order = randint(0, 1)
        if order:
            data.append(choice1)
        for _ in range(randint(2, 5)):
            data.append(choice2)
            encoding[character].append(choice2)
        if not order:
            data.append(choice1)
        # Recycle the consumed Base32 symbol into the substitute pool so
        # the pool never runs dry (35 entries need 70 substitutes).
        random_characters.append(character)
    return data, encoding
def get_entropy_encoding(data: bytes) -> bytes:
    """
    Recover the decoding table from a version-1 entropy-encoded stream.

    Parses 35 header entries (Base32 symbol + a run of one repeated noise
    byte each), building a byte -> Base32-symbol map with 70 entries, then
    skips the 28 unused noise bytes that trail the header.

    Returns a tuple of (remaining payload bytes, decoding table).
    """
    table = {}
    offset = 0
    while len(table) < 70:
        symbol = data[offset]
        filler = data[offset + 1]
        # Both the symbol and its noise byte decode back to the symbol.
        table[symbol] = symbol
        table[filler] = symbol
        # Skip the remainder of the filler run (2-5 repeats in total).
        offset += 2
        while offset < len(data) and data[offset] == filler:
            offset += 1
    # 28 unused noise bytes separate the header from the payload.
    return data[offset + 28:], table
def get_entropy_encoding2(data: bytes) -> bytes:
    """
    This function returns the encoding to decode entropy-encoding.

    Version-2 counterpart of get_entropy_encoding: each header entry is a
    Base32 symbol followed by two substitutes — one single byte and one
    repeated run — in either order. Returns (payload bytes, decode table).
    """
    encoding = {}
    # 35 entries x 2 substitutes = 70 table entries.
    while len(encoding) < 70:
        character_base = data[0]
        character1 = data[1]
        character2 = data[2]
        index = 3
        # If the two bytes after the symbol are equal, the repeated run
        # came first: advance until the single (different) substitute is
        # found. Either way character1/character2 end up as the two
        # distinct substitutes.
        while character1 == character2:
            character2 = data[index]
            index += 1
        encoding[character1] = character_base
        encoding[character2] = character_base
        # Consume the rest of the entry: skip any trailing repeats of
        # character2, then re-anchor `data` on the first differing byte.
        for character in data[index:]:
            if character != character2:
                data = data[index:]
                break
            index += 1
    # NOTE(review): if the first payload byte happens to equal the last
    # entry's character2 it would be consumed by the scan above —
    # entropy_encode may emit either substitute, so this looks possible;
    # confirm against the encoder before relying on version 2.
    return data, encoding
def entropy_encode(data: bytes, version: int = 1) -> bytes:
    """
    Encode *data* so it evades naive entropy checks.

    The payload is Base32-encoded, then each symbol is replaced by one of
    its randomly generated substitutes; the substitution header produced
    by the generator is prepended. `version` selects the header format
    (1 or 2). Returns a bytearray (header + substituted payload).
    """
    if version == 1:
        prefix, table = generate_entropy_encoding()
    else:
        prefix, table = generate_entropy_encoding2()
    # Pick a (weighted) random substitute for every Base32 symbol.
    prefix.extend(choice(table[symbol]) for symbol in b32encode(data))
    return prefix
def entropy_encode2(data: bytes) -> bytes:
    """Encode *data* with the version-2 entropy encoding (see entropy_encode)."""
    return entropy_encode(data, 2)
def entropy_decode(data: bytes, version: int = 1) -> bytes:
    """
    Decode an entropy-encoded stream back to the original bytes.

    Recovers the substitution table from the stream header, maps every
    payload byte back to its Base32 symbol, then Base32-decodes the
    result. `version` must match the version used to encode (1 or 2).
    """
    if version == 1:
        payload, table = get_entropy_encoding(data)
    else:
        payload, table = get_entropy_encoding2(data)
    return b32decode(bytearray(table[symbol] for symbol in payload))
def entropy_decode2(data: bytes) -> bytes:
    """Decode version-2 entropy-encoded data (see entropy_decode)."""
    return entropy_decode(data, 2)
def shannon_entropy(data: bytes) -> float:
    """
    Return the Shannon entropy of *data* in bits per byte.

    Greater entropy = more randomness.
    Max entropy: 8.0 (uniformly distributed bytes)
    Min entropy: 0.0 (a single repeated byte, or empty input)
    """
    # Empty input carries no information; return early so the
    # probability division below never sees a zero length.
    if not data:
        return 0.0
    # Counter(data) counts byte frequencies in one C-level pass instead
    # of the manual increment loop.
    data_length = len(data)
    entropy = 0.0
    for count in Counter(data).values():
        p = count / data_length
        entropy -= p * log(p, 2)
    return entropy
def test():
    """
    Demonstrate the entropy encodings end to end.

    Downloads the FastRC4 library into the working directory, prints
    Shannon entropy for plaintext, RC4-encrypted, and entropy-encoded
    data (both versions), asserts the encode/decode round-trips, then
    deletes the downloaded files.

    Side effects: network access, files written/removed in the CWD.
    """
    # For test i use librc4 to encrypt data to get higher entropy:
    # - https://github.com/mauricelambert/FastRC4/releases/download/v0.0.1/librc4.dll
    # - https://github.com/mauricelambert/FastRC4/releases/download/v0.0.1/librc4.so
    # - https://raw.githubusercontent.com/mauricelambert/FastRC4/main/librc4.py
    from urllib.request import urlopen
    from shutil import copyfileobj
    from os import remove, name

    downloads = {
        "librc4.dll": "https://github.com/mauricelambert/FastRC4/releases/download/v0.0.1/librc4.dll",
        "librc4.so": "https://github.com/mauricelambert/FastRC4/releases/download/v0.0.1/librc4.so",
        "librc4.py": "https://raw.githubusercontent.com/mauricelambert/FastRC4/main/librc4.py",
    }
    for filename, url in downloads.items():
        # `with` guarantees the handle is closed even if the download fails.
        with open(filename, "wb") as file:
            copyfileobj(urlopen(url), file)

    from librc4 import RC4

    rc4 = RC4(b"my secret key")
    print("Entropy for non-encoded secrets:", shannon_entropy(secrets))
    encrypted_data = rc4.encrypt(secrets)
    print(
        "Entropy for non-encoded encrypted secrets:",
        shannon_entropy(encrypted_data),
    )
    data = entropy_encode(encrypted_data)
    print(
        "Entropy for entropy-encoded encrypted secrets:", shannon_entropy(data)
    )
    assert encrypted_data == entropy_decode(data)
    data = entropy_encode2(encrypted_data)
    print(
        "Entropy for entropy-encoded2 encrypted secrets:",
        shannon_entropy(data),
    )
    assert encrypted_data == entropy_decode2(data)

    # Fresh instance: RC4 is a stream cipher, so reusing the old object
    # would continue the previous keystream and break comparability.
    rc4 = RC4(b"my secret key")
    print("Entropy for non-encoded exe:", shannon_entropy(dll))
    encrypted_data = rc4.encrypt(dll)
    print(
        "Entropy for non-encoded encrypted exe:",
        shannon_entropy(encrypted_data),
    )
    data = entropy_encode(encrypted_data)
    print("Entropy for entropy-encoded encrypted exe:", shannon_entropy(data))
    assert encrypted_data == entropy_decode(data)
    data = entropy_encode2(encrypted_data)
    print("Entropy for entropy-encoded2 encrypted exe:", shannon_entropy(data))
    assert encrypted_data == entropy_decode2(data)

    if name == "nt":
        # BUG FIX: FreeLibrary expects an HMODULE handle, not a path
        # string — the original passed ".\\librc4.dll", which cannot
        # unload the DLL and would leave the file locked on Windows.
        # Resolve the loaded module's handle first.
        # NOTE(review): assumes librc4.py loads the DLL as "librc4.dll";
        # confirm the module name against librc4's loader.
        from ctypes import windll, wintypes

        kernel32 = windll.kernel32
        kernel32.GetModuleHandleW.restype = wintypes.HMODULE
        kernel32.GetModuleHandleW.argtypes = [wintypes.LPCWSTR]
        kernel32.FreeLibrary.argtypes = [wintypes.HMODULE]
        handle = kernel32.GetModuleHandleW("librc4.dll")
        if handle:
            kernel32.FreeLibrary(handle)
    remove("librc4.py")
    remove("librc4.so")
    remove("librc4.dll")
# Run the demonstration only when executed as a script.
# FIX: stripped the "| PypiClean" extraction artifact that was fused onto
# the call line and broke the file's syntax.
if __name__ == "__main__":
    test()
$('#help').css('display', 'none');
function endsWith(str, suffix) {
    // ES5-safe String.prototype.endsWith: true when `str` terminates
    // with `suffix` (an empty suffix always matches).
    var expected = str.length - suffix.length;
    return expected >= 0 && str.indexOf(suffix, expected) === expected;
}
function historyBack(){
    // Navigate one entry back in the browser session history.
    window.history.back();
}
function file_icons(){
    // NOTE(review): dead stub — builds a local icon map, never returns
    // or uses it, and the "bluray" entry is empty. Looks unfinished;
    // confirm whether this can be removed or should return the map.
    var file_icons = {
        "bluray": ""
    }
}
function errorBox(errors){
    // Render a Bootstrap "danger" alert listing each error prefixed by
    // its numeric index.
    var rows = errors.map(function(message, index){
        return '<b>' + index + ':</b> ' + message + '<br>';
    });
    return '<div class="alert alert-danger">' + rows.join('') + '</div>';
}
function required_input(id){
    // Flash the element with DOM id `id` (fade out, then back in after
    // 300ms) to draw attention to a required field.
    $('#' + id).fadeTo(300, 0.3);
    setTimeout(function(){
        // Re-query in case the element was replaced meanwhile.
        $('#' + id).fadeTo(200, 1);
    }, 300);
}
function change_uri(uri){
    // Rewrite the address bar without reloading the page.
    window.history.pushState("", "", uri);
}
function goto_uri(uri){
    // Full navigation to `uri` (equivalent to assigning location.href).
    window.location.assign(uri);
}
function check_form(show_errors){
    // Collect values from all elements carrying a `data-req` attribute.
    // Returns an {id: value} object when valid; otherwise flashes the
    // offending inputs and (optionally) renders the errors into #errorbox.
    //   show_errors - when truthy, display validation messages.
    var warnings = [];
    var data = {};
    $('body *').each(function(){
        var $this = $(this);
        if($this.attr('data-req')){
            var id = $this.attr('id');
            var text = $this.html();
            if($this.attr('data-req') == 'yes' && text == 'Empty'){
                warnings.push('Property \'' + id + '\' cannot be empty.');
                required_input(id);
            }
            else{
                data[id] = text;
            }
        }
    });
    if(warnings.length == 0){
        return data;
    }
    else{
        // BUG FIX: referenced undefined `show_alerts` instead of the
        // `show_errors` parameter, throwing a ReferenceError whenever
        // validation failed.
        if(show_errors) $('#errorbox').html(errorBox(warnings));
    }
}
function chart_browse_pie_filedistribution_spawn(target, data, source_name) {
    // Spawn a Highcharts half-circle pie chart of a file distribution.
    //   target      - selector/element the chart is rendered into
    //   data        - Highcharts pie series data
    //   source_name - series name shown in the tooltip
    // Returns the jQuery object wrapping the chart container.
    var c = $(target).highcharts({
        chart: {
            plotBackgroundColor: null,
            plotBorderWidth: 0,
            plotShadow: false,
            // Collapse all outer spacing so the chart fills its container.
            margin: [0, 0, 0, 0],
            spacingTop: 0,
            spacingBottom: 0,
            spacingLeft: 0,
            spacingRight: 0,
            reflow: false
        },
        title: {
            text: '',
            align: 'center',
            verticalAlign: 'middle',
            y: -116
        },
        tooltip: {
            // Percentage with one decimal, e.g. "videos: 42.0%".
            pointFormat: '{series.name}: <b>{point.percentage:.1f}%</b>'
        },
        plotOptions: {
            pie: {
                size: '100%',
                dataLabels: {
                    enabled: true,
                    // Negative distance draws labels inside the slices.
                    distance: -40,
                    style: {
                        fontWeight: 'bold',
                        color: 'white',
                        textShadow: '0px 1px 2px black'
                    }
                },
                // -90..90 degrees renders the top half-circle only.
                startAngle: -90,
                endAngle: 90,
                center: ['50%', '58%']
            }
        },
        credits: {
            enabled: false
        },
        series: [{
            type: 'pie',
            name: source_name,
            // innerSize 0% = solid pie (no donut hole).
            innerSize: '0%',
            data: data
        }]
    });
    return c;
}
function gets(){
    // Debug helper: log the serialized filter form.
    // BUG FIX: `.serialize` was not invoked (missing parentheses), so the
    // original logged the function object instead of the form data.
    console.log($('#form_filter').serialize());
}
// Serialize a form into a plain object keyed by input name.
// Repeated names are collected into an array; first occurrences with an
// empty value are skipped entirely.
$.fn.serializeObject = function()
{
    var o = {};
    var a = this.serializeArray();
    $.each(a, function() {
        if (o[this.name] !== undefined) {
            // Name seen before: promote the existing scalar to an array
            // (anything without .push is still a scalar string).
            if (!o[this.name].push) {
                o[this.name] = [o[this.name]];
            }
            // NOTE(review): falsy repeated values fall back to the
            // literal 'x2' — looks like a placeholder/sentinel; confirm
            // the intended meaning before relying on it.
            o[this.name].push(this.value || 'x2');
        } else {
            // First occurrence: only keep non-empty values.
            if(this.value){
                o[this.name] = this.value;
            }
        }
    });
    return o;
};
// Prefix a path with the application root, avoiding a doubled slash.
// FIX: removed the "| PypiClean" extraction artifact that trailed the
// closing brace and broke the file's syntax.
function url_for(inp){
    if(inp.startsWith("/")) inp = inp.slice(1);
    return `${APPLICATION_ROOT}${inp}`;
}
import re  # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
    """Import ValidationProblemSource lazily and publish it at module scope."""
    from MergePythonSDK.accounting.model.validation_problem_source import (
        ValidationProblemSource,
    )
    globals()["ValidationProblemSource"] = ValidationProblemSource
class ErrorValidationProblem(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'title': (str,), # noqa: E501
'detail': (str,), # noqa: E501
'problem_type': (str,), # noqa: E501
'source': (ValidationProblemSource, none_type,), # noqa: E501
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'title': 'title', # noqa: E501
'detail': 'detail', # noqa: E501
'problem_type': 'problem_type', # noqa: E501
'source': 'source', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, title, detail, problem_type, *args, **kwargs): # noqa: E501
"""ErrorValidationProblem - a model defined in OpenAPI
Args:
title (str):
detail (str):
problem_type (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
source (ValidationProblemSource): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.title = title
self.detail = detail
self.problem_type = problem_type
self.source = kwargs.get("source", None)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, title, detail, problem_type, *args, **kwargs): # noqa: E501
"""ErrorValidationProblem - a model defined in OpenAPI
Args:
title (str):
detail (str):
problem_type (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
source (ValidationProblemSource): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.title: Union[str] = title
self.detail: Union[str] = detail
self.problem_type: Union[str] = problem_type
self.source: Union["ValidationProblemSource"] = kwargs.get("source", None) | PypiClean |
/OctoPrint-1.9.2.tar.gz/OctoPrint-1.9.2/src/octoprint/vendor/sockjs/tornado/router.py | from __future__ import absolute_import, division, print_function, unicode_literals
"""
sockjs.tornado.router
~~~~~~~~~~~~~~~~~~~~~
SockJS protocol router implementation.
"""
from tornado import ioloop, version_info
from octoprint.vendor.sockjs.tornado import transports, session, sessioncontainer, static, stats, proto
# Default router configuration. A copy is taken per SockJSRouter instance and
# individual keys can be overridden via its ``user_settings`` argument.
DEFAULT_SETTINGS = {
    # Sessions check interval in seconds
    'session_check_interval': 1,
    # Session expiration in seconds
    'disconnect_delay': 5,
    # Heartbeat time in seconds. Do not change this value unless
    # you are absolutely sure that the new value will work.
    'heartbeat_delay': 25,
    # Enabled protocols
    'disabled_transports': [],
    # SockJS location
    'sockjs_url': 'https://cdn.jsdelivr.net/sockjs/0.3/sockjs.min.js',
    # Max response body size
    'response_limit': 128 * 1024,
    # Enable or disable JSESSIONID cookie handling
    'jsessionid': True,
    # Should sockjs-tornado flush messages immediately or queue them and
    # flush on next ioloop tick
    'immediate_flush': True,
    # Enable or disable Nagle for persistent transports
    'disable_nagle': True,
    # Enable IP checks for polling transports. If enabled, all subsequent
    # polling calls should be from the same IP address.
    'verify_ip': True,
    # list of allowed origins for websocket connections
    # or "*" - accept all websocket connections
    'websocket_allow_origin': "*"
}

# Handlers shared by every session URL (message submission endpoints).
GLOBAL_HANDLERS = [
    ('xhr_send', transports.XhrSendHandler),
    ('jsonp_send', transports.JSONPSendHandler)
]

# Mapping of SockJS transport name -> Tornado handler class.
TRANSPORTS = {
    'websocket': transports.WebSocketTransport,
    'xhr': transports.XhrPollingTransport,
    'xhr_streaming': transports.XhrStreamingTransport,
    'jsonp': transports.JSONPTransport,
    'eventsource': transports.EventSourceTransport,
    'htmlfile': transports.HtmlFileTransport
}

# Static/utility endpoints mounted directly under the router prefix.
STATIC_HANDLERS = {
    '/chunking_test': static.ChunkingTestHandler,
    '/info': static.InfoHandler,
    '/iframe[0-9-.a-z_]*.html': static.IFrameHandler,
    '/websocket': transports.RawWebSocketTransport,
    '/?': static.GreetingsHandler
}
class SockJSRouter(object):
    """SockJS protocol router.

    Builds the list of Tornado URL specs for all enabled SockJS transports
    plus the static endpoints, and owns the session container shared by
    those transport handlers.
    """

    def __init__(self,
                 connection,
                 prefix='',
                 user_settings=None,
                 io_loop=None,
                 session_kls=None):
        """Constructor.

        `connection`
            SockJSConnection class
        `prefix`
            Connection prefix
        `user_settings`
            Optional settings dictionary; entries override DEFAULT_SETTINGS.
        `io_loop`
            Optional IOLoop instance
        `session_kls`
            Optional session class used instead of ``session.Session``.
        """
        # TODO: Version check
        if version_info[0] < 2:
            raise Exception('sockjs-tornado requires Tornado 2.0 or higher.')

        # Store connection class
        self._connection = connection

        # Initialize io_loop
        self.io_loop = io_loop or ioloop.IOLoop.instance()

        # Settings: start from a copy of the defaults, then apply overrides.
        # NOTE: ``user_settings`` defaults to None instead of ``{}`` to avoid
        # the shared mutable-default-argument pitfall; a falsy value simply
        # leaves the defaults untouched, matching the previous behavior.
        self.settings = DEFAULT_SETTINGS.copy()
        if user_settings:
            self.settings.update(user_settings)

        self.websockets_enabled = 'websocket' not in self.settings['disabled_transports']
        self.cookie_needed = self.settings['jsessionid']

        # Sessions
        self._session_kls = session_kls if session_kls else session.Session
        self._sessions = sessioncontainer.SessionContainer()

        # PeriodicCallback expects milliseconds.
        check_interval = self.settings['session_check_interval'] * 1000
        self._sessions_cleanup = ioloop.PeriodicCallback(self._sessions.expire,
                                                         check_interval)
        self._sessions_cleanup.start()

        # Stats
        self.stats = stats.StatsCollector()

        # Base URL pattern: <prefix>/<server_id>/<session_id>
        base = prefix + r'/[^/.]+/(?P<session_id>[^/.]+)'

        # Generate global handler URLs (message submission endpoints)
        self._transport_urls = [('%s/%s$' % (base, p[0]), p[1], {"server": self})
                                for p in GLOBAL_HANDLERS]

        for k, v in TRANSPORTS.items():
            if k in self.settings['disabled_transports']:
                continue

            # Only version 1 is supported
            self._transport_urls.append(
                (r'%s/%s$' % (base, k),
                 v,
                 {"server": self})
                )

        # Generate static URLs
        self._transport_urls.extend([('%s%s' % (prefix, k), v, {"server": self})
                                     for k, v in STATIC_HANDLERS.items()])

    @property
    def urls(self):
        """List of the URLs to be added to the Tornado application"""
        return self._transport_urls

    def apply_routes(self, routes):
        """Feed list of the URLs to the routes list. Returns list"""
        routes.extend(self._transport_urls)
        return routes

    def create_session(self, session_id, register=True):
        """Creates new session object and returns it.

        `session_id`
            Session id to assign to the new session
        `register`
            Should the session be registered in the storage. Websockets
            don't need it.
        """
        # TODO: Possible optimization here for settings.get
        s = self._session_kls(self._connection,
                              self,
                              session_id,
                              self.settings.get('disconnect_delay')
                              )
        if register:
            self._sessions.add(s)

        return s

    def get_session(self, session_id):
        """Get session by session id

        `session_id`
            Session id
        """
        return self._sessions.get(session_id)

    def get_connection_class(self):
        """Return associated connection class"""
        return self._connection

    # Broadcast helper
    def broadcast(self, clients, msg):
        """Optimized `broadcast` implementation. Depending on type of the
        session, will json-encode the message once and call either
        `send_message` or `send_jsonified`.

        `clients`
            Clients iterable
        `msg`
            Message to send
        """
        # Lazily encode the message only if at least one session needs the
        # pre-encoded JSON form.
        json_msg = None
        count = 0

        for c in clients:
            sess = c.session
            if not sess.is_closed:
                if sess.send_expects_json:
                    if json_msg is None:
                        json_msg = proto.json_encode(msg)
                    sess.send_jsonified(json_msg, False)
                else:
                    sess.send_message(msg, stats=False)

                count += 1

        self.stats.on_pack_sent(count)
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/hris/model/time_off_status_enum.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
from MergePythonSDK.shared.model_utils import MergeEnumType
class TimeOffStatusEnum(ModelNormal, MergeEnumType):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Closed set of legal values for the single ``value`` attribute.
    allowed_values = {
        ('value',): {
            'REQUESTED': "REQUESTED",
            'APPROVED': "APPROVED",
            'DECLINED': "DECLINED",
            'CANCELLED': "CANCELLED",
            'DELETED': "DELETED",
        },
    }

    # No extra validations (lengths, ranges, regex) beyond the enum itself.
    validations = {
    }

    # Undeclared (additional) properties are not accepted on this model.
    additional_properties_type = None

    # The enum value itself may not be null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        defined_types = {
            'value': (str,),
        }
        return defined_types

    @cached_property
    def discriminator():
        # Enum models carry no discriminator.
        return None

    # No attribute-name -> JSON-key renaming is needed for this model.
    attribute_map = {
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, value, *args, **kwargs):  # noqa: E501
        """TimeOffStatusEnum - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        # Pop framework bookkeeping arguments before treating the remaining
        # kwargs as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so deserialization controls attribute setup itself.
        self = super(OpenApiModel, cls).__new__(cls)

        # Positional arguments are accepted only as dicts of extra kwargs.
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        return self

    # Instance attributes that bypass the model's normal attribute handling.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, value, *args, **kwargs):  # noqa: E501
        """TimeOffStatusEnum - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        # Pop framework bookkeeping arguments before treating the remaining
        # kwargs as model properties.  NOTE: unlike _from_openapi_data,
        # _spec_property_naming defaults to False here (user-constructed
        # instances use pythonic names).
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Positional arguments are accepted only as dicts of extra kwargs.
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
/ACME%20oneM2M%20CSE-0.3.0.tar.gz/ACME oneM2M CSE-0.3.0/acme/GroupManager.py |
from Logging import Logging
from Constants import Constants as C
import CSE, Utils
from resources import FCNT, MgmtObj
class GroupManager(object):
    """Manages oneM2M <grp> (group) resources: validates group membership and
    handles fan-out requests addressed to a group's <fopt> (fanOutPoint).

    Most methods follow the project convention of returning a
    (result, responseCode) tuple, where ``result`` is falsy on failure.
    """

    def __init__(self):
        # Add delete event handler because we like to monitor the resources in mid
        CSE.event.addHandler(CSE.event.deleteResource, self.handleDeleteEvent)
        Logging.log('GroupManager initialized')

    def shutdown(self):
        # Nothing to release; just log the shutdown.
        Logging.log('GroupManager shut down')

    #########################################################################

    def validateGroup(self, group, originator):
        """Validate a group resource: member types vs. the group's member
        type / consistency strategy, originator privileges, and the maximum
        number of members (mnm).  Returns (success, responseCode)."""

        # Get consistencyStrategy
        csy = group.csy

        # Check member types and group set type
        # Recursive for sub groups, if .../fopt. Check privileges of originator
        if not (res := self._checkMembersAndPrivileges(group, group.mt, group.csy, group.spty, originator))[0]:
            return res

        # Check for max members
        if group.hasAttribute('mnm'):  # only if mnm attribute is set
            try:  # mnm may not be a number
                if len(group.mid) > int(group.mnm):
                    return (False, C.rcMaxNumberOfMemberExceeded)
            except ValueError:
                return (False, C.rcInvalidArguments)

        # TODO: check virtual resources
        return (True, C.rcOK)

    def _checkMembersAndPrivileges(self, group, mt, csy, spty, originator):
        """Validate (recursively, for sub-groups addressed via /fopt) each
        member of `group`: existence, originator RETRIEVE privilege,
        specialization type (spty) and member type consistency (csy).
        Side effect: rewrites group['mid'] with duplicates removed and
        updates group['cnm'].  Returns (success, responseCode)."""

        # check for duplicates and remove them
        midsList = []  # contains the real (deduplicated) mid list
        for mid in group['mid']:
            # get the resource and check it
            id = mid[:-5] if (hasFopt := mid.endswith('/fopt')) else mid  # remove /fopt to retrieve the resource
            if (r := CSE.dispatcher.retrieveResource(id))[0] is None:
                return (False, C.rcNotFound)
            resource = r[0]

            # skip if ri is already in the list
            if (ri := resource.ri) in midsList:
                continue

            # check privileges
            if not CSE.security.hasAccess(originator, resource, C.permRETRIEVE):
                return (False, C.rcReceiverHasNoPrivileges)

            # if it is a group + fopt, then recursively check members
            if (ty := resource.ty) == C.tGRP and hasFopt:
                if not (res := self._checkMembersAndPrivileges(resource, mt, csy, spty, originator))[0]:
                    return res
                ty = resource.mt  # set the member type to the group's member type

            # check specializationType spty
            if spty is not None:
                if isinstance(spty, int):  # mgmtobj type
                    if isinstance(resource, MgmtObj.MgmtObj) and ty != spty:
                        return (False, C.rcGroupMemberTypeInconsistent)
                elif isinstance(spty, str):  # fcnt specialization
                    if isinstance(resource, FCNT.FCNT) and resource.cnd != spty:
                        return (False, C.rcGroupMemberTypeInconsistent)

            # check type of resource and member type of group
            if not (mt == C.tMIXED or ty == mt):  # types don't match
                if csy == C.csyAbandonMember:  # abandon member
                    continue
                elif csy == C.csySetMixed:  # change group's member type
                    mt = C.tMIXED
                    group['mt'] = C.tMIXED
                else:  # abandon group
                    return (False, C.rcGroupMemberTypeInconsistent)

            # member seems to be ok, so add ri to the list
            midsList.append(ri if not hasFopt else ri + '/fopt')  # restore fopt for ri

        group['mid'] = midsList  # replace with a cleaned up mid
        group['cnm'] = len(midsList)

        return (True, C.rcOK)

    def foptRequest(self, operation, fopt, request, id, originator, ct=None, ty=None):
        """ Handle requests to a fanOutPoint.
        This method might be called recursively, when there are groups in groups.

        Fans the request out to every member of the parent group and returns
        an aggregated (m2m:agr) response together with an operation-specific
        success code."""

        # get parent / group
        group = fopt.retrieveParentResource()
        if group is None:
            return (None, C.rcNotFound)

        # get the rqi header field
        (_, _, _, rqi, _) = Utils.getRequestHeaders(request)

        # check whether there is something after the /fopt ...
        # NOTE: the else-branch reuses the throwaway `_` names bound above,
        # effectively yielding tail == ''.
        (_, _, tail) = id.partition('/fopt/') if '/fopt/' in id else (_, _, '')
        Logging.logDebug('Adding additional path elements: %s' % tail)

        # walk through all members
        result = []
        tail = '/' + tail if len(tail) > 0 else ''  # add remaining path, if any
        for mid in group.mid:
            # Try to get the SRN and add the tail
            if (srn := Utils.structuredPathFromRI(mid)) is not None:
                mid = srn + tail
            else:
                mid = mid + tail
            # Invoke the request; abort the fan-out on the first failure.
            if operation == C.opRETRIEVE:
                if (res := CSE.dispatcher.handleRetrieveRequest(request, mid, originator))[0] is None:
                    return res
            elif operation == C.opCREATE:
                if (res := CSE.dispatcher.handleCreateRequest(request, mid, originator, ct, ty))[0] is None:
                    return res
            elif operation == C.opUPDATE:
                if (res := CSE.dispatcher.handleUpdateRequest(request, mid, originator, ct))[0] is None:
                    return res
            elif operation == C.opDELETE:
                if (res := CSE.dispatcher.handleDeleteRequest(request, mid, originator))[1] != C.rcDeleted:
                    return res
            else:
                return (None, C.rcOperationNotAllowed)
            result.append(res)

        # construct aggregated response
        if len(result) > 0:
            items = []
            for r in result:
                item = {'rsc': r[1],
                        'rqi': rqi,
                        'pc': r[0].asJSON(),
                        'to': r[0].__srn__
                        }
                items.append(item)
            rsp = {'m2m:rsp': items}
            agr = {'m2m:agr': rsp}
        else:
            agr = {}

        # Different "ok" results per operation
        return (agr, [C.rcOK, C.rcCreated, C.rcUpdated, C.rcDeleted][operation])

    #########################################################################

    def handleDeleteEvent(self, deletedResource):
        """Handle a delete event. Check whether the deleted resource is a member
        of a group. If yes, remove the member and update the member count."""

        ri = deletedResource.ri
        groups = CSE.storage.searchByTypeFieldValue(C.tGRP, 'mid', ri)
        for group in groups:
            group['mid'].remove(ri)
            group['cnm'] = group.cnm - 1
            CSE.storage.updateResource(group)
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/@types/node/README.md | # Installation
> `npm install --save @types/node`
# Summary
This package contains type definitions for Node.js (https://nodejs.org/).
# Details
Files were exported from https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/node/v18.
### Additional Details
* Last updated: Tue, 25 Jul 2023 19:02:56 GMT
* Dependencies: none
* Global values: `AbortController`, `AbortSignal`, `__dirname`, `__filename`, `console`, `exports`, `gc`, `global`, `module`, `process`, `require`, `structuredClone`
# Credits
These definitions were written by [Microsoft TypeScript](https://github.com/Microsoft), [DefinitelyTyped](https://github.com/DefinitelyTyped), [Alberto Schiabel](https://github.com/jkomyno), [Alvis HT Tang](https://github.com/alvis), [Andrew Makarov](https://github.com/r3nya), [Benjamin Toueg](https://github.com/btoueg), [Chigozirim C.](https://github.com/smac89), [David Junger](https://github.com/touffy), [Deividas Bakanas](https://github.com/DeividasBakanas), [Eugene Y. Q. Shen](https://github.com/eyqs), [Hannes Magnusson](https://github.com/Hannes-Magnusson-CK), [Huw](https://github.com/hoo29), [Kelvin Jin](https://github.com/kjin), [Klaus Meinhardt](https://github.com/ajafff), [Lishude](https://github.com/islishude), [Mariusz Wiktorczyk](https://github.com/mwiktorczyk), [Mohsen Azimi](https://github.com/mohsen1), [Nicolas Even](https://github.com/n-e), [Nikita Galkin](https://github.com/galkin), [Parambir Singh](https://github.com/parambirs), [Sebastian Silbermann](https://github.com/eps1lon), [Simon Schick](https://github.com/SimonSchick), [Thomas den Hollander](https://github.com/ThomasdenH), [Wilco Bakker](https://github.com/WilcoBakker), [wwwy3y3](https://github.com/wwwy3y3), [Samuel Ainsworth](https://github.com/samuela), [Kyle Uehlein](https://github.com/kuehlein), [Thanik Bhongbhibhat](https://github.com/bhongy), [Marcin Kopacz](https://github.com/chyzwar), [Trivikram Kamat](https://github.com/trivikr), [Junxiao Shi](https://github.com/yoursunny), [Ilia Baryshnikov](https://github.com/qwelias), [ExE Boss](https://github.com/ExE-Boss), [Piotr Błażejewicz](https://github.com/peterblazejewicz), [Anna Henningsen](https://github.com/addaleax), [Victor Perin](https://github.com/victorperin), [Yongsheng Zhang](https://github.com/ZYSzys), [NodeJS Contributors](https://github.com/NodeJS), [Linus Unnebäck](https://github.com/LinusU), [wafuwafu13](https://github.com/wafuwafu13), [Matteo Collina](https://github.com/mcollina), and [Dmitry 
Semigradsky](https://github.com/Semigradsky).
| PypiClean |
/FukuML-0.4.1.tar.gz/FukuML-0.4.1/README.rst | `FukuML`_
=========
.. _FukuML: http://www.fukuball.com/fuku-ml/
.. image:: https://travis-ci.org/fukuball/fuku-ml.svg?branch=master
:target: https://travis-ci.org/fukuball/fuku-ml
.. image:: https://codecov.io/github/fukuball/fuku-ml/coverage.svg?branch=master
:target: https://codecov.io/github/fukuball/fuku-ml?branch=master
.. image:: https://badge.fury.io/py/FukuML.svg
:target: https://badge.fury.io/py/FukuML
.. image:: https://api.codacy.com/project/badge/grade/afc87eff27ab47d6b960ea7b3088c469
:target: https://www.codacy.com/app/fukuball/fuku-ml
.. image:: https://img.shields.io/badge/made%20with-%e2%9d%a4-ff69b4.svg
:target: http://www.fukuball.com
Simple machine learning library / 簡單易用的機器學習套件
Installation
============
.. code-block:: bash
$ pip install FukuML
Tutorial
============
- Lesson 1: `Perceptron Binary Classification Learning Algorithm`_
- Appendix 1: `Play With Your Own Dataset`_
- Appendix 2: `iNDIEVOX Open Data/API 智慧音樂應用:An Introduce to iNDIEVOX Open Data/API and the intelligent music application`_
.. _Perceptron Binary Classification Learning Algorithm: https://github.com/fukuball/FukuML-Tutorial/blob/master/Perceptron%20Binary%20Classification%20Learning%20Algorithm%20Tutorial.ipynb
.. _Play With Your Own Dataset: https://github.com/fukuball/FukuML-Tutorial/blob/master/Play%20With%20Your%20Own%20Dataset%20Tutorial.ipynb
.. _iNDIEVOX Open Data/API 智慧音樂應用:An Introduce to iNDIEVOX Open Data/API and the intelligent music application: https://speakerdeck.com/fukuball/api-and-the-intelligent-music-application
Algorithm
============
- Perceptron
- Perceptron Binary Classification Learning Algorithm
- Perceptron Multi Classification Learning Algorithm
- Pocket Perceptron Binary Classification Learning Algorithm
- Pocket Perceptron Multi Classification Learning Algorithm
- Regression
- Linear Regression Learning Algorithm
- Linear Regression Binary Classification Learning Algorithm
- Linear Regression Multi Classification Learning Algorithm
- Ridge Regression Learning Algorithm
- Ridge Regression Binary Classification Learning Algorithm
- Ridge Regression Multi Classification Learning Algorithm
- Kernel Ridge Regression Learning Algorithm
- Kernel Ridge Regression Binary Classification Learning Algorithm
- Kernel Ridge Regression Multi Classification Learning Algorithm
- Logistic Regression
- Logistic Regression Learning Algorithm
- Logistic Regression Binary Classification Learning Algorithm
- Logistic Regression One vs All Multi Classification Learning Algorithm
- Logistic Regression One vs One Multi Classification Learning Algorithm
- L2 Regularized Logistic Regression Learning Algorithm
- L2 Regularized Logistic Regression Binary Classification Learning Algorithm
- Kernel Logistic Regression Learning Algorithm
- Support Vector Machine
- Primal Hard Margin Support Vector Machine Binary Classification Learning Algorithm
- Dual Hard Margin Support Vector Machine Binary Classification Learning Algorithm
- Polynomial Kernel Support Vector Machine Binary Classification Learning Algorithm
- Gaussian Kernel Support Vector Machine Binary Classification Learning Algorithm
- Soft Polynomial Kernel Support Vector Machine Binary Classification Learning Algorithm
- Soft Gaussian Kernel Support Vector Machine Binary Classification Learning Algorithm
- Polynomial Kernel Support Vector Machine Multi Classification Learning Algorithm
- Gaussian Kernel Support Vector Machine Multi Classification Learning Algorithm
- Soft Polynomial Kernel Support Vector Machine Multi Classification Learning Algorithm
- Soft Gaussian Kernel Support Vector Machine Multi Classification Learning Algorithm
- Probabilistic Support Vector Machine Learning Algorithm
- Least Squares Support Vector Machine Binary Classification Learning Algorithm
- Least Squares Support Vector Machine Multi Classification Learning Algorithm
- Support Vector Regression Learning Algorithm
- Decision Tree
- Decision Stump Binary Classification Learning Algorithm
- AdaBoost Stump Binary Classification Learning Algorithm
- AdaBoost Decision Tree Classification Learning Algorithm
- Gradient Boost Decision Tree Regression Learning Algorithm
- Decision Tree Classification Learning Algorithm
- Decision Tree Regression Learning Algorithm
- Random Forest Classification Learning Algorithm
- Random Forest Regression Learning Algorithm
- Neural Network
- Neural Network Learning Algorithm
- Neural Network Binary Classification Learning Algorithm
- Accelerator
- Linear Regression Accelerator
- Feature Transform
- Polynomial Feature Transform
- Legendre Feature Transform
- Validation
- 10 Fold Cross Validation
- Blending
- Uniform Blending for Classification
- Linear Blending for Classification
- Uniform Blending for Regression
- Linear Blending for Regression
Usage
============
.. code-block:: py
>>> import numpy as np
    # we need numpy as a base library
>>> import FukuML.PLA as pla
# import FukuML.PLA to do Perceptron Learning
>>> your_input_data_file = '/path/to/your/data/file'
# assign your input data file, please check the data format: https://github.com/fukuball/fuku-ml/blob/master/FukuML/dataset/pla_binary_train.dat
>>> pla_bc = pla.BinaryClassifier()
# new a PLA binary classifier
>>> pla_bc.load_train_data(your_input_data_file)
# load train data
>>> pla_bc.set_param()
# set parameter
>>> pla_bc.init_W()
# init the W
>>> W = pla_bc.train()
# train by Perceptron Learning Algorithm to find best W
>>> test_data = 'Each feature of data x separated with spaces. And the ground truth y put in the end of line separated by a space'
# assign test data, format like this '0.97681 0.10723 0.64385 ........ 0.29556 1'
>>> prediction = pla_bc.prediction(test_data)
# prediction by trained W
>>> print prediction['input_data_x']
# print test data x
>>> print prediction['input_data_y']
# print test data y
>>> print prediction['prediction']
# print the prediction, will find out prediction is the same as pla_bc.test_data_y
For detail, please check https://github.com/fukuball/fuku-ml/blob/master/doc/sample_code.rst
Tests
=========
.. code-block:: shell
python test_fuku_ml.py
PEP8
=========
.. code-block:: shell
pep8 FukuML/*.py --ignore=E501
Donate
=========
If you find fuku-ml useful, please consider a donation. Thank you!
.. image:: https://pledgie.com/campaigns/33186.png?skin_name=chrome
:target: https://pledgie.com/campaigns/33186
License
=========
The MIT License (MIT)
Copyright (c) 2016 fukuball
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. | PypiClean |
/KitronikPicoSmartAirQuality-1.0.2.tar.gz/KitronikPicoSmartAirQuality-1.0.2/README.md | Other Languages: [FRANCAIS](README_FR.md)
# Kitronik-Pico-Smart-Air-Quality-Board-MicroPython

A module and sample code for the Kitronik Smart Air Quality Board for the Raspberry Pi Pico. (www.kitronik.co.uk/5336)
To use, save the PicoAirQuality.py file onto the Pico so it can be imported.
There are several classes within the module for accessing and controlling the different board features.
## Import PicoAirQuality.py and construct instances of the different classes:
```python
from PicoAirQuality import KitronikBME688, KitronikOLED, KitronikRTC, KitronikZIPLEDs, KitronikBuzzer, KitronikDataLogger, KitronikOutputControl, KitronikButton
bme688 = KitronikBME688() # Class for using the BME688 air quality and environmental sensor
oled = KitronikOLED() # Class for using the OLED display screen
rtc = KitronikRTC() # Class for using the built-in Pico Real-Time Clock (RTC)
zipleds = KitronikZIPLEDs(3) # Class for using the ZIP LEDs (on-board and external connections)
buzzer = KitronikBuzzer() # Class for using the piezo buzzer
log = KitronikDataLogger("data_log.txt", "semicolon") # Class for using the built-in Pico file system for data logging
output = KitronikOutputControl() # Class for using the high-power and servo outputs
buttons = KitronikButton() # Class for using the input buttons
```
Below are explanations of the functions available in each class.
## KitronikBME688
The BME688 sensor is the main feature of the board, enabling measurements of:
* Temperature
* Pressure
* Humidity
* Index of Air Quality (IAQ)
* Estimated CO2 (eCO2)
Class instantiation reads and sets up all the calibration parameters for different calculations, creates all the class variables and sets up the default settings on the BME688 sensor.
After this process, the temperature, pressure and humidity sensors will be able to be used immediately, but the gas sensor - which provides the IAQ and eCO2 outputs - needs some further setup:
```python
bme688.setupGasSensor()
bme688.calcBaselines()
```
These functions set the temperature and 'on' time for the gas sensor plate, and then run a 5 minute process measuring gas resistance and ambient temperature and then calculating mean values - these are then used as baselines for future calculations.
To actually run the measurement process, call:
```python
bme688.measureData()
```
This function will take readings from all the sensor outputs and run any required compensation calculations, but further functions are needed for extracting the final values in a useable format:
```python
bme688.readTemperature(temperature_unit="C") # The default unit is degC, but can be changed to degF by calling "F".
bme688.readPressure(pressure_unit="Pa") # The default unit is Pascals ("Pa"), but can be changed to millibar by calling "mBar".
bme688.readHumidity() # Outputs relative humidity as a %.
bme688.readeCO2() # An estimated value based on gas resistance, temperature and humidity - outputs a value in parts per million (ppm).
bme688.getAirQualityPercent() # A relative value based on gas resistance and humidity - outputs as a %.
bme688.getAirQualityScore() # A relative value based on gas resistance and humidity - outputs as a value on a 0 - 500 scale, where 0 = Excellent and 500 = Extremely Poor
```
## KitronikOLED
The OLED screen provides a way for adding useful visual outputs to programs and projects. The screen can display text, numbers, lines, shapes and simple graphs.
Class instantiation sets up the default settings for the screen, including the correct orientation for the Pico being at the bottom edge of the board.
To display text (or numbers converted to strings with the 'str(*numbers*)'):
```python
oled.displayText(text, line, x_offset=0)
```
The text can be displayed on any of the 6 lines (1 - 6), with up to 16 characters per line. Text will default to start showing on the left edge of the screen, but by setting an 'x_offset' by a certain number of pixels it can start anywhere across the screen (the screen is 128px wide).
**Note:** If the text is too long for a line, the end characters will be cut off, they will not move down to the next line.
To make the text actually appear, another function needs to be called:
```python
oled.show()
```
This function needs to be called to make all changes visible, so must be called after:
* displayText()
* drawLine()
* drawRect()
* plot()
* clear()
To draw a line from a starting (x, y) coordinate to an end (x, y) coordinate:
```python
oled.drawLine(start_x, start_y, end_x, end_y)
```
**Note:** The screen is 128px wide by 64px height.
To draw a rectangle:
```python
oled.drawRect(start_x, start_y, width, height, fill=False)
```
The starting (x, y) coordinate is always the top left corner, then a width and height in pixels can be set. By default the rectangle will just be a border, but by setting 'fill' to **True**, the rectangle will be completely filled in.
To plot a simple, single variable graph:
```python
oled.plot(variable)
```
If the function is called repeatedly (for example, in a 'while True' loop) then a variable (such as a sensor reading) can be plotted live on the OLED screen. The top line is left free for adding other text or graphics.
To clear the screen (removing the display data from the software buffer):
```python
oled.clear()
```
To turn off the screen, without losing the display data:
```python
oled.poweroff()
```
To turn the screen on, returning to whatever was previously displayed (or something new):
```python
oled.poweron()
```
To alter the contrast of the displayed graphics with the background (i.e. make things brighter or darker):
```python
oled.contrast(contrast) # 0 = Dim to 150 = Bright
```
To invert the display colours (switch the background and graphic display colours):
```python
oled.invert(invert) # 0 = White on black, 1 = Black on white
```
## KitronikRTC
The Pico has an onboard RTC (Real-Time Clock) which has a very simple user interface enabling the setting or reading of the date and time.
The KitronikRTC class expands this functionality, allowing separate setting of date and time, reading the date and time out as strings, reading individual date/time parameters and the ability to set alarms
Set the date and time:
```python
rtc.setDate(day, month, year)
rtc.setTime(hours, minutes, seconds)
```
Read the date and time as strings:
```python
rtc.readDateString() # DD/MM/YY
rtc.readTimeString() # HH:MM:SS
```
Read individual date or time parameters:
```python
rtc.readParameter(parameter)
```
'parameter' can be:
* 'd' = Day
* 'm' = Month
* 'y' = Year
* 'h' = Hour
* 'min' = Minute
* 's' = Second
Set an alarm:
```python
rtc.setAlarm(hour, minute)
```
Check whether an alarm time condition has been met - this function returns 'True' if the alarm is triggered:
```python
rtc.checkAlarm()
```
Stop the alarm triggering once the time condition has been met:
```python
rtc.silenceAlarm()
```
## KitronikZIPLEDs
ZIP LEDs have a 2 stage operation...
### Setup ZIP LEDs:
Set the LEDs with the colour required:
```python
zipleds.setLED(whichLED, whichColour)
```
where:
* whichLED => 0-2 for onboard ZIP LEDs (if further ZIP LEDs are connected to the ZIP LED extension, the full number will need to be included in the class instantiation)
* whichColour => tuple of (Red Value, Green Value, Blue Value), or one of the pre-defined colours:
```python
COLOURS = (BLACK, RED, YELLOW, GREEN, CYAN, BLUE, PURPLE, WHITE)
```
Turn off the LEDs:
```python
zipleds.clear(whichLED)
```
where:
* whichLED => 0-2 for onboard ZIP LEDs
Control the brightness:
```python
zipleds.setBrightness(value)
```
where:
* value => 0-100 (brightness value in %)
### Make the changes visible:
```python
zipleds.show()
```
## KitronikBuzzer
The piezo buzzer on the board can play single frequency tones, with the pitch and tone length controlled by the following functions.
Play a continuous tone at a set frequency (in the range 30Hz to 3kHz):
```python
buzzer.playTone(freq)
```
Play a tone at a set frequency for a set length of time (in milliseconds):
```python
buzzer.playTone_Length(freq, length)
```
Stop the current tone sounding:
```python
buzzer.stopTone()
```
## KitronikDataLogger
The data logging functionality is one of the key features of the board as it enables the data captured from the sensors to be saved for later manipulation and analysis.
On class instantiation, a file is created with the chosen name and a chosen separator between the data fields is assigned:
```python
log = KitronikDataLogger(filename, separator)
```
There are three options for the data separator:
* "comma" = '**,**'
* "semicolon" = '**;**'
* "tab" = ' '
There are two functions which are used to setup the data log file with some extra information:
```python
log.writeProjectInfo(line1, line2, line3)
log.nameColumnHeadings(field1, field2, field3, field4, field5, field6, field7, field8, field9, field10)
```
The first writes up to three user-entered free text fields (if only two arguments are given, only two lines will be written).
The second allows the user to include up to 10 data field headings which then need to be matched to the order of the data fields in the data entry (these headings will become column headings if the data is imported to a spreadsheet program).
With these sections included, the start of a log file will look something like this:
```
Kitronik Data Logger - Pico Smart Air Quality Board - www.kitronik.co.uk
Name: User Name
Subject: Project 1
Date;Time;Temperature;Pressure;Humidity;Soil Moisture;IAQ;eCO2;
```
To actually save data to the log file, use the following function:
```python
log.storeDataEntry(field1, field2, field3, field4, field5, field6, field7, field8, field9, field10)
```
**Note:** Data needs to be entered in string format (numbers can be easily converted with the 'str(*number*)' function).
There are 10 data fields available per data entry, which allows, for example: Date, Time, Temperature, Pressure, Humidity, IAQ, eCO2 + 3 others (e.g. external sensors).
There is a maximum file size of 500kB for the log file to make sure there is always enough space on the Pico flash. During the process of saving the data to the file, if the file will exceed the maximum size, the earliest data entry will be deleted to make space for the newest one.
There are two options for deleting data stored on the Pico.
The log file contents can be erased:
```python
log.eraseAllData()
```
Or the log file itself can be deleted:
```python
log.deleteDataFile()
```
## KitronikOutputControl
### Servo:
The servo PWM (20ms repeat, on period capped between 500 and 2500us) is driven using the Pico PIO.
To register a servo ready to be used:
```python
output.registerServo()
```
This process sets the PIO PWM active on the servo pin (**Note:** The servo is registered by default).
To control the movement of a servo, turning it to a set angle (or controlling the speed/direction of a continuous rotation servo):
```python
output.servoToPosition(degrees)
```
To control the movement of the servo we can also set it using radians from 0 to 3.1416 (Pi to four digits).
``` python
output.servoToRadians(radians)
```
If the pin is needed for another purpose it can be 'deregistered' which sets the PIO to inactive:
```python
output.deregisterServo()
```
### High-Power Outputs:
The high-power outputs on the board are controlled via two pins on the Pico: GP3 and GP15.
The control of these outputs is very simple, either setting them to be **ON** or **OFF**:
```python
output.highPowerOn(pin)
output.highPowerOff(pin)
```
(*pin* is either '3' or '15')
## KitronikButton
On class instantiation, two buttons ('buttonA' and 'buttonB') are created which can then be accessed and used in the main program file.
One method of utilising the buttons is with interrupts (IRQs) and interrupt handlers - some examples are shown below.
### Button IRQ:
```python
buttons.buttonA.irq(trigger=machine.Pin.IRQ_RISING, handler=ButtonA_IRQHandler)
buttons.buttonB.irq(trigger=machine.Pin.IRQ_RISING, handler=ButtonB_IRQHandler)
```
### Button IRQ Handler:
```python
def ButtonA_IRQHandler(pin):
oled.clear()
bme688.measureData()
oled.displayText(rtc.readDateString(), 1)
oled.displayText(rtc.readTimeString(), 2)
oled.displayText("T: " + str(bme688.readTemperature()), 4)
oled.displayText("IAQ: " + str(bme688.getAirQualityScore()), 5)
oled.displayText("eCO2: " + str(bme688.readeCO2()), 6)
oled.show()
def ButtonB_IRQHandler(pin):
oled.clear()
oled.show()
```
# Troubleshooting
This code is designed to be used as a module.
See: https://kitronik.co.uk/blogs/resources/modules-micro-python-and-the-raspberry-pi-pico for more information.
| PypiClean |
/Neodroid-0.4.9-py36-none-any.whl/samples/gui_client/gui_components/motion_view_list.py | __author__ = "Christian Heider Nielsen"
from kivy.adapters.dictadapter import DictAdapter
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.listview import CompositeListItem, ListItemButton, ListItemLabel, ListView
integers_dict = {str(i): {"text": str(i), "is_selected": False} for i in range(100)}
class MotionViewList(GridLayout):
    """Two-column grid combining a composite list view of motions with
    Step/Reset buttons.

    The list rows come from the module-level demo ``integers_dict``.
    NOTE(review): this uses kivy's long-deprecated ``adapters``/``listview``
    modules, which require an old kivy version.
    """

    def __init__(self, **kwargs):
        kwargs["cols"] = 2
        super().__init__(**kwargs)

        def args_converter(row_index, row_data):
            # Build the constructor kwargs for one CompositeListItem row:
            # three labels (start / middle / end) plus a trailing button.
            return {
                "text": row_data["text"],
                "size_hint_y": None,
                "height": 25,
                "cls_dicts": [
                    {
                        "cls": ListItemLabel,
                        "kwargs": {
                            "text": row_data["text"],
                            "is_representing_cls": True,
                        },
                    },
                    {
                        "cls": ListItemLabel,
                        "kwargs": {
                            "text": f"Middle-{row_data['text']}",
                            "is_representing_cls": True,
                        },
                    },
                    {
                        "cls": ListItemLabel,
                        "kwargs": {
                            "text": f"End-{row_data['text']}",
                            "is_representing_cls": True,
                        },
                    },
                    {"cls": ListItemButton, "kwargs": {"text": row_data["text"]}},
                ],
            }

        item_strings = [f"{index}" for index in range(100)]

        dict_adapter = DictAdapter(
            sorted_keys=item_strings,
            data=integers_dict,
            args_converter=args_converter,
            selection_mode="single",
            allow_empty_selection=False,
            cls=CompositeListItem,
        )

        self.list_view = ListView(adapter=dict_adapter)
        self.step_button = Button(text="Step")
        self.reset_button = Button(text="Reset")
        # self.spacer = Label()
        self.assemble_components()

    def assemble_components(self):
        """Wire the button callbacks and add all child widgets; returns self."""
        self.step_button.bind(on_release=self.on_step_button)
        self.reset_button.bind(on_release=self.on_reset_button)
        self.add_widget(self.list_view)
        self.add_widget(self.step_button)
        self.add_widget(self.reset_button)
        # self.add_widget(self.spacer)
        return self

    def on_step_button(self, *args):
        """Step-button handler (hook for subclasses; default does nothing).

        BUG FIX: kivy passes the released button instance to handlers bound
        with ``bind(on_release=...)``; the previous zero-argument signature
        raised TypeError when the button was released.  ``*args`` keeps the
        method callable with no arguments as well.
        """
        pass

    def on_reset_button(self, *args):
        """Reset-button handler; see on_step_button for the signature fix."""
        pass
if __name__ == "__main__":
from kivy.base import runTouchApp
runTouchApp(MotionViewList(width=800)) | PypiClean |
/LabtoolSuite-0.1.3.tar.gz/LabtoolSuite-0.1.3/Labtools/docs/custom_widgets.py | import sip,os
os.environ['QT_API'] = 'pyqt'
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
# Import the core and GUI elements of Qt
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import interface
from widgets.sliding import Ui_Form as Ui_Sliding
from widgets.clicking import Ui_Form as Ui_Clicking
from widgets.clickingOptions import Ui_Form as Ui_ClickingOptions
class CustomWidgets:
    """Collection of Qt widget factories for the Labtools GUI (Python 2 / PyQt4).

    Each ``widget_*`` method constructs a small control panel (a slider,
    button or combo-box frame generated from the Qt Designer ``Ui_*``
    classes) wired to one feature of the lab interface hardware, and hands
    it to ``self.updateWidgetBay(...)``.

    NOTE(review): ``updateWidgetBay`` is not defined in this class -- it is
    presumably provided by the window class this mixin is combined with;
    confirm against the caller.  Likewise ``functools`` (used by
    ``assignCommand``) is not imported in this module's visible header --
    confirm it is available at runtime.
    """

    # Parent window reference; never assigned within the visible code.
    parent=None

    def __init__(self):
        print "widgets imported"
        # Shared handle to the lab hardware interface.
        self.I=interface.Interface()

    def newWidget(self,widget_type,**args):
        """Instantiate ``widget_type`` with ``**args``, apply the optional
        helper keys ``object_name``, ``text`` and ``items``, add the widget
        to the widget bay and return it.

        NOTE(review): the full ``args`` dict -- including the helper-only
        keys handled below -- is also forwarded to the widget constructor;
        confirm the widget classes used tolerate the extra keywords.
        """
        b=widget_type(**args)
        if(args.has_key('object_name')): b.setObjectName(args.get('object_name'))
        if(args.has_key('text')): b.setText(args.get('text'))
        if(args.has_key('items')):
            for a in args.get('items'): b.addItem(a)
        self.updateWidgetBay(b)
        return b

    def assignCommand(self,widget,signal,slot,*args):
        """Connect Qt *signal* on *widget* to *slot*, pre-binding *args*."""
        buttonCallback = functools.partial(slot,*args)
        QObject.connect(widget, SIGNAL(signal), buttonCallback)

    class sineHandler(QFrame,Ui_Sliding):
        """Slider frame controlling one of the two sine-wave generators."""
        def __init__(self,chan):
            super(CustomWidgets.sineHandler, self).__init__()
            #QFrame.__init__(self)
            #Ui_Sliding.__init__(self)
            self.I=interface.Interface()
            self.setupUi(self)
            # chan is 1 or 2; pick the matching display name.
            self.name=['SINE1','SINE2'][chan-1]
            self.label.setText(self.name)
            self.chan=chan
            # The slider position maps directly to frequency in Hz.
            self.slider.setMinimum(0)
            self.slider.setMaximum(500000)
        def setValue(self,val):
            # Show the chosen frequency and push it to the hardware.
            self.label.setText(self.name+':'+str(val)+' Hz')
            if self.chan==1:self.I.set_sine1(val)
            elif self.chan==2:self.I.set_sine2(val)

    def widget_sine1(self):
        self.updateWidgetBay(self.sineHandler(1))

    def widget_sine2(self):
        self.updateWidgetBay(self.sineHandler(2))

    class gainHandler(QFrame,Ui_Sliding):
        """Slider frame selecting the amplifier gain for one analog channel."""
        def __init__(self,chan):
            super(CustomWidgets.gainHandler, self).__init__()
            self.I=interface.Interface()
            self.setupUi(self)
            # Eight discrete gain settings, indexed 0-7 (see gaintxt).
            self.slider.setMinimum(0)
            self.slider.setMaximum(7)
            self.gaintxt=['1x','2x','4x','5x','8x','10x','16x','32x']
            self.name=chan
            self.label.setText(self.name)
        def setValue(self,val):
            self.label.setText(self.name+':'+self.gaintxt[val])
            self.I.set_gain(self.name,val)

    def widget_ch1(self):
        self.updateWidgetBay(self.gainHandler('CH1'))

    def widget_ch2(self):
        self.updateWidgetBay(self.gainHandler('CH2'))

    def widget_ch3(self):
        self.updateWidgetBay(self.gainHandler('CH3'))

    def widget_ch4(self):
        self.updateWidgetBay(self.gainHandler('CH4'))

    def widget_ch5(self):
        self.updateWidgetBay(self.gainHandler('CH5'))

    class voltHandler(QFrame,Ui_Clicking):
        """Button frame that reads the averaged voltage on one fixed channel."""
        def __init__(self,chan):
            super(CustomWidgets.voltHandler, self).__init__()
            #QFrame.__init__(self)
            #Ui_Sliding.__init__(self)
            self.I=interface.Interface()
            self.setupUi(self)
            self.name='READ '+chan
            self.button.setText(self.name)
            self.chan=chan
        def clicked(self):
            val = self.I.get_average_voltage(self.chan)
            self.label.setText('%.3f V'%(val))

    def widget_volt1(self):
        self.updateWidgetBay(self.voltHandler('CH1'))

    def widget_volt2(self):
        self.updateWidgetBay(self.voltHandler('CH2'))

    def widget_volt3(self):
        self.updateWidgetBay(self.voltHandler('CH3'))

    def widget_volt4(self):
        self.updateWidgetBay(self.voltHandler('CH4'))

    def widget_volt5(self):
        self.updateWidgetBay(self.voltHandler('CH5'))

    class voltAllHandler(QFrame,Ui_ClickingOptions):
        """Combo + button frame reading the averaged voltage of any input."""
        def __init__(self):
            super(CustomWidgets.voltAllHandler, self).__init__()
            #QFrame.__init__(self)
            #Ui_Sliding.__init__(self)
            self.I=interface.Interface()
            self.setupUi(self)
            self.names=['CH1','CH2','CH3','CH4','CH5','CH6','CH7','CH8','CH9','5V','9V','IN1','SEN']
            self.button.setText('Read')
            self.items.addItems(self.names)
        def clicked(self):
            # Read whichever channel is currently selected in the combo box.
            val = self.I.get_average_voltage(self.items.currentText())
            self.label.setText('%.3f V'%(val))

    def widget_voltAll(self):
        self.updateWidgetBay(self.voltAllHandler())

    def widget_inductance(self):
        # Local handler class: one-shot inductance measurement button.
        class Handler(QFrame,Ui_Clicking):
            def __init__(self):
                super(Handler, self).__init__()
                self.I=interface.Interface()
                self.setupUi(self)
                self.button.setText('INDUCTANCE')
            def clicked(self):
                val = self.I.get_inductance()
                self.label.setText('%.3f'%(val))
        self.updateWidgetBay(Handler())

    class timingHandler(QFrame,Ui_ClickingOptions):
        """Combo + button frame running one timing measurement command.

        The interface method to call is looked up by name (*cmd*), and the
        result is formatted according to the command: percentage for
        ``duty_cycle``, seconds for ``*time*`` commands, Hz otherwise.
        """
        def __init__(self,cmd):
            super(CustomWidgets.timingHandler, self).__init__()
            #QFrame.__init__(self)
            #Ui_Sliding.__init__(self)
            self.I=interface.Interface()
            self.setupUi(self)
            # Resolve the measurement method by name on the interface object.
            self.cmd = getattr(self.I,cmd)
            self.cmdname=cmd
            self.button.setText(cmd)
            self.items.addItems(['ID1','ID2','ID3','ID4','LMETER'])
        def clicked(self):
            val = self.cmd(self.items.currentIndex())
            if self.cmdname=='duty_cycle':
                # duty_cycle appears to return a (total, high) pair with -1
                # marking a failed measurement -- TODO confirm against the
                # interface implementation.
                if(val[0]!=-1):p=100*val[1]/val[0]
                else: p=0
                self.label.setText(' %.2f %%'%(p))
            elif 'time' in self.cmdname:self.label.setText('%.2e S'%(val))
            else:self.label.setText('%.1f Hz'%(val))

    def widget_freq(self):
        self.updateWidgetBay(self.timingHandler('get_freq'))

    def widget_high_freq(self):
        self.updateWidgetBay(self.timingHandler('get_high_freq'))

    def widget_f2ftime(self):
        self.updateWidgetBay(self.timingHandler('f2f_time'))

    def widget_r2rtime(self):
        self.updateWidgetBay(self.timingHandler('r2r_time'))

    def widget_dutycycle(self):
        self.updateWidgetBay(self.timingHandler('duty_cycle'))

    def widget_pulse(self):
        self.updateWidgetBay(self.timingHandler('pulse_time'))

    class sourceHandler(QFrame,Ui_Sliding):
        """Slider frame driving one programmable voltage/current source.

        The slider range matches each source's resolution and setValue maps
        the raw slider position onto the source's output span.
        """
        def __init__(self,name):
            super(CustomWidgets.sourceHandler, self).__init__()
            self.I=interface.Interface()
            self.setupUi(self)
            self.name=name
            # NOTE(review): the second test below is 'if', not 'elif', which
            # breaks the chain; harmless here since name matches only one
            # branch, but worth tidying.
            if name=='pvs1':
                self.slider.setRange(0,4095)
            if name=='pvs2':
                self.slider.setRange(0,4095)
            elif name=='pvs3':
                self.slider.setRange(0,31)
            elif name=='pcs':
                self.slider.setRange(0,31)
        def setValue(self,val):
            # Map the raw slider position onto each source's output range;
            # the set_* calls return the value actually applied.
            if self.name=='pvs1':
                retval=self.I.set_pvs1(val*10./4095 - 5)
            elif self.name=='pvs2':
                retval=self.I.set_pvs2(val*3.3/4095)
            elif self.name=='pvs3':
                retval=self.I.set_pvs3(val*6.6/31 - 3.3)
            elif self.name=='pcs':
                retval=self.I.set_pcs(val*3.3/31)
            self.label.setText(self.name+': %.3f'%(retval))

    def widget_pvs1(self):
        self.updateWidgetBay(self.sourceHandler('pvs1'))

    def widget_pvs2(self):
        self.updateWidgetBay(self.sourceHandler('pvs2'))

    def widget_pvs3(self):
        self.updateWidgetBay(self.sourceHandler('pvs3'))

    def widget_pcs(self):
        self.updateWidgetBay(self.sourceHandler('pcs'))
/Flask_MySQLPooled-0.1.1-py3-none-any.whl/flask_mysqlpooled.py | from __future__ import absolute_import
import pymysql
from dbutils.pooled_db import PooledDB
try:
from flask import _app_ctx_stack as _ctx_stack
except ImportError:
from flask import _request_ctx_stack as _ctx_stack
class MySQLPooled(object):
    """Flask extension exposing a DBUtils ``PooledDB`` MySQL connection pool.

    One pooled connection is lazily opened per request/app context (stored
    on the context under ``prefix``) and closed again when the context is
    torn down.  Extra ``pool_args`` are forwarded to
    :class:`dbutils.pooled_db.PooledDB`; any argument not given explicitly
    falls back to the ``MySQLPooled_*`` entries of the Flask configuration.
    """

    def __init__(self, app=None, prefix="MySQLPooled", **pool_args):
        self.pool_args = pool_args
        # pymysql is the default DB-API driver handed to PooledDB.
        self.pool_args.setdefault('creator', pymysql)
        self.prefix = prefix
        self.connect_pool = None
        if app is not None:
            self.app = app
            self.init_app(self.app)
        else:
            self.app = None

    def init_app(self, app):
        """Read configuration defaults, create the pool and register the
        teardown hook appropriate for the running Flask version."""
        self.app = app
        self.app.config.setdefault('MySQLPooled_DATABASE_HOST', 'localhost')
        self.app.config.setdefault('MySQLPooled_DATABASE_PORT', 3306)
        self.app.config.setdefault('MySQLPooled_DATABASE_USER', None)
        self.app.config.setdefault('MySQLPooled_DATABASE_PASSWORD', None)
        self.app.config.setdefault('MySQLPooled_DATABASE_DB', None)
        self.app.config.setdefault('MySQLPooled_DATABASE_CHARSET', 'utf8')
        self.app.config.setdefault('MySQLPooled_USE_UNICODE', True)
        self.app.config.setdefault('MySQLPooled_DATABASE_SOCKET', None)
        self.app.config.setdefault('MySQLPooled_SQL_MODE', None)
        self.app.config.setdefault('MySQLPooled_MINCACHED', 0)
        self.app.config.setdefault('MySQLPooled_MAXCACHED', 1)
        self.app.config.setdefault('MySQLPooled_MAXCONNECTIONS', 1)
        self.app.config.setdefault('MySQLPooled_BLOCKING', False)
        self.app.config.setdefault('MySQLPooled_MAXUSAGE', None)
        self.app.config.setdefault('MySQLPooled_SETSESSION', None)
        self.app.config.setdefault('MySQLPooled_RESET', True)
        self.app.config.setdefault('MySQLPooled_FAILURES', None)
        self.app.config.setdefault('MySQLPooled_PING', 1)
        self._init_pool_args()
        self.connect_pool = PooledDB(**self.pool_args)
        # Flask 0.9 or later: connections are stored on the application
        # context (see _ctx_stack above), so use the app-context hook.
        # BUG FIX: this branch previously registered teardown_request even
        # though it tested for teardown_appcontext, so connections were not
        # released together with the app context they were stored on.
        if hasattr(self.app, 'teardown_appcontext'):
            self.app.teardown_appcontext(self.teardown_request)
        # Flask 0.7 to 0.8
        elif hasattr(self.app, 'teardown_request'):
            self.app.teardown_request(self.teardown_request)
        # Older versions
        # NOTE(review): after_request handlers must return the response
        # object, which teardown_request does not do -- confirm whether
        # pre-0.7 Flask support is still required.
        else:
            self.app.after_request(self.teardown_request)

    def _init_pool_args(self):
        """Fill every PooledDB argument not given explicitly from the
        corresponding ``MySQLPooled_*`` Flask configuration entry."""
        self.pool_args.setdefault('host', self.app.config['MySQLPooled_DATABASE_HOST'])
        self.pool_args.setdefault('port', self.app.config['MySQLPooled_DATABASE_PORT'])
        self.pool_args.setdefault('user', self.app.config['MySQLPooled_DATABASE_USER'])
        self.pool_args.setdefault('password', self.app.config['MySQLPooled_DATABASE_PASSWORD'])
        self.pool_args.setdefault('db', self.app.config['MySQLPooled_DATABASE_DB'])
        self.pool_args.setdefault('charset', self.app.config['MySQLPooled_DATABASE_CHARSET'])
        self.pool_args.setdefault('use_unicode', self.app.config['MySQLPooled_USE_UNICODE'])
        self.pool_args.setdefault('unix_socket', self.app.config['MySQLPooled_DATABASE_SOCKET'])
        self.pool_args.setdefault('sql_mode', self.app.config['MySQLPooled_SQL_MODE'])
        self.pool_args.setdefault('mincached', self.app.config['MySQLPooled_MINCACHED'])
        self.pool_args.setdefault('maxcached', self.app.config['MySQLPooled_MAXCACHED'])
        self.pool_args.setdefault('maxconnections', self.app.config['MySQLPooled_MAXCONNECTIONS'])
        self.pool_args.setdefault('blocking', self.app.config['MySQLPooled_BLOCKING'])
        self.pool_args.setdefault('maxusage', self.app.config['MySQLPooled_MAXUSAGE'])
        self.pool_args.setdefault('setsession', self.app.config['MySQLPooled_SETSESSION'])
        self.pool_args.setdefault('reset', self.app.config['MySQLPooled_RESET'])
        self.pool_args.setdefault('failures', self.app.config['MySQLPooled_FAILURES'])
        self.pool_args.setdefault('ping', self.app.config['MySQLPooled_PING'])

    def connect(self):
        """Fetch a fresh connection from the pool."""
        return self.connect_pool.connection()

    def teardown_request(self, exception):
        """Context-teardown hook: return this context's connection (if any)
        to the pool."""
        ctx = _ctx_stack.top
        if hasattr(ctx, "mysql_dbs"):
            try:
                if self.prefix in ctx.mysql_dbs:
                    ctx.mysql_dbs[self.prefix].close()
            except Exception:
                # Best effort: the connection may already be closed/broken.
                pass

    def get_db(self):
        """Return the pooled connection bound to the current context,
        opening one on first access.  Returns None outside a context."""
        ctx = _ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, "mysql_dbs"):
                ctx.mysql_dbs = dict()
            if self.prefix not in ctx.mysql_dbs:
                ctx.mysql_dbs[self.prefix] = self.connect()
            return ctx.mysql_dbs[self.prefix]
/DoorPi-2.4.1.8.tar.gz/DoorPi-2.4.1.8/doorpi/sipphone/pjsua_lib/Recorder.py |
import logging
logger = logging.getLogger(__name__)
logger.debug("%s loaded", __name__)
import os
from doorpi import DoorPi
from doorpi.sipphone.AbstractBaseClass import RecorderAbstractBaseClass
class PjsuaRecorder(RecorderAbstractBaseClass):
    """Records DoorPi calls to wav files via the pjsua library.

    The target filename is read from the ``DoorPi -> records`` config entry
    and may contain placeholders (e.g. strftime patterns) expanded by
    ``DoorPi().parse_string`` each time a recording starts.

    FIX: string comparisons previously used identity (``is ''`` /
    ``is not ''``), which is unreliable for non-interned strings; they now
    use equality.
    """

    __rec_id = None              # pjsua recorder id while recording, else None
    __slot_id = None             # conference slot of the active recorder
    __record_filename = ''       # raw (unparsed) filename template
    __last_record_filename = ''  # last fully expanded filename

    @property
    def record_filename(self):
        """Raw filename template from the configuration."""
        return self.__record_filename

    @property
    def parsed_record_filename(self):
        """Filename template with all DoorPi placeholders expanded."""
        return DoorPi().parse_string(self.__record_filename)

    @property
    def last_record_filename(self):
        """Expanded filename of the most recent recording."""
        return self.__last_record_filename

    def __init__(self):
        self.__record_filename = DoorPi().config.get('DoorPi', 'records',
                                                     '!BASEPATH!/records/%Y-%m-%d_%H-%M-%S.wav')
        if self.__record_filename == '':
            logger.debug('no recorder found in config at section DoorPi and key records')
            return

        DoorPi().event_handler.register_event('OnRecorderStarted', __name__)
        DoorPi().event_handler.register_event('OnRecorderStopped', __name__)
        DoorPi().event_handler.register_event('OnRecorderCreated', __name__)

        # Either record from the moment DoorPi dials, or only once the call
        # is connected.
        if DoorPi().config.get_bool('DoorPi', 'record_while_dialing', 'False') is True:
            DoorPi().event_handler.register_action('OnSipPhoneMakeCall', self.start)
        else:
            DoorPi().event_handler.register_action('OnCallStateConnect', self.start)
        DoorPi().event_handler.register_action('OnCallStateDisconnect', self.stop)

        DoorPi().event_handler('OnRecorderCreated', __name__)

    def start(self):
        """Begin recording the current call; no-op when recording is
        disabled or already running."""
        if self.__record_filename == '':
            return
        if self.__rec_id is not None:
            logger.trace('recorder already created as rec_id %s and record to %s', self.__rec_id, self.last_record_filename)
            return
        DoorPi().sipphone.lib.thread_register('PjsuaPlayer_start_thread')
        if self.__record_filename != '':
            self.__last_record_filename = DoorPi().parse_string(self.__record_filename)
            if not os.path.exists(os.path.dirname(self.__last_record_filename)):
                logger.info('Path %s not exists - create it now', os.path.dirname(self.__last_record_filename))
                os.makedirs(os.path.dirname(self.__last_record_filename))
            logger.debug('starting recording to %s', self.__last_record_filename)
            # Create a pjsua wav writer and connect it to the conference
            # bridge (slot 0 -- presumably the call audio; confirm against
            # the pjsua configuration).
            self.__rec_id = DoorPi().sipphone.lib.create_recorder(self.__last_record_filename)
            self.__slot_id = DoorPi().sipphone.lib.recorder_get_slot(self.__rec_id)
            DoorPi().sipphone.lib.conf_connect(0, self.__slot_id)
            DoorPi().event_handler('OnRecorderStarted', __name__)

    def stop(self):
        """Stop and destroy the active recorder, if any."""
        if self.__rec_id is not None:
            DoorPi().sipphone.lib.thread_register('PjsuaPlayer_start_thread')
            logger.debug('stopping recording to %s', self.__last_record_filename)
            DoorPi().sipphone.lib.conf_disconnect(0, self.__slot_id)
            DoorPi().sipphone.lib.recorder_destroy(self.__rec_id)
            self.__rec_id = None
            self.__slot_id = None
            DoorPi().event_handler('OnRecorderStopped', __name__)
/C99-1.0.7.zip/C99-1.0.7/README.rst | C99
===
C99 headers and libraries that are missing from the C compilers for Python2.
Overview
========
TBD...
`PyPI record`_.
Installation
============
Prerequisites:
+ Python 2.7
* https://www.python.org/
+ pip and setuptools
* https://pypi.org/project/pip/
* https://pypi.org/project/setuptools/
To install run:
.. parsed-literal::
python -m pip install --upgrade |package|
Development
===========
Prerequisites:
+ Development is strictly based on *tox*. To install it run::
python -m pip install --upgrade tox
Visit `development page`_.
Installation from sources:
clone the sources:
.. parsed-literal::
git clone |respository| |package|
and run:
.. parsed-literal::
python -m pip install ./|package|
or on development mode:
.. parsed-literal::
python -m pip install --editable ./|package|
License
=======
| Copyright (c) 2018-2020 Adam Karpierz
| Licensed under the zlib/libpng License
| https://opensource.org/licenses/Zlib
| Please refer to the accompanying LICENSE file.
Authors
=======
* Adam Karpierz <adam@karpierz.net>
.. |package| replace:: C99
.. |package_bold| replace:: **C99**
.. |respository| replace:: https://github.com/karpierz/C99.git
.. _development page: https://github.com/karpierz/C99/
.. _PyPI record: https://pypi.org/project/C99/
| PypiClean |
/Modular%20computer%20vision%20API%20GUI-0.3.31.tar.gz/Modular computer vision API GUI-0.3.31/mcvgui/dialogs/simple_filter/simple_filter.py | import pyforms, cv2
from pyforms.basewidget import BaseWidget
from pyforms.controls import ControlList
from pyforms.controls import ControlPlayer
from pyforms.controls import ControlText
from pyforms.controls import ControlCombo
from mcvapi.mcvbase import MCVBase
from confapp import conf
class SimpleFilter(MCVBase, BaseWidget):
"""
    It implements a dialog that allows the user to choose several
    combinations of filters and apply them to a video.
    The player allows the user to preview the result.
"""
    def __init__(self, parent=None, video=None):
        """Build the editor window and wire up its controls.

        :param parent: parent window handed to pyforms' BaseWidget.
        :param video: initial video source assigned to ``self.video_capture``.
        """
        BaseWidget.__init__(self, 'Simple workflow editor', parent_win=parent)
        self._parent = parent

        # GUI controls: the preview player, two lists showing the active
        # filters, and two combo boxes selecting a predefined workflow.
        self._player = ControlPlayer('Player')
        self._imgfilters = ControlList('Image filters')
        self._imageflows = ControlCombo('Image workflows')
        self._blobsflows = ControlCombo('Blobs workflows')
        self._blobsfilters = ControlList('Blobs filters')

        # Window layout: player on top, two tabs (image / blobs) below.
        self.formset = [
            '_player',
            '=',
            [{
                'a:Image filter':['_imageflows','_imgfilters'],
                'b:Blobs filter':['_blobsflows','_blobsfilters']
            }]
        ]
        self.load_order = ['_imageflows', '_blobsflows','_imgfilters','_blobsfilters']

        self._imgfilters.select_entire_row = True
        self._blobsfilters.select_entire_row = True

        # Rebuild the active filter lists when a workflow is selected, and
        # run the filters on every frame shown by the player.
        self._imageflows.changed_event = self.__imageflows_changed_event
        self._blobsflows.changed_event = self.__blobsflows_changed_event
        self._player.process_frame_event = self.__process_frame

        self.video_capture = video

        self._pipelines = {} # dictionary with all the available pipelines
        self._pipeline = None # active pipeline instance (None until built);
                              # presumably exposed via a 'pipeline' property
                              # defined later in this class -- confirm.
###########################################################################
### IO FUNCTIONS ##########################################################
###########################################################################
def save(self, data={}, **kwargs):
for n, f in self._imgfilters.value: f.save(data, **kwargs)
for n, f in self._blobsfilters.value: f.save(data, **kwargs)
return data
def load(self, data, **kwargs):
for n, f in self._imgfilters.value: f.load(data, **kwargs)
for n, f in self._blobsfilters.value: f.load(data, **kwargs)
###########################################################################
### FUNCTIONS #############################################################
###########################################################################
def clear(self):
"""
Reinit all the filters
"""
for name, f in self._imgfilters.value: data = f.clear()
for name, f in self._blobsfilters.value: data = f.clear()
self.pipeline = None
def processflow(self, data, **kwargs):
"""
Apply the selected workflow of filters.
"""
if self.pipeline is None:
self.pipeline = self.build_workflow_instance()
return self.pipeline.processflow(data, **kwargs)
def end(self, data, **kwargs):
"""
Apply the selected workflow of filters.
"""
if self.pipeline is None:
self.pipeline = self.build_workflow_instance()
return self.pipeline.end(data, **kwargs)
def pipeline_classes(self):
classes_list = [f.__class__ for title, f in self._imgfilters.value]
classes_list += [f.__class__ for title, f in self._blobsfilters.value]
classes_list.reverse()
return classes_list
    def build_workflow_instance(self):
        """Create a single object combining every selected filter class.

        The configured parameters of all filters are first exported into a
        dict, then a new class is built dynamically whose bases are the
        filter classes (in the order given by pipeline_classes, which
        determines the MRO), and one instance is constructed from the
        exported parameters.
        """
        # export the configured parameters of every active filter
        data = {'load': True}
        for name, f in self._imgfilters.value: f.save(data)
        for name, f in self._blobsfilters.value: f.save(data)
        # create the workflow class dynamically and feed the configured
        # parameters back in through its constructor
        classes_list = self.pipeline_classes()
        instance = type('ProcessingPipeline', tuple(classes_list), {})(**data)
        return instance
###########################################################################
### INTERFACE FUNCTIONS ###################################################
###########################################################################
def __load_default_blobsflows(self):
    """Register the built-in default blob workflow and activate it.

    Adds the 'Find blobs + track path' entry to the blob-workflow selector
    and fires the change handler so its filters are instantiated.
    """
    self._blobsflows.add_item('Find blobs + track path', 2)
    self.__blobsflows_changed_event()
def __imageflows_changed_event(self):
    """Handle selection of a new image workflow.

    Instantiates every filter class registered for the chosen workflow,
    installs the resulting (title, instance) pairs as the active image
    filters and invalidates the cached pipeline.
    """
    registered = self._pipelines.get(self._imageflows.value, [])
    self.image_filters = [(title, filter_cls()) for title, filter_cls in registered]
    self.pipeline = None
def __blobsflows_changed_event(self):
    """Handle selection of a new blob workflow.

    Instantiates every filter class registered for the chosen workflow,
    installs the resulting (title, instance) pairs as the active blob
    filters and invalidates the cached pipeline.
    """
    registered = self._pipelines.get(self._blobsflows.value, [])
    self.blobs_filters = [(title, filter_cls()) for title, filter_cls in registered]
    self.pipeline = None
def __process_frame(self, frame):
"""Apply the image and blob filter chains to a single video frame.

Returns a ``(frame, thresh)`` tuple where ``thresh`` is the output of the
last image filter and ``frame`` has been annotated in place with the index
of every detected blob.
"""
# Index of the frame being processed (the player has already advanced).
frame_index = self._player.video_index-1
# Run the frame through every image filter in order; each filter receives
# the previous filter's output.
data = frame
for name, f in self._imgfilters.value:
data = f.process(data, frame_index=frame_index)
# Keep the image-filter chain's result so it can be returned alongside
# the annotated frame.
thresh = data
# The blob filters turn the filtered image into a sequence of blobs.
for name, f in self._blobsfilters.value:
data = f.process(data, frame_index=frame_index)
# Spread blob colors evenly across the 24-bit color cube
# (16581375 == 255**3) so each blob index gets a distinct color.
step = 16581375 / (len(data)+1)
for i, blob in enumerate(data):
if blob is not None:
# Decompose the scalar color into blue/green/red channels
# (OpenCV's BGR byte order).
rgb_int = int(step*(i+1))
blue = rgb_int & 255
green = (rgb_int >> 8) & 255
red = (rgb_int >> 16) & 255
c = (blue, green, red)
#blob.draw(frame, color=c)
# Label the blob with its index at its centroid, when available.
if blob.centroid:
cv2.putText(frame,str(i), blob.centroid, cv2.FONT_HERSHEY_SIMPLEX, 1, c)
return frame, thresh
def add_image_filters(self, filtername, pipeline):
    """Register a named image-filter workflow.

    Parameters
    ----------
    filtername : str
        Name shown in the image-workflow selector.
    pipeline : list of (title, filter_class) tuples
        Filter classes instantiated whenever this workflow is selected.

    The first workflow ever registered is activated immediately.
    """
    # `is None` instead of `== None`: identity is the correct test here and
    # is safe against selector values overriding __eq__.
    first_filters = self._imageflows.value is None
    self._imageflows.add_item(filtername)
    self._pipelines[filtername] = pipeline
    if first_filters:
        self.__imageflows_changed_event()
def add_blobs_filters(self, filtername, pipeline):
    """Register a named blob-filter workflow.

    Parameters
    ----------
    filtername : str
        Name shown in the blob-workflow selector.
    pipeline : list of (title, filter_class) tuples
        Filter classes instantiated whenever this workflow is selected.

    The first workflow ever registered is activated immediately.
    """
    # `is None` instead of `== None`: identity is the correct test here and
    # is safe against selector values overriding __eq__.
    first_filters = self._blobsflows.value is None
    self._blobsflows.add_item(filtername)
    self._pipelines[filtername] = pipeline
    if first_filters:
        self.__blobsflows_changed_event()
###########################################################################
### PROPERTIES ############################################################
###########################################################################
@property
def image_filters(self):
"""
Generator over the currently selected image filter instances.

Note the asymmetry: the getter yields only the filter objects (titles are
dropped), while the setter expects the full list of (title, filter) pairs
as stored in ``self._imgfilters.value``. Each access returns a fresh
generator.
"""
for name, f in self._imgfilters.value: yield f
# Setter stores the raw (title, filter) list on the underlying control.
@image_filters.setter
def image_filters(self, value): self._imgfilters.value = value
@property
def blobs_filters(self):
"""
Generator over the currently selected blob filter instances.

Note the asymmetry: the getter yields only the filter objects (titles are
dropped), while the setter expects the full list of (title, filter) pairs
as stored in ``self._blobsfilters.value``. Each access returns a fresh
generator.
"""
for name, f in self._blobsfilters.value: yield f
# Setter stores the raw (title, filter) list on the underlying control.
@blobs_filters.setter
def blobs_filters(self, value): self._blobsfilters.value = value
@property
def video_capture(self):
"""
Set and retrieve the video used for previsualization.
The value should be of type cv2.VideoCapture or a path to a video file.
"""
return self._player.value
@video_capture.setter
def video_capture(self, value):
# Hand the video to the player widget for playback.
self._player.value = value
# Also propagate it to every filter via their ``video`` attribute
# (presumably so filters can read source properties -- confirm).
for name, f in self._imgfilters.value: f.video = value
for name, f in self._blobsfilters.value: f.video = value
def show(self):
"""Display the window, including the blob-filter panel.

Calls the parent implementation first, then explicitly shows the blob
filter list and resizes its rows so their contents fit.
"""
super(SimpleFilter, self).show()
self._blobsfilters.show()
self._blobsfilters.resize_rows_contents()
# Launch the filter-configuration GUI when this module is run directly.
if __name__ == '__main__':
pyforms.start_app(SimpleFilter) | PypiClean
/CFEC-0.0.5.tar.gz/CFEC-0.0.5/README.md |   [](https://counterfactuals.readthedocs.io/en/latest/?badge=latest)
# CFEC
CFEC is a specialized Python library that provides three different counterfactual generation methods for tabular data, supports various constraints, and includes a tool for comparing the methods' results.
## Requirements
The package has been tested under Python 3.7 up to 3.9, on both Windows and Ubuntu platforms. Its main dependency is tensorflow, which all the methods use, plus the typical scientific stack (numpy, scipy, pandas).
Requirements include:
* tensorflow~=2.7.0
* pandas==1.3.4
* numpy==1.21.4
* scikit-learn==1.0.1
## Installation
This package can be installed using pip
```bash
pip install cfec
```
## Implemented algorithms
Our package includes implementation of algorithms, such as:
* [FIMAP](https://ojs.aaai.org/index.php/AAAI/article/view/17362)
* [CADEX](https://doi.org/10.1007/978-3-030-29908-8\_4)
* [Ensemble](https://arxiv.org/abs/2102.13076)
## Example usage
```python
from cfec.explainers import Fimap
from cfec.constraints import OneHot
from data import AdultData
from sklearn.ensemble import RandomForestClassifier
adult_data = AdultData('data/datasets/adult.csv')
rf = RandomForestClassifier()
rf.fit(adult_data.X_train, adult_data.y_train)
predictions = rf.predict(adult_data.X_train)
constraints = [
OneHot('workclass', 2, 8),
OneHot('martial.status', 9, 15),
OneHot('occupation', 16, 29),
OneHot('race', 30, 34),
OneHot('sex', 35, 36),
]
fimap = Fimap(constraints=constraints)
fimap.fit(adult_data.X_train, predictions)
x = adult_data.X_train.iloc[0]
cf = fimap.generate(x)
```
```python
from cfec.explainers import Cadex
from cfec.constraints import OneHot
from data import GermanData
from tensorflow import keras
german_data = GermanData('data/datasets/input_german.csv', 'data/datasets/labels_german.csv')
# simple model consisting of one dense layer with 2 units and a softmax activation
german_model = keras.models.load_model('models/model_german')
predictions = german_model.predict(german_data.X_train)
constraints = [
OneHot("account_status", 7, 10),
OneHot("credit_history", 11, 15),
OneHot("purpose", 16, 25),
OneHot("savings", 26, 30),
OneHot("sex_status", 31, 34),
OneHot("debtors", 35, 37),
OneHot("property", 38, 41),
OneHot("other_installment_plans", 42, 44),
OneHot("housing", 45, 47),
OneHot("job", 48, 51),
OneHot("phone", 52, 53),
OneHot("foreign", 54, 55),
OneHot("employment", 56, 60)
]
cadex = Cadex(german_model, constraints=constraints)
x = german_data.X_train.iloc[0]
cf = cadex.generate(x) # cadex method does not need to fit before generate
``` | PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/39563.7864566e200d7c61e576.min.js | (self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[39563],{39563:function(e){const n=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],a=["true","false","null","undefined","NaN","Infinity"],t=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer","BigInt64Array","BigUint64Array","BigInt"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]);e.exports=function(e){const 
// NOTE(review): minified webpack chunk (generated build artifact) containing the
// highlight.js grammar for LiveScript; do not hand-edit -- regenerate from source.
r={keyword:n.concat(["then","unless","until","loop","of","by","when","and","or","is","isnt","not","it","that","otherwise","from","to","til","fallthrough","case","enum","native","list","map","__hasProp","__extends","__slice","__bind","__indexOf"]),literal:a.concat(["yes","no","on","off","it","that","void"]),built_in:t.concat(["npm","print"])},i="[A-Za-z$_](?:-[0-9A-Za-z$_]|[0-9A-Za-z$_])*",s=e.inherit(e.TITLE_MODE,{begin:i}),o={className:"subst",begin:/#\{/,end:/\}/,keywords:r},c={className:"subst",begin:/#[A-Za-z$_]/,end:/(?:-[0-9A-Za-z$_]|[0-9A-Za-z$_])*/,keywords:r},l=[e.BINARY_NUMBER_MODE,{className:"number",begin:"(\\b0[xX][a-fA-F0-9_]+)|(\\b\\d(\\d|_\\d)*(\\.(\\d(\\d|_\\d)*)?)?(_*[eE]([-+]\\d(_\\d|\\d)*)?)?[_a-z]*)",relevance:0,starts:{end:"(\\s*/)?",relevance:0}},{className:"string",variants:[{begin:/'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE]},{begin:/'/,end:/'/,contains:[e.BACKSLASH_ESCAPE]},{begin:/"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,o,c]},{begin:/"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,o,c]},{begin:/\\/,end:/(\s|$)/,excludeEnd:!0}]},{className:"regexp",variants:[{begin:"//",end:"//[gim]*",contains:[o,e.HASH_COMMENT_MODE]},{begin:/\/(?![ *])(\\.|[^\\\n])*?\/[gim]*(?=\W)/}]},{begin:"@"+i},{begin:"``",end:"``",excludeBegin:!0,excludeEnd:!0,subLanguage:"javascript"}];o.contains=l;const 
// The object returned below is the highlight.js language definition
// (name "LiveScript", alias "ls").
d={className:"params",begin:"\\(",returnBegin:!0,contains:[{begin:/\(/,end:/\)/,keywords:r,contains:["self"].concat(l)}]};return{name:"LiveScript",aliases:["ls"],keywords:r,illegal:/\/\*/,contains:l.concat([e.COMMENT("\\/\\*","\\*\\/"),e.HASH_COMMENT_MODE,{begin:"(#=>|=>|\\|>>|-?->|!->)"},{className:"function",contains:[s,d],returnBegin:!0,variants:[{begin:"("+i+"\\s*(?:=|:=)\\s*)?(\\(.*\\)\\s*)?\\B->\\*?",end:"->\\*?"},{begin:"("+i+"\\s*(?:=|:=)\\s*)?!?(\\(.*\\)\\s*)?\\B[-~]{1,2}>\\*?",end:"[-~]{1,2}>\\*?"},{begin:"("+i+"\\s*(?:=|:=)\\s*)?(\\(.*\\)\\s*)?\\B!?[-~]{1,2}>\\*?",end:"!?[-~]{1,2}>\\*?"}]},{className:"class",beginKeywords:"class",end:"$",illegal:/[:="\[\]]/,contains:[{beginKeywords:"extends",endsWithParent:!0,illegal:/[:="\[\]]/,contains:[s]},s]},{begin:i+":",end:":",returnBegin:!0,returnEnd:!0,relevance:0}])}}}}]);
//# sourceMappingURL=39563.7864566e200d7c61e576.min.js.map | PypiClean |
/Contentstack-1.8.0.tar.gz/Contentstack-1.8.0/README.rst | ================
**Contentstack**
================
Python SDK for Contentstack
===========================
Contentstack is a headless CMS with an API-first approach. It is a CMS that developers can use to build powerful cross-platform applications in their favorite languages. Build your application frontend, and Contentstack will take care of the rest. `Read More <https://www.contentstack.com/>`_.
Contentstack provides Python SDK to build an application on top of Python. Given below is the detailed guide and helpful resources to get started with our Python SDK.
Prerequisite
============
You will need python 3 installed on your machine. You can install it from `here <https://www.python.org/ftp/python/3.7.4/python-3.7.4-macosx10.9.pkg>`_
Setup and Installation
======================
To use the Contentstack Python SDK to your existing project, perform the steps given below:
.. code-block:: python
install contentstack pip
This is the preferred method to install contentstack, as it will always install the most recent stable release. If you don't have `pip <https://pip.pypa.io/>`_
installed, this `Python installation guide <http://docs.python-guide.org/en/latest/starting/installation/>`_ can guide you through the process
Key Concepts for using Contentstack
-----------------------------------
**Stack**
A stack is like a container that holds the content of your app. Learn more about `Stacks <https://www.contentstack.com/docs/developers/set-up-stack>`_.
**Content-Type**
Content-type lets you define the structure or blueprint of a page or a section of your digital property. It is a form-like page that gives Content Managers an interface to input and upload content. `read_more <https://www.contentstack.com/docs/developers/create-content-types>`_.
**Entry**
An entry is the actual piece of content created using one of the defined content types. Learn more about `Entries <https://www.contentstack.com/docs/content-managers/work-with-entries>`_.
**Asset**
Assets refer to all the media files (images, videos, PDFs, audio files, and so on) uploaded to Contentstack. These files can be used in multiple entries. Read more about `Assets <https://www.contentstack.com/docs/content-managers/work-with-assets>`_.
**Environment**
A publishing environment corresponds to one or more deployment servers or a content delivery destination where the entries need to be published. Learn how to work with `Environments <https://www.contentstack.com/docs/developers/set-up-environments>`_.
Contentstack Python SDK: 5-minute Quickstart
--------------------------------------------
**Initializing your SDK**
To initialize the SDK, specify the application API key, access token, and environment name of the stack as shown in the snippet given below:
.. code-block:: python
stack = contentstack.Stack('api_key', 'delivery_token', 'environment')
To get the API credentials mentioned above, log in to your Contentstack account and then in your top panel navigation, go to Settings > Stack to view the API Key and Access Token.
**Querying content from your stack**
To retrieve a single entry from a content type use the code snippet given below:
.. code-block:: python
content_type = stack.content_type("content_type_uid")
entry = content_type.entry("entry_uid")
result = entry.fetch()
**Get Multiple Entries**
To retrieve multiple entries of a particular content type, use the code snippet given below:
**stack is an instance of Stack class**
.. code-block:: python
query = stack.content_type("content_type_uid").query()
result = query.find()
**Advanced Queries**
You can query for content types, entries, assets, and more using our Java API Reference. `Python API Reference Doc <https://www.contentstack.com/docs/platforms/python/api-reference/>`_
**Working with Images**
We have introduced Image Delivery APIs that let you retrieve images and then manipulate and optimize them for your digital properties. It lets you perform a host of other actions such as crop, trim, resize, rotate, overlay, and so on.
*For example:*
If you want to crop an image (with a width of 300 and height of 400), you simply need to append query parameters at the end of the image URL, such as
.. code-block:: python
https://images.contentstack.io/v3/assets/download?crop=300,400
There are several more parameters that you can use for your images. `Read Image Delivery API documentation <https://www.contentstack.com/docs/platforms/python/api-reference/>`_
You can use the Image Delivery API functions in this SDK as well. Here are a few examples of its usage in the SDK.
.. code-block:: python
url = stack.image_transform(image_url, {'quality': 100})
url = stack.image_transform(imageUrl, {'width': 100, 'height': 100})
url = stack.image_transform(imageUrl, {'auto': 'webp'})
**Using the Sync API with Python SDK**
The Sync API takes care of syncing your Contentstack data with your application and ensures that the data is always up-to-date by providing delta updates. Contentstack’s Python SDK supports Sync API, which you can use to build powerful applications.
Read through to understand how to use the Sync API with Contentstack Python SDK. `Using the Sync API with Python SDK <https://www.contentstack.com/docs/developers/python/using-the-sync-api-with-python-sdk>`_
**Helpful Links**
`Contentstack Website <https://www.contentstack.com>`_
`Official Documentation <https://www.contentstack.com/docs/developers/apis/content-delivery-api/>`_
`Content Delivery API Docs <https://www.contentstack.com/docs/developers/apis/content-delivery-api>`_.
The MIT License (MIT)
^^^^^^^^^^^^^^^^^^^^^
Copyright © 2012-2023 Contentstack. All Rights Reserved Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE* | PypiClean |
/MedPy-0.4.0.tar.gz/MedPy-0.4.0/medpy/features/intensity.py |
# build-in modules
# third-party modules
import numpy
from scipy.ndimage.filters import gaussian_filter, median_filter
from scipy.ndimage.filters import gaussian_gradient_magnitude as scipy_gaussian_gradient_magnitude
from scipy.interpolate.interpolate import interp1d
from scipy.ndimage.morphology import distance_transform_edt
from scipy.ndimage._ni_support import _get_output
# own modules
from .utilities import join
from ..core import ArgumentError
from ..filter import sum_filter
# constants
def intensities(image, mask = slice(None)):
    r"""Voxel-wise intensities of a simple or multi-spectral image.

    A multi-spectral image must be supplied as a list or tuple of its
    spectra. A binary mask may be supplied to restrict the extraction to a
    subset of the voxels.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (multi-spectral case).
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    intensities : ndarray
        The intensity value of every (masked) voxel.
    """
    return _extract_feature(_extract_intensities, image, mask)
def centerdistance(image, voxelspacing = None, mask = slice(None)):
    r"""
    Takes a simple or multi-spectral image and returns its voxel-wise center
    distance in mm. A multi-spectral image must be supplied as a list or tuple
    of its spectra. Optionally a binary mask can be supplied to select the
    voxels for which the feature should be extracted.

    The center distance is the exact euclidean distance in mm of each voxels
    center to the central point of the overall image volume.

    Note that this feature is independent of the actual image content, but
    depends solely on its shape. Therefore always a one-dimensional feature is
    returned, even if a multi-spectral image has been supplied.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    centerdistance : ndarray
        The distance of each voxel to the images center.

    See Also
    --------
    centerdistance_xdminus1
    """
    # All spectra share the same shape, so the first one suffices;
    # isinstance replaces the fragile `type(x) == tuple` comparison.
    if isinstance(image, (tuple, list)):
        image = image[0]

    return _extract_feature(_extract_centerdistance, image, mask, voxelspacing = voxelspacing)
def centerdistance_xdminus1(image, dim, voxelspacing = None, mask = slice(None)):
    r"""
    Implementation of `centerdistance` that allows to compute sub-volume wise
    centerdistances.

    The same notes as for `centerdistance` apply: the feature depends only on
    the image shape, never on its content.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    dim : int or sequence of ints
        The dimension or dimensions along which to cut the image into
        sub-volumes.
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    centerdistance_xdminus1 : ndarray
        The distance of each voxel to the images center in the supplied
        dimensions.

    Raises
    ------
    ArgumentError
        If an invalid dim index or number of dim indices were supplied.

    Examples
    --------
    Considering a 3D medical image we want to compute the axial slice-wise
    centerdistances instead of the ones over the complete image volume.
    Assuming that the third image dimension corresponds to the axial axes of
    the image, we call

    >>> centerdistance_xdminus1(image, 2)

    Note that the centerdistance of each slice will be equal.
    """
    # pre-process arguments
    if isinstance(image, (tuple, list)):
        image = image[0]
    dims = [dim] if isinstance(dim, int) else list(dim)

    # check arguments
    if len(dims) >= image.ndim - 1:
        raise ArgumentError('Applying a sub-volume extraction of depth {} on a image of dimensionality {} would lead to invalid images of dimensionality <= 1.'.format(len(dims), image.ndim))
    for d in dims:
        if d >= image.ndim:
            raise ArgumentError('Invalid dimension index {} supplied for image(s) of shape {}.'.format(d, image.shape))

    # extract the desired sub-volume: only the first slice along every cut dimension
    slicer = [slice(None)] * image.ndim
    for d in dims:
        slicer[d] = slice(1)
    # index with a tuple: indexing an ndarray with a *list* of slices is
    # deprecated since NumPy 1.15 and raises an error in recent versions
    subvolume = numpy.squeeze(image[tuple(slicer)])

    # compute centerdistance for the sub-volume and reshape to its original
    # shape (normalization and mask are not passed on in this step)
    o = centerdistance(subvolume, voxelspacing).reshape(subvolume.shape)

    # re-establish the original shape by stacking the result along each cut dimension
    for d in sorted(dims):
        o = numpy.asarray([o] * image.shape[d])
        o = numpy.rollaxis(o, 0, d + 1)

    # extract the centerdistance values, applying normalization and mask in this step
    return intensities(o, mask)
def indices(image, voxelspacing = None, mask = slice(None)):
    r"""
    Takes an image and returns the voxels ndim-indices as voxel-wise feature.
    The voxel spacing is taken into account, i.e. the indices are not array
    indices, but millimeter indices.

    This is a multi-element feature where each element corresponds to one of
    the images axes, e.g. x, y, z, ...

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    indices : ndarray
        Each voxels ndimensional index.

    Notes
    -----
    This feature is independent of the actual image content, but depends
    solely on its shape. Therefore always a one-dimensional feature is
    returned, even if a multi-spectral image has been supplied.
    """
    if isinstance(image, (tuple, list)):
        image = image[0]

    if not isinstance(mask, slice):
        # `numpy.bool` was removed in NumPy 1.24; the builtin `bool` is the
        # canonical dtype. `asarray` avoids the `copy=False` keyword, whose
        # semantics changed in NumPy 2.0.
        mask = numpy.asarray(mask, dtype=bool)

    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # scale each axis' index grid by its voxel spacing to obtain mm indices
    return join(*[a[mask].ravel() * vs for a, vs in zip(numpy.indices(image.shape), voxelspacing)])
def shifted_mean_gauss(image, offset = None, sigma = 5, voxelspacing = None, mask = slice(None)):
    r"""Approximate mean over a small region at an *offset* from each voxel.

    Behaves like `local_mean_gauss`, except that the averaged (Gaussian
    weighted) region is not centered on the voxel itself but displaced by
    *offset* voxels — useful to employ a distant region's average as a
    per-voxel feature.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (multi-spectral case).
    offset : sequence of ints
        Displacement, in voxels, of the averaged region.
    sigma : number or sequence of numbers
        Standard deviation of the Gaussian kernel, per axis or shared by
        all axes; interpreted in mm (the voxel spacing is taken into
        account).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    shifted_mean_gauss : ndarray
        The weighted mean intensity of the shifted region for each voxel.

    See also
    --------
    local_mean_gauss
    """
    return _extract_feature(_extract_shifted_mean_gauss, image, mask, offset=offset, sigma=sigma, voxelspacing=voxelspacing)
def mask_distance(image, voxelspacing = None, mask = slice(None)):
    r"""
    Computes the distance of each point under the mask to the mask border
    taking the voxel-spacing into account.

    Note that this feature is independent of the actual image content, but
    depends solely on the mask image. Therefore always a one-dimensional
    feature is returned, even if a multi-spectral image has been supplied.

    If no mask has been supplied, the distances to the image borders are
    returned.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    mask_distance : ndarray
        Each voxels distance to the mask borders.
    """
    # All spectra share the same shape, so the first one suffices;
    # isinstance replaces the fragile `type(x) == tuple` comparison.
    if isinstance(image, (tuple, list)):
        image = image[0]

    return _extract_mask_distance(image, mask = mask, voxelspacing = voxelspacing)
def local_mean_gauss(image, sigma = 5, voxelspacing = None, mask = slice(None)):
    r"""Gaussian-weighted local mean around each voxel.

    A Gaussian smoothing filter is applied to the image (or to each
    spectrum of a multi-spectral image, supplied as list/tuple) and the
    resulting intensity values are returned — effectively a weighted local
    mean. Optionally a binary mask selects the voxels to extract.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (multi-spectral case).
    sigma : number or sequence of numbers
        Standard deviation of the Gaussian kernel, per axis or shared by
        all axes; interpreted in mm (the voxel spacing is taken into
        account).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    local_mean_gauss : ndarray
        The weighted mean intensity around each voxel.
    """
    return _extract_feature(_extract_local_mean_gauss, image, mask, sigma=sigma, voxelspacing=voxelspacing)
def gaussian_gradient_magnitude(image, sigma = 5, voxelspacing = None, mask = slice(None)):
    r"""Gradient magnitude (edge detection) computed with Gaussian derivatives.

    The gradient magnitude of the supplied image (or of each spectrum of a
    multi-spectral image, supplied as list/tuple) is computed using Gaussian
    derivatives and the resulting intensity values are returned. Optionally
    a binary mask selects the voxels to extract.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (multi-spectral case).
    sigma : number or sequence of numbers
        Standard deviation of the Gaussian kernel, per axis or shared by
        all axes; interpreted in mm (the voxel spacing is taken into
        account).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        The gaussian gradient magnitude of the supplied image.
    """
    return _extract_feature(_extract_gaussian_gradient_magnitude, image, mask, sigma=sigma, voxelspacing=voxelspacing)
def median(image, size = 5, voxelspacing = None, mask = slice(None)):
    """Voxel-wise values of a multi-dimensional median filter.

    The median filter is applied to the image (or to each spectrum of a
    multi-spectral image, supplied as list/tuple) and the resulting values
    are returned per voxel. Optionally a binary mask selects the voxels to
    extract.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (multi-spectral case).
    size : number or sequence of numbers
        Size of the structuring element, per axis or shared by all axes;
        interpreted in mm (the voxel spacing is taken into account).
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    median : ndarray
        Multi-dimensional median filtered version of the input images.
    """
    return _extract_feature(_extract_median, image, mask, size=size, voxelspacing=voxelspacing)
def local_histogram(image, bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0, mask=slice(None)):
    r"""Normalized intensity histograms over a neighbourhood of each voxel.

    For every voxel (optionally restricted by a binary *mask*) a histogram
    is computed over a surrounding neighbourhood. The neighbourhood is a
    cube of side-length *size* voxels, or an arbitrary shape when *size* is
    a shape tuple or a *footprint* array is given. If no *output* array is
    supplied, the returned array is of dtype float.

    Border voxels are handled according to *mode*; the supported values are
    the ones of the scipy.ndimage filters, except that 'constant' is
    replaced by the default 'ignore', which simply excludes the area
    outside the image from the histogram computation.

    The histogram range is controlled by *rang*:

    - 'image' (default): one global range for all histograms, computed from
      the image's min/max intensities after discarding the *cutoffp*
      percentile outliers (*cutoffp* is ignored for any other setting).
    - a (min, max) tuple: an explicit, shared range.
    - None: each histogram uses its own local range; the resulting
      histograms are not comparable and this setting should normally be
      avoided.

    Values outside the chosen range do not contribute to the histograms, so
    some local histograms may be built from fewer voxels than expected —
    choose the range with care. Each histogram is normalized by dividing it
    through its total number of binned elements.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (multi-spectral case).
    bins : integer
        The number of histogram bins.
    rang : 'image' or tuple of numbers or None
        The histogram range (see above).
    cutoffp : tuple of numbers
        Cut-off percentiles for outlier removal; only used when *rang* is
        'image'.
    size : scalar or tuple of integers
        See *footprint*.
    footprint : array
        Either *size* or *footprint* must be defined. *size* gives the
        shape taken from the input array around every element position;
        *footprint* is a boolean array selecting (implicitly shaped) which
        elements are passed to the filter, so ``size=(n,m)`` equals
        ``footprint=np.ones((n,m))``. A scalar *size* is expanded to the
        input's dimensionality.
    output : ndarray or dtype
        Array in which to place the filter output, or its dtype.
    mode : {'reflect', 'ignore', 'nearest', 'mirror', 'wrap'}
        Border handling; default 'ignore' (see above).
    origin : number
        Placement of the filter footprint; default 0 (centered).
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    local_histogram : ndarray
        The bin values of the local histograms for each voxel as a
        multi-dimensional image.
    """
    return _extract_feature(_extract_local_histogram, image, mask, bins=bins, rang=rang, cutoffp=cutoffp, size=size, footprint=footprint, output=output, mode=mode, origin=origin)
def hemispheric_difference(image, sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None, mask = slice(None)):
    r"""
    Computes the hemispheric intensity difference between the brain hemispheres of a brain image.

    The image is cut in half along the middle of the supplied ``cut_plane``.
    Each half is once treated as the active and once as the reference
    hemisphere: the reference is flipped along the cut plane, both halves are
    gaussian-smoothed with their respective sigmas and the reference is
    subtracted from the active image. The two resulting difference images are
    stitched back together, forming a hemispheric difference image of the same
    size as the original. If the number of slices along the cut plane is odd,
    the central slice is interpolated from the two hemisphere difference
    images when stitching them back together.

    Note that the supplied gaussian kernel sizes (sigmas) are sensitive to the
    images voxel spacing.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (for multi-spectral case).
    sigma_active : number or sequence of numbers
        Standard deviation for the Gaussian kernel of the active image, given
        either per axis as a sequence or as a single number valid for all
        axes. Treated as mm (the voxel spacing is taken into account).
    sigma_reference : number or sequence of numbers
        Standard deviation for the Gaussian kernel of the reference image;
        same conventions as ``sigma_active``.
    cut_plane : integer
        The axis along which to cut. This is usually the coronal plane.
    voxelspacing : sequence of floats
        The side-length of each voxel.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    hemispheric_difference : ndarray
        The intensity differences between the locally smoothed hemispheres of
        the image. The magnitude of each resulting voxel value denotes its
        asymmetry, the sign reveals the direction; i.e. the resulting image is
        symmetric in absolute values, but differs in sign.

    Raises
    ------
    ArgumentError
        If the supplied cut-plane dimension is invalid.
    """
    return _extract_feature(
        _extract_hemispheric_difference,
        image,
        mask,
        sigma_active=sigma_active,
        sigma_reference=sigma_reference,
        cut_plane=cut_plane,
        voxelspacing=voxelspacing,
    )
def _extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None):
    """
    Internal, single-image version of `hemispheric_difference`.

    Splits the image along the middle of ``cut_plane``, smoothes both halves,
    subtracts each from the (flipped) other and stitches the two difference
    images back together; an odd central slice is interpolated cubically.

    Raises
    ------
    ArgumentError
        If the supplied cut-plane dimension is invalid.
    """
    # constants
    INTERPOLATION_RANGE = int(10) # how many neighbouring values to take into account when interpolating the medial longitudinal fissure slice

    # check arguments
    if cut_plane >= image.ndim:
        # BUGFIX: 'suppliedc' typo in the error message
        raise ArgumentError('The supplied cut-plane ({}) is invalid, the image has only {} dimensions.'.format(cut_plane, image.ndim))

    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # compute the (presumed) location of the medial longitudinal fissure, treating also the special case of an odd number of slices, in which case a cut into two equal halves is not possible
    medial_longitudinal_fissure = int(image.shape[cut_plane] / 2)
    medial_longitudinal_fissure_excluded = image.shape[cut_plane] % 2

    # split the head into a dexter and sinister half along the saggital plane;
    # this is assumed to be consistent with a cut of the brain along the medial
    # longitudinal fissure, thus separating it into its hemispheres.
    # BUGFIX: slicer sequences are converted to tuples before indexing, since
    # indexing ndarrays with a *list* of slices is no longer supported by NumPy
    slicer = [slice(None)] * image.ndim
    slicer[cut_plane] = slice(None, medial_longitudinal_fissure)
    left_hemisphere = image[tuple(slicer)]
    slicer[cut_plane] = slice(medial_longitudinal_fissure + medial_longitudinal_fissure_excluded, None)
    right_hemisphere = image[tuple(slicer)]

    # flip right hemisphere image along cut plane
    slicer[cut_plane] = slice(None, None, -1)
    right_hemisphere = right_hemisphere[tuple(slicer)]

    # substract once left from right and once right from left hemisphere, including smoothing steps
    right_hemisphere_difference = _substract_hemispheres(right_hemisphere, left_hemisphere, sigma_active, sigma_reference, voxelspacing)
    left_hemisphere_difference = _substract_hemispheres(left_hemisphere, right_hemisphere, sigma_active, sigma_reference, voxelspacing)

    # re-flip right hemisphere image to original orientation
    right_hemisphere_difference = right_hemisphere_difference[tuple(slicer)]

    # estimate the medial longitudinal fissure if required
    if 1 == medial_longitudinal_fissure_excluded:
        left_slicer = [slice(None)] * image.ndim
        right_slicer = [slice(None)] * image.ndim
        left_slicer[cut_plane] = slice(-1 * INTERPOLATION_RANGE, None)
        right_slicer[cut_plane] = slice(None, INTERPOLATION_RANGE)
        interp_data_left = left_hemisphere_difference[tuple(left_slicer)]
        interp_data_right = right_hemisphere_difference[tuple(right_slicer)]
        interp_indices_left = list(range(-1 * interp_data_left.shape[cut_plane], 0))
        interp_indices_right = list(range(1, interp_data_right.shape[cut_plane] + 1))
        interp_data = numpy.concatenate((interp_data_left, interp_data_right), cut_plane)
        interp_indices = numpy.concatenate((interp_indices_left, interp_indices_right), 0)
        # cubic interpolation at position 0 (the excluded central slice)
        medial_longitudinal_fissure_estimated = interp1d(interp_indices, interp_data, kind='cubic', axis=cut_plane)(0)
        # add singleton dimension
        slicer[cut_plane] = numpy.newaxis
        medial_longitudinal_fissure_estimated = medial_longitudinal_fissure_estimated[tuple(slicer)]

    # stich images back together
    if 1 == medial_longitudinal_fissure_excluded:
        hemisphere_difference = numpy.concatenate((left_hemisphere_difference, medial_longitudinal_fissure_estimated, right_hemisphere_difference), cut_plane)
    else:
        hemisphere_difference = numpy.concatenate((left_hemisphere_difference, right_hemisphere_difference), cut_plane)

    # extract intensities and return
    return _extract_intensities(hemisphere_difference, mask)
def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0):
    """
    Internal, single-image version of `local_histogram`.

    Note: Values outside of the histograms range are not considered.
    Note: Mode constant is not available, instead a mode "ignore" is provided.
    Note: Default dtype of returned values is float.
    """
    if "constant" == mode:
        raise RuntimeError('boundary mode not supported')
    elif "ignore" == mode:
        # "ignore" is realised as a constant boundary with cval=0, which keeps
        # the out-of-image area out of every histogram bin
        mode = "constant"
    if 'image' == rang:
        # global range over the whole image, with percentile-based outlier cut-off
        rang = tuple(numpy.percentile(image[mask], cutoffp))
    elif not 2 == len(rang):
        raise RuntimeError('the rang must contain exactly two elements or the string "image"')

    _, bin_edges = numpy.histogram([], bins=bins, range=rang)
    # BUGFIX: `numpy.float` was removed in NumPy 1.24, the builtin `float` is
    # the documented replacement; also use an identity check instead of
    # `None == output`.
    output = _get_output(float if output is None else output, image, shape = [bins] + list(image.shape))

    # threshold the image into the histogram bins represented by the output images first dimension, treat last bin separately, since upper border is inclusive
    for i in range(bins - 1):
        output[i] = (image >= bin_edges[i]) & (image < bin_edges[i + 1])
    output[-1] = (image >= bin_edges[-2]) & (image <= bin_edges[-1])

    # apply the sum filter to each dimension, then normalize by dividing through the sum of elements in the bins of each histogram
    for i in range(bins):
        output[i] = sum_filter(output[i], size=size, footprint=footprint, output=None, mode=mode, cval=0.0, origin=origin)
    divident = numpy.sum(output, 0)
    divident[0 == divident] = 1 # avoid division by zero for empty histograms
    output /= divident

    # Notes on modes:
    # mode=constant with a cval outside histogram range for the histogram equals a mode=constant with a cval = 0 for the sum_filter
    # mode=constant with a cval inside histogram range for the histogram has no equal for the sum_filter (and does not make much sense)
    # mode=X for the histogram equals mode=X for the sum_filter

    # treat as multi-spectral image whose intensities are to be extracted
    return _extract_feature(_extract_intensities, [h for h in output], mask)
def _extract_median(image, mask = slice(None), size = 1, voxelspacing = None):
    """
    Internal, single-image version of `median`.

    Applies a median filter (structure size given in mm) and returns the
    filtered intensity values of the voxels selected by ``mask``.
    """
    # fall back to an isotropic spacing of 1mm when none was supplied
    spacing = [1.] * image.ndim if voxelspacing is None else voxelspacing
    # translate the mm-based structure element size into voxel units
    voxel_size = _create_structure_array(size, spacing)
    filtered = median_filter(image, voxel_size)
    return _extract_intensities(filtered, mask)
def _extract_gaussian_gradient_magnitude(image, mask = slice(None), sigma = 1, voxelspacing = None):
    """
    Internal, single-image version of `gaussian_gradient_magnitude`.

    Computes the gradient magnitude after gaussian smoothing (sigma given in
    mm) and returns the values of the voxels selected by ``mask``.
    """
    # fall back to an isotropic spacing of 1mm when none was supplied
    spacing = voxelspacing if voxelspacing is not None else [1.] * image.ndim
    # translate the mm-based sigma into voxel units
    kernel = _create_structure_array(sigma, spacing)
    return _extract_intensities(scipy_gaussian_gradient_magnitude(image, kernel), mask)
def _extract_shifted_mean_gauss(image, mask = slice(None), offset = None, sigma = 1, voxelspacing = None):
    """
    Internal, single-image version of `shifted_mean_gauss`.

    Gaussian-smoothes the image and shifts the result by ``offset`` voxels per
    dimension; the area exposed by the shift stays zero.
    """
    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim
    # set offset
    if offset is None:
        offset = [0] * image.ndim

    # determine gaussian kernel size in voxel units
    sigma = _create_structure_array(sigma, voxelspacing)

    # compute smoothed version of image
    smoothed = gaussian_filter(image, sigma)

    shifted = numpy.zeros_like(smoothed)
    in_slicer = []
    out_slicer = []
    for o in offset:
        in_slicer.append(slice(o, None))
        # BUGFIX: an offset of 0 formerly produced `slice(None, -0)` i.e.
        # `slice(None, 0)`, an empty selection that made the assignment below
        # fail (the default offsets are all zero); `None` keeps the full extent
        out_slicer.append(slice(None, -o if o else None))
    # BUGFIX: index with tuples -- list-of-slices ndarray indexing is no
    # longer supported by NumPy
    shifted[tuple(out_slicer)] = smoothed[tuple(in_slicer)]

    return _extract_intensities(shifted, mask)
def _extract_mask_distance(image, mask = slice(None), voxelspacing = None):
    """
    Internal, single-image version of `mask_distance`.

    Computes the euclidean distance transform of the mask (respecting the
    voxel spacing) and returns the distances of the masked voxels.
    """
    if isinstance(mask, slice):
        # no mask supplied: treat the complete image as foreground
        # BUGFIX: `numpy.bool` was removed in NumPy 1.24; the builtin `bool`
        # is the documented replacement
        mask = numpy.ones(image.shape, bool)
    distance_map = distance_transform_edt(mask, sampling=voxelspacing)
    return _extract_intensities(distance_map, mask)
def _extract_local_mean_gauss(image, mask = slice(None), sigma = 1, voxelspacing = None):
    """
    Internal, single-image version of `local_mean_gauss`.

    Gaussian-smoothes the image (sigma given in mm) and returns the values of
    the voxels selected by ``mask``.
    """
    # fall back to an isotropic spacing of 1mm when none was supplied
    spacing = voxelspacing if voxelspacing is not None else [1.] * image.ndim
    # translate the mm-based sigma into voxel units
    kernel = _create_structure_array(sigma, spacing)
    smoothed = gaussian_filter(image, kernel)
    return _extract_intensities(smoothed, mask)
def _extract_centerdistance(image, mask = slice(None), voxelspacing = None):
"""
Internal, single-image version of `centerdistance`.
"""
image = numpy.array(image, copy=False)
if None == voxelspacing:
voxelspacing = [1.] * image.ndim
# get image center and an array holding the images indices
centers = [(x - 1) / 2. for x in image.shape]
indices = numpy.indices(image.shape, dtype=numpy.float)
# shift to center of image and correct spacing to real world coordinates
for dim_indices, c, vs in zip(indices, centers, voxelspacing):
dim_indices -= c
dim_indices *= vs
# compute euclidean distance to image center
return numpy.sqrt(numpy.sum(numpy.square(indices), 0))[mask].ravel()
def _extract_intensities(image, mask = slice(None)):
"""
Internal, single-image version of `intensities`.
"""
return numpy.array(image, copy=True)[mask].ravel()
def _substract_hemispheres(active, reference, active_sigma, reference_sigma, voxel_spacing):
    """
    Helper function for `_extract_hemispheric_difference`.

    Smoothes the active and the reference image with their respective gaussian
    kernels (sigmas given in mm) and returns active minus reference.
    """
    smoothed_active = gaussian_filter(active, sigma=_create_structure_array(active_sigma, voxel_spacing))
    smoothed_reference = gaussian_filter(reference, sigma=_create_structure_array(reference_sigma, voxel_spacing))
    return smoothed_active - smoothed_reference
def _create_structure_array(structure_array, voxelspacing):
"""
Convenient function to take a structure array (single number valid for all dimensions
or a sequence with a distinct number for each dimension) assumed to be in mm and
returns a structure array (a sequence) adapted to the image space using the supplied
voxel spacing.
"""
try:
structure_array = [s / float(vs) for s, vs in zip(structure_array, voxelspacing)]
except TypeError:
structure_array = [structure_array / float(vs) for vs in voxelspacing]
return structure_array
def _extract_feature(fun, image, mask = slice(None), **kwargs):
"""
Convenient function to cope with multi-spectral images and feature normalization.
Parameters
----------
fun : function
The feature extraction function to call
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
mask : ndarray
The binary mask to select the voxels for which to extract the feature
kwargs : sequence
Additional keyword arguments to be passed to the feature extraction function
"""
if not type(mask) is slice:
mask = numpy.array(mask, copy=False, dtype=numpy.bool)
if type(image) == tuple or type(image) == list:
return join(*[fun(i, mask, **kwargs) for i in image])
else:
return fun(image, mask, **kwargs) | PypiClean |
/AMAS_sb-1.0.1-py3-none-any.whl/AMAS/recommend_reaction.py |
# recommend_reaction.py
"""
Predicts annotations of reaction(s) using a local XML file
and the reaction ID.
Usage: python recommend_reaction.py files/BIOMD0000000190.xml --min_score 0.6 --outfile res.csv
"""
import argparse
import os
from os.path import dirname, abspath
import sys
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from AMAS import constants as cn
from AMAS import recommender
def main():
  """Recommend reaction annotations of an SBML model and save the results as CSV.

  Reads the model path and filter options from the command line, predicts
  annotations for the selected reactions and writes the chosen candidates to
  the requested output file.
  """
  parser = argparse.ArgumentParser(description='Recommend reaction annotations of an SBML model and save results')
  parser.add_argument('model', type=str, help='SBML model file (.xml)')
  # One or more reaction IDs can be given
  parser.add_argument('--reaction', type=str, help='ID(s) of reaction(s) to be recommended. ' +\
                      'If not provided, all reactions will be used', nargs='*')
  parser.add_argument('--reject', type=int, help='number of the components of each reaction to reject. ' +\
                      'Only reactions with components greater than this value ' +\
                      'will be used. Default is zero', nargs='?', default=0)
  parser.add_argument('--cutoff', type=float, help='minimum match score cutoff', nargs='?', default=0.0)
  parser.add_argument('--method', type=str,
                      help='Choose either "top" or "above". "top" recommends ' +\
                           'the best candidates that are above the cutoff, ' +\
                           'and "above" recommends all candidates that are above ' +\
                           'the cutoff. Default is "top"',
                      nargs='?',
                      default='top')
  parser.add_argument('--outfile', type=str, help='file path to save recommendation', nargs='?',
                      default=os.path.join(os.getcwd(), 'reaction_rec.csv'))
  args = parser.parse_args()

  reacts = args.reaction
  reject = args.reject
  cutoff = args.cutoff
  method = args.method
  outfile = args.outfile

  # BUGFIX: the Recommender was previously constructed twice (once from
  # `args.model` and once from an alias of it); build it exactly once.
  recom = recommender.Recommender(libsbml_fpath=args.model)
  recom.current_type = 'reaction'
  # if nothing is given, predict all IDs
  if reacts is None:
    reacts = recom.getReactionIDs()
  print("...\nAnalyzing %d reaction(s)...\n" % len(reacts))
  # removing ids with less components than 'reject'
  filt_reacts = [val for val in reacts
                 if len(recom.reactions.reaction_components[val]) > reject]
  # stops if all elements were removed by filtering...
  if len(filt_reacts) == 0:
    print("No element found after the element filter.")
    return None
  res = recom.getReactionListRecommendation(pred_ids=filt_reacts, get_df=True)
  for idx, one_df in enumerate(res):
    filt_df = recom.autoSelectAnnotation(df=one_df,
                                         min_score=cutoff,
                                         method=method)
    recom.updateSelection(filt_reacts[idx], filt_df)
  # save file to csv
  recom.saveToCSV(outfile)
  print("Recommendations saved as:\n%s\n" % os.path.abspath(outfile))
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
  main()
/DI_engine-0.4.9-py3-none-any.whl/ding/entry/cli_ditask.py | import click
import os
import sys
import importlib
import importlib.util
import json
from click.core import Context, Option
from ding import __TITLE__, __VERSION__, __AUTHOR__, __AUTHOR_EMAIL__
from ding.framework import Parallel
from ding.entry.cli_parsers import PLATFORM_PARSERS
def print_version(ctx: Context, param: Option, value: bool) -> None:
    """Eager click callback: print title/version/author info and exit."""
    # click eager-callback contract: do nothing unless the flag was actually
    # passed and the parser is not in resilient (completion) mode
    if not value or ctx.resilient_parsing:
        return
    version_line = '{title}, version {version}.'.format(title=__TITLE__, version=__VERSION__)
    author_line = 'Developed by {author}, {email}.'.format(author=__AUTHOR__, email=__AUTHOR_EMAIL__)
    click.echo(version_line)
    click.echo(author_line)
    ctx.exit()
# Accept `-h` in addition to click's default `--help` flag.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# Command-line entry point for launching DI-engine tasks in parallel mode.
# The option set below mirrors the parameters of `_cli_ditask`.
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option(
    '-v',
    '--version',
    is_flag=True,
    callback=print_version,
    expose_value=False,
    is_eager=True,
    help="Show package's version information."
)
@click.option('-p', '--package', type=str, help="Your code package path, could be a directory or a zip file.")
@click.option('--parallel-workers', type=int, default=1, help="Parallel worker number, default: 1")
@click.option(
    '--protocol',
    type=click.Choice(["tcp", "ipc"]),
    default="tcp",
    help="Network protocol in parallel mode, default: tcp"
)
@click.option(
    "--ports",
    type=str,
    help="The port addresses that the tasks listen to, e.g. 50515,50516, default: k8s, local: 50515, slurm: 15151"
)
@click.option("--attach-to", type=str, help="The addresses to connect to.")
@click.option("--address", type=str, help="The address to listen to (without port).")
@click.option("--labels", type=str, help="Labels.")
@click.option("--node-ids", type=str, help="Candidate node ids.")
@click.option(
    "--topology",
    type=click.Choice(["alone", "mesh", "star"]),
    default="alone",
    help="Network topology, default: alone."
)
@click.option("--platform-spec", type=str, help="Platform specific configure.")
@click.option("--platform", type=str, help="Platform type: slurm, k8s.")
@click.option("--mq-type", type=str, default="nng", help="Class type of message queue, i.e. nng, redis.")
@click.option("--redis-host", type=str, help="Redis host.")
@click.option("--redis-port", type=int, help="Redis port.")
@click.option("-m", "--main", type=str, help="Main function of entry module.")
@click.option("--startup-interval", type=int, default=1, help="Start up interval between each task.")
@click.option("--local_rank", type=int, default=0, help="Compatibility with PyTorch DDP")
def cli_ditask(*args, **kwargs):
    # Thin click wrapper; all the real work happens in `_cli_ditask`, which
    # can also be invoked programmatically (e.g. re-entered by the platform
    # parsers with rewritten arguments).
    return _cli_ditask(*args, **kwargs)
def _parse_platform_args(platform: str, platform_spec: str, all_args: dict):
    """
    Resolve the platform-specific configuration and rewrite the CLI arguments.

    ``platform_spec`` may be a path to a ``.json`` file or an inline JSON
    string. Exits the process with code 1 on an invalid spec or an unknown
    platform type.
    """
    if platform_spec:
        try:
            # BUGFIX: `os.path.splitext` returns a `(root, ext)` tuple and the
            # extension keeps its dot, so the old comparison
            # `os.path.splitext(platform_spec) == "json"` was always False and
            # the file-path branch was dead code.
            if os.path.splitext(platform_spec)[1] == ".json":
                with open(platform_spec) as f:
                    platform_spec = json.load(f)
            else:
                platform_spec = json.loads(platform_spec)
        except Exception:  # narrowed from a bare `except:`
            click.echo("platform_spec is not a valid json!")
            exit(1)
    if platform not in PLATFORM_PARSERS:
        click.echo("platform type is invalid! type: {}".format(platform))
        exit(1)
    # the parser receives the remaining arguments only
    all_args.pop("platform")
    all_args.pop("platform_spec")
    try:
        parsed_args = PLATFORM_PARSERS[platform](platform_spec, **all_args)
    except Exception as e:
        click.echo("error when parse platform spec configure: {}".format(e))
        raise e
    return parsed_args
def _cli_ditask(
    package: str,
    main: str,
    parallel_workers: int,
    protocol: str,
    ports: str,
    attach_to: str,
    address: str,
    labels: str,
    node_ids: str,
    topology: str,
    mq_type: str,
    redis_host: str,
    redis_port: int,
    startup_interval: int,
    local_rank: int = 0,
    platform: str = None,
    platform_spec: str = None,
):
    """
    Resolve the entry module/function and launch it via ``Parallel.runner``.

    NOTE(review): ``all_args = locals()`` must remain the first statement of
    the body -- it snapshots the raw arguments (including ``local_rank``)
    before any other local variable is introduced, so that the platform
    parsers receive exactly the CLI parameters.
    """
    # Parse entry point
    all_args = locals()
    if platform:
        # Delegate to the platform-specific parser (slurm/k8s), which rewrites
        # the arguments, then re-enter this function with the parsed values.
        parsed_args = _parse_platform_args(platform, platform_spec, all_args)
        return _cli_ditask(**parsed_args)

    if not package:
        package = os.getcwd()
    sys.path.append(package)
    if main is None:
        # No entry given: assume `<package_basename>.main`.
        mod_name = os.path.basename(package)
        mod_name, _ = os.path.splitext(mod_name)
        func_name = "main"
    else:
        # e.g. "pkg.module.entry" -> module "pkg.module", function "entry"
        mod_name, func_name = main.rsplit(".", 1)
    root_mod_name = mod_name.split(".", 1)[0]
    sys.path.append(os.path.join(package, root_mod_name))
    mod = importlib.import_module(mod_name)
    main_func = getattr(mod, func_name)
    # Parse arguments
    ports = ports or 50515
    if not isinstance(ports, int):
        # e.g. "50515,50516" -> [50515, 50516]; a single entry collapses to int
        ports = ports.split(",")
        ports = list(map(lambda i: int(i), ports))
        ports = ports[0] if len(ports) == 1 else ports
    if attach_to:
        # comma-separated list of peer addresses
        attach_to = attach_to.split(",")
        attach_to = list(map(lambda s: s.strip(), attach_to))
    if labels:
        # comma-separated labels, deduplicated into a set
        labels = labels.split(",")
        labels = set(map(lambda s: s.strip(), labels))
    if node_ids and not isinstance(node_ids, int):
        node_ids = node_ids.split(",")
        node_ids = list(map(lambda i: int(i), node_ids))
    Parallel.runner(
        n_parallel_workers=parallel_workers,
        ports=ports,
        protocol=protocol,
        topology=topology,
        attach_to=attach_to,
        address=address,
        labels=labels,
        node_ids=node_ids,
        mq_type=mq_type,
        redis_host=redis_host,
        redis_port=redis_port,
        startup_interval=startup_interval
    )(main_func)
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/moment/locale/lv.js |
// UMD wrapper: register the locale with CommonJS, AMD or a global `moment`.
;(function (global, factory) {
   typeof exports === 'object' && typeof module !== 'undefined'
       && typeof require === 'function' ? factory(require('../moment')) :
   typeof define === 'function' && define.amd ? define(['../moment'], factory) :
   factory(global.moment)
}(this, (function (moment) { 'use strict';

    //! moment.js locale configuration
    // Latvian declension forms per unit. As consumed by `format` below:
    // forms[0]/forms[1] are used together with a suffix (pēc/pirms),
    // forms[2]/forms[3] stand alone; even indices are singular, odd plural.
    var units = {
        ss: 'sekundes_sekundēm_sekunde_sekundes'.split('_'),
        m: 'minūtes_minūtēm_minūte_minūtes'.split('_'),
        mm: 'minūtes_minūtēm_minūte_minūtes'.split('_'),
        h: 'stundas_stundām_stunda_stundas'.split('_'),
        hh: 'stundas_stundām_stunda_stundas'.split('_'),
        d: 'dienas_dienām_diena_dienas'.split('_'),
        dd: 'dienas_dienām_diena_dienas'.split('_'),
        M: 'mēneša_mēnešiem_mēnesis_mēneši'.split('_'),
        MM: 'mēneša_mēnešiem_mēnesis_mēneši'.split('_'),
        y: 'gada_gadiem_gads_gadi'.split('_'),
        yy: 'gada_gadiem_gads_gadi'.split('_'),
    };
    /**
     * @param withoutSuffix boolean true = a length of time; false = before/after a period of time.
     */
    function format(forms, number, withoutSuffix) {
        // Singular form applies when number ends in 1 but not in 11.
        if (withoutSuffix) {
            // E.g. "21 minūte", "3 minūtes".
            return number % 10 === 1 && number % 100 !== 11 ? forms[2] : forms[3];
        } else {
            // E.g. "21 minūtes" as in "pēc 21 minūtes".
            // E.g. "3 minūtēm" as in "pēc 3 minūtēm".
            return number % 10 === 1 && number % 100 !== 11 ? forms[0] : forms[1];
        }
    }
    // Units whose relative-time string includes the number (e.g. "3 dienas").
    function relativeTimeWithPlural(number, withoutSuffix, key) {
        return number + ' ' + format(units[key], number, withoutSuffix);
    }
    // Units rendered without the number (e.g. "diena").
    function relativeTimeWithSingular(number, withoutSuffix, key) {
        return format(units[key], number, withoutSuffix);
    }
    // "a few seconds" has its own fixed wording.
    function relativeSeconds(number, withoutSuffix) {
        return withoutSuffix ? 'dažas sekundes' : 'dažām sekundēm';
    }

    var lv = moment.defineLocale('lv', {
        months: 'janvāris_februāris_marts_aprīlis_maijs_jūnijs_jūlijs_augusts_septembris_oktobris_novembris_decembris'.split(
            '_'
        ),
        monthsShort: 'jan_feb_mar_apr_mai_jūn_jūl_aug_sep_okt_nov_dec'.split('_'),
        weekdays: 'svētdiena_pirmdiena_otrdiena_trešdiena_ceturtdiena_piektdiena_sestdiena'.split(
            '_'
        ),
        weekdaysShort: 'Sv_P_O_T_C_Pk_S'.split('_'),
        weekdaysMin: 'Sv_P_O_T_C_Pk_S'.split('_'),
        weekdaysParseExact: true,
        longDateFormat: {
            LT: 'HH:mm',
            LTS: 'HH:mm:ss',
            L: 'DD.MM.YYYY.',
            LL: 'YYYY. [gada] D. MMMM',
            LLL: 'YYYY. [gada] D. MMMM, HH:mm',
            LLLL: 'YYYY. [gada] D. MMMM, dddd, HH:mm',
        },
        calendar: {
            sameDay: '[Šodien pulksten] LT',
            nextDay: '[Rīt pulksten] LT',
            nextWeek: 'dddd [pulksten] LT',
            lastDay: '[Vakar pulksten] LT',
            lastWeek: '[Pagājušā] dddd [pulksten] LT',
            sameElse: 'L',
        },
        relativeTime: {
            future: 'pēc %s',
            past: 'pirms %s',
            s: relativeSeconds,
            ss: relativeTimeWithPlural,
            m: relativeTimeWithSingular,
            mm: relativeTimeWithPlural,
            h: relativeTimeWithSingular,
            hh: relativeTimeWithPlural,
            d: relativeTimeWithSingular,
            dd: relativeTimeWithPlural,
            M: relativeTimeWithSingular,
            MM: relativeTimeWithPlural,
            y: relativeTimeWithSingular,
            yy: relativeTimeWithPlural,
        },
        dayOfMonthOrdinalParse: /\d{1,2}\./,
        ordinal: '%d.',
        week: {
            dow: 1, // Monday is the first day of the week.
            doy: 4, // The week that contains Jan 4th is the first week of the year.
        },
    });

    return lv;

})));
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/gis/db/backends/oracle/operations.py | import re
from decimal import Decimal
from django.db.backends.oracle.base import DatabaseOperations
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.util import SpatialFunction
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.utils import six
class SDOOperation(SpatialFunction):
    "Base class for SDO* Oracle operations."
    sql_template = "%(function)s(%(geo_col)s, %(geometry)s) %(operator)s '%(result)s'"

    def __init__(self, func, **kwargs):
        # Default to comparing against the 'TRUE' literal with '=', the usual
        # result form of Oracle's SDO_* predicates.
        defaults = {'operator': '=', 'result': 'TRUE'}
        for key, default in defaults.items():
            kwargs.setdefault(key, default)
        super(SDOOperation, self).__init__(func, **kwargs)
class SDODistance(SpatialFunction):
    "Class for Distance queries."
    sql_template = ('%(function)s(%(geo_col)s, %(geometry)s, %(tolerance)s) '
                    '%(operator)s %(result)s')
    dist_func = 'SDO_GEOM.SDO_DISTANCE'

    def __init__(self, op, tolerance=0.05):
        # `op` is the comparison operator (e.g. '>', '<='); the distance value
        # itself is substituted in later through the '%s' result placeholder.
        super(SDODistance, self).__init__(
            self.dist_func, tolerance=tolerance, operator=op, result='%s')
class SDODWithin(SpatialFunction):
    "Class for Oracle's SDO_WITHIN_DISTANCE queries."
    dwithin_func = 'SDO_WITHIN_DISTANCE'
    # The '%%s' placeholder receives the 'distance=...' parameter string.
    sql_template = "%(function)s(%(geo_col)s, %(geometry)s, %%s) = 'TRUE'"

    def __init__(self):
        super(SDODWithin, self).__init__(self.dwithin_func)
class SDOGeomRelate(SpatialFunction):
    "Class for using SDO_GEOM.RELATE."
    relate_func = 'SDO_GEOM.RELATE'
    sql_template = ("%(function)s(%(geo_col)s, '%(mask)s', %(geometry)s, "
                    "%(tolerance)s) %(operator)s '%(mask)s'")

    def __init__(self, mask, tolerance=0.05):
        # SDO_GEOM.RELATE(...) has a peculiar argument order: column, mask,
        # geom, tolerance.  Moreover, the function result is the mask itself
        # (e.g., 'DISJOINT' instead of 'TRUE'), which is why the mask appears
        # twice in the SQL template above.
        super(SDOGeomRelate, self).__init__(self.relate_func, operator='=',
                                            mask=mask, tolerance=tolerance)
class SDORelate(SpatialFunction):
    "Class for using SDO_RELATE."
    masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
    # One mask keyword, optionally followed by further '+'-joined keywords.
    mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
    sql_template = "%(function)s(%(geo_col)s, %(geometry)s, 'mask=%(mask)s') = 'TRUE'"
    relate_func = 'SDO_RELATE'

    def __init__(self, mask):
        # Validate the full mask expression before embedding it into SQL.
        if not self.mask_regex.match(mask):
            raise ValueError('Invalid %s mask: "%s"' % (self.relate_func, mask))
        super(SDORelate, self).__init__(self.relate_func, mask=mask)
# Valid distance types and substitutions -- used to validate the values
# handed to the distance lookups (e.g. `distance_lt`, `dwithin`) below.
dtypes = (Decimal, Distance, float) + six.integer_types
class OracleOperations(DatabaseOperations, BaseSpatialOperations):
    # Spatial operations backend for Oracle Spatial (SDO_* functions).
    compiler_module = "django.contrib.gis.db.backends.oracle.compiler"
    name = 'oracle'
    oracle = True
    # Spatial aggregates supported by this backend.
    valid_aggregates = dict([(a, None) for a in ('Union', 'Extent')])
    Adapter = OracleSpatialAdapter
    Adaptor = Adapter # Backwards-compatibility alias.
    # Names of the Oracle SDO functions backing each spatial operation.
    area = 'SDO_GEOM.SDO_AREA'
    gml= 'SDO_UTIL.TO_GMLGEOMETRY'
    centroid = 'SDO_GEOM.SDO_CENTROID'
    difference = 'SDO_GEOM.SDO_DIFFERENCE'
    distance = 'SDO_GEOM.SDO_DISTANCE'
    extent= 'SDO_AGGR_MBR'
    intersection= 'SDO_GEOM.SDO_INTERSECTION'
    length = 'SDO_GEOM.SDO_LENGTH'
    num_geom = 'SDO_UTIL.GETNUMELEM'
    num_points = 'SDO_UTIL.GETNUMVERTICES'
    perimeter = length
    point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
    reverse = 'SDO_UTIL.REVERSE_LINESTRING'
    sym_difference = 'SDO_GEOM.SDO_XOR'
    transform = 'SDO_CS.TRANSFORM'
    union = 'SDO_GEOM.SDO_UNION'
    unionagg = 'SDO_AGGR_UNION'
    # We want to get SDO Geometries as WKT because it is much easier to
    # instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
    # However, this adversely affects performance (i.e., Java is called
    # to convert to WKT on every query).  If someone wishes to write a
    # SDO_GEOMETRY(...) parser in Python, let me know =)
    select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
    # Distance lookups map to an (operation, valid argument types) pair.
    distance_functions = {
        'distance_gt' : (SDODistance('>'), dtypes),
        'distance_gte' : (SDODistance('>='), dtypes),
        'distance_lt' : (SDODistance('<'), dtypes),
        'distance_lte' : (SDODistance('<='), dtypes),
        'dwithin' : (SDODWithin(), dtypes),
        }
    geometry_functions = {
        'contains' : SDOOperation('SDO_CONTAINS'),
        'coveredby' : SDOOperation('SDO_COVEREDBY'),
        'covers' : SDOOperation('SDO_COVERS'),
        'disjoint' : SDOGeomRelate('DISJOINT'),
        'intersects' : SDOOperation('SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
        'equals' : SDOOperation('SDO_EQUAL'),
        'exact' : SDOOperation('SDO_EQUAL'),
        'overlaps' : SDOOperation('SDO_OVERLAPS'),
        'same_as' : SDOOperation('SDO_EQUAL'),
        'relate' : (SDORelate, six.string_types), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
        'touches' : SDOOperation('SDO_TOUCH'),
        'within' : SDOOperation('SDO_INSIDE'),
        }
    geometry_functions.update(distance_functions)
    # All valid GIS lookup terms: 'isnull' plus every geometry function name.
    gis_terms = ['isnull']
    gis_terms += list(geometry_functions)
    gis_terms = dict([(term, None) for term in gis_terms])
    truncate_params = {'relate' : None}
def convert_extent(self, clob):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = Geometry(clob.read())
gtype = str(ext_geom.geom_type)
if gtype == 'Polygon':
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == 'Point':
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def convert_geom(self, clob, geo_field):
if clob:
return Geometry(clob.read(), geo_field.srid)
else:
return None
    def geo_db_type(self, f):
        """
        Returns the geometry database type for Oracle. Unlike other spatial
        backends, no stored procedure is necessary and it's the same for all
        geometry types.

        The field argument ``f`` is unused here, but kept for interface
        parity with the other spatial backends.
        """
        return 'MDSYS.SDO_GEOMETRY'
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
# dwithin lookups on oracle require a special string parameter
# that starts with "distance=".
if lookup_type == 'dwithin':
dist_param = 'distance=%s' % dist_param
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
SDO_CS.TRANSFORM() function call.
"""
if value is None:
return 'NULL'
def transform_value(val, srid):
return val.srid != srid
if hasattr(value, 'expression'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitue in
# the column name instead.
return placeholder % self.get_expression_column(value)
else:
if transform_value(value, f.srid):
return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
else:
return 'SDO_GEOMETRY(%%s, %s)' % f.srid
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
    """Return the SQL WHERE clause for use in Oracle spatial SQL construction.

    Raises ValueError for malformed tuple arguments and TypeError for an
    unknown lookup type.
    """
    alias, col, db_type = lvalue
    # Quoted "table"."column" reference.
    geo_col = '%s.%s' % (qn(alias), qn(col))
    # See if an Oracle Geometry function matches the lookup type next.
    lookup_info = self.geometry_functions.get(lookup_type, False)
    if lookup_info:
        if isinstance(lookup_info, tuple):
            # Tuple lookup types ('relate', 'dwithin') take a 2-tuple
            # (geometry, extra-argument).  First element of lookup_info is
            # the SDO operation, second the expected argument type.
            sdo_op, arg_type = lookup_info
            # BUG FIX: validate that `value` is a well-formed 2-tuple
            # *before* indexing it, so a bad value raises the intended
            # ValueError instead of a TypeError/KeyError from `value[0]`.
            if not isinstance(value, tuple):
                raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
            if len(value) != 2:
                raise ValueError('2-element tuple required for %s lookup type.' % lookup_type)
            geom = value[0]
            # Ensuring the argument type matches what we expect.
            if not isinstance(value[1], arg_type):
                raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
            if lookup_type == 'relate':
                # The SDORelate class handles construction for these queries,
                # and verifies the mask argument.
                return sdo_op(value[1]).as_sql(geo_col, self.get_geom_placeholder(field, geom))
            # Otherwise, just call the `as_sql` method on the SDOOperation instance.
            return sdo_op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
        # Lookup info is a SDOOperation instance, whose `as_sql` method returns
        # the SQL necessary for the geometry function call. For example:
        #   SDO_CONTAINS("geoapp_country"."poly", SDO_GEOMTRY('POINT(5 23)', 4326)) = 'TRUE'
        return lookup_info.as_sql(geo_col, self.get_geom_placeholder(field, value))
    elif lookup_type == 'isnull':
        # Handling 'isnull' lookup type
        return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
    raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
    """
    Return the spatial aggregate SQL template and function for the
    given Aggregate instance.
    """
    agg_name = agg.__class__.__name__.lower()
    if agg_name == 'union':
        # The Oracle union aggregate lives on the "unionagg" attribute.
        agg_name += 'agg'
    if agg.is_extent:
        sql_template = '%(function)s(%(field)s)'
    else:
        # Non-extent aggregates need an SDOAGGRTYPE wrapper with a tolerance.
        sql_template = '%(function)s(SDOAGGRTYPE(%(field)s,%(tolerance)s))'
    return self.select % sql_template, getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
    """Return the GeometryColumns model class for the Oracle backend."""
    from django.contrib.gis.db.backends.oracle import models
    return models.GeometryColumns
def spatial_ref_sys(self):
    """Return the SpatialRefSys model class for the Oracle backend."""
    from django.contrib.gis.db.backends.oracle import models
    return models.SpatialRefSys
def modify_insert_params(self, placeholders, params):
    """Drop insert parameters whose placeholder is 'NULL'.

    Needed for the Oracle Spatial backend due to #10888 (Oracle cannot
    bind a parameter for a NULL geometry placeholder).

    Note: this does not handle bulk insert cases (multiple rows of
    placeholders).
    """
    assert len(placeholders) == 1
    # `six.moves.zip` was unnecessary here -- the builtin `zip` produces the
    # same pairs and the result is consumed eagerly by the comprehension.
    return [[param for pholder, param in zip(placeholders[0], params[0])
             if pholder != 'NULL']]
import numpy as np
from . import _galsim
from .gsobject import GSObject
from .gsparams import GSParams
from .utilities import lazy_property, doc_inherit
from .angle import arcsec, AngleUnit, radians
from .deltafunction import DeltaFunction
class SecondKick(GSObject):
    """Class describing the expectation value of the high-k turbulence portion of an atmospheric
    PSF convolved by an `Airy` PSF.

    The power spectrum of atmospheric phase fluctuations is assumed to follow the von Karman
    prescription, but possibly modified by the addition of a critical scale below which the power
    is zero.  (See the `VonKarman` docstring for more details).

    As an expectation value, this profile is formally only exact in the infinite-exposure limit.
    However, at least for large apertures, we have found that this expectation value is approached
    rapidly, and can be applied for even fairly small exposure times.

    The intended use for this profile is as a correction to applying the geometric approximation to
    `PhaseScreenPSF` objects when drawing using geometric photon shooting.  In this case, the
    `PhaseScreenPSF` will simulate the effects of the low frequency turbulence modes, which can be
    treated purely using refraction, while the SecondKick handles the high frequency modes.

    The geometric approximation is only valid for length scales larger than some critical scale
    where the effects of interference are unimportant.  For smaller length scales, interference
    (diffraction) must be handled using an optical paradigm that acknowledges the wave nature of
    light, such as Fourier optics.

    Fourier optics calculations are many orders of magnitude slower than geometric optics
    calculations for typical flux levels, however, so we implement a scale-splitting algorithm first
    described in Peterson et al. (2015) for the LSST PhoSim package.  Essentially, phase
    fluctuations below a critical mode in Fourier space, labeled ``kcrit``, are handled by the fast
    geometric optics calculations present in `PhaseScreenPSF`.  Fluctuations for Fourier modes above
    ``kcrit`` are then calculated analytically by SecondKick.  Because very many oscillations of
    these high-k modes both fit within a given telescope aperture and pass by the aperture during a
    moderate length exposure time, we can use the same analytic expectation value calculation for
    the high-k component of all PSFs across a field of view, thus incurring the somewhat expensive
    calculation for Fourier optics only once.

    There are two limiting cases for this profile that may helpful for readers trying to understand
    how this class works.  When kcrit = 0, then all turbulent modes are included, and this surface
    brightness profile becomes identical to the convolution of an `Airy` profile and a Von Karman
    profile.  In contrast, when kcrit = inf, then none of the turbulent modes are included, and this
    surface brightness profile is just an `Airy` profile.  In other words, the full effect of an
    `Airy` profile, and additionally some portion (which depends on kcrit) of a `VonKarman` profile
    are modeled.

    For more details, we refer the reader to the original implementation described in

        Peterson et al.  2015  ApJSS  vol. 218

    Parameters:
        lam:            Wavelength in nanometers
        r0:             Fried parameter in meters.
        diam:           Aperture diameter in meters.
        obscuration:    Linear dimension of central obscuration as fraction of aperture
                        linear dimension. [0., 1.).  [default: 0.0]
        kcrit:          Critical Fourier mode (in units of 1/r0) below which the turbulence
                        power spectrum will be truncated.  [default: 0.2]
        flux:           The flux (in photons/cm^2/s) of the profile. [default: 1]
        scale_unit:     Units assumed when drawing this profile or evaluating xValue, kValue,
                        etc.  Should be a `galsim.AngleUnit` or a string that can be used to
                        construct one (e.g., 'arcsec', 'radians', etc.).  [default: galsim.arcsec]
        gsparams:       An optional `GSParams` argument. [default: None]
    """
    _req_params = { "lam" : float, "r0" : float, "diam" : float }
    _opt_params = { "obscuration" : float, "kcrit" : float, "flux" : float, "scale_unit" : str }

    _has_hard_edges = False
    _is_axisymmetric = True
    _is_analytic_x = False
    _is_analytic_k = True

    def __init__(self, lam, r0, diam, obscuration=0, kcrit=0.2, flux=1,
                 scale_unit=arcsec, gsparams=None):
        if isinstance(scale_unit, str):
            self._scale_unit = AngleUnit.from_name(scale_unit)
        else:
            self._scale_unit = scale_unit
        # Conversion factor from radians to the working angular unit.
        self._scale = radians / self._scale_unit
        self._flux = float(flux)
        self._r0 = float(r0)
        self._lam = float(lam)
        self._diam = float(diam)
        self._obscuration = float(obscuration)
        self._kcrit = float(kcrit)
        self._gsparams = GSParams.check(gsparams)

    @lazy_property
    def _sbs(self):
        # C++-layer SecondKick profile; lam is in nm, so 1.e-9 converts to meters.
        lam_over_r0 = (1.e-9*self._lam/self._r0)*self._scale
        return _galsim.SBSecondKick(lam_over_r0, self._kcrit, self._flux, self._gsparams._gsp)

    @lazy_property
    def _sba(self):
        # C++-layer Airy profile for the aperture.
        lam_over_diam = (1.e-9*self._lam/self._diam)*self._scale
        return _galsim.SBAiry(lam_over_diam, self._obscuration, 1., self._gsparams._gsp)

    @lazy_property
    def _sbd(self):
        # Delta-function component carrying the unscattered flux reported by
        # the C++ SecondKick profile.
        return _galsim.SBDeltaFunction(self._sbs.getDelta(), self._gsparams._gsp)

    @lazy_property
    def _sbp(self):
        # Full profile: (SecondKick + delta function) convolved with the Airy.
        full_sbs = _galsim.SBAdd([self._sbs, self._sbd], self._gsparams._gsp)
        return _galsim.SBConvolve([full_sbs, self._sba], False, self._gsparams._gsp)

    @property
    def lam(self):
        """The input lam value.
        """
        return self._lam

    @property
    def r0(self):
        """The input r0 value.
        """
        return self._r0

    @property
    def diam(self):
        """The input diam value.
        """
        return self._diam

    @property
    def obscuration(self):
        """The input obscuration value.
        """
        return self._obscuration

    @property
    def kcrit(self):
        """The input kcrit value.
        """
        return self._kcrit

    @property
    def scale_unit(self):
        """The input scale_unit value.
        """
        return self._scale_unit

    def _structure_function(self, rho):
        return self._sbs.structureFunction(rho)

    def __eq__(self, other):
        return (self is other or
                (isinstance(other, SecondKick) and
                 self.lam == other.lam and
                 self.r0 == other.r0 and
                 self.diam == other.diam and
                 self.obscuration == other.obscuration and
                 self.kcrit == other.kcrit and
                 self.flux == other.flux and
                 self.scale_unit == other.scale_unit and
                 self.gsparams == other.gsparams))

    def __hash__(self):
        return hash(("galsim.SecondKick", self.lam, self.r0, self.diam, self.obscuration,
                     self.kcrit, self.flux, self.scale_unit, self.gsparams))

    def __repr__(self):
        out = "galsim.SecondKick("
        out += "lam=%r"%self.lam
        out += ", r0=%r"%self.r0
        out += ", diam=%r"%self.diam
        if self.obscuration != 0.0:
            out += ", obscuration=%r"%self.obscuration
        out += ", kcrit=%r"%self.kcrit
        if self.flux != 1:
            out += ", flux=%r"%self.flux
        if self.scale_unit != arcsec:
            out += ", scale_unit=%r"%self.scale_unit
        out += ", gsparams=%r)"%self.gsparams
        return out

    def __str__(self):
        return "galsim.SecondKick(lam=%r, r0=%r, kcrit=%r)"%(self.lam, self.r0, self.kcrit)

    def __getstate__(self):
        # The C++-layer profiles are not picklable; drop the lazy caches and
        # let them be rebuilt on demand after unpickling.
        d = self.__dict__.copy()
        d.pop('_sbp',None)
        d.pop('_sba',None)
        d.pop('_sbd',None)
        d.pop('_sbs',None)
        return d

    def __setstate__(self, d):
        self.__dict__ = d

    @property
    def _maxk(self):
        return self._sbp.maxK()

    @property
    def _stepk(self):
        return self._sbp.stepK()

    @property
    def _max_sb(self):
        # The delta-function component makes the peak surface brightness
        # formally infinite.
        return DeltaFunction._mock_inf

    def _xValue(self, pos):
        return self._sbp.xValue(pos._p)

    def _kValue(self, kpos):
        return self._sbp.kValue(kpos._p)

    def _shoot(self, photons, rng):
        self._sbp.shoot(photons._pa, rng._rng)

    def _drawKImage(self, image, jac=None):
        _jac = 0 if jac is None else jac.__array_interface__['data'][0]
        self._sbp.drawK(image._image, image.scale, _jac)

    @doc_inherit
    def withFlux(self, flux):
        return SecondKick(lam=self.lam, r0=self.r0, diam=self.diam, obscuration=self.obscuration,
                          kcrit=self.kcrit, flux=flux, scale_unit=self.scale_unit,
                          gsparams=self.gsparams)
// HTML fragment for one dashboard grid box ("{0}" is the graph id, filled in
// via strFormat): a title bar with lock/settings controls plus the
// .chart-graph container the chart is rendered into.
var box_template = ' \
<div data-gs-min-height="4" data-gs-min-width="6"> \
<div class="grid-stack-item-content"> \
<div class="chart-wrapper"> \
<div class="chart-title bold"> \
<table class="table input-title-level-2"> \
<tr class="active" style="padding-left: 10%;"> \
<td style="padding: 0px; padding-left: 5px;"> \
<button class="fa fa-fw fa-sm fa-circle-o-notch" onclick=toggleGridMovable(this) style="background: none;background-color: inherit;border: none; padding: 0px 0px"> \
</button></td> \
<td style="padding: 0px; width: 90%; padding-left: 5px"> \
<input class="form-control input-lg input-title-level-2" maxlength="128" placeholder="Naming your graph"> \
</td> \
<td style="padding: 0px; width: 10%"> \
<ul class="nav navbar-nav" style="padding-left: 7%;"> \
<li class="dropdown"> \
<a href="#" class="dropdown-toggle" data-toggle="dropdown" style="padding: 2px 2px;"><span class="fa fa-fw fa-lg fa-cog" style="color: green"></span></a> \
<ul class="dropdown-menu" style="min-width: 30px;"> \
<button class="btn btn-primary edit-button" data-toggle="modal" data-target="#myModal"> \
<i class="fa fa-fw fa-sm fa-edit" style="color: black;" graph-id={0} onclick="editModal(this)"></i> \
</button> \
<button class="btn btn-primary edit-button"> \
<i class="fa fa-fw fa-sm fa-group" style="color: black;"></i> \
</button> \
<li class="divider" style="margin: auto;"></li> \
<button class="btn btn-primary edit-button" onclick=deleteGraph(this)> \
<i class="fa fa-fw fa-sm fa-times-circle" style="color: black;" ></i> \
</button> \
</ul> \
</li> \
</ul> \
</td> \
</tr> \
</table> \
</div> \
<div class="chart-graph" graph-id={0} style="width: 100%; overflow-x:auto; overflow-y:auto; color: #444;" type_name="none" key_name="none"> \
</div> \
</div> \
</div> \
</div>';
// the head row of a table
// Column-header fragment ("{0}" is the column label): a dropdown button with
// x/y checkboxes wired to markXy().
var th_template = ' \
<div class="btn-group"> \
<button type="button" class="btn btn-xs dropdown-toggle btn-success" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" style="padding: 0px 0px;"> \
{0} <span class="caret"></span> \
</button> \
<ul class="dropdown-menu"> \
<li style="width: 50px;"><input type="checkbox" onclick="markXy(this, 1)" style="margin-left: 15px;"> x</li> \
<li style="width: 50px;"><input type="checkbox" onclick="markXy(this, 0)" style="margin-left: 15px;"> y</li> \
</ul> \
</div>'
// setting dropdown box in home page
// ("{0}" is the dashboard id, passed to deleteDash() on click.)
var setting_template = ' \
<ul class="nav navbar-nav"> \
<li class="dropdown"> \
<a href="#" class="dropdown-toggle" data-toggle="dropdown" style="padding: 2px 2px;"><span class="fa fa-fw fa-lg fa-cog" style="color: green"></span></a> \
<ul class="dropdown-menu" style="min-width: 20px;"> \
<li ><a><span class="fa fa-fw fa-sm fa-group"></span></a></li> \
<li class="divider" style="margin: auto;"></li> \
<li onclick=deleteDash({0})><a href="#"><span class="fa fa-fw fa-sm fa-times-circle"></span></a></li> \
</ul> \
</li> \
</ul>'
function deleteGraph(obj) {
    // Remove a graph box from the dashboard: drop its entry from the stored
    // dashboard, detach the gridstack widget, then persist the new layout.
    var grid = $('.grid-stack').data('gridstack');
    var dash = store.get(store.get("current-dash"));
    var item = $(obj).parents(".grid-stack-item")[0];
    delete dash.grid[item.getAttribute("graph-id")];
    store.set(store.get("current-dash"), dash);
    grid.remove_widget(item);
    saveDash();
}
function editModal(obj) {
    // Reset the modal's working draft for the graph being edited.
    var draft = {
        "graph_id": obj.getAttribute("graph-id"),
        key: "",
        type: "",
        option: {"x": [], "y": []}
    };
    store.set("modal", draft);
    console.log(store.getAll());
}
function toggleGridMovable(obj){
    // Toggle layout-edit mode: at most one "lock" button is active at a time;
    // while active, grid items can be dragged and resized.
    // clear any active button if set before
    $.each($("button.fa-circle-o-notch"), function(index, btn_obj){
        if (obj != btn_obj){
            btn_obj.className.includes("down") ? btn_obj.classList.remove("fa-spin") : null;
            btn_obj.className.includes("down") ? btn_obj.classList.remove("down") : null;
            btn_obj.style.color = "";
        }
    });
    // tag a new button
    $(obj).toggleClass("down");
    $('.grid-stack').data('gridstack').movable('.grid-stack-item', obj.className.includes("down"));
    $('.grid-stack').data('gridstack').resizable('.grid-stack-item', obj.className.includes("down"));
    // A green spinning icon signals that the layout is unlocked.
    obj.style.color = obj.className.includes("down") ? "green" : "";
    obj.className.includes("down") ? obj.classList.add("fa-spin") : obj.classList.remove("fa-spin");
    console.log("grid stack movable");
}
// re-arrange when mouse hover on the side bar
function resizeContent(direction) {
    // direction == 1 means the side bar is expanded: push the content right.
    var padding;
    if (direction == 1) {
        padding = "100px";
    } else {
        padding = "0px";
    }
    document.getElementById("main-content").style.paddingLeft = padding;
}
// initialize gridstack configure
function initGridstack() {
    // 12-column animated layout; widgets start out non-movable until the
    // user unlocks them via toggleGridMovable().
    $('.grid-stack').gridstack({
        width: 12,
        animate: true,
        vertical_margin: 5,
        resizable: {handles: "e, se, s, sw, w"},
        movable: false,
    });
}
// create gridstack grids according the data from server
function createGrids(){
    // Rebuild the dashboard page from server state: fetch the dashboard,
    // cache it in local storage, create one grid box per stored graph and
    // load each graph's data by key.
    // clear local storage
    store.clear();
    var grid = $('.grid-stack').data('gridstack');
    var dash_id = $("meta[name=dash_id]")[0].attributes.value.value;
    var dash_content = getDash(dash_id);
    store.set(strFormat("dash-{0}", dash_id), dash_content);
    store.set("current-dash", strFormat("dash-{0}", dash_id));
    var tmp = null;
    var graph_with_key = {}
    // initialized boxes using data from server & set key_name and type_name attribute
    $("#dashboard_name")[0].value = dash_content.name;
    $.each(dash_content.grid, function(index, obj){
        tmp = grid.add_widget(strFormat(box_template, index), obj.x, obj.y, obj.width, obj.height);
        tmp[0].setAttribute("graph-id", index);
        $(tmp).find("input.input-title-level-2")[0].value = obj.graph_name;
        $(tmp).find(".chart-graph")[0].setAttribute("key_name", obj.key);
        $(tmp).find(".chart-graph")[0].setAttribute("type_name", obj.type);
        $(tmp).find(".chart-graph")[0].setAttribute("graph_id", index);
        graph_with_key[obj.id] = obj.key;
    })
    // initialized graph data
    // NOTE(review): `current_dash` is assigned without var/let and becomes an
    // implicit global -- confirm whether other functions depend on that.
    current_dash = store.get(store.get("current-dash"));
    $.each(graph_with_key, function(index, key){
        if (key == "none"){
            console.log("no key exist");
        }else{
            // Fetch the key's data asynchronously, cache it, then render.
            $.getJSON(api_root + "key/" + key, function(data){
                store.set(key, $.parseJSON(data.data));
                console.log($(strFormat("div [graph-id={0}] .chart-graph", index)));
                initChart(current_dash.grid[index].type, index)
                console.log($.parseJSON(data.data));
            })
            // $.ajax({
            //     url: api_root + "key/" + key,
            //     method: "GET",
            //     dataType: "JSONP",
            //     contentType: "application/json",
            //     async: false,
            // })
            // .success(function(data){
            //     store.set(key, $.parseJSON(data.data));
            //     initChart(current_dash.grid[index].type, index)
            //     console.log($.parseJSON(data.data));
            // })
        }
    })
    // make it unmovable after init
    $('.grid-stack').data('gridstack').movable('.grid-stack-item', false);
    $('.grid-stack').data('gridstack').resizable('.grid-stack-item', false);
}
// get all the keys from server
function registerKeysFunc() {
    // Each time a modal trigger is clicked, refresh the <select id="keys">
    // options from the backend.
    $("[data-target]").on("click", function() {
        var keySelect = $("#keys");
        keySelect.empty();
        $.getJSON(api_root + "keys", function(data) {
            $.each(data.data, function(idx, key) {
                keySelect.append("<option>" + key + "</option>");
            });
        });
    });
}
// get the value for a key and parse it as a table by default
function getValue(){
    // When the user picks a key, fetch its value, cache it in local storage,
    // record the choice on the modal draft and render a table preview.
    $("#keys").on("change", function(){
        var selectDOM = $("#keys")[0];
        var key = selectDOM.options[selectDOM.selectedIndex].text;
        var modal = store.get("modal");
        // var url = "http://127.0.0.1:9090/key/" + key;
        var url = api_root + "key/" + key;
        $.getJSON(url, function(data){
            var jsonData = $.parseJSON(data.data);
            store.set(key, jsonData);
            modal.key = key;
            modal.type = "table"; // default graph type
            store.set("modal", modal);
            drawChartIntoModal("table");
        })
        // change the btn-chart, table button default as clicked
        $(".btn-chart")[0].classList.add("active");
    });
}
function genElement(type) {
    // document.createElement wrapper used by the table/list builders.
    var el = document.createElement(type);
    if (type == "td") {
        // Keep table cells on a single line.
        el.setAttribute("nowrap", "nowrap");
    }
    return el;
}
function markXy(obj, xy){
    // Toggle a column's role as a chart axis: xy==1 marks x, xy==0 marks y.
    // Highlights the column-header button and records the choice on the
    // "modal" draft in local storage.
    var btn_type = xy ? 'btn-info' : 'btn-warning';
    // obj is the checkbox; three levels up is the .btn-group wrapper whose
    // first child is the column-header button (see th_template).
    var axesName = obj.parentElement.parentElement.parentElement.children[0].innerText.trim();
    var modalData = store.get("modal");
    // change hightlight colour
    if (obj.checked){
        var node = obj.parentElement.parentElement.parentElement.children[0];
        node.classList.remove('btn-success');
        node.classList.add(btn_type);
        // push axes info
        modalData.option[xy ? "x" : "y"].push(axesName);
    }else{
        var node = obj.parentElement.parentElement.parentElement.children[0];
        node.classList.remove(btn_type);
        node.classList.add('btn-success');
        // remove axes info
        modalData.option[xy ? "x" : "y"] = $.grep(modalData.option[xy ? "x" : "y"], function(value){return value != axesName});
    }
    store.set("modal", modalData);
    console.log(store.get("modal"));
}
function parseTable(data, selector){
    // Render a column-oriented data object ({column: {rowIndex: value}}) as
    // an HTML table appended to `selector`.  Each column header carries the
    // x/y-axis dropdown from th_template.
    // (Cleanup: the original created stray unused th/td elements here.)
    var table = genElement("table");
    var thead = genElement("thead");
    var tbody = genElement("tbody");
    // Header row: leading empty cell for the index column, then one cell
    // per data column.
    var headRow = genElement("tr");
    headRow.appendChild(genElement("th"));
    var columns = [];
    $.each(data, function(key, value){
        var th = genElement("th");
        th.innerHTML = strFormat(th_template, "  " + key + "  ");
        headRow.appendChild(th);
        columns.push(key);
    })
    thead.appendChild(headRow);
    // Row labels come from the first column's keys.
    var indexes = [];
    $.each(data[columns[0]], function(index, value){
        indexes.push(index);
    })
    for (var row = 0; row < indexes.length; row++) {
        var tr = genElement("tr");
        var th = genElement("th");
        th.innerText = indexes[row];
        tr.appendChild(th);
        $.each(columns, function(no_user, col){
            var td = genElement("td");
            td.innerText = data[col][indexes[row]];
            tr.appendChild(td);
        })
        tbody.appendChild(tr);
    };
    table.setAttribute("id", "table_value");
    table.setAttribute("border", "1px");
    table.className = "table-condensed table-hover";
    table.style.fontSize = "small";
    table.style.fontWeight = "400";
    // add table
    table.appendChild(thead);
    table.appendChild(tbody);
    $(selector)[0].appendChild(table);
}
function saveGraph() {
    // Copy the modal's draft settings onto the current dashboard's graph
    // entry and redraw the graph inside its grid box.
    var draft = store.get("modal");
    var dash = store.get(store.get("current-dash"));
    drawChartIntoGrid(draft.type, draft.graph_id);
    var graph = dash.grid[draft.graph_id];
    graph.key = draft.key;
    graph.type = draft.type;
    graph.option = draft.option;
    store.set(store.get("current-dash"), dash);
}
function saveDash(){
    // Persist the whole dashboard: validate the name, collect the current
    // geometry of every visible grid item, merge it into the stored dash
    // and PUT it to the server.
    var dash = store.get(store.get("current-dash"));
    // dash name
    var dashName = $("#dashboard_name")[0].value; // must need
    if (100 < dashName.length || dashName.length < 4) {
        alert("dashboard name note valid, digits should between 4 and 100, thanks.")
        return null;
    }
    dash.name = dashName;
    // dash data: one entry per visible gridstack item, geometry taken from
    // the live gridstack node.
    var res = _.map($('.grid-stack .grid-stack-item:visible'), function (el) {
        el = $(el);
        var node = el.data('_gridstack_node');
        var name = el.find("input.input-title-level-2")[0].value;
        var key = dash.grid[el[0].getAttribute("graph-id")].key;
        var type = dash.grid[el[0].getAttribute("graph-id")].type;
        var option = dash.grid[el[0].getAttribute("graph-id")].option;
        var grid = {
            id: el.attr("graph-id"),
            x: node.x,
            y: node.y,
            option: option,
            width: node.width,
            height: node.height,
            key: (key) ? key : "none",
            type: (type) ? type : "none",
            graph_name: (name) ? name : "hi, give me a name ^_^",
        };
        dash.grid[el[0].getAttribute("graph-id")] = grid;
        return grid;
    });
    store.set(store.get("current-dash"), dash);
    var resJson = JSON.stringify(dash);
    // var url = "http://127.0.0.1:9090/data/dash/" + dash.id;
    var url = api_root + "data/dash/" + dash.id;
    var method = "PUT";
    $.ajax({
        url: url,
        data: resJson,
        method: method,
        contentType: "application/json"
    })
    .done(function(){console.log("ajax done")})
    .fail(function(){console.log("ajax fail")})
    .success(function(data){
        console.log("ajax success");
        console.log(data);
    })
    .complete(function(){console.log("ajax complete")})
    .always(function(){console.log("ajax always")})
    ;
}
function getDash(dash_id){
    // Fetch one dashboard synchronously (async: false) so callers can use
    // the return value directly.
    // var url = "http://127.0.0.1:9090/data/dash/" + dash_id;
    var url = api_root + "data/dash/" + dash_id;
    var resJson = $.ajax({
        url: url,
        method: "GET",
        contentType: "application/json",
        async: false,
    })
    .done(function(data){console.log("ajax done");})
    .fail(function(){console.log("ajax fail")})
    .success(function(data){
        console.log("ajax success");
        console.log(data);
        return data;
    })
    .complete(function(){console.log("ajax complete")})
    .always(function(){console.log("ajax always")});
    // The synchronous jqXHR already holds the parsed payload.
    return resJson.responseJSON.data;
}
function getDashList(){
    // Fetch the list of all dashboards synchronously (async: false) so the
    // caller can use the return value directly.
    // var url = "http://127.0.0.1:9090/data/dashes/";
    var url = api_root + "data/dashes/";
    var resJson = $.ajax({
        url: url,
        method: "GET",
        contentType: "application/json",
        async: false,
    })
    .done(function(data){console.log("ajax done");})
    .fail(function(){console.log("ajax fail")})
    .success(function(data){
        console.log("ajax success");
        console.log(data);
        return data;
    })
    .complete(function(){console.log("ajax complete")})
    .always(function(){console.log("ajax always")});
    return resJson.responseJSON.data;
}
function initDashList(){
    // Build the home-page dashboard table (name/author/modified/actions)
    // and wire up the "create dashboard" submit button.
    var list = getDashList();
    var tbody = $("#dash_list")[0];
    // var url = "http://127.0.0.1:9090/dash/";
    var url = api_root + "dash/";
    $.each(list, function(index, obj){
        var a = genElement("a");
        var i = genElement("i");
        var tr = genElement("tr");
        var name = genElement("td");
        var author = genElement("td");
        var time = genElement("td");
        var action = genElement("td");
        a.innerText = obj.name;
        a.setAttribute("href", url + obj.id);
        name.appendChild(a);
        name.setAttribute("data-field", "name");
        author.innerText = obj.author;
        // time_modified is a unix timestamp in seconds.
        time.innerText = moment(parseInt(obj.time_modified) * 1000).format("YYYY-MM-DD HH:mm:ss");
        i.className = "fa fa-fw fa-lg fa-cog";
        action.innerHTML = strFormat(setting_template, obj.id);
        tr.appendChild(name);
        tr.appendChild(author);
        tr.appendChild(time);
        tr.appendChild(action);
        tbody.appendChild(tr);
    });
    $("#submit").on("click", function submit() {
        var newDash = {
            "name": $("#name")[0].value,
            "author": $("#author")[0].value,
        };
        console.log(api_root);
        $.ajax({
            // I don't know why set url to api_root will cause an error here,
            // need to take little time on diving into this. but it as the doc says:
            // the default value of url is current page, so it works when leave out
            // the url paramter, will take back to this later.
            // url: api_root,
            // url: "http://127.0.0.1:9090/",
            method: "POST",
            dataType: "JSONP",
            data: JSON.stringify(newDash),
            contentType: "application/json",
            async: false,
        })
        .done(function(data){
            console.log("ajax done");
        })
        .fail(function(){
            console.log("ajax fail");
        })
        .success(function(data){
            console.log("ajax success");
            console.log(data);
            return data;
        })
        .complete(function(){
            console.log("ajax complete");
        })
        .always(function(){
            console.log("ajax always");
        });
    });
}
function deleteDash(dash_id) {
    // Delete a dashboard on the server, then reload the page so the list
    // reflects the removal.
    $.ajax({
        url: api_root + "data/dash/" + dash_id,
        // url: strFormat("http://127.0.0.1:9090/data/dash/{0}", dash_id),
        method: "DELETE",
        contentType: "application/json",
        // async: false,
    })
    .done(function(data){console.log("ajax done");})
    .fail(function(){console.log("ajax fail")})
    .success(function(){
        location.reload();
    })
    .complete(function(){console.log("ajax complete")})
    .always(function(){console.log("ajax always")});
}
/*
Shape of one stored grid entry:
{
    graph_name: "graph name 2",
    height: 5,
    id: "2",
    key: "none",
    option: {},
    type: "none",
    width: 6,
    x: 6,
    y: 0
}
*/
function addBox(){
    // Append an empty graph box to the dashboard and register it in the
    // stored layout.
    var grid = $('.grid-stack').data('gridstack');
    var new_box = grid.add_widget(box_template, 200, 200, 6, 5, true);
    registerKeysFunc();
    var current_dash = store.get(store.get("current-dash"));
    // BUG FIX: Object.keys(...).length reuses an id after a graph has been
    // deleted (deleteGraph removes keys), silently overwriting an existing
    // graph.  Use max(existing id) + 1 instead.
    var ids = Object.keys(current_dash.grid).map(Number);
    var new_graph_id = ids.length ? Math.max.apply(null, ids) + 1 : 0;
    var new_graph_name = strFormat("graph name {0}", new_graph_id);
    new_box[0].setAttribute("graph-id", new_graph_id);
    new_box.find("input.input-title-level-2")[0].value = new_graph_name;
    var new_graph_data = {
        "id": new_graph_id, "key": "none", "type": "none", "option": {"x": [], "y": []},
        "x": new_box[0].getAttribute("data-gs-x"), "y": new_box[0].getAttribute("data-gs-y"),
        "width": new_box[0].getAttribute("data-gs-width"), "height": new_box[0].getAttribute("data-gs-height"),
        "graph_name": new_graph_name
    };
    current_dash.grid[new_graph_id] = new_graph_data;
    store.set(store.get("current-dash"), current_dash);
    console.log("add a new grid box : ", new_graph_data);
}
function strFormat(theString) {
    // Replace every "{i}" placeholder in theString with arguments[i + 1].
    // "gm" = RegEx options for Global (all occurrences) + Multiline search.
    var out = theString;
    for (var argNo = 1; argNo < arguments.length; argNo++) {
        var pattern = new RegExp("\\{" + (argNo - 1) + "\\}", "gm");
        out = out.replace(pattern, arguments[argNo]);
    }
    return out;
}
function my_alert(msg, error){
    // Flash a banner at the top of the page: orange-red for errors,
    // cadet-blue for plain notices.  Scrolls up first so it is visible.
    var background_color = error ? 'orangered' : 'cadetblue';
    scrollBy(0, -1000);
    $("#error")[0].style.backgroundColor = background_color;
    $("#error_msg")[0].innerText = msg;
    $("#error").fadeIn(3000);
    $("#error").fadeOut(3000);
}
import os.path
import socket
import sys
import time
import tornado.autoreload
import tornado.httpserver
import tornado.ioloop
import tornado.web
import handler.ActionHandler
import handler.WebsocketHandler
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class WebserverTornado(BaseThreadedModule):
    """
    A tornado based web server.

    Configuration template:

    - plugin.WebserverTornado:
       port:                            # <default: 5100; type: integer; is: optional>
       tls:                             # <default: False; type: boolean; is: optional>
       key:                             # <default: False; type: boolean||string; is: required if tls is True else optional>
       cert:                            # <default: False; type: boolean||string; is: required if tls is True else optional>
       document_root:                   # <default: '../assets/webserver_docroot'; type: string; is: optional>
       statistic_module_id:             # <default: "SimpleStats"; type: string; is: optional>
       application_settings:            # <default: None; type: None||dict; is: optional>
    """

    module_type = "stand_alone"
    """Set module type"""
    can_run_forked = False

    def configure(self, configuration):
        """Configure the module and register the default REST/websocket handlers."""
        # Call parent configure method
        BaseThreadedModule.configure(self, configuration)
        self.server = False
        self.settings = self.getSettings()
        self.application = tornado.web.Application([], **self.settings)
        # Default handlers.
        handlers = [  # REST ActionHandler
                    (r"/rest/server/restart", handler.ActionHandler.RestartHandler),
                    (r"/rest/server/info", handler.ActionHandler.GetServerInformation),
                    (r"/rest/server/statistics", handler.ActionHandler.GetServerStatistics),
                    (r"/rest/server/configuration", handler.ActionHandler.GetServerConfiguration),
                    # WebsocketHandler
                    (r"/websockets/statistics", handler.WebsocketHandler.StatisticsWebSocketHandler),
                    (r"/websockets/get_logs", handler.WebsocketHandler.LogToWebSocketHandler)]
        self.addHandlers(handlers)

    def addHandlers(self, host_handlers=None, host_pattern='.*$'):
        """Register additional URL handlers on the tornado application.

        BUG FIX: the default for host_handlers was a mutable `[]`, which is
        shared across calls; use the None sentinel instead.
        """
        if host_handlers is None:
            host_handlers = []
        self.application.add_handlers(host_pattern, host_handlers)

    def getSettings(self):
        """Build the tornado application settings dict from the configuration."""
        base_path = self.getConfigurationValue('document_root')
        # NOTE(review): with the default document_root this branch never
        # triggers; it looks like a legacy shortcut -- confirm before removing.
        if base_path == 'docroot':
            base_path = "%s/docroot" % os.path.dirname(__file__)
        settings = {'template_path': "%s/templates" % base_path,
                    'static_path': "%s/static" % base_path,
                    'ui_modules': [],
                    'debug': False,
                    'TornadoWebserver': self}
        if self.getConfigurationValue('application_settings'):
            settings.update(self.getConfigurationValue('application_settings'))
        return settings

    def addUiModules(self, modules):
        """Register tornado UI modules, skipping any already registered."""
        if not isinstance(modules, list):
            modules = [modules]
        for module in modules:
            if module in self.application.settings['ui_modules']:
                continue
            self.application.settings['ui_modules'].append(module)
            self.application._load_ui_modules(module)

    def getUiModule(self, module_name):
        """Return a registered UI module by name, or False if unknown."""
        try:
            return self.application.ui_modules[module_name]
        except KeyError:
            return False

    def start(self):
        """Bind the HTTP(S) server to the configured port."""
        ssl_options = None
        if self.getConfigurationValue("tls"):
            ssl_options = {'certfile': self.getConfigurationValue("cert"),
                           'keyfile': self.getConfigurationValue("key")}
        try:
            self.server = tornado.httpserver.HTTPServer(self.application, ssl_options=ssl_options)
            self.server.listen(self.getConfigurationValue('port'))
            # Allow quick rebinding of the port after a restart/reload.
            for fd, server_socket in self.server._sockets.items():
                server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # still propagate.
            etype, evalue, etb = sys.exc_info()
            self.logger.error("Could not start webserver on %s. Exception: %s, Error: %s." % (self.getConfigurationValue('port'), etype, evalue))
            return
        tornado.autoreload.add_reload_hook(self.shutDown)
        # NOTE(review): code that started a tornado IOLoop here was dead (it
        # sat after an unconditional `return`) and has been removed; the
        # IOLoop is presumably run elsewhere -- confirm.

    def shutDown(self):
        """Stop the server and shut down the module."""
        if self.server:
            self.server.stop()
            # Give os time to free the socket. Otherwise a reload will fail
            # with 'address already in use'.
            time.sleep(.2)
        # Call parent shutDown method.
        BaseThreadedModule.shutDown(self)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.