| content (stringlengths, 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
import gym
import gym_maze
import copy
#env = gym.make("maze-random-10x10-plus-v0")
#env = gym.make("maze-sample-100x100-v0")
#env = gym.make("maze-random-30x30-plus-v0")
env_name= "maze-sample-10x10-v0"
env = gym.make(env_name)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
max_steps = env._max_episode_steps
threshold = env.spec.reward_threshold
print(state_size, action_size, max_steps, threshold)
#print(dir(env.action_space))
#env1 = copy.copy(env)
#env2 = copy.copy(env)
for i in range(10):
print(f"*** RUNNING ENVIRONMENT {i+1}")
copy_env = copy.copy(env)
observation = copy_env.reset()
st = 0
while True:
st += 1
copy_env.render()
action = copy_env.action_space.sample()
#print(observation, action)
observation, reward, done, info = copy_env.step(action)
if done or (st == copy_env._max_episode_steps - 1):
copy_env.close()
break
#done = False
#observation = env.reset()
#while True:
# env.render()
# action = env.action_space.sample()
# print(observation, action)
# observation, reward, done, info = env.step(action)
|
nilq/baby-python
|
python
|
# program that generates a random number and prompts the user to guess until they guess it correctly
# helen o'shea
# 20210211
import random
number = random.randint(0,100)
guess = int(input("Please guess the number between 0 and 100: "))
attempt = 1
while guess!=number:
if guess<number:
attempt +=1
guess = int(input("your guess is too low please guess again: "))
else:
attempt += 1
guess = int(input("your guess is too high please guess again: "))
print("you guessed {} correctly in {} attempts".format(guess, attempt))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Script to retrieve regobs data relevant for forecast analysis at NVE.
"""
__author__ = 'kmu'
import datetime as dt
import pandas as pd
from varsomdata import getobservations as go
def get_snow_obs(from_date, to_date):
all_data_snow = go.get_all_observations(from_date, to_date, geohazard_tids=10)
return all_data_snow
def get_weak_layers_from_snow_profiles(from_date, to_date):
snow_profiles = go.get_snow_profile(from_date, to_date)
return snow_profiles
def get_danger_signs(from_date, to_date, region_ids):
ds_list = go.get_danger_sign(from_date, to_date, region_ids=region_ids, location_id=None, group_id=None,
observer_ids=None, observer_nick=None, observer_competence=None, output='List', geohazard_tids=10,
lang_key=1)
df = go._make_data_frame(ds_list)
return df
def get_incident(from_date, to_date, region_ids=None, location_id=None, group_id=None, observer_ids=None, observer_nick=None, observer_competence=None, output='List', geohazard_tids=None, lang_key=1):
inc_list = go.get_incident(from_date, to_date, region_ids=region_ids, location_id=None, group_id=None, observer_ids=None, observer_nick=None, observer_competence=None, output='List', geohazard_tids=10, lang_key=1)
inc_list = [i.to_dict() for i in inc_list]
df = pd.DataFrame(inc_list)
return df
def get_stability_tests_for_article(from_date, to_date, region_ids):
st_list = go.get_column_test(from_date, to_date, region_ids)
_st = []
for st in st_list:
_st.append(st.OriginalData)
return _st
if __name__ == "__main__":
region_ids = [3003, 3007, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3022, 3023, 3024, 3027, 3028, 3029,
3031, 3032, 3034, 3035]
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2019, 1, 31)
#all_data_snow = get_snow_obs(from_date, to_date)
#ds = get_danger_signs(from_date, to_date, region_ids)
#inc = get_incident(from_date, to_date, region_ids=region_ids)
#inc.to_csv('../localstorage/aval_incidents_2013_2019.csv', index_label='index')
st_list = get_stability_tests_for_article(from_date, to_date, region_ids)
df = pd.DataFrame(st_list)
df.to_csv('../localstorage/stability_tests.csv', index_label='index')
k = 'm'
#aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
#df = pandas.DataFrame(aw_dict)
#df.to_csv('../localstorage/norwegian_avalanche_warnings_season_17_18.csv', index_label='index')
|
nilq/baby-python
|
python
|
def f(*a<caret>rgs):
"""
"""
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# This creates the level1 fsf's and the script to run the feats on condor
import os
import glob
studydir ='/mnt/40TB-raid6/Experiments/FCTM_S/FCTM_S_Data/Analyses'
fsfdir="%s/group/lvl2_B_hab_feats_v1"%(studydir)
subdirs=glob.glob("%s/1[0-9][0-9][0-9][0-9]"%(studydir))
#subdirs=glob.glob("%s/18301"%(studydir))
setnum = 'B'
for dir in list(subdirs):
splitdir = dir.split('/')
splitdir_sub = splitdir[7] # You will need to edit this
subnum=splitdir_sub[-5:] # You also may need to edit this
subfeats=glob.glob("%s/model/B_hab_lvl1_v1/B_run[0-9].feat"%(dir))
if len(subfeats)==6: # Add your own second loop for 2 feat cases
print(subnum)
replacements = {'17271':subnum}
with open("%s/lvl2_B.fsf"%(fsfdir)) as infile:
with open("%s/B_hab-lvl2fe-TEMP%s.fsf"%(fsfdir, subnum), 'w') as outfile:
for line in infile:
for src, target in replacements.items():
line = line.replace(src, target)
outfile.write(line)
|
nilq/baby-python
|
python
|
import tempfile
import mdtraj
import pandas as pd
from kmbio import PDB
from kmtools import sequence_tools, structure_tools
from .distances_and_orientations import (
construct_residue_df,
construct_residue_pairs_df,
residue_df_to_row,
residue_pairs_df_to_row,
validate_residue_df,
validate_residue_pairs_df,
)
def get_interaction_dataset(structure, r_cutoff=5):
"""Copied from "datapkg/pdb-analysis/notebooks/extract_pdb_interactions.ipynb" """
interactions = structure_tools.get_interactions(structure, r_cutoff=r_cutoff, interchain=False)
interactions_core, interactions_interface = structure_tools.process_interactions(interactions)
interactions_core_aggbychain = structure_tools.process_interactions_core(
structure, interactions_core
)
# Not necessary to drop duplicates in our case
# interactions_core, interactions_core_aggbychain = structure_tools.drop_duplicates_core(
# interactions_core, interactions_core_aggbychain
# )
return interactions_core, interactions_core_aggbychain
def get_interaction_dataset_wdistances(structure_file, model_id, chain_id, r_cutoff=12):
structure = PDB.load(structure_file)
chain = structure[0][chain_id]
num_residues = len(list(chain.residues))
dd = structure_tools.DomainDef(model_id, chain_id, 1, num_residues)
domain = structure_tools.extract_domain(structure, [dd])
distances_core = structure_tools.get_distances(
domain.to_dataframe(), r_cutoff, groupby="residue"
)
assert (distances_core["residue_idx_1"] <= distances_core["residue_idx_2"]).all()
return domain, distances_core
GET_ADJACENCY_WITH_DISTANCES_ROW_ATTRIBUTES = [
"structure_id",
"model_id",
"chain_id",
"sequence",
"s_start",
"s_end",
"q_start",
"q_end",
"sseq",
"a2b",
"b2a",
"residue_idx_1_corrected",
"residue_idx_2_corrected",
]
def get_adjacency_with_distances_and_orientations(
row, max_cutoff=12, min_cutoff=None, structure_url_prefix="rcsb://"
):
""""""
missing_attributes = [
attr for attr in GET_ADJACENCY_WITH_DISTANCES_ROW_ATTRIBUTES if not hasattr(row, attr)
]
assert not missing_attributes, missing_attributes
# === Parse input structure ===
# Load structure
url = f"{structure_url_prefix}{row.structure_id.lower()}.cif.gz"
structure = PDB.load(url)
# Template sequence
chain_sequence = structure_tools.get_chain_sequence(
structure[row.model_id][row.chain_id], if_unknown="replace"
)
template_sequence = chain_sequence[int(row.s_start - 1) : int(row.s_end)]
assert len(template_sequence) == len(row.a2b)
# Target sequence
target_sequence = row.sequence[int(row.q_start - 1) : int(row.q_end)]
assert len(target_sequence) == len(row.b2a)
# Extract domain
dd = structure_tools.DomainDef(row.model_id, row.chain_id, int(row.s_start), int(row.s_end))
domain = structure_tools.extract_domain(structure, [dd])
assert template_sequence == structure_tools.get_chain_sequence(domain, if_unknown="replace")
assert template_sequence == row.sseq.replace("-", "")
# === Generate mdtraj trajectory ===
with tempfile.NamedTemporaryFile(suffix=".pdb") as pdb_file:
PDB.save(domain, pdb_file.name)
traj = mdtraj.load(pdb_file.name)
assert template_sequence == traj.top.to_fasta()[0]
# === Extract residues and residue-residue interactions ===
# Residue info
residue_df = construct_residue_df(traj)
validate_residue_df(residue_df)
residue_df["residue_idx_corrected"] = pd.array(
residue_df["residue_idx"].apply(
lambda idx: sequence_tools.convert_residue_index_a2b(idx, row.b2a)
),
dtype=pd.Int64Dtype(),
)
# Residue pair info
residue_pairs_df = construct_residue_pairs_df(traj)
validate_residue_pairs_df(residue_pairs_df)
for i in [1, 2]:
residue_pairs_df[f"residue_idx_{i}_corrected"] = pd.array(
residue_pairs_df[f"residue_idx_{i}"].apply(
lambda idx: sequence_tools.convert_residue_index_a2b(idx, row.b2a)
),
dtype=pd.Int64Dtype(),
)
# === Sanity check ===
# Get the set of interactions
interactions_1 = set(
residue_pairs_df[
(
residue_pairs_df["residue_idx_1_corrected"]
< residue_pairs_df["residue_idx_2_corrected"]
)
& (residue_pairs_df["distance"] <= 5.0)
][["residue_idx_1_corrected", "residue_idx_2_corrected"]].apply(tuple, axis=1)
)
# Get the reference set of interactions
interactions_2 = {
(int(r1), int(r2)) if r1 <= r2 else (int(r2), int(r1))
for r1, r2 in zip(row.residue_idx_1_corrected, row.residue_idx_2_corrected)
if pd.notnull(r1) and pd.notnull(r2)
}
assert not interactions_1 ^ interactions_2, interactions_1 ^ interactions_2
return {**residue_df_to_row(residue_df), **residue_pairs_df_to_row(residue_pairs_df)}
def get_adjacency_with_distances(
row, max_cutoff=12, min_cutoff=None, structure_url_prefix="rcsb://"
):
"""
Notes:
- This is the 2018 version, where we calculated distances only.
"""
missing_attributes = [
attr for attr in GET_ADJACENCY_WITH_DISTANCES_ROW_ATTRIBUTES if not hasattr(row, attr)
]
assert not missing_attributes, missing_attributes
# Load structure
url = f"{structure_url_prefix}{row.structure_id.lower()}.cif.gz"
structure = PDB.load(url)
# Template sequence
chain_sequence = structure_tools.get_chain_sequence(
structure[row.model_id][row.chain_id], if_unknown="replace"
)
template_sequence = chain_sequence[int(row.s_start - 1) : int(row.s_end)]
assert len(template_sequence) == len(row.a2b)
# Target sequence
target_sequence = row.sequence[int(row.q_start - 1) : int(row.q_end)]
assert len(target_sequence) == len(row.b2a)
# Extract domain
dd = structure_tools.DomainDef(row.model_id, row.chain_id, int(row.s_start), int(row.s_end))
domain = structure_tools.extract_domain(structure, [dd])
assert template_sequence == structure_tools.get_chain_sequence(domain, if_unknown="replace")
assert template_sequence == row.sseq.replace("-", "")
# Get interactions
distances_core = structure_tools.get_distances(
domain, max_cutoff, min_cutoff, groupby="residue"
)
assert (distances_core["residue_idx_1"] <= distances_core["residue_idx_2"]).all()
# Map interactions to target
for i in [1, 2]:
distances_core[f"residue_idx_{i}_corrected"] = distances_core[f"residue_idx_{i}"].apply(
lambda idx: sequence_tools.convert_residue_index_a2b(idx, row.b2a)
)
# Remove missing values
distances_core = distances_core[
distances_core["residue_idx_1_corrected"].notnull()
& distances_core["residue_idx_2_corrected"].notnull()
]
# Convert to integers
distances_core[["residue_idx_1_corrected", "residue_idx_2_corrected"]] = distances_core[
["residue_idx_1_corrected", "residue_idx_2_corrected"]
].astype(int)
# Sanity check
assert (
distances_core["residue_idx_1_corrected"] < distances_core["residue_idx_2_corrected"]
).all()
# Get the set of interactions
interactions_1 = set(
distances_core[(distances_core["distance"] <= 5)][
["residue_idx_1_corrected", "residue_idx_2_corrected"]
].apply(tuple, axis=1)
)
# Get the reference set of interactions
interactions_2 = {
(int(r1), int(r2)) if r1 <= r2 else (int(r2), int(r1))
for r1, r2 in zip(row.residue_idx_1_corrected, row.residue_idx_2_corrected)
if pd.notnull(r1) and pd.notnull(r2)
}
assert not interactions_1 ^ interactions_2
return (
distances_core["residue_idx_1_corrected"].values,
distances_core["residue_idx_2_corrected"].values,
distances_core["distance"].values,
)
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2010 - 2014, Pascal Volk
# See COPYING for distribution information.
"""
vmm.password
~~~~~~~~~~~~~~~~~~~~~~~~~~~
vmm's password module to generate password hashes from
passwords or random passwords. This module provides following
functions:
hashed_password = pwhash(password[, scheme][, user])
random_password = randompw()
scheme, encoding = verify_scheme(scheme)
schemes, encodings = list_schemes()
scheme = extract_scheme(hashed_password)
"""
import hashlib
import re
from base64 import b64encode
from binascii import b2a_hex
from crypt import crypt
from random import SystemRandom
from subprocess import Popen, PIPE
from gettext import gettext as _
from vmm import ENCODING
from vmm.emailaddress import EmailAddress
from vmm.common import get_unicode, version_str
from vmm.constants import VMM_ERROR
from vmm.errors import VMMError
SALTCHARS = "./0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
PASSWDCHARS = "._-+#*23456789abcdefghikmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"
DEFAULT_B64 = (None, "B64", "BASE64")
DEFAULT_HEX = (None, "HEX")
CRYPT_ID_MD5 = 1
CRYPT_ID_BLF = "2a"
CRYPT_ID_SHA256 = 5
CRYPT_ID_SHA512 = 6
CRYPT_SALT_LEN = 2
CRYPT_BLF_ROUNDS_MIN = 4
CRYPT_BLF_ROUNDS_MAX = 31
CRYPT_BLF_SALT_LEN = 22
CRYPT_MD5_SALT_LEN = 8
CRYPT_SHA2_ROUNDS_DEFAULT = 5000
CRYPT_SHA2_ROUNDS_MIN = 1000
CRYPT_SHA2_ROUNDS_MAX = 999999999
CRYPT_SHA2_SALT_LEN = 16
SALTED_ALGO_SALT_LEN = 4
cfg_dget = lambda option: None
_sys_rand = SystemRandom()
_choice = _sys_rand.choice
def _get_salt(s_len):
return "".join(_choice(SALTCHARS) for _ in range(s_len))
def _doveadmpw(password, scheme, encoding):
"""Communicates with Dovecot's doveadm and returns
the hashed password: {scheme[.encoding]}hash
"""
if encoding:
scheme = ".".join((scheme, encoding))
cmd_args = [
cfg_dget("bin.doveadm"),
"pw",
"-s",
scheme,
"-p",
get_unicode(password),
]
process = Popen(cmd_args, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if process.returncode:
raise VMMError(stderr.strip().decode(ENCODING), VMM_ERROR)
hashed = stdout.strip().decode(ENCODING)
if not hashed.startswith("{%s}" % scheme):
raise VMMError(
"Unexpected result from %s: %s" % (cfg_dget("bin.doveadm"), hashed),
VMM_ERROR,
)
return hashed
def _md4_new():
"""Returns an new MD4-hash object if supported by the hashlib -
otherwise `None`.
"""
try:
return hashlib.new("md4")
except ValueError as err:
if err.args[0].startswith("unsupported hash type"):
return None
else:
raise
def _format_digest(digest, scheme, encoding):
"""Formats the arguments to a string: {scheme[.encoding]}digest."""
if not encoding:
return "{%s}%s" % (scheme, digest)
return "{%s.%s}%s" % (scheme, encoding, digest)
def _clear_hash(password, scheme, encoding):
"""Generates a (encoded) CLEARTEXT/PLAIN 'hash'."""
password = password.decode(ENCODING)
if encoding:
if encoding == "HEX":
password = b2a_hex(password.encode()).decode()
else:
password = b64encode(password.encode()).decode()
return _format_digest(password, scheme, encoding)
return "{%s}%s" % (scheme, password)
def _get_crypt_blowfish_salt():
"""Generates a salt for Blowfish crypt."""
rounds = cfg_dget("misc.crypt_blowfish_rounds")
if rounds < CRYPT_BLF_ROUNDS_MIN:
rounds = CRYPT_BLF_ROUNDS_MIN
elif rounds > CRYPT_BLF_ROUNDS_MAX:
rounds = CRYPT_BLF_ROUNDS_MAX
return "$%s$%02d$%s" % (CRYPT_ID_BLF, rounds, _get_salt(CRYPT_BLF_SALT_LEN))
def _get_crypt_sha2_salt(crypt_id):
"""Generates a salt for crypt using the SHA-256 or SHA-512 encryption
method.
*crypt_id* must be either `5` (SHA-256) or `6` (SHA-512).
"""
assert crypt_id in (CRYPT_ID_SHA256, CRYPT_ID_SHA512), (
"invalid crypt " "id: %r" % crypt_id
)
if crypt_id is CRYPT_ID_SHA512:
rounds = cfg_dget("misc.crypt_sha512_rounds")
else:
rounds = cfg_dget("misc.crypt_sha256_rounds")
if rounds < CRYPT_SHA2_ROUNDS_MIN:
rounds = CRYPT_SHA2_ROUNDS_MIN
elif rounds > CRYPT_SHA2_ROUNDS_MAX:
rounds = CRYPT_SHA2_ROUNDS_MAX
if rounds == CRYPT_SHA2_ROUNDS_DEFAULT:
return "$%d$%s" % (crypt_id, _get_salt(CRYPT_SHA2_SALT_LEN))
return "$%d$rounds=%d$%s" % (crypt_id, rounds, _get_salt(CRYPT_SHA2_SALT_LEN))
def _crypt_hash(password, scheme, encoding):
"""Generates (encoded) CRYPT/MD5/{BLF,MD5,SHA{256,512}}-CRYPT hashes."""
if scheme == "CRYPT":
salt = _get_salt(CRYPT_SALT_LEN)
elif scheme == "BLF-CRYPT":
salt = _get_crypt_blowfish_salt()
elif scheme in ("MD5-CRYPT", "MD5"):
salt = "$%d$%s" % (CRYPT_ID_MD5, _get_salt(CRYPT_MD5_SALT_LEN))
elif scheme == "SHA256-CRYPT":
salt = _get_crypt_sha2_salt(CRYPT_ID_SHA256)
else:
salt = _get_crypt_sha2_salt(CRYPT_ID_SHA512)
encrypted = crypt(password.decode(ENCODING), salt)
if encoding:
if encoding == "HEX":
encrypted = b2a_hex(encrypted.encode()).decode()
else:
encrypted = b64encode(encrypted.encode()).decode()
return _format_digest(encrypted, scheme, encoding)
def _md4_hash(password, scheme, encoding):
"""Generates encoded PLAIN-MD4 hashes."""
md4 = _md4_new()
if md4:
md4.update(password)
if encoding in DEFAULT_HEX:
digest = md4.hexdigest()
else:
digest = b64encode(md4.digest()).decode()
return _format_digest(digest, scheme, encoding)
return _doveadmpw(password, scheme, encoding)
def _md5_hash(password, scheme, encoding, user=None):
"""Generates DIGEST-MD5 aka PLAIN-MD5 and LDAP-MD5 hashes."""
md5 = hashlib.md5()
if scheme == "DIGEST-MD5":
md5.update(user.localpart.encode() + b":" + user.domainname.encode() + b":")
md5.update(password)
if (scheme in ("PLAIN-MD5", "DIGEST-MD5") and encoding in DEFAULT_HEX) or (
scheme == "LDAP-MD5" and encoding == "HEX"
):
digest = md5.hexdigest()
else:
digest = b64encode(md5.digest()).decode()
return _format_digest(digest, scheme, encoding)
def _ntlm_hash(password, scheme, encoding):
"""Generates NTLM hashes."""
md4 = _md4_new()
if md4:
password = b"".join(bytes(x) for x in zip(password, bytes(len(password))))
md4.update(password)
if encoding in DEFAULT_HEX:
digest = md4.hexdigest()
else:
digest = b64encode(md4.digest()).decode()
return _format_digest(digest, scheme, encoding)
return _doveadmpw(password, scheme, encoding)
def _create_hashlib_hash(algorithm, with_salt=False):
def hash_password(password, scheme, encoding):
# we default to an empty byte string to keep the internal logic
# simple: it then behaves exactly as if no salt had been used
salt = _get_salt(SALTED_ALGO_SALT_LEN).encode() if with_salt else b""
_hash = algorithm(password + salt)
if encoding in DEFAULT_B64:
digest = b64encode(_hash.digest() + salt).decode()
else:
digest = _hash.hexdigest() + b2a_hex(salt).decode()
return _format_digest(digest, scheme, encoding)
return hash_password
_sha1_hash = _create_hashlib_hash(hashlib.sha1)
_sha256_hash = _create_hashlib_hash(hashlib.sha256)
_sha512_hash = _create_hashlib_hash(hashlib.sha512)
_smd5_hash = _create_hashlib_hash(hashlib.md5, with_salt=True)
_ssha1_hash = _create_hashlib_hash(hashlib.sha1, with_salt=True)
_ssha256_hash = _create_hashlib_hash(hashlib.sha256, with_salt=True)
_ssha512_hash = _create_hashlib_hash(hashlib.sha512, with_salt=True)
_scheme_info = {
"CLEAR": (_clear_hash, 0x2010DF00),
"CLEARTEXT": (_clear_hash, 0x10000F00),
"CRAM-MD5": (_doveadmpw, 0x10000F00),
"CRYPT": (_crypt_hash, 0x10000F00),
"DIGEST-MD5": (_md5_hash, 0x10000F00),
"HMAC-MD5": (_doveadmpw, 0x10000F00),
"LANMAN": (_doveadmpw, 0x10000F00),
"LDAP-MD5": (_md5_hash, 0x10000F00),
"MD5": (_crypt_hash, 0x10000F00),
"MD5-CRYPT": (_crypt_hash, 0x10000F00),
"NTLM": (_ntlm_hash, 0x10000F00),
"OTP": (_doveadmpw, 0x10100A01),
"PLAIN": (_clear_hash, 0x10000F00),
"PLAIN-MD4": (_md4_hash, 0x10000F00),
"PLAIN-MD5": (_md5_hash, 0x10000F00),
"RPA": (_doveadmpw, 0x10000F00),
"SCRAM-SHA-1": (_doveadmpw, 0x20200A01),
"SHA": (_sha1_hash, 0x10000F00),
"SHA1": (_sha1_hash, 0x10000F00),
"SHA256": (_sha256_hash, 0x10100A01),
"SHA512": (_sha512_hash, 0x20000B03),
"SKEY": (_doveadmpw, 0x10100A01),
"SMD5": (_smd5_hash, 0x10000F00),
"SSHA": (_ssha1_hash, 0x10000F00),
"SSHA256": (_ssha256_hash, 0x10200A04),
"SSHA512": (_ssha512_hash, 0x20000B03),
}
def extract_scheme(password_hash):
"""Returns the extracted password scheme from *password_hash*.
If the scheme couldn't be extracted, **None** will be returned.
"""
scheme = re.match(r"^\{([^\}]{3,37})\}", password_hash)
if scheme:
return scheme.groups()[0]
return scheme
def list_schemes():
"""Returns the tuple (schemes, encodings).
`schemes` is an iterator over all supported password schemes (this depends on
the configured Dovecot version and the features of the libc).
`encodings` is a tuple with all usable encoding suffixes.
"""
dcv = cfg_dget("misc.dovecot_version")
schemes = (k for (k, v) in _scheme_info.items() if v[1] <= dcv)
encodings = (".B64", ".BASE64", ".HEX")
return schemes, encodings
def verify_scheme(scheme):
"""Checks if the password scheme *scheme* is known and supported by the
configured `misc.dovecot_version`.
The *scheme* may be a password scheme's name (e.g. 'PLAIN') or a scheme
name with an encoding suffix (e.g. 'PLAIN.BASE64'). If the scheme is
known and supported by the configured Dovecot version,
a tuple ``(scheme, encoding)`` will be returned.
The `encoding` in the tuple may be `None`.
Raises a `VMMError` if the password scheme:
* is unknown
* depends on a newer Dovecot version
* has an unknown encoding suffix
"""
assert isinstance(scheme, str), "Not a str: {!r}".format(scheme)
scheme_encoding = scheme.upper().split(".")
scheme = scheme_encoding[0]
if scheme not in _scheme_info:
raise VMMError(_("Unsupported password scheme: '%s'") % scheme, VMM_ERROR)
if cfg_dget("misc.dovecot_version") < _scheme_info[scheme][1]:
raise VMMError(
_("The password scheme '%(scheme)s' requires Dovecot " ">= v%(version)s.")
% {"scheme": scheme, "version": version_str(_scheme_info[scheme][1])},
VMM_ERROR,
)
if len(scheme_encoding) > 1:
if scheme_encoding[1] not in ("B64", "BASE64", "HEX"):
raise VMMError(
_("Unsupported password encoding: '%s'") % scheme_encoding[1], VMM_ERROR
)
encoding = scheme_encoding[1]
else:
encoding = None
return scheme, encoding
def pwhash(password, scheme=None, user=None):
"""Generates a password hash from the plain text *password* string.
If no *scheme* is given the password scheme from the configuration will
be used for the hash generation. When 'DIGEST-MD5' is used as the scheme,
an EmailAddress instance must also be given as the *user* argument.
"""
if not isinstance(password, str):
raise TypeError("Password is not a string: %r" % password)
password = password.encode(ENCODING).strip()
if not password:
raise ValueError("Could not accept empty password.")
if scheme is None:
scheme = cfg_dget("misc.password_scheme")
scheme, encoding = verify_scheme(scheme)
if scheme == "DIGEST-MD5":
assert isinstance(user, EmailAddress)
return _md5_hash(password, scheme, encoding, user)
return _scheme_info[scheme][0](password, scheme, encoding)
def randompw(pw_len):
"""Generates a plain text random password.
The length of the password can be configured in the ``vmm.cfg``
(account.password_length).
"""
if pw_len < 8:
pw_len = 8
return "".join(_sys_rand.sample(PASSWDCHARS, pw_len))
# Check for Blowfish/SHA-256/SHA-512 support in crypt.crypt()
if "$2a$04$0123456789abcdefABCDE.N.drYX5yIAL1LkTaaZotW3yI0hQhZru" == crypt(
"08/15!test~4711", "$2a$04$0123456789abcdefABCDEF$"
):
_scheme_info["BLF-CRYPT"] = (_crypt_hash, 0x20000B06)
if (
"$5$rounds=1000$0123456789abcdef$K/DksR0DT01hGc8g/kt9McEgrbFMKi9qrb1jehe7hn4"
== crypt("08/15!test~4711", "$5$rounds=1000$0123456789abcdef$")
):
_scheme_info["SHA256-CRYPT"] = (_crypt_hash, 0x20000B06)
if (
"$6$rounds=1000$0123456789abcdef$ZIAd5WqfyLkpvsVCVUU1GrvqaZTqvhJoouxdSqJO71l9Ld3"
"tVrfOatEjarhghvEYADkq//LpDnTeO90tcbtHR1"
== crypt("08/15!test~4711", "$6$rounds=1000$0123456789abcdef$")
):
_scheme_info["SHA512-CRYPT"] = (_crypt_hash, 0x20000B06)
del cfg_dget
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import gettext as _
from .models import *
class MyUserCreationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super(MyUserCreationForm, self).__init__(*args, **kwargs)
self.fields['email'].required = True
class Meta(UserCreationForm.Meta):
model = student
fields = ('username', 'email', 'first_name', 'last_name', 'state', 'city',
'educational_role', 'institute', 'language', 'password1',
'password2')
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = student
class MyUserAdmin(UserAdmin):
form = MyUserChangeForm
add_form = MyUserCreationForm
fieldsets = UserAdmin.fieldsets + (
(None, {'fields': ('username', 'email', )}),
(_('Personal info'), {'fields': ('first_name', 'last_name',
'state', 'city', 'educational_role', 'institute', 'language')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'first_name', 'last_name', 'state', 'city',
'educational_role', 'institute', 'language', 'password1',
'password2')}
),
)
admin.site.register(board)
admin.site.register(exam)
try:
admin.site.unregister(User)
except:
pass
admin.site.register(student, MyUserAdmin)
admin.site.register(educational_institute)
admin.site.register(search_result)
|
nilq/baby-python
|
python
|
from memoria import *
class Filas():
def __init__(self, dic_process_id):
self.filas = [[],[],[],[]]
self.dic_process_id = dic_process_id
self.memoria = Memoria()
self.ultimo_executado = None
self.qtd_processos = len(dic_process_id) # number of processes.
self.aging = 5 # how long a process may wait before its priority is raised.
self.qtd_proc_fin = 0
return
def insere_processo(self, processo) :
## inserts a process into the appropriate queue according to its priority
"""should we check here whether there is room for it to run, or only at execution time? """
prioridade = processo.prioridade
if (prioridade == 0 ):
self.filas[0].append(processo.id)
elif (prioridade == 1):
self.filas[1].append(processo.id)
elif (prioridade == 2):
self.filas[2].append(processo.id)
elif (prioridade == 3):
self.filas[3].append(processo.id)
return
def executa_processo(self):
""" verifica qual é o processoa ser executado.
e executa o mesmo.
se acabar o tempo, apaga ele da fila.
"""
def _executa( id , fila_atual):
"""retorna true se um processo acabou. Falso se não. """
self.ultimo_executado = id
if ( self.dic_process_id[id].tempo_processador > 0):
self.dic_process_id[id].tempo_processador -= 1
if (self.dic_process_id[id].tempo_processador == 0 ) :
self.qtd_proc_fin += 1
self.remove_processo(fila_atual)
# report that it finished
return True
# report that it did not finish
return False
self.ultimo_executado = None
for i in range(0,4):
if (len(self.filas[i]) > 0):
acabou =_executa(self.filas[i][0] , i)
return acabou
return False
def remove_processo(self, fila_atual):
## removes the first process from the given queue
self.filas[fila_atual] = self.filas[fila_atual][1:]
return
def aumenta_prioridade(self):
#print(self.filas)
for filas in self.filas:
for processo in filas:
pid = processo
#print('pid',pid,'\t','ultimo',self.ultimo_executado)
if (pid != self.ultimo_executado):
self.dic_process_id[pid].tempo_ultima_execucao += 1
# if it has gone at least `aging` ticks without executing, raise its priority by one level.
if (self.dic_process_id[pid].tempo_ultima_execucao >= self.aging):
if (self.dic_process_id[pid].prioridade > 1):
prio = self.dic_process_id[pid].prioridade
# remove the process from its old queue
self.filas[prio] = [x for x in self.filas[prio] if x != pid]
self.dic_process_id[pid].prioridade -= 1
self.insere_processo(self.dic_process_id[pid])
self.dic_process_id[pid].tempo_ultima_execucao = 0
else:
self.dic_process_id[pid].tempo_ultima_execucao = 0
return
def log_filas(self):
log = set()
for filas in self.filas:
for pid in filas:
log.add(pid)
return log if len(log) > 0 else set([None])
def __repr__(self):
return str(self.filas)
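# Example usage (an illustrative sketch; ``Processo`` is a hypothetical process class
# exposing the attributes this scheduler relies on: ``id``, ``prioridade``,
# ``tempo_processador`` and ``tempo_ultima_execucao``):
#
#     procs = {1: Processo(id=1, prioridade=0, tempo_processador=3),
#              2: Processo(id=2, prioridade=3, tempo_processador=5)}
#     filas = Filas(procs)
#     for p in procs.values():
#         filas.insere_processo(p)
#     while filas.qtd_proc_fin < filas.qtd_processos:
#         filas.executa_processo()    # run the highest-priority ready process for one tick
#         filas.aumenta_prioridade()  # aging: waiting processes slowly gain priority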
|
nilq/baby-python
|
python
|
def assert_keys_exist(obj, keys):
assert set(keys) <= set(obj.keys())
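# For example, assert_keys_exist({"a": 1, "b": 2}, ["a"]) passes silently, while
# assert_keys_exist({"a": 1}, ["a", "b"]) raises AssertionError because {"a", "b"}
# is not a subset of the object's keys.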
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import requests
from datetime import datetime, timedelta
import time
import numpy as np
import pandas as pd
import psycopg2
from psycopg2.extras import execute_values
import config as cfg
def connect_to_rds():
conn = psycopg2.connect(
host=cfg.HOST,
database=cfg.DATABASE,
user=cfg.UID,
password=cfg.PWD)
return conn
def get_epoch_and_pst_24hr():
utc = datetime.utcnow()
pst = timedelta(hours=7)  # fixed UTC-7 offset (Pacific Daylight Time)
current_hour = (utc - pst).hour
epoch = round(utc.timestamp())
return current_hour, epoch
def remove_agency_tag(id_string):
underscore_loc = id_string.find('_')
final = int(id_string[underscore_loc+1:])
return(final)
def query_active_trips(key, endpoint):
call_text = endpoint + key
response = requests.get(call_text)
response = response.json()
return response
def clean_active_trips(response):
active_trip_statuses = response['data']['list']
to_remove = []
# Find indices of trips that are inactive or have no data
for i, bus in enumerate(response['data']['list']):
if bus['tripId'] == '' or bus['status'] == 'CANCELED' or bus['location'] is None:
to_remove.append(i)
# Remove inactive trips starting with the last index
for index in sorted(to_remove, reverse=True):
del active_trip_statuses[index]
return active_trip_statuses
def upload_to_rds(to_upload, conn, collected_time):
to_upload_list = []
for bus_status in to_upload:
to_upload_list.append(
(str(remove_agency_tag(bus_status['tripId'])),
str(remove_agency_tag(bus_status['vehicleId'])),
str(round(bus_status['location']['lat'], 10)),
str(round(bus_status['location']['lon'], 10)),
str(round(bus_status['tripStatus']['orientation'])),
str(bus_status['tripStatus']['scheduleDeviation']),
str(round(bus_status['tripStatus']['totalDistanceAlongTrip'], 10)),
str(round(bus_status['tripStatus']['distanceAlongTrip'], 10)),
str(remove_agency_tag(bus_status['tripStatus']['closestStop'])),
str(remove_agency_tag(bus_status['tripStatus']['nextStop'])),
str(bus_status['tripStatus']['lastLocationUpdateTime'])[:-3],
str(collected_time)))
with conn.cursor() as curs:
try:
args_str = ','.join(curs.mogrify('(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', x).decode('utf-8') for x in to_upload_list)
query_str = 'INSERT INTO active_trips_study (tripid, vehicleid, lat, lon, orientation, scheduledeviation, totaltripdistance, tripdistance, closeststop, nextstop, locationtime, collectedtime) VALUES ' + args_str
curs.execute(query_str)
conn.commit()
except:
# Catch all errors and continue to keep server up and running
conn.rollback()
return query_str
def main_function():
endpoint = 'http://api.pugetsound.onebusaway.org/api/where/vehicles-for-agency/1.json?key='
conn = connect_to_rds()
current_hour, current_epoch = get_epoch_and_pst_24hr()
while current_hour < 19:
response = query_active_trips(cfg.API_KEY, endpoint)
current_hour, current_epoch = get_epoch_and_pst_24hr()
cleaned_response = clean_active_trips(response)
args_str = upload_to_rds(cleaned_response, conn, current_epoch)
time.sleep(8)
conn.close()
if __name__ == "__main__":
main_function()
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from itertools import product, count
from matplotlib.colors import LinearSegmentedColormap
# Note: drawing the two vector components uniformly from [-1, 1] and normalizing
# (the commented-out generator below) yields more vectors pointing diagonally than
# along an axis, so unit vectors are drawn from uniformly distributed angles instead.
# # generate uniform unit vectors
# def generate_unit_vectors(n):
# 'Generates matrix NxN of unit length vectors'
# v = np.random.uniform(-1, 1, (n, n, 2))
# l = np.sqrt(v[:, :, 0] ** 2 + v[:, :, 1] ** 2).reshape(n, n, 1)
# v /= l
# return v
def generate_unit_vectors(n, m):
'Generates an n x m matrix of unit-length vectors'
phi = np.random.uniform(0, 2*np.pi, (n, m))
v = np.stack((np.cos(phi), np.sin(phi)), axis=-1)
return v
# quintic interpolation
def qz(t):
return t * t * t * (t * (t * 6 - 15) + 10)
# cubic interpolation
def cz(t):
return -2 * t * t * t + 3 * t * t
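# Both fade curves map 0 -> 0 and 1 -> 1 with zero slope at the endpoints
# (e.g. qz(0.5) == cz(0.5) == 0.5), which lets neighbouring NSxNS patches blend
# smoothly; qz is Perlin's "improved" quintic 6t^5 - 15t^4 + 10t^3 and cz is the
# classic cubic smoothstep 3t^2 - 2t^3.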
def generate_2D_perlin_noise(size = (200,200), ns=1):
'''
generate_2D_perlin_noise(size, ns)
Generate a 2D array of shape ``size`` filled with Perlin noise.
Parameters
----------
size : int or (int, int)
Shape of the 2D array; a single int gives a square array.
ns : int
Distance between grid nodes (must divide both dimensions).
Returns
-------
m : ndarray
The 2D array filled with Perlin noise.
'''
if type(size) == int:
size = (size, size)
assert size[0]%ns==0 and size[1]%ns==0, 'size must be divisible by the node distance (ns)'
nc = [int(size[0] / ns), int(size[1] / ns)] # number of nodes
grid_size_h = int(size[0] / ns + 1) # number of points in grid
grid_size_w = int(size[1] / ns + 1) # number of points in grid
# generate grid of vectors
v = generate_unit_vectors(grid_size_h, grid_size_w)
# generate some constants in advance
ad, ar = np.arange(ns), np.arange(-ns, 0, 1)
bd, br = np.arange(ns), np.arange(-ns, 0, 1)
# vectors from each of the 4 nearest nodes to a point in the NSxNS patch
vd = np.zeros((ns, ns, 4, 1, 2))
# for (l1, l2), c in zip(product((ad, ar), repeat=2), count()):
vd[:, :, 0, 0] = np.stack(np.meshgrid(bd, ad, indexing='xy'), axis=2)
vd[:, :, 1, 0] = np.stack(np.meshgrid(br, ad, indexing='xy'), axis=2)
vd[:, :, 2, 0] = np.stack(np.meshgrid(bd, ar, indexing='xy'), axis=2)
vd[:, :, 3, 0] = np.stack(np.meshgrid(br, ar, indexing='xy'), axis=2)
# interpolation coefficients
d = qz(np.stack((np.zeros((ns, ns, 2)),
np.stack(np.meshgrid(ad, bd, indexing='ij'), axis=2)),
axis=2)/ns)
dd = np.stack(np.meshgrid(ad, bd, indexing='ij'), axis=2)
dd = dd.astype('float')
d[:, :, 0] = 1 - d[:, :, 1]
# make copy and reshape for convenience
d0 = d[..., 0].copy().reshape(ns, ns, 1, 2)
d1 = d[..., 1].copy().reshape(ns, ns, 2, 1)
# print(d0,d1)
# make an empty matrix
m = np.zeros((size[0], size[1]))
# reshape for convenience
t = m.reshape(nc[0], ns, nc[1], ns)
# calculate values for a NSxNS patch at a time
for i in np.arange(nc[0]):
for j in np.arange(nc[1]): # loop through the grid
# get four node vectors
av = v[i:i+2, j:j+2].reshape(4, 2, 1)
# 'vector from node to point' dot 'node vector'
at = np.matmul(vd, av).reshape(ns, ns, 2, 2)
# horizontal and vertical interpolation
t[i, :, j, :] = np.matmul(np.matmul(d0, at), d1).reshape(ns, ns)
return m
if __name__ == "__main__":
img = generate_2D_perlin_noise(200, 20)
plt.figure()
plt.imshow(img, cmap=cm.gray)
img = generate_2D_perlin_noise((200,300), 10)
print(type(img), img.shape, img.min(), img.max())
plt.figure()
plt.imshow(img, cmap=cm.gray)
plt.axis('off')
img = generate_2D_perlin_noise((200,50), 25)
print(type(img), img.shape, img.min(), img.max())
plt.figure()
plt.imshow(img, cmap=cm.gray)
plt.axis('off')
plt.figure()
plt.imshow(img>3, cmap=cm.gray)
plt.figure()
plt.imshow(img>1, cmap=cm.gray)
# generate "sky"
#img0 = generate_2D_perlin_noise(400, 80)
#img1 = generate_2D_perlin_noise(400, 40)
#img2 = generate_2D_perlin_noise(400, 20)
#img3 = generate_2D_perlin_noise(400, 10)
#
#img = (img0 + img1 + img2 + img3) / 4
#cmap = LinearSegmentedColormap.from_list('sky',
# [(0, '#0572D1'),
# (0.75, '#E5E8EF'),
# (1, '#FCFCFC')])
#img = cm.ScalarMappable(cmap=cmap).to_rgba(img)
#plt.imshow(img)
|
nilq/baby-python
|
python
|
from aiogram import Dispatcher
from bulletin_board_bot.misc.user_data import UserDataStorage
from bulletin_board_bot.dependencies import DIContainer
from bulletin_board_bot.middlewares.di import DIContainerMiddleware
from bulletin_board_bot.middlewares.userdata import UserDataMiddleware
def setup_middlewares(dp: Dispatcher,
user_data_storage: UserDataStorage,
container: DIContainer):
dp.setup_middleware(UserDataMiddleware(user_data_storage))
dp.setup_middleware(DIContainerMiddleware(container))
|
nilq/baby-python
|
python
|
from django.contrib import admin
from core.models import Profile, BraFitting, Suggestion, Resource
# Register your models here.
@admin.register(BraFitting)
class BraFittingAdmin(admin.ModelAdmin):
pass
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
pass
@admin.register(Suggestion)
class SuggestionAdmin(admin.ModelAdmin):
pass
@admin.register(Resource)
class ResourceAdmin(admin.ModelAdmin):
pass
|
nilq/baby-python
|
python
|
"""Tests for _get_tablename_schema_names association schemas function."""
import pytest
from open_alchemy.schemas import association
class TestGetTablenameSchemaNames:
"""Tests for _get_tablename_schema_names."""
# pylint: disable=protected-access
TESTS = [
pytest.param({}, set(), {}, id="empty"),
pytest.param({"Schema1": {}}, set(), {}, id="single not constructable"),
pytest.param(
{"Schema1": {"x-tablename": "table 1"}},
set("table 2"),
{},
id="single miss",
),
pytest.param(
{"Schema1": {"x-tablename": "table 1"}},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit",
),
pytest.param(
{
"Schema1": {
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-inherits": True},
]
},
"RefSchema": {"x-tablename": "table 1"},
},
{"table 1"},
{"table 1": ("RefSchema", ["Schema1", "RefSchema"])},
id="single hit $ref first",
),
pytest.param(
{
"RefSchema": {"x-tablename": "table 1"},
"Schema1": {
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-inherits": True},
]
},
},
{"table 1"},
{"table 1": ("RefSchema", ["RefSchema", "Schema1"])},
id="single hit $ref second",
),
pytest.param(
{"Schema1": {"allOf": [{"x-tablename": "table 1"}]}},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit allOf",
),
pytest.param(
{
"Schema1": {
"allOf": [
{"x-tablename": "table 1"},
{"$ref": "#/components/schemas/RefSchema"},
]
},
"RefSchema": {"x-tablename": "ref_table"},
},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit allOf local $ref local first",
),
pytest.param(
{
"Schema1": {
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-tablename": "table 1"},
]
},
"RefSchema": {"x-tablename": "ref_table"},
},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="single hit allOf local $ref $ref first",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
set(),
{},
id="multiple miss",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
{"table 1"},
{"table 1": ("Schema1", ["Schema1"])},
id="multiple first hit",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
{"table 2"},
{"table 2": ("Schema2", ["Schema2"])},
id="multiple second hit",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
},
{"table 1", "table 2"},
{"table 1": ("Schema1", ["Schema1"]), "table 2": ("Schema2", ["Schema2"])},
id="multiple all hit",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 1"},
},
{"table 1"},
{"table 1": ("Schema2", ["Schema1", "Schema2"])},
id="multiple same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
"Schema3": {"x-tablename": "table 3"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema1", ["Schema1"]),
"table 2": ("Schema2", ["Schema2"]),
"table 3": ("Schema3", ["Schema3"]),
},
id="many different tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 1"},
"Schema3": {"x-tablename": "table 3"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema2", ["Schema1", "Schema2"]),
"table 3": ("Schema3", ["Schema3"]),
},
id="many different first middle same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
"Schema3": {"x-tablename": "table 1"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema3", ["Schema1", "Schema3"]),
"table 2": ("Schema2", ["Schema2"]),
},
id="many first last same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 2"},
"Schema3": {"x-tablename": "table 2"},
},
{"table 1", "table 2", "table 3"},
{
"table 1": ("Schema1", ["Schema1"]),
"table 2": ("Schema3", ["Schema2", "Schema3"]),
},
id="many middle last same tablename",
),
pytest.param(
{
"Schema1": {"x-tablename": "table 1"},
"Schema2": {"x-tablename": "table 1"},
"Schema3": {"x-tablename": "table 1"},
},
{"table 1", "table 2", "table 3"},
{"table 1": ("Schema3", ["Schema1", "Schema2", "Schema3"])},
id="many all same tablename",
),
]
@staticmethod
@pytest.mark.parametrize("schemas, tablenames, expected_mapping", TESTS)
@pytest.mark.schemas
@pytest.mark.association
def test_(schemas, tablenames, expected_mapping):
"""
GIVEN schemas, tablenames and expected mapping
WHEN _get_tablename_schema_names is called with the schemas and tablenames
THEN the expected mapping is returned.
"""
returned_mapping = association._get_tablename_schema_names(
schemas=schemas, tablenames=tablenames
)
assert returned_mapping == expected_mapping
|
nilq/baby-python
|
python
|
from django.template import Library
from ..classes import Menu, SourceColumn
register = Library()
def _navigation_resolve_menu(context, name, source=None, sort_results=None):
result = []
menu = Menu.get(name)
link_groups = menu.resolve(
context=context, source=source, sort_results=sort_results
)
if link_groups:
result.append(
{
'link_groups': link_groups, 'menu': menu
}
)
return result
@register.simple_tag(takes_context=True)
def navigation_get_sort_field_querystring(context, column):
return column.get_sort_field_querystring(context=context)
@register.simple_tag
def navigation_get_source_columns(
source, exclude_identifier=False, names=None, only_identifier=False
):
return SourceColumn.get_for_source(
source=source, exclude_identifier=exclude_identifier,
names=names, only_identifier=only_identifier
)
@register.simple_tag(takes_context=True)
def navigation_resolve_menu(context, name, source=None, sort_results=None):
return _navigation_resolve_menu(
context=context, name=name, source=source, sort_results=sort_results
)
@register.simple_tag(takes_context=True)
def navigation_resolve_menus(context, names, source=None, sort_results=None):
result = []
for name in names.split(','):
result.extend(
_navigation_resolve_menu(
context=context, name=name, source=source, sort_results=sort_results
)
)
return result
@register.simple_tag(takes_context=True)
def navigation_source_column_get_sort_icon(context, column):
if column:
result = column.get_sort_icon(context=context)
return result
else:
return ''
@register.simple_tag(takes_context=True)
def navigation_source_column_resolve(context, column):
if column:
result = column.resolve(context=context)
return result
else:
return ''
|
nilq/baby-python
|
python
|
from esipy.client import EsiClient
from waitlist.utility.swagger.eve import get_esi_client
from waitlist.utility.swagger import get_api
from waitlist.utility.swagger.eve.universe.responses import ResolveIdsResponse,\
CategoriesResponse, CategoryResponse, GroupResponse, GroupsResponse,\
TypesResponse, TypeResponse
from typing import List
from esipy.exceptions import APIException
class UniverseEndpoint(object):
def __init__(self, client: EsiClient = None) -> None:
if client is None:
self.__client: EsiClient = get_esi_client(
token=None, noauth=True, retry_request=True)
self.__api: App = get_api()
else:
self.__client: EsiClient = client
self.__api: App = get_api()
def resolve_ids(self, ids_list: [int]) -> ResolveIdsResponse:
"""
:param ids_list: ids to resolve (a maximum of 1000 ids is allowed per request)
"""
resp = self.__client.request(
self.__api.op['post_universe_names'](ids=ids_list))
return ResolveIdsResponse(resp)
def get_categories(self) -> CategoriesResponse:
"""
Get response containing a list of all category ids
"""
resp = self.__client.request(
self.__api.op['get_universe_categories']())
return CategoriesResponse(resp)
def get_category(self, category_id: int) -> CategoryResponse:
"""
Get response containing information about the category
"""
resp = self.__client.request(
self.__api.op['get_universe_categories_category_id'](
category_id=category_id))
return CategoryResponse(resp)
def get_category_multi(self,
category_ids: List[int]) -> List[CategoryResponse]:
ops = []
for category_id in category_ids:
ops.append(self.__api.op['get_universe_categories_category_id'](
category_id=category_id))
response_infos = self.__client.multi_request(ops)
return [CategoryResponse(info[1]) for info in response_infos]
def get_groups(self) -> List[GroupsResponse]:
"""
Get response containing a list of all group ids
"""
resp = self.__client.head(
self.__api.op['get_universe_groups'](page=1))
if (resp.status != 200):
raise APIException("", resp.status)
pages = 1
if 'X-Pages' in resp.header:
pages = int(resp.header['X-Pages'][0])
ops = []
for page in range(1, pages+1):
ops.append(self.__api.op['get_universe_groups'](page=page))
responses = self.__client.multi_request(ops)
response_list: List[GroupsResponse] = []
for data_tuple in responses: # (request, response)
response_list.append(GroupsResponse(data_tuple[1]))
return response_list
def get_group(self, group_id: int) -> GroupResponse:
"""
Get response containing information about the group
"""
resp = self.__client.request(
self.__api.op['get_universe_groups_group_id'](
group_id=group_id))
return GroupResponse(resp)
def get_group_multi(self, group_ids: List[int]) -> List[GroupResponse]:
ops = []
for group_id in group_ids:
ops.append(self.__api.op['get_universe_groups_group_id'](
group_id=group_id))
response_infos = self.__client.multi_request(ops)
return [GroupResponse(info[1]) for info in response_infos]
def get_types(self) -> List[TypesResponse]:
"""
Get response containing a list of all type ids
"""
resp = self.__client.head(
self.__api.op['get_universe_types'](page=1))
if (resp.status != 200):
raise APIException("", resp.status)
pages = 1
if 'X-Pages' in resp.header:
pages = int(resp.header['X-Pages'][0])
ops = []
for page in range(1, pages+1):
ops.append(self.__api.op['get_universe_types'](page=page))
responses = self.__client.multi_request(ops)
response_list: List[TypesResponse] = []
for data_tuple in responses: # (request, response)
response_list.append(TypesResponse(data_tuple[1]))
return response_list
def get_type(self, type_id: int) -> TypeResponse:
"""
Get response containing information about the type
"""
resp = self.__client.request(
self.__api.op['get_universe_types_type_id'](
type_id=type_id))
return TypeResponse(resp)
def get_type_multi(self, type_ids: List[int]) -> List[TypeResponse]:
ops = []
for type_id in type_ids:
ops.append(self.__api.op['get_universe_types_type_id'](
type_id=type_id))
response_infos = self.__client.multi_request(ops)
return [TypeResponse(info[1]) for info in response_infos]
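# Example usage (an illustrative sketch; it assumes get_esi_client/get_api can build
# a working ESI swagger client in this deployment, and the ids are arbitrary examples):
#
#     endpoint = UniverseEndpoint()
#     names = endpoint.resolve_ids([587, 34])           # up to 1000 ids per request
#     categories = endpoint.get_categories()            # all category ids
#     group = endpoint.get_group(25)                    # single group lookup
#     types = endpoint.get_type_multi([587, 588, 589])  # batched lookups in one call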
|
nilq/baby-python
|
python
|
titulo = str(input('qual o titulo: '))
autor = str(input('escritor: '))
comando = ['/give @p written_book{pages:[', "'", '"','] ,title:', ',author:', '}', titulo, autor] # command skeleton
l = str(input('cole aqui: ')) # the book text itself
tl = len(l) # total number of characters in the book
print(tl)
qcn = (tl/256) # number of cuts (pages) needed
print(qcn)
qbn = 256 # number of characters supported per page
aux = 1 # auxiliary variable
input('s? ')
for ndp in range(0, int(qcn)):
qnd = (l[aux -1:qbn])
comando.append(qnd)
print(qnd)
qbn = qbn + 256
if tl > 255:
aux = aux + 256
print('fim conferencia')
# print the full /give command: pages comando[8]..comando[107], each wrapped in quote
# tokens and separated by commas, followed by the title and author fields
args = [comando[0], comando[1], comando[2], comando[8], comando[2], comando[1]]
for pagina in range(9, 108):
    args += [',', comando[1], comando[2], comando[pagina], comando[2], comando[1]]
args += [comando[3], comando[2], comando[6], comando[2], comando[4], comando[2], comando[7], comando[2], comando[5]]
print(*args)
|
nilq/baby-python
|
python
|
r"""
Super Partitions
AUTHORS:
- Mike Zabrocki
A super partition of size `n` and fermionic sector `m` is a
pair consisting of a strict partition of some integer `r` of
length `m` (that may end in a `0`) and an integer partition of
`n - r`.
This module provides tools for manipulating super partitions.
Super partitions are the indexing set for symmetric functions in
super space.
Super partitions may be input in two different formats: one as a pair
consisting of a fermionic (strict partition) part and a bosonic (partition) part
and the other as a list of integer values where the negative entries come
first and are listed in strict order followed by the positive values in
weak order.
A super partition is displayed as two partitions separated by a semicolon
as a default. Super partitions may also be displayed as a weakly increasing
sequence of integers that are strict if the numbers are not positive.
These combinatorial objects index the space of symmetric polynomials in
two sets of variables, one commuting and one anti-commuting, and they
are known as symmetric functions in super space (hence the origin of the
name super partitions).
EXAMPLES::
sage: SuperPartitions()
Super Partitions
sage: SuperPartitions(2)
Super Partitions of 2
sage: SuperPartitions(2).cardinality()
8
sage: SuperPartitions(4,2)
Super Partitions of 4 and of fermionic sector 2
sage: [[2,0],[1,1]] in SuperPartitions(4,2)
True
sage: [[1,0],[1,1]] in SuperPartitions(4,2)
False
sage: [[1,0],[2,1]] in SuperPartitions(4)
True
sage: [[1,0],[2,2,1]] in SuperPartitions(4)
False
sage: [[1,0],[2,1]] in SuperPartitions()
True
sage: [[1,1],[2,1]] in SuperPartitions()
False
sage: [-2, 0, 1, 1] in SuperPartitions(4,2)
True
sage: [-1, 0, 1, 1] in SuperPartitions(4,2)
False
sage: [-2, -2, 2, 1] in SuperPartitions(7,2)
False
REFERENCES:
- [JL2016]_
"""
#*****************************************************************************
# Copyright (C) 2018 Mike Zabrocki <zabrocki at mathstat.yorku.ca>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function, absolute_import, division
from six import add_metaclass
from functools import reduce
from sage.structure.list_clone import ClonableArray
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.structure.richcmp import richcmp, richcmp_method
from sage.combinat.partition import Partition, Partitions, _Partitions
from sage.combinat.composition import Composition
from sage.categories.enumerated_sets import EnumeratedSets
from sage.rings.integer import Integer
from sage.structure.global_options import GlobalOptions
from sage.rings.all import ZZ
from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass
from sage.misc.all import uniq
@richcmp_method
@add_metaclass(InheritComparisonClasscallMetaclass)
class SuperPartition(ClonableArray):
r"""
A super partition.
A *super partition* of size `n` and fermionic sector `m` is a
pair consisting of a strict partition of some integer `r` of
length `m` (that may end in a `0`) and an integer partition of
`n - r`.
EXAMPLES::
sage: sp = SuperPartition([[1,0],[2,2,1]]); sp
[1, 0; 2, 2, 1]
sage: sp[0]
(1, 0)
sage: sp[1]
(2, 2, 1)
sage: sp.fermionic_degree()
2
sage: sp.bosonic_degree()
6
sage: sp.length()
5
sage: sp.conjugate()
[4, 2; ]
"""
@staticmethod
def __classcall_private__(cls, lst):
r"""
        Construct a super partition in the correct parent.
EXAMPLES::
sage: SuperPartition([[1],[1]]).parent()
Super Partitions
sage: SuperPartition([[1],[1]])
[1; 1]
sage: SuperPartition([-1, 1])
[1; 1]
sage: SuperPartition([[1,1],[1]])
Traceback (most recent call last):
...
ValueError: [[1, 1], [1]] not in Super Partitions
sage: SuperPartition([-1,1])
[1; 1]
sage: SuperPartition([])
[; ]
sage: SP = SuperPartitions(8,4)([[3,2,1,0],[2]])
sage: SuperPartition(SP) is SP
True
"""
if isinstance(lst, SuperPartition):
return lst
SPs = SuperPartitions()
if not lst:
return SPs([[],[]])
elif isinstance(lst[0], (list, tuple)):
return SPs([[Integer(a) for a in lst[0]],
[Integer(a) for a in lst[1]]])
else:
return SPs([[-a for a in lst if a <= 0],
[a for a in lst if a > 0]])
def __init__(self, parent, lst, check=True, immutable=True):
"""
Initialize ``self``.
EXAMPLES::
sage: SP = SuperPartition([[1],[1]])
sage: TestSuite(SP).run()
"""
if check and lst not in parent:
raise ValueError("%s not in %s" % (lst, parent))
lst = [tuple(lst[0]), tuple(lst[1])]
ClonableArray.__init__(self, parent, lst, False, immutable)
def check(self):
"""
Check that ``self`` is a valid super partition.
EXAMPLES::
sage: SP = SuperPartition([[1],[1]])
sage: SP.check()
"""
if self not in self.parent():
raise ValueError("%s not in %s"%(self, self.parent()))
def __richcmp__(self, other, op):
r"""
Check whether ``self`` is equal to ``other``.
.. TODO::
This overwrites the equality check of
:class:`~sage.structure.list_clone.ClonableArray`
in order to circumvent the coercion framework.
Eventually this should be solved more elegantly.
For now, two elements are compared by their defining lists.
"""
if isinstance(other, SuperPartition):
return richcmp(list(self), list(other), op)
else:
return richcmp(list(self), other, op)
def _hash_(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: SP = SuperPartition([[1],[1]])
sage: hash(tuple(SP)) == hash(SP)
True
"""
return hash(tuple(self))
def _repr_(self):
r"""
Return a string representation of ``self``.
A super partition is represented by the antisymmetric and symmetric
parts separated by a semicolon.
EXAMPLES::
sage: SuperPartition([[1],[1]])
[1; 1]
sage: SuperPartition([[],[1]])
[; 1]
sage: SuperPartition([])
[; ]
sage: SuperPartitions.options.display = "list"
sage: SuperPartition([[1],[1]])
[-1, 1]
sage: SuperPartition([[],[1]])
[1]
sage: SuperPartition([-2,-1,0,2,1])
[-2, -1, 0, 2, 1]
sage: SuperPartitions.options.display = "pair"
sage: SuperPartition([[1],[1]])
[[1], [1]]
sage: SuperPartition([[],[1]])
[[], [1]]
sage: SuperPartition([-2,-1,0,2,1])
[[2, 1, 0], [2, 1]]
sage: SuperPartitions.options._reset()
"""
display = self.parent().options.display
if display == "default":
return '['+', '.join(str(a) for a in self.antisymmetric_part())+\
'; '+', '.join(str(a) for a in self.symmetric_part())+']'
elif display == "pair":
return self._repr_pair()
elif display == "list":
return self._repr_list()
def _repr_pair(self):
r"""
        Representation of a super partition as a pair.
A super partition is represented by a list consisting of the
antisymmetric and symmetric parts.
EXAMPLES::
sage: SuperPartition([[1],[1]])._repr_pair()
'[[1], [1]]'
sage: SuperPartition([[],[1]])._repr_pair()
'[[], [1]]'
sage: SuperPartition([[],[]])._repr_pair()
'[[], []]'
"""
return repr(self.to_list())
def _repr_list(self):
r"""
        Representation of a super partition as a list.
A super partition is represented by a list consisting of the
negative values for the antisymmetric part listed first followed
        by positive values for the symmetric part.
EXAMPLES::
sage: SuperPartition([[1],[1]])._repr_list()
'[-1, 1]'
sage: SuperPartition([[],[1]])._repr_list()
'[1]'
sage: SuperPartition([[],[]])._repr_list()
'[]'
"""
return repr([-a for a in self[0]] + list(self[1]))
def _latex_(self):
r"""
        Return a LaTeX representation of ``self``.
A super partition is represented by the antisymmetric and symmetric
parts separated by a semicolon.
EXAMPLES::
sage: latex(SuperPartition([[1],[1]]))
(1; 1)
sage: latex(SuperPartition([[],[1]]))
(; 1)
"""
return ('(' + ','.join(str(a) for a in self.antisymmetric_part())
+ '; ' + ', '.join(str(a) for a in self.symmetric_part()) + ')')
def to_list(self):
r"""
The list of two lists with the antisymmetric and symmetric parts.
EXAMPLES::
sage: SuperPartition([[1],[1]]).to_list()
[[1], [1]]
sage: SuperPartition([[],[1]]).to_list()
[[], [1]]
"""
return [list(self[0]), list(self[1])]
def to_composition(self):
r"""
Concatenate the antisymmetric and symmetric parts to a composition.
OUTPUT:
- a (possibly weak) composition
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).to_composition()
[3, 1, 2, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).to_composition()
[2, 1, 0, 3, 3]
sage: SuperPartition([[2,1,0],[3,3]]).to_composition().parent()
Compositions of non-negative integers
"""
return Composition(self[0] + self[1])
def to_partition(self):
r"""
Concatenate and sort the antisymmetric and symmetric parts
to a partition.
OUTPUT:
- a partition
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).to_partition()
[3, 2, 2, 1, 1]
sage: SuperPartition([[2,1,0],[3,3]]).to_partition()
[3, 3, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).to_partition().parent()
Partitions
"""
return Partition(sorted(self[0] + self[1], reverse=True))
def antisymmetric_part(self):
r"""
The antisymmetric part as a list of strictly decreasing integers.
OUTPUT:
- a list
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).antisymmetric_part()
[3, 1]
sage: SuperPartition([[2,1,0],[3,3]]).antisymmetric_part()
[2, 1, 0]
"""
return list(self[0])
a_part = antisymmetric_part
def symmetric_part(self):
r"""
The symmetric part as a list of weakly decreasing integers.
OUTPUT:
- a list
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).symmetric_part()
[2, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).symmetric_part()
[3, 3]
"""
return list(self[1])
s_part = symmetric_part
def bosonic_degree(self):
r"""
Return the bosonic degree of ``self``.
The *bosonic degree* is the sum of the sizes of the
antisymmetric and symmetric parts.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).bosonic_degree()
9
sage: SuperPartition([[2,1,0],[3,3]]).bosonic_degree()
9
"""
return sum(self.antisymmetric_part() + self.symmetric_part())
degree = bosonic_degree
def fermionic_degree(self):
r"""
Return the fermionic degree of ``self``.
The *fermionic degree* is the length of the antisymmetric part.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).fermionic_degree()
2
sage: SuperPartition([[2,1,0],[3,3]]).fermionic_degree()
3
"""
return len(self.antisymmetric_part())
fermionic_sector = fermionic_degree
def bi_degree(self):
r"""
Return the bidegree of ``self``, which is a pair consisting
of the bosonic and fermionic degree.
OUTPUT:
- a tuple of two integers
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).bi_degree()
(9, 2)
sage: SuperPartition([[2,1,0],[3,3]]).bi_degree()
(9, 3)
"""
return (self.bosonic_degree(), self.fermionic_degree())
def length(self):
r"""
Return the length of ``self``, which is the sum of the
lengths of the antisymmetric and symmetric part.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).length()
5
sage: SuperPartition([[2,1,0],[3,3]]).length()
5
"""
        return self.fermionic_degree() + len(self.symmetric_part())
def bosonic_length(self):
r"""
Return the length of the partition of the symmetric part.
OUTPUT:
- an integer
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).bosonic_length()
3
sage: SuperPartition([[2,1,0],[3,3]]).bosonic_length()
2
"""
return len(self.symmetric_part())
def shape_circled_diagram(self):
r"""
        The concatenated partition with an extra cell added for each antisymmetric part.
OUTPUT:
- a partition
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).shape_circled_diagram()
[4, 2, 2, 2, 1]
sage: SuperPartition([[2,1,0],[3,3]]).shape_circled_diagram()
[3, 3, 3, 2, 1]
"""
return Partition(sorted([a+1 for a in self.antisymmetric_part()]
+ self.symmetric_part(), reverse=True))
@staticmethod
def from_circled_diagram(shape, corners):
r"""
Construct a super partition from a circled diagram.
A circled diagram consists of a partition of the concatenation of
the antisymmetric and symmetric parts and a list of addable cells
of the partition which indicate the location of the circled cells.
INPUT:
- ``shape`` -- a partition or list of integers
        - ``corners`` -- a list of addable cells of ``shape``
OUTPUT:
- a :class:`SuperPartition`
EXAMPLES::
sage: SuperPartition.from_circled_diagram([3, 2, 2, 1, 1], [(0, 3), (3, 1)])
[3, 1; 2, 2, 1]
sage: SuperPartition.from_circled_diagram([3, 3, 2, 1], [(2, 2), (3, 1), (4, 0)])
[2, 1, 0; 3, 3]
sage: from_cd = SuperPartition.from_circled_diagram
sage: all(sp == from_cd(*sp.to_circled_diagram()) for sp in SuperPartitions(4))
True
"""
return SuperPartition([sorted([c[1] for c in corners], reverse=True),
[shape[i] for i in range(len(shape))
if i not in [c[0] for c in corners]]])
def to_circled_diagram(self):
r"""
The shape of the circled diagram and a list of addable cells
A circled diagram consists of a partition for the outer shape
        and a list of addable cells of the partition indicating the
        location of the circled cells.
OUTPUT:
- a list consisting of a partition and a list of pairs of integers
EXAMPLES::
sage: SuperPartition([[3,1],[2,2,1]]).to_circled_diagram()
[[3, 2, 2, 1, 1], [(0, 3), (3, 1)]]
sage: SuperPartition([[2,1,0],[3,3]]).to_circled_diagram()
[[3, 3, 2, 1], [(2, 2), (3, 1), (4, 0)]]
sage: from_cd = SuperPartition.from_circled_diagram
sage: all(sp == from_cd(*sp.to_circled_diagram()) for sp in SuperPartitions(4))
True
"""
shape = self.to_partition()
corners = [c for c in shape.addable_cells() if c[1] in self.antisymmetric_part()]
return [shape, corners]
def conjugate(self):
r"""
Conjugate of a super partition.
The *conjugate* of a super partition is defined by conjugating
the circled diagram.
        OUTPUT:
- a :class:`SuperPartition`
EXAMPLES::
sage: SuperPartition([[3, 1, 0], [4, 3, 2, 1]]).conjugate()
[6, 4, 1; 3]
sage: all(sp == sp.conjugate().conjugate() for sp in SuperPartitions(4))
True
sage: all(sp.conjugate() in SuperPartitions(3,2) for sp in SuperPartitions(3,2))
True
"""
sd = self.to_circled_diagram()
return SuperPartition.from_circled_diagram(sd[0].conjugate(),
[(j,i) for (i,j) in sd[1]])
def zee(self):
r"""
Return the centralizer size of a permutation of cycle
type symmetric part of ``self``.
OUTPUT:
- a positive integer
EXAMPLES::
sage: SuperPartition([[1,0],[3,1,1]]).zee()
6
sage: SuperPartition([[1],[2,2,1]]).zee()
8
sage: sum(1/sp.zee() for sp in SuperPartitions(6,0))
1
"""
return Partition(self.symmetric_part()).centralizer_size()
def sign(self):
r"""
Return the sign of a permutation of cycle type the
symmetric part of ``self``.
OUTPUT:
- either `1` or `-1`
EXAMPLES::
sage: SuperPartition([[1,0],[3,1,1]]).sign()
-1
sage: SuperPartition([[1,0],[3,2,1]]).sign()
1
sage: sum(sp.sign()/sp.zee() for sp in SuperPartitions(6,0))
0
"""
return (-1)**(self.degree()-len(self.symmetric_part()))
def dominates(self, other):
r"""
Return ``True`` if and only if ``self`` dominates ``other``.
If the symmetric and anti-symmetric parts of ``self`` and ``other``
are not the same size then the result is ``False``.
EXAMPLES::
sage: LA = SuperPartition([[2,1],[2,1,1]])
sage: LA.dominates([[2,1],[3,1]])
False
sage: LA.dominates([[2,1],[1,1,1,1]])
True
sage: LA.dominates([[3],[2,1,1]])
False
sage: LA.dominates([[1],[1]*6])
False
"""
return (self.degree() == sum(other[0]) + sum(other[1]) and
Partition(self.antisymmetric_part()).dominates(other[0]) and
Partition(self.symmetric_part()).dominates(other[1]))
def add_horizontal_border_strip_star(self, h):
r"""
Return a list of super partitions that differ from ``self``
by a horizontal strip.
The notion of horizontal strip comes from the Pieri rule for the
Schur-star basis of symmetric functions in super space (see
Theorem 7 from [JL2016]_).
INPUT:
- ``h`` -- number of cells in the horizontal strip
        OUTPUT:
- a list of super partitions
EXAMPLES::
sage: SuperPartition([[4,1],[3]]).add_horizontal_border_strip_star(3)
[[4, 1; 3, 3],
[4, 1; 4, 2],
[3, 1; 5, 2],
[4, 1; 5, 1],
[3, 1; 6, 1],
[4, 0; 4, 3],
[3, 0; 5, 3],
[4, 0; 5, 2],
[3, 0; 6, 2],
[4, 1; 6],
[3, 1; 7]]
sage: SuperPartition([[2,1],[3]]).add_horizontal_border_strip_star(2)
[[2, 1; 3, 2], [2, 1; 4, 1], [2, 0; 3, 3], [2, 0; 4, 2], [2, 1; 5]]
"""
sp1, circ_list = self.to_circled_diagram()
nsp = [list(la) + [0] for la in sp1.add_horizontal_border_strip(h)]
sp1 = sp1 + [0]
out = []
for elt in nsp:
row_changed = [row1-row2 for row1,row2 in zip(elt,sp1)]
new_sp = [elt, [(i[0]+1, elt[i[0]+1]) for i in circ_list
if row_changed[i[0]] != 0]
                            # TODO: Check that this is not supposed to be
# a tuple of size 1
+ [(i) for i in circ_list if row_changed[i[0]] == 0]]
if len(uniq([k for (j,k) in new_sp[1]])) == len(new_sp[1]):
out += [SuperPartition.from_circled_diagram(*new_sp)]
return out
def add_horizontal_border_strip_star_bar(self, h):
r"""
List super partitions that differ from ``self`` by a horizontal strip.
The notion of horizontal strip comes from the Pieri rule for the
Schur-star-bar basis of symmetric functions in super space (see
Theorem 10 from [JL2016]_).
INPUT:
- ``h`` -- number of cells in the horizontal strip
        OUTPUT:
- a list of super partitions
EXAMPLES::
sage: SuperPartition([[4,1],[5,4]]).add_horizontal_border_strip_star_bar(3)
[[4, 3; 5, 4, 1],
[4, 1; 5, 4, 3],
[4, 2; 5, 5, 1],
[4, 1; 5, 5, 2],
[4, 2; 6, 4, 1],
[4, 1; 6, 4, 2],
[4, 1; 6, 5, 1],
[4, 1; 7, 4, 1],
[4, 3; 5, 5],
[4, 3; 6, 4],
[4, 2; 6, 5],
[4, 2; 7, 4],
[4, 1; 7, 5],
[4, 1; 8, 4]]
sage: SuperPartition([[3,1],[5]]).add_horizontal_border_strip_star_bar(2)
[[3, 2; 5, 1],
[3, 1; 5, 2],
[4, 1; 5, 1],
[3, 1; 6, 1],
[4, 2; 5],
[3, 2; 6],
[4, 1; 6],
[3, 1; 7]]
"""
sp1, circ_list = self.to_circled_diagram()
nsp = [list(la) + [0] for la in sp1.add_horizontal_border_strip(h)]
sp1 = sp1 + [0]
out = []
for asp in nsp:
asp = asp + [0]
change_in_rows = [asp[i] - sp1[i] for i in range(len(sp1))]
moved_circ_list = [[] for i in range(len(circ_list))]
for i,pos in enumerate(circ_list):
if change_in_rows[pos[0]] == 0:
moved_circ_list[i].append(pos)
else:
if pos[0] == 0:
moved_circ_list[i].append((0, pos[1]+change_in_rows[0]))
if pos[1] == asp[1]:
moved_circ_list[i].append((1, asp[1]))
else:
if pos[1] + change_in_rows[pos[0]] < sp1[pos[0]-1]:
moved_circ_list[i].append((pos[0], pos[1]+change_in_rows[pos[0]]))
if asp[pos[0]+1] == sp1[pos[0]]:
moved_circ_list[i].append((pos[0]+1, pos[1]))
out += [[moved_circ_list, asp]]
result = []
for i in out:
if not i[0]:
result += [[i[1],i[0]]]
else:
x = reduce(lambda a,b: [item_a + item_b for item_a in a for item_b in b], i[0])
for j in x:
result += [[i[1], list(zip(j,j[1:]))[::2]]]
return [SuperPartition.from_circled_diagram(*i)
for i in result if len(i[1]) == len(self[0])]
class SuperPartitions(UniqueRepresentation, Parent):
r"""
Super partitions.
A super partition of size `n` and fermionic sector `m` is a
pair consisting of a strict partition of some integer `r` of
length `m` (that may end in a `0`) and an integer partition of
`n - r`.
INPUT:
- ``n`` -- an integer (optional: default ``None``)
- ``m`` -- if ``n`` is specified, an integer (optional: default ``None``)
Super partitions are the indexing set for symmetric functions
in super space.
EXAMPLES::
sage: SuperPartitions()
Super Partitions
sage: SuperPartitions(2)
Super Partitions of 2
sage: SuperPartitions(2).cardinality()
8
sage: SuperPartitions(4,2)
Super Partitions of 4 and of fermionic sector 2
sage: [[2,0],[1,1]] in SuperPartitions(4,2)
True
sage: [[1,0],[1,1]] in SuperPartitions(4,2)
False
sage: [[1,0],[2,1]] in SuperPartitions(4)
True
sage: [[1,0],[2,2,1]] in SuperPartitions(4)
False
sage: [[1,0],[2,1]] in SuperPartitions()
True
sage: [[1,1],[2,1]] in SuperPartitions()
False
"""
@staticmethod
def __classcall_private__(self, n=None, m=None, **kwargs):
r"""
Return the corresponding parent based upon input.
TESTS::
sage: from sage.combinat.superpartition import *
sage: isinstance(SuperPartitions(), SuperPartitions_all)
True
sage: isinstance(SuperPartitions(3), SuperPartitions_n)
True
sage: isinstance(SuperPartitions(3,2), SuperPartitions_n_m)
True
::
sage: SP = SuperPartitions(5,2)
sage: SP2 = SuperPartitions(int(5),int(2))
sage: SP3 = SuperPartitions(ZZ(5),int(2))
sage: SP is SP2
True
sage: SP is SP3
True
::
sage: SP = SuperPartitions(5)
sage: SP2 = SuperPartitions(int(5))
sage: SP3 = SuperPartitions(ZZ(5))
sage: SP is SP2
True
sage: SP is SP3
True
"""
if n is None:
return SuperPartitions_all()
elif n in ZZ:
if m is None:
return SuperPartitions_n(n)
elif m in ZZ:
return SuperPartitions_n_m(n, m)
raise ValueError("m must be an integer")
raise ValueError("n must be an integer")
def __init__(self, is_infinite=False):
"""
Initialize ``self``.
EXAMPLES::
sage: SP = SuperPartitions()
sage: TestSuite(SP).run()
"""
cat = EnumeratedSets()
if is_infinite:
cat = cat.Infinite()
else:
cat = cat.Finite()
Parent.__init__(self, category=cat)
Element = SuperPartition
class options(GlobalOptions):
"""
Set the global options for elements of the SuperPartition class.
The defaults are for Super Partitions to be displayed in a list
notation with the fermionic part and the bosonic part separated
by a semicolon. There is a slight disadvantage to this notation
        because a list containing a semicolon cannot be used as input
for a super partition.
@OPTIONS@
EXAMPLES::
sage: sp = SuperPartition([[1, 0], [2, 2, 1]])
sage: SuperPartitions.options.display
default
sage: sp
[1, 0; 2, 2, 1]
sage: SuperPartitions.options.display = 'list'
sage: sp
[-1, 0, 2, 2, 1]
sage: SuperPartitions.options._reset()
""",
NAME = 'SuperPartition'
module = 'sage.combinat.superpartition'
display = dict(default="default",
description="Specifies how the super partitions should "
"be printed",
                       values=dict(list="the super partition is displayed as a "
                                        "list of integers",
                                   pair="the super partition is displayed as a "
                                        "list of two lists",
default="the super partition is displayed in "
"a form [fermionic part; bosonic part]"),
case_sensitive=False)
def _element_constructor_(self, lst, check=True):
"""
Construct an element with ``self`` as parent.
EXAMPLES::
sage: SP = SuperPartitions()
sage: SP([[],[3,3,1]])
[; 3, 3, 1]
sage: SP([[],[3,3,1]]) in SP
True
sage: SP([[],[3,3,1]]).parent()
Super Partitions
sage: SuperPartitions(7)([[],[3,3,1]])
[; 3, 3, 1]
sage: SuperPartitions(7,0)([[],[3,3,1]])
[; 3, 3, 1]
sage: SuperPartitions(7,1)([[],[3,3,1]])
Traceback (most recent call last):
...
ValueError: [[], [3, 3, 1]] not in Super Partitions of 7 and of fermionic sector 1
"""
if not lst:
return self.element_class(self, [[], []], check=check)
if isinstance(lst, SuperPartition):
lst = list(lst)
if isinstance(lst[0], (list, tuple)):
return self.element_class(self, [lst[0], [a for a in lst[1] if a > 0]],
check=check)
else:
return self.element_class(self, [[-a for a in lst if a <= 0],
[a for a in lst if a > 0]],
check=check)
def __contains__(self, x):
"""
TESTS::
sage: [[1],[2,1]] in SuperPartitions()
True
sage: [[],[]] in SuperPartitions()
True
sage: [[0],[]] in SuperPartitions()
True
sage: [[],[0]] in SuperPartitions()
True
sage: [-1, 2, 1] in SuperPartitions()
True
sage: [2, -1, 1, 0] in SuperPartitions()
True
sage: [2, 0, 1, -1] in SuperPartitions()
False
sage: [] in SuperPartitions()
True
sage: [0] in SuperPartitions()
True
"""
if isinstance(x, SuperPartition):
return True
if isinstance(x, (list, tuple)) and all(isinstance(i, (int, Integer))
or i in ZZ for i in x):
sp = [a for a in x if a <= 0]
return (all(sp[i] > sp[i-1] for i in range(1,len(sp)))
and [a for a in x if a > 0] in _Partitions)
elif (isinstance(x, (list, tuple)) and len(x) == 2
and isinstance(x[0], (list, tuple)) and isinstance(x[1], (list, tuple))):
for i in x[0] + x[1]:
if i not in ZZ:
return False
if i < 0:
return False
return (all(x[0][i] > x[0][i+1] for i in range(len(x[0])-1))
and all(x[1][i] >= x[1][i+1] for i in range(len(x[1])-1))
and ((not x[0]) or x[0][-1] >= 0) and ((not x[1]) or x[1][-1] >= 0))
else:
return False
class SuperPartitions_n_m(SuperPartitions):
def __init__(self, n, m):
"""
Initialize ``self``.
TESTS::
sage: SP = SuperPartitions(3,2)
sage: TestSuite(SP).run()
"""
self.n = n
self.m = m
SuperPartitions.__init__(self, False)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: repr(SuperPartitions(3,2))
'Super Partitions of 3 and of fermionic sector 2'
"""
return "Super Partitions of %s and of fermionic sector %s"%(self.n, self.m)
def __contains__(self, x):
"""
TESTS::
sage: [[3,2,1,0],[2]] in SuperPartitions(8,4)
True
sage: [[3,2,1,0],[]] in SuperPartitions(6,3)
False
sage: [[],[]] in SuperPartitions(0,0)
True
sage: [[0],[]] in SuperPartitions(0,1)
True
sage: [[],[]] in SuperPartitions(0,1)
False
sage: [-3,-2,-1,0,2] in SuperPartitions(8,4)
True
sage: [0] in SuperPartitions(0,0)
False
sage: [] in SuperPartitions(0,0)
True
sage: [0] in SuperPartitions(0,1)
True
"""
if x in SuperPartitions():
if not x:
return self.n == 0 and self.m == 0
if isinstance(x[0], (list, tuple)):
n = sum(x[0] + x[1])
m = len(x[0])
else:
n = sum(abs(a) for a in x)
m = len([a for a in x if a <= 0])
return n == self.n and m == self.m
else:
return False
def __iter__(self):
r"""
An iterator for super partitions of degree ``n`` and sector ``m``.
EXAMPLES::
sage: SuperPartitions(6,2).cardinality()
28
sage: SuperPartitions(6,4).first()
[3, 2, 1, 0; ]
"""
for r in range(self.n+1):
for p1 in Partitions(r):
for p0 in Partitions(self.n-r, max_slope=-1, length=self.m):
yield self.element_class(self, [list(p0), list(p1)])
for p0 in Partitions(self.n-r, max_slope=-1, length=self.m-1):
yield self.element_class(self, [list(p0)+[0], list(p1)])
class SuperPartitions_n(SuperPartitions):
def __init__(self, n):
"""
Initialize ``self``.
TESTS::
sage: SP = SuperPartitions(3)
sage: TestSuite(SP).run()
"""
self.n = n
SuperPartitions.__init__(self, False)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: repr(SuperPartitions(3))
'Super Partitions of 3'
"""
return "Super Partitions of %s"%self.n
def __contains__(self, x):
"""
EXAMPLES::
sage: SuperPartitions(7)([[],[3,3,1]]) in SuperPartitions()
True
sage: SuperPartitions()([[],[3,3,1]]) in SuperPartitions(7)
True
sage: [[],[]] in SuperPartitions(0)
True
sage: [[0],[]] in SuperPartitions(0)
True
sage: [0] in SuperPartitions(0)
True
sage: [] in SuperPartitions(0)
True
sage: [1] in SuperPartitions(0)
False
"""
if x in SuperPartitions():
if not x:
return self.n == 0
if isinstance(x[0], (list, tuple)):
n = sum(x[0] + x[1])
else:
n = sum(abs(a) for a in x)
return n == self.n
else:
return False
def __iter__(self):
r"""
An iterator for super partitions of degree ``n``.
EXAMPLES::
sage: SuperPartitions(1).list()
[[; 1], [1; ], [0; 1], [1, 0; ]]
sage: SuperPartitions(6).cardinality()
80
"""
m = 0
while self.n >= m * (m-1) // 2:
for LA in SuperPartitions(self.n, m):
yield self.element_class(self, LA)
m += 1
class SuperPartitions_all(SuperPartitions):
def __init__(self):
"""
Initialize ``self``.
TESTS::
sage: SP = SuperPartitions()
sage: TestSuite(SP).run()
"""
SuperPartitions.__init__(self, True)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: repr(SuperPartitions())
'Super Partitions'
"""
return "Super Partitions"
def __iter__(self):
"""
Iterate over all super partitions.
EXAMPLES::
sage: SP = SuperPartitions()
sage: it = SP.__iter__()
sage: [next(it) for i in range(6)]
[[; ], [0; ], [; 1], [1; ], [0; 1], [1, 0; ]]
"""
n = 0
while True:
for sp in SuperPartitions(n):
yield self.element_class(self, list(sp))
n += 1
|
nilq/baby-python
|
python
|
from ..job.job_context import JobContext
from ..task.task_context import TaskContext
from ..tc.tc_context import TcContext
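# VerticalContext is a plain container that bundles the system configuration
# dictionary with the optional task, job and tc contexts.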
class VerticalContext:
def __init__(self,
sys_conf_dict,
task_context: TaskContext = None,
job_context: JobContext = None,
tc_context: TcContext = None):
self.sys_conf_dict = sys_conf_dict
self.task_context = task_context
self.job_context = job_context
self.tc_context = tc_context
|
nilq/baby-python
|
python
|
import numpy as np
import random
import heapq
from itertools import count
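# time_fun is a sigmoid-shaped gate: it is close to 1 for small x and decays
# towards 0 once slope*x exceeds shift. The replay memory below feeds the
# eligibility-trace value through it to obtain a push probability.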
def time_fun(x, slope, shift):
return 1/(1+np.exp((slope*x - shift)))
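# eligibility_trace maintains a counter E that is bumped whenever a transition
# replaces an existing one (is_pushed) and is rescaled by r*lambda_v on every
# step (general_iterate); get_trace() turns E into the current acceptance
# probability via time_fun.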
class eligibility_trace():
def __init__(self, lambda_v, r, slope=3, shift=5):
self.E = 0
self.lambda_v = lambda_v
self.r = r
self.slope = slope
self.shift = shift
def get_trace(self):
return time_fun(self.E, slope=self.slope, shift=self.shift)
def general_iterate(self):
self.E = self.r*self.lambda_v*self.E
def is_pushed(self):
self.E += 1
class Transition_tuple():
def __init__(self, state, action, action_mean, reward, curiosity, next_state, done_mask, t):
        # expects a list of items for each initialization variable
self.state = np.array(state)
self.action = np.array(action)
self.action_mean = np.array(action_mean)
self.reward = np.array(reward)
self.curiosity = np.array(curiosity)
self.next_state = np.array(next_state)
self.done_mask = np.array(done_mask)
self.t = np.array(t)
def get_all_attributes(self):
return [self.state, self.action, self.action_mean, self.reward, self.curiosity, self.next_state, self.done_mask, self.t]
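# Reservoir_with_Cur_n_Time_Restirction_Replay_Memory keeps transitions in a
# min-heap keyed by curiosity. A new transition is only considered with a
# probability given by the eligibility trace; it is stored directly while the
# buffer is below capacity and afterwards replaces the least-curious entry
# whenever its own curiosity is higher.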
class Reservoir_with_Cur_n_Time_Restirction_Replay_Memory():
def __init__(self, capacity=10000, lambda_v=0.5, r=1, slope=3, shift=5):
self.capacity = capacity
self.storage = []
self.tiebreaker = count()
self.time_eligibilty_trace = eligibility_trace(lambda_v=lambda_v, r=r, slope=slope, shift=shift)
def push(self, state, action, action_mean, reward, curiosity, next_state, done_mask, t):
ran = random.uniform(0,1)
self.time_eligibilty_trace.general_iterate()
if ran < self.time_eligibilty_trace.get_trace():
data = (state, action, action_mean, reward, curiosity, next_state, done_mask, t)
priority = curiosity.item()
d = (priority, next(self.tiebreaker), data)
if len(self.storage) < self.capacity:
heapq.heappush(self.storage, d)
return True
elif priority > self.storage[0][0]:
heapq.heapreplace(self.storage, d)
self.time_eligibilty_trace.is_pushed()
return True
else:
return False
else:
return False
def sample(self, batch_size):
indices = self.get_sample_indices(batch_size)
state, action, action_mean, reward, curiosity, next_state, done_mask, t_array = self.encode_sample(indices=indices)
return Transition_tuple(state, action, action_mean, reward, curiosity, next_state, done_mask, t_array)
def encode_sample(self, indices):
state, action, action_mean, reward, curiosity, next_state, done_mask, t_array = [], [], [], [], [], [], [], []
for i in indices:
data = self.storage[i][2]
s, a, a_m, r, c, n_s, d, t = data
state.append(s)
action.append(a)
action_mean.append(a_m)
reward.append(r)
curiosity.append(c)
next_state.append(n_s)
done_mask.append(d)
t_array.append(t)
return state, action, action_mean, reward, curiosity, next_state, done_mask, t_array
def get_sample_indices(self, batch_size):
if len(self.storage) < self.capacity:
indices = np.random.choice(len(self.storage), batch_size)
else:
indices = np.random.choice(self.capacity, batch_size)
return indices
def __len__(self):
return len(self.storage)
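# --- Illustrative usage sketch (not part of the original module) ---
# The state/action sizes, reward and curiosity values below are invented for
# demonstration only; real transitions would come from an agent/environment loop.
if __name__ == "__main__":
    memory = Reservoir_with_Cur_n_Time_Restirction_Replay_Memory(capacity=100)
    for step in range(200):
        state = np.random.randn(4)
        action = np.random.randn(2)
        next_state = np.random.randn(4)
        memory.push(state, action, action, np.float64(1.0),
                    np.float64(random.random()), next_state, 0.0, step)
    if len(memory) >= 8:
        batch = memory.sample(batch_size=8)
        print(batch.state.shape, batch.curiosity.shape)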
|
nilq/baby-python
|
python
|
'''
true_env = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/online_learning/esarsa/step50k/gridsearch_realenv/"]
k1_notimeout = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k1_notimeout/esarsa/step10k/optimalfixed_eps0/"]
k1_timeout1000 = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k1_timeout1000/esarsa/step10k/optimalfixed_eps0/"]
k3ensemble_notimeout = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_adversarial_notimeout = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_adverarial_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
AcrobotdistantStart_regularTrans_timeout200 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
AcrobottrueStart_adversarialTrans_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/",
]
'''
'''
data2500_eps0_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps0"]
data2500_eps10_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps10"]
data2500_eps25_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps25"]
data2500_eps50_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps50"]
data2500_eps75_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps75"]
data2500_eps100_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step2500/optimalfixed_eps100"]
data2500_eps0_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps0"]
data2500_eps10_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps10"]
data2500_eps25_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps25"]
data2500_eps50_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps50"]
data2500_eps75_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps75"]
data2500_eps100_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step2500/optimalfixed_eps100"]
data2500_eps0_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps0"]
data2500_eps10_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps10"]
data2500_eps25_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps25"]
data2500_eps50_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps50"]
data2500_eps75_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps75"]
data2500_eps100_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step2500/optimalfixed_eps100"]
data5k_eps0_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps0"]
data5k_eps10_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps10"]
data5k_eps25_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps25"]
data5k_eps50_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps50"]
data5k_eps75_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps75"]
data5k_eps100_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step5k/optimalfixed_eps100"]
data5k_eps0_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps0"]
data5k_eps10_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps10"]
data5k_eps25_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps25"]
data5k_eps50_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps50"]
data5k_eps75_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps75"]
data5k_eps100_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step5k/optimalfixed_eps100"]
data5k_eps0_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps0"]
data5k_eps10_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps10"]
data5k_eps25_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps25"]
data5k_eps50_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps50"]
data5k_eps75_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps75"]
data5k_eps100_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step5k/optimalfixed_eps100"]
data10k_eps0_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps0"]
data10k_eps10_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps10"]
data10k_eps25_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps25"]
data10k_eps50_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps50"]
data10k_eps75_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps75"]
data10k_eps100_k1_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k1/esarsa/step10k/optimalfixed_eps100"]
data10k_eps0_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps0"]
data10k_eps10_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps10"]
data10k_eps25_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps25"]
data10k_eps50_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps50"]
data10k_eps75_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps75"]
data10k_eps100_k3_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k3/esarsa/step10k/optimalfixed_eps100"]
data10k_eps0_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps0"]
data10k_eps10_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps10"]
data10k_eps25_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps25"]
data10k_eps50_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps50"]
data10k_eps75_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps75"]
data10k_eps100_k5_p0 = ["../../data/hyperparam/acrobot/offline_learning/knn/k5/esarsa/step10k/optimalfixed_eps100"]
'''
'''
data10k_eps10_k5_p20_ens = [
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed1/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed2/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed3/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed4/transfer_optimalfixed_eps10",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed5/transfer_optimalfixed_eps10"
]
data10k_eps25_k5_p20_ens = [
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed1/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed2/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed3/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed4/transfer_optimalfixed_eps25",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed5/transfer_optimalfixed_eps25"
]
data10k_eps50_k5_p20_ens = [
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed1/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed2/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed3/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed4/transfer_optimalfixed_eps50",
"../../data/hyperparam/acrobot/offline_learning/knn-ensemble/k5/esarsa/step10k/drop0.2/ensembleseed5/transfer_optimalfixed_eps50"
]
'''
'''
ac_true = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/online_learning/esarsa/step15k/sweep/"]
ac_rnd = [34, 4, 43, 30, 24, 32, 40, 11, 20, 30, 3, 16, 53, 45, 0, 21, 43, 23, 44, 50, 9, 41, 37, 37, 11, 2, 26, 33, 18, 20]
ac_offline = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/offline_learning/k3_timeout750/esarsa/step15k/optimalfixed_eps0/sweep/"]
ac_cemOffline = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/list/CEMoffline_onlineEvaluation/esarsa/step15k/sweep/"]
ac_cemOnline = ["../../../../../../Downloads/transferabledata/new/hyperparam_ap_CEM_gridsearch/data/hyperparam_ap/acrobot/list/CEMonline_onlineEvaluation/esarsa/step15k/sweep/"]
'''
ac_true_temp = ["../../data/hyperparam_v5/acrobot/online_learning/esarsa/step15k/sweep/"]
ac_true_dqn = ["../../data/hyperparam_v5/acrobot/online_learning/dqn/step600k/sweep/"]
ac_laplace_knn_5k_dqn = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout20k/dqn/step5k_env/data_optimal/drop0/sweep_rep1/"]
ac_knn_15k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep/"]
ac_knn_10k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step10k_env/data_optimal/drop0/sweep/"]
ac_knn_5k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep/"]
ac_knn_2p5k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep/"]
ac_knn_1k = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep/"]
ac_knn_500 = ["../../data/hyperparam_v4.8/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step500_env/data_optimal/drop0/sweep/"]
# ac_knn_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep/"]
# ac_knn_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step10k_env/data_optimal/drop0/sweep/"]
# ac_knn_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep/"]
# ac_knn_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep/"]
# ac_knn_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep/"]
# ac_knn_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3/timeout500/esarsa/step500_env/data_optimal/drop0/sweep/"]
ac_laplace_knn_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step10k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep_rep1/"]
ac_laplace_knn_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step500_env/data_optimal/drop0/sweep_rep1/"]
ac_scale_network_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step15k_env/data_optimal/sweep"]
ac_scale_network_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step10k_env/data_optimal/sweep"]
ac_scale_network_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step5k_env/data_optimal/sweep"]
ac_scale_network_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step2.5k_env/data_optimal/sweep"]
ac_scale_network_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step1k_env/data_optimal/sweep"]
ac_scale_network_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_separated/timeout500/esarsa/step500_env/data_optimal/sweep"]
ac_scale_laplace_network_15k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_optimal/sweep"]
ac_scale_laplace_network_10k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step10k_env/data_optimal/sweep"]
ac_scale_laplace_network_5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step5k_env/data_optimal/sweep"]
ac_scale_laplace_network_2p5k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step2.5k_env/data_optimal/sweep"]
ac_scale_laplace_network_1k = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step1k_env/data_optimal/sweep"]
ac_scale_laplace_network_500 = ["../../data/hyperparam_v5/acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step500_env/data_optimal/sweep"]
ac_rnd = [34, 4, 43, 30, 24, 32, 40, 11, 20, 30, 3, 16, 53, 45, 0, 21, 43, 23, 44, 50, 9, 41, 37, 37, 11, 2, 26, 33, 18, 20]
basepath = "../../../../../../Downloads/transferabledata/new/data_dcp/final/data/hyperparam_v5/"
ac_true = [basepath + "acrobot/online_learning/esarsa/step15k/sweep/"]
ac_optim_knn = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_optimal/drop0/sweep_rep1/"]
ac_suboptim_knn = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_suboptimal/drop0/sweep_rep1/"]
ac_subsuboptim_knn = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step15k_env/data_subsuboptimal/drop0/sweep_rep1/"]
ac_optim_network = [basepath + "acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_optimal/sweep/"]
ac_suboptim_network = [basepath + "acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_suboptimal/sweep/"]
ac_subsuboptim_network = [basepath + "acrobot/offline_learning/network/learning/clip_scale_laplace_separated/timeout500/esarsa/step15k_env/data_subsuboptimal/sweep/"]
ac_fqi_eps0 = ["../../data/hyperparam_v5/acrobot/offline_learning/fqi/eps0/fqi/fqi-adam/alpha_hidden_epsilon/step15k_env/optimalfixed_eps0/lambda1e-3/lockat_baseline_online/"]
ac_fqi_eps0p1 = ["../../data/hyperparam_v5/acrobot/offline_learning/fqi/eps0.1/fqi/fqi-adam/alpha_hidden_epsilon/step15k_env/optimalfixed_eps0/lambda1e-3/lockat_baseline_online/"]
ac_rnd = [34, 4, 43, 30, 24, 32, 40, 11, 20, 30, 3, 16, 53, 45, 0, 21, 43, 23, 44, 50, 9, 41, 37, 37, 11, 2, 26, 33, 18, 20]
basepath = "../../data/finalPlots/data/hyperparam_v5/"
ac_true = [basepath + "acrobot/online_learning/esarsa/step15k/sweep/"]
ac_knnlaplace_optim_5k_plot1 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_500_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step500_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_1k_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step1k_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_2500_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step2.5k_env/data_optimal/drop0/sweep_rep1/"]
ac_knnlaplace_optim_5k_plot2 = [basepath + "acrobot/offline_learning/knn/learning/k3_laplace/timeout500/esarsa/step5k_env/data_optimal/drop0/sweep_rep1/"]
|
nilq/baby-python
|
python
|
# Copyright 2021 Universidade da Coruña
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Miguel Ángel Abella González <miguel.abella@udc.es>
# Gabriel Rodríguez <gabriel.rodriguez@udc.es>
#
# Contact:
# Gabriel Rodríguez <gabriel.rodriguez@udc.es>
"""<replace_with_module_description>"""
from benchmarks.polybench import PolyBench
from benchmarks.polybench_classes import ArrayImplementation
from benchmarks.polybench_classes import PolyBenchOptions, PolyBenchSpec
from numpy.core.multiarray import ndarray
import numpy as np
class Trmm(PolyBench):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
implementation = options.POLYBENCH_ARRAY_IMPLEMENTATION
if implementation == ArrayImplementation.LIST:
return _StrategyList.__new__(_StrategyList, options, parameters)
elif implementation == ArrayImplementation.LIST_PLUTO:
return _StrategyListPluto.__new__(_StrategyListPluto, options, parameters)
elif implementation == ArrayImplementation.LIST_FLATTENED:
return _StrategyListFlattened.__new__(_StrategyListFlattened, options, parameters)
elif implementation == ArrayImplementation.NUMPY:
return _StrategyNumPy.__new__(_StrategyNumPy, options, parameters)
elif implementation == ArrayImplementation.LIST_FLATTENED_PLUTO:
return _StrategyListFlattenedPluto.__new__(_StrategyListFlattenedPluto, options, parameters)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
# The parameters hold the necessary information obtained from "polybench.spec" file
params = parameters.DataSets.get(self.DATASET_SIZE)
if not isinstance(params, dict):
raise NotImplementedError(f'Dataset size "{self.DATASET_SIZE.name}" not implemented '
f'for {parameters.Category}/{parameters.Name}.')
# Set up problem size from the given parameters (adapt this part with appropriate parameters)
self.M = params.get('M')
self.N = params.get('N')
def run_benchmark(self):
# Create data structures (arrays, auxiliary variables, etc.)
alpha = 1.5
A = self.create_array(2, [self.M, self.M], self.DATA_TYPE(0))
B = self.create_array(2, [self.M, self.N], self.DATA_TYPE(0))
# Initialize data structures
self.initialize_array(alpha, A, B)
# Benchmark the kernel
self.time_kernel(alpha, A, B)
# Return printable data as a list of tuples ('name', value).
# Each tuple element must have the following format:
# (A: str, B: matrix)
# - A: a representative name for the data (this string will be printed out)
# - B: the actual data structure holding the computed result
#
# The syntax for the return statement would then be:
# - For single data structure results:
# return [('data_name', data)]
# - For multiple data structure results:
# return [('matrix1', m1), ('matrix2', m2), ... ]
return [('B', B)]
class _StrategyList(Trmm):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyList)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
def initialize_array(self, alpha, A: list, B: list):
for i in range(0, self.M):
for j in range(0, i):
A[i][j] = self.DATA_TYPE((i + j) % self.M) / self.M
A[i][i] = 1.0
for j in range(0, self.N):
B[i][j] = self.DATA_TYPE((self.N + (i - j)) % self.N) / self.N
def print_array_custom(self, B: list, name: str):
for i in range(0, self.M):
for j in range(0, self.N):
if (i * self.M + j) % 20 == 0:
self.print_message('\n')
self.print_value(B[i][j])
def kernel(self, alpha, A: list, B: list):
# BLAS parameters
# SIDE = 'L'
# UPLO = 'L'
# TRANSA = 'T'
# DIAG = 'U'
# = > Form B := alpha * A ** T * B.
# A is MxM
# B is MxN
        # scop begin
for i in range(0, self.M):
for j in range(0, self.N):
for k in range(i + 1, self.M):
B[i][j] += A[k][i] * B[k][j]
B[i][j] = alpha * B[i][j]
# scop end
class _StrategyListPluto(_StrategyList):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyListPluto)
def kernel(self, alpha, A: list, B: list):
# scop begin
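        # Pluto-generated schedule: the accumulation over the inner index (c3) is
        # performed in a first loop nest and the scaling by alpha in a second,
        # separate nest.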
if((self.M-1>= 0) and (self.N-1>= 0)):
if((self.M-2>= 0)):
for c1 in range ((self.N-1)+1):
for c2 in range ((self.M-2)+1):
for c3 in range (c2 + 1 , (self.M-1)+1):
B[c2][c1] += A[c3][c2] * B[c3][c1]
for c1 in range ((self.M-1)+1):
for c2 in range ((self.N-1)+1):
B[c1][c2] = alpha * B[c1][c2]
# scop end
class _StrategyListFlattened(Trmm):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyListFlattened)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
if options.LOAD_ELIMINATION: self.kernel = self.kernel_le
else: self.kernel = self.kernel_regular
def initialize_array(self, alpha, A: list, B: list):
for i in range(0, self.M):
for j in range(0, i):
A[self.M * i + j] = self.DATA_TYPE((i+j) % self.M) / self.M
A[self.M * i + i] = 1.0
for j in range(0, self.N):
B[self.N * i + j] = self.DATA_TYPE((self.N+(i-j)) % self.N) / self.N
def print_array_custom(self, B: list, name: str):
for i in range(0, self.M):
for j in range(0, self.N):
if (i * self.M + j) % 20 == 0:
self.print_message('\n')
self.print_value(B[self.N * i + j])
# Regular version
def kernel_regular(self, alpha, A: list, B: list):
        # scop begin
for i in range(0, self.M):
for j in range(0, self.N):
for k in range(i + 1, self.M):
B[self.N * i + j] += A[self.M * k + i] * B[self.N * k + j]
B[self.N * i + j] = alpha * B[self.N * i + j]
# scop end
# Load elimination
def kernel_le(self, alpha, A: list, B: list):
        # scop begin
for i in range(0, self.M):
for j in range(0, self.N):
tmp = B[self.N*i+j]
for k in range(i + 1, self.M):
tmp += A[self.M * k + i] * B[self.N * k + j]
B[self.N * i + j] = alpha * tmp
# scop end
class _StrategyNumPy(Trmm):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyNumPy)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
def initialize_array(self, alpha, A: list, B: list):
for i in range(0, self.M):
for j in range(0, i):
A[i, j] = self.DATA_TYPE((i + j) % self.M) / self.M
A[i, i] = 1.0
for j in range(0, self.N):
B[i, j] = self.DATA_TYPE((self.N + (i - j)) % self.N) / self.N
def print_array_custom(self, B: ndarray, name: str):
for i in range(0, self.M):
for j in range(0, self.N):
if (i * self.M + j) % 20 == 0:
self.print_message('\n')
self.print_value(B[i, j])
def kernel(self, alpha, A: ndarray, B: ndarray):
# BLAS parameters
# SIDE = 'L'
# UPLO = 'L'
# TRANSA = 'T'
# DIAG = 'U'
        # => Form B := alpha * A ** T * B.
# A is MxM
# B is MxN
# scop begin
for i in range(0, self.M):
B[i,0:self.N] += (A[i+1:self.M,i,np.newaxis] * B[i+1:self.M,0:self.N]).sum(axis=0)
B[i,0:self.N] = alpha * B[i,0:self.N]
# scop end
class _StrategyListFlattenedPluto(_StrategyListFlattened):
def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
return object.__new__(_StrategyListFlattenedPluto)
def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
super().__init__(options, parameters)
self.kernel_vectorizer = self.kernel_pluto
self.kernel = getattr( self, "kernel_%s" % (options.POCC) )
def kernel_pluto(self, alpha, A: list, B: list):
# --pluto
# scop begin
if((self.M-1>= 0) and (self.N-1>= 0)):
if((self.M-2>= 0)):
for c1 in range ((self.N-1)+1):
for c2 in range ((self.M-2)+1):
for c3 in range (c2 + 1 , (self.M-1)+1):
B[self.N*(c2) + c1] += A[self.M*(c3) + c2] * B[self.N*(c3) + c1]
for c1 in range ((self.M-1)+1):
for c2 in range ((self.N-1)+1):
B[self.N*(c1) + c2] = alpha * B[self.N*(c1) + c2]
# scop end
def kernel_maxfuse(self, alpha, A: list, B: list):
# --pluto --pluto-fuse maxfuse
# scop begin
if((self.M-1>= 0) and (self.N-1>= 0)):
if((self.M-2>= 0)):
for c0 in range ((self.N-1)+1):
for c1 in range ((self.M-2)+1):
for c4 in range (c1 + 1 , (self.M-1)+1):
B[(c1)*self.N + c0] += A[(c4)*self.M + c1] * B[(c4)*self.N + c0]
B[(c1)*self.N + c0] = alpha * B[(c1)*self.N + c0]
B[(self.M + -1)*self.N + c0] = alpha * B[(self.M + -1)*self.N + c0]
if self.M == 1:
for c0 in range ((self.N-1)+1):
B[(0)*self.N + c0] = alpha * B[(0)*self.N + c0]
# scop end
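# Editor's sketch (not part of the benchmark): for small M and N the kernels
# above compute B := alpha * A**T * B with A treated as unit lower triangular.
# This hypothetical helper is only a reference for checking a result by hand
# and assumes NumPy is available in the environment.
def _reference_trmm(alpha, A, B):
    import numpy as np
    A_unit = np.tril(np.asarray(A, dtype=float), -1) + np.eye(len(A))
    return alpha * (A_unit.T @ np.asarray(B, dtype=float))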
|
nilq/baby-python
|
python
|
"""
Code for the paper "Mesh Classification with Dilated Mesh Convolutions."
published at the 2021 IEEE International Conference on Image Processing.
Code Author: Vinit Veerendraveer Singh.
Copyright (c) VIMS Lab and its affiliates.
We adapt MeshNet to perform dilated convolutions by replacing its Mesh
Convolution block with the Stacked Dilated Mesh Convolution block.
This file tests the redesigned model after training.
Note: For the ease of exposition and to keep this file coherent with the train.py
in the original MeshNet code, we do not add code comments to this file.
"""
import os
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.utils.data as data
from config import get_test_config
from data import ModelNet40
from models import MeshNet
dataset = 'ModelNet40'
cfg = get_test_config(dataset=dataset)
os.environ['CUDA_VISIBLE_DEVICES'] = cfg['cuda_devices']
data_set = ModelNet40(cfg=cfg[dataset], part='test')
data_loader = data.DataLoader(data_set,
batch_size=1,
num_workers=4,
shuffle=True,
pin_memory=False)
def test_model(model):
correct_num = 0
for (centers, corners, normals, neighbors, rings, targets) in data_loader:
corners = corners - torch.cat([centers, centers, centers], 1)
centers = Variable(torch.cuda.FloatTensor(centers.cuda()))
corners = Variable(torch.cuda.FloatTensor(corners.cuda()))
normals = Variable(torch.cuda.FloatTensor(normals.cuda()))
for idx, ring in enumerate(rings):
rings[idx] = Variable(torch.cuda.LongTensor(ring.cuda()))
targets = Variable(torch.cuda.LongTensor(targets.cuda()))
outputs, _ = model(centers, corners, normals, neighbors, rings)
_, preds = torch.max(outputs, 1)
if preds[0] == targets[0]:
correct_num += 1
print('Accuracy: {:.4f}'.format(float(correct_num) / len(data_set)))
if __name__ == '__main__':
os.environ['PYTHONHASHSEED'] = str(0)
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_deterministic(False)
model_ft = MeshNet(cfg=cfg['MeshNet'], require_fea=True)
model_ft.cuda()
model_ft = nn.DataParallel(model_ft)
model_ft.load_state_dict(torch.load(cfg[dataset]['load_model']))
model_ft.eval()
test_model(model_ft)
|
nilq/baby-python
|
python
|
from collections import namedtuple
import hexbytes
from eth_utils import is_checksum_address
from relay.signing import eth_sign, eth_validate, keccak256
EcSignature = namedtuple("EcSignature", "v r s")
class Order(object):
def __init__(
self,
exchange_address: str,
maker_address: str,
taker_address: str,
maker_token: str,
taker_token: str,
fee_recipient: str,
maker_token_amount: int,
taker_token_amount: int,
maker_fee: int,
taker_fee: int,
expiration_timestamp_in_sec: int,
salt: int,
v: int,
r: hexbytes.HexBytes,
s: hexbytes.HexBytes,
filled_maker_token_amount: int = 0,
filled_taker_token_amount: int = 0,
cancelled_maker_token_amount: int = 0,
cancelled_taker_token_amount: int = 0,
) -> None:
self.exchange_address = exchange_address
self.maker_address = maker_address
self.taker_address = taker_address
self.maker_token = maker_token
self.taker_token = taker_token
self.fee_recipient = fee_recipient
self.maker_token_amount = maker_token_amount
self.taker_token_amount = taker_token_amount
self.maker_fee = maker_fee
self.taker_fee = taker_fee
self.expiration_timestamp_in_sec = expiration_timestamp_in_sec
self.salt = salt
self.v = v
self.r = r
self.s = s
self.filled_maker_token_amount = filled_maker_token_amount
self.filled_taker_token_amount = filled_taker_token_amount
self.cancelled_maker_token_amount = cancelled_maker_token_amount
self.cancelled_taker_token_amount = cancelled_taker_token_amount
@property
def price(self) -> float:
return self.taker_token_amount / self.maker_token_amount
@property
def available_maker_token_amount(self) -> float:
return (
self.maker_token_amount
- self.filled_maker_token_amount
- self.cancelled_maker_token_amount
)
@property
def available_taker_token_amount(self) -> float:
return (
self.taker_token_amount
- self.filled_taker_token_amount
- self.cancelled_taker_token_amount
)
@property
def ec_signature(self):
return EcSignature(self.v, self.r, self.s)
def validate(self) -> bool:
return self.validate_signature() and self.validate_addresses()
def validate_signature(self) -> bool:
return eth_validate(self.hash(), (self.v, self.r, self.s), self.maker_address)
def validate_addresses(self) -> bool:
for address in [
self.exchange_address,
self.maker_token,
self.taker_token,
self.fee_recipient,
]:
if not is_checksum_address(address):
return False
return True
def is_expired(self, current_timestamp_in_sec: int) -> bool:
return current_timestamp_in_sec > self.expiration_timestamp_in_sec
def is_filled(self) -> bool:
return (
self.available_maker_token_amount <= 0
or self.available_taker_token_amount <= 0
)
def hash(self) -> hexbytes.HexBytes:
return hexbytes.HexBytes(
keccak256(
self.exchange_address,
self.maker_address,
self.taker_address,
self.maker_token,
self.taker_token,
self.fee_recipient,
self.maker_token_amount,
self.taker_token_amount,
self.maker_fee,
self.taker_fee,
self.expiration_timestamp_in_sec,
self.salt,
)
)
def __eq__(self, other: object) -> bool:
if isinstance(other, Order):
return self.hash() == other.hash()
else:
return False
class SignableOrder(Order):
def __init__(
self,
exchange_address: str,
maker_address: str,
taker_address: str,
maker_token: str,
taker_token: str,
fee_recipient: str,
maker_token_amount: int,
taker_token_amount: int,
maker_fee: int,
taker_fee: int,
expiration_timestamp_in_sec: int,
salt: int,
) -> None:
super().__init__(
exchange_address,
maker_address,
taker_address,
maker_token,
taker_token,
fee_recipient,
maker_token_amount,
taker_token_amount,
maker_fee,
taker_fee,
expiration_timestamp_in_sec,
salt,
v=0,
r=hexbytes.HexBytes(b""),
s=hexbytes.HexBytes(b""),
)
def sign(self, key) -> None:
v, r, s = eth_sign(self.hash(), key)
self.v = v
self.r = hexbytes.HexBytes(r)
self.s = hexbytes.HexBytes(s)
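# Illustrative sketch only: the addresses and amounts below are placeholders,
# not real exchange or token addresses, and the order is left unsigned.
if __name__ == "__main__":
    example_order = SignableOrder(
        exchange_address="0x" + "00" * 20,
        maker_address="0x" + "11" * 20,
        taker_address="0x" + "22" * 20,
        maker_token="0x" + "33" * 20,
        taker_token="0x" + "44" * 20,
        fee_recipient="0x" + "55" * 20,
        maker_token_amount=100,
        taker_token_amount=200,
        maker_fee=0,
        taker_fee=0,
        expiration_timestamp_in_sec=2_000_000_000,
        salt=1,
    )
    print(example_order.price)                         # 2.0
    print(example_order.available_maker_token_amount)  # 100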
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Test whether a client subscribed to a topic receives its own message sent to that topic, for long topics.
from mosq_test_helper import *
def do_test(topic, succeeds):
rc = 1
mid = 53
keepalive = 60
connect_packet = mosq_test.gen_connect("subpub-qos0-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
subscribe_packet = mosq_test.gen_subscribe(mid, topic, 0)
suback_packet = mosq_test.gen_suback(mid, 0)
publish_packet = mosq_test.gen_publish(topic, qos=0, payload="message")
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
if succeeds == True:
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")
mosq_test.do_send_receive(sock, publish_packet, publish_packet, "publish")
else:
mosq_test.do_send_receive(sock, subscribe_packet, b"", "suback")
rc = 0
sock.close()
except mosq_test.TestError:
pass
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
do_test("/"*200, True) # 200 max hierarchy limit
do_test("abc/"*199+"d", True) # 200 max hierarchy limit, longer overall string than 200
do_test("/"*201, False) # Exceeds 200 max hierarchy limit
do_test("abc/"*201+"d", False) # Exceeds 200 max hierarchy limit, longer overall string than 200
exit(0)
|
nilq/baby-python
|
python
|
import numpy as np
from pydeeprecsys.rl.agents.agent import ReinforcementLearning
from typing import Any, List, Optional
from pydeeprecsys.rl.experience_replay.experience_buffer import ExperienceReplayBuffer
from pydeeprecsys.rl.experience_replay.buffer_parameters import (
ExperienceReplayBufferParameters,
)
from pydeeprecsys.rl.neural_networks.policy_estimator import PolicyEstimator
from torch import FloatTensor
class ReinforceAgent(ReinforcementLearning):
"""Policy estimator using a value estimator as a baseline.
It's on-policy, for discrete action spaces, and episodic environments."""
def __init__(
self,
n_actions: int,
state_size: int,
hidden_layers: Optional[List[int]] = None,
        discount_factor: float = 0.99,  # a.k.a. gamma
learning_rate=1e-3,
):
self.episode_count = 0
if not hidden_layers:
hidden_layers = [state_size * 2, state_size * 2]
self.policy_estimator = PolicyEstimator(
state_size,
hidden_layers,
n_actions,
learning_rate=learning_rate,
)
self.discount_factor = discount_factor
# starts the buffer
self.reset_buffer()
def reset_buffer(self):
self.buffer = ExperienceReplayBuffer(
ExperienceReplayBufferParameters(10000, 1, 1)
)
def top_k_actions_for_state(self, state: Any, k: int = 1) -> List[int]:
return self.policy_estimator.predict(state, k=k)
def action_for_state(self, state: Any) -> int:
return self.top_k_actions_for_state(state)[0]
def store_experience(
self, state: Any, action: Any, reward: float, done: bool, new_state: Any
):
state_flat = state.flatten()
new_state_flat = new_state.flatten()
self.buffer.store_experience(state_flat, action, reward, done, new_state_flat)
# FIXME: should learn after every episode, or after every N experiences?
if done: # and self.buffer.ready_to_predict():
self.learn_from_experiences()
self.reset_buffer()
    def discounted_rewards(self, rewards: np.ndarray) -> np.ndarray:
        """From the rewards obtained in an episode, compute the discounted
        returns, then standardize them (subtract the mean and divide by the
        standard deviation) so they can be used as advantages."""
discount_r = np.zeros_like(rewards)
timesteps = range(len(rewards))
reward_sum = 0
for i in reversed(timesteps):
reward_sum = rewards[i] + self.discount_factor * reward_sum
discount_r[i] = reward_sum
return_mean = discount_r.mean()
return_std = discount_r.std()
baseline = (discount_r - return_mean) / return_std
return baseline
def learn_from_experiences(self):
experiences = list(self.buffer.experience_queue)
states, actions, rewards, dones, next_states = zip(*experiences)
advantages = self.discounted_rewards(rewards)
advantages_tensor = FloatTensor(advantages).to(
device=self.policy_estimator.device
)
self.policy_estimator.update(states, advantages_tensor, actions)
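# Editor's sketch (not used by the agent): the same normalized discounted
# return computed for a toy reward sequence, assuming gamma = 0.99.
def _toy_discounted_baseline(rewards, gamma=0.99):
    returns = np.zeros(len(rewards), dtype=float)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return (returns - returns.mean()) / returns.std()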
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.api import monitored_resource_pb2 # type: ignore
from google.cloud.logging_v2.types import log_entry
from google.protobuf import duration_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.logging.v2',
manifest={
'DeleteLogRequest',
'WriteLogEntriesRequest',
'WriteLogEntriesResponse',
'WriteLogEntriesPartialErrors',
'ListLogEntriesRequest',
'ListLogEntriesResponse',
'ListMonitoredResourceDescriptorsRequest',
'ListMonitoredResourceDescriptorsResponse',
'ListLogsRequest',
'ListLogsResponse',
'TailLogEntriesRequest',
'TailLogEntriesResponse',
},
)
class DeleteLogRequest(proto.Message):
r"""The parameters to DeleteLog.
Attributes:
log_name (str):
Required. The resource name of the log to delete:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example,
``"projects/my-project-id/logs/syslog"``,
``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
For more information about log names, see
[LogEntry][google.logging.v2.LogEntry].
"""
log_name = proto.Field(
proto.STRING,
number=1,
)
class WriteLogEntriesRequest(proto.Message):
r"""The parameters to WriteLogEntries.
Attributes:
log_name (str):
Optional. A default log resource name that is assigned to
all log entries in ``entries`` that do not specify a value
for ``log_name``:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example:
::
"projects/my-project-id/logs/syslog"
"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"
The permission ``logging.logEntries.create`` is needed on
each project, organization, billing account, or folder that
is receiving new log entries, whether the resource is
specified in ``logName`` or in an individual log entry.
resource (google.api.monitored_resource_pb2.MonitoredResource):
Optional. A default monitored resource object that is
assigned to all log entries in ``entries`` that do not
specify a value for ``resource``. Example:
::
{ "type": "gce_instance",
"labels": {
"zone": "us-central1-a", "instance_id": "00000000000000000000" }}
See [LogEntry][google.logging.v2.LogEntry].
labels (Mapping[str, str]):
Optional. Default labels that are added to the ``labels``
field of all log entries in ``entries``. If a log entry
already has a label with the same key as a label in this
parameter, then the log entry's label is not changed. See
[LogEntry][google.logging.v2.LogEntry].
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
Required. The log entries to send to Logging. The order of
log entries in this list does not matter. Values supplied in
this method's ``log_name``, ``resource``, and ``labels``
fields are copied into those log entries in this list that
do not include values for their corresponding fields. For
more information, see the
[LogEntry][google.logging.v2.LogEntry] type.
If the ``timestamp`` or ``insert_id`` fields are missing in
log entries, then this method supplies the current time or a
unique identifier, respectively. The supplied values are
chosen so that, among the log entries that did not supply
their own values, the entries earlier in the list will sort
before the entries later in the list. See the
``entries.list`` method.
Log entries with timestamps that are more than the `logs
retention
period <https://cloud.google.com/logging/quota-policy>`__ in
the past or more than 24 hours in the future will not be
available when calling ``entries.list``. However, those log
entries can still be `exported with
LogSinks <https://cloud.google.com/logging/docs/api/tasks/exporting-logs>`__.
To improve throughput and to avoid exceeding the `quota
limit <https://cloud.google.com/logging/quota-policy>`__ for
calls to ``entries.write``, you should try to include
several log entries in this list, rather than calling this
method for each individual log entry.
partial_success (bool):
Optional. Whether valid entries should be written even if
some other entries fail due to INVALID_ARGUMENT or
PERMISSION_DENIED errors. If any entry is not written, then
the response status is the error associated with one of the
failed entries and the response includes error details keyed
by the entries' zero-based index in the ``entries.write``
method.
dry_run (bool):
Optional. If true, the request should expect
normal response, but the entries won't be
persisted nor exported. Useful for checking
whether the logging API endpoints are working
properly before sending valuable data.
"""
log_name = proto.Field(
proto.STRING,
number=1,
)
resource = proto.Field(
proto.MESSAGE,
number=2,
message=monitored_resource_pb2.MonitoredResource,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=3,
)
entries = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=log_entry.LogEntry,
)
partial_success = proto.Field(
proto.BOOL,
number=5,
)
dry_run = proto.Field(
proto.BOOL,
number=6,
)
class WriteLogEntriesResponse(proto.Message):
r"""Result returned from WriteLogEntries.
"""
class WriteLogEntriesPartialErrors(proto.Message):
r"""Error details for WriteLogEntries with partial success.
Attributes:
log_entry_errors (Mapping[int, google.rpc.status_pb2.Status]):
When ``WriteLogEntriesRequest.partial_success`` is true,
records the error status for entries that were not written
due to a permanent error, keyed by the entry's zero-based
index in ``WriteLogEntriesRequest.entries``.
Failed requests for which no entries are written will not
include per-entry errors.
"""
log_entry_errors = proto.MapField(
proto.INT32,
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
class ListLogEntriesRequest(proto.Message):
r"""The parameters to ``ListLogEntries``.
Attributes:
resource_names (Sequence[str]):
Required. Names of one or more parent resources from which
to retrieve log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
Projects listed in the ``project_ids`` field are added to
this list.
filter (str):
Optional. A filter that chooses which log entries to return.
See `Advanced Logs
Queries <https://cloud.google.com/logging/docs/view/advanced-queries>`__.
Only log entries that match the filter are returned. An
empty filter matches all log entries in the resources listed
in ``resource_names``. Referencing a parent resource that is
not listed in ``resource_names`` will cause the filter to
return no results. The maximum length of the filter is 20000
characters.
order_by (str):
Optional. How the results should be sorted. Presently, the
only permitted values are ``"timestamp asc"`` (default) and
``"timestamp desc"``. The first option returns entries in
order of increasing values of ``LogEntry.timestamp`` (oldest
first), and the second option returns entries in order of
decreasing timestamps (newest first). Entries with equal
timestamps are returned in order of their ``insert_id``
values.
page_size (int):
Optional. The maximum number of results to return from this
request. Default is 50. If the value is negative or exceeds
1000, the request is rejected. The presence of
``next_page_token`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``page_token`` must be the value of ``next_page_token`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
"""
resource_names = proto.RepeatedField(
proto.STRING,
number=8,
)
filter = proto.Field(
proto.STRING,
number=2,
)
order_by = proto.Field(
proto.STRING,
number=3,
)
page_size = proto.Field(
proto.INT32,
number=4,
)
page_token = proto.Field(
proto.STRING,
number=5,
)
class ListLogEntriesResponse(proto.Message):
r"""Result returned from ``ListLogEntries``.
Attributes:
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
A list of log entries. If ``entries`` is empty,
``nextPageToken`` may still be returned, indicating that
more entries may exist. See ``nextPageToken`` for more
information.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
If a value for ``next_page_token`` appears and the
``entries`` field is empty, it means that the search found
no log entries so far but it did not have time to search all
the possible log entries. Retry the method with this value
for ``page_token`` to continue the search. Alternatively,
consider speeding up the search by changing your filter to
specify a single log name or resource type, or to narrow the
time range of the search.
"""
@property
def raw_page(self):
return self
entries = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=log_entry.LogEntry,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListMonitoredResourceDescriptorsRequest(proto.Message):
r"""The parameters to ListMonitoredResourceDescriptors
Attributes:
page_size (int):
Optional. The maximum number of results to return from this
request. Non-positive values are ignored. The presence of
``nextPageToken`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``pageToken`` must be the value of ``nextPageToken`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
"""
page_size = proto.Field(
proto.INT32,
number=1,
)
page_token = proto.Field(
proto.STRING,
number=2,
)
class ListMonitoredResourceDescriptorsResponse(proto.Message):
r"""Result returned from ListMonitoredResourceDescriptors.
Attributes:
resource_descriptors (Sequence[google.api.monitored_resource_pb2.MonitoredResourceDescriptor]):
A list of resource descriptors.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
"""
@property
def raw_page(self):
return self
resource_descriptors = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=monitored_resource_pb2.MonitoredResourceDescriptor,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListLogsRequest(proto.Message):
r"""The parameters to ListLogs.
Attributes:
parent (str):
Required. The resource name that owns the logs:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]".
page_size (int):
Optional. The maximum number of results to return from this
request. Non-positive values are ignored. The presence of
``nextPageToken`` in the response indicates that more
results might be available.
page_token (str):
Optional. If present, then retrieve the next batch of
results from the preceding call to this method.
``pageToken`` must be the value of ``nextPageToken`` from
the previous response. The values of other method parameters
should be identical to those in the previous call.
resource_names (Sequence[str]):
Optional. The resource name that owns the logs:
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
To support legacy queries, it could also be:
"projects/[PROJECT_ID]" "organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]".
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
resource_names = proto.RepeatedField(
proto.STRING,
number=8,
)
class ListLogsResponse(proto.Message):
r"""Result returned from ListLogs.
Attributes:
log_names (Sequence[str]):
A list of log names. For example,
``"projects/my-project/logs/syslog"`` or
``"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
next_page_token (str):
If there might be more results than those appearing in this
response, then ``nextPageToken`` is included. To get the
next set of results, call this method again using the value
of ``nextPageToken`` as ``pageToken``.
"""
@property
def raw_page(self):
return self
log_names = proto.RepeatedField(
proto.STRING,
number=3,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class TailLogEntriesRequest(proto.Message):
r"""The parameters to ``TailLogEntries``.
Attributes:
resource_names (Sequence[str]):
Required. Name of a parent resource from which to retrieve
log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views:
"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
"folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]".
filter (str):
Optional. A filter that chooses which log entries to return.
See `Advanced Logs
Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
Only log entries that match the filter are returned. An
empty filter matches all log entries in the resources listed
in ``resource_names``. Referencing a parent resource that is
not in ``resource_names`` will cause the filter to return no
results. The maximum length of the filter is 20000
characters.
buffer_window (google.protobuf.duration_pb2.Duration):
Optional. The amount of time to buffer log
entries at the server before being returned to
prevent out of order results due to late
arriving log entries. Valid values are between
0-60000 milliseconds. Defaults to 2000
milliseconds.
"""
resource_names = proto.RepeatedField(
proto.STRING,
number=1,
)
filter = proto.Field(
proto.STRING,
number=2,
)
buffer_window = proto.Field(
proto.MESSAGE,
number=3,
message=duration_pb2.Duration,
)
class TailLogEntriesResponse(proto.Message):
r"""Result returned from ``TailLogEntries``.
Attributes:
entries (Sequence[google.cloud.logging_v2.types.LogEntry]):
A list of log entries. Each response in the stream will
order entries with increasing values of
``LogEntry.timestamp``. Ordering is not guaranteed between
separate responses.
suppression_info (Sequence[google.cloud.logging_v2.types.TailLogEntriesResponse.SuppressionInfo]):
If entries that otherwise would have been
included in the session were not sent back to
the client, counts of relevant entries omitted
from the session with the reason that they were
not included. There will be at most one of each
reason per response. The counts represent the
number of suppressed entries since the last
streamed response.
"""
class SuppressionInfo(proto.Message):
r"""Information about entries that were omitted from the session.
Attributes:
reason (google.cloud.logging_v2.types.TailLogEntriesResponse.SuppressionInfo.Reason):
The reason that entries were omitted from the
session.
suppressed_count (int):
A lower bound on the count of entries omitted due to
``reason``.
"""
class Reason(proto.Enum):
r"""An indicator of why entries were omitted."""
REASON_UNSPECIFIED = 0
RATE_LIMIT = 1
NOT_CONSUMED = 2
reason = proto.Field(
proto.ENUM,
number=1,
enum='TailLogEntriesResponse.SuppressionInfo.Reason',
)
suppressed_count = proto.Field(
proto.INT32,
number=2,
)
entries = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=log_entry.LogEntry,
)
suppression_info = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=SuppressionInfo,
)
__all__ = tuple(sorted(__protobuf__.manifest))
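if __name__ == "__main__":
    # Editor-added, hypothetical sketch (not part of the generated module):
    # proto-plus messages accept keyword arguments for their fields; the log
    # name below is a placeholder, not a real project.
    _example = WriteLogEntriesRequest(
        log_name="projects/my-project/logs/syslog",
        partial_success=True,
    )
    print(_example.log_name)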
|
nilq/baby-python
|
python
|
from source.exceptions.not_found import NotFoundException
from source.repositories.player_tournament import PlayerTournamentRepository
import source.commons.message as message
class PlayerTournamentBusiness:
def __init__(self):
self.player_tournament_repository = PlayerTournamentRepository()
def find_all(self):
result = self.player_tournament_repository.find_all()
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def get_ranking(self, tournament_id):
result = self.player_tournament_repository.get_ranking(tournament_id)
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def get_player_tournament(self, data):
result = self.player_tournament_repository.get_player_tournament(data)
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def find_by_id(self, field_id):
result = self.player_tournament_repository.find_by_id(field_id)
if not result:
raise NotFoundException(None, message.REGISTER_NOT_FOUND)
return result
def save(self, data):
return self.player_tournament_repository.save(
data.get('player_id'),
data.get('tournament_id'),
data.get('position'),
data.get('points_acum'),
data.get('adm')
)
def update(self, field_id, data):
if not self.player_tournament_repository.find_by_id(field_id):
raise NotFoundException(None, message.PLAYER_NOT_FOUND)
for i in data:
self.player_tournament_repository.update(field_id, i, data[i])
return []
def delete(self, field_id):
if not self.player_tournament_repository.find_by_id(field_id):
raise NotFoundException(None, message.PLAYER_NOT_FOUND)
self.player_tournament_repository.delete(field_id)
return []
|
nilq/baby-python
|
python
|
from datetime import datetime
from pathlib import Path
from typing import Optional, Tuple, Union, Sequence, List
from pydantic import BaseModel, validator
class MdocSectionData(BaseModel):
"""Data model for section data in a SerialEM mdoc file.
https://bio3d.colorado.edu/SerialEM/hlp/html/about_formats.htm
"""
ZValue: Optional[int]
TiltAngle: Optional[float]
PieceCoordinates: Optional[Tuple[float, float, int]]
StagePosition: Tuple[float, float]
StageZ: Optional[float]
Magnification: Optional[float]
CameraLength: Optional[float]
MagIndex: Optional[int]
Intensity: Optional[float]
SuperMontCoords: Optional[Tuple[float, float]]
PixelSpacing: Optional[float]
ExposureDose: Optional[float]
DoseRate: Optional[float]
SpotSize: Optional[float]
Defocus: Optional[float]
TargetDefocus: Optional[float]
ImageShift: Optional[Tuple[float, float]]
RotationAngle: Optional[float]
ExposureTime: Optional[float]
Binning: Optional[int]
UsingCDS: Optional[bool]
CameraIndex: Optional[int]
DividedBy2: Optional[bool]
LowDoseConSet: Optional[int]
MinMaxMean: Optional[Tuple[float, float, float]]
PriorRecordDose: Optional[float]
XedgeDxy: Optional[Tuple[float, float]]
YedgeDxy: Optional[Tuple[float, float]]
XedgeDxyVS: Optional[Tuple[float, float]]
YedgeDxyVS: Optional[Tuple[float, float]]
StageOffsets: Optional[Tuple[float, float]]
AlignedPieceCoords: Optional[Tuple[float, float]]
AlignedPieceCoordsVS: Optional[Tuple[float, float]]
SubFramePath: Optional[Path]
NumSubFrames: Optional[int]
FrameDosesAndNumbers: Optional[Sequence[Tuple[float, int]]]
DateTime: Optional[datetime]
NavigatorLabel: Optional[str]
FilterSlitAndLoss: Optional[Tuple[float, float]]
ChannelName: Optional[str]
MultiShotHoleAndPosition: Optional[Union[Tuple[int, int], Tuple[int, int, int]]]
CameraPixelSize: Optional[float]
Voltage: Optional[float]
@validator(
'PieceCoordinates',
'SuperMontCoords',
'ImageShift',
'MinMaxMean',
'StagePosition',
'XedgeDxy',
'YedgeDxy',
'XedgeDxyVS',
        'YedgeDxyVS',
'StageOffsets',
'AlignedPieceCoords',
'AlignedPieceCoordsVS',
'FrameDosesAndNumbers',
'FilterSlitAndLoss',
'MultiShotHoleAndPosition',
pre=True)
def multi_number_string_to_tuple(cls, value: str):
return tuple(value.split())
@validator('DateTime', pre=True)
def mdoc_datetime_to_datetime(cls, value: str):
return datetime.strptime(value, '%d-%b-%y %H:%M:%S', )
@classmethod
def from_lines(cls, lines: List[str]):
lines = [line.strip('[]')
for line
in lines
if len(line) > 0]
key_value_pairs = [line.split('=') for line in lines]
key_value_pairs = [
(k.strip(), v.strip())
for k, v
in key_value_pairs
]
lines = {k: v for k, v in key_value_pairs}
return cls(**lines)
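# Editor's sketch: hypothetical mdoc section lines, showing how from_lines
# splits "key = value" pairs and how the validators coerce multi-number strings.
if __name__ == "__main__":
    section = MdocSectionData.from_lines([
        "[ZValue = 0]",
        "TiltAngle = -60.0",
        "StagePosition = 100.0 200.0",
    ])
    print(section.ZValue, section.TiltAngle, section.StagePosition)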
|
nilq/baby-python
|
python
|
from logging import getLogger
from typing import List
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import backref, relationship
from app.models import orm
from app.settings import env_settings
logger = getLogger(__name__)
class ActorEntryAssociation(orm.Base):
actor_id = Column(Integer, ForeignKey("actor.id"), primary_key=True)
entry_id = Column(Integer, ForeignKey("entry.id"), primary_key=True)
role = Column(String)
def __init__(self, actor_id: int, role: str):
self.actor_id = actor_id
self.role = role
def __repr__(self):
if not env_settings().is_dev():
logger.warning(
f"Calling {self.__class__.__name__} __repr__ might cause additional select queries"
)
return "entry actor: %s: %s:%s " % (
self.entry.title[:40] + "..."
if len(self.entry.title) > 40
else self.entry.title,
self.actor.registered_name,
self.role,
)
def csv_format(self, sep: str):
return self.actor.registered_name + sep + self.role
class EntryTagAssociation(orm.Base):
entry_id = Column(Integer, ForeignKey("entry.id"), primary_key=True)
tag_id = Column(Integer, ForeignKey("tag.id"), primary_key=True)
entry = relationship(orm.Entry, back_populates="tags")
tag = relationship(orm.Tag, backref=backref("entries_tag"))
group_name = Column(String, nullable=True)
config = Column(JSONB)
def __init__(self, tag: orm.Tag, group_name: str):
self.tag = tag
self.group_name = group_name
def __repr__(self):
if not env_settings().is_dev():
logger.warning(
f"Calling {self.__class__.__name__} __repr__ might cause additional select queries"
)
return "entry tag: %s -> %s " % (
self.entry.title[:40] + "..."
if len(self.entry.title) > 40
else self.entry.title,
self.tag.value,
)
class EntryEntryAssociation(orm.Base):
id = Column(Integer, primary_key=True, autoincrement=True)
source_id = Column(Integer, ForeignKey("entry.id"))
destination_id = Column(Integer, ForeignKey("entry.id"))
source = relationship(orm.Entry, foreign_keys=[source_id])
destination = relationship(orm.Entry, foreign_keys=[destination_id])
# maybe also primary_key=True, if there could be multiple types of links between 2 entries
# reference_type = Column(String, index=True, nullable=True)
reference = Column(JSONB, nullable=True, default={})
def __init__(self, source: orm.Entry, destination: orm.Entry, reference: dict):
self.source = source
self.destination = destination
self.reference = reference
def __repr__(self):
return (
f"Entry-Entry ref: {self.source.id}/{self.source.slug} -> "
f"{self.destination.id}/{self.destination.slug}: {self.reference}"
)
class EntryTranslation(orm.Base):
id = Column(Integer, primary_key=True)
entries = relationship("Entry", back_populates="translation_group")
# we should have this so that no issue is raised
def __init__(self, entries: List[orm.Entry]):
self.entries = entries
# class ActorTagAssociation(orm.Base):
# actor_id = Column(Integer, ForeignKey("actor.id"), primary_key=True)
# tag_id = Column(Integer, ForeignKey("tag.id"), primary_key=True)
#
# def __repr__(self):
# if not env_settings().is_dev():
# logger.warning(
# f"Calling {self.__class__.__name__} __repr__ might cause additional select queries"
# )
# return "actor tag: %s: %s " % (
# self.entry.title[:40] + "..."
# if len(self.entry.title) > 40
# else self.entry.title,
# self.actor.registered_name,
# )
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import os
import sklearn
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# normalize numerical columns
# one-hot categorical columns
def get_data(classification=True, regression=False, download=False):
url = 'https://raw.githubusercontent.com/Shane-Neeley/DrugMarket/master/drugmarket/drugmarket_dataframe.tsv'
if download:
df = pd.read_csv('drugmarket_dataframe.tsv', dtype={'MC':np.int64}, sep="\t")
else:
df = pd.read_csv(url, dtype={'MC':np.int64}, sep="\t")
# remove outliers
df = df[df['MC'] > 0]
# df = df[ (df['Phase 4'] > 0) | (df['Phase 3'] > 0) | (df['Phase 2'] > 0) | (df['Phase 1'] > 0)] # has any trials
# df = df[ (df['Phase 4'] < 500) | (df['Phase 3'] < 500) | (df['Phase 2'] < 500) | (df['Phase 1'] < 500)] # has too many trials
    df = df[df['Symbol'] != "SYK"]  # Stryker is an outlier
# easier to work with numpy array
data = df.values
# create a final output column of a category
# 1 = >$1Billion market cap, 0 = less
categ = np.array(data[:, -1] > 1e9, dtype=bool).astype(int)
categ = np.array([categ]).T
data = np.concatenate((data,categ),1)
# shuffle it
np.random.shuffle(data)
# split features and labels
    X = data[:, 3:-2].astype(np.int64)  # drop the first three and the last two columns
if (classification == True):
Y = data[:, -1].astype(np.int64) # this is the last column, 0 or 1 class for billion dollar valuation
if (regression == True):
Y = data[:, -2].astype(np.int64) # continuous value for marketcap
# print(df)
print(X)
# print('X.shape before')
# print(X.shape)
# Too many tags, do dimensionality reduction just on the tags (column 4 and on ..)
pca = PCA()
reduced = pca.fit_transform(X[:, 4:])
# print('reduced.shape before')
# print(reduced.shape)
# plt.scatter(reduced[:,0], reduced[:,1], s=100, c=Y, alpha=0.5)
# plt.title('reduced')
# plt.show()
    reduced = reduced[:, :25]  # keep the first 25 principal components (adjust the cutoff as needed)
# print('reduced.shape after cutoff')
# print(reduced.shape)
# make new X
X = np.concatenate((X[:,:4], reduced),1)
#X = X[:,:4] # without tag data
# print('X.shape after concatenate')
# print(X.shape)
# print(X)
# plt.plot(pca.explained_variance_ratio_)
# plt.title('explained_variance_ratio_')
# plt.show()
# cumulative variance
# choose k = number of dimensions that gives us 95-99% variance
cumulative = []
last = 0
for v in pca.explained_variance_ratio_:
cumulative.append(last + v)
last = cumulative[-1]
# plt.plot(cumulative)
# plt.title('cumulative')
# plt.show()
print('size X: ' + str(X.shape))
print('size Y: ' + str(Y.shape))
    # normalize phase columns: (X - mean) / std
for i in (0, 1, 2, 3):
m = X[:, i].mean()
s = X[:, i].std()
X[:, i] = (X[:, i] - m) / s
return X, Y, data
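# Editor's sketch (not called above): one way to pick the smallest number of
# principal components whose cumulative explained variance reaches a target.
def choose_n_components(explained_variance_ratio, target=0.95):
    total = 0.0
    for k, ratio in enumerate(explained_variance_ratio, start=1):
        total += ratio
        if total >= target:
            return k
    return len(explained_variance_ratio)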
if __name__ == '__main__':
get_data()
|
nilq/baby-python
|
python
|
import logging, sys, os, json, uuid
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
CLI_MODE = len(sys.argv) > 1 and sys.argv[1] == '--cli'
if CLI_MODE:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logging.debug("CLI MODE!")
parameters, datapackage, resources = {}, {}, []
else:
parameters, datapackage, resources = ingest()
default_parameters = {"num-rows": int(os.environ.get("NUM_ROWS", "10"))}
parameters = dict(default_parameters, **parameters)
logging.info(parameters)
stats = {}
aggregations = {"stats": stats}
def get_resource():
for i in range(0, parameters["num-rows"]):
yield {"uuid": str(uuid.uuid1()), "row_num": i}
if CLI_MODE:
for row in get_resource():
print(row)
else:
resource_descriptor = {PROP_STREAMING: True,
"name": "noise",
"path": "noise.csv",
"schema": {"fields": [{"name": "uuid", "type": "string"},
{"name": "row_num", "type": "integer"}],
"primaryKey": ["uuid"]}}
spew(dict(datapackage, resources=[resource_descriptor]),
[get_resource()], aggregations["stats"])
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import logging
import os
import json
import io
import uuid
# --------------------------------------------------------------------------------------
# Save this code in file "process_wrapper.py" and adapt as indicated in inline comments.
#
# Notes:
# - This is a Python 3 script.
# - The inputs will be given values by name, thus their order has no importance ...
# - ... except that the inputs with a default value must be listed last.
# - Parameter names are automatically converted into valid Python variable names.
# - Any empty line or line starting with a '#' character will be ignored.
# --------------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
stream = io.StringIO()
handler = logging.StreamHandler(stream)
logger.addHandler(handler)
max_cpu=8
def execute(out_dir, collection_dir, models_dir, tilesAndShapes_json, daterange_json):
"""
Inputs:
collection_dir -- collection_dir -- 45/User String
models_dir -- models_dir -- 45/User String
tilesAndShapes_json -- tilesAndShapes_json -- 45/User String
daterange_json -- daterange_json -- 45/User String
Outputs:
segmentedfiles_json -- segmentedfiles_json -- 45/User String
exceptionLog -- exceptionLog -- 45/User String
Main Dependency:
mep-wps/uc-bundle-1
Software Dependencies:
pywps-4
Processing Resources:
ram -- 15
disk -- 10
cpu -- 8
"""
segmentedfiles_json = None
exceptionLog=None
# ----------------------------------------------------------------------------------
# Insert your own code below.
# The files generated by your code must be stored in the "out_dir" folder.
# Only the content of that folder is persisted in the datastore.
# Give appropriate values to the output parameters. These will be passed to the next
# process(es) following the workflow connections.
# ----------------------------------------------------------------------------------
try:
logger.info("Starting...")
out_dir=os.path.join('/'.join(models_dir.split('/')[:-1]),'output',str(uuid.uuid4().hex))
os.makedirs(out_dir,exist_ok=True)
logger.info("Overriding out_dir to "+str(out_dir))
logger.info("Contents of out_dir: "+str(os.listdir(path=str(out_dir))))
#os.environ['JAVA_HOME']='/usr/local/jre'
#os.environ['JRE_HOME']='/usr/local/jre'
ncpu=1
import subprocess
try: ncpu=int(subprocess.check_output("/usr/bin/nproc"))
except: pass
if ncpu>max_cpu: ncpu=max_cpu
logger.info("Using {} cores".format(str(ncpu)))
logger.info("Loading dependencies...")
from parcel.feature.segmentation.segmentation_filebased import main_segmentation
from asb_usecases.logic.common import polygon2bboxwindow
logger.info("Loading input jsons...")
tilesAndShapes=json.loads(tilesAndShapes_json)
daterange=json.loads(daterange_json)
logger.info("Computing...")
segmentedfiles=[]
for i in range(len(tilesAndShapes)):
workdir=os.path.join(str(out_dir),str(i))
#workdir=os.path.join(models_dir,str(i))
os.makedirs(workdir,exist_ok=True)
iresults={}
for tile,shape in tilesAndShapes[i].items():
# TODO this needs to be merged with segmentation, not to glob twice
bbox=polygon2bboxwindow.compute(collection_dir+'/*/01/*/*'+tile+'*/**/*'+tile+'*.tif', shape)
outimg=main_segmentation(
imgdir=collection_dir,
maskdir=os.path.join(models_dir,'convmasks10m'),
modeldir=os.path.join(models_dir,'models'),
outdir=workdir,
tiles=tile,
startdate=daterange['start'],
enddate=daterange['end'],
maxcloudcover=int(100),
bbox=bbox,
#nwindowspermodel=5,
ncpu=ncpu
)
iresults[tile]=outimg
segmentedfiles.append(iresults)
logger.info("Contents of out_dir: "+str(os.listdir(path=str(out_dir))))
logger.info("Dumping results into json...")
segmentedfiles_json=json.dumps(segmentedfiles)
logger.info("Finished...")
except Exception as e:
logger.exception("Exception in wrapper.")
logging.shutdown()
stream.flush()
exceptionLog=stream.getvalue()
# ----------------------------------------------------------------------------------
# The wrapper must return a dictionary that contains the output parameter values.
# ----------------------------------------------------------------------------------
return {
"segmentedfiles_json": segmentedfiles_json,
"exceptionLog": exceptionLog
}
|
nilq/baby-python
|
python
|
from spec2wav.modules import Generator, Audio2Mel, Audio2Cqt
from pathlib import Path
import yaml
import torch
import os
def get_default_device():
if torch.cuda.is_available():
return "cuda"
else:
return "cpu"
def load_model(spec2wav_path, device=get_default_device()):
"""
Args:
spec2wav_path (str or Path): path to the root folder of dumped text2mel
device (str or torch.device): device to load the model
"""
root = Path(spec2wav_path)
with open(root / "args.yml", "r") as f:
args = yaml.load(f, Loader=yaml.FullLoader)
netG = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).to(device)
netG.load_state_dict(torch.load(root / "best_netG.pt", map_location=device))
return netG
class MelVocoder:
def __init__(
self,
path,
device=get_default_device(),
github=False,
model_name="multi_speaker",
):
#self.fft = Audio2Mel().to(device)
self.fft = Audio2Cqt().to(device)
if github:
netG = Generator(80, 32, 3).to(device)
root = Path(os.path.dirname(__file__)).parent
netG.load_state_dict(
torch.load(root / f"models/{model_name}.pt", map_location=device)
)
self.spec2wav = netG
else:
self.spec2wav = load_model(path, device)
self.device = device
def __call__(self, audio):
"""
Performs audio to mel conversion (See Audio2Mel in spec2wav/modules.py)
Args:
audio (torch.tensor): PyTorch tensor containing audio (batch_size, timesteps)
Returns:
torch.tensor: log-mel-spectrogram computed on input audio (batch_size, 80, timesteps)
"""
return self.fft(audio.unsqueeze(1).to(self.device))
def inverse(self, mel):
"""
Performs mel2audio conversion
Args:
mel (torch.tensor): PyTorch tensor containing log-mel spectrograms (batch_size, 80, timesteps)
Returns:
torch.tensor: Inverted raw audio (batch_size, timesteps)
"""
with torch.no_grad():
return self.spec2wav(mel.to(self.device)).squeeze(1)
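# Editor's usage sketch: the checkpoint path below is an assumption, and the
# 22.05 kHz sample rate is only illustrative.
if __name__ == "__main__":
    vocoder = MelVocoder(path="path/to/dumped/spec2wav")
    silence = torch.zeros(1, 22050)               # one second of silence
    spectrogram = vocoder(silence)                # (batch, bins, frames)
    reconstructed = vocoder.inverse(spectrogram)  # (batch, timesteps)
    print(spectrogram.shape, reconstructed.shape)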
|
nilq/baby-python
|
python
|
from app import app
app.run('0.0.0.0')
|
nilq/baby-python
|
python
|
import concurrent.futures as cf
import numpy as np
from multiprocessing import cpu_count
from tqdm import tqdm
from worms.util import jit, InProcessExecutor
from worms.search.result import ResultJIT
from worms.clashgrid import ClashGrid
def prune_clashes(
ssdag,
crit,
rslt,
max_clash_check=-1,
ca_clash_dis=4.0,
parallel=False,
approx=0,
verbosity=0,
merge_bblock=None,
pbar=False,
pbar_interval=10.0,
context_structure=None,
**kw,
):
# print('todo: clash check should handle symmetry')
if max_clash_check == 0:
return rslt
max_clash_check = min(max_clash_check, len(rslt.idx))
if max_clash_check < 0:
max_clash_check = len(rslt.idx)
if not pbar:
print(
f"mbb{f'{merge_bblock:04}' if merge_bblock else 'none'} checking clashes",
max_clash_check,
"of",
len(rslt.err),
)
verts = tuple(ssdag.verts)
# exe = cf.ProcessPoolExecutor if parallel else InProcessExecutor
exe = InProcessExecutor
with exe() as pool:
futures = list()
for i in range(max_clash_check):
dirns = tuple([v.dirn for v in verts])
iress = tuple([v.ires for v in verts])
chains = tuple([
ssdag.bbs[k][verts[k].ibblock[rslt.idx[i, k]]].chains for k in range(len(ssdag.verts))
])
ncacs = tuple([
ssdag.bbs[k][verts[k].ibblock[rslt.idx[i, k]]].ncac for k in range(len(ssdag.verts))
])
if isinstance(context_structure, ClashGrid):
clash = False
for pos, ncac in zip(rslt.pos[i], ncacs):
xyz = pos @ ncac[..., None]
if context_structure.clashcheck(xyz.squeeze()):
clash = True
break
if clash:
continue
futures.append(
pool.submit(
_check_all_chain_clashes,
dirns=dirns,
iress=iress,
idx=rslt.idx[i],
pos=rslt.pos[i],
chn=chains,
ncacs=ncacs,
thresh=ca_clash_dis * ca_clash_dis,
approx=approx,
))
futures[-1].index = i
if pbar:
desc = "checking clashes "
if merge_bblock is not None and merge_bblock >= 0:
desc = f"{desc} mbb{merge_bblock:04d}"
if merge_bblock is None:
merge_bblock = 0
futures = tqdm(
cf.as_completed(futures),
desc=desc,
total=len(futures),
mininterval=pbar_interval,
position=merge_bblock + 1,
)
ok = np.zeros(max_clash_check, dtype="?")
for f in futures:
ok[f.index] = f.result()
return ResultJIT(
rslt.pos[:max_clash_check][ok],
rslt.idx[:max_clash_check][ok],
rslt.err[:max_clash_check][ok],
rslt.stats,
)
@jit
def _chain_bounds(dirn, ires, chains, spliced_only=False, trim=8):
"return bounds for only spliced chains, with spliced away sequence removed"
chains = np.copy(chains)
bounds = []
seenchain = -1
if dirn[0] < 2:
ir = ires[0]
for i in range(len(chains)):
lb, ub = chains[i]
if lb <= ir < ub:
chains[i, dirn[0]] = ir + trim * (1, -1)[dirn[0]]
bounds.append((chains[i, 0], chains[i, 1]))
seenchain = i
if dirn[1] < 2:
ir = ires[1]
for i in range(len(chains)):
lb, ub = chains[i]
if lb <= ir < ub:
chains[i, dirn[1]] = ir + trim * (1, -1)[dirn[1]]
if seenchain == i:
if dirn[1]:
tmp = bounds[0][0], chains[i, 1]
else:
tmp = chains[i, 0], bounds[0][1]
# bounds[0][dirn[1]] = chains[i, dirn[1]]
bounds[0] = tmp
else:
bounds.append((chains[i, 0], chains[i, 1]))
if spliced_only:
return np.array(bounds, dtype=np.int32)
else:
return chains
@jit
def _has_ca_clash(position, ncacs, i, ichntrm, j, jchntrm, thresh, step=1):
for ichain in range(len(ichntrm)):
ilb, iub = ichntrm[ichain]
for jchain in range(len(jchntrm)):
jlb, jub = jchntrm[jchain]
for ir in range(ilb, iub, step):
ica = position[i] @ ncacs[i][ir, 1]
for jr in range(jlb, jub, step):
jca = position[j] @ ncacs[j][jr, 1]
d2 = np.sum((ica - jca)**2)
if d2 < thresh:
return True
return False
@jit
def _check_all_chain_clashes(dirns, iress, idx, pos, chn, ncacs, thresh, approx):
pos = pos.astype(np.float64)
for step in (3, 1): # 20% speedup.... ug... need BVH...
# only adjacent verts, only spliced chains
for i in range(len(dirns) - 1):
ichn = _chain_bounds(dirns[i], iress[i][idx[i]], chn[i], 1, 8)
for j in range(i + 1, i + 2):
jchn = _chain_bounds(dirns[j], iress[j][idx[j]], chn[j], 1, 8)
if _has_ca_clash(pos, ncacs, i, ichn, j, jchn, thresh, step):
return False
if step == 1 and approx == 2:
return True
# only adjacent verts, all chains
for i in range(len(dirns) - 1):
ichn = _chain_bounds(dirns[i], iress[i][idx[i]], chn[i], 0, 8)
for j in range(i + 1, i + 2):
jchn = _chain_bounds(dirns[j], iress[j][idx[j]], chn[j], 0, 8)
if _has_ca_clash(pos, ncacs, i, ichn, j, jchn, thresh, step):
return False
if step == 1 and approx == 1:
return True
# all verts, all chains
for i in range(len(dirns) - 1):
ichn = _chain_bounds(dirns[i], iress[i][idx[i]], chn[i], 0, 8)
for j in range(i + 1, len(dirns)):
jchn = _chain_bounds(dirns[j], iress[j][idx[j]], chn[j], 0, 8)
if _has_ca_clash(pos, ncacs, i, ichn, j, jchn, thresh, step):
return False
return True
|
nilq/baby-python
|
python
|
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from ResNet import ResNet
import argparse
from utils import *
import time
from common.utils import allocate_gpu
def find_next_time(path_list, default=-1):
    """Pick the run index for the experiment identifier: an explicit
    non-negative value is used as-is, -1 means one past the latest existing
    run, and -2 means reuse the latest run."""
    if default > -1:
        return default
    run_times = [int(path.split('_')[0]) for path in path_list]
    if default == -1:
        return max(run_times) + 1 if run_times else 0
    # default == -2: keep the last recorded run
    return max(run_times) if run_times else 0
"""parsing and configuration"""
def parse_args():
GPU = -1
GPU_ID = allocate_gpu(GPU)
print('Using GPU %d'%GPU_ID)
gpuNo = 'gpu10_%d'%GPU_ID
optimizer_name='adashift' #adam adashift amsgrad sgd
lr=0.01
beta1=0.9
beta2=0.999
keep_num=10
pred_g_op='max'
epoch_num = 50
desc = "Tensorflow implementation of ResNet"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--phase', type=str, default='train', help='train or test ?')
    parser.add_argument('--dataset', type=str, default='cifar10', help='[cifar10, mnist, fashion-mnist, tiny]')
parser.add_argument('--epoch', type=int, default=epoch_num, help='The number of epochs to run')
parser.add_argument('--test_span', type=int, default=20, help='step interval for test')
parser.add_argument('--batch_size', type=int, default=128, help='The size of batch per gpu')
parser.add_argument('--res_n', type=int, default=18, help='18, 34, 50, 101, 152')
parser.add_argument('--gpuNo', type=str, default=gpuNo, help='which gpu to use')
parser.add_argument('--run_time', type=int, default=-1, help="which time to run this experiment, used in the identifier of experiment. -1 automaticly add one to last time, -2 keep last record")
# parser.add_argument('--GPU', type=int, default=-1, help="which gpu to use")
# parser.add_argument('--T', type=str, default=T, help='identifier of experiment')
parser.add_argument('--optimizer_name', type=str, default=optimizer_name, help='[sgd, adam, amsgrad, adashift')
parser.add_argument('--lr', type=float, default=lr, help='initial learning rate')
parser.add_argument('--beta1', type=float, default=beta1, help='beta1 for optimizer')
parser.add_argument('--beta2', type=float, default=beta2, help='beta2 for optimizer')
parser.add_argument('--epsilon', type=float, default=1e-8, help='epsilon for optimizer')
parser.add_argument('--keep_num', type=int, default=keep_num, help='keep_num for adashift optimizer')
parser.add_argument('--pred_g_op', type=str, default=pred_g_op, help='pred_g_op for adashift optimizer')
parser.add_argument('--checkpoint_dir', type=str, default="",
help='Directory name to save the checkpoints')
parser.add_argument('--log_dir', type=str, default="",
help='Directory name to save training logs')
return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
# # --checkpoint_dir
# check_folder(args.checkpoint_dir)
#
# # --result_dir
# check_folder(args.log_dir)
# --epoch
try:
assert args.epoch >= 1
except:
print('number of epochs must be larger than or equal to one')
# --batch_size
try:
assert args.batch_size >= 1
except:
print('batch size must be larger than or equal to one')
return args
if __name__ == '__main__':
# parse arguments
args = parse_args()
if not os.path.exists('./logs'):
os.makedirs('./logs')
# return args
if args is None:
exit()
run_time=find_next_time(os.listdir('./logs'),args.run_time)
T='%d_%s_%s_%d_%.3f_%.2f_%.3f'%(run_time,args.optimizer_name,args.pred_g_op,args.keep_num,args.lr,args.beta1,args.beta2)
args.T = T
print('Check params: %s'%T)
if args.run_time ==-1:
time.sleep(6)
log_dir='./logs/%s'%T
if not os.path.exists('./logs'):
os.makedirs('./logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
checkpoint_dir='./checkpoints/model_%s'%T
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
args.log_dir = log_dir
args.checkpoint_dir = checkpoint_dir
# open session
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=4, inter_op_parallelism_threads=4)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
cnn = ResNet(sess, args)
# build graph
cnn.build_model()
# show network architecture
show_all_variables()
if args.phase == 'train' :
# launch the graph in a session
result=cnn.train()
print(" [:)] Training finished! \n")
cnn.test()
print(" [:)] Test finished!")
if args.phase == 'test' :
cnn.test()
print(" [:)] Test finished!")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (C) 2015 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import os
def lookFor(relativePath):
return os.path.isfile(sys.argv[1] + relativePath)
def fileContains(relativePath, regexp):
with open(sys.argv[1] + relativePath) as file:
for line in file:
if regexp.search(line):
return True
return False
print("/* Identifying AVFoundation Support */")
if lookFor("/include/AVFoundationCF/AVCFBase.h"):
print("#define HAVE_AVCF 1")
if lookFor("/include/AVFoundationCF/AVCFPlayerItemLegibleOutput.h"):
print("#define HAVE_AVCF_LEGIBLE_OUTPUT 1")
if lookFor("/include/AVFoundationCF/AVCFAssetResourceLoader.h"):
print("#define HAVE_AVFOUNDATION_LOADER_DELEGATE 1")
if lookFor("/include/AVFoundationCF/AVCFAsset.h"):
regexp = re.compile("AVCFURLAssetIsPlayableExtendedMIMEType")
if fileContains("/include/AVFoundationCF/AVCFAsset.h", regexp):
print("#define HAVE_AVCFURL_PLAYABLE_MIMETYPE 1")
if lookFor("/include/QuartzCore/CACFLayer.h"):
regexp = re.compile("CACFLayerSetContentsScale")
if fileContains("/include/QuartzCore/CACFLayer.h", regexp):
print("#define HAVE_CACFLAYER_SETCONTENTSSCALE 1")
if lookFor("/include/AVFoundationCF/AVCFPlayerItemLegibleOutput.h"):
regexp = re.compile("kAVCFPlayerItemLegibleOutput_CallbacksVersion_2")
if fileContains("/include/AVFoundationCF/AVCFPlayerItemLegibleOutput.h", regexp):
print("#define HAVE_AVCFPLAYERITEM_CALLBACK_VERSION_2 1")
|
nilq/baby-python
|
python
|
import os,sys
model = sys.argv[1]
stamp = int(sys.argv[2])
lr = float(sys.argv[3])
dropout = float(sys.argv[4])
bsize = int(sys.argv[5])
filein = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_x_test.npy'
fileout = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_x_test_' + str(stamp) + '.npy'
os.rename(filein, fileout)
filein = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_y_test.npy'
fileout = 'test_result/' + model + '_' + str(dropout) + '_' + str(lr) + '_y_test_' + str(stamp) + '.npy'
os.rename(filein, fileout)
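# Example invocation (the script name and all values below are illustrative only):
#   python rename_test_results.py mymodel 1618000000 0.001 0.5 128
# renames test_result/mymodel_0.5_0.001_x_test.npy to
# test_result/mymodel_0.5_0.001_x_test_1618000000.npy, and likewise for y_test.
# Note that the batch-size argument is parsed but unused in the renaming itself.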
|
nilq/baby-python
|
python
|
from tkinter import *
from tkinter.ttk import Combobox
from qiskit import IBMQ
import qiskit
import math
# Quantum random-number generation: backend and circuit setup
_backend = qiskit.BasicAer.get_backend('qasm_simulator')
_circuit = None
_bitCache = ''
def setqbits(n):
global _circuit
qr = qiskit.QuantumRegister(n)
cr = qiskit.ClassicalRegister(n)
_circuit = qiskit.QuantumCircuit(qr, cr)
_circuit.h(qr) # Apply Hadamard gate to qubits
_circuit.measure(qr, cr) # Collapses qubit to either 1 or 0 w/ equal prob.
setqbits(8) # Default Circuit is 8 Qubits
def set_backend(b='qasm_simulator'):
global _backend
if b == 'ibmqx4' or b == 'ibmqx5':
_backend = IBMQ.get_backend(b)
setqbits(5)
elif b == 'ibmq_16_melbourne':
_backend = IBMQ.get_backend(b)
setqbits(16)
elif b == 'ibmq_qasm_simulator':
_backend = IBMQ.get_backend(b)
setqbits(32)
else:
_backend = qiskit.BasicAer.get_backend('qasm_simulator')
setqbits(8)
# Strips QISKit output to just a bitstring.
def bitcount(counts):
return [k for k, v in counts.items() if v == 1][0]
# Populates the bitCache with at least n more bits.
def _request_bits(n):
global _bitCache
iterations = math.ceil(n / _circuit.width())
for _ in range(iterations):
# Create new job and run the quantum circuit
job = qiskit.execute(_circuit, _backend, shots=1)
_bitCache += bitcount(job.result().get_counts())
# Returns a random n-bit string by popping n bits from bitCache.
def bitstring(n):
global _bitCache
if len(_bitCache) < n:
_request_bits(n - len(_bitCache))
bitString = _bitCache[0:n]
_bitCache = _bitCache[n:]
return bitString
# Returns a random integer between and including [min, max].
# Running time is probabilistic but complexity is still O(n)
def randint(min, max):
delta = max - min
n = math.floor(math.log(delta, 2)) + 1
result = int(bitstring(n), 2)
while (result > delta):
result = int(bitstring(n), 2)
return result + min
def roll(nb_dice, nb_face):
roll_list = []
for i in range(nb_dice):
roll_list.append(randint(1, nb_face))
return roll_list
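# Note on the generators above (explanatory comments only; behaviour unchanged):
# randint() uses rejection sampling: it draws n = floor(log2(delta)) + 1 quantum
# bits, where delta = max - min, and redraws whenever the value exceeds delta,
# so every integer in [min, max] is equally likely. roll() repeats this once per
# die. A minimal usage sketch, left commented out because every call submits a
# job to the configured backend:
#   print(randint(1, 6))   # one quantum d6 roll
#   print(roll(3, 20))     # three quantum d20 rolls, e.g. [7, 19, 2]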
root = Tk()
class App:
# define the widgets
def __init__(self, master):
self.title = Label(master, fg="black", text="The Quantum Dice", font=('arial', 40))
self.nb_dices_entry = Combobox(master,
values=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
self.nb_faces_entry = Combobox(master, values=[4, 6, 8, 10, 12, 20, 100])
self.mod_entry = Combobox(master,
values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
        self.nb_dices_label = Label(master, fg="black", text="How many dice? ", font=('arial', 20))
        self.nb_faces_label = Label(master, fg="black", text="How many sides?", font=('arial', 20))
self.mod_label = Label(master, fg="black", text="Would you like to include a modifier?", font=('arial', 20))
self.generate_button = Button(master, text="ROLL DICE", command=self.get_output)
self.list_output_int = Label(master, fg="black", bg="white", text="? ? ?") # TODO: add text function
self.mod_output_int = Label(master, fg="black", bg="white", text="0")
self.final_output_int = Label(master, fg="black", bg="white", text="0")
self.space = Label(master, fg="black", bg="white", text="")
        self.list_output_lab = Label(master, fg="black", bg="white", text="Dice thrown: ")  # TODO: add text function
self.mod_output_lab = Label(master, fg="black", bg="white", text="Modifier: ")
self.final_output_lab = Label(master, fg="black", bg="white", text="Final: ")
# Call the widgets
self.title.grid(row=0, columnspan=3)
self.nb_dices_entry.grid(row=2, column=1)
self.nb_dices_entry.current(3)
self.nb_dices_label.grid(row=2, sticky=E)
self.nb_faces_entry.grid(row=3, column=1)
self.nb_faces_entry.current(4)
self.nb_faces_label.grid(row=3, sticky=E)
self.mod_entry.grid(row=4, column=1)
self.mod_entry.current(0)
self.mod_label.grid(row=4, sticky=E)
self.generate_button.grid(row=6, columnspan=3)
self.space.grid(row=7, columnspan=3)
self.list_output_lab.grid(row=8, sticky=E)
self.list_output_int.grid(row=8, column=1)
self.mod_output_lab.grid(row=9, sticky=E)
self.mod_output_int.grid(row=9, column=1)
self.final_output_lab.grid(row=10, sticky=E)
self.final_output_int.grid(row=10, column=1)
def get_output(self):
nb_dice = int(self.nb_dices_entry.get())
nb_face = int(self.nb_faces_entry.get())
output = roll(nb_dice, nb_face)
mod = int(self.mod_entry.get())
final = sum(output) + mod
self.list_output_int["text"] = output
self.mod_output_int["text"] = mod
self.final_output_int["text"] = final
app = App(root)
root.mainloop()
|
nilq/baby-python
|
python
|
from selenium import webdriver
from bs4 import BeautifulSoup
import time
driver = webdriver.PhantomJS()
client_info_search_url = "https://xclient.info/search/s/"
app_list = ["cleanmymac", "alfred", "betterzip", "beyond compare", "iina", "Navicat Premium", "charles", "DaisyDisk",
"paw", "Typora"]
class update():
def execute(self):
for app_name in app_list:
            # app_name = input("Please enter the app name: ")
driver.get(client_info_search_url + app_name)
tags = BeautifulSoup(driver.page_source, 'lxml').findAll("div", class_="main")
for tag in tags:
name = tag.a["title"]
if app_name.lower() in name.lower():
name_list = name.split(" ")
name_list.pop(len(name_list) - 1)
name_version = ""
for item in name_list:
name_version += item
href = tag.a["href"] + "#versions"
date = tag.find("span", class_="item date").text
print(date + " - " + name_version + " - " + href)
time.sleep(2)
update().execute()
|
nilq/baby-python
|
python
|
__author__='Pablo Leal'
import argparse
from keras.callbacks import LambdaCallback
import trainer.board as board
import trainer.loader as loader
import trainer.modeller as modeller
import trainer.saver as saver
from trainer.constans import BATCH_SIZE, CHECKPOINT_PERIOD
from trainer.constans import EPOCHS
from trainer.constans import PREDICTION_LENGTH
from trainer.constans import WINDOW_LENGTH
def saveModelToCloud(epoch, period=1):
    if epoch % period == 0:
        # assuming the intended callee is the imported `saver` module and the
        # intended run identifier is the globally assigned `jobName`
        saver.saveModelToCloud(model, pathToJobDir + '/epochs_' + jobName, '{:03d}'.format(epoch))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-train-file',
help='GCS or local paths to training data',
required=True
)
parser.add_argument(
'-job-name',
help='GCS to write checkpoints and export models',
required=True
)
parser.add_argument(
'-job-dir',
help='GCS to write checkpoints and export models',
required=True
)
args=parser.parse_args()
arguments = args.__dict__
pathToJobDir = arguments.pop('job_dir')
jobName = arguments.pop('job_name')
pathToData = arguments.pop('train_file')
trainingDataDict, trainingLabelsDict, testingDataDict, testingLabelsDict = \
loader.loadObjectFromPickle(pathToData)
model = modeller.buildModel(WINDOW_LENGTH - PREDICTION_LENGTH, PREDICTION_LENGTH)
epochCallback = LambdaCallback (on_epoch_end=lambda epoch, logs: saveModelToCloud(epoch, CHECKPOINT_PERIOD))
model.fit(
[
trainingDataDict["weightedAverage"],
trainingDataDict["volume"],
],
[
trainingLabelsDict["weightedAverage"]
],
validation_data=(
[
testingDataDict["weightedAverage"],
testingDataDict["volume"],
],
[
testingLabelsDict["weightedAverage"]
]),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
shuffle=True,
        callbacks=[
board.createTensorboardConfig(pathToJobDir + "/logs"),
epochCallback
])
    saver.saveModelToCloud(model, pathToJobDir)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
awsecommerceservice
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class ItemSearchRequest(object):
"""Implementation of the 'ItemSearchRequest' model.
TODO: type model description here.
Attributes:
actor (string): TODO: type description here.
artist (string): TODO: type description here.
availability (AvailabilityEnum): TODO: type description here.
audience_rating (list of AudienceRatingEnum): TODO: type description
here.
author (string): TODO: type description here.
brand (string): TODO: type description here.
browse_node (string): TODO: type description here.
composer (string): TODO: type description here.
condition (ConditionEnum): TODO: type description here.
conductor (string): TODO: type description here.
director (string): TODO: type description here.
item_page (int): TODO: type description here.
keywords (string): TODO: type description here.
manufacturer (string): TODO: type description here.
maximum_price (int): TODO: type description here.
merchant_id (string): TODO: type description here.
minimum_price (int): TODO: type description here.
min_percentage_off (int): TODO: type description here.
music_label (string): TODO: type description here.
orchestra (string): TODO: type description here.
power (string): TODO: type description here.
publisher (string): TODO: type description here.
related_item_page (object): TODO: type description here.
relationship_type (list of string): TODO: type description here.
response_group (list of string): TODO: type description here.
search_index (string): TODO: type description here.
sort (string): TODO: type description here.
title (string): TODO: type description here.
release_date (string): TODO: type description here.
include_reviews_summary (string): TODO: type description here.
truncate_reviews_at (int): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"actor":'Actor',
"artist":'Artist',
"availability":'Availability',
"audience_rating":'AudienceRating',
"author":'Author',
"brand":'Brand',
"browse_node":'BrowseNode',
"composer":'Composer',
"condition":'Condition',
"conductor":'Conductor',
"director":'Director',
"item_page":'ItemPage',
"keywords":'Keywords',
"manufacturer":'Manufacturer',
"maximum_price":'MaximumPrice',
"merchant_id":'MerchantId',
"minimum_price":'MinimumPrice',
"min_percentage_off":'MinPercentageOff',
"music_label":'MusicLabel',
"orchestra":'Orchestra',
"power":'Power',
"publisher":'Publisher',
"related_item_page":'RelatedItemPage',
"relationship_type":'RelationshipType',
"response_group":'ResponseGroup',
"search_index":'SearchIndex',
"sort":'Sort',
"title":'Title',
"release_date":'ReleaseDate',
"include_reviews_summary":'IncludeReviewsSummary',
"truncate_reviews_at":'TruncateReviewsAt'
}
def __init__(self,
actor=None,
artist=None,
availability=None,
audience_rating=None,
author=None,
brand=None,
browse_node=None,
composer=None,
condition=None,
conductor=None,
director=None,
item_page=None,
keywords=None,
manufacturer=None,
maximum_price=None,
merchant_id=None,
minimum_price=None,
min_percentage_off=None,
music_label=None,
orchestra=None,
power=None,
publisher=None,
related_item_page=None,
relationship_type=None,
response_group=None,
search_index=None,
sort=None,
title=None,
release_date=None,
include_reviews_summary=None,
truncate_reviews_at=None):
"""Constructor for the ItemSearchRequest class"""
# Initialize members of the class
self.actor = actor
self.artist = artist
self.availability = availability
self.audience_rating = audience_rating
self.author = author
self.brand = brand
self.browse_node = browse_node
self.composer = composer
self.condition = condition
self.conductor = conductor
self.director = director
self.item_page = item_page
self.keywords = keywords
self.manufacturer = manufacturer
self.maximum_price = maximum_price
self.merchant_id = merchant_id
self.minimum_price = minimum_price
self.min_percentage_off = min_percentage_off
self.music_label = music_label
self.orchestra = orchestra
self.power = power
self.publisher = publisher
self.related_item_page = related_item_page
self.relationship_type = relationship_type
self.response_group = response_group
self.search_index = search_index
self.sort = sort
self.title = title
self.release_date = release_date
self.include_reviews_summary = include_reviews_summary
self.truncate_reviews_at = truncate_reviews_at
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
actor = dictionary.get('Actor')
artist = dictionary.get('Artist')
availability = dictionary.get('Availability')
audience_rating = dictionary.get('AudienceRating')
author = dictionary.get('Author')
brand = dictionary.get('Brand')
browse_node = dictionary.get('BrowseNode')
composer = dictionary.get('Composer')
condition = dictionary.get('Condition')
conductor = dictionary.get('Conductor')
director = dictionary.get('Director')
item_page = dictionary.get('ItemPage')
keywords = dictionary.get('Keywords')
manufacturer = dictionary.get('Manufacturer')
maximum_price = dictionary.get('MaximumPrice')
merchant_id = dictionary.get('MerchantId')
minimum_price = dictionary.get('MinimumPrice')
min_percentage_off = dictionary.get('MinPercentageOff')
music_label = dictionary.get('MusicLabel')
orchestra = dictionary.get('Orchestra')
power = dictionary.get('Power')
publisher = dictionary.get('Publisher')
related_item_page = dictionary.get('RelatedItemPage')
relationship_type = dictionary.get('RelationshipType')
response_group = dictionary.get('ResponseGroup')
search_index = dictionary.get('SearchIndex')
sort = dictionary.get('Sort')
title = dictionary.get('Title')
release_date = dictionary.get('ReleaseDate')
include_reviews_summary = dictionary.get('IncludeReviewsSummary')
truncate_reviews_at = dictionary.get('TruncateReviewsAt')
# Return an object of this model
return cls(actor,
artist,
availability,
audience_rating,
author,
brand,
browse_node,
composer,
condition,
conductor,
director,
item_page,
keywords,
manufacturer,
maximum_price,
merchant_id,
minimum_price,
min_percentage_off,
music_label,
orchestra,
power,
publisher,
related_item_page,
relationship_type,
response_group,
search_index,
sort,
title,
release_date,
include_reviews_summary,
truncate_reviews_at)
|
nilq/baby-python
|
python
|
a, b, c = map(int, input().split())
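# The paired prints below illustrate the modular-arithmetic identities
#   (a + b) % c == ((a % c) + (b % c)) % c
#   (a * b) % c == ((a % c) * (b % c)) % c
# so the first two lines of output match, as do the last two.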
print((a+b)%c)
print((a%c+b%c)%c)
print((a*b)%c)
print((a%c*b%c)%c)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
from __future__ import unicode_literals
#===============================================================================
def read_input(strip=True):
return raw_input().strip() if strip else raw_input()
def read_input_multi(strip=True):
return read_input(strip).split()
def read_int():
return int(read_input())
def read_int_multi():
return [int(s) for s in read_input_multi()]
def print_solution(i, solution):
print('Case #{}: {}'.format(i, solution))
#===============================================================================
def solve_matrix(n, soldier_lists):
solution = []
used_edges = []
for iteration in xrange(n):
valid_soldiers = [(i, l) for i, l in enumerate(soldier_lists) if i not in used_edges]
# print valid_soldiers
top_left = min([min([x for j, x in enumerate(l) if j >= iteration]) for i, l in valid_soldiers])
#print("top: {}".format(top_left))
edges = [l for i, l in valid_soldiers if l[iteration] == top_left]
used_edges += [i for i, l in valid_soldiers if l[iteration] == top_left]
if len(edges) == 2:
edge_heights = edges[0] + edges[1]
# print edge_heights
for soldiers in soldier_lists:
value = soldiers[iteration]
# print "value: " + str(value)
edge_heights.remove(value)
solution.append(edge_heights[0])
else:
solution.append(edges[0][iteration])
return ' '.join([str(x) for x in solution])
#------------------------------------------------------------------------------
def solve():
n = read_int()
num_lists = 2 * n - 1
soldier_lists = [read_int_multi() for _ in xrange(num_lists)]
line = solve_matrix(n, soldier_lists)
return line
#===============================================================================
if __name__ == '__main__':
test_cases = read_int()
for t in xrange(test_cases):
solution = solve()
print_solution(t + 1, solution)
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import re
from typing import Any
import pytest
from _pytest.python_api import RaisesContext
from omegaconf import DictConfig, OmegaConf
from hydra._internal import utils
from hydra._internal.utils import _locate
from hydra.types import ObjectConf
from tests import AClass, Adam, AnotherClass, ASubclass, NestingClass, Parameters
@pytest.mark.parametrize( # type: ignore
"matrix,expected",
[
([["a"]], [1]),
([["a", "bb"]], [1, 2]),
([["a", "bb"], ["aa", "b"]], [2, 2]),
([["a"], ["aa", "b"]], [2, 1]),
([["a", "aa"], ["bb"]], [2, 2]),
([["a"]], [1]),
([["a"]], [1]),
([["a"]], [1]),
],
)
def test_get_column_widths(matrix: Any, expected: Any) -> None:
assert utils.get_column_widths(matrix) == expected
@pytest.mark.parametrize( # type: ignore
"config, expected, warning",
[
pytest.param(
OmegaConf.create({"_target_": "foo"}), "foo", False, id="ObjectConf:target"
),
pytest.param(
OmegaConf.create({"cls": "foo"}), "foo", "cls", id="DictConfig:cls"
),
pytest.param(
OmegaConf.create({"class": "foo"}), "foo", "class", id="DictConfig:class"
),
pytest.param(
OmegaConf.create({"target": "foo"}),
"foo",
"target",
id="DictConfig:target",
),
pytest.param(
OmegaConf.create({"cls": "foo", "_target_": "bar"}),
"bar",
False,
id="DictConfig:cls_target",
),
pytest.param(
OmegaConf.create({"class": "foo", "_target_": "bar"}),
"bar",
"class",
id="DictConfig:class_target",
),
# check that `target` is prioritized over `cls`/`class`.
pytest.param(
OmegaConf.create({"cls": "foo", "_target_": "bar"}),
"bar",
"cls",
id="DictConfig:pri_cls",
),
pytest.param(
OmegaConf.create({"class": "foo", "_target_": "bar"}),
"bar",
"class",
id="DictConfig:pri_class",
),
pytest.param(
OmegaConf.create({"target": "foo", "_target_": "bar"}),
"bar",
"target",
id="DictConfig:pri_target",
),
],
)
def test_get_class_name(
config: DictConfig, expected: Any, warning: Any, recwarn: Any
) -> None:
assert utils._get_cls_name(config) == expected
target_field_deprecated = (
"\nConfig key '{key}' is deprecated since Hydra 1.0 and will be removed in Hydra 1.1."
"\nUse '_target_' instead of '{field}'."
"\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
)
if warning is not False:
assert recwarn[0].category == UserWarning
assert recwarn[0].message.args[0] == target_field_deprecated.format(
key=warning, field=warning
)
# TODO: why?
# @pytest.mark.skipif( # type: ignore
# sys.version_info < (3, 7), reason="requires python3.7"
# )
@pytest.mark.parametrize( # type: ignore
"name,expected",
[
("tests.Adam", Adam),
("tests.Parameters", Parameters),
("tests.AClass", AClass),
("tests.ASubclass", ASubclass),
("tests.NestingClass", NestingClass),
("tests.AnotherClass", AnotherClass),
("", pytest.raises(ImportError, match=re.escape("Empty path"))),
(
"not_found",
pytest.raises(
ImportError, match=re.escape("Error loading module 'not_found'")
),
),
(
"tests.b.c.Door",
pytest.raises(ImportError, match=re.escape("No module named 'tests.b'")),
),
],
)
def test_locate(name: str, expected: Any) -> None:
if isinstance(expected, RaisesContext):
with expected:
_locate(name)
else:
assert _locate(name) == expected
def test_object_conf_deprecated() -> None:
msg = (
"\nObjectConf is deprecated in favor of TargetConf since Hydra 1.0.0rc3 and will be removed in Hydra 1.1."
"\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
)
with pytest.warns(
expected_warning=UserWarning, match=msg,
):
ObjectConf(target="foo")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'QuestionnaireText'
db.create_table('cmsplugin_questionnairetext', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('depends_on_answer', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='trigger_text', null=True, to=orm['cms_saq.Answer'])),
))
db.send_create_signal('cms_saq', ['QuestionnaireText'])
def backwards(self, orm):
# Deleting model 'QuestionnaireText'
db.delete_table('cmsplugin_questionnairetext')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 23, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms_saq.answer': {
'Meta': {'ordering': "('question', 'order', 'slug')", 'unique_together': "(('question', 'slug'),)", 'object_name': 'Answer'},
'help_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['cms_saq.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.bulkanswer': {
'Meta': {'object_name': 'BulkAnswer', 'db_table': "'cmsplugin_bulkanswer'", '_ormbases': ['cms.CMSPlugin']},
'answer_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.formnav': {
'Meta': {'object_name': 'FormNav', 'db_table': "'cmsplugin_formnav'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'end_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'formnav_ends'", 'null': 'True', 'to': "orm['cms.Page']"}),
'end_page_condition_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms_saq.Question']", 'null': 'True', 'blank': 'True'}),
'end_page_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'end_submission_set': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'next_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'formnav_nexts'", 'null': 'True', 'to': "orm['cms.Page']"}),
'next_page_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'prev_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'formnav_prevs'", 'null': 'True', 'to': "orm['cms.Page']"}),
'prev_page_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'submission_set_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'cms_saq.groupedanswer': {
'Meta': {'ordering': "('group', 'order', 'slug')", 'object_name': 'GroupedAnswer', '_ormbases': ['cms_saq.Answer']},
'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms_saq.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.progressbar': {
'Meta': {'object_name': 'ProgressBar', 'db_table': "'cmsplugin_progressbar'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'count_optional': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'cms_saq.question': {
'Meta': {'object_name': 'Question', 'db_table': "'cmsplugin_question'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'depends_on_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trigger_questions'", 'null': 'True', 'to': "orm['cms_saq.Answer']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'optional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'cms_saq.questionnairetext': {
'Meta': {'object_name': 'QuestionnaireText', 'db_table': "'cmsplugin_questionnairetext'"},
'body': ('django.db.models.fields.TextField', [], {}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'depends_on_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trigger_text'", 'null': 'True', 'to': "orm['cms_saq.Answer']"})
},
'cms_saq.scoresection': {
'Meta': {'ordering': "('order', 'label')", 'object_name': 'ScoreSection'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['cms_saq.SectionedScoring']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms_saq.sectionedscoring': {
'Meta': {'object_name': 'SectionedScoring', 'db_table': "'cmsplugin_sectionedscoring'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'cms_saq.submission': {
'Meta': {'ordering': "('submission_set', 'user', 'question')", 'unique_together': "(('question', 'user', 'submission_set'),)", 'object_name': 'Submission'},
'answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'submission_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['cms_saq.SubmissionSet']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'saq_submissions'", 'to': "orm['auth.User']"})
},
'cms_saq.submissionset': {
'Meta': {'object_name': 'SubmissionSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'saq_submissions_sets'", 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['cms_saq']
|
nilq/baby-python
|
python
|
def fibonacci(n):
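    # Prints fibonacci(0)..fibonacci(n) iteratively: fibo[0] and fibo[1] hold the
    # two most recent values, and `flag` alternates which slot is overwritten with
    # their sum, so fibo[0] + fibo[1] equals fibonacci(i) once the inner loop ends.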
for i in range(n+1):
fibo = [0, 1]
if i == 0:
print ("fibo( 0 ) = ", 0)
elif i == 1:
print ("fibo( 1 ) = ", 1)
else:
flag = True
for j in range(2, i):
                if flag:  # overwrite the first slot with the sum of the two running values
                    fibo[0] = fibo[1] + fibo[0]
                else:  # overwrite the second slot with the sum of the two running values
                    fibo[1] = fibo[0] + fibo[1]
flag = not flag
print (fibo[0]+fibo[1])
if __name__ == "__main__":
fibonacci(40)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Contains function to identify bad channels based on time and freq domain
methods.
authors: Niko Kampel, n.kampel@gmail.com
Praveen Sripad, pravsripad@gmail.com
'''
import numpy as np
import mne
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import euclidean_distances
from .jumeg_utils import check_read_raw
def compute_euclidean_stats(epoch, sensitivity, mode='adaptive',
fraction=None):
'''
Compute the Euclidean matrix along with necessary statistics for data
from one single epoch.
Function can also be used for psd. (generic function)
Parameters
epoch: np.array
The data from which to compute the Euclidean matrices.
sensitivity: float in range of [0,100]
Percentile to compute threshold used for clustering,
which must be between 0 and 100 inclusive.
mode: str
The mode in which to return the statistics results.
        Can be 'fixed' for a fixed threshold or 'adaptive' for an epoch-wise
        (adaptive) threshold.
When a fixed threshold is used, a single percentile based value is
used for all the epochs/windows of the data. If adaptive is chosen,
a threshold value for every epoch is used.
Note: Fixed threshold is currently incompletely implemented and
we do not suggest using it.
fraction: float | None
Ratio of the number of samples to be chosen for clustering.
Returns
    If mode is 'fixed', returns the kept samples and a fixed percentile threshold.
    If mode is 'adaptive', returns the kept samples, the per-row nearest-neighbour
    distances and the selected threshold.
#TODO doc needs to be updated
'''
if fraction:
number_of_samples = int(epoch.shape[1]*fraction)
sorted_peaks = np.sort(np.square(np.diff(epoch)), axis=1)
# just keep 1% of the samples
afp = sorted_peaks[:, sorted_peaks.shape[1]-number_of_samples:]
else:
        # do not do reduced sampling for psds
afp = epoch # slightly confusing, this part actually handles psd code
mydist = euclidean_distances(afp, afp)
# average_distances = np.average(mydist, axis=1)
if mode == 'adaptive':
# adaptive threshold depending on epochs
nearest_neighbour = np.sort(mydist, axis=1)[:, 1]
selected_threshold = np.percentile(np.tril(mydist), sensitivity)
return afp, nearest_neighbour, selected_threshold
elif mode == 'fixed':
# fixed threshold for all epochs
# not to be used
fixed_threshold = np.percentile(np.tril(mydist), sensitivity)
return afp, fixed_threshold
else:
        raise RuntimeError("Mode should be one of 'adaptive' or 'fixed'")
def clustered_afp(epochs, sensitivity_steps, fraction, mode='adaptive',
min_samples=1, n_jobs = None):
'''
    Perform clustering on the differences in the signal from one sample to the next.
    This method helps us to identify flux jumps and large spikes in the data.
Parameters
epochs: mne.Epochs
sensitivity_steps: float in range of [0,100]
        Percentile to compute the threshold used for clustering the
        sample-to-sample differences, which must be between 0 and 100
        inclusive.
    fraction: float
        Ratio of the number of samples kept per epoch for clustering.
min_samples: int
Number of samples to be chosen for DBSCAN clustering.
Returns
afps: np.array
        Squared sample-to-sample differences kept for clustering (n_epochs, n_chans, n_samples_kept)
afp_suspects: list
Suspected bad channels.
afp_nearest_neighbour: list
The nearest neighbour identified before DBSCAN clustering.
zlimit_afp: float
A scaling value used for plotting.
'''
# epochs = epochs.get_data()
afps, afp_suspects, afp_percentiles, afp_nearest_neighbour = [], [], [], []
# statistics for every epoch
for epoch in epochs:
if mode == 'adaptive':
afp, nearest_neighbour, selected_threshold = \
compute_euclidean_stats(epoch, sensitivity_steps, mode='adaptive')
afp_nearest_neighbour.append(nearest_neighbour)
afp_percentiles.append(selected_threshold)
elif mode == 'fixed':
# TODO complete fixed threshold computation
# statistics and clustering for every epoch for fixed threshold
afp, selected_threshold = compute_euclidean_stats(epoch, sensitivity_steps,
mode='fixed')
afp_percentiles.append(selected_threshold)
else:
raise RuntimeError('Mode unknown.')
# do the clustering for every epoch
db = DBSCAN(eps=selected_threshold, min_samples=min_samples,
metric='euclidean',n_jobs = n_jobs).fit(afp)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
suspect = [i for i, x in enumerate(db.labels_) if x]
afps.append(afp)
afp_suspects.append(suspect)
afps = np.asarray(afps)
afp_nearest_neighbour = np.asarray(afp_nearest_neighbour)
    # hack to get a limit for plotting (this is not supposed to be here)
zlimit_afp = np.percentile(afp_percentiles, 50) * 4
return afps, afp_suspects, afp_nearest_neighbour, zlimit_afp
def clustered_psd(epochs, sensitivity_psd, picks, min_samples=1, n_jobs = None):
'''
Perform clustering on PSDs to identify bad channels.
Parameters
epochs: mne.Epochs
sensitivity_psd: float in range of [0,100]
Percentile to compute threshold used for clustering PSDs,
which must be between 0 and 100 inclusive.
picks: list
Picks of the channels to be used.
min_samples: int
Number of samples to be chosen for DBSCAN clustering.
Returns
psds: np.array
Power spectral density values (n_epochs, n_chans, n_freqs)
psd_suspects: list
Suspected bad channels.
psd_nearest_neighbour: list
The nearest neighbour identified before DBSCAN clustering.
zlimit_psd: float
A scaling value used for plotting.
'''
psds, freqs = mne.time_frequency.psd_welch(epochs, fmin=2., fmax=200.,
picks=picks)
psd_percentiles, psd_nearest_neighbour, psd_suspects = [], [], []
for ipsd in psds:
psd, nearest_neighbour, selected_threshold = \
compute_euclidean_stats(ipsd, sensitivity_psd, mode='adaptive')
psd_nearest_neighbour.append(nearest_neighbour)
psd_percentiles.append(selected_threshold)
db = DBSCAN(eps=selected_threshold, min_samples=min_samples,
metric='euclidean', n_jobs = n_jobs).fit(psd)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
suspect = [i for i, x in enumerate(db.labels_) if x]
psd_suspects.append(suspect)
psd_nearest_neighbour = np.asarray(psd_nearest_neighbour)
zlimit_psd = np.percentile(psd_percentiles, 50) * 4
return psds, psd_suspects, psd_nearest_neighbour, zlimit_psd
def make_minimap(picks, afp_suspects, psd_suspects):
'''
Make a minimap with bad channels identifed using time domain and freq
domain methods.
Helper function for plotting the values
'''
# values inside minimap are a workaround for colormap 'brg'
minimap = np.zeros((len(picks), len(afp_suspects))) # 0 if channel is regular
for e in range(0, len(afp_suspects)):
for c in afp_suspects[e]:
minimap[c, e] = 3 # yellow if afp is unusual
for e in range(0, len(afp_suspects)):
for c in psd_suspects[e]:
if minimap[c, e] == 3:
minimap[c, e] = 2 # red if afp+psd is unusual
else:
minimap[c, e] = 1 # purple if psd is unusual
# minimap marker
# coordinates for markers
x_afp, y_afp, x_psd, y_psd, x_both, y_both = [], [], [], [], [], []
for e in range(0, minimap.shape[1]):
for c in range(0, len(minimap)):
if minimap[c, e] == 3: # condition for afp
x_afp.append(e)
y_afp.append(c)
if minimap[c, e] == 1: # condition for psd
x_psd.append(e)
y_psd.append(c)
if minimap[c, e] == 2: # condition for both
x_both.append(e)
y_both.append(c)
return minimap, x_afp, y_afp, x_psd, y_psd, x_both, y_both
def validation_marker(minimap, picks_bad, picks_fp):
'''
Helper function for plotting bad channels identified using time domain (afp)
or freq domain (psd) methods.
Using the validation marker helps compare already marked bad channels with
automatically identified ones for testing purposes.
'''
x_miss, y_miss, x_hit, y_hit, x_fp, y_fp = [], [], [], [], [], []
for e in range(0, minimap.shape[1]):
for c in range(0, len(minimap)):
if c in picks_bad and minimap[c, e] > 0: # condition for hit
x_hit.append(e)
y_hit.append(c)
if c in picks_bad and minimap[c, e] == 0: # condition for miss
x_miss.append(e)
y_miss.append(c)
            if c in picks_fp and minimap[c, e] > 0:  # condition for false positive
x_fp.append(e)
y_fp.append(c)
return x_miss, y_miss, x_hit, y_hit, x_fp, y_fp
def plot_autosuggest_summary(afp_nearest_neighbour, psd_nearest_neighbour,
picks, afp_suspects, psd_suspects, picks_bad,
picks_fp, zlimit_afp, zlimit_psd,
epoch_length, marks, validation=False):
'''
Plot showing the automated identification of bad channels using time and
frequency domain methods.
#TODO Improve documentation.
'''
import matplotlib.pyplot as plt
plt.style.use(['seaborn-deep'])
# calculate data for summary_plot
minimap, x_afp, y_afp, x_psd, y_psd, x_both, y_both = \
make_minimap(picks, afp_suspects, psd_suspects)
# calculate validation markers if necessary (for testing purposes only)
if validation:
x_miss, y_miss, x_hit, y_hit, x_fp, y_fp = \
validation_marker(minimap, picks_bad, picks_fp)
# do the actual plotting
summary_plot = plt.figure(figsize=(16, 10))
plt.subplots_adjust(hspace=0.2)
t = np.arange(len(minimap[1]))
# minimap
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=2)
ax1.xaxis.tick_top()
ax1.set_xticks((t))
plt.xticks(t, (t+1)*epoch_length-epoch_length/2) # align minimap with clusterplots
ax1.grid(which='both')
plt.xlim([0, len(t)-1])
plt.ylim([len(minimap), 0])
    plt.yticks(marks, [x+1 for x in marks])  # only tick channels of interest; +1 because numpy and mne indices differ
plt.ylabel('channel number')
# plt.xlabel('raw_fname = '+"'"+raw_fname+"'" + ' ; marked_chn = '+str(list(marks)))
ax1.xaxis.set_label_position('top')
#TODO find better way to find zlimit
# zlimit_afp = np.percentile(afp_percentiles, 50) * 4
plt.imshow(np.clip(afp_nearest_neighbour, 0, zlimit_afp).T*-1,
aspect='auto', interpolation='nearest', cmap='Blues')
# mark the default points
plt.scatter(x_afp, y_afp, s=60, marker='o', color='gold')
plt.scatter(x_both, y_both, s=60, marker='o', color='red')
# validation marker
if validation:
plt.scatter(x_miss, y_miss, s=10, marker='s', color='r')
plt.scatter(x_hit, y_hit, s=10, marker='s', color='limegreen')
plt.scatter(x_fp, y_fp, s=10, marker='s', color='gold')
# plot the AFP clustering
ax2 = plt.subplot2grid((4, 1), (2, 0), rowspan=2)
ax2.xaxis.tick_top()
ax2.set_xticks((t))
plt.xticks(t, (t+1)*epoch_length-epoch_length/2) # align minimap with clusterplots
ax2.grid(which='both')
plt.xlim([0, len(t)-1])
plt.ylim([len(minimap), 0])
    plt.yticks(marks, [x+1 for x in marks])  # only tick channels of interest; +1 because numpy and mne indices differ
plt.ylabel('channel number')
plt.xlabel('time')
plt.scatter(x_psd, y_psd, s=60, marker='o', color='purple')
plt.scatter(x_both, y_both, s=60, marker='o', color='red')
# validation marker
if validation:
plt.scatter(x_miss, y_miss, s=20, marker='s', color='r')
plt.scatter(x_hit, y_hit, s=20, marker='s', color='limegreen')
plt.scatter(x_fp, y_fp, s=20, marker='s', color='gold')
#TODO find better way to find zlimit
# zlimit_psd = np.percentile(psd_percentiles, 50) * 4
ax2.imshow(np.clip(psd_nearest_neighbour, 0, zlimit_psd).T*-1,
aspect='auto', interpolation='nearest', cmap='Blues')
plt.close()
return summary_plot
def suggest_bads(raw, sensitivity_steps=97, sensitivity_psd=95,
fraction=0.001, epoch_length=None, summary_plot=False,
show_raw=False, n_jobs = 1, validation=True):
'''
Function to suggest bad channels. The bad channels are identified using
time domain methods looking for sharp jumps in short windows of data and
in the frequency domain looking for channels with unusual power
spectral densities.
Note: This function is still in the development stage and contains a lot of
hard coded values.
Parameters
----------
raw: str | mne.io.Raw
Filename or the raw object.
epoch_length: int | None
Length of the window to apply methods on.
summary_plot: bool
Set True to generate a summary plot showing suggested bads.
    sensitivity_steps: float in range of [0, 100]
        Percentile threshold for the time-domain step (jump) detection;
        0 marks all channels, 100 marks none.
    sensitivity_psd: float in range of [0, 100]
        Percentile threshold for the frequency-domain (PSD) analysis;
        0 marks all channels, 100 marks none.
Returns
-------
suggest_bads: list
List of suggested bad channels.
raw: mne.io.Raw
Raw object updated with suggested bad channels.
'''
raw = check_read_raw(raw, preload=False)
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
ecg=False, exclude=[])
# if epoch length is not provided, chose a suitable length
if not epoch_length:
epoch_length = int(raw.n_times/(raw.info['sfreq'] * 20))
print('epoch_length of %d chosen' % epoch_length)
# add 0.01 to avoid 'dropping' of first epoch
events = mne.make_fixed_length_events(raw, 42, start=0.01,
duration=epoch_length)
epochs = mne.Epochs(raw, events, event_id=42, tmin=-epoch_length/2,
tmax=epoch_length/2, picks=picks)
picks_bad = [raw.ch_names.index(l) for l in raw.info['bads']]
# compute differences in time domain to identify abrupt jumps in the data
afps, afp_suspects, afp_nearest_neighbour, zlimit_afp = \
clustered_afp(epochs, sensitivity_steps, fraction, n_jobs = n_jobs)
# compute the psds and do the clustering to identify unusual channels
psds, psd_suspects, psd_nearest_neighbour, zlimit_psd = \
clustered_psd(epochs, sensitivity_psd, picks, n_jobs = n_jobs)
# if any of the channels' psds are all zeros, mark as suspect
zero_suspects = [ind for ind in range(psds.shape[1]) if not np.any(psds[:, ind, :])]
# reduce lists of marked epochs to lists of bad channels
picks_autodetect = \
list(set().union([item for sublist in psd_suspects for item in sublist],
[item for sublist in afp_suspects for item in sublist]))
    # get the bads suggested but not previously marked
picks_fp = [x for x in set(picks_autodetect) if x not in set(picks_bad)]
# marks are all channels of interest, including premarked bad channels
# and zero channels (channel indices)
jumps = list(set([item for sublist in afp_suspects for item in sublist]))
jumps_ch_names = [raw.ch_names[i] for i in jumps]
unusual = list(set([item for sublist in psd_suspects for item in sublist]))
unusual_ch_names = [raw.ch_names[i] for i in unusual]
dead_ch_names = [raw.ch_names[i] for i in zero_suspects]
print("Suggested bads [jumps]:", jumps_ch_names)
print("Suggested bads [unusual]:", unusual_ch_names)
print("Suggested bads [dead]:", dead_ch_names)
marks = list(set(picks_autodetect) | set(picks_bad) | set(zero_suspects))
# show summary plot for enhanced manual inspection
#TODO zero suspects do not have any colour coding for the moment
if summary_plot:
fig = \
plot_autosuggest_summary(afp_nearest_neighbour, psd_nearest_neighbour,
picks, afp_suspects, psd_suspects, picks_bad,
picks_fp, zlimit_afp, zlimit_psd,
epoch_length, marks,
validation=False)
fig.show()
# channel names in str
suggested = [raw.ch_names[i] for i in marks]
# add suggested channels to the raw.info
raw.info['bads'] = suggested
print('Suggested bad channels: ', suggested)
if show_raw:
raw.plot(block=True)
visual = raw.info['bads']
visual.sort()
print('Bad channels after visual inspection: ', visual)
return visual, raw
else:
return suggested, raw
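# Minimal usage sketch for suggest_bads (the file name is illustrative only; either a
# filename or an already-loaded mne Raw object is accepted via check_read_raw):
#   >>> bads, raw = suggest_bads('sample_audvis_raw.fif', summary_plot=True)
#   >>> print(bads)   # suggested bad channel names, also written to raw.info['bads']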
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from __future__ import with_statement
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from powerdns import PowerDNSCollector
################################################################################
class TestPowerDNSCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('PowerDNSCollector', {
'interval': 1,
'bin': 'true',
'use_sudo': False,
})
self.collector = PowerDNSCollector(config, None)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_fake_data(self, publish_mock):
with patch('subprocess.Popen.communicate', Mock(return_value=(
self.getFixture('pdns_control-2.9.22.6-1.el6-A').getvalue(),
''))):
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
with patch('subprocess.Popen.communicate', Mock(return_value=(
self.getFixture('pdns_control-2.9.22.6-1.el6-B').getvalue(),
''))):
self.collector.collect()
metrics = {
'corrupt-packets': 1.0,
'deferred-cache-inserts': 2.0,
'deferred-cache-lookup': 3.0,
'latency': 4.0,
'packetcache-hit': 5.0,
'packetcache-miss': 6.0,
'packetcache-size': 7.0,
'qsize-q': 8.0,
'query-cache-hit': 9.0,
'query-cache-miss': 10.0,
'recursing-answers': 11.0,
'recursing-questions': 12.0,
'servfail-packets': 13.0,
'tcp-answers': 14.0,
'tcp-queries': 15.0,
'timedout-packets': 16.0,
'udp-answers': 17.0,
'udp-queries': 18.0,
'udp4-answers': 19.0,
'udp4-queries': 20.0,
'udp6-answers': 21.0,
'udp6-queries': 22.0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
"""Testing phantombuild."""
import tempfile
from pathlib import Path
import pytest
import phantombuild as pb
from phantombuild.phantombuild import (
CompileError,
HDF5LibraryNotFound,
PatchError,
RepoError,
)
VERSION = '3252f52501cac9565f9bc40527346c0e224757b9'
def test_get_phantom():
"""Test getting Phantom from GitHub."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
pb.get_phantom(path)
(path / '.git/config').unlink()
with pytest.raises(RepoError):
pb.get_phantom(path)
def test_checkout_phantom_version_clean():
"""Test checking out a Phantom version."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
pb.checkout_phantom_version(path=path, version=VERSION)
pb.checkout_phantom_version(path=path, version=VERSION)
def test_checkout_phantom_version_dirty():
"""Test checking out a Phantom version."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
(path / 'src/main/phantom.F90').unlink()
pb.checkout_phantom_version(path=path, version=VERSION)
def test_phantom_patch():
"""Test patching Phantom."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
pb.get_phantom(path)
pb.checkout_phantom_version(path=path, version=VERSION)
patch = Path(__file__).parent / 'stub' / 'test.patch'
pb.patch_phantom(path=path, patch=patch)
kwargs = {'path': path, 'patch': patch}
with pytest.raises(PatchError):
pb.patch_phantom(**kwargs)
def test_build_phantom():
"""Test building Phantom."""
with tempfile.TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / 'phantom'
hdf5_path = Path('non_existent_dir')
pb.get_phantom(path)
pb.build_phantom(
path=path,
setup='empty',
system='gfortran',
extra_options={'MAXP': '1000000'},
)
kwargs = {
'path': path,
'setup': 'empty',
'system': 'gfortran',
'hdf5_path': hdf5_path,
}
with pytest.raises(HDF5LibraryNotFound):
pb.build_phantom(**kwargs)
kwargs = {
'path': path,
'setup': 'FakeSetup',
'system': 'gfortran',
}
with pytest.raises(CompileError):
pb.build_phantom(**kwargs)
def test_setup_calculation():
"""Test setting up Phantom calculation."""
with tempfile.TemporaryDirectory() as tmpdirname:
phantom_path = Path(tmpdirname) / 'phantom'
run_path = Path(tmpdirname) / 'run_path'
input_dir = Path(__file__).parent / 'stub'
in_file = input_dir / 'disc.in'
setup_file = input_dir / 'disc.setup'
pb.get_phantom(phantom_path)
pb.build_phantom(
path=phantom_path, version=VERSION, setup='disc', system='gfortran'
)
pb.setup_calculation(
prefix='disc',
setup_file=setup_file,
in_file=in_file,
run_path=run_path,
phantom_path=phantom_path,
)
|
nilq/baby-python
|
python
|
import os
import time
from github import Github
from django.db import models
from calaccess_raw import get_model_list
from django.template.loader import render_to_string
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand):
help = 'Create GitHub issues for fields missing verbose and/or help text'
def add_arguments(self, parser):
"""
Adds custom arguments specific to this command.
"""
super(Command, self).add_arguments(parser)
parser.add_argument(
"--dry-run",
action="store_true",
dest="dry_run",
default=False,
help="Print text of issues without sending to Github"
)
def handle(self, *args, **options):
"""
Connect to GitHub using the token stored in the environment, loop over model fields, and \
create an issue for any field missing verbose and/or help text.
"""
super(Command, self).handle(*args, **options)
self.dry_run = options["dry_run"]
# set up connect to Github account
self.gh = Github(os.getenv('GITHUB_TOKEN'))
self.org = self.gh.get_organization("california-civic-data-coalition")
self.repo = self.org.get_repo("django-calaccess-raw-data")
self.labels = [
self.repo.get_label("small"),
self.repo.get_label("documentation"),
self.repo.get_label("enhancement"),
]
self.header(
"Creating GitHub issues for model choice fields"
)
model_list = sorted(
get_model_list(),
key=lambda x: (x().klass_group, x().klass_name)
)
models_to_fix = []
for m in model_list:
fields_to_fix = {}
for f in m._meta.fields:
if f.name == 'id':
continue
# test for verbose name
if not f.__dict__['_verbose_name']:
fields_to_fix[f] = {'no_verbose': True, 'no_help': False}
elif len(f.__dict__['_verbose_name']) == 0:
fields_to_fix[f] = {'no_verbose': True, 'no_help': False}
# test for help text
if len(f.help_text) == 0:
try:
fields_to_fix[f]['no_help'] = True
except KeyError:
fields_to_fix[f] = {'no_verbose': False, 'no_help': True}
if len(fields_to_fix) > 0:
fs = []
for k, v in fields_to_fix.items():
fs.append((k, v))
models_to_fix.append(
(m, tuple(fs))
)
for model, fields in models_to_fix:
context = dict(
model_name=model.__name__,
model_docs=model().DOCUMENTCLOUD_PAGES,
file_name=model.__module__.split('.')[-1] + '.py',
fields=fields,
)
title = "Add verbose and/or help text fields on {model_name} (in \
{file_name})".format(**context)
body = render_to_string(
'toolbox/createverboseandhelptextissues.md',
context,
)
self.log("-- Creating issue for {model_name}".format(**context))
if self.dry_run:
print('==========================')
print(title)
print('--------------------------')
print(body)
print('==========================')
else:
self.repo.create_issue(
title,
body=body,
labels=self.labels,
)
time.sleep(2.5)
|
nilq/baby-python
|
python
|
"""
just run this script with python converter.py .
It will convert pytorch.ipynb to html page docs/pytorch-examples.html
"""
import nbformat
import markdown
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
notebook = nbformat.read('Pytorch.ipynb', as_version=nbformat.NO_CONVERT)
content = ''
cache = ''
for cell in notebook['cells']:
if cell['cell_type'] == 'code':
source = cell['source']
if source.startswith('#left') or source.startswith('#right'):
trimmed_source = source[source.index('\n') + 1:]
cache += "<div>{}</div>".format(highlight(trimmed_source, PythonLexer(), HtmlFormatter()))
if source.startswith('#right'):
content += "<div class='leftright-wrapper'><div class='leftright-cells'>{}</div></div> ".format(cache)
cache = ''
elif cell['cell_type'] == 'markdown':
content += "<div class='markdown-cell'>{}</div>".format(markdown.markdown(cell['source']))
else:
raise RuntimeError('not expected type of cell' + cell['cell_type'])
styles = HtmlFormatter().get_style_defs('.highlight')
styles += '''
body {
padding: 50px 10px;
}
.leftright-wrapper {
text-align: center;
overflow-x: auto;
}
.leftright-cells {
display: inline-flex;
text-align: left;
}
.leftright-cells > div {
padding: 0px 10px;
min-width: 350px;
}
.markdown-cell{
max-width: 700px;
margin: 0px auto;
}
h1 {
text-align: center;
padding: 10px 0px 0px;
}
'''
meta_tags = '''
<meta property="og:title" content="Writing better code with pytorch and einops">
<meta property="og:description" content="Learning by example: rewriting and fixing popular code fragments">
<meta property="og:image" content="http://arogozhnikov.github.io/images/einops/einops_video.gif">
<meta property="og:video" content="http://arogozhnikov.github.io/images/einops/einops_video.mp4" />
<meta property="og:url" content="https://arogozhnikov.github.io/einops/pytorch-examples.html">
<meta name="twitter:card" content="summary_large_image">
<!-- Non-Essential, But Recommended -->
<meta property="og:site_name" content="Writing better code with pytorch and einops">
<meta name="twitter:image:alt" content="Learning by example: rewriting and fixing popular code fragments">
'''
github_ribbon = '''
<a href="https://github.com/arogozhnikov/einops" class="github-corner" aria-label="View source on GitHub">
<svg width="80" height="80" viewBox="0 0 250 250" style="fill:#151513; color:#fff; position: absolute; top: 0; border: 0; right: 0;" aria-hidden="true">
<path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path>
<path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path>
</svg></a>
<style>.github-corner:hover .octo-arm{animation:octocat-wave 560ms ease-in-out}@keyframes octocat-wave{0%,100%{transform:rotate(0)}20%,60%{transform:rotate(-25deg)}40%,80%{transform:rotate(10deg)}}@media (max-width:500px){.github-corner:hover .octo-arm{animation:none}.github-corner .octo-arm{animation:octocat-wave 560ms ease-in-out}}</style>
'''
result = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
{meta_tags}
<title>Writing better code with pytorch+einops</title>
<style>{styles}</style>
</head>
<body>
{github_ribbon}
{content}
</body>
</html>
'''
with open('../pytorch-examples.html', 'w') as f:
f.write(result)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""This is an example to train a task with CMA-ES.
Here it runs CartPole-v1 environment with 100 epoches.
Results:
AverageReturn: 100
RiseTime: epoch 38 (itr 760),
but regression is observed in the course of training.
"""
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.np.algos import CMAES
from metarl.np.baselines import LinearFeatureBaseline
from metarl.sampler import OnPolicyVectorizedSampler
from metarl.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def cma_es_cartpole(ctxt=None, seed=1):
"""Train CMA_ES with Cartpole-v1 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = MetaRLEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_samples = 20
algo = CMAES(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
runner.train(n_epochs=100, batch_size=1000)
cma_es_cartpole()
|
nilq/baby-python
|
python
|
import uvicorn
from .main import app
uvicorn.run(app)
|
nilq/baby-python
|
python
|
import collections.abc
from torch.optim.lr_scheduler import LambdaLR
def chunk(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
def flatten(d, parent_key='', sep='__'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def unflatten(dictionary, sep='__'):
out_dict = dict()
for key, value in dictionary.items():
parts = key.split(sep)
d = out_dict
for part in parts[:-1]:
if part not in d:
d[part] = dict()
d = d[part]
d[parts[-1]] = value
return out_dict
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
return LambdaLR(optimizer, lr_lambda, last_epoch)
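# Illustrative usage sketch (not part of the original module): round-trips a nested
# dict through flatten/unflatten and attaches the linear warmup/decay schedule to a
# torch optimizer. The model and hyperparameters below are arbitrary placeholders.
if __name__ == "__main__":
    import torch
    nested = {"model": {"lr": 0.1, "layers": 3}, "seed": 42}
    flat = flatten(nested)  # {'model__lr': 0.1, 'model__layers': 3, 'seed': 42}
    assert unflatten(flat) == nested
    optimizer = torch.optim.SGD(torch.nn.Linear(4, 2).parameters(), lr=0.1)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        optimizer.step()
        scheduler.step()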
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy
import random
# Titan modules
import create_model
## Generate initial data set
N = 400
sig3 = 1 # 3-sigma error (controls the statistical fluctuation)
a = 10 # radius of the helix
b = 33/(2*np.pi) # 2*pi*b step of the helix
epsihelical = 1 # -1 or 1
theta = np.zeros((N, 1)) #Initialise column vector
for i in range(0, N):
theta[i] = 100/b*random.random()
X1 = a*np.cos(theta)
X2 = a*epsihelical*np.sin(theta)
X3 = b*theta
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X1,X2,X3, c='b', marker='o', s=0.5)
ax.set_xlim(-20,20)
ax.set_ylim(-20,20)
ax.set_zlim(-10,100)
# We add the statistical fluctuation
X1_ = np.zeros((N, 1)) #Initialise column vector
X2_ = np.zeros((N, 1)) #Initialise column vector
X3_ = np.zeros((N, 1)) #Initialise column vector
for i in range(0, N):
X1_[i] = X1[i] + sig3/3*random.gauss(0, 1)
X2_[i] = X2[i] + sig3/3*random.gauss(0, 1)
X3_[i] = X3[i] + sig3/3*random.gauss(0, 1)
fig = plt.figure(2)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X1_,X2_,X3_, c='b', marker='o', s=0.5)
ax.set_xlim(-20,20)
ax.set_ylim(-20,20)
ax.set_zlim(-10,100)
## 3 - Create probabilistic representation of the data
PLoM_model = create_model.TitanPLoM()
PLoM_model.Type = 'PLoM'
PLoM_model.ExpDesign.X = np.concatenate((X1_, X2_), axis=1) #X must be column vectors
PLoM_model.ExpDesign.Y = X3_
PLoM_model.Opt.scaling = 0
PLoM_model.Opt.optimizationm = 0
PLoM_model.Opt.epsvalue = 1.57 # value of the smoothing parameter (is not determined with the optimization procedure)
PLoM_model.Opt.m = 4
PLoM_model.titan_PLoM()
plt.show()
# 4 - Sample new realizations from the probabilistic representation of the data
PLoM_model.Itoopt.nMC = 10
PLoM_model.Itoopt.M0 = 110
PLoM_model.Itoopt.l0 = 0
PLoM_model.Itoopt.dt = 0.1196
Y = titan_PLoM_eval(PLoM_model)
# # 5 - Processing of new realizations
# nMC = Metamodel.Itoopt.nMC
# X1new = zeros(N*nMC,1)
# X2new = zeros(N*nMC,1)
# X3new = zeros(N*nMC,1)
# for ll=1:nMC
# for ii=1:N
# X1new((ll-1)*N+ii) = Y(1,ii,ll)
# X2new((ll-1)*N+ii) = Y(2,ii,ll)
# X3new((ll-1)*N+ii) = Y(3,ii,ll)
# end
# end
# figure
# scatter3(X1,X2,X3,'blue')
# hold on
# scatter3(X1new,X2new,X3new,'red')
# xlim([-20,20])
# ylim([-20,20])
# zlim([-10,100])
# figure
# plot(X1new)
# figure
# plot(X2new)
# figure
# plot(X3new)
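# Hedged Python sketch of the commented-out MATLAB post-processing above, assuming Y
# has shape (3, N, nMC) to match the Y(1,ii,ll) indexing in the MATLAB comments:
# X1new = Y[0, :, :].flatten(order='F')
# X2new = Y[1, :, :].flatten(order='F')
# X3new = Y[2, :, :].flatten(order='F')
# fig = plt.figure(3)
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(X1, X2, X3, c='b', marker='o', s=0.5)
# ax.scatter(X1new, X2new, X3new, c='r', marker='o', s=0.5)
# plt.show()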
|
nilq/baby-python
|
python
|
from ast.Expresion import Expresion
from ast.Symbol import Symbol
from ast.Expresion import Expresion
from ast.Symbol import TIPOVAR as Tipo
from ast.Sentencia import Sentencia
import Reportes.ReporteD as Sentencias
class Select(Sentencia):
#SELECT selectclausules FROM selectbody wherecondicion
#Selecttable : SELECT selectclausules FROM selectbody wherecondicion
#St[0] = Select(t[2],t[4],t.slice[1].lineno,find_column(t.slice[1]),t[5])
def __init__(self,id,value,line, column,declare):
self.id = id
self.line= line
self.column = column
self.value = value
self.type = declare
def ejecutar(self,entorno,tree):
print("zVV Select")
#print("sentencias v "+Expres.id)
y= {}
try:
if self.id.type=="*":
print("xxc")
try:
print("zVV 1")
if self.value.type=="ID":
y = self.value.value
print(" y= "+str(y))
SentenciasR = Sentencias.ReporteD()
print("7000a ")
SentenciasR.write(y,"Select*from "+y,entorno,tree)
print("7001 ")
except:
pass
except:
pass
tree.agregarnodos(self)
return False
|
nilq/baby-python
|
python
|
"""Function wrapping logic."""
import importlib
import os
import sys
import logging
import tempfile
import atexit
import functools
import typing
import traceback
from flask import request
import dploy_kickstart.errors as pe
import dploy_kickstart.transformers as pt
import dploy_kickstart.annotations as pa
log = logging.getLogger(__name__)
def nb_to_py(nb_file: str, location: str) -> str:
"""Convery .ipynb to temporary .py file."""
try:
import nbformat
import nbconvert
except ImportError as e:
raise pe.ScriptImportError(
f"{e}\nCannot import notebook conversion libraries."
+ "Please add `jupyter` (or `nbformat` and `nbconvert`)"
+ " to your dependencies.",
)
handle, filename = tempfile.mkstemp(text=True, suffix=".py")
with os.fdopen(handle, "w") as tf:
with open(os.path.join(location, nb_file)) as fh:
nb = nbformat.reads(fh.read(), nbformat.NO_CONVERT)
exporter = nbconvert.PythonExporter()
src, _ = exporter.from_notebook_node(nb)
tf.writelines(src)
# delete file on exit
atexit.register(functools.partial(os.remove, filename))
return os.path.basename(filename), os.path.dirname(filename)
def get_func_annotations(mod: typing.Generic) -> typing.List[pa.AnnotatedCallable]:
"""Scan usercode for function annotations."""
cm = []
# check which functions have relevant args and return 'em
for name, val in mod.__dict__.items():
if callable(val):
ac = pa.AnnotatedCallable(val)
if ac.has_args():
cm.append(ac)
return cm
def import_entrypoint(entrypoint: str, location: str) -> typing.Generic:
"""Import entrypoint from user code."""
# check whether entrypoint contains a path prefix and if so add it to location
if os.path.dirname(entrypoint) != "":
location = os.path.join(location, os.path.dirname(entrypoint))
entrypoint = os.path.basename(entrypoint)
# add location to path for mod importing
sys.path.insert(0, location)
# switch to location to allow for relative asset loading in usercode
os.chdir(location)
_, ext = os.path.splitext(entrypoint)
if ext == ".ipynb":
entrypoint, location = nb_to_py(entrypoint, location)
# add location of temporary .py file so it can be imported
sys.path.insert(0, location)
elif ext == ".py":
pass
else:
log.error(f"unsupported entrypoint: {entrypoint}")
raise pe.UnsupportedEntrypoint(entrypoint)
mod_file, _ = os.path.splitext(entrypoint)
msg = "loading module '{}' (modfile: {}) from location '{}'".format(
entrypoint, mod_file, location
)
log.debug(msg)
try:
mod = importlib.import_module(mod_file, location)
except Exception as e:
raise pe.ScriptImportError(f"{msg}: {e}")
return mod
def func_wrapper(f: pa.AnnotatedCallable) -> typing.Callable:
"""Wrap functions with request logic."""
def exposed_func() -> typing.Callable:
# preprocess input for callable
try:
res = pt.MIME_TYPE_REQ_MAPPER[request.is_json](f, request)
except Exception:
raise pe.UserApplicationError(
message=f"error in executing '{f.__name__()}' method.",
traceback=traceback.format_exc(),
)
# determine whether or not to process response before sending it back to caller
try:
return pt.MIME_TYPE_RES_MAPPER[res.__class__.__name__](res)
except Exception:
raise pe.UserApplicationError(
message=f"error in executing '{f.__name__()}' method, the return type "
f"{res.__class__.__name__} is not supported",
traceback=traceback.format_exc(),
)
return exposed_func
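# Illustrative sketch (not part of this module; the entrypoint path, Flask app and
# route naming below are assumptions): wiring the helpers above into an application.
#
#     mod = import_entrypoint("main.py", "/opt/app")
#     for annotated in get_func_annotations(mod):
#         app.add_url_rule(
#             f"/{annotated.__name__()}",
#             view_func=func_wrapper(annotated),
#             methods=["POST"],
#         )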
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# minimal imports for faster startup
import os
from logger import logger
def run():
import time
import sys
import signal
import json
os.environ["QT_QPA_PLATFORM"] = "xcb" # window oddly resizes when regaining focus
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QHBoxLayout
from PyQt5.QtCore import QThread, QObject, pyqtSignal, pyqtSlot, Qt
from PyQt5.QtGui import QFont
# local
from unix_socket import UnixSocket
SOCKET_PATH = "/tmp/speech_gui.sock"
class Server(QThread):
update_signal = pyqtSignal(str)
def __init__(self):
super().__init__()
self._quit = False
self.state = {
"pause": False,
"hold": False,
"shift": False,
"ctrl": False,
"alt": False,
"win": False,
"key": ""
}
def run(self):
self._update()
while not self._quit:
try:
os.unlink(SOCKET_PATH)
except OSError:
if os.path.exists(SOCKET_PATH):
raise
sock = UnixSocket(SOCKET_PATH, 100)
sock.listen()
while True:
logger.info('Wait for a connection')
sock.accept()
logger.info('Connected. Listening for keys ...')
try:
# Receive the data in small chunks and retransmit it
while True:
msg = sock.receive()
self.state["pause"] = msg[0] == "1"
self.state["hold"] = msg[1] == "1"
self.state["shift"] = msg[2] == "1"
self.state["ctrl"] = msg[3] == "1"
self.state["alt"] = msg[4] == "1"
self.state["win"] = msg[5] == "1"
self.state["key"] = msg[6:]
self._update()
except RuntimeError as err:
logger.info(err)
finally:
logger.info('Clean up the connection')
sock.close_connection()
exit()
def _update(self):
message = json.dumps(self.state)
logger.info(message)
self.update_signal.emit(message)
def quit(self):
self._quit = True
class App(QObject):
colors = {
"background": "#B7EBB9",
"mods": "#8875E4DE",
"mods_hold": "#88F3E803",
"mod_active": "#8804C1E1",
"mod_active_hold": "#88F6AC0D"
}
labels = {}
def __init__(self):
super().__init__()
modsLayout = QHBoxLayout()
self.modsWidget = QWidget()
self.modsWidget.setLayout(modsLayout)
self.modsWidget.setStyleSheet('''
QLabel { font-size: 12pt; min-width: 14px; }
.QWidget { border-bottom-left-radius: 10px; border-top-left-radius: 10px; }
''')
layout = QHBoxLayout()
layout.addWidget(self.modsWidget)
self.widget = QWidget()
self.widget.setLayout(layout)
self.widget.setStyleSheet('''
QLabel { font-size: 12pt; }
QWidget { background-color: #88B7EBB9 }
''')
self.widget.setAttribute(Qt.WA_TranslucentBackground, True)
self.widget.setWindowFlags(Qt.FramelessWindowHint)
for labelKey in ['shift', 'ctrl', 'alt', 'win']:
label = QLabel()
label.setText(labelKey[0])
label.setAlignment(Qt.AlignCenter)
modsLayout.addWidget(label)
self.labels[labelKey] = label
self.labelKey = QLabel()
self.labelKey.setText('')
self.labelKey.setFixedWidth(100)
layout.addWidget(self.labelKey)
# layout.addStretch()
layout.setAlignment(Qt.AlignLeft)
layout.setSpacing(0)
self.widget.setWindowTitle("speechwindow")
self.widget.show()
class PostResizeThread(QThread):
def __init__(self, widget):
super().__init__()
self.widget = widget
def run(self):
time.sleep(.1)
self.widget.setGeometry(0, 0, 130, 40)
self.post_resize_thread = PostResizeThread(self.widget)
self.post_resize_thread.start()
@pyqtSlot(str)
def update(self, message):
data = json.loads(message)
modsBGColor = self.colors['mods_hold'] if data['hold'] else self.colors['mods']
self.modsWidget.setStyleSheet("""
QLabel { font-size: 12pt; min-width: 14px; }
.QWidget { border-bottom-left-radius: 10px; border-top-left-radius: 10px; background-color: %s}
""" % (modsBGColor))
for key, label in self.labels.items():
if data[key]:
colorKey = f"mod_active{'_hold' if data['hold'] else ''}"
label.setStyleSheet(f"background-color: {self.colors[colorKey]}")
else:
label.setStyleSheet(f"background-color: none")
self.labelKey.setText(" " + data["key"])
qapp = QApplication(sys.argv)
app = App()
serverthread = Server()
# thread safe communication, QtGui requires all gui related code to be called from the same thread
serverthread.update_signal.connect(app.update, Qt.QueuedConnection)
# design flaw, see https://stackoverflow.com/q/4938723/6040478
signal.signal(signal.SIGINT, signal.SIG_DFL)
serverthread.start()
qapp.exec_()
logger.info('Quit, collecting threads.')
serverthread.quit()
serverthread.wait()
# signal.pause()
run()
|
nilq/baby-python
|
python
|
from __future__ import print_function, absolute_import
import abc
import six
from lazy import lazy
from pyreference.utils.genomics_utils import iv_from_pos_range, \
iv_from_pos_directional_before_after, dict_to_iv
class GenomicRegion(object):
""" Base class for both Gene and Transcript """
def __init__(self, reference, accession_id, data_dict):
self.reference = reference
self.accession_id = accession_id
self._dict = data_dict
def get_id(self):
return self.accession_id
@property
def biotype(self):
return '/'.join(sorted(self.get_biotypes()))
def get_biotypes(self):
# On gene it's a string
biotype = self._dict["biotype"]
if isinstance(biotype, six.string_types):
biotypes = biotype.split(",")
elif isinstance(biotype, list):
biotypes = biotype
return biotypes
@lazy
def iv(self):
return dict_to_iv(self._dict)
@lazy
def tss(self):
""" (Representative) Transcript Start Site
This is NOT the most 5' position (use iv.start_d_as_pos for that) """
transcript_iv = self.get_representative_transcript().iv
return transcript_iv.start_d_as_pos
def get_promoter_iv(self, promoter_range=1000):
return iv_from_pos_range(self.tss, promoter_range)
def get_promoter_sequence(self, promoter_range=1000):
iv = self.get_promoter_iv(promoter_range)
return self.reference.get_sequence_from_iv(iv)
def get_promoter_iv_custom_range(self, upstream_distance, downstream_distance):
"""Get any interval surrounding TSS
Note: total length of interval = upstream_distance + downstream_distance (The TSS base is included in downstream_distance)"""
return iv_from_pos_directional_before_after(self.tss, upstream_distance, downstream_distance)
def get_promoter_sequence_custom_range(self, upstream_distance, downstream_distance):
iv = self.get_promoter_iv_custom_range(upstream_distance, downstream_distance)
return self.reference.get_sequence_from_iv(iv)
@abc.abstractmethod
def get_representative_transcript(self):
pass
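# Illustrative sketch (not part of this module): how the promoter helpers above are
# meant to be used on a concrete Gene/Transcript subclass. The lookup below is an
# assumption, not an API defined here.
#
#     gene = reference.genes["BRCA2"]  # hypothetical accessor
#     promoter_iv = gene.get_promoter_iv(promoter_range=1000)
#     custom_iv = gene.get_promoter_iv_custom_range(upstream_distance=2000,
#                                                   downstream_distance=500)
#     sequence = gene.get_promoter_sequence(promoter_range=1000)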
|
nilq/baby-python
|
python
|
import os
import dill
import unittest
import collections
from swamp.utils import remove
from swamp.mr.mrresults import MrResults
RESULTS = collections.namedtuple('results', ['results'])
WORKDIR = os.path.join(os.environ['CCP4_SCR'], 'test_workdir')
MR_DIR = os.path.join(WORKDIR, 'swamp_mr')
class MrResultsTestCase(unittest.TestCase):
def test_1(self):
search_1 = os.path.join(MR_DIR, 'search_1')
search_1_run_1 = os.path.join(MR_DIR, 'search_1', 'run_1')
search_2 = os.path.join(MR_DIR, 'search_2')
search_2_run_1 = os.path.join(MR_DIR, 'search_2', 'run_1')
search_2_run_2 = os.path.join(MR_DIR, 'search_2', 'run_2')
directories = [WORKDIR, MR_DIR, search_1, search_1_run_1, search_2, search_2_run_1, search_2_run_2]
for directory in directories:
if not os.path.isdir(directory):
os.mkdir(directory)
self.addCleanup(remove, WORKDIR)
results = RESULTS(
results=[['SEARCH_1', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor', 'local_CC',
'overall_CC', '15', 'acl', 'is_extended', 'solution']])
with open(os.path.join(search_1_run_1, 'results.pckl'), 'wb') as fhandle:
dill.dump(results, fhandle)
results = RESULTS(
results=[['SEARCH_2', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor', 'local_CC',
'overall_CC', '45', 'acl', 'is_extended', 'solution']])
with open(os.path.join(search_2_run_1, 'results.pckl'), 'wb') as fhandle:
dill.dump(results, fhandle)
results = RESULTS(
results=[['SEARCH_2', 'RUN_2', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor', 'local_CC',
'overall_CC', '9', 'acl', 'is_extended', 'solution']])
with open(os.path.join(search_2_run_2, 'results.pckl'), 'wb') as fhandle:
dill.dump(results, fhandle)
results = MrResults(swamp_workdir=WORKDIR)
self.assertListEqual(sorted((os.path.join(search_1_run_1, 'results.pckl'),
os.path.join(search_2_run_2, 'results.pckl'),
os.path.join(search_2_run_1, 'results.pckl'))), sorted(results.pickle_list))
self.assertListEqual([['SEARCH_2', 'RUN_2', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor',
'local_CC', 'overall_CC', '9', 'acl', 'is_extended', 'solution'],
['SEARCH_2', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor',
'local_CC', 'overall_CC', '45', 'acl', 'is_extended', 'solution'],
['SEARCH_1', 'RUN_1', 'LLG', 'TFZ', 'local_CC', 'overall_CC', 'rfree', 'rfactor',
'local_CC', 'overall_CC', '15', 'acl', 'is_extended', 'solution']]
, results.results)
self.assertListEqual(["SEARCH ID", "RUN ID", "LLG", "TFZ", "PHSR_CC_LOC", "PHSR_CC_ALL", "RFMC_RFREE",
"RFMC_RFACT", "RFMC_CC_LOC", "RFMC_CC_ALL", "SHXE_CC", "SHXE_ACL", "IS_EXTENDED",
"SOLUTION"], results._result_table_fields)
self.assertEqual(results.logger_header, """\n******************************************************************\
****
******************* SWAMP-MR RESULTS ***************
**********************************************************************
Recovering results now...
""")
|
nilq/baby-python
|
python
|
import functools
import itertools
import math
from evm.constants import (
UINT_255_MAX,
UINT_256_CEILING,
)
def int_to_big_endian(value):
byte_length = math.ceil(value.bit_length() / 8)
return (value).to_bytes(byte_length, byteorder='big')
def big_endian_to_int(value):
return int.from_bytes(value, byteorder='big')
def int_to_byte(value):
return bytes([value])
byte_to_int = ord
def ceilXX(value, ceiling):
remainder = value % ceiling
if remainder == 0:
return value
else:
return value + ceiling - remainder
ceil32 = functools.partial(ceilXX, ceiling=32)
ceil8 = functools.partial(ceilXX, ceiling=8)
def unsigned_to_signed(value):
if value <= UINT_255_MAX:
return value
else:
return value - UINT_256_CEILING
def signed_to_unsigned(value):
if value < 0:
return value + UINT_256_CEILING
else:
return value
def safe_ord(value):
if isinstance(value, int):
return value
else:
return ord(value)
def is_even(value):
return value % 2 == 0
def is_odd(value):
return value % 2 == 1
def get_highest_bit_index(value):
value >>= 1
for bit_length in itertools.count():
if not value:
return bit_length
value >>= 1
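# Quick worked examples (illustrative only, not part of the original module):
if __name__ == "__main__":
    assert ceil32(1) == 32 and ceil32(32) == 32 and ceil32(33) == 64
    # two's-complement style round trip over the 256-bit word
    assert unsigned_to_signed(UINT_256_CEILING - 1) == -1
    assert signed_to_unsigned(-1) == UINT_256_CEILING - 1
    assert get_highest_bit_index(8) == 3
    assert big_endian_to_int(int_to_big_endian(123456)) == 123456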
|
nilq/baby-python
|
python
|
"""
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: populate/pop_rules.py
Author: Jon Gunderson
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import django
from django.core.exceptions import ObjectDoesNotExist
import re
fp = os.path.realpath(__file__)
path, filename = os.path.split(fp)
fae2_path = path.split('/populate')[0]
sys.path.append(fae2_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fae2.settings')
from django.conf import settings
django.setup()
import json
from abouts.models import FAQ
def addFAQ(seq, title, description):
try:
faq = FAQ.objects.get(seq=seq)
print("Updated FAQ: " + title)
faq.title = title
faq.description = description
except:
print("Created FAQ: " + title)
faq = FAQ(seq=seq, title=title, description=description)
faq.save()
desc = """
There are two major reasons why FAE 2.0 and AInspector Sidebar evaluation results may be different:
1. When a page includes dynamically loaded content, the DOM that FAE 2.0 sees will often be different from the DOM that AInspector Sidebar sees, resulting in different evaluation results. The more dynamic the content in the page, the more possibility of a discrepancy.
1. Pages that are responsive to screen dimensions will have different content rendered depending on the width of the screen. FAE 2.0 generally has a wide screen and AInspector Sidebar will analyze the content based on the current screen width.
**Note:** AInspector Sidebar will generally be more accurate than FAE for looking at Individual Pages.
"""
addFAQ(1, "FAE 2.0 and AInspector Sidebar evaluation results different?", desc)
desc = """
The rules are designed to help users understand what accessibility issues they need to consider in the design of a website.
Manual checks help users identify what they need to learn about accessibility in order to ensure their web resource is accessible.
Currently manual checks help inform users of what they need to understand about accessibility, but in FAE 2.1 users will be able to mark manual checks as Pass, Fail or Not Applicable to update the report details, summary and implementation scores for rules and rule categories.
"""
addFAQ(2, "Why report manual checking results?", desc)
|
nilq/baby-python
|
python
|
"""
A complex number is a number in the form a + b * i where a and b are real and i satisfies i^2 = -1.
`a` is called the real part and `b` is called the imaginary part of `z`.
The conjugate of the number `a + b * i` is the number `a - b * i`.
The absolute value of a complex number `z = a + b * i` is a real number `|z| = sqrt(a^2 + b^2)`.
The square of the absolute value `|z|^2` is the result of multiplication of `z` by its complex conjugate.
The sum/difference of two complex numbers involves adding/subtracting their real and imaginary parts separately:
`(a + i * b) + (c + i * d) = (a + c) + (b + d) * i`,
`(a + i * b) - (c + i * d) = (a - c) + (b - d) * i.`
Multiplication result is by definition `(a + i * b) * (c + i * d) = (a * c - b * d) + (b * c + a * d) * i`.
The reciprocal of a non-zero complex number is `1 / (a + i * b) = a/(a^2 + b^2) - b/(a^2 + b^2) * i`.
Dividing a complex number a + i * b by another c + i * d gives:
`(a + i * b) / (c + i * d) = (a * c + b * d)/(c^2 + d^2) + (b * c - a * d)/(c^2 + d^2) * i`.
Raising `e` to a complex exponent can be expressed as:
`e^(a + i * b) = e^a * e^(i * b)`,
the last term of which is given by Euler's formula `e^(i * b) = cos(b) + i * sin(b)`.
Task:
Implement the following operations:
- addition, subtraction, multiplication and division of two complex numbers,
- conjugate, absolute value, exponent of a given complex number.
Assume the programming language you are using does not have an implementation of complex numbers.
"""
from math import sqrt, cos, sin, exp
from typing import Union
class ComplexNumber:
"""
A Class to emulate Complex Numbers
"""
REAL_SET = {int, float}
def __init__(self, real: Union[int, float], imaginary: Union[int, float] = 0):
self._real = real
self._imag = imaginary
self._validate()
def _validate(self):
if (type(self.real) not in self.REAL_SET or
type(self.imaginary) not in self.REAL_SET):
raise ValueError("Both the real and imaginary parts of the complex number must be real!!")
@property
def real(self):
"""
:return: Real part of the Complex Number
"""
return self._real
@property
def imaginary(self):
"""
:return: Imaginary part of the Complex Number
"""
return self._imag
def __eq__(self, other):
return self.real == other.real and self.imaginary == other.imaginary
def __add__(self, other):
return self.__class__(self.real + other.real, self.imaginary + other.imaginary)
def __mul__(self, other):
return self.__class__(self.real * other.real - self.imaginary * other.imaginary,
self.real * other.imaginary + self.imaginary * other.real)
def __sub__(self, other):
return self + self.__class__(-other.real, -other.imaginary)
# Since the square of absolute value of a Complex Number will have `imaginary == 0`
def _abs_square(self):
return (self * self.conjugate()).real
def _reciprocal(self):
return self.__class__(self.real / self._abs_square(),
-self.imaginary / self._abs_square())
def __truediv__(self, other):
return self * other._reciprocal()
def __abs__(self):
return sqrt(self._abs_square())
def conjugate(self):
"""
:return: The Complex Conjugate of the Complex Number
"""
return self.__class__(self.real, -self.imaginary)
# Calculate value of e^ib as per Euler's Formula
def _exp_imag_only(self):
return self.__class__(cos(self.imaginary), sin(self.imaginary))
def exp(self):
"""
:return: The value of `e` raised to the power of the Complex Number
"""
return self.__class__(exp(self.real)) * self._exp_imag_only()
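# Worked usage sketch (not part of the original exercise): checks a few identities
# from the module docstring, e.g. (1 + 2i) * (3 - i) = 5 + 5i.
if __name__ == "__main__":
    assert ComplexNumber(1, 2) * ComplexNumber(3, -1) == ComplexNumber(5, 5)
    assert ComplexNumber(1, 2) + ComplexNumber(3, -1) == ComplexNumber(4, 1)
    assert ComplexNumber(1, 2).conjugate() == ComplexNumber(1, -2)
    assert abs(ComplexNumber(3, 4)) == 5.0
    assert ComplexNumber(4, 2) / ComplexNumber(2, 0) == ComplexNumber(2, 1)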
|
nilq/baby-python
|
python
|
"""
Module: 'sys' on pyboard 1.13.0-95
"""
# MCU: (sysname='pyboard', nodename='pyboard', release='1.13.0', version='v1.13-95-g0fff2e03f on 2020-10-03', machine='PYBv1.1 with STM32F405RG')
# Stubber: 1.3.4
argv = None
byteorder = 'little'
def exit():
pass
implementation = None
maxsize = 2147483647
modules = None
path = None
platform = 'pyboard'
def print_exception():
pass
stderr = None
stdin = None
stdout = None
version = '3.4.0'
version_info = None
|
nilq/baby-python
|
python
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GridFS is a specification for storing large objects in Mongo.
The :mod:`gridfs` package is an implementation of GridFS on top of
:mod:`pymongo`, exposing a file-like interface.
"""
from __future__ import absolute_import, division
from twisted.internet import defer
from txmongo._gridfs.errors import NoFile
from txmongo._gridfs.grid_file import GridIn, GridOut, GridOutIterator
from txmongo import filter
from txmongo.filter import ASCENDING, DESCENDING
from txmongo.database import Database
assert GridOutIterator
class GridFS(object):
"""An instance of GridFS on top of a single Database.
"""
def __init__(self, database, collection="fs"):
"""Create a new instance of :class:`GridFS`.
Raises :class:`TypeError` if `database` is not an instance of
:class:`~pymongo.database.Database`.
:Parameters:
- `database`: database to use
- `collection` (optional): root collection to use
.. note::
Instantiating a GridFS object will implicitly create its indexes.
This could lead to errors if the underlying connection is closed
before the index creation requests have returned. To avoid this you
should use the defer returned by :meth:`GridFS.indexes_created`.
.. versionadded:: 1.6
The `collection` parameter.
"""
if not isinstance(database, Database):
raise TypeError("TxMongo: database must be an instance of Database.")
self.__database = database
self.__collection = database[collection]
self.__files = self.__collection.files
self.__chunks = self.__collection.chunks
self.__indexes_created_defer = defer.DeferredList([
self.__files.create_index(
filter.sort(ASCENDING("filename") + ASCENDING("uploadDate"))),
self.__chunks.create_index(
filter.sort(ASCENDING("files_id") + ASCENDING("n")), unique=True)
])
def indexes_created(self):
"""Returns a defer on the creation of this GridFS instance's indexes
"""
d = defer.Deferred()
self.__indexes_created_defer.chainDeferred(d)
return d
def new_file(self, **kwargs):
"""Create a new file in GridFS.
Returns a new :class:`~gridfs.grid_file.GridIn` instance to
which data can be written. Any keyword arguments will be
passed through to :meth:`~gridfs.grid_file.GridIn`.
:Parameters:
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.6
"""
return GridIn(self.__collection, **kwargs)
def put(self, data, **kwargs):
"""Put data in GridFS as a new file.
Equivalent to doing:
>>> f = new_file(**kwargs)
>>> try:
>>> f.write(data)
>>> finally:
>>> f.close()
`data` can be either an instance of :class:`str` or a
file-like object providing a :meth:`read` method. Any keyword
arguments will be passed through to the created file - see
:meth:`~gridfs.grid_file.GridIn` for possible
arguments. Returns the ``"_id"`` of the created file.
:Parameters:
- `data`: data to be written as a file.
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.6
"""
grid_file = GridIn(self.__collection, **kwargs)
def _finally(result):
return grid_file.close().addCallback(lambda _: result)
return grid_file.write(data)\
.addBoth(_finally)\
.addCallback(lambda _: grid_file._id)
def get(self, file_id):
"""Get a file from GridFS by ``"_id"``.
Returns an instance of :class:`~gridfs.grid_file.GridOut`,
which provides a file-like interface for reading.
:Parameters:
- `file_id`: ``"_id"`` of the file to get
.. versionadded:: 1.6
"""
def ok(doc):
if doc is None:
raise NoFile("TxMongo: no file in gridfs with _id {0}".format(repr(file_id)))
return GridOut(self.__collection, doc)
return self.__collection.files.find_one({"_id": file_id}).addCallback(ok)
def get_version(self, filename=None, version=-1):
"""Get a file from GridFS by ``"filename"``.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``. Note that searching by
random (unindexed) meta data is not supported here.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
"""
query = {"filename": filename}
skip = abs(version)
if version < 0:
skip -= 1
myorder = DESCENDING("uploadDate")
else:
myorder = ASCENDING("uploadDate")
def ok(cursor):
if cursor:
return GridOut(self.__collection, cursor[0])
raise NoFile("no version %d for filename %r" % (version, filename))
return self.__files.find(query, filter=filter.sort(myorder), limit=1, skip=skip)\
.addCallback(ok)
def count(self, filename):
"""Count the number of versions of a given file.
Returns an integer number of versions of the file in GridFS whose filename matches
`filename`, or raises NoFile if the file doesn't exist.
:Parameters:
- `filename`: ``"filename"`` of the file to get version count of
"""
return self.__files.count({"filename": filename})
def get_last_version(self, filename):
"""Get a file from GridFS by ``"filename"``.
Returns the most recently uploaded file in GridFS with the
name `filename` as an instance of
:class:`~gridfs.grid_file.GridOut`. Raises
:class:`~gridfs.errors.NoFile` if no such file exists.
An index on ``{filename: 1, uploadDate: -1}`` will
automatically be created when this method is called the first
time.
:Parameters:
- `filename`: ``"filename"`` of the file to get
.. versionadded:: 1.6
"""
def ok(doc):
if doc is None:
raise NoFile("TxMongo: no file in gridfs with filename {0}".format(repr(filename)))
return GridOut(self.__collection, doc)
return self.__files.find_one({"filename": filename},
filter = filter.sort(DESCENDING("uploadDate"))).addCallback(ok)
# TODO add optional safe mode for chunk removal?
def delete(self, file_id):
"""Delete a file from GridFS by ``"_id"``.
Removes all data belonging to the file with ``"_id"``:
`file_id`.
.. warning:: Any processes/threads reading from the file while
this method is executing will likely see an invalid/corrupt
file. Care should be taken to avoid concurrent reads to a file
while it is being deleted.
:Parameters:
- `file_id`: ``"_id"`` of the file to delete
.. versionadded:: 1.6
"""
return defer.DeferredList([
self.__files.remove({"_id": file_id}, safe=True),
self.__chunks.remove({"files_id": file_id})
])
def list(self):
"""List the names of all files stored in this instance of
:class:`GridFS`.
.. versionchanged:: 1.6
Removed the `collection` argument.
"""
return self.__files.distinct("filename")
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#:deploy:OHsentinel:/usr/local/bin
#standard imports
import sys
import ConfigParser
import logging
import os
#custom imports
if os.path.isdir("/usr/local/share/OHsentinel"):
sys.path.append("/usr/local/share/OHsentinel")
elif os.path.isdir("/usr/share/OHsentinel"):
sys.path.append("/usr/share/OHsentinel")
try:
import OHcommons
import OHssdp
import OHcustoms
except:
print 'Could not import OHcli modules. Please check /usr/local/share/OHsentinel and /usr/share/OHsentinel. lxml and tabulate are also required.'
sys.exit(4)
def init():
"reading config and parameters"
config = {}
devices = {}
if True:#try:
conf = ConfigParser.ConfigParser()
if os.path.isfile('/etc/OHsentinel.conf'):
conf.read('/etc/OHsentinel.conf')
else:
conf.read('./OHsentinel.conf')
else:#except:
print 'Could not read config file'
sys.exit(1)
if conf.has_section('OHproduct'):
for item in conf.items('OHproduct'):
devices['product', item[0]] = item[1]
if conf.has_section('OHsender'):
for item in conf.items('OHsender'):
devices['sender', item[0]] = item[1]
if conf.has_section('OHradio'):
config['stations'] = conf.get('OHradio', 'stations').split(',')
if conf.has_section('fakeradio'):
for station in conf.items('fakeradio'):
config['fakeradio', station[0]] = station[1]
if conf.has_option('resources', 'xmlpath'):
config['xmlpath'] = conf.get('resources', 'xmlpath')
else: config['xmlpath'] = '/usr/local/share/OHsentinel/xml'
if conf.has_option('resources', 'xslpath'):
config['xslpath'] = conf.get('resources', 'xslpath')
else: config['xslpath'] = '/usr/local/share/OHsentinel/xsl'
if conf.has_option('resources', 'logfile'):
config['logfile'] = conf.get('resources', 'logfile')
else:
config['logfile'] = './OHsentinel.log'
if conf.has_option('resources', 'cmdport'):
config['cmdport'] = conf.get('resources', 'cmdport')
else:
config['cmdport'] = 8891
if conf.has_option('resources', 'remote'):
config['remote'] = conf.get('resources', 'remote')
else:
config['remote'] = 'http://localhost'
if conf.has_option('misc', 'xmltagdelimiter'):
config['xmltagdelimiter'] = conf.get('misc', 'xmltagdelimiter')
else: config['xmltagdelimiter'] = ';;'
if conf.has_option('misc', 'maxcolumnwidth'):
config['maxcolumnwidth'] = conf.getint('misc', 'maxcolumnwidth')
if conf.has_option('misc', 'standardtags'):
config['standardtags'] = conf.get('misc', 'standardtags')
config['searchstring', 'product'] = "urn:av-openhome-org:service:Product:1"
config['searchstring', 'sender'] = "urn:av-openhome-org:service:Sender:1"
config['searchstring', 'all'] = "ssdp:all"
config['searchtypes'] = ['product', 'sender', 'all']
if conf.has_section('customsearch'):
for st in conf.items('customsearch'):
config['searchstring', st[0]] = st[1]
config['searchtypes'].append(st[0])
"parse arguments"
args = OHcustoms.set_arguments(None, config['searchtypes'])
"setup logging"
numeric_level = getattr(logging, args.loglevel[0].upper(), None)
if args.log == ["screen"]:
logging.basicConfig(level=numeric_level, format='%(asctime)s - %(levelname)s - %(message)s')
logging.debug('logging started')
elif args.log == ["file"]:
log_handler = logging.handlers.WatchedFileHandler(config['logfile'])
log_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(numeric_level)
logger.addHandler(log_handler)
logging.debug('logging started')
elif args.log == ["syslog"]:
log_handler = logging.handlers.SysLogHandler(address = '/dev/log')
log_handler.setFormatter(logging.Formatter('OHsentinel - cli: %(levelname)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(numeric_level)
logger.addHandler(log_handler)
logging.debug('logging started')
elif args.log == ["systemd"]:
from systemd.journal import JournalHandler
logger = logging.getLogger()
logger.setLevel(numeric_level)
logger.addHandler(JournalHandler(SYSLOG_IDENTIFIER='OHsentinel'))
logging.debug('logging started')
logging.debug('Used configuration: ' + str(config))
logging.debug('Known devices: ' + str(devices))
return args, config, devices
args, config, devices = init()
logging.debug(args)
if args.mode == 'search':
OHcommons.search(args, devices, config)
elif args.mode == 'command':
if args.unit[0] == 'Custom':
"Process command in custom unit"
OHcustoms.command(args, devices, config)
else:
"Process standard command"
OHcommons.command(args, devices, config)
elif args.mode == 'explore':
OHcommons.explore(args, devices, config)
elif args.mode == 'remote':
OHcommons.remote(args, devices, config)
|
nilq/baby-python
|
python
|
import unittest
from policosm.utils.levels import get_level
class LevelsTestCase(unittest.TestCase):
def test_known(self):
for highway in ['construction', 'demolished', 'raceway', 'abandoned', 'disused', 'foo', 'no','projected', 'planned','proposed','razed','dismantled','historic']:
self.assertEqual(0, get_level(highway))
for highway in ['stairway', 'elevator', 'corridor', 'hallway', 'slide']:
self.assertEqual(1, get_level(highway))
for highway in ['services', 'busway', 'bus_guideway', 'access','bus_stop', 'via_ferrata', 'access_ramp', 'emergency_access_point', 'emergency_bay','service', 'footway',
'traffic_island', 'virtual', 'cyleway', 'cycleway', 'byway', 'path', 'track', 'pedestrian', 'steps',
'platform', 'bridleway', 'rest_area', 'escape','footway']:
self.assertEqual(2, get_level(highway))
for highway in ['residential', 'yes', 'unclassified', 'crossing', 'unknown',
'bridge', 'lane', 'ford', 'psv', 'living_street','alley']:
self.assertEqual(3, get_level(highway))
for highway in ['tertiary', 'tertiary_link', 'turning_circle', 'road', 'roundabout', 'ice_road']:
self.assertEqual(4, get_level(highway))
for highway in ['secondary', 'secondary_link']:
self.assertEqual(5, get_level(highway))
for highway in ['primary', 'primary_link']:
self.assertEqual(6, get_level(highway))
for highway in ['trunk', 'trunk_link']:
self.assertEqual(7, get_level(highway))
for highway in ['motorway', 'motorway_link','ramp']:
self.assertEqual(8, get_level(highway))
def test_unknown(self):
self.assertEqual(3, get_level('zzz'))
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from torchsummary import summary
from lib.medzoo.BaseModelClass import BaseModel
"""
Code was borrowed and modified from this repo: https://github.com/josedolz/HyperDenseNet_pytorch
"""
def conv(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d,
BN=False, ws=False, activ=nn.LeakyReLU(0.2), gainWS=2):
convlayer = layer(nin, nout, kernel_size, stride=stride, padding=padding, bias=bias)
layers = []
# if ws:
# layers.append(WScaleLayer(convlayer, gain=gainWS))
if BN:
layers.append(nn.BatchNorm2d(nout))
if activ is not None:
if activ == nn.PReLU:
# to avoid sharing the same parameter, activ must be set to nn.PReLU (without '()')
layers.append(activ(num_parameters=1))
else:
# if activ == nn.PReLU(), the parameter will be shared for the whole network !
layers.append(activ)
layers.insert(ws, convlayer)
return nn.Sequential(*layers)
class ResidualConv(nn.Module):
def __init__(self, nin, nout, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
super(ResidualConv, self).__init__()
convs = [conv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ),
conv(nout, nout, bias=bias, BN=BN, ws=ws, activ=None)]
self.convs = nn.Sequential(*convs)
res = []
if nin != nout:
res.append(conv(nin, nout, kernel_size=1, padding=0, bias=False, BN=BN, ws=ws, activ=None))
self.res = nn.Sequential(*res)
activation = []
if activ is not None:
if activ == nn.PReLU:
# to avoid sharing the same parameter, activ must be set to nn.PReLU (without '()')
activation.append(activ(num_parameters=1))
else:
# if activ == nn.PReLU(), the parameter will be shared for the whole network !
activation.append(activ)
self.activation = nn.Sequential(*activation)
def forward(self, input):
out = self.convs(input)
return self.activation(out + self.res(input))
def upSampleConv_Res(nin, nout, upscale=2, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
return nn.Sequential(
nn.Upsample(scale_factor=upscale),
ResidualConv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ)
)
def conv_block(in_dim, out_dim, act_fn, kernel_size=3, stride=1, padding=1, dilation=1):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),
nn.BatchNorm2d(out_dim),
act_fn,
)
return model
def conv_block_1(in_dim, out_dim):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=1),
nn.BatchNorm2d(out_dim),
nn.PReLU(),
)
return model
def conv_block_Asym(in_dim, out_dim, kernelSize):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([2, 0])),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, 2])),
nn.BatchNorm2d(out_dim),
nn.PReLU(),
)
return model
def conv_block_Asym_Inception(in_dim, out_dim, kernel_size, padding, dilation=1):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=[kernel_size, 1], padding=tuple([padding * dilation, 0]),
dilation=(dilation, 1)),
nn.BatchNorm2d(out_dim),
nn.ReLU(),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernel_size], padding=tuple([0, padding * dilation]),
dilation=(dilation, 1)),
nn.BatchNorm2d(out_dim),
nn.ReLU(),
)
return model
def conv_block_Asym_Inception_WithIncreasedFeatMaps(in_dim, mid_dim, out_dim, kernel_size, padding, dilation=1):
model = nn.Sequential(
nn.Conv2d(in_dim, mid_dim, kernel_size=[kernel_size, 1], padding=tuple([padding * dilation, 0]),
dilation=(dilation, 1)),
nn.BatchNorm2d(mid_dim),
nn.ReLU(),
nn.Conv2d(mid_dim, out_dim, kernel_size=[1, kernel_size], padding=tuple([0, padding * dilation]),
dilation=(dilation, 1)),
nn.BatchNorm2d(out_dim),
nn.ReLU(),
)
return model
def conv_block_Asym_ERFNet(in_dim, out_dim, kernelSize, padding, drop, dilation):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([padding, 0]), bias=True),
nn.ReLU(),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, padding]), bias=True),
nn.BatchNorm2d(out_dim, eps=1e-03),
nn.ReLU(),
nn.Conv2d(in_dim, out_dim, kernel_size=[kernelSize, 1], padding=tuple([padding * dilation, 0]), bias=True,
dilation=(dilation, 1)),
nn.ReLU(),
nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernelSize], padding=tuple([0, padding * dilation]), bias=True,
dilation=(1, dilation)),
nn.BatchNorm2d(out_dim, eps=1e-03),
nn.Dropout2d(drop),
)
return model
def conv_block_3_3(in_dim, out_dim):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1),
nn.BatchNorm2d(out_dim),
nn.PReLU(),
)
return model
# TODO: Change order of block: BN + Activation + Conv
def conv_decod_block(in_dim, out_dim, act_fn):
model = nn.Sequential(
nn.ConvTranspose2d(in_dim, out_dim, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(out_dim),
act_fn,
)
return model
def dilation_conv_block(in_dim, out_dim, act_fn, stride_val, dil_val):
model = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride_val, padding=1, dilation=dil_val),
nn.BatchNorm2d(out_dim),
act_fn,
)
return model
def maxpool():
pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
return pool
def avrgpool05():
pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
return pool
def avrgpool025():
pool = nn.AvgPool2d(kernel_size=2, stride=4, padding=0)
return pool
def avrgpool0125():
pool = nn.AvgPool2d(kernel_size=2, stride=8, padding=0)
return pool
def maxpool():
pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
return pool
def maxpool_1_4():
pool = nn.MaxPool2d(kernel_size=2, stride=4, padding=0)
return pool
def maxpool_1_8():
pool = nn.MaxPool2d(kernel_size=2, stride=8, padding=0)
return pool
def maxpool_1_16():
pool = nn.MaxPool2d(kernel_size=2, stride=16, padding=0)
return pool
def maxpool_1_32():
pool = nn.MaxPool2d(kernel_size=2, stride=32, padding=0)
return pool
def conv_block_3(in_dim, out_dim, act_fn):
model = nn.Sequential(
conv_block(in_dim, out_dim, act_fn),
conv_block(out_dim, out_dim, act_fn),
nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(out_dim),
)
return model
def classificationNet(D_in):
H = 400
D_out = 1
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, int(H / 4)),
torch.nn.ReLU(),
torch.nn.Linear(int(H / 4), D_out)
)
return model
# from layers import *
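# croppCenter trims the spatial borders of a 5-D (N, C, D, H, W) feature map so that
# earlier (larger, unpadded) feature maps can be concatenated channel-wise with the
# smaller output of the current 3x3x3 convolution in the forward pass below.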
def croppCenter(tensorToCrop, finalShape):
org_shape = tensorToCrop.shape
diff = org_shape[2] - finalShape[2]
croppBorders = int(diff / 2)
return tensorToCrop[:,
:,
croppBorders:org_shape[2] - croppBorders,
croppBorders:org_shape[3] - croppBorders,
croppBorders:org_shape[4] - croppBorders]
def convBlock(nin, nout, kernel_size=3, batchNorm=False, layer=nn.Conv3d, bias=True, dropout_rate=0.0, dilation=1):
if batchNorm == False:
return nn.Sequential(
nn.PReLU(),
nn.Dropout(p=dropout_rate),
layer(nin, nout, kernel_size=kernel_size, bias=bias, dilation=dilation)
)
else:
return nn.Sequential(
nn.BatchNorm3d(nin),
nn.PReLU(),
nn.Dropout(p=dropout_rate),
layer(nin, nout, kernel_size=kernel_size, bias=bias, dilation=dilation)
)
def convBatch(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, dilation=1):
return nn.Sequential(
layer(nin, nout, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, dilation=dilation),
nn.BatchNorm2d(nout),
# nn.LeakyReLU(0.2)
nn.PReLU()
)
class HyperDenseNet_2Mod(BaseModel):
def __init__(self, in_channels=2, classes=4):
super(HyperDenseNet_2Mod, self).__init__()
self.num_classes = classes
assert in_channels == 2, "input channels must be two for this architecture"
# Path-Top
self.conv1_Top = convBlock(1, 25)
self.conv2_Top = convBlock(50, 25, batchNorm=True)
self.conv3_Top = convBlock(100, 25, batchNorm=True)
self.conv4_Top = convBlock(150, 50, batchNorm=True)
self.conv5_Top = convBlock(250, 50, batchNorm=True)
self.conv6_Top = convBlock(350, 50, batchNorm=True)
self.conv7_Top = convBlock(450, 75, batchNorm=True)
self.conv8_Top = convBlock(600, 75, batchNorm=True)
self.conv9_Top = convBlock(750, 75, batchNorm=True)
# Path-Bottom
self.conv1_Bottom = convBlock(1, 25)
self.conv2_Bottom = convBlock(50, 25, batchNorm=True)
self.conv3_Bottom = convBlock(100, 25, batchNorm=True)
self.conv4_Bottom = convBlock(150, 50, batchNorm=True)
self.conv5_Bottom = convBlock(250, 50, batchNorm=True)
self.conv6_Bottom = convBlock(350, 50, batchNorm=True)
self.conv7_Bottom = convBlock(450, 75, batchNorm=True)
self.conv8_Bottom = convBlock(600, 75, batchNorm=True)
self.conv9_Bottom = convBlock(750, 75, batchNorm=True)
self.fully_1 = nn.Conv3d(1800, 400, kernel_size=1)
self.fully_2 = nn.Conv3d(400, 200, kernel_size=1)
self.fully_3 = nn.Conv3d(200, 150, kernel_size=1)
self.final = nn.Conv3d(150, classes, kernel_size=1)
def forward(self, input):
# ----- First layer ------ #
# get 2 of the channels as 5D tensors
# pdb.set_trace()
print("input shape ", input.shape)
y1t = self.conv1_Top(input[:, 0:1, :, :, :])
y1b = self.conv1_Bottom(input[:, 1:2, :, :, :])
# ----- Second layer ------ #
# concatenate
y2t_i = torch.cat((y1t, y1b), dim=1)
y2b_i = torch.cat((y1b, y1t), dim=1)
y2t_o = self.conv2_Top(y2t_i)
y2b_o = self.conv2_Bottom(y2b_i)
# ----- Third layer ------ #
y2t_i_cropped = croppCenter(y2t_i, y2t_o.shape)
y2b_i_cropped = croppCenter(y2b_i, y2t_o.shape)
# concatenate
y3t_i = torch.cat((y2t_i_cropped, y2t_o, y2b_o), dim=1)
y3b_i = torch.cat((y2b_i_cropped, y2b_o, y2t_o), dim=1)
y3t_o = self.conv3_Top(y3t_i)
y3b_o = self.conv3_Bottom(y3b_i)
# ------ Fourth layer ----- #
y3t_i_cropped = croppCenter(y3t_i, y3t_o.shape)
y3b_i_cropped = croppCenter(y3b_i, y3t_o.shape)
# concatenate
y4t_i = torch.cat((y3t_i_cropped, y3t_o, y3b_o), dim=1)
y4b_i = torch.cat((y3b_i_cropped, y3b_o, y3t_o), dim=1)
y4t_o = self.conv4_Top(y4t_i)
y4b_o = self.conv4_Bottom(y4b_i)
# ------ Fifth layer ----- #
y4t_i_cropped = croppCenter(y4t_i, y4t_o.shape)
y4b_i_cropped = croppCenter(y4b_i, y4t_o.shape)
# concatenate
y5t_i = torch.cat((y4t_i_cropped, y4t_o, y4b_o), dim=1)
y5b_i = torch.cat((y4b_i_cropped, y4b_o, y4t_o), dim=1)
y5t_o = self.conv5_Top(y5t_i)
y5b_o = self.conv5_Bottom(y5b_i)
# ------ Sixth layer ----- #
y5t_i_cropped = croppCenter(y5t_i, y5t_o.shape)
y5b_i_cropped = croppCenter(y5b_i, y5t_o.shape)
# concatenate
y6t_i = torch.cat((y5t_i_cropped, y5t_o, y5b_o), dim=1)
y6b_i = torch.cat((y5b_i_cropped, y5b_o, y5t_o), dim=1)
y6t_o = self.conv6_Top(y6t_i)
y6b_o = self.conv6_Bottom(y6b_i)
# ------ Seventh layer ----- #
y6t_i_cropped = croppCenter(y6t_i, y6t_o.shape)
y6b_i_cropped = croppCenter(y6b_i, y6t_o.shape)
# concatenate
y7t_i = torch.cat((y6t_i_cropped, y6t_o, y6b_o), dim=1)
y7b_i = torch.cat((y6b_i_cropped, y6b_o, y6t_o), dim=1)
y7t_o = self.conv7_Top(y7t_i)
y7b_o = self.conv7_Bottom(y7b_i)
# ------ Eight layer ----- #
y7t_i_cropped = croppCenter(y7t_i, y7t_o.shape)
y7b_i_cropped = croppCenter(y7b_i, y7t_o.shape)
# concatenate
y8t_i = torch.cat((y7t_i_cropped, y7t_o, y7b_o), dim=1)
y8b_i = torch.cat((y7b_i_cropped, y7b_o, y7t_o), dim=1)
y8t_o = self.conv8_Top(y8t_i)
y8b_o = self.conv8_Bottom(y8b_i)
# ------ Ninth layer ----- #
y8t_i_cropped = croppCenter(y8t_i, y8t_o.shape)
y8b_i_cropped = croppCenter(y8b_i, y8t_o.shape)
# concatenate
y9t_i = torch.cat((y8t_i_cropped, y8t_o, y8b_o), dim=1)
y9b_i = torch.cat((y8b_i_cropped, y8b_o, y8t_o), dim=1)
y9t_o = self.conv9_Top(y9t_i)
y9b_o = self.conv9_Bottom(y9b_i)
##### Fully connected layers
y9t_i_cropped = croppCenter(y9t_i, y9t_o.shape)
y9b_i_cropped = croppCenter(y9b_i, y9t_o.shape)
outputPath_top = torch.cat((y9t_i_cropped, y9t_o, y9b_o), dim=1)
outputPath_bottom = torch.cat((y9b_i_cropped, y9b_o, y9t_o), dim=1)
inputFully = torch.cat((outputPath_top, outputPath_bottom), dim=1)
y = self.fully_1(inputFully)
y = self.fully_2(y)
y = self.fully_3(y)
return self.final(y)
def test(self, device='cpu'):
input_tensor = torch.rand(1, 2, 22, 22, 22)
ideal_out = torch.rand(1, self.num_classes, 22, 22, 22)
out = self.forward(input_tensor)
# assert ideal_out.shape == out.shape
# summary(self.to(torch.device(device)), (2, 22, 22, 22),device=device)
# torchsummaryX.summary(self,input_tensor.to(device))
print("HyperDenseNet test is complete", out.shape)
class HyperDenseNet(BaseModel):
def __init__(self, in_channels=3, classes=4):
super(HyperDenseNet, self).__init__()
assert in_channels == 3, "HyperDensenet supports 3 in_channels. For 2 in_channels use HyperDenseNet_2Mod "
self.num_classes = classes
# Path-Top
self.conv1_Top = convBlock(1, 25)
self.conv2_Top = convBlock(75, 25, batchNorm=True)
self.conv3_Top = convBlock(150, 25, batchNorm=True)
self.conv4_Top = convBlock(225, 50, batchNorm=True)
self.conv5_Top = convBlock(375, 50, batchNorm=True)
self.conv6_Top = convBlock(525, 50, batchNorm=True)
self.conv7_Top = convBlock(675, 75, batchNorm=True)
self.conv8_Top = convBlock(900, 75, batchNorm=True)
self.conv9_Top = convBlock(1125, 75, batchNorm=True)
# Path-Middle
self.conv1_Middle = convBlock(1, 25)
self.conv2_Middle = convBlock(75, 25, batchNorm=True)
self.conv3_Middle = convBlock(150, 25, batchNorm=True)
self.conv4_Middle = convBlock(225, 50, batchNorm=True)
self.conv5_Middle = convBlock(375, 50, batchNorm=True)
self.conv6_Middle = convBlock(525, 50, batchNorm=True)
self.conv7_Middle = convBlock(675, 75, batchNorm=True)
self.conv8_Middle = convBlock(900, 75, batchNorm=True)
self.conv9_Middle = convBlock(1125, 75, batchNorm=True)
# Path-Bottom
self.conv1_Bottom = convBlock(1, 25)
self.conv2_Bottom = convBlock(75, 25, batchNorm=True)
self.conv3_Bottom = convBlock(150, 25, batchNorm=True)
self.conv4_Bottom = convBlock(225, 50, batchNorm=True)
self.conv5_Bottom = convBlock(375, 50, batchNorm=True)
self.conv6_Bottom = convBlock(525, 50, batchNorm=True)
self.conv7_Bottom = convBlock(675, 75, batchNorm=True)
self.conv8_Bottom = convBlock(900, 75, batchNorm=True)
self.conv9_Bottom = convBlock(1125, 75, batchNorm=True)
self.fully_1 = nn.Conv3d(4050, 400, kernel_size=1)
self.fully_2 = nn.Conv3d(400, 200, kernel_size=1)
self.fully_3 = nn.Conv3d(200, 150, kernel_size=1)
self.final = nn.Conv3d(150, classes, kernel_size=1)
def forward(self, input):
# ----- First layer ------ #
# get the 3 channels as 5D tensors
y1t = self.conv1_Top(input[:, 0:1, :, :, :])
y1m = self.conv1_Middle(input[:, 1:2, :, :, :])
y1b = self.conv1_Bottom(input[:, 2:3, :, :, :])
# ----- Second layer ------ #
# concatenate
y2t_i = torch.cat((y1t, y1m, y1b), dim=1)
y2m_i = torch.cat((y1m, y1t, y1b), dim=1)
y2b_i = torch.cat((y1b, y1t, y1m), dim=1)
y2t_o = self.conv2_Top(y2t_i)
y2m_o = self.conv2_Middle(y2m_i)
y2b_o = self.conv2_Bottom(y2b_i)
# ----- Third layer ------ #
y2t_i_cropped = croppCenter(y2t_i, y2t_o.shape)
y2m_i_cropped = croppCenter(y2m_i, y2t_o.shape)
y2b_i_cropped = croppCenter(y2b_i, y2t_o.shape)
# concatenate
y3t_i = torch.cat((y2t_i_cropped, y2t_o, y2m_o, y2b_o), dim=1)
y3m_i = torch.cat((y2m_i_cropped, y2m_o, y2t_o, y2b_o), dim=1)
y3b_i = torch.cat((y2b_i_cropped, y2b_o, y2t_o, y2m_o), dim=1)
y3t_o = self.conv3_Top(y3t_i)
y3m_o = self.conv3_Middle(y3m_i)
y3b_o = self.conv3_Bottom(y3b_i)
# ------ Fourth layer ----- #
y3t_i_cropped = croppCenter(y3t_i, y3t_o.shape)
y3m_i_cropped = croppCenter(y3m_i, y3t_o.shape)
y3b_i_cropped = croppCenter(y3b_i, y3t_o.shape)
# concatenate
y4t_i = torch.cat((y3t_i_cropped, y3t_o, y3m_o, y3b_o), dim=1)
y4m_i = torch.cat((y3m_i_cropped, y3m_o, y3t_o, y3b_o), dim=1)
y4b_i = torch.cat((y3b_i_cropped, y3b_o, y3t_o, y3m_o), dim=1)
y4t_o = self.conv4_Top(y4t_i)
y4m_o = self.conv4_Middle(y4m_i)
y4b_o = self.conv4_Bottom(y4b_i)
# ------ Fifth layer ----- #
y4t_i_cropped = croppCenter(y4t_i, y4t_o.shape)
y4m_i_cropped = croppCenter(y4m_i, y4t_o.shape)
y4b_i_cropped = croppCenter(y4b_i, y4t_o.shape)
# concatenate
y5t_i = torch.cat((y4t_i_cropped, y4t_o, y4m_o, y4b_o), dim=1)
y5m_i = torch.cat((y4m_i_cropped, y4m_o, y4t_o, y4b_o), dim=1)
y5b_i = torch.cat((y4b_i_cropped, y4b_o, y4t_o, y4m_o), dim=1)
y5t_o = self.conv5_Top(y5t_i)
y5m_o = self.conv5_Middle(y5m_i)
y5b_o = self.conv5_Bottom(y5b_i)
# ------ Sixth layer ----- #
y5t_i_cropped = croppCenter(y5t_i, y5t_o.shape)
y5m_i_cropped = croppCenter(y5m_i, y5t_o.shape)
y5b_i_cropped = croppCenter(y5b_i, y5t_o.shape)
# concatenate
y6t_i = torch.cat((y5t_i_cropped, y5t_o, y5m_o, y5b_o), dim=1)
y6m_i = torch.cat((y5m_i_cropped, y5m_o, y5t_o, y5b_o), dim=1)
y6b_i = torch.cat((y5b_i_cropped, y5b_o, y5t_o, y5m_o), dim=1)
y6t_o = self.conv6_Top(y6t_i)
y6m_o = self.conv6_Middle(y6m_i)
y6b_o = self.conv6_Bottom(y6b_i)
# ------ Seventh layer ----- #
y6t_i_cropped = croppCenter(y6t_i, y6t_o.shape)
y6m_i_cropped = croppCenter(y6m_i, y6t_o.shape)
y6b_i_cropped = croppCenter(y6b_i, y6t_o.shape)
# concatenate
y7t_i = torch.cat((y6t_i_cropped, y6t_o, y6m_o, y6b_o), dim=1)
y7m_i = torch.cat((y6m_i_cropped, y6m_o, y6t_o, y6b_o), dim=1)
y7b_i = torch.cat((y6b_i_cropped, y6b_o, y6t_o, y6m_o), dim=1)
y7t_o = self.conv7_Top(y7t_i)
y7m_o = self.conv7_Middle(y7m_i)
y7b_o = self.conv7_Bottom(y7b_i)
# ------ Eight layer ----- #
y7t_i_cropped = croppCenter(y7t_i, y7t_o.shape)
y7m_i_cropped = croppCenter(y7m_i, y7t_o.shape)
y7b_i_cropped = croppCenter(y7b_i, y7t_o.shape)
# concatenate
y8t_i = torch.cat((y7t_i_cropped, y7t_o, y7m_o, y7b_o), dim=1)
y8m_i = torch.cat((y7m_i_cropped, y7m_o, y7t_o, y7b_o), dim=1)
y8b_i = torch.cat((y7b_i_cropped, y7b_o, y7t_o, y7m_o), dim=1)
y8t_o = self.conv8_Top(y8t_i)
y8m_o = self.conv8_Middle(y8m_i)
y8b_o = self.conv8_Bottom(y8b_i)
# ------ Ninth layer ----- #
y8t_i_cropped = croppCenter(y8t_i, y8t_o.shape)
y8m_i_cropped = croppCenter(y8m_i, y8t_o.shape)
y8b_i_cropped = croppCenter(y8b_i, y8t_o.shape)
# concatenate
y9t_i = torch.cat((y8t_i_cropped, y8t_o, y8m_o, y8b_o), dim=1)
y9m_i = torch.cat((y8m_i_cropped, y8m_o, y8t_o, y8b_o), dim=1)
y9b_i = torch.cat((y8b_i_cropped, y8b_o, y8t_o, y8m_o), dim=1)
y9t_o = self.conv9_Top(y9t_i)
y9m_o = self.conv9_Middle(y9m_i)
y9b_o = self.conv9_Bottom(y9b_i)
##### Fully connected layers
y9t_i_cropped = croppCenter(y9t_i, y9t_o.shape)
y9m_i_cropped = croppCenter(y9m_i, y9t_o.shape)
y9b_i_cropped = croppCenter(y9b_i, y9t_o.shape)
outputPath_top = torch.cat((y9t_i_cropped, y9t_o, y9m_o, y9b_o), dim=1)
outputPath_middle = torch.cat((y9m_i_cropped, y9m_o, y9t_o, y9b_o), dim=1)
outputPath_bottom = torch.cat((y9b_i_cropped, y9b_o, y9t_o, y9m_o), dim=1)
inputFully = torch.cat((outputPath_top, outputPath_middle, outputPath_bottom), dim=1)
y = self.fully_1(inputFully)
y = self.fully_2(y)
y = self.fully_3(y)
return self.final(y)
def test(self, device='cpu'):
device = torch.device(device)
input_tensor = torch.rand(1, 3, 20, 20, 20)
ideal_out = torch.rand(1, self.num_classes, 20, 20, 20)
out = self.forward(input_tensor)
# assert ideal_out.shape == out.shape
summary(self, (3, 16, 16, 16))
# torchsummaryX.summary(self, input_tensor.to(device))
print("HyperDenseNet test is complete!!!", out.shape)
# m = HyperDenseNet(1,4)
# m.test()
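# Hedged usage sketch (not from the original source): both networks consume 5D patches of
# shape (batch, modalities, D, H, W); the 22-voxel patch size below is only an assumption
# borrowed from the test() methods above, and the nine valid 3x3x3 convolutions shrink each
# spatial dimension by 18 voxels.
# two_mod = HyperDenseNet_2Mod(in_channels=2, classes=4)
# logits = two_mod(torch.rand(1, 2, 22, 22, 22))    # -> (1, 4, 4, 4, 4)
# three_mod = HyperDenseNet(in_channels=3, classes=4)
# logits = three_mod(torch.rand(1, 3, 22, 22, 22))  # -> (1, 4, 4, 4, 4)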
|
nilq/baby-python
|
python
|
from sht3x_raspberry_exporter.sht3x import _crc8
def test_crc8():
    # 0xBE 0xEF with checksum 0x92 is the CRC-8 example given in the Sensirion SHT3x datasheet.
    assert _crc8(0xBE, 0xEF, 0x92)
|
nilq/baby-python
|
python
|
import choraconfig, os.path
tool = choraconfig.clone_tool("chora")
tool["displayname"] = "CHORA:sds"
tool["shortname"] = "chora:sds"
tool["cmd"] = [choraconfig.parent(2,choraconfig.testroot) + "/duet.native","-chora-debug-recs","-chora-summaries","-chora-debug-squeeze","-chora","{filename}"]
|
nilq/baby-python
|
python
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from ...core.loop.candidate_point_calculators import RandomSampling
from ...core.loop.loop_state import create_loop_state
from ...core.loop.model_updaters import NoopModelUpdater
from ...core.loop.outer_loop import OuterLoop
from ...core.parameter_space import ParameterSpace
class RandomSearch(OuterLoop):
def __init__(
self, space: ParameterSpace, x_init: np.ndarray = None, y_init: np.ndarray = None, cost_init: np.ndarray = None
):
"""
Simple loop to perform random search where in each iteration points are sampled uniformly at random
over the input space.
:param space: Input space where the optimization is carried out.
:param x_init: 2d numpy array of shape (no. points x no. input features) of initial X data
:param y_init: 2d numpy array of shape (no. points x no. targets) of initial Y data
:param cost_init: 2d numpy array of shape (no. points x no. targets) of initial cost of each function evaluation
"""
model_updaters = NoopModelUpdater()
candidate_point_calculator = RandomSampling(parameter_space=space)
if x_init is not None and y_init is not None:
loop_state = create_loop_state(x_init, y_init, cost=cost_init)
else:
loop_state = None
super().__init__(candidate_point_calculator, model_updaters, loop_state=loop_state)
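# Hedged usage sketch (not part of the original file): with an emukit-style OuterLoop the
# typical calling pattern would look roughly like the commented lines below. The import
# paths, the ContinuousParameter name and the run_loop signature are assumptions about the
# surrounding package, not guarantees.
#
#     space = ParameterSpace([ContinuousParameter("x", 0.0, 1.0)])
#     loop = RandomSearch(space)
#     loop.run_loop(user_function=my_objective, stopping_condition=10)  # e.g. 10 random evaluations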
|
nilq/baby-python
|
python
|
ar = [float(i) for i in input().split()]
ar_sq = []
for i in range(len(ar)):
ar_sq.append(ar[i]**2)
ar_sq = sorted(ar_sq)
print(ar_sq[0], end = ' ')
for i in range(1, len(ar_sq)):
if ar_sq[i] != ar_sq[i-1]:
print(ar_sq[i], end = ' ')
|
nilq/baby-python
|
python
|
from chroniclr import window
if __name__ == '__main__':
window = window.AppWindow()
|
nilq/baby-python
|
python
|
import atexit
import sys
import inspect
import json
import time
from .Autocomplete import Autocomplete
from .Connection import Connection, ConnectionError
from .Controller import Controller, ControllerError, parse_key
from .Device import (Device, DeviceError, parse_hex, parse_hsv,
parse_hsv_normalized, parse_rgb, parse_rgb_normalized)
from .Dialog import ask, prompt
from .Sketch import Sketch, SketchError
def no_controller():
if Controller.defined is False:
sketches = []
for name, obj in inspect.getmembers(sys.modules['__main__'], inspect.isclass):
if (obj is not Sketch) and (Sketch in inspect.getmro(obj)):
sketches.append((obj, name))
num_sketches = len(sketches)
if num_sketches == 0:
raise SketchError('No sketches found')
elif num_sketches == 1:
sketch_class, sketch_name = sketches.pop()
with Controller(sketch_class.config_path) as controller:
controller.run_sketch(sketch_class)
else:
raise SketchError(f'Use Controller to run multiple sketches (found {num_sketches})')
atexit.register(no_controller)
|
nilq/baby-python
|
python
|
# pylint: disable=redefined-outer-name
""" py.test dynamic configuration.
For details needed to understand these tests, refer to:
https://pytest.org/
http://pythontesting.net/start-here/
"""
# Copyright © {{ cookiecutter.year }} {{ cookiecutter.full_name }} <{{ cookiecutter.email }}>
#
# ## LICENSE_SHORT ##
import os
import logging
from pathlib import Path
import pytest
# Globally available fixtures
@pytest.fixture(scope='session')
def logger() -> logging.Logger:
"""Test logger instance as a fixture."""
level = os.getenv('TESTS_LOG_LEVEL', 'DEBUG')
logging.basicConfig(level=getattr(logging, level))
return logging.getLogger('tests')
@pytest.fixture(scope='session')
def tests_dir() -> Path:
"""Directory where tests + data is located."""
return Path(__file__).parent
@pytest.fixture(scope='session')
def project_dir(tests_dir) -> Path:
""" Root directory of the project.
"""
return tests_dir.parent.parent
@pytest.fixture(scope='session')
def build_dir(project_dir) -> Path:
"""Build directory for dynamic data (created if missing)."""
result = project_dir / "build"
result.mkdir(exist_ok=True)
return result
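# Hedged example (not in the original conftest): a test module elsewhere in the suite can
# request these session fixtures simply by naming them as arguments.
#
#     def test_build_dir_exists(build_dir, logger):
#         logger.debug("build dir is %s", build_dir)
#         assert build_dir.is_dir()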
|
nilq/baby-python
|
python
|
import subprocess
import codecs
goal_goals = []
#txt_dirs = ['ted_originals/', 'ted_transcripts/']
#txt_dirs = ['orig_lower/']
txt_dirs = ['trans_preprocessed/', 'orig_preprocessed/']
for txt_dir in txt_dirs:
with codecs.open('goal_goals.txt') as goal_goals_in:
for line in goal_goals_in:
if line[-1] == '\n':
line = line[:-1]
goal_goals.append((line.split(' ')[0],int(line.split(' ')[1])))
for myfile,number in goal_goals:
with codecs.open('TestMaryamTexts.m','r','utf-8') as mat_in:
mat = mat_in.read()
mat = mat.replace('<filename_placeholder>',txt_dir + myfile.split("/")[-1])
mat = mat.replace('<number_placeholder>', '10') #str(number))
with codecs.open('TestMaryamTexts2.m','w','utf-8') as mat_out:
mat_out.write(mat)
subprocess.call("touch W/2", shell=True)
subprocess.call("touch W/1", shell=True)
subprocess.call("matlab -r \"try;run('TestMaryamTexts2.m');catch;end;quit;\"", shell=True)
subprocess.call("mv W/1 " + txt_dir + "habibi75_1/" + myfile.split("/")[-1], shell=True)
subprocess.call("mv W/2 " + txt_dir + "habibi75_2/" + myfile.split("/")[-1], shell=True)
|
nilq/baby-python
|
python
|
import unittest
import os
NOT_FATAL = 0
iverilog = "iverilog -y./tb -y./main_unit/ -o ./tb/main_unit__adc_capture__tb.vvp ./tb/main_unit__adc_capture__tb.v"
vvp = "vvp ./tb/main_unit__adc_capture__tb.vvp"
class adc_capture(unittest.TestCase):
def test_adc_capture(self):
self.assertEqual(os.system(iverilog), NOT_FATAL)
self.assertEqual(os.system(vvp), NOT_FATAL)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import random
import math
import sys
import copy
# import os
class Bracket:
def __init__(self, teams):
self.numTeams = 1
# self.numTeams = len(teams)
self.teams = list(teams)
self.maxScore = len(max(["Round "]+teams, key=len))
self.numRounds = int(math.ceil(math.log(self.numTeams, 2)) + 1)
self.totalNumTeams = int(2**math.ceil(math.log(self.numTeams, 2)))
self.totalTeams = self.addTeams()
self.lineup = ["bye" if "-" in str(x) else x for x in self.totalTeams]
self.numToName()
self.count = 0
self.rounds = []
for i in range(0, self.numRounds):
self.rounds.append([])
for _ in range(0, 2**(self.numRounds-i-1)):
self.rounds[i].append("-"*self.maxScore)
self.rounds[0] = list(self.totalTeams)
def numToName(self):
for i in range(0, self.numTeams):
self.totalTeams[self.totalTeams.index(i+1)] = self.teams[i]
def shuffle(self):
random.shuffle(self.teams)
self.totalTeams = self.addTeams()
self.numToName()
self.rounds[0] = list(self.totalTeams)
def update(self, rounds, teams):
lowercase = [team.lower() for team in self.rounds[rounds-2]]
for team in teams:
try:
index = lowercase.index(team.lower())
self.rounds[rounds-1][int(index/2)] = self.rounds[rounds-2][index]
except:
return False
if "-"*self.maxScore in self.rounds[rounds-1]:
return False
return True
def show(self):
self.count = 0
self.temp = copy.deepcopy(self.rounds)
self.tempLineup = list(self.lineup)
sys.stdout.write("Seed ")
for i in range(1, self.numRounds+1):
sys.stdout.write(("Round "+str(i)).rjust(self.maxScore+3))
print ""
self.recurse(self.numRounds-1, 0)
def recurse(self, num, tail):
if num == 0:
self.count += 1
if tail == -1:
print str(self.tempLineup.pop(0)).rjust(4)+self.temp[0].pop(0).rjust(self.maxScore+3)+" \\"
elif tail == 1:
print str(self.tempLineup.pop(0)).rjust(4)+self.temp[0].pop(0).rjust(self.maxScore+3)+" /"
else:
self.recurse(num-1, -1)
if tail == -1:
print "".rjust(4)+"".rjust((
self.maxScore+3)*num)+self.temp[num].pop(0).rjust(self.maxScore+3)+" \\"
elif tail == 1:
print "".rjust(4)+"".rjust((self.maxScore+3)*num)+self.temp[num].pop(0).rjust(self.maxScore+3)+" /"
else:
print "".rjust(4)+"".rjust((self.maxScore+3)*num)+self.temp[num].pop(0).rjust(self.maxScore+3)
self.recurse(num-1, 1)
def addTeams(self):
x = self.numTeams
teams = [1]
temp = []
count = 0
for i in range(2, x+1):
temp.append(i)
for i in range(0, int(2**math.ceil(math.log(x, 2))-x)):
temp.append("-"*self.maxScore)
for _ in range(0, int(math.ceil(math.log(x, 2)))):
high = max(teams)
for i in range(0, len(teams)):
index = teams.index(high)+1
teams.insert(index, temp[count])
high -= 1
count += 1
return teams
def getNumTeams():
print "How many players?",
numTeams = 2
try:
x = int(numTeams)
if x > 1:
return x
else:
print "Must be at least two players"
return getNumTeams()
except:
return getNumTeams()
def getTeamNames(numTeams):
teams = []
for i in range(0, numTeams):
correct = False
"""while not correct:
print "Name of player "+str(i+1)+"?",
teams.append(name)
correct = True"""
return teams
def run():
numTeams = getNumTeams()
teams = getTeamNames(numTeams)
bracket = Bracket(teams)
bracket.shuffle()
bracket.show()
for i in range(2, bracket.numRounds+1):
updated = False
while not updated:
teams = []
updated = bracket.update(i, teams)
bracket.show()
print ""
print bracket.rounds[-1][0]+" won!"
|
nilq/baby-python
|
python
|
import re
import numpy
import os
import pdb
### INPUT FILES
rd_folder = "../raw_data/"
traits_file = rd_folder+"data_dental_master.csv"
bio_file_all = rd_folder+"data_sites_IUCN_narrowA.csv"
occurence_file = rd_folder+"occurence_IUCN_%s.csv"
bio_legend_file = rd_folder+"bio_legend.txt"
### OUTPUT FILES
pr_folder = "../prepared_data/"
agg_file = pr_folder+"IUCN_%s_agg.csv"
aggrnd_file = pr_folder+"IUCN_%s_agg_rounded%d.csv"
bio_file = pr_folder+"IUCN_%s_bio.csv"
stats_file = "../misc/IUCN_ordfam_stats.tex"
### PARAMETERS
continents = ["EU", "AF", "NA", "SA"]
keep_traits = ["HYP","FCT_HOD","FCT_AL","FCT_OL","FCT_SF","FCT_OT","FCT_CM"]
bool_traits = ["HYP:1", "HYP:2","HYP:3","FCT_HOD:1","FCT_HOD:2","FCT_HOD:3","FCT_AL","FCT_OL","FCT_SF","FCT_OT","FCT_CM"]
keep_ordfam = ["FAMILY", "ORDER"]
key_species = "TAXON"
NA_val = "NA"
files_thres_out = [{"ext": "_nbspc3+", "thres_type": "num", "thres_side": 0, "thres_col": "NB_SPC", "thres_min": 3}]
round_dgt = 3
### FUNCTIONS
def load_legend_bio(bio_legend_file):
leg = {}
with open(bio_legend_file) as fp:
for line in fp:
parts = line.strip().split("=")
leg[parts[0].strip()] = parts[0].strip()+":"+parts[1].strip()
return leg
def load_lines_bio(bio_file, remove_vars, key_var, trans_vars={}):
lines_bio = {}
key_col = None
sep = ","
with open(bio_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if key_col is None:
key_col = parts.index(key_var)
keep_cols = [key_col]+[k for (k,v) in enumerate(parts) if v not in remove_vars+[key_var]]
lines_bio[None] = sep.join([trans_vars.get(parts[k], parts[k]) for k in keep_cols])+"\n"
else:
lines_bio[parts[key_col]] = sep.join([parts[k] for k in keep_cols])+"\n"
return lines_bio
def load_traits(traits_file, keep_traits, bool_traits, key_species):
data_traits = {}
head_traits = None
sep = "\t"
with open(traits_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_traits is None:
head_traits = dict([(v,k) for (k,v) in enumerate(parts)])
else:
if True:
values = []
for kv in bool_traits:
tmp = re.match("(?P<trait>.*):(?P<val>[0-9]+)$", kv)
if tmp is not None:
if parts[head_traits[tmp.group("trait")]] == NA_val:
print parts[head_traits[key_species]], kv, "MISSING"
values.append(0)
else:
values.append(1*(parts[head_traits[tmp.group("trait")]] == tmp.group("val")))
else:
if parts[head_traits[kv]] == NA_val:
print parts[head_traits[key_species]], kv, "MISSING"
values.append(0)
else:
values.append(int(parts[head_traits[kv]]))
data_traits[parts[head_traits[key_species]]] = values
# except ValueError:
# print parts[head_traits[key_species]], "MISSING"
return data_traits, head_traits
def aggregate_traits(occurence_file, agg_file, data_traits, head_traits, bool_traits, lines_bio=None, bio_file=None):
data_occurence = {}
head_occurence = None
sep = ","
if bio_file is not None and lines_bio is not None:
flb = open(bio_file, "w")
else:
flb = None
fo = open(agg_file, "w")
with open(occurence_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_occurence is None:
head_occurence = dict([(k,v) for (k,v) in enumerate(parts)])
fo.write(",".join(["ID"]+["MEAN_%s" % t for t in bool_traits]+["NB_SPC"])+"\n")
if flb is not None:
flb.write(lines_bio[None])
elif lines_bio is None or parts[0] in lines_bio:
try:
present = [head_occurence[i] for (i,v) in enumerate(parts) if v =="1"]
except ValueError:
print line
pdb.set_trace()
data_mat = numpy.array([data_traits[p] for p in present])
if data_mat.shape[0] == 0:
fo.write(",".join([parts[0]]+["0" for t in bool_traits]+["0"])+"\n")
else:
fo.write(",".join([parts[0]]+["%f" % t for t in data_mat.mean(axis=0)]+["%d" % data_mat.shape[0]])+"\n")
if flb is not None:
flb.write(lines_bio[parts[0]])
if flb is not None:
flb.close()
fo.close()
def filter_nbspc(files_in, files_thres_out):
fps = [open(file_in) for file_in in files_in]
heads = []
head_lines = []
for fp in fps:
head_lines.append(fp.readline())
heads.append(dict([(v,k) for (k,v) in enumerate(head_lines[-1].strip().split(","))]))
checked = []
for ooo in files_thres_out:
out = dict(ooo)
if out["thres_side"] >= 0 and out["thres_side"] < len(heads) and out["thres_col"] in heads[out["thres_side"]]:
out["colid"] = heads[out["thres_side"]][out["thres_col"]]
out["fps"] = []
out["fns"] = []
### EXCLUDE FILTER COLUMN OR NOT
excl = None # out["colid"]
for file_in in files_in:
parts = file_in.split("_")
parts[-2]+= out["ext"]
fname = "_".join(parts)
out["fps"].append(open(fname, "w"))
out["fns"].append(fname)
for li, l in enumerate(head_lines):
if li == out["thres_side"]:
out["fps"][li].write(",".join([p for (pi, p) in \
enumerate(l.strip().split(",")) if pi != excl])+"\n")
else:
# out["fps"][li].write(l)
# pdb.set_trace()
## out["fps"][li].write(l.strip('\n') + ",bioA:abs_bio13-bio14,bioB:bio4_corr"+"\n")
out["fps"][li].write(l.strip('\n') + ",bioA:abs_bio13-bio14"+"\n")
out["count_lines"] = 0
checked.append(out)
stop = False
while not stop:
lines = [fp.readline() for fp in fps]
if numpy.prod([len(line) for line in lines]) == 0:
stop = True
else:
for out in checked:
inclus = False
if out["thres_type"] == "num":
v = float(lines[out["thres_side"]].split(",")[out["colid"]])
inclus = ("thres_min" not in out or v >= out["thres_min"]) and \
("thres_max" not in out or v <= out["thres_max"])
elif out["thres_type"] == "cat":
v = lines[out["thres_side"]].split(",")[heads[out["thres_side"]][out["thres_col"]]]
inclus = (v == out["thres_val"])
if inclus:
for li, l in enumerate(lines):
if li == out["thres_side"]:
out["fps"][li].write(",".join([p for (pi, p) in \
enumerate(l.strip().split(",")) if pi != excl])+"\n")
else:
# out["fps"][li].write(l)
# pdb.set_trace()
parts = l.strip().split(",")
valA = abs(float(parts[heads[li]['bio13:PWetM']]) - float(parts[heads[li]['bio14:PDryM']]))
# valB = float(parts[heads[li]['bio4:TSeason']])
# if valA < 232 and float(parts[heads[li]['bio7:TRngY']]) > 30:
# valB *= 10
# out["fps"][li].write(l.strip('\n') + (",%d,%d" % (valA, valB)) +"\n")
out["fps"][li].write(l.strip('\n') + (",%f" % valA) +"\n")
out["count_lines"] += 1
for out in checked:
for fp in out["fps"]:
fp.close()
if out["count_lines"] == 0:
for fn in out["fns"]:
os.remove(fn)
print "EMPTY %s removed..." % (", ".join(out["fns"]))
for fp in fps:
fp.close()
return checked
def round_values(in_file, out_file, round_dgt):
### first check that no information will be lost
with open(in_file) as fp:
head = fp.readline().strip().split(",")
cols = [i for i,p in enumerate(head) if re.match("MEAN_", p)]
D = numpy.loadtxt(in_file, delimiter=",", skiprows=2, usecols=cols)
print ">>> CHECK FOR LOSS OF INFO (all values should be True)"
print [numpy.unique(numpy.around(numpy.unique(D[:,i]), round_dgt)).shape[0] == numpy.unique(D[:,i]).shape[0] for i in range(D.shape[1])]
### then round values
fo = open(out_file, "w")
head = None
fmt = "%."+str(round_dgt)+"f"
with open(in_file) as fp:
for line in fp:
parts = line.strip().split(",")
if head is None:
head = parts
fo.write(line)
elif parts[0] == "enabled_col":
fo.write(line)
else:
for i,p in enumerate(head):
if re.match("MEAN_", p):
parts[i] = fmt % numpy.around(float(parts[i]), round_dgt)
fo.write(",".join(parts)+"\n")
def collect_all(files_in, tk, continents, suffixes, round_dgt):
for suffix in suffixes:
for ffi, file_in in enumerate(files_in):
parts = file_in.split("_")
parts[-2]+= suffix
fname = "_".join(parts)
fpo = open(fname % tk, "w")
if ffi == 0:
fname_splits = fname % (tk+"-splits")
fpos = open(fname_splits, "w")
head = False
for continent in continents:
if os.path.exists(fname % continent):
with open(fname % continent) as fp:
for li, line in enumerate(fp):
if li == 0:
if not head:
if ffi == 0:
fpos.write(line.strip()+",folds_split_C\n")
fpos.write(",".join(["enabled_col"]+ \
["T" for i in range(len(line.strip().split(","))-2)]+["F,F\n"]))
fpo.write(line)
if ffi == 0:
fpo.write(",".join(["enabled_col"]+ \
["T" for i in range(len(line.strip().split(","))-1)])+"\n")
else:
fpo.write(",".join(["enabled_col"]+ \
["T" for i in range(len(line.strip().split(","))-2)])+",F\n")
head = True
else:
if ffi == 0:
fpos.write(line.strip()+",F:%s\n" % continent)
fpo.write(line)
if ffi == 0:
fpos.close()
fpo.close()
parts = fname_splits.split(".")
fname_rnd = ".".join(parts[:-1])+("_rounded%d." % round_dgt) + parts[-1]
round_values(fname_splits, fname_rnd, round_dgt)
def load_ordfam(traits_file, keep_ordfam, key_species):
data_ordfam = {}
head_ordfam = None
sep = "\t"
with open(traits_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_ordfam is None:
head_ordfam = dict([(v,k) for (k,v) in enumerate(parts)])
else:
if True:
values = []
for kv in keep_ordfam:
if kv == "FAMILY":
for f,t in [("Galagonidae", "Galagidae"),
("Loridae", "Lorisidae"),
("Rhinoceratidae", "Rhinocerotidae")]:
if parts[head_ordfam[kv]] == f:
parts[head_ordfam[kv]] = t
if parts[head_ordfam[kv]] == NA_val:
print parts[head_ordfam[key_species]], kv, "MISSING"
values.append(0)
else:
values.append(parts[head_ordfam[kv]])
data_ordfam[parts[head_ordfam[key_species]]] = values
# except ValueError:
# print parts[head_ordfam[key_species]], "MISSING"
return data_ordfam, head_ordfam
def aggregate_counts(occurence_file, data_ordfam, head_ordfam, keep_ordfam, lines_bio=None):
data_occurence = {}
head_occurence = None
sep = ","
counts = {}
# keep_ordfam = [ss.replace("_Kari", "") for ss in keep_ordfam]
for ck in [0,3]:
counts[ck] = dict([("SITES",0)]+[(kk, {}) for kk in keep_ordfam])
with open(occurence_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_occurence is None:
head_occurence = dict([(k,v) for (k,v) in enumerate(parts)])
elif lines_bio is None or parts[0] in lines_bio:
try:
present = [head_occurence[i] for (i,v) in enumerate(parts) if v =="1"]
except ValueError:
print line
pdb.set_trace()
nb_spc = len(present)
data_mat = [data_ordfam[p] for p in present]
for ck in counts.keys():
if nb_spc >= ck:
counts[ck]["SITES"] += 1
for i, cs in enumerate(map(set, zip(*data_mat))):
for ck in counts.keys():
if nb_spc >= ck:
for cc in cs:
counts[ck][keep_ordfam[i]][cc] = counts[ck][keep_ordfam[i]].get(cc, 0) + 1
return counts
def make_counts_table(data_ordfam, continents, counts_all):
pairs = sorted(set([(ka, kb) for (kb,ka) in data_ordfam.values()]))
table = """\\begin{table}[h]
\\caption{Number of sites from each continent containing taxa from the given order or family, after/before filtering out sites with fewer than three taxa.}\\label{fig:spc_counts}
\\vspace{2ex} \\centering
\\begin{tabular}{@{\\hspace*{3ex}}l@{\\hspace*{2ex}}ccr@{~/~}rc@{\\hspace*{2ex}}cr@{~/~}rc@{\\hspace*{2ex}}cr@{~/~}rc@{\\hspace*{2ex}}cr@{~/~}rc@{\\hspace*{3ex}}} \n\\toprule\n"""
table += " & & "+ "&".join(["\\multicolumn{4}{c}{\\textsc{%s}}" % c for c in continents]) +" \\\\\n\\midrule\n"
table += " & & & ".join(["Nb.\ sites" ] + ["%d & %d" % tuple([counts_all[continent][ck]["SITES"] for ck in [3,0]]) for continent in continents])+" & \\\\\n"
for pi, pair in enumerate(pairs):
if pi == 0 or pairs[pi-1][0] != pair[0]:
table += "[0.5em]\n"+" & & & ".join(["\\textbf{\\textit{%s}}" % pair[0]] + ["%d & %d" % tuple([counts_all[continent][ck]["ORDER"].get(pair[0], 0) for ck in [3,0]]) for continent in continents])+" & \\\\\n"
table += " & & & ".join(["\\textit{%s}" % pair[1]] + ["%d & %d" % tuple([counts_all[continent][ck]["FAMILY"].get(pair[1], 0) for ck in [3,0]]) for continent in continents])+" & \\\\\n"
table += """\\bottomrule\n\\end{tabular}\n\\end{table}"""
return table
### MAIN
data_traits, head_traits = load_traits(traits_file, keep_traits, bool_traits, key_species)
bio_leg = load_legend_bio(bio_legend_file)
bio_leg.update({"lon_bio":"longitude","lat_bio":"latitude", "SITE": "ID"})
lines_bio = load_lines_bio(bio_file_all, ["CONT","NO_SPECIES","NO_ORDERS","NO_FAMILIES","GlobalID"], "SITE", bio_leg)
for continent in continents:
aggregate_traits(occurence_file % continent, agg_file % continent, data_traits, head_traits, bool_traits, lines_bio, bio_file % continent)
filter_nbspc([agg_file % continent, bio_file % continent], files_thres_out)
collect_all([agg_file, bio_file], "all", continents, suffixes=[fto["ext"] for fto in files_thres_out], round_dgt=round_dgt)
############# COMPUTING COUNTS
data_ordfam, head_ordfam = load_ordfam(traits_file, keep_ordfam, key_species)
counts_all = {}
for continent in continents:
counts_all[continent] = aggregate_counts(occurence_file % continent, data_ordfam, head_ordfam, keep_ordfam, lines_bio)
table = make_counts_table(data_ordfam, continents, counts_all)
with open(stats_file, "w") as fo:
fo.write(table)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku migrations management.
Migrations files are put in renku/core/management/migrations directory. Name
of these files has m_1234__name.py format where 1234 is the migration version
and name can be any alphanumeric and underscore combination. Migration files
are sorted based on their lowercase name. Each migration file must define a
public "migrate" function that accepts a client as its argument.
When executing a migration, the migration file is imported as a module and the
"migrate" function is executed. Migration version is checked against the Renku
project version (in .renku/metadata.yml) and any migration which has a higher
version is applied to the project.
"""
import hashlib
import importlib
import json
import os
import re
import shutil
from pathlib import Path
import pkg_resources
from jinja2 import Template
from renku.core.errors import (
DockerfileUpdateError,
MigrationError,
MigrationRequired,
ProjectNotSupported,
TemplateUpdateError,
)
from renku.core.utils.migrate import read_project_version
SUPPORTED_PROJECT_VERSION = 8
def check_for_migration(client):
"""Checks if migration is required."""
if is_migration_required(client):
raise MigrationRequired
elif is_project_unsupported(client):
raise ProjectNotSupported
def is_migration_required(client):
"""Check if project requires migration."""
return is_renku_project(client) and _get_project_version(client) < SUPPORTED_PROJECT_VERSION
def is_project_unsupported(client):
"""Check if this version of Renku cannot work with the project."""
return is_renku_project(client) and _get_project_version(client) > SUPPORTED_PROJECT_VERSION
def is_template_update_possible(client):
"""Check if the project can be updated to a newer version of the project template."""
return _update_template(client, check_only=True)
def is_docker_update_possible(client):
"""Check if the Dockerfile can be updated to a new version of renku-python."""
return _update_dockerfile(client, check_only=True)
def migrate(
client,
force_template_update=False,
skip_template_update=False,
skip_docker_update=False,
skip_migrations=False,
progress_callback=None,
):
"""Apply all migration files to the project."""
template_updated = docker_updated = False
if not is_renku_project(client):
return False, template_updated, docker_updated
if (
not skip_template_update
and client.project.template_source
and (force_template_update or client.project.automated_update)
):
try:
template_updated, _, _ = _update_template(client, progress_callback)
except TemplateUpdateError:
raise
except (Exception, BaseException) as e:
raise TemplateUpdateError("Couldn't update from template.") from e
if not skip_docker_update:
try:
docker_updated = _update_dockerfile(client, progress_callback)
except DockerfileUpdateError:
raise
except (Exception, BaseException) as e:
raise DockerfileUpdateError("Couldn't update renku version in Dockerfile.") from e
if skip_migrations:
return False, template_updated, docker_updated
project_version = _get_project_version(client)
n_migrations_executed = 0
for version, path in get_migrations():
if version > project_version:
module = importlib.import_module(path)
if progress_callback:
module_name = module.__name__.split(".")[-1]
progress_callback(f"Applying migration {module_name}...")
try:
module.migrate(client)
except (Exception, BaseException) as e:
raise MigrationError("Couldn't execute migration") from e
n_migrations_executed += 1
if n_migrations_executed > 0:
client._project = None # NOTE: force reloading of project metadata
client.project.version = str(version)
client.project.to_yaml()
if progress_callback:
progress_callback(f"Successfully applied {n_migrations_executed} migrations.")
return n_migrations_executed != 0, template_updated, docker_updated
def _update_template(client, check_only=False, progress_callback=None):
"""Update local files from the remote template."""
from renku.core.commands.init import fetch_template
project = client.project
if not project.template_version:
return False, None, None
template_manifest, template_folder, template_source, template_version = fetch_template(
project.template_source, project.template_ref, progress_callback
)
current_version = None
if template_source == "renku":
current_version = pkg_resources.parse_version(template_version)
template_version = pkg_resources.parse_version(project.template_version)
if template_version >= current_version:
return False, project.template_version, current_version
else:
if template_version == project.template_version:
return False, project.template_version, template_version
if check_only:
return True, project.template_version, current_version if current_version else template_version
if progress_callback:
progress_callback("Updating project from template...")
template_filtered = [
template_elem for template_elem in template_manifest if template_elem["folder"] == project.template_id
]
if len(template_filtered) == 1:
template_data = template_filtered[0]
else:
raise TemplateUpdateError(f'The template with id "{project.template_id}" is not available.')
template_path = template_folder / template_data["folder"]
metadata = json.loads(project.template_metadata)
template_variables = set(template_data.get("variables", {}).keys())
metadata_keys = set(metadata.keys())
missing_keys = ", ".join(template_variables - metadata_keys)
if missing_keys:
raise TemplateUpdateError(
f"Can't update template, it now requires variable(s) {missing_keys} which were not present on project "
"creation."
)
if not os.path.exists(client.template_checksums):
raise TemplateUpdateError("Can't update template as there are no template checksums set on the project.")
with open(client.template_checksums, "r") as checksum_file:
checksums = json.load(checksum_file)
updated_files = []
for file in template_path.glob("**/*"):
rel_path = file.relative_to(template_path)
destination = client.path / rel_path
try:
# parse file and process it
template = Template(file.read_text())
rendered_content = template.render(metadata)
sha256_hash = hashlib.sha256()
content_bytes = rendered_content.encode("utf-8")
blocksize = 4096
blocks = (len(content_bytes) - 1) // blocksize + 1
for i in range(blocks):
byte_block = content_bytes[i * blocksize : (i + 1) * blocksize]
sha256_hash.update(byte_block)
new_template_hash = sha256_hash.hexdigest()
current_hash = client._content_hash(destination)
local_changes = current_hash != checksums[str(rel_path)]
remote_changes = new_template_hash != checksums[str(rel_path)]
if local_changes:
if remote_changes and str(rel_path) in project.immutable_template_files:
# NOTE: There are local changes in a file that should not be changed by users,
# and the file as updated in the template as well. So the template can't be updated.
raise TemplateUpdateError(
f"Can't update template as immutable template file {rel_path} has local changes."
)
continue
elif not remote_changes:
continue
destination = Path(Template(str(destination)).render(metadata))
destination.write_text(rendered_content)
except IsADirectoryError:
destination.mkdir(parents=True, exist_ok=True)
except TypeError:
shutil.copy(file, destination)
if progress_callback:
updated = "\n".join(updated_files)
progress_callback(f"Updated project from template, updated files:\n{updated}")
return True, project.template_version, current_version if current_version else template_version
def _update_dockerfile(client, check_only=False, progress_callback=None):
"""Update the dockerfile to the newest version of renku."""
from renku import __version__
if not client.docker_path.exists():
return False
if progress_callback:
progress_callback("Updating dockerfile...")
with open(client.docker_path, "r") as f:
dockercontent = f.read()
current_version = pkg_resources.parse_version(__version__)
m = re.search(r"^ARG RENKU_VERSION=(\d+\.\d+\.\d+)$", dockercontent, flags=re.MULTILINE)
if not m:
if check_only:
return False
raise DockerfileUpdateError(
"Couldn't update renku-python version in Dockerfile, as it doesn't contain an 'ARG RENKU_VERSION=...' line."
)
docker_version = pkg_resources.parse_version(m.group(1))
if docker_version >= current_version:
return False
if check_only:
return True
dockercontent = re.sub(
r"^ARG RENKU_VERSION=\d+\.\d+\.\d+$", f"ARG RENKU_VERSION={__version__}", dockercontent, flags=re.MULTILINE,
)
with open(client.docker_path, "w") as f:
f.write(dockercontent)
if progress_callback:
progress_callback("Updated dockerfile.")
return True
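# For reference, the ARG line that _update_dockerfile() looks for and rewrites has this
# shape in the project's Dockerfile (version number illustrative):
#
#   ARG RENKU_VERSION=0.10.3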
def _get_project_version(client):
try:
return int(read_project_version(client))
except ValueError:
return 1
def is_renku_project(client):
"""Check if repository is a renku project."""
try:
return client.project is not None
except ValueError: # Error in loading due to an older schema
return client.renku_metadata_path.exists()
def get_migrations():
"""Return a sorted list of versions and migration modules."""
migrations = []
for file_ in pkg_resources.resource_listdir("renku.core.management", "migrations"):
match = re.search(r"m_([0-9]{4})__[a-zA-Z0-9_-]*.py", file_)
if match is None: # migration files match m_0000__[name].py format
continue
version = int(match.groups()[0])
path = "renku.core.management.migrations.{}".format(Path(file_).stem)
migrations.append((version, path))
migrations = sorted(migrations, key=lambda v: v[1].lower())
return migrations
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
from torchvision.models import resnet
class BasicBlock1d(nn.Module):
def __init__(self, inplanes, planes, stride, size,downsample):
super(BasicBlock1d, self).__init__()
self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=size, stride=stride, bias=False)
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d( planes, planes, kernel_size=1, stride=1, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
self.downsample = nn.Sequential(
nn.Conv1d(inplanes, planes ,kernel_size=size, stride=stride, bias=False),
nn.BatchNorm1d(planes))
self.dropout = nn.Dropout(.2)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool =nn.AdaptiveAvgPool1d(1)
self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
def forward(self, x):
x=x.squeeze(2)
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.bn2(out)
out = self.conv2(out)
#Squeeze-and-Excitation (SE)
original_out = out
out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmoid(out)
out = out.view(out.size(0), out.size(1),1)
out = out * original_out
#resnet
out += residual
out = self.relu(out)
return out
class BasicBlock2d(nn.Module):
def __init__(self, inplanes, planes, stride, size,downsample):
super(BasicBlock2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=(1,size), stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=(1,1), stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = nn.Sequential(
nn.Conv2d(inplanes, planes ,kernel_size=(1,size), stride=stride, bias=False),
nn.BatchNorm2d(planes))
self.dropout = nn.Dropout(.2)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool = nn.AdaptiveAvgPool2d((1,1))
self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
def forward(self, x):
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.bn2(out)
out = self.conv2(out)
#Squeeze-and-Excitation (SE)
original_out=out
out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmoid(out)
out = out.view(out.size(0), out.size(1),1,1)
out = out * original_out
#resnet
out += residual
out = self.relu(out)
return out
class ECGNet(nn.Module):
def __init__(self, BasicBlock1d,BasicBlock2d, num_classes=9):
super(ECGNet, self).__init__()
self.sizes=[5,7,9]
self.external = 3
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(12,32, kernel_size=(1,50), stride=(1,2),padding=(0,0),bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.AvgPool = nn.AdaptiveAvgPool1d(1)
self.layers=nn.Sequential()
self.layers.add_module('layer_1',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers.add_module('layer_2',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers.add_module('layer_3',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers1_list=nn.ModuleList()
self.layers2_list=nn.ModuleList()
for size in self.sizes:
self.layers1=nn.Sequential()
self.layers1.add_module('layer{}_1_1'.format(size),self._make_layer( BasicBlock2d,inplanes=32, planes=32,blocks=32,
stride=(1,1),size=size))
self.layers2=nn.Sequential()
self.layers2.add_module('layer{}_2_1'.format(size),self._make_layer(BasicBlock1d,inplanes=32, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_2'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_3'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_4'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers1_list.append(self.layers1)
self.layers2_list.append(self.layers2)
self.fc = nn.Linear(256*len(self.sizes)+self.external, num_classes)
def _make_layer(self, block,inplanes, planes, blocks, stride ,size,downsample = None):
layers = []
for i in range(blocks):
layers.append(block(inplanes, planes, stride, size,downsample))
return nn.Sequential(*layers)
def forward(self, x0, fr):
x0=x0.unsqueeze(2)
x0 = self.conv1(x0)
x0 = self.bn1(x0)
x0 = self.relu(x0)
x0 = self.layers(x0)
xs=[]
for i in range(len(self.sizes)):
x=self.layers1_list[i](x0)
x=torch.flatten(x,start_dim=2,end_dim=3)
x=self.layers2_list[i](x0)
x= self.AvgPool(x)
xs.append(x)
out = torch.cat(xs,dim=2)
out = out.view(out.size(0), -1)
out = torch.cat([out,fr], dim=1)
out = self.fc(out)
return out
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ECGNet(BasicBlock1d,BasicBlock2d,**kwargs)
return model
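# Minimal smoke-test sketch (not from the original source). The model expects a 12-lead ECG
# batch of shape (N, 12, L) plus 3 extra per-record features (self.external = 3); the
# sequence length of 5000 samples below is only an assumption for illustration.
if __name__ == "__main__":
    _model = resnet34(num_classes=9)
    _ecg = torch.randn(2, 12, 5000)    # (batch, leads, samples)
    _extra = torch.randn(2, 3)         # hand-crafted features concatenated before the final fc
    print(_model(_ecg, _extra).shape)  # expected: torch.Size([2, 9])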
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sqlalchemy import MetaData
from py_privatekonomi.utilities import common
class ModelContext(object):
def __init__(self, context = {}):
self.__context = common.as_obj(context)
self.__context._metadata = MetaData()
@property
def metadata(self):
return self.__context._metadata
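# Hedged usage sketch (not in the original): the context simply exposes a shared SQLAlchemy
# MetaData object that table definitions can attach to. Table and column names are made up.
#
#     ctx = ModelContext()
#     accounts = Table("account", ctx.metadata, Column("id", Integer, primary_key=True))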
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
name = 'json2html',
packages = ['json2html'],
version = '1.3.0',
description = 'JSON to HTML Table Representation',
long_description=open('README.rst').read(),
author = 'Varun Malhotra',
author_email = 'varun2902@gmail.com',
url = 'https://github.com/softvar/json2html',
download_url = 'https://github.com/softvar/json2html/tarball/1.3.0',
keywords = ['json', 'HTML', 'Table'],
license = 'MIT',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers = [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
)
|
nilq/baby-python
|
python
|
class Solution:
# @param {integer} A
# @param {integer} B
# @param {integer} C
# @param {integer} D
# @param {integer} E
# @param {integer} F
# @param {integer} G
# @param {integer} H
# @return {integer}
def computeArea(self, A, B, C, D, E, F, G, H):
def area(x, y):
return 0 if x < 0 or y < 0 else x * y
return area(C-A, D-B) + area(G-E, H-F) - \
area(min(C, G) - max(A, E), min(D, H) - max(B, F))
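# Worked example: rectangles (-3, 0, 3, 4) and (0, -1, 9, 2) have areas 6*4 = 24 and
# 9*3 = 27; their overlap is (min(3, 9) - max(-3, 0)) * (min(4, 2) - max(0, -1)) = 3*2 = 6,
# so computeArea returns 24 + 27 - 6 = 45.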
|
nilq/baby-python
|
python
|
def roll_new(name, gender):
pass
def describe(character):
pass
|
nilq/baby-python
|
python
|
"""Integrate with NamecheapDNS."""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_ACCESS_TOKEN, CONF_DOMAIN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.aiohttp_client import async_get_clientsession
DOMAIN = 'namecheapdns'
UPDATE_URL = 'https://dynamicdns.park-your-domain.com/update'
INTERVAL = timedelta(minutes=5)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_setup(hass, config):
"""Initialize the NamecheapDNS component."""
host = config[DOMAIN][CONF_HOST]
domain = config[DOMAIN][CONF_DOMAIN]
token = config[DOMAIN][CONF_ACCESS_TOKEN]
session = async_get_clientsession(hass)
result = yield from _update_namecheapdns(session, host, domain, token)
if not result:
return False
@asyncio.coroutine
def update_domain_interval(now):
"""Update the NamecheapDNS entry."""
yield from _update_namecheapdns(session, host, domain, token)
async_track_time_interval(hass, update_domain_interval, INTERVAL)
return result
@asyncio.coroutine
def _update_namecheapdns(session, host, domain, token):
"""Update NamecheapDNS."""
import xml.etree.ElementTree as ET
params = {
'host': host,
'domain': domain,
'password': token,
}
resp = yield from session.get(UPDATE_URL, params=params)
xml_string = yield from resp.text()
root = ET.fromstring(xml_string)
err_count = root.find('ErrCount').text
if int(err_count) != 0:
_LOGGER.warning('Updating Namecheap domain %s failed', domain)
return False
return True
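# Example configuration.yaml entry (illustrative; the host/domain/token values are made up,
# the keys follow CONFIG_SCHEMA above):
#
#   namecheapdns:
#     host: ha
#     domain: example.com
#     access_token: YOUR_DYNAMIC_DNS_PASSWORD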
|
nilq/baby-python
|
python
|
celsius = float(input('Insira a temperatura em °C:'))
fahrenheit = celsius * 1.8 + 32
kelvin = celsius + 273.15
print(f'{celsius}°C vale {fahrenheit}°F e {kelvin}K.')
|
nilq/baby-python
|
python
|
"""
A Python package module for simulating falling objects with simple aerodynamic drag.
Developed by FIRST Robotics Competition Team 6343 - Steel Ridge Robotics
Strong
Trustworthy
Empowering
Effective
Leadership
"""
__VERSION__ = "1.0.0b1"
|
nilq/baby-python
|
python
|
"""
pluginName = dashcam
Senario Continuous Video Series like a Dashcam
----------------------------------------------
You want to take a series of videos like a dash cam.
You can manage disk space and delete oldest videos when disk
is close to full or run video session for a set number of minutes.
Edit the settings below to suit your project needs.
if config.py variable pluginEnable=True and pluginName=dashcam
then these settings will override the config.py settings.
"""
# Customize Settings Below to Suit your Project Needs
# ---------------------------------------------------
imageWidth = 1280 # default= 1280 Full Size video Width in px
imageHeight = 720 # default= 720 Full Size video Height in px
imageVFlip = False # default= False True Flips image Vertically
imageHFlip = False # default= False True Flips image Horizontally
showDateOnImage = True # default= True False=Do Not display date/time text on images
videoPath = "media/dashcam" # default= media/dashcam Storage folder path for videos
videoPrefix = "dc-" # prefix for dasbca video filenames
videoDuration = 120 # default= 120 seconds (2 min) for each video recording
videoTimer = 0 # default= 0 0=Continuous or Set Total Session Minutes to Record then Exit
videoFPS = 30 # default= 30 fps. Note slow motion can be achieved at 640x480 image resolution at 90 fps
videoNumOn = False # default= False False=filenames by date/time True=filenames by sequence Number
# Use settings below if motionNumOn = True
videoNumRecycle = False # default= False when NumMax reached restart at NumStart instead of exiting
videoNumStart = 1000 # default= 1000 Start of video filename number sequence
videoNumMax = 0 # default= 20 Max number of videos desired. 0=Continuous
# Manage Disk Space Settings
#---------------------------
spaceTimerHrs = 1 # default= 0 0=off or specify hours frequency to perform free disk space check
spaceFreeMB = 500 # default= 500 Target Free space in MB Required.
spaceMediaDir = videoPath # default= videoPath per variable above
spaceFileExt = 'mp4' # default= '' File extension to Delete Oldest Files
# Do Not Change these Settings
# ----------------------------
videoRepeatOn = True # Turn on Video Repeat Mode IMPORTANT Overrides timelapse and motion
|
nilq/baby-python
|
python
|
from dotenv import load_dotenv  # python-dotenv

load_dotenv('.env.txt')
|
nilq/baby-python
|
python
|
import discord
from discord.ext import commands
from cogs.utils import checks
from .utils.dataIO import dataIO, fileIO
from __main__ import send_cmd_help
import os
import io
import requests
import json
import asyncio
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
creditIcon = "https://i.imgur.com/TP8GXZb.png"
credits = "Bot by GR8 | Titan"
class warlog:
    """Clash Royale Clan War log"""

    def __init__(self, bot):
        self.bot = bot
        self.auth = dataIO.load_json('cogs/auth.json')
        self.clans = dataIO.load_json('cogs/clans.json')

    def getAuth(self):
        return {"auth" : self.auth['token']}

    def save_clans(self):
        dataIO.save_json('cogs/clans.json', self.clans)

    def update_clans(self):
        self.clans = dataIO.load_json('cogs/clans.json')

    async def getLeague(self, trophies):
        if trophies >= 3000:
            return "legend"
        elif trophies >= 1500:
            return "gold"
        elif trophies >= 600:
            return "silver"
        else:
            return "bronze"

    async def findRank(self, lst, key, value):
        for i, dic in enumerate(lst):
            if dic[key] == value:
                return i
        return -1

    async def genImage(self, leagueName, trophies, rank, clanName, participants, wins, crowns):
        font1 = ImageFont.truetype("data/warlog/ClashRoyale.ttf", 27)
        font2 = ImageFont.truetype("data/warlog/ClashRoyale.ttf", 37)
        font3 = ImageFont.truetype("data/warlog/ClashRoyale.ttf", 41)

        img = Image.open("data/warlog/images/warlog.jpg")
        draw = ImageDraw.Draw(img)

        league = Image.open("data/warlog/images/{}.png".format(leagueName))
        img.paste(league, (410, 55), league)  # league badge
        draw.text((493, 75), "{:,}".format(int(trophies)), (255, 255, 255), font=font1)  # Trophies

        # thin border around the rank text
        x, y = 284, 192
        fillcolor = "white"
        shadowcolor = "black"
        draw.text((x-2, y-2), rank, font=font2, fill=shadowcolor)
        draw.text((x+2, y-2), rank, font=font2, fill=shadowcolor)
        draw.text((x-2, y+2), rank, font=font2, fill=shadowcolor)
        draw.text((x+2, y+2), rank, font=font2, fill=shadowcolor)
        draw.text((x, y), rank, font=font2, fill=fillcolor)  # Rank

        draw.text((347, 194), clanName, (255, 255, 255), font=font3)  # Clan Name
        draw.text((682, 340), participants, (255, 255, 255), font=font1)  # Participants
        draw.text((682, 457), wins, (255, 255, 255), font=font1)  # Wins
        draw.text((682, 575), crowns, (255, 255, 255), font=font1)  # Crowns

        # scale down and return
        scale = 0.5
        scaled_size = tuple(int(dim * scale) for dim in img.size)  # thumbnail expects integer dimensions
        img.thumbnail(scaled_size)

        return img

    async def getWarData(self, channel):
        self.update_clans()
        for clankey in self.clans.keys():
            try:
                clandata = requests.get('https://api.royaleapi.com/clan/{}/warlog'.format(self.clans[clankey]['tag']), headers=self.getAuth(), timeout=10).json()
            except (requests.exceptions.Timeout, json.decoder.JSONDecodeError):
                return
            except requests.exceptions.RequestException as e:
                print(e)
                return

            standings = clandata[0]['standings']
            clanRank = await self.findRank(standings, "tag", self.clans[clankey]['tag'])
            warTrophies = standings[clanRank]['warTrophies']

            if self.clans[clankey]['warTrophies'] != warTrophies:
                clanLeague = await self.getLeague(warTrophies)
                image = await self.genImage(clanLeague, str(warTrophies), str(clanRank+1), standings[clanRank]['name'], str(standings[clanRank]['participants']), str(standings[clanRank]['wins']), str(standings[clanRank]['crowns']))

                filename = "warlog-{}.png".format(clankey)
                with io.BytesIO() as f:
                    image.save(f, "PNG")
                    f.seek(0)
                    await self.bot.send_file(channel, f, filename=filename)

                self.clans[clankey]['warTrophies'] = warTrophies
                self.save_clans()

            await asyncio.sleep(1)

    @commands.command(pass_context=True)
    async def warlog(self, ctx):
        """Track Clan wars"""
        channel = ctx.message.channel
        await self.getWarData(channel)


def check_clans():
    c = dataIO.load_json('cogs/clans.json')
    for clankey in c.keys():
        if 'members' not in c[clankey]:
            c[clankey]['members'] = []
    dataIO.save_json('cogs/clans.json', c)


def check_files():
    f = "cogs/auth.json"
    if not fileIO(f, "check"):
        print("enter your RoyaleAPI token in auth.json...")
        fileIO(f, "save", {"token" : "enter your RoyaleAPI token here!"})


def setup(bot):
    check_files()
    check_clans()
    bot.add_cog(warlog(bot))
|
nilq/baby-python
|
python
|
"""
Module to implement a plugin that looks for fenced code blocks without a specified language.
"""
from pymarkdown.plugin_manager import Plugin, PluginDetails


class RuleMd040(Plugin):
    """
    Class to implement a plugin that looks for fenced code blocks without a specified language.
    """

    def get_details(self):
        """
        Get the details for the plugin.
        """
        return PluginDetails(
            # code, language
            plugin_name="fenced-code-language",
            plugin_id="MD040",
            plugin_enabled_by_default=True,
            plugin_description="Fenced code blocks should have a language specified",
            plugin_version="0.5.0",
            plugin_interface_version=1,
        )  # https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md040---fenced-code-blocks-should-have-a-language-specified

    def next_token(self, context, token):
        """
        Event that a new token is being processed.
        """
        if token.is_fenced_code_block:
            if not token.extracted_text.strip():
                self.report_next_token_error(context, token)
|
nilq/baby-python
|
python
|
from django.dispatch import Signal
# providing_args: "orderId", "recipientAmount"
result_received = Signal()
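
# Illustrative sketch (not from the original module): how a receiver might be
# wired to this signal and how the signal could be sent with the documented
# keyword arguments.  The names payment_received_handler and PaymentBackend
# are assumptions made up for the example.
def payment_received_handler(sender, orderId=None, recipientAmount=None, **kwargs):
    # React to a completed payment result here.
    print("order %s paid: %s" % (orderId, recipientAmount))

result_received.connect(payment_received_handler)

# Somewhere in the payment backend, after a result has been verified:
# result_received.send(sender=PaymentBackend, orderId=order.id, recipientAmount=amount)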
|
nilq/baby-python
|
python
|
"""
Generic module for managing manual data transfer jobs using Galaxy's built-in file browser.
This module can be used by various external services that are configured to transfer data manually.
"""
import logging, urllib2, re, shutil
from data_transfer import *
log = logging.getLogger( __name__ )
__all__ = [ 'ManualDataTransferPlugin' ]
class ManualDataTransferPlugin( DataTransfer ):
    def __init__( self, app ):
        super( ManualDataTransferPlugin, self ).__init__( app )

    def create_job( self, trans, **kwd ):
        if 'sample' in kwd and 'sample_datasets' in kwd and 'external_service' in kwd and 'external_service_type' in kwd:
            sample = kwd[ 'sample' ]
            sample_datasets = kwd[ 'sample_datasets' ]
            external_service = kwd[ 'external_service' ]
            external_service_type = kwd[ 'external_service_type' ]
            # TODO: is there a better way to store the protocol?
            protocol = external_service_type.data_transfer.keys()[0]
            host = external_service.form_values.content[ 'host' ]
            user_name = external_service.form_values.content[ 'user_name' ]
            password = external_service.form_values.content[ 'password' ]
            # TODO: In the future, we may want to implement a way for the user to associate a selected file with one of
            # the run outputs configured in the <run_details><results> section of the external service config file.  The
            # following was a first pass at implementing something (the datatype was included in the sample_dataset_dict),
            # but without a way for the user to associate stuff it's useless.  However, allowing the user this ability may
            # open a can of worms, so maybe we shouldn't do it???
            #
            #for run_result_file_name, run_result_file_datatype in external_service_type.run_details[ 'results' ].items():
            #    # external_service_type.run_details[ 'results' ] looks something like: {'dataset1_name': 'dataset1_datatype'}
            #    if run_result_file_datatype in external_service.form_values.content:
            #        datatype = external_service.form_values.content[ run_result_file_datatype ]
            #
            # When the transfer is automatic (the process used in the SMRT Portal plugin), the datasets and datatypes
            # can be matched up to those configured in the <run_details><results> settings in the external service type config
            # (e.g., pacific_biosciences_smrt_portal.xml).  However, that's a bit trickier here since the user is manually
            # selecting files for transfer.
            sample_datasets_dict = {}
            for sample_dataset in sample_datasets:
                sample_dataset_id = sample_dataset.id
                sample_dataset_dict = dict( sample_id = sample_dataset.sample.id,
                                            name = sample_dataset.name,
                                            file_path = sample_dataset.file_path,
                                            status = sample_dataset.status,
                                            error_msg = sample_dataset.error_msg,
                                            size = sample_dataset.size,
                                            external_service_id = sample_dataset.external_service.id )
                sample_datasets_dict[ sample_dataset_id ] = sample_dataset_dict
            params = { 'type' : 'init_transfer',
                       'sample_id' : sample.id,
                       'sample_datasets_dict' : sample_datasets_dict,
                       'protocol' : protocol,
                       'host' : host,
                       'user_name' : user_name,
                       'password' : password }
        elif 'transfer_job_id' in kwd:
            params = { 'type' : 'finish_transfer',
                       'protocol' : kwd[ 'result' ][ 'protocol' ],
                       'sample_id' : kwd[ 'sample_id' ],
                       'result' : kwd[ 'result' ],
                       'transfer_job_id' : kwd[ 'transfer_job_id' ] }
        else:
            log.error( 'No job was created because kwd does not include "samples" and "sample_datasets" or "transfer_job_id".' )
            return
        deferred_job = self.app.model.DeferredJob( state=self.app.model.DeferredJob.states.NEW,
                                                   plugin='ManualDataTransferPlugin',
                                                   params=params )
        self.sa_session.add( deferred_job )
        self.sa_session.flush()
        log.debug( 'Created a deferred job in the ManualDataTransferPlugin of type: %s' % params[ 'type' ] )
        # TODO: error reporting to caller (if possible?)

    def check_job( self, job ):
        if self._missing_params( job.params, [ 'type' ] ):
            return self.job_states.INVALID
        if job.params[ 'type' ] == 'init_transfer':
            if job.params[ 'protocol' ] in [ 'http', 'https' ]:
                raise Exception( "Manual data transfer is not yet supported for http(s)." )
            elif job.params[ 'protocol' ] == 'scp':
                if self._missing_params( job.params, [ 'protocol', 'host', 'user_name', 'password', 'sample_id', 'sample_datasets_dict' ] ):
                    return self.job_states.INVALID
                # TODO: what kind of checks do we need here?
                return self.job_states.READY
            return self.job_states.WAIT
        if job.params[ 'type' ] == 'finish_transfer':
            if self._missing_params( job.params, [ 'transfer_job_id' ] ):
                return self.job_states.INVALID
            # Get the TransferJob object and add it to the DeferredJob so we only look it up once.
            if not hasattr( job, 'transfer_job' ):
                job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) )
            state = self.app.transfer_manager.get_state( job.transfer_job )
            if not state:
                log.error( 'No state for transfer job id: %s' % job.transfer_job.id )
                return self.job_states.WAIT
            if state[ 'state' ] in self.app.model.TransferJob.terminal_states:
                return self.job_states.READY
            log.debug( "Checked on finish transfer job %s, not done yet." % job.id )
            return self.job_states.WAIT
        else:
            log.error( 'Unknown job type for ManualDataTransferPlugin: %s' % str( job.params[ 'type' ] ) )
            return self.job_states.INVALID
|
nilq/baby-python
|
python
|
# coding: utf-8
# In[36]:
# In[39]:
import numpy as np
import powerlaw
edges= np.array([[1,2],[0,2],[0,3],[2,3],[3,4],[4,1]])
class karpatiGraphSolution:
    def __init__(self, edges):
        assert type(edges) == np.ndarray, "input is not an edge list"
        self.edgeList = edges
        self.numNodes = np.amax(edges) + 1

    def give_me_matrix(self):
        res = [[0] * self.numNodes for i in range(self.numNodes)]
        for edge in self.edgeList:
            res[edge[0]][edge[1]] = 1
        self.adjMat = res
        return res

    def isConnected(self):
        rowSums = np.asarray(self.adjMat).sum(0)
        colSums = np.asarray(self.adjMat).sum(1)
        print(rowSums)
        print(colSums)
        total = rowSums + colSums
        res = 0 not in total
        return res

    def isStronglyConnected(self):
        rowSums = np.asarray(self.adjMat).sum(0)
        colSums = np.asarray(self.adjMat).sum(1)
        print(rowSums)
        print(colSums)
        # parenthesise each membership test; "&" binds tighter than "not in"
        res = (0 not in rowSums) and (0 not in colSums)
        return res

    def MST(self):
        assert self.isConnected(), "Sorry, your graph is not connected"
        treeMST = set()
        nodeInMST = set()
        nodeInMST.add(self.edgeList[0][0])
        print(nodeInMST)
        for edge in self.edgeList:
            if (edge[1] in nodeInMST and edge[0] not in nodeInMST):
                print("LOL")
                treeMST.add((edge[0], edge[1]))
                nodeInMST.add(edge[0])
                print(nodeInMST)
            elif (edge[0] in nodeInMST and edge[1] not in nodeInMST):
                print("LOL2")
                nodeInMST.add(edge[1])
                treeMST.add((edge[1], edge[0]))
                print(nodeInMST)
            #nodeInMST.add(edge[1])
            if len(nodeInMST) == self.numNodes:
                print("BREAKING")
                break
        return(treeMST)

    def fitPowerLaw(self):
        # get degree distribution
        rowSums = np.asarray(self.adjMat).sum(0)
        colSums = np.asarray(self.adjMat).sum(1)
        total = rowSums + colSums
        results = powerlaw.Fit(total)
        print("LOL")
        return(results.power_law.alpha, results.power_law.xmin)


sol = karpatiGraphSolution(edges)
cucc = sol.give_me_matrix()
cucc3 = sol.MST()
print(cucc3)
cucc4 = sol.fitPowerLaw()
print(cucc4)
# In[144]:
var = 100
if var == 200:
    print("1 - Got a true expression value")
    print(var)
elif var == 150:
    print("2 - Got a true expression value")
    print(var)
elif var == 100:
    print("3 - Got a true expression value")
    print(var)
else:
    print("4 - Got a false expression value")
    print(var)

print("Good bye!")
# In[ ]:
# In[ ]:
|
nilq/baby-python
|
python
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class GuildManagerAI(DistributedObjectAI):
    notify = DirectNotifyGlobal.directNotify.newCategory('GuildManagerAI')
|
nilq/baby-python
|
python
|
from xviz.builder.base_builder import XVIZBaseBuilder, CATEGORY
from xviz.v2.core_pb2 import Pose, MapOrigin
class XVIZPoseBuilder(XVIZBaseBuilder):
    """
    # Reference
    [@xviz/builder/xviz-pose-builder](https://github.com/uber/xviz/blob/master/modules/builder/src/builders/xviz-pose-builder.js)
    """
    def __init__(self, metadata, logger=None):
        super().__init__(CATEGORY.POSE, metadata, logger)

        self._poses = None
        self.reset()

    def reset(self):
        super().reset()
        self._category = CATEGORY.POSE
        self._temp_pose = Pose()

    def map_origin(self, longitude, latitude, altitude):
        self._temp_pose.map_origin.longitude = longitude
        self._temp_pose.map_origin.latitude = latitude
        self._temp_pose.map_origin.altitude = altitude
        return self

    def position(self, x, y, z):
        self._temp_pose.position.extend([x, y, z])
        return self

    def orientation(self, roll, pitch, yaw):
        self._temp_pose.orientation.extend([roll, pitch, yaw])
        return self

    def timestamp(self, timestamp):
        self._temp_pose.timestamp = timestamp
        return self

    def _flush(self):
        if not self._poses:
            self._poses = {}
        self._poses[self._stream_id] = self._temp_pose
        self._temp_pose = Pose()

    def get_data(self):
        if self._stream_id:
            self._flush()
        return self._poses
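
# Illustrative usage sketch (not part of the original module).  It assumes a
# stream() selector inherited from XVIZBaseBuilder and a pre-built metadata
# object; both are assumptions here, not guarantees about the real API.
#
# builder = XVIZPoseBuilder(metadata)
# builder.stream("/vehicle_pose")
# builder.timestamp(1.0) \
#        .map_origin(8.43, 49.01, 115.0) \
#        .position(0.0, 0.0, 0.0) \
#        .orientation(0.0, 0.0, 0.5)
# poses = builder.get_data()   # e.g. {"/vehicle_pose": <Pose>}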
|
nilq/baby-python
|
python
|