| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
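A minimal sketch of filtering rows on these quality signals, assuming the split has been exported to a pandas-readable file (the file name `code_sample.parquet` is hypothetical):

```python
import pandas as pd

# Load an export of the rows shown below (hypothetical file name).
df = pd.read_parquet("code_sample.parquet")

# Keep Python files that look human-written and low on near-duplication,
# using three of the quality-signal columns from the schema above.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.2)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.6)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]])
```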
| field | value |
|---|---|
| hexsha | 6002ace185388c888ba705ff8de6efa12833e498 |
| size | 5,226 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | pylibcontainer/image.py |
| repo name (stars/issues/forks) | joaompinto/pylibcontainer |
| repo head hexsha | 794f12e7511dc2452521bad040a7873eff40f50b |
| licenses | ["Apache-2.0"] |
| max_stars_count | 7 (2018-05-14T14:35:29.000Z to 2020-12-04T11:26:19.000Z) |
| max_issues_count | 8 (2018-05-16T17:52:09.000Z to 2019-05-26T15:54:45.000Z) |
| max_forks_count | null |

content:

```python
from __future__ import print_function
import os
import shutil
import hashlib
import requests
import click
from tempfile import NamedTemporaryFile
from hashlib import sha256
from os.path import expanduser, join, exists, basename
from .utils import HumanSize
from .tar import extract_layer
from . import trust
from . import container
from .colorhelper import print_info, print_error, print_warn, print_success
from .colorhelper import success
from .image_index import get_url
from clint.textui import progress
from dateutil.parser import parse as parsedate
from datetime import datetime
CACHE_PATH = join(expanduser("~"), ".pylibcontainer", "images_cache")
class Cache(object):
    """ Provides an image caching mechanism on disk """

    cache_dir = CACHE_PATH
def __init__(self):
if not exists(CACHE_PATH):
os.makedirs(CACHE_PATH, 0o700)
def get(self, cache_key, default=None):
""" return info for cached file """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
if exists(cache_fn):
file_stat = os.stat(cache_fn)
last_modified = datetime.fromtimestamp(file_stat.st_mtime)
file_size = file_stat.st_size
return cache_fn, cache_hash, last_modified, file_size
return default
def put(self, filename, cache_key):
""" put a file into cache """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
shutil.move(filename, cache_fn)
return cache_hash, cache_fn
def download(image_url):
""" Download image (if not found in cache) and return it's filename """
response = requests.head(image_url)
file_size = remote_file_size = int(response.headers.get("Content-Length"))
remote_last_modified = parsedate(response.headers.get("Last-Modified")).replace(
tzinfo=None
)
remote_is_valid = response.status_code == 200 and file_size and remote_last_modified
# Check if image is on cache
cache = Cache()
cached_image = cache.get(image_url)
if cached_image:
if remote_is_valid:
cache_fn, cache_hash, last_modified, file_size = cached_image
if remote_file_size == file_size and remote_last_modified < last_modified:
print_info("Using file from cache", CACHE_PATH)
return cache_hash, cache_fn
print_info("Downloading new remote file because an update was found")
else:
print_warn("Unable to check the status for " + image_url)
print_warn("Assuming local cache is valid")
# Not cached, and no valid remote information was found
if not remote_is_valid:
print_error(
"Unable to get file, http_code=%s, size=%s, last_modified=%s"
% (response.status_code, remote_file_size, remote_last_modified)
)
exit(2)
    # Download image
print_info(
"Downloading image... ",
"{0} [{1:.2S}]".format(basename(image_url), HumanSize(file_size)),
)
remote_sha256 = hashlib.sha256()
response = requests.get(image_url, stream=True)
with NamedTemporaryFile(delete=False) as tmp_file:
for chunk in progress.bar(
response.iter_content(chunk_size=1024), expected_size=(file_size / 1024) + 1
):
if chunk:
remote_sha256.update(chunk)
tmp_file.write(chunk)
tmp_file.flush()
# Verify image integrity
trust_verify = trust.verify(image_url, tmp_file.name, remote_sha256.hexdigest())
if not trust_verify or not trust_verify.valid or not trust_verify.username:
print_error("Integrity/authenticity error - GPG signature mismatch!")
exit(3)
print("{0:>10}: {1}".format("GPG Signer", success(trust_verify.username)))
print("{0:>10}: {1}".format("GPG ID", success(trust_verify.pubkey_fingerprint)))
print("{0:>10}: {1}".format("Creation", success(trust_verify.creation_date)))
return cache.put(tmp_file.name, image_url)
@click.command()
@click.argument("image_url")
@click.option("--as_root", is_flag=True)
@click.option("--overlay", "-o", multiple=True)
@click.argument("command", nargs=-1)
def run(image_url, command, as_root, overlay):
url = get_url(image_url)
image_url = url or image_url
if not image_url:
print_info("No index was found for image", image_url)
exit(5)
is_validate_only = False
if not command:
command = ["/bin/sh"]
image_protocol = image_url.split(":")[0].lower()
if image_protocol in ["http", "https"]:
_, image_fn = download(image_url)
else:
        _, image_fn = sha256(image_url.encode()).hexdigest(), image_url
rootfs = extract_layer(image_fn)
if len(command) == 1 and command[0] == "-":
is_validate_only = True
print("Validating container setup with the rootfs")
else:
print_info("Executing", " ".join(command))
_, exit_code = container.runc(rootfs, command, as_root, overlay)
if exit_code != 0:
print_error("Last command returned an error")
elif is_validate_only:
print_success("OK")
```

| quality signal (`_quality_signal` suffix omitted) | value |
|---|---|
| avg_line_length | 36.291667 |
| max_line_length | 88 |
| alphanum_fraction | 0.670876 |
| qsc_code_num_words | 694 |
| qsc_code_num_chars | 5,226 |
| qsc_code_mean_word_length | 4.815562 |
| qsc_code_frac_words_unique | 0.268012 |
| qsc_code_frac_chars_top_2grams | 0.045482 |
| qsc_code_frac_chars_top_3grams | 0.021544 |
| qsc_code_frac_chars_top_4grams | 0.008079 |
| qsc_code_frac_chars_dupe_5grams | 0.109216 |
| qsc_code_frac_chars_dupe_6grams | 0.091562 |
| qsc_code_frac_chars_dupe_7grams | 0.063435 |
| qsc_code_frac_chars_dupe_8grams | 0.063435 |
| qsc_code_frac_chars_dupe_9grams | 0.041891 |
| qsc_code_frac_chars_dupe_10grams | 0.041891 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.015559 |
| qsc_code_frac_chars_whitespace | 0.22522 |
| qsc_code_size_file_byte | 5,226 |
| qsc_code_num_lines | 143 |
| qsc_code_num_chars_line_max | 89 |
| qsc_code_num_chars_line_mean | 36.545455 |
| qsc_code_frac_chars_alphabet | 0.80983 |
| qsc_code_frac_chars_comments | 0.044967 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.077586 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.116307 |
| qsc_code_frac_chars_long_word_length | 0.004473 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.043103 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.163793 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.267241 |
| qsc_codepython_frac_lines_print | 0.146552 |

The rows above are the `*_quality_signal` columns. The raw `qsc_*` counterparts (no suffix) are all 0 for this record, except `qsc_code_frac_words_unique` and `qsc_code_cate_encoded_data`, which are null; `effective` = 1, `hits` = 0.
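The `dupe_*grams` signals above quantify internal repetition. A rough sketch of how such a fraction could be computed (illustrative only; the exact definition used to produce these numbers is not given here):

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    """Fraction of word characters covered by word n-grams occurring more than once."""
    words = text.split()
    total_chars = sum(len(w) for w in words)
    if len(words) < n or total_chars == 0:
        return 0.0
    counts = Counter(tuple(words[i:i + n]) for i in range(len(words) - n + 1))
    # Characters attributed to n-grams whose count exceeds one, over all word chars.
    dupe_chars = sum(sum(len(w) for w in g) * c for g, c in counts.items() if c > 1)
    return min(1.0, dupe_chars / total_chars)
```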
| field | value |
|---|---|
| hexsha | 600533785dbd02d51d6674d42a21d63ffcb7660b |
| size | 16,243 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | experiments/solve_different_methods.py |
| max_stars_repo_name | vishalbelsare/ags_nlp_solver |
| max_issues / max_forks repo_name | sovrasov/Algorithm-of-Global-Search |
| repo head hexsha | 3558e8aae5507285d0c5e74f163c01d09a9cb805 |
| licenses | ["MIT"] |
| max_stars_count | 8 (2018-10-23T11:19:26.000Z to 2022-01-10T19:18:45.000Z) |
| max_issues_count | null |
| max_forks_count | 2 (2018-10-07T20:02:40.000Z to 2018-10-23T11:19:29.000Z) |

content:

```python
import functools
import numpy as np
import math
import argparse
import ags_solver
import go_problems
import nlopt
import sys
from Simple import SimpleTuner
import itertools
from scipy.spatial import Delaunay
from scipy.optimize import differential_evolution
from scipy.optimize import basinhopping
from sdaopt import sda
from stochopy import Evolutionary
from pyOpt import Optimization
from pyOpt import MIDACO
import pyOpt
from shgo import shgo
from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass
from benchmark_tools.plot import plot_cmcs
from benchmark_tools.stats import save_stats, compute_stats
class AGSWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01, mixedFast=False):
params = self.class_name2params(class_name)
params.mixedFastMode = mixedFast
if dist_stop:
params.eps = 0
params.itersLimit = max_iters
self.solver = ags_solver.Solver()
self.solver.SetParameters(params)
self.dist_stop = dist_stop
self.eps = eps
def class_name2params(self, name):
params = ags_solver.Parameters()
if 'grish' in name:
params.r = 3
elif 'gklss2' in name:
params.r = 4.6
elif 'gklsh2' in name:
params.r = 6.5
elif 'gklss3' in name:
params.r = 3.7
elif 'gklsh3' in name:
params.r = 4.4
elif 'gklss4' in name:
params.r = 4.7
elif 'gklsh4' in name:
params.r = 4.9
elif 'gklss5' in name:
params.r = 4
params.evolventDensity = 10
elif 'gklsh5' in name:
params.r = 4
params.evolventDensity = 10
return params
def Solve(self, problem):
self.solver.SetProblem([lambda x: problem.Calculate(x)], *problem.GetBounds())
#self.solver.SetProblem(problem)
if not self.dist_stop:
point, val, idx = self.solver.Solve()
else:
opt_pt = np.array(problem.GetOptimumPoint())
point, val, idx = self.solver.Solve(lambda x: np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
#calcCounters = self.solver.GetCalculationsStatistics()
calcCounters = problem.GetCalculationsStatistics()
return point, val, calcCounters
class SDAWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
ret = sda(lambda x: problem.Calculate(x), None, bounds=list(zip(lb, ub)), \
seed=100, maxfun=self.max_iters, visit=2.72, maxiter=self.max_iters)
n_evals = problem.GetCalculationsStatistics()
return ret.x, ret.fun, n_evals
class SCBasinhoppingWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
#pop_size = self.class_name2params(self.class_name)
class MyBounds(object):
def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ):
self.xmax = np.array(xmax)
self.xmin = np.array(xmin)
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = bool(np.all(x <= self.xmax))
tmin = bool(np.all(x >= self.xmin))
return tmax and tmin
x0 = [.5]*problem.GetDimension()
result = \
basinhopping(lambda x: problem.Calculate(x), x0, accept_test=MyBounds(ub, lb), seed=100, T=10, stepsize=0.3)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
class SCDEWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
bounds = [(l, u) for l, u in zip(lb, ub)]
pop_size = self.class_name2params(self.class_name)
result = \
differential_evolution(
lambda x: problem.Calculate(x), bounds, mutation=(1.1,1.9),
tol=1e-12, maxiter=int(float(self.max_iters) / (pop_size*problem.GetDimension())), popsize=pop_size, disp=False, seed=100)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
from pyevolve import G1DList, GSimpleGA, Selectors, Initializators, Mutators, Consts

class PyEvolveWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
def Solve(self, problem):
lb, ub = problem.GetBounds()
# Genome instance
genome = G1DList.G1DList(2)
genome.setParams(rangemin=lb[0], rangemax=ub[0], bestRawScore=-100, roundDecimal=2)
genome.initializator.set(Initializators.G1DListInitializatorReal)
genome.mutator.set(Mutators.G1DListMutatorRealGaussian)
# The evaluator function (objective function)
genome.evaluator.set(lambda x: problem.Calculate(x) + 100)
# Genetic Algorithm Instance
ga = GSimpleGA.GSimpleGA(genome)
ga.selector.set(Selectors.GRouletteWheel)
ga.minimax = Consts.minimaxType["minimize"]
ga.setGenerations(5000)
ga.setMutationRate(0.05)
ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
# Do the evolution, with stats dump
# frequency of 10 generations
ga.evolve(freq_stats=100)
# Best individual
best = ga.bestIndividual()
print ("\nBest individual score: %.2f" % (best.score - 100,))
print (best)
from bayes_opt import BayesianOptimization
class BOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
def Solve(self, problem):
lb, ub = problem.GetBounds()
bo = BayesianOptimization(lambda x, y: -problem.Calculate([x, y]),
{'x': (lb[0], ub[0]), 'y': (lb[1], ub[1])})
bo.maximize(init_points=5, n_iter=20, kappa=1.5)
n_evals = problem.GetCalculationsStatistics()
opt_val = -bo.res['max']['max_val']
opt_point = [bo.res['max']['max_params']['x'], bo.res['max']['max_params']['y']]
return opt_point, opt_val, n_evals
class SimpleWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.exploration = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
return 0.1
elif 'gklss2' in name:
return 0.15
elif 'gklsh2' in name:
return 0.15
elif 'gklss3' in name:
return 0.15
elif 'gklsh3' in name:
return 0.25
elif 'gklss4' in name:
return 0.2
elif 'gklsh4' in name:
return 0.25
def Solve(self, problem):
objective_function = lambda x: -problem.Calculate(x)
lb, ub = problem.GetBounds()
opt_pt = problem.GetOptimumPoint()
bounds = [[l, u] for l, u in zip(lb, ub)]
points = np.array([point for point in itertools.product(*bounds)])
tri = Delaunay(points)
optimization_domain_vertices = points[tri.simplices]
exploration = self.exploration # optional, default 0.15
tuner = SimpleTuner(optimization_domain_vertices, objective_function, \
exploration_preference=exploration,
stop_criterion=lambda x:np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
tuner.optimize(self.max_iters)
opt_val, opt_point = tuner.get_best()
#tuner.plot() # only works in 2D
n_evals = problem.GetCalculationsStatistics()
return opt_point, -opt_val, n_evals
class NLOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, method=nlopt.GD_STOGO, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.method = method
self.max_iters = max_iters
self.pop_size = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 150
elif 'gklss2' in name:
popsize = 200
elif 'gklsh2' in name:
popsize = 400
elif 'gklss3' in name:
popsize = 1000
elif 'gklsh3' in name:
popsize = 2000
elif 'gklss4' in name:
popsize = 8000
elif 'gklsh4' in name:
popsize = 16000
elif 'gklss5' in name:
popsize = 25000
elif 'gklsh5' in name:
popsize = 30000
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
self.opt = nlopt.opt(self.method, problem.GetDimension())
self.opt.set_local_optimizer(nlopt.opt(nlopt.LN_SBPLX, problem.GetDimension()))
self.opt.set_lower_bounds(lb)
self.opt.set_upper_bounds(ub)
self.opt.set_min_objective(lambda x, grad: problem.Calculate(x))
self.opt.set_maxeval(self.max_iters)
self.opt.set_xtol_rel(1e-13)
if self.method == nlopt.GN_CRS2_LM:
self.opt.set_population(self.pop_size)
x = self.opt.optimize([.5]*problem.GetDimension())
minf = self.opt.last_optimum_value()
n_evals = problem.GetCalculationsStatistics()
return x, minf, n_evals
class StochOpyWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.popsize = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
objective_function = lambda x: 50 + problem.Calculate(x)
lb, ub = problem.GetBounds()
ea = Evolutionary(objective_function, lower=lb, upper=ub, popsize=self.popsize, \
max_iter=int(self.max_iters/self.popsize), eps1=1e-16, eps2=1e-16)
xopt, gfit = ea.optimize(solver='cpso', sync=False, CR=0.4, F=0.5)
n_evals = problem.GetCalculationsStatistics()
return xopt, gfit, n_evals
class PyOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: [problem.Calculate(x), 0, 0]
lb, ub = problem.GetBounds()
opt_prob = pyOpt.Optimization('Problem', objective_function)
opt_prob.addObj('f')
for i in range(problem.GetDimension()):
opt_prob.addVar('x'+str(i),'c',lower=lb[i],upper=ub[i],value=(lb[i] + ub[i])/2.)
midaco_none = MIDACO(pll_type=None)
midaco_none.setOption('IPRINT',-1)
midaco_none.setOption('ISEED', 100)
midaco_none.setOption('MAXEVAL',self.max_iters)
midaco_none.setOption('FOCUS', -4)
fstr, xstr, inform = midaco_none(opt_prob)
n_evals = problem.GetCalculationsStatistics()
return xstr, fstr[0], n_evals
class SHGOWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: problem.Calculate(x)
        bounds = list(zip(*problem.GetBounds()))
opts = {'maxfev': self.max_iters}
result = shgo(objective_function, bounds, options=opts)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
algos = {'scd': SCDEWrapper, 'ags': AGSWrapper,
'agsd': functools.partial(AGSWrapper, mixedFast=True),
'direct': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT),
'directl': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT_L),
'stogo': functools.partial(NLOptWrapper, method=nlopt.GD_STOGO),
'mlsl': functools.partial(NLOptWrapper, method=nlopt.G_MLSL_LDS),
'crs': functools.partial(NLOptWrapper, method=nlopt.GN_CRS2_LM),
'simple': SimpleWrapper, 'scb': SCBasinhoppingWrapper,
'sda': SDAWrapper, 'stochopy': StochOpyWrapper, 'shgo': SHGOWrapper,
'pyopt': PyOptWrapper}
algo2cature = {'scd': 'Scipy DE', 'ags': 'AGS', 'direct': 'DIRECT', 'agsd': 'AGSd',
'directl': 'DIRECTl', 'simple': 'Simple',
'stogo': 'StoGO', 'mlsl': 'MLSL', 'crs':'CRS', 'scb': 'Scipy B-H',
'sda': 'SDA', 'stochopy': 'Stochopy', 'pysot': 'PySOT', 'pyopt': 'PyOpt', 'shgo': 'SHGO'}
serg_eps = {2: 0.01, 3: 0.01, 4: math.pow(1e-6, 1./4), 5: math.pow(1e-7, 1./5)}
def main(args):
wrapper_class = algos[args.algo]
if args.problems_class == 'grish':
problems = GrishClass()
else:
assert args.problems_dim > 1 and args.problems_dim < 6
if args.problems_class == 'gklss':
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Simple)
else:
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Hard)
eps = 0.01
if args.serg_eps:
eps = serg_eps[args.problems_dim]
    wrapper = wrapper_class(args.dist_stop, args.max_iters, args.problems_class+str(args.problems_dim), eps=eps)
calc_stats, solved_status = solve_class(problems, wrapper, verbose=args.verbose, eps_check=eps)
stats = compute_stats(calc_stats, solved_status)
print('Problems solved: {}'.format(stats['num_solved']))
for i, avg in enumerate(stats['avg_calcs'][:-1]):
print('Average number of calculations of constraint #{}: {}'.format(i, avg))
print('Average number of calculations of objective: {}'.format(stats['avg_calcs'][-1]))
#plot_cmcs([stats['cmc']], captures=[algo2cature(args.algo)], show=True, filename='')
save_stats(stats, args.stats_fname, capture=algo2cature[args.algo])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sample for AGS solver')
parser.add_argument('--max_iters', type=int, default=10000, help='limit of iterations for the method')
parser.add_argument('--problems_class', type=str, choices=['grish','gklss','gklsh'], default='grish')
parser.add_argument('--algo', type=str, choices=algos.keys(), default='scd')
parser.add_argument('--problems_dim', type=int, default=2)
parser.add_argument('--verbose', action='store_true', help='Print additional info to console')
    parser.add_argument('--dist_stop', action='store_true', help='Stop algorithm when the next point is close enough to the optimum')
parser.add_argument('--serg_eps', action='store_true')
parser.add_argument('--stats_fname', type=str, default='')
main(parser.parse_args())
```

| quality signal (`_quality_signal` suffix omitted) | value |
|---|---|
| avg_line_length | 38.490521 |
| max_line_length | 134 |
| alphanum_fraction | 0.623284 |
| qsc_code_num_words | 2,053 |
| qsc_code_num_chars | 16,243 |
| qsc_code_mean_word_length | 4.772041 |
| qsc_code_frac_words_unique | 0.18753 |
| qsc_code_frac_chars_top_2grams | 0.026335 |
| qsc_code_frac_chars_top_3grams | 0.035827 |
| qsc_code_frac_chars_top_4grams | 0.016842 |
| qsc_code_frac_chars_dupe_5grams | 0.434521 |
| qsc_code_frac_chars_dupe_6grams | 0.336838 |
| qsc_code_frac_chars_dupe_7grams | 0.308462 |
| qsc_code_frac_chars_dupe_8grams | 0.302644 |
| qsc_code_frac_chars_dupe_9grams | 0.257324 |
| qsc_code_frac_chars_dupe_10grams | 0.250281 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.028512 |
| qsc_code_frac_chars_whitespace | 0.261528 |
| qsc_code_size_file_byte | 16,243 |
| qsc_code_num_lines | 421 |
| qsc_code_num_chars_line_max | 135 |
| qsc_code_num_chars_line_mean | 38.581948 |
| qsc_code_frac_chars_alphabet | 0.788245 |
| qsc_code_frac_chars_comments | 0.026904 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.433702 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.066485 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0.002762 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.082873 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.063536 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.240331 |
| qsc_codepython_frac_lines_print | 0.013812 |

Raw `qsc_*` counterparts: all 0, with the same two nulls as in the first record; `effective` = 1, `hits` = 0.
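The wrappers in this file share an implicit protocol: a constructor taking `(dist_stop, max_iters, class_name, eps)` and a `Solve(problem)` method returning `(best_point, best_value, calculation_statistics)`. A minimal additional wrapper following that protocol might look like this (illustrative only; `RandomSearchWrapper` is not part of the original file):

```python
import numpy as np

class RandomSearchWrapper:
    """Uniform random search obeying the Solve() protocol used above."""

    def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
        self.max_iters = max_iters

    def Solve(self, problem):
        lb, ub = problem.GetBounds()
        best_x, best_f = None, float('inf')
        for _ in range(self.max_iters):
            x = np.random.uniform(lb, ub)
            f = problem.Calculate(x)
            if f < best_f:
                best_x, best_f = x, f
        return best_x, best_f, problem.GetCalculationsStatistics()
```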
| field | value |
|---|---|
| hexsha | 6006beb0722b92f412b4c3f2503f64cd54b33641 |
| size | 8,288 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | src/py/fc.py |
| repo name (stars/issues/forks) | mattyschell/geodatabase-toiler |
| repo head hexsha | c8231999c3156bf41f9b80f151085afa97ba8586 |
| licenses | ["CC0-1.0"] |
| max_stars_count | null |
| max_issues_count | 4 (2021-04-05T16:03:30.000Z to 2022-03-02T21:28:06.000Z) |
| max_forks_count | null |

content:

```python
import arcpy
import logging
import pathlib
import subprocess
import gdb
import cx_sde
class Fc(object):
def __init__(self
,gdb
,name):
# gdb object
self.gdb = gdb
# ex BUILDING
self.name = name.upper()
# esri tools usually expect this C:/sdefiles/bldg.sde/BUILDING
# also acceptable: C:/sdefiles/bldg.sde/BLDG.BUILDING
self.featureclass = self.gdb.sdeconn + "/" + self.name
def getfields(self):
desc = arcpy.Describe(self.featureclass)
fields = desc.fields
fieldsameslist = []
for field in fields:
fieldsameslist.append(field.name)
return fieldsameslist
def exists(self):
return arcpy.Exists(self.featureclass)
def delete(self):
logging.info('deleting {0}'.format(self.name))
desc = arcpy.Describe(self.featureclass)
        if desc.IsArchived:
            # disable archiving and axe the _H table
arcpy.DisableArchiving_management(self.featureclass,
'DELETE')
arcpy.Delete_management(self.featureclass)
def locksexist(self):
if arcpy.TestSchemaLock(self.featureclass):
# "True A schema lock can be applied to the dataset"
return False
else:
return True
def interpret(self
,resobject):
# could also work with resobject.status
output = 0
if 'succeeded' not in resobject.getMessages().lower():
output = 1
logging.warn('response code is {0}'.format(resobject.status))
logging.warn('response messages are {0}'.format(resobject.getMessages()))
return output
def version(self):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/register-as-versioned.htm
logging.info('versioning {0}'.format(self.name))
arcpy.RegisterAsVersioned_management(self.featureclass
,"NO_EDITS_TO_BASE")
# https://support.esri.com/en/technical-article/000023226
# When an ArcGIS 10.8 / ArcGIS Pro 2.5 (or newer) client connects to a
# 10.7.1, or earlier, release of an Enterprise geodatabase in Oracle,
# and registers the data as versioned, the versioned view is not created
# for the associated table or feature class.
        # I can't get this shell out to python27 to work
        # so like a dummy I'm gonna print it to the screen for now
        # the test will fail until I (or esri) get it right, that's honest at least
py2versionedviews = pathlib.Path(__file__).parent.parent \
.joinpath('py27') \
.joinpath('create_versionedviews.py')
# see gdb class for this path, perhaps 'C:\Python27\ArcGIS10.6'
callcmd = r'{0} {1} {2}'.format(self.gdb.arcpy2path, py2versionedviews, self.name)
logging.info('YOU MUST CREATE versioned views from py27 using {0}'.format(callcmd))
logging.info('YOU YES YOU MUST call this: {0}'.format(callcmd))
# From a script run a postprocess something like:
# C:\Python27\ArcGIS10.6\python.exe C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py TOILERTESTFC
# exit_code = subprocess.call(callcmd,shell=True)
# exit_code = subprocess.run([self.gdb.arcpy2path, 'C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py'])
# subprocess.Popen(["virtualenv1/bin/python", "my_script.py"])
# attempts above yield
# File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\Lib\site.py", line 177
#file=sys.stderr)
# ^
# SyntaxError: invalid syntax
def trackedits(self):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/enable-editor-tracking.htm
        # this will create fields only if they don't exist
# I am gonna fix the field names here. Reminder that our goal is to
# be opinionated and consistent across anything we manage
logging.info('enabling editor tracking on {0}'.format(self.name))
return self.interpret(arcpy.EnableEditorTracking_management(self.featureclass
,'CREATED_USER'
,'CREATED_DATE'
,'LAST_EDITED_USER'
,'LAST_EDITED_DATE'
,'NO_ADD_FIELDS'
,'UTC'))
def grantprivileges(self
,user
,edits='GRANT'): # or AS_IS
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/change-privileges.htm
        # caller should know who the editors are; we don't concern ourselves here
        # always grant select; edits are GRANT, or AS_IS for grant select only
        # The knobs and dials on this tool are confounding
logging.info('granting privileges on {0} to {1}'.format(self.name
,user))
return self.interpret(arcpy.ChangePrivileges_management(self.featureclass
,user
,'GRANT'
,edits))
def index(self
,column):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/add-attribute-index.htm
        # unique indexes can't be specified for multiversioned tables
logging.info('indexing column {0} on {1}'.format(column
,self.name))
# BUILDINGBINIX
# BUILDING_HISTORICDOITT_IDIX = 27 careful friend
return self.interpret(arcpy.AddIndex_management(self.featureclass
,column
,'{0}{1}{2}'.format(self.name
,column
,'IX')))
def analyze(self
,components=['BUSINESS','ADDS','DELETES']):
return self.interpret(arcpy.Analyze_management(self.featureclass
,components))
def rebuildindexes(self):
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/data-management/rebuild-indexes.htm
return self.interpret(arcpy.RebuildIndexes_management(self.gdb.sdeconn
,'NO_SYSTEM'
,self.name
,'ALL'))
def enablearchiving(self):
desc = arcpy.Describe(self.featureclass)
        if not desc.IsArchived:
return self.interpret(arcpy.EnableArchiving_management(self.featureclass))
else:
return 0
def exporttoshp(self
,outputdir
,outputname):
# print('fc2fc {0} {1} {2}'.format(self.featureclass, outputdir, outputname))
arcpy.FeatureClassToFeatureClass_conversion(self.featureclass
,outputdir
,outputname)
# TODO exportogeopackage if ESRI ever fills in some functionality in
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/conversion/an-overview-of-the-to-geopackage-toolset.htm
# TODO exportogeojson if ESRI tool does something other than error 99999 (guess: sdo_geometry not supported)
# For now export to shp, then ogr2ogr to other formats. Classic
```

| quality signal (`_quality_signal` suffix omitted) | value |
|---|---|
| avg_line_length | 39.279621 |
| max_line_length | 132 |
| alphanum_fraction | 0.532939 |
| qsc_code_num_words | 827 |
| qsc_code_num_chars | 8,288 |
| qsc_code_mean_word_length | 5.286578 |
| qsc_code_frac_words_unique | 0.38815 |
| qsc_code_frac_chars_top_2grams | 0.058554 |
| qsc_code_frac_chars_top_3grams | 0.047575 |
| qsc_code_frac_chars_top_4grams | 0.02333 |
| qsc_code_frac_chars_dupe_5grams | 0.138152 |
| qsc_code_frac_chars_dupe_6grams | 0.129231 |
| qsc_code_frac_chars_dupe_7grams | 0.119854 |
| qsc_code_frac_chars_dupe_8grams | 0.119854 |
| qsc_code_frac_chars_dupe_9grams | 0.096523 |
| qsc_code_frac_chars_dupe_10grams | 0.096523 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.015475 |
| qsc_code_frac_chars_whitespace | 0.384049 |
| qsc_code_size_file_byte | 8,288 |
| qsc_code_num_lines | 210 |
| qsc_code_num_chars_line_max | 133 |
| qsc_code_num_chars_line_mean | 39.466667 |
| qsc_code_frac_chars_alphabet | 0.84094 |
| qsc_code_frac_chars_comments | 0.330719 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.11 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.07968 |
| qsc_code_frac_chars_long_word_length | 0.004366 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0.004762 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.14 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.06 |
| qsc_codepython_frac_lines_simplefunc | 0.03 |
| qsc_codepython_score_lines_no_logic | 0.33 |
| qsc_codepython_frac_lines_print | 0 |

Raw `qsc_*` counterparts: all 0, with the same two nulls as in the first record; `effective` = 1, `hits` = 0.
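A hypothetical usage sketch for the `Fc` class above. The `gdb` object must expose `sdeconn` (and `arcpy2path`) as fc.py expects; the `Gdb()` constructor, role, and column names below are illustrative, not taken from the original repository:

```python
import gdb
import fc

# Construction details depend on the companion gdb module in the same repo.
toiler_gdb = gdb.Gdb()
building = fc.Fc(toiler_gdb, 'building')

if building.exists() and not building.locksexist():
    building.version()
    building.trackedits()
    building.grantprivileges('EDITOR_ROLE')  # role name is illustrative
    building.index('BIN')                    # column name is illustrative
```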
| field | value |
|---|---|
| hexsha | 60072f56e1c7453c15f0f4342de268f2dd1b42f7 |
| size | 640 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | Machine learning book/3 - MultiLayer Perceptron/test_regression.py |
| repo name (stars/issues/forks) | dalmia/Lisa-Lab-Tutorials |
| repo head hexsha | ee1b0b4fcb82914085420bb289ebda09f248c8d1 |
| licenses | ["MIT"] |
| max_stars_count | 25 (2017-01-14T08:17:23.000Z to 2022-02-26T13:53:17.000Z) |
| max_issues_count | 1 (2020-06-20T02:49:16.000Z to 2020-06-20T02:49:16.000Z) |
| max_forks_count | 6 (2017-08-24T08:40:41.000Z to 2020-03-17T00:01:56.000Z) |

content:

```python
import numpy as np
import matplotlib.pyplot as plt
from mlp import mlp

x = np.ones((1, 40)) * np.linspace(0, 1, 40)
t = np.sin(2 * np.pi * x) + np.cos(2 * np.pi * x) + np.random.randn(40) * 0.2
x = x.T
t = t.T
n_hidden = 3
eta = 0.25
n_iterations = 101
plt.plot(x, t, '.')
plt.show()
train = x[0::2, :]
test = x[1::4, :]
valid = x[3::4, :]
train_targets = t[0::2, :]
test_targets = t[1::4, :]
valid_targets = t[3::4, :]
net = mlp(train, train_targets, n_hidden, out_type='linear')
net.mlptrain(train, train_targets, eta, n_iterations)
best_err = net.earlystopping(train, train_targets, valid, valid_targets, eta)
```

| quality signal (`_quality_signal` suffix omitted) | value |
|---|---|
| avg_line_length | 21.333333 |
| max_line_length | 77 |
| alphanum_fraction | 0.65 |
| qsc_code_num_words | 116 |
| qsc_code_num_chars | 640 |
| qsc_code_mean_word_length | 3.474138 |
| qsc_code_frac_words_unique | 0.387931 |
| qsc_code_frac_chars_top_2grams | 0.119107 |
| qsc_code_frac_chars_top_3grams | 0.126551 |
| qsc_code_frac_chars_top_4grams | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.06015 |
| qsc_code_frac_chars_whitespace | 0.16875 |
| qsc_code_size_file_byte | 640 |
| qsc_code_num_lines | 29 |
| qsc_code_num_chars_line_max | 78 |
| qsc_code_num_chars_line_mean | 22.068966 |
| qsc_code_frac_chars_alphabet | 0.697368 |
| qsc_code_frac_chars_comments | 0 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.010938 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.181818 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.181818 |
| qsc_codepython_frac_lines_print | 0 |

Raw `qsc_*` counterparts: all 0, with the same two nulls as in the first record; `effective` = 1, `hits` = 0.
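The stride slicing in the script above is the whole train/test/validation split: indices 0, 2, 4, ... train; 1, 5, 9, ... test; 3, 7, 11, ... validate. A quick standalone check of the pattern:

```python
import numpy as np

idx = np.arange(8)
print(idx[0::2])  # [0 2 4 6] -> training half
print(idx[1::4])  # [1 5]     -> test quarter
print(idx[3::4])  # [3 7]     -> validation quarter
```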
| field | value |
|---|---|
| hexsha | 6009cc193c9712b5bd11ff6e9909ef949c64bd53 |
| size | 12,219 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | sdk/python/tekton_pipeline/models/v1beta1_embedded_task.py |
| repo name (stars/issues/forks) | jmcshane/experimental |
| repo head hexsha | 3c47c7e87bcdadc6172941169f3f24fc3f159ae0 |
| licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | 1 (2020-07-30T15:55:45.000Z to 2020-07-30T15:55:45.000Z) |

content:

```python
# Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
class V1beta1EmbeddedTask(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'description': 'str',
'metadata': 'V1beta1PipelineTaskMetadata',
'params': 'list[V1beta1ParamSpec]',
'resources': 'V1beta1TaskResources',
'results': 'list[V1beta1TaskResult]',
'sidecars': 'list[V1beta1Sidecar]',
'step_template': 'V1Container',
'steps': 'list[V1beta1Step]',
'volumes': 'list[V1Volume]',
'workspaces': 'list[V1beta1WorkspaceDeclaration]'
}
attribute_map = {
'description': 'description',
'metadata': 'metadata',
'params': 'params',
'resources': 'resources',
'results': 'results',
'sidecars': 'sidecars',
'step_template': 'stepTemplate',
'steps': 'steps',
'volumes': 'volumes',
'workspaces': 'workspaces'
}
def __init__(self, description=None, metadata=None, params=None, resources=None, results=None, sidecars=None, step_template=None, steps=None, volumes=None, workspaces=None, local_vars_configuration=None): # noqa: E501
"""V1beta1EmbeddedTask - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._metadata = None
self._params = None
self._resources = None
self._results = None
self._sidecars = None
self._step_template = None
self._steps = None
self._volumes = None
self._workspaces = None
self.discriminator = None
if description is not None:
self.description = description
if metadata is not None:
self.metadata = metadata
if params is not None:
self.params = params
if resources is not None:
self.resources = resources
if results is not None:
self.results = results
if sidecars is not None:
self.sidecars = sidecars
if step_template is not None:
self.step_template = step_template
if steps is not None:
self.steps = steps
if volumes is not None:
self.volumes = volumes
if workspaces is not None:
self.workspaces = workspaces
@property
def description(self):
"""Gets the description of this V1beta1EmbeddedTask. # noqa: E501
Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501
:return: The description of this V1beta1EmbeddedTask. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1beta1EmbeddedTask.
Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501
:param description: The description of this V1beta1EmbeddedTask. # noqa: E501
:type: str
"""
self._description = description
@property
def metadata(self):
"""Gets the metadata of this V1beta1EmbeddedTask. # noqa: E501
:return: The metadata of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1beta1PipelineTaskMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1EmbeddedTask.
:param metadata: The metadata of this V1beta1EmbeddedTask. # noqa: E501
:type: V1beta1PipelineTaskMetadata
"""
self._metadata = metadata
@property
def params(self):
"""Gets the params of this V1beta1EmbeddedTask. # noqa: E501
Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501
:return: The params of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1ParamSpec]
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this V1beta1EmbeddedTask.
Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501
:param params: The params of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1ParamSpec]
"""
self._params = params
@property
def resources(self):
"""Gets the resources of this V1beta1EmbeddedTask. # noqa: E501
:return: The resources of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1beta1TaskResources
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1beta1EmbeddedTask.
:param resources: The resources of this V1beta1EmbeddedTask. # noqa: E501
:type: V1beta1TaskResources
"""
self._resources = resources
@property
def results(self):
"""Gets the results of this V1beta1EmbeddedTask. # noqa: E501
Results are values that this Task can output # noqa: E501
:return: The results of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1TaskResult]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this V1beta1EmbeddedTask.
Results are values that this Task can output # noqa: E501
:param results: The results of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1TaskResult]
"""
self._results = results
@property
def sidecars(self):
"""Gets the sidecars of this V1beta1EmbeddedTask. # noqa: E501
Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501
:return: The sidecars of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1Sidecar]
"""
return self._sidecars
@sidecars.setter
def sidecars(self, sidecars):
"""Sets the sidecars of this V1beta1EmbeddedTask.
Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501
:param sidecars: The sidecars of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1Sidecar]
"""
self._sidecars = sidecars
@property
def step_template(self):
"""Gets the step_template of this V1beta1EmbeddedTask. # noqa: E501
:return: The step_template of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1Container
"""
return self._step_template
@step_template.setter
def step_template(self, step_template):
"""Sets the step_template of this V1beta1EmbeddedTask.
:param step_template: The step_template of this V1beta1EmbeddedTask. # noqa: E501
:type: V1Container
"""
self._step_template = step_template
@property
def steps(self):
"""Gets the steps of this V1beta1EmbeddedTask. # noqa: E501
Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501
:return: The steps of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1Step]
"""
return self._steps
@steps.setter
def steps(self, steps):
"""Sets the steps of this V1beta1EmbeddedTask.
Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501
:param steps: The steps of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1Step]
"""
self._steps = steps
@property
def volumes(self):
"""Gets the volumes of this V1beta1EmbeddedTask. # noqa: E501
Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501
:return: The volumes of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this V1beta1EmbeddedTask.
Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501
:param volumes: The volumes of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1Volume]
"""
self._volumes = volumes
@property
def workspaces(self):
"""Gets the workspaces of this V1beta1EmbeddedTask. # noqa: E501
Workspaces are the volumes that this Task requires. # noqa: E501
:return: The workspaces of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1WorkspaceDeclaration]
"""
return self._workspaces
@workspaces.setter
def workspaces(self, workspaces):
"""Sets the workspaces of this V1beta1EmbeddedTask.
Workspaces are the volumes that this Task requires. # noqa: E501
:param workspaces: The workspaces of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1WorkspaceDeclaration]
"""
self._workspaces = workspaces
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return True
return self.to_dict() != other.to_dict()
```

| quality signal (`_quality_signal` suffix omitted) | value |
|---|---|
| avg_line_length | 31.903394 |
| max_line_length | 222 |
| alphanum_fraction | 0.628857 |
| qsc_code_num_words | 1,364 |
| qsc_code_num_chars | 12,219 |
| qsc_code_mean_word_length | 5.56305 |
| qsc_code_frac_words_unique | 0.156892 |
| qsc_code_frac_chars_top_2grams | 0.049552 |
| qsc_code_frac_chars_top_3grams | 0.131787 |
| qsc_code_frac_chars_top_4grams | 0.114655 |
| qsc_code_frac_chars_dupe_5grams | 0.447812 |
| qsc_code_frac_chars_dupe_6grams | 0.394175 |
| qsc_code_frac_chars_dupe_7grams | 0.384159 |
| qsc_code_frac_chars_dupe_8grams | 0.191882 |
| qsc_code_frac_chars_dupe_9grams | 0.163416 |
| qsc_code_frac_chars_dupe_10grams | 0.163416 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.034101 |
| qsc_code_frac_chars_whitespace | 0.289631 |
| qsc_code_size_file_byte | 12,219 |
| qsc_code_num_lines | 382 |
| qsc_code_num_chars_line_max | 223 |
| qsc_code_num_chars_line_mean | 31.986911 |
| qsc_code_frac_chars_alphabet | 0.840092 |
| qsc_code_frac_chars_comments | 0.463868 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.089744 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.084214 |
| qsc_code_frac_chars_long_word_length | 0.01914 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.166667 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.025641 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.320513 |
| qsc_codepython_frac_lines_print | 0.012821 |

Raw `qsc_*` counterparts: all 0, with the same two nulls as in the first record; `effective` = 1, `hits` = 0.
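Since the class above is an auto-generated OpenAPI model, its intended use is plain construction plus serialization. A minimal sketch, assuming the `tekton_pipeline` SDK is installed:

```python
from tekton_pipeline.models.v1beta1_embedded_task import V1beta1EmbeddedTask

task = V1beta1EmbeddedTask(description="say hello")
print(task.to_dict())  # every declared attribute, unset ones as None
# __eq__ compares to_dict(), so structurally identical models are equal:
print(task == V1beta1EmbeddedTask(description="say hello"))  # True
```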
| field | value |
|---|---|
| hexsha | 6009fd2ff57dced5db01fbc3398709e54f5b6bf1 |
| size | 3,122 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | tzp.py |
| repo name (stars/issues/forks) | gmlunesa/zhat |
| repo head hexsha | 3bf62625d102bd40274fcd39c91f21c169e334a8 |
| licenses | ["MIT"] |
| max_stars_count | 1 (2018-06-14T04:00:43.000Z to 2018-06-14T04:00:43.000Z) |
| max_issues_count | null |
| max_forks_count | 1 (2020-11-01T13:06:56.000Z to 2020-11-01T13:06:56.000Z) |

content:

```python
import zmq
import curses
import argparse
import configparser
import threading
import time
from curses import wrapper
from client import Client
from ui import UI
def parse_args():
parser = argparse.ArgumentParser(description='Client for teezeepee')
# Please specify your username
parser.add_argument('username',
type=str,
help='Specified username')
parser.add_argument('--config-file',
type=str,
help='Default path for configuration file.')
return parser.parse_args()
def display_section(window, display):
window_lines, window_cols = window.getmaxyx()
bottom_line = window_lines - 1
window.bkgd(curses.A_NORMAL)
window.scrollok(1)
while True:
window.addstr(bottom_line, 1, display.recv_string())
window.move(bottom_line, 1)
window.scroll(1)
window.refresh()
def input_section(window, chat_sender):
window.bkgd(curses.A_NORMAL)
window.clear()
window.box()
window.refresh()
while True:
window.clear()
window.box()
window.refresh()
s = window.getstr(1, 1).decode('utf-8')
if s is not None and s != "":
chat_sender.send_string(s)
# Short pause
time.sleep(0.01)
def main(stdscr):
config_file = args.config_file if args.config_file is not None else 'tzp.cfg'
config = configparser.ConfigParser()
config.read(config_file)
config = config['default']
receiver = zmq.Context().instance().socket(zmq.PAIR)
receiver.bind("inproc://clientchat")
sender = zmq.Context().instance().socket(zmq.PAIR)
sender.connect("inproc://clientchat")
client = Client(args.username, config['server_host'],
config['chat_port'], receiver)
client.run()
show_receiver = zmq.Context().instance().socket(zmq.PAIR)
show_receiver.bind("inproc://clientdisplay")
show_sender = zmq.Context().instance().socket(zmq.PAIR)
show_sender.connect("inproc://clientdisplay")
ui = UI(config['server_host'], config['display_port'], show_sender)
ui.run()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.echo()
curses.curs_set(0)
window_height = curses.LINES
window_width = curses.COLS
divider = int(window_height * 0.5)
history_screen = stdscr.subpad(divider, window_width, 0, 0)
input_screen = stdscr.subpad(window_height - divider, window_width, divider, 0)
history_thread = threading.Thread(target=display_section, args=(history_screen, show_receiver))
history_thread.daemon = True
history_thread.start()
input_thread = threading.Thread(target=input_section, args=(input_screen, sender))
input_thread.daemon = True
input_thread.start()
history_thread.join()
input_thread.join()
if __name__ == '__main__':
    try:
        args = parse_args()
        wrapper(main)
    except KeyboardInterrupt:
        pass
```

| quality signal (`_quality_signal` suffix omitted) | value |
|---|---|
| avg_line_length | 26.235294 |
| max_line_length | 99 |
| alphanum_fraction | 0.65663 |
| qsc_code_num_words | 379 |
| qsc_code_num_chars | 3,122 |
| qsc_code_mean_word_length | 5.229551 |
| qsc_code_frac_words_unique | 0.335092 |
| qsc_code_frac_chars_top_2grams | 0.025227 |
| qsc_code_frac_chars_top_3grams | 0.036327 |
| qsc_code_frac_chars_top_4grams | 0.048436 |
| qsc_code_frac_chars_dupe_5grams | 0.178607 |
| qsc_code_frac_chars_dupe_6grams | 0.178607 |
| qsc_code_frac_chars_dupe_7grams | 0.119072 |
| qsc_code_frac_chars_dupe_8grams | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.00791 |
| qsc_code_frac_chars_whitespace | 0.230621 |
| qsc_code_size_file_byte | 3,122 |
| qsc_code_num_lines | 118 |
| qsc_code_num_chars_line_max | 100 |
| qsc_code_num_chars_line_mean | 26.457627 |
| qsc_code_frac_chars_alphabet | 0.817236 |
| qsc_code_frac_chars_comments | 0.012812 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.154762 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.080247 |
| qsc_code_frac_chars_long_word_length | 0.014295 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.047619 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0.011905 |
| qsc_codepython_frac_lines_import | 0.107143 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.166667 |
| qsc_codepython_frac_lines_print | 0 |

Raw `qsc_*` counterparts: all 0, with the same two nulls as in the first record; `effective` = 1, `hits` = 0.
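tzp.py wires each UI thread to the network layer with ZeroMQ PAIR sockets over `inproc://` transports. The pattern in isolation, as a standalone sketch:

```python
import zmq

ctx = zmq.Context.instance()

receiver = ctx.socket(zmq.PAIR)
receiver.bind("inproc://demo")   # for inproc, bind must precede connect
sender = ctx.socket(zmq.PAIR)
sender.connect("inproc://demo")

sender.send_string("hello")
print(receiver.recv_string())    # -> hello
```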
| field | value |
|---|---|
| hexsha | 600de579e9f074f3a42976d366b7423013a654a6 |
| size | 5,270 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | exercise-09/programming_assignment/hopfield.py |
| repo name (stars/issues/forks) | AleRiccardi/technical-neural-network-course |
| repo head hexsha | bfcca623a9dc3f7f4c20e1efe39abe986cd8869e |
| licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |

content:

```python
import numpy as np
import random
letter_C = np.array([
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
])
noisy_C = np.array([
[1, 1, 1, 1, 1],
[0, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 1, 1, 1],
])
letter_I = np.array([
[0, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[1, 1, 1, 1, 1],
])
noisy_I = np.array([
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 0, 1, 1],
])
letter_T = np.array([
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
])
noisy_T = np.array([
[1, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
])
class HopfieldNet:
def __init__(self, num_neurons, threshold=None):
assert num_neurons <= 1000
        self.weights = np.zeros((num_neurons, num_neurons)).astype(int)
self.state = np.array((1, num_neurons))
if threshold:
self.thresholds = np.array([threshold for _ in num_neurons])
else:
self.thresholds = np.zeros((num_neurons,))
def fit(self, X):
num_p = X.shape[0]
num_k = X.shape[1]
        # check the number of patterns is within the Hopfield capacity (~0.138 * K)
assert num_p < num_k * 0.138
num_k = X.shape[1]
for p in range(X.shape[0]):
X_p = X[p, :].reshape((1, num_k))
            matrix_lr = np.dot(X_p.T, X_p).astype(int)
np.fill_diagonal(matrix_lr, 0)
self.weights += matrix_lr
def predict(self, X, show_energy=False, show_char=False):
num_k = X.shape[1]
X_pred = X.copy()
# loop per every pattern
for p in range(X_pred.shape[0]):
differ = True
time_s = 0
# loop until the state
# stay the same
while differ:
X_prev = X_pred[p].copy()
# print energy
if show_energy:
self.print_energy(X_pred[p], p, time_s)
# print char
if show_char and num_k <= 100:
self.print_char(X_pred[p], p, time_s)
# loop per every neuron
for k in range(num_k):
val = np.dot(X_pred[p], self.weights[:, k])
val_thres = 1 if val > self.thresholds[k] else -1
X_pred[p, k] = val_thres
# check if the new state differs from the previous one
differ = False if np.array_equal(X_pred[p], X_prev) else True
time_s += 1
return X_pred
def print_energy(self, state, num_p, time_s):
first_term = 0
second_term = 0
for i in range(state.shape[0]):
for j in range(state.shape[0]):
first_term += self.weights[i, j] * state[i] * state[j]
for k in range(state.shape[0]):
second_term += self.thresholds[k] * state[k]
energy = -0.5 * first_term + second_term
print('Pattern: {}\t||\tTime stamp: {}\t||\tEnergy: {:7.0f}'.format(num_p, time_s, energy))
return energy
def print_char(self, sequence, num_p, time_s):
sqrtK = np.sqrt(sequence.shape[0])
# check if correct sequence
assert sqrtK % 1 == 0
print('Pattern: {}\t||\tTime stamp: {}'.format(num_p, time_s))
for y in range(int(sqrtK)):
for x in range(int(sqrtK)):
idx = int(y * sqrtK + x)
val = '*' if sequence[idx] > 0 else ' '
print(val, end=' ')
print('', sep='', end='\n')
print('', sep='', end='\n')
def test_w_less_101():
print('\n================')
print('K < 101')
print('================\n')
X = np.array([
letter_C.flatten(),
letter_I.flatten(),
letter_T.flatten(),
])
X = np.where(X > 0, 1, -1)
net = HopfieldNet(X.shape[1])
net.fit(X)
X_test = np.array([
noisy_C.flatten(),
noisy_I.flatten(),
noisy_T.flatten(),
])
X_test = np.where(X_test > 0, 1, -1)
_ = net.predict(X_test, show_char=True)
def test_w_more_100():
print('\n================')
print('K > 100')
print('================\n')
num_k = random.randint(101, 1000)
binary = 2
X = np.array([
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
])
X = np.where(X > 0, 1, -1)
net = HopfieldNet(X.shape[1])
net.fit(X)
X_test = np.array([
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
])
X_test = np.where(X_test > 0, 1, -1)
_ = net.predict(X_test, show_energy=True)
if __name__ == '__main__':
test_w_less_101()
test_w_more_100()
```

| quality signal (`_quality_signal` suffix omitted) | value |
|---|---|
| avg_line_length | 27.164948 |
| max_line_length | 99 |
| alphanum_fraction | 0.493548 |
| qsc_code_num_words | 809 |
| qsc_code_num_chars | 5,270 |
| qsc_code_mean_word_length | 3.060569 |
| qsc_code_frac_words_unique | 0.147095 |
| qsc_code_frac_chars_top_2grams | 0.045234 |
| qsc_code_frac_chars_top_3grams | 0.041195 |
| qsc_code_frac_chars_top_4grams | 0.03231 |
| qsc_code_frac_chars_dupe_5grams | 0.382068 |
| qsc_code_frac_chars_dupe_6grams | 0.299677 |
| qsc_code_frac_chars_dupe_7grams | 0.287964 |
| qsc_code_frac_chars_dupe_8grams | 0.287157 |
| qsc_code_frac_chars_dupe_9grams | 0.268578 |
| qsc_code_frac_chars_dupe_10grams | 0.263732 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0.064562 |
| qsc_code_frac_chars_whitespace | 0.332827 |
| qsc_code_size_file_byte | 5,270 |
| qsc_code_num_lines | 193 |
| qsc_code_num_chars_line_max | 100 |
| qsc_code_num_chars_line_mean | 27.305699 |
| qsc_code_frac_chars_alphabet | 0.639647 |
| qsc_code_frac_chars_comments | 0.040228 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0.437909 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0.03645 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | 0 |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0.019608 |
| qsc_codepython_cate_ast | 1 |
| qsc_codepython_frac_lines_func_ratio | 0.045752 |
| qsc_codepython_cate_var_zero | false |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0.013072 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0.078431 |
| qsc_codepython_frac_lines_print | 0.098039 |

Raw `qsc_*` counterparts: all 0, with the same two nulls as in the first record; `effective` = 1, `hits` = 0.
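The `assert num_p < num_k * 0.138` in `fit()` above encodes the classic Hopfield storage-capacity rule of thumb: roughly 0.138 patterns per neuron. A quick sketch of what that allows:

```python
def max_patterns(num_neurons: int) -> int:
    # Classic Hopfield capacity estimate: ~0.138 patterns per neuron.
    return int(0.138 * num_neurons)

print(max_patterns(25))    # 3 -> the three 5x5 letters above just fit
print(max_patterns(1000))  # 138
```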
| field | value |
|---|---|
| hexsha | 60108a3d3357ef01dab42a6e413205a5ad651ed5 |
| size | 13,095 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | lrtc_lib/experiment_runners/experiment_runner.py |
| repo name (stars/issues/forks) | MovestaDev/low-resource-text-classification-framework |
| repo head hexsha | 4380755a65b35265e84ecbf4b87e872d79e8f079 |
| licenses | ["Apache-2.0"] |
| max_stars_count | 57 (2020-11-18T15:13:06.000Z to 2022-03-28T22:33:26.000Z) |
| max_issues_count | 5 (2021-02-23T22:11:07.000Z to 2021-12-13T00:13:48.000Z) |
| max_forks_count | 14 (2021-02-10T08:55:27.000Z to 2022-02-23T22:37:54.000Z) |

content:

```python
# (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import abc
import logging
import time
from collections import defaultdict
from typing import List
import numpy as np
from dataclasses import dataclass
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
import lrtc_lib.data_access.data_access_factory as data_access_factory
import lrtc_lib.experiment_runners.experiments_results_handler as res_handler
from lrtc_lib.oracle_data_access import oracle_data_access_api
from lrtc_lib.active_learning.diversity_calculator import DiversityCalculator
from lrtc_lib.active_learning.knn_outlier_calculator import KnnOutlierCalculator
from lrtc_lib.active_learning.strategies import ActiveLearningStrategies
from lrtc_lib.data_access.core.data_structs import TextElement
from lrtc_lib.data_access.data_access_api import DataAccessApi
from lrtc_lib.data_access.data_access_factory import get_data_access
from lrtc_lib.orchestrator import orchestrator_api
from lrtc_lib.orchestrator.orchestrator_api import DeleteModels
from lrtc_lib.train_and_infer_service.model_type import ModelType
from lrtc_lib.training_set_selector.train_and_dev_set_selector_api import TrainingSetSelectionStrategy
@dataclass
class ExperimentParams:
experiment_name: str
train_dataset_name: str
dev_dataset_name: str
test_dataset_name: str
category_name: str
workspace_id: str
model: ModelType
active_learning_strategies: list
repeat_id: int
train_params: dict
def compute_batch_scores(config, elements):
data_access = get_data_access()
unlabeled = data_access.sample_unlabeled_text_elements(config.workspace_id, config.train_dataset_name,
config.category_name, 10 ** 6)["results"]
unlabeled_emb = np.array(orchestrator_api.infer(config.workspace_id, config.category_name, unlabeled)["embeddings"])
batch_emb = np.array(orchestrator_api.infer(config.workspace_id, config.category_name, elements)["embeddings"])
outlier_calculator = KnnOutlierCalculator(unlabeled_emb)
outlier_value = outlier_calculator.compute_batch_score(batch_emb)
representativeness_value = 1 / outlier_value
diversity_calculator = DiversityCalculator(unlabeled_emb)
diversity_value = diversity_calculator.compute_batch_score(batch_emb)
return diversity_value, representativeness_value
class ExperimentRunner(object, metaclass=abc.ABCMeta):
NO_AL = 'no_active_learning'
def __init__(self, first_model_positives_num: int, first_model_negatives_num: int,
active_learning_suggestions_num: int):
"""
        Init the ExperimentRunner
        :param first_model_positives_num: the number of positive instances to provide for the first model.
:param first_model_negatives_num: the number of negative instances to provide for the first model.
:param active_learning_suggestions_num: the number of instances to be suggested by the active learning strategy
for the training of the second model.
"""
self.first_model_positives_num = first_model_positives_num
self.first_model_negatives_num = first_model_negatives_num
self.active_learning_suggestions_num = active_learning_suggestions_num
self.data_access: DataAccessApi = data_access_factory.get_data_access()
self.cached_first_model_scores = False
orchestrator_api.set_training_set_selection_strategy(TrainingSetSelectionStrategy.ALL_LABELED)
def run(self, config: ExperimentParams, active_learning_iterations_num: int, results_file_path: str,
delete_workspaces: bool = True):
        # key: active learning name, value: results over iterations (the first model has no iterations)
results_per_active_learning = defaultdict(dict)
# train first model
iteration = 0
res_dict = self.train_first_model(config=config)
res_handler.save_results(results_file_path, [res_dict])
results_per_active_learning[self.NO_AL][iteration] = res_dict
original_workspace_id = config.workspace_id
for al in config.active_learning_strategies:
orchestrator_api.set_active_learning_strategy(al)
if not orchestrator_api.is_model_compatible_with_active_learning(al, config.model):
logging.info(f'skipping active learning strategy {al.name} for model {config.model.name} '
f'since the strategy does not support this model.')
continue
al_workspace_id = original_workspace_id + "-" + al.name
if orchestrator_api.workspace_exists(al_workspace_id):
orchestrator_api.delete_workspace(al_workspace_id)
orchestrator_api.copy_workspace(original_workspace_id, al_workspace_id)
config.workspace_id = al_workspace_id
for iteration in range(1, active_learning_iterations_num + 1):
logging.info(f'Run AL strategy: {al.name}, iteration num: {iteration}, repeat num: {config.repeat_id}\t'
f'workspace: {config.workspace_id}')
res_dict, train_id = self.run_active_learning_iteration(config, al, iteration)
res_handler.save_results(results_file_path, [res_dict])
results_per_active_learning[al.name][iteration] = res_dict
if delete_workspaces:
orchestrator_api.delete_workspace(config.workspace_id, DeleteModels.ALL_BUT_FIRST_MODEL)
if delete_workspaces:
orchestrator_api.delete_workspace(original_workspace_id)
return results_per_active_learning
def train_first_model(self, config: ExperimentParams):
if orchestrator_api.workspace_exists(config.workspace_id):
orchestrator_api.delete_workspace(config.workspace_id)
orchestrator_api.create_workspace(config.workspace_id, config.train_dataset_name,
dev_dataset_name=config.dev_dataset_name)
orchestrator_api.create_new_category(config.workspace_id, config.category_name, "No description for you")
dev_text_elements_uris = orchestrator_api.get_all_text_elements_uris(config.dev_dataset_name)
dev_text_elements_and_labels = oracle_data_access_api.get_gold_labels(config.dev_dataset_name,
dev_text_elements_uris)
if dev_text_elements_and_labels is not None:
orchestrator_api.set_labels(config.workspace_id, dev_text_elements_and_labels)
random_seed = sum([ord(c) for c in config.workspace_id])
logging.info(str(config))
logging.info(f'random seed: {random_seed}')
self.set_first_model_positives(config, random_seed)
self.set_first_model_negatives(config, random_seed)
# train first model
logging.info(f'Starting first model training (model: {config.model.name})\tworkspace: {config.workspace_id}')
new_model_id = orchestrator_api.train(config.workspace_id, config.category_name, config.model, train_params=config.train_params)
if new_model_id is None:
raise Exception(f'a new model was not trained\tworkspace: {config.workspace_id}')
eval_dataset = config.test_dataset_name
res_dict = self.evaluate(config, al=self.NO_AL, iteration=0, eval_dataset=eval_dataset)
res_dict.update(self.generate_al_batch_dict(config)) # ensures AL-related keys are in the results dictionary
logging.info(f'Evaluation on dataset: {eval_dataset}, iteration: 0, first model (id: {new_model_id}) '
f'repeat: {config.repeat_id}, is: {res_dict}\t'
f'workspace: {config.workspace_id}')
return res_dict
def run_active_learning_iteration(self, config: ExperimentParams, al, iteration):
# get suggested elements for labeling (and their gold labels)
suggested_text_elements, suggested_uris_and_gold_labels = \
self.get_suggested_elements_and_gold_labels(config, al)
# calculate metrics for the batch suggested by the active learning strategy
al_batch_dict = self.generate_al_batch_dict(config, suggested_text_elements)
# set gold labels as the user-provided labels of the elements suggested by the active learning strategy
orchestrator_api.set_labels(config.workspace_id, suggested_uris_and_gold_labels)
# train a new model with the additional elements suggested by the active learning strategy
new_model_id = orchestrator_api.train(config.workspace_id, config.category_name, config.model, train_params=config.train_params)
if new_model_id is None:
raise Exception('New model was not trained')
# evaluate the new model
eval_dataset = config.test_dataset_name
res_dict = self.evaluate(config, al.name, iteration, eval_dataset, suggested_text_elements)
res_dict.update(al_batch_dict)
logging.info(f'Evaluation on dataset: {eval_dataset}, with AL: {al.name}, iteration: {iteration}, '
f'repeat: {config.repeat_id}, model (id: {new_model_id}) is: {res_dict}\t'
f'workspace: {config.workspace_id}')
return res_dict, new_model_id
def get_suggested_elements_and_gold_labels(self, config, al):
start = time.time()
suggested_text_elements_for_labeling = \
orchestrator_api.get_elements_to_label(config.workspace_id, config.category_name,
self.active_learning_suggestions_num)
end = time.time()
logging.info(f'{len(suggested_text_elements_for_labeling)} instances '
f'suggested by active learning strategy: {al.name} '
f'for dataset: {config.train_dataset_name} and category: {config.category_name}.\t'
f'runtime: {end - start}\tworkspace: {config.workspace_id}')
uris_for_labeling = [elem.uri for elem in suggested_text_elements_for_labeling]
uris_and_gold_labels = oracle_data_access_api.get_gold_labels(config.train_dataset_name, uris_for_labeling,
config.category_name)
return suggested_text_elements_for_labeling, uris_and_gold_labels
def evaluate(self, config: ExperimentParams, al, iteration, eval_dataset,
suggested_text_elements_for_labeling=None):
metadata_dict = res_handler.generate_metadata_dict(config, eval_dataset, al, iteration)
labels_counts_dict = res_handler.generate_train_labels_counts_dict(config)
performance_dict = res_handler.generate_performance_metrics_dict(config, eval_dataset)
experiment_specific_metrics_dict = \
self.generate_additional_metrics_dict(config, suggested_text_elements_for_labeling)
res_dict = {**metadata_dict, **labels_counts_dict, **performance_dict, **experiment_specific_metrics_dict}
return res_dict
@abc.abstractmethod
def set_first_model_positives(self, config, random_seed) -> List[TextElement]:
"""
Set the positive instances for the training of the first model.
:param config: experiment config for this run
:param random_seed: a seed for the Random being used for sampling
        :return: the list of TextElements set as positive instances
"""
func_name = self.set_first_model_positives.__name__
raise NotImplementedError('users must define ' + func_name + ' to use this base class')
@abc.abstractmethod
def set_first_model_negatives(self, config, random_seed) -> List[TextElement]:
"""
Set the negative instances for the training of the first model.
:param config: experiment config for this run
:param random_seed: a seed for the Random being used for sampling
        :return: the list of TextElements set as negative instances
"""
func_name = self.set_first_model_negatives.__name__
raise NotImplementedError('users must define ' + func_name + ' to use this base class')
@staticmethod
def generate_al_batch_dict(config, batch_elements=None):
batch_dict = {}
model_supports_embeddings = \
orchestrator_api.is_model_compatible_with_active_learning(ActiveLearningStrategies.DAL, config.model)
if batch_elements is not None and model_supports_embeddings:
diversity_value, representativeness_value = compute_batch_scores(config, batch_elements)
batch_dict["diversity"] = diversity_value
batch_dict["representativeness"] = representativeness_value
else:
batch_dict["diversity"] = "NA"
batch_dict["representativeness"] = "NA"
return batch_dict
def generate_additional_metrics_dict(self, config, suggested_text_elements_for_labeling):
return {}
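
# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal sketch of driving the runner above. `MyRunner` (a concrete
# subclass implementing the two abstract set_first_model_* methods) and
# `my_model` (a ModelType value) are assumptions, as are all literals.
#
# runner = MyRunner(first_model_positives_num=50,
#                   first_model_negatives_num=50,
#                   active_learning_suggestions_num=100)
# params = ExperimentParams(
#     experiment_name='demo', train_dataset_name='train_ds',
#     dev_dataset_name='dev_ds', test_dataset_name='test_ds',
#     category_name='cat', workspace_id='demo-repeat-0', model=my_model,
#     active_learning_strategies=[ActiveLearningStrategies.DAL],
#     repeat_id=0, train_params={})
# results = runner.run(params, active_learning_iterations_num=5,
#                      results_file_path='results.csv')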
| 52.590361
| 136
| 0.72333
| 1,633
| 13,095
| 5.453154
| 0.150643
| 0.039528
| 0.041999
| 0.020663
| 0.448961
| 0.344862
| 0.266929
| 0.202358
| 0.163728
| 0.143515
| 0
| 0.001933
| 0.209775
| 13,095
| 248
| 137
| 52.802419
| 0.85862
| 0.113326
| 0
| 0.109827
| 0
| 0.017341
| 0.114004
| 0.024286
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063584
| false
| 0
| 0.115607
| 0.00578
| 0.300578
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6011674256a1e396b16faca45277694f253b2c3f
| 909
|
py
|
Python
|
contrast/environment/data.py
|
alexbjorling/acquisition-framework
|
4090381344aabca05155612845ba4e4a47455dc3
|
[
"MIT"
] | null | null | null |
contrast/environment/data.py
|
alexbjorling/acquisition-framework
|
4090381344aabca05155612845ba4e4a47455dc3
|
[
"MIT"
] | 2
|
2018-09-19T06:49:03.000Z
|
2019-06-28T10:47:37.000Z
|
contrast/environment/data.py
|
alexbjorling/acquisition-framework
|
4090381344aabca05155612845ba4e4a47455dc3
|
[
"MIT"
] | null | null | null |
try:
from tango import DeviceProxy, DevError
except ModuleNotFoundError:
pass
class PathFixer(object):
"""
Basic pathfixer which takes a path manually.
"""
def __init__(self):
self.directory = None
class SdmPathFixer(object):
"""
MAX IV pathfixer which takes a path from a Tango device.
"""
def __init__(self, sdm_device):
self.device = DeviceProxy(sdm_device)
self.TRIALS = 10
self.cache = None
@property
def directory(self):
for trial in range(self.TRIALS):
try:
val = self.device.SamplePath
self.cache = val
return val
except DevError:
print('Failed in getting SDM path from Tango. Trying again...')
print('Failed %u times, using cached value: %s'
% (self.TRIALS, self.cache))
return self.cache
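
# --- Hypothetical usage sketch (not part of the original file) ---
# The Tango device name below is a placeholder; any device exposing a
# `SamplePath` attribute would work.
if __name__ == '__main__':
    fixer = SdmPathFixer('b000a/ctl/sdm-01')
    print('current SDM directory:', fixer.directory)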
| 25.25
| 79
| 0.581958
| 102
| 909
| 5.088235
| 0.490196
| 0.069364
| 0.073218
| 0.077071
| 0.092486
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003295
| 0.332233
| 909
| 35
| 80
| 25.971429
| 0.85173
| 0.111111
| 0
| 0.083333
| 0
| 0
| 0.119691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.041667
| 0.041667
| 0
| 0.333333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60121c6217810f4a6299e69b2f99282f9e977749
| 1,504
|
py
|
Python
|
game_2048/views.py
|
fung04/csrw_game
|
9673fdd311583057d5bf756dec7b99959d961d0c
|
[
"MIT"
] | null | null | null |
game_2048/views.py
|
fung04/csrw_game
|
9673fdd311583057d5bf756dec7b99959d961d0c
|
[
"MIT"
] | null | null | null |
game_2048/views.py
|
fung04/csrw_game
|
9673fdd311583057d5bf756dec7b99959d961d0c
|
[
"MIT"
] | null | null | null |
import json
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import redirect, render
from .models import Game2048
# Create your views here.
# test_user
# 8!S#5RP!WVMACg
def game(request):
return render(request, 'game_2048/index.html')
def set_result(request):
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'POST':
# Get the game state from the POST request
game_state = request.body
obj = Game2048.objects.get(user=user)
        # Check if the game state is identical to the server game state
if game_state != obj.game_state:
            # parse the JSON string into a Python object
json_game_state = json.loads(game_state)
            # extract the value of 'best' from the JSON object
obj.best_score = json_game_state['best']
obj.game_state = json_game_state # save JSON object to game_state
obj.save()
else:
return redirect('game_2048:game')
return JsonResponse("", safe=False)
def get_result(request):
# Check if user is logged in if not set user to test_user
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'GET':
obj, created = Game2048.objects.get_or_create(user=user)
game_state = obj.game_state
return JsonResponse(game_state, safe=False)
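
# --- Hypothetical usage sketch (not part of the original file) ---
# A matching game_2048/urls.py. The redirect above implies the
# 'game_2048:game' route name; the other two route names are assumptions.
#
# from django.urls import path
# from . import views
#
# app_name = 'game_2048'
# urlpatterns = [
#     path('', views.game, name='game'),
#     path('set_result/', views.set_result, name='set_result'),
#     path('get_result/', views.get_result, name='get_result'),
# ]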
| 27.851852
| 84
| 0.672207
| 208
| 1,504
| 4.735577
| 0.326923
| 0.137056
| 0.036548
| 0.034518
| 0.231472
| 0.188832
| 0.188832
| 0.188832
| 0.188832
| 0.188832
| 0
| 0.019248
| 0.240027
| 1,504
| 53
| 85
| 28.377358
| 0.84252
| 0.198803
| 0
| 0.142857
| 0
| 0
| 0.074539
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.178571
| 0.035714
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6013883d7068c2a00e5b4b40942f112984e3413c
| 7,417
|
py
|
Python
|
arviz/plots/pairplot.py
|
gimbo/arviz
|
c1df1847aa5170ad2810ae3d705d576d2643e3ec
|
[
"Apache-2.0"
] | null | null | null |
arviz/plots/pairplot.py
|
gimbo/arviz
|
c1df1847aa5170ad2810ae3d705d576d2643e3ec
|
[
"Apache-2.0"
] | null | null | null |
arviz/plots/pairplot.py
|
gimbo/arviz
|
c1df1847aa5170ad2810ae3d705d576d2643e3ec
|
[
"Apache-2.0"
] | null | null | null |
"""Plot a scatter or hexbin of sampled parameters."""
import warnings
import numpy as np
from ..data import convert_to_dataset, convert_to_inference_data
from .plot_utils import xarray_to_ndarray, get_coords, get_plotting_function
from ..utils import _var_names
def plot_pair(
data,
group="posterior",
var_names=None,
coords=None,
figsize=None,
textsize=None,
kind="scatter",
gridsize="auto",
contour=True,
fill_last=True,
divergences=False,
colorbar=False,
ax=None,
divergences_kwargs=None,
plot_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot a scatter or hexbin matrix of the sampled parameters.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
group : str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names : list of variable names
        Variables to be plotted; if None, all variables are plotted
coords : mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
figsize : figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str
Type of plot to display (scatter, kde or hexbin)
gridsize : int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences : Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar : bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs : dicts, optional
Additional keywords passed to ax.scatter for divergences
plot_kwargs : dicts, optional
Additional keywords passed to ax.plot, az.plot_kde or ax.hexbin
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['theta', 'mu', 'tau'],
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
if kind not in valid_kinds:
raise ValueError(
("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
)
if coords is None:
coords = {}
if plot_kwargs is None:
plot_kwargs = {}
if kind == "scatter":
plot_kwargs.setdefault("marker", ".")
plot_kwargs.setdefault("lw", 0)
if divergences_kwargs is None:
divergences_kwargs = {}
divergences_kwargs.setdefault("marker", "o")
divergences_kwargs.setdefault("markeredgecolor", "k")
divergences_kwargs.setdefault("color", "C1")
divergences_kwargs.setdefault("lw", 0)
# Get posterior draws and combine chains
data = convert_to_inference_data(data)
grouped_data = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, grouped_data)
flat_var_names, infdata_group = xarray_to_ndarray(
get_coords(grouped_data, coords), var_names=var_names, combined=True
)
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
SyntaxWarning,
)
if gridsize == "auto":
gridsize = int(len(infdata_group[0]) ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise Exception("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
infdata_group=infdata_group,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
plot_kwargs=plot_kwargs,
contour=contour,
fill_last=fill_last,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
pairplot_kwargs.pop("gridsize", None)
pairplot_kwargs.pop("colorbar", None)
pairplot_kwargs.pop("divergences_kwargs", None)
pairplot_kwargs.pop("hexbin_values", None)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
| 33.40991
| 99
| 0.62559
| 874
| 7,417
| 5.167048
| 0.255149
| 0.030115
| 0.014172
| 0.011957
| 0.141718
| 0.105182
| 0.067981
| 0.067981
| 0.032108
| 0.023472
| 0
| 0.003777
| 0.2861
| 7,417
| 221
| 100
| 33.561086
| 0.849103
| 0.450047
| 0
| 0.038095
| 0
| 0
| 0.147366
| 0
| 0
| 0
| 0
| 0.004525
| 0
| 1
| 0.009524
| false
| 0
| 0.047619
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
601c3263a4fb21497920c0fe4c9459fa3c4066b9
| 844
|
py
|
Python
|
oops/#016exceptions.py
|
krishankansal/PythonPrograms
|
6d4d989068195b8c8dd9d71cf4f920fef1177cf2
|
[
"MIT"
] | null | null | null |
oops/#016exceptions.py
|
krishankansal/PythonPrograms
|
6d4d989068195b8c8dd9d71cf4f920fef1177cf2
|
[
"MIT"
] | null | null | null |
oops/#016exceptions.py
|
krishankansal/PythonPrograms
|
6d4d989068195b8c8dd9d71cf4f920fef1177cf2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 08:40:11 2020
@author: krishan
"""
def funny_division2(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except (ZeroDivisionError, TypeError):
return "Enter a number other than zero"
def funny_division3(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except ZeroDivisionError:
return "Enter a number other than zero"
except TypeError:
return "Enter a numerical value"
except ValueError as e:
print("The exception arguments were",e.args)
#raise
for val in (0, "hello", 50.0, 13):
print(f"Testing {val}:", funny_division3(val))
| 24.823529
| 55
| 0.609005
| 107
| 844
| 4.775701
| 0.560748
| 0.064579
| 0.07045
| 0.074364
| 0.489237
| 0.489237
| 0.489237
| 0.367906
| 0.367906
| 0.367906
| 0
| 0.061564
| 0.287915
| 844
| 33
| 56
| 25.575758
| 0.788686
| 0.120853
| 0
| 0.5
| 0
| 0
| 0.240437
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.35
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
601c880be1287d7f4ecd5a8ee1ee870db121bb75
| 4,129
|
py
|
Python
|
config/simclr_config.py
|
denn-s/SimCLR
|
e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4
|
[
"MIT"
] | 5
|
2020-08-24T17:57:51.000Z
|
2021-06-06T18:18:19.000Z
|
config/simclr_config.py
|
denn-s/SimCLR
|
e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4
|
[
"MIT"
] | null | null | null |
config/simclr_config.py
|
denn-s/SimCLR
|
e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4
|
[
"MIT"
] | 1
|
2020-08-29T00:35:36.000Z
|
2020-08-29T00:35:36.000Z
|
import os
from datetime import datetime
import torch
from dataclasses import dataclass
class SimCLRConfig:
@dataclass()
class Base:
output_dir_path: str
log_dir_path: str
log_file_path: str
device: object
num_gpu: int
logger_name: str
@dataclass()
class Train:
# batch_size as usual. examples: 16,32,..
batch_size: int
# number of workers to be used for data loading. examples: 2,4,...
num_workers: int
# start training with this epoch. most likely: 0
start_epoch: int
# in case of restart this is where the saved model is expected to be located
restart_log_dir_path: str
# end training with this epoch. examples: 10, 100,...
epochs: int
# directory where the datasets are located. example: "/home/USER_NAME/Data"
data_dir_path: str
# dataset name. options: ["CIFAR10", "STL10", "iNaturalist2019", "ImageNet"]
dataset: str
# save trained model every n epochs. examples: 1,5,10,...
save_num_epochs: int
# image size obtained from last data preparation step
img_size: int
# name of the optimizer. options: ["Adam", "LARS"]
        # TODO: implement LARS optimizer
optimizer: str
weight_decay: float
temperature: float
global_step: int
current_epoch: int
@dataclass()
class Model:
# model architecture. options: ["resnet18", "resnet50"]
resnet: str
normalize: bool
projection_dim: int
@dataclass()
class SimCLR:
train: object
model: object
@dataclass()
class LogisticRegression:
epochs: int
batch_size: int
learning_rate: float
momentum: float
img_size: int
model_path: str
epoch_num: int
@dataclass()
class FineTuning:
epochs: int
batch_size: int
learning_rate: float
momentum: float
img_size: int
save_num_epochs: int
# decay "learning_rate" by a factor of "gamma" every "step_size" epochs
gamma: float
step_size: int
model_path: str
epoch_num: int
@dataclass()
class ONNX:
batch_size: int
img_size: int
model_path: str
epoch_num: int
def __init__(self, config):
global_step = 0
current_epoch = 0
simclr_train = SimCLRConfig.Train(**config['simclr']['train'], global_step=global_step,
current_epoch=current_epoch)
simclr_model = SimCLRConfig.Model(**config['simclr']['model'])
self.simclr = SimCLRConfig.SimCLR(simclr_train, simclr_model)
model_path = None
epoch_num = None
self.logistic_regression = SimCLRConfig.LogisticRegression(**config['logistic_regression'],
model_path=model_path, epoch_num=epoch_num)
model_path = None
epoch_num = None
self.fine_tuning = SimCLRConfig.FineTuning(**config['fine_tuning'], model_path=model_path,
epoch_num=epoch_num)
model_path = None
epoch_num = None
self.onnx = SimCLRConfig.ONNX(**config['onnx'], model_path=model_path, epoch_num=epoch_num)
logger_name = config['logger_name']
output_dir_path = 'output'
now = datetime.now()
dt_string: str = now.strftime("%Y_%m_%d_%H_%M_%S")
log_dir_name = dt_string + '_' + logger_name + '_' + self.simclr.train.dataset.lower()
log_dir_path = os.path.join(output_dir_path, log_dir_name)
log_file_path = os.path.join(log_dir_path, 'log.txt')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
num_gpu = torch.cuda.device_count()
self.base = SimCLRConfig.Base(output_dir_path, log_dir_path, log_file_path, device, num_gpu, logger_name)
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
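
# --- Hypothetical usage sketch (not part of the original file) ---
# Builds a config from a plain dict (e.g. loaded from YAML). The nested
# keys mirror the dataclasses above; every concrete value is illustrative.
if __name__ == '__main__':
    raw = {
        'logger_name': 'simclr',
        'simclr': {
            'train': {'batch_size': 32, 'num_workers': 2, 'start_epoch': 0,
                      'restart_log_dir_path': '', 'epochs': 10,
                      'data_dir_path': '/tmp/data', 'dataset': 'CIFAR10',
                      'save_num_epochs': 1, 'img_size': 32,
                      'optimizer': 'Adam', 'weight_decay': 1e-6,
                      'temperature': 0.5},
            'model': {'resnet': 'resnet18', 'normalize': True,
                      'projection_dim': 64},
        },
        'logistic_regression': {'epochs': 5, 'batch_size': 32,
                                'learning_rate': 1e-3, 'momentum': 0.9,
                                'img_size': 32},
        'fine_tuning': {'epochs': 5, 'batch_size': 32, 'learning_rate': 1e-3,
                        'momentum': 0.9, 'img_size': 32, 'save_num_epochs': 1,
                        'gamma': 0.1, 'step_size': 5},
        'onnx': {'batch_size': 1, 'img_size': 32},
    }
    cfg = SimCLRConfig(raw)
    print(cfg.base.log_dir_path)  # e.g. output/2020_08_24_17_57_51_simclr_cifar10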
| 28.475862
| 113
| 0.601356
| 489
| 4,129
| 4.813906
| 0.302658
| 0.045879
| 0.02124
| 0.020391
| 0.196686
| 0.180544
| 0.180544
| 0.168224
| 0.153781
| 0.139762
| 0
| 0.010915
| 0.312182
| 4,129
| 144
| 114
| 28.673611
| 0.817958
| 0.178736
| 0
| 0.378947
| 0
| 0
| 0.032573
| 0
| 0
| 0
| 0
| 0.006944
| 0
| 1
| 0.021053
| false
| 0
| 0.042105
| 0.010526
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
601e563b0639154915d91614f293088729954120
| 6,729
|
py
|
Python
|
mldftdat/scripts/train_gp.py
|
mir-group/CiderPress
|
bf2b3536e6bd7432645c18dce5a745d63bc9df59
|
[
"MIT"
] | 10
|
2021-09-09T06:51:57.000Z
|
2021-12-17T09:48:41.000Z
|
mldftdat/scripts/train_gp.py
|
mir-group/CiderPress
|
bf2b3536e6bd7432645c18dce5a745d63bc9df59
|
[
"MIT"
] | null | null | null |
mldftdat/scripts/train_gp.py
|
mir-group/CiderPress
|
bf2b3536e6bd7432645c18dce5a745d63bc9df59
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
def parse_settings(args):
fname = args.datasets_list[0]
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
with open(os.path.join(fname, 'settings.yaml'), 'r') as f:
d = yaml.load(f, Loader=yaml.Loader)
args.gg_a0 = d.get('a0')
args.gg_amin = d.get('amin')
args.gg_facmul = d.get('fac_mul')
def parse_dataset(args, i, val=False):
if val:
fname = args.validation_set[2*i]
n = int(args.validation_set[2*i+1])
else:
fname = args.datasets_list[2*i]
n = int(args.datasets_list[2*i+1])
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
X, y, rho_data = load_descriptors(fname)
if val:
# offset in case repeat datasets are used
X, y, rho_data = X[n//2+1:,:], y[n//2+1:], rho_data[:,n//2+1:]
X, y, rho, rho_data = filter_descriptors(X, y, rho_data,
tol=args.density_cutoff)
print(X.shape, n)
if args.randomize:
inds = np.arange(X.shape[0])
np.random.shuffle(inds)
X = X[inds,:]
y = y[inds]
rho = rho[inds]
rho_data = rho_data[:,inds]
return X[::n,:], y[::n], rho[::n], rho_data[:,::n]
def parse_list(lststr, T=int):
return [T(substr) for substr in lststr.split(',')]
def main():
parser = ArgumentParser(description='Trains a GP exchange model')
parser.add_argument('save_file', type=str)
parser.add_argument('feature_file', type=str,
help='serialized FeatureList object in yaml format')
parser.add_argument('datasets_list', nargs='+',
help='pairs of dataset names and inverse sampling densities')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('-r', '--randomize', action='store_true')
parser.add_argument('-c', '--density-cutoff', type=float, default=1e-4)
#parser.add_argument('-m', '--model-class', type=str, default=None)
#parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None)
parser.add_argument('-s', '--seed', help='random seed', default=0, type=int)
parser.add_argument('-vs', '--validation-set', nargs='+')
parser.add_argument('-d', '--delete-k', action='store_true',
help='Delete L (LL^T=K the kernel matrix) to save disk space. Need to refit when reloading to calculate covariance.')
parser.add_argument('--heg', action='store_true', help='HEG exact constraint')
parser.add_argument('--tail', action='store_true', help='atomic tail exact constraint')
parser.add_argument('-o', '--desc-order', default=None,
help='comma-separated list of descriptor order with no spaces. must start with 0,1.')
parser.add_argument('-l', '--length-scale', default=None,
help='comma-separated list initial length-scale guesses')
parser.add_argument('--length-scale-mul', type=float, default=1.0,
help='Used for automatic length-scale initial guess')
parser.add_argument('-a', '--agpr', action='store_true',
help='Whether to use Additive RBF. If False, use RBF')
parser.add_argument('-as', '--agpr-scale', default=None)
parser.add_argument('-ao', '--agpr-order', default=2, type=int)
parser.add_argument('-an', '--agpr-nsingle', default=1, type=int)
parser.add_argument('-x', '--xed-y-code', default='CHACHIYO', type=str)
parser.add_argument('-on', '--optimize-noise', action='store_true',
                        help='Whether to optimize the exponent of density noise.')
parser.add_argument('-v', '--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
parse_settings(args)
np.random.seed(args.seed)
feature_list = FeatureList.load(args.feature_file)
if args.length_scale is not None:
args.length_scale = parse_list(args.length_scale, T=float)
if args.agpr_scale is not None:
args.agpr_scale = parse_list(args.agpr_scale, T=float)
if args.desc_order is not None:
args.desc_order = parse_list(args.desc_order)
assert len(args.datasets_list) % 2 == 0, 'Need pairs of entries for datasets list.'
assert len(args.datasets_list) != 0, 'Need training data'
nd = len(args.datasets_list) // 2
if args.validation_set is None:
nv = 0
else:
assert len(args.validation_set) % 2 == 0, 'Need pairs of entries for datasets list.'
nv = len(args.validation_set) // 2
X, y, rho, rho_data = parse_dataset(args, 0)
for i in range(1, nd):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i)
X = np.append(X, Xn, axis=0)
y = np.append(y, yn, axis=0)
rho = np.append(rho, rhon, axis=0)
rho_data = np.append(rho_data, rho_datan, axis=1)
if nv != 0:
Xv, yv, rhov, rho_datav = parse_dataset(args, 0, val=True)
for i in range(1, nv):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i, val=True)
Xv = np.append(Xv, Xn, axis=0)
yv = np.append(yv, yn, axis=0)
rhov = np.append(rhov, rhon, axis=0)
rho_datav = np.append(rho_datav, rho_datan, axis=1)
gpcls = DFTGPR
gpr = gpcls.from_settings(X, feature_list, args)
gpr.fit(X, y, add_heg=args.heg, add_tail=args.tail)
#if args.heg:
# gpr.add_heg_limit()
print('FINAL KERNEL', gpr.gp.kernel_)
if nv != 0:
pred = gpr.xed_to_y(gpr.predict(Xv), Xv)
abserr = np.abs(pred - gpr.xed_to_y(yv, Xv))
print('MAE VAL SET', np.mean(abserr))
# Always attach the arguments to the object to keep track of settings.
gpr.args = args
if args.delete_k:
gpr.L_ = None
dump(gpr, args.save_file)
if __name__ == '__main__':
main()
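
# --- Hypothetical invocation sketch (not part of the original file) ---
# Every file name, dataset name, sampling density, basis code and
# functional below is a placeholder:
#
#   python train_gp.py gpr.joblib features.yaml MY_DATASET 4 my_basis \
#       --functional PBE -r -s 42
#
# datasets_list takes pairs of (name, inverse sampling density); the
# positional basis argument follows the last pair.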
| 43.412903
| 141
| 0.622975
| 966
| 6,729
| 4.212215
| 0.23499
| 0.055296
| 0.104448
| 0.023347
| 0.280659
| 0.140084
| 0.110101
| 0.092898
| 0.092898
| 0.059474
| 0
| 0.009117
| 0.233913
| 6,729
| 154
| 142
| 43.694805
| 0.780213
| 0.045921
| 0
| 0.121212
| 0
| 0.007576
| 0.194449
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 1
| 0.030303
| false
| 0
| 0.060606
| 0.007576
| 0.106061
| 0.037879
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
601f1b72f2f10dacace33b87801d53b05bfc4ed8
| 5,684
|
py
|
Python
|
picoCTF-web/api/routes/admin.py
|
zaratec/picoCTF
|
b0a63f03625bb4657a8116f43bea26346ca6f010
|
[
"MIT"
] | null | null | null |
picoCTF-web/api/routes/admin.py
|
zaratec/picoCTF
|
b0a63f03625bb4657a8116f43bea26346ca6f010
|
[
"MIT"
] | null | null | null |
picoCTF-web/api/routes/admin.py
|
zaratec/picoCTF
|
b0a63f03625bb4657a8116f43bea26346ca6f010
|
[
"MIT"
] | null | null | null |
import api
import bson
from api.annotations import (
api_wrapper,
log_action,
require_admin,
require_login,
require_teacher
)
from api.common import WebError, WebSuccess
from flask import (
Blueprint,
Flask,
render_template,
request,
send_from_directory,
session
)
blueprint = Blueprint("admin_api", __name__)
@blueprint.route('/problems', methods=['GET'])
@api_wrapper
@require_admin
def get_problem_data_hook():
has_instances = lambda p : len(p["instances"]) > 0
problems = list(filter(has_instances, api.problem.get_all_problems(show_disabled=True)))
for problem in problems:
problem["reviews"] = api.problem_feedback.get_problem_feedback(pid=problem["pid"])
data = {
"problems": problems,
"bundles": api.problem.get_all_bundles()
}
return WebSuccess(data=data)
@blueprint.route('/users', methods=['GET'])
@api_wrapper
@require_admin
def get_all_users_hook():
users = api.user.get_all_users()
if users is None:
return WebError("There was an error query users from the database.")
return WebSuccess(data=users)
@blueprint.route('/exceptions', methods=['GET'])
@api_wrapper
@require_admin
def get_exceptions_hook():
try:
limit = abs(int(request.args.get("limit")))
exceptions = api.admin.get_api_exceptions(result_limit=limit)
return WebSuccess(data=exceptions)
except (ValueError, TypeError):
return WebError("limit is not a valid integer.")
@blueprint.route('/exceptions/dismiss', methods=['POST'])
@api_wrapper
@require_admin
def dismiss_exceptions_hook():
trace = request.form.get("trace", None)
if trace:
api.admin.dismiss_api_exceptions(trace)
return WebSuccess(data="Successfuly changed exception visibility.")
else:
return WebError(message="You must supply a trace to hide.")
@blueprint.route("/problems/submissions", methods=["GET"])
@api_wrapper
@require_admin
def get_problem():
submission_data = {p["name"]:api.stats.get_problem_submission_stats(pid=p["pid"]) \
for p in api.problem.get_all_problems(show_disabled=True)}
return WebSuccess(data=submission_data)
@blueprint.route("/problems/availability", methods=["POST"])
@api_wrapper
@require_admin
def change_problem_availability_hook():
pid = request.form.get("pid", None)
desired_state = request.form.get("state", None)
    if desired_state is None:
return WebError("Problems are either enabled or disabled.")
else:
state = bson.json_util.loads(desired_state)
api.admin.set_problem_availability(pid, state)
return WebSuccess(data="Problem state changed successfully.")
@blueprint.route("/shell_servers", methods=["GET"])
@api_wrapper
@require_admin
def get_shell_servers():
return WebSuccess(data=api.shell_servers.get_servers())
@blueprint.route("/shell_servers/add", methods=["POST"])
@api_wrapper
@require_admin
def add_shell_server():
params = api.common.flat_multi(request.form)
api.shell_servers.add_server(params)
return WebSuccess("Shell server added.")
@blueprint.route("/shell_servers/update", methods=["POST"])
@api_wrapper
@require_admin
def update_shell_server():
params = api.common.flat_multi(request.form)
sid = params.get("sid", None)
if sid is None:
return WebError("Must specify sid to be updated")
api.shell_servers.update_server(sid, params)
return WebSuccess("Shell server updated.")
@blueprint.route("/shell_servers/remove", methods=["POST"])
@api_wrapper
@require_admin
def remove_shell_server():
sid = request.form.get("sid", None)
if sid is None:
return WebError("Must specify sid to be removed")
api.shell_servers.remove_server(sid)
return WebSuccess("Shell server removed.")
@blueprint.route("/shell_servers/load_problems", methods=["POST"])
@api_wrapper
@require_admin
def load_problems_from_shell_server():
sid = request.form.get("sid", None)
if sid is None:
return WebError("Must provide sid to load from.")
number = api.shell_servers.load_problems_from_server(sid)
return WebSuccess("Loaded {} problems from the server".format(number))
@blueprint.route("/shell_servers/check_status", methods=["GET"])
@api_wrapper
@require_admin
def check_status_of_shell_server():
sid = request.args.get("sid", None)
if sid is None:
return WebError("Must provide sid to load from.")
all_online, data = api.shell_servers.get_problem_status_from_server(sid)
if all_online:
return WebSuccess("All problems are online", data=data)
else:
return WebError("One or more problems are offline. Please connect and fix the errors.", data=data)
@blueprint.route("/bundle/dependencies_active", methods=["POST"])
@api_wrapper
@require_admin
def bundle_dependencies():
bid = request.form.get("bid", None)
state = request.form.get("state", None)
if bid is None:
return WebError("Must provide bid to load from.")
if state is None:
return WebError("Must provide a state to set.")
state = bson.json_util.loads(state)
api.problem.set_bundle_dependencies_enabled(bid, state)
return WebSuccess("Dependencies are now {}.".format("enabled" if state else "disabled"))
@blueprint.route("/settings", methods=["GET"])
@api_wrapper
@require_admin
def get_settings():
return WebSuccess(data=api.config.get_settings())
@blueprint.route("/settings/change", methods=["POST"])
@api_wrapper
@require_admin
def change_settings():
data = bson.json_util.loads(request.form["json"])
api.config.change_settings(data)
return WebSuccess("Settings updated")
| 29.450777
| 106
| 0.714814
| 742
| 5,684
| 5.281671
| 0.198113
| 0.040827
| 0.065068
| 0.084205
| 0.331462
| 0.292166
| 0.276346
| 0.196989
| 0.116356
| 0.069916
| 0
| 0.00021
| 0.16133
| 5,684
| 192
| 107
| 29.604167
| 0.821901
| 0
| 0
| 0.275641
| 0
| 0
| 0.186312
| 0.029381
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.032051
| 0.012821
| 0.294872
| 0.108974
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60226c7d97ac7aadd65011be5f070784ee3088d9
| 8,504
|
py
|
Python
|
venv/lib/python3.9/site-packages/biorun/fetch.py
|
LucaCilibrasi/docker_viruclust
|
88149c17fd4b94a54397d0cb4a9daece00122c49
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.9/site-packages/biorun/fetch.py
|
LucaCilibrasi/docker_viruclust
|
88149c17fd4b94a54397d0cb4a9daece00122c49
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.9/site-packages/biorun/fetch.py
|
LucaCilibrasi/docker_viruclust
|
88149c17fd4b94a54397d0cb4a9daece00122c49
|
[
"Apache-2.0"
] | null | null | null |
"""
Handles functionality related to data storage.
"""
import sys, os, glob, re, gzip, json
from biorun import const, utils, objects, ncbi
from biorun.models import jsonrec
import biorun.libs.placlib as plac
# Module level logger.
logger = utils.logger
# A nicer error message on incorrect installation.
try:
from Bio import SeqIO
except ImportError as exc:
print(f"*** Error: {exc}", file=sys.stderr)
print(f"*** This program requires biopython", file=sys.stderr)
print(f"*** Install: conda install -y biopython>=1.78", file=sys.stderr)
sys.exit(-1)
def resolve_fname(name, format='json'):
"""
Resolve a file name given an accession number.
"""
ext = format.lower()
fname = f"{name}.{ext}.gz"
fname = os.path.join(utils.DATADIR, fname)
return fname
def delete_data(text):
"""
Deletes data under a filename.
"""
for name in text.split(","):
fname = resolve_fname(name)
if os.path.isfile(fname):
os.remove(fname)
logger.info(f"removed: {fname}")
else:
logger.info(f"file does not exist: {fname}")
def read_json_file(fname):
"""
Returns the content of a JSON file.
"""
fp = utils.gz_read(fname)
data = json.load(fp)
fp.close()
return data
def save_json_file(fname, data):
"""
    Saves data to a JSON file and returns it.
"""
fp = utils.gz_write(fname)
json.dump(data, fp)
fp.close()
logger.info(f"saved {fname}")
return data
def change_seqid(json_name, seqid):
"""
Changes the sequence id stored in a json file.
"""
if os.path.isfile(json_name):
data = read_json_file(json_name)
for item in data:
item[const.SEQID] = seqid
fp = utils.gz_write(json_name)
json.dump(data, fp)
fp.close()
def fetch_data(data, param):
"""
Obtains data from NCBI. Fills each parameter with a json field.
"""
db = "protein" if param.protein else "nuccore"
# Ensure json DB is built
ncbi.build_db()
genbank, taxon_acc, refseq = ncbi.get_data()
for name in data:
# Pretend no data if it is an update.
json = None if param.update else get_json(name)
# The data exists, nothing needs to be done.
if json:
continue
# The JSON representation of the data.
json_name = resolve_fname(name=name, format="json")
# GenBank representation of the data.
gbk_name = resolve_fname(name=name, format="gb")
# Genome assembly data.
if name.startswith("GCA") or name.startswith("GCF"):
ncbi.genome(name=name, fname=gbk_name, update=param.update, genbank=genbank,
refseq=refseq)
else:
# Genbank data.
ncbi.genbank_save(name, db=db, fname=gbk_name)
# Convert Genbank to JSON.
data = jsonrec.parse_file(fname=gbk_name, seqid=param.seqid)
# Save JSON file.
save_json_file(fname=json_name, data=data)
def genbank_view(params):
for param in params:
altname = resolve_fname(param.acc, format="gb")
if os.path.isfile(param.acc):
stream = utils.gz_read(param.acc)
elif os.path.isfile(altname):
stream = utils.gz_read(altname)
else:
stream = []
utils.error(f"data not found: {param.acc}")
for line in stream:
print(line, end='')
def get_json(name, seqid=None, inter=False, strict=False):
"""
Attempts to return a JSON formatted data based on a name.
"""
# Data is an existing path to a JSON file.
if os.path.isfile(name):
try:
data = jsonrec.parse_file(name, seqid=seqid)
except Exception as exc:
logger.error(f"JSON parsing error for file {name}: {exc}")
sys.exit(-1)
return data
# The JSON representation of the data.
json_name = resolve_fname(name=name, format="json")
# GenBank representation of the data.
gbk_name = resolve_fname(name=name, format="gb")
# Found the JSON representation of the file.
if os.path.isfile(json_name):
logger.info(f"found {json_name}")
data = read_json_file(json_name)
return data
# There is no JSON file but there is a GenBank file.
if os.path.isfile(gbk_name):
logger.info(f"found {gbk_name}")
data = jsonrec.parse_file(fname=gbk_name, seqid=seqid)
data = save_json_file(fname=json_name, data=data)
return data
# Interactive input, make JSON from name
if inter:
data = jsonrec.make_jsonrec(name, seqid=seqid)
return data
# Raise error if in strict mode
if strict:
utils.error(f"data not found: {name}")
return None
def rename_data(data, param, newname=None):
"""
Rename data.
"""
# Will only rename a single data
newnames = newname.split(",")
for name1, name2 in zip(data, newnames):
src_json = resolve_fname(name=name1, format="json")
dest_json = resolve_fname(name=name2, format="json")
src_gb = resolve_fname(name=name1, format="gb")
dest_gb = resolve_fname(name=name2, format="gb")
if os.path.isfile(src_json):
logger.info(f"renamed {name1} as {name2}")
os.rename(src_json, dest_json)
if param.seqid:
change_seqid(dest_json, seqid=param.seqid)
else:
logger.info(f"file not found: {src_json}")
if os.path.isfile(src_gb):
if not os.path.isfile(dest_gb):
os.symlink(src_gb, dest_gb)
else:
logger.info(f"file not found: {src_gb}")
def print_data_list():
"""
Returns a list of the files in the data directory
"""
pattern = os.path.join(os.path.join(utils.DATADIR, '*.json.gz'))
matched = glob.glob(pattern)
# Extract the definition from the JSON without parsing it.
patt = re.compile(r'(definition\":\s*)(?P<value>\".+?\")')
collect = []
for path in matched:
fsize = utils.human_size(os.path.getsize(path))
base, fname = os.path.split(path)
fname = fname.rsplit(".", maxsplit=2)[0]
# Parse the first N lines
stream = gzip.open(path, 'rt') if path.endswith('gz') else open(path, 'rt')
text = stream.read(1000)
match = patt.search(text)
title = match.group("value") if match else ''
title = title.strip('", ')
# Trim the title
stitle = title[:100]
stitle = stitle + "..." if len(title) != len(stitle) else stitle
collect.append((str(fsize), f"{fname:10s}", stitle))
collect = sorted(collect, key=lambda x: x[2])
for row in collect:
line = "\t".join(row)
print(line)
@plac.pos("data", "data names")
@plac.flg('fetch', "download data as accessions")
@plac.flg('update', "updates data in storage")
@plac.opt('rename', "rename the data")
@plac.opt('seqid', "set the sequence id of the data")
@plac.flg('protein', "use the protein database")
@plac.flg('build', "build the database")
@plac.flg('verbose', "verbose mode")
def run(update=False, rename='', seqid='', protein=False, verbose=False, *data):
"""
Fetches and manages data in storage.
"""
# Set the verbosity
utils.set_verbosity(logger, level=int(verbose))
# Reset counter (needed for consistency during testing).
jsonrec.reset_counter()
# A simple wrapper class to represent input parameters.
param = objects.Param(seqid=seqid, rename=rename, start=1, protein=protein, update=update)
# Fetch the data.
fetch_data(data, param=param)
# Renaming after fetching.
if rename:
rename_data(data, param=param, newname=rename)
@plac.opt('delete', "deletes foo from storage", metavar='foo')
@plac.flg('verbose', "verbose mode")
def data(delete, verbose=False):
"""
Shows the data in the storage.
Usage:
bio data : lists the data
bio data --delete foo : deletes data called foo
bio data --delete foo,bar : deletes multiple datasets
"""
# Set the verbosity
utils.set_verbosity(logger, level=int(verbose))
# Reset counter (needed for consistency during testing).
jsonrec.reset_counter()
# Delete should be the first to execute.
if delete:
delete_data(delete)
else:
# Prints the data listing.
print_data_list()
| 28.441472
| 94
| 0.61477
| 1,153
| 8,504
| 4.457069
| 0.243712
| 0.017513
| 0.031134
| 0.021794
| 0.272621
| 0.21366
| 0.177077
| 0.163067
| 0.111695
| 0.111695
| 0
| 0.004157
| 0.264581
| 8,504
| 299
| 95
| 28.441472
| 0.817557
| 0.200141
| 0
| 0.216049
| 0
| 0
| 0.114216
| 0.005468
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.160494
| 0.04321
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6022c4c8c548f73dbd95a825913c8b4639f2e4dc
| 1,049
|
py
|
Python
|
game/items/game_item.py
|
LaverdeS/Genetic_Algorithm_EGame
|
89ff8c7870fa90768f4616cab6803227c8613396
|
[
"MIT"
] | 2
|
2019-07-02T15:20:46.000Z
|
2020-03-04T13:31:12.000Z
|
game/items/game_item.py
|
shivaa511/EGame
|
6db10cb5cf7431093d2ab09a9e4049d6633fe792
|
[
"MIT"
] | 2
|
2019-07-16T16:50:19.000Z
|
2020-03-04T12:52:45.000Z
|
game/items/game_item.py
|
shivaa511/EGame
|
6db10cb5cf7431093d2ab09a9e4049d6633fe792
|
[
"MIT"
] | 8
|
2018-06-06T15:14:48.000Z
|
2018-07-08T11:46:10.000Z
|
import numpy as np
from random import randint
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QPointF
class GameItem():
def __init__(self, parent, boundary, position=None):
self.parent = parent
self.config = parent.config
self.items_config = self.config.items
if position is None:
_left_border = boundary
_right_border = int(self.parent.frame_dimension[0]) - boundary
_top_border = boundary
_bottom_border = int(self.parent.frame_dimension[1]) - boundary
_x = float(randint(_left_border, _right_border))
_y = float(randint(_top_border, _bottom_border))
self._position = np.array([_x, _y])
else:
self._position = position
def draw_image(self, painter):
item_image = QImage(self.image)
painter.drawImage(QPointF(self._position[0]-(item_image.height()/2),
self._position[1]-(item_image.width()/2)),
item_image)
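
# --- Hypothetical usage sketch (not part of the original file) ---
# GameItem only needs a parent exposing `config` (with an `items` field)
# and `frame_dimension`; the stubs and the boundary value are illustrative.
if __name__ == '__main__':
    class _StubConfig:
        items = {}

    class _StubParent:
        config = _StubConfig()
        frame_dimension = (800, 600)

    item = GameItem(_StubParent(), boundary=20)
    print('spawned at', item._position)  # random position inside the frame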
| 37.464286
| 77
| 0.611058
| 121
| 1,049
| 5
| 0.396694
| 0.066116
| 0.042975
| 0.06281
| 0.109091
| 0.109091
| 0
| 0
| 0
| 0
| 0
| 0.010825
| 0.29552
| 1,049
| 28
| 78
| 37.464286
| 0.807848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6022d662d09b473f63deec188827d3c36ba79479
| 6,750
|
py
|
Python
|
source/deepsecurity/models/application_type_rights.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/models/application_type_rights.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/models/application_type_rights.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApplicationTypeRights(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'can_create_new_application_types': 'bool',
'can_delete_application_types': 'bool',
'can_edit_application_type_properties': 'bool'
}
attribute_map = {
'can_create_new_application_types': 'canCreateNewApplicationTypes',
'can_delete_application_types': 'canDeleteApplicationTypes',
'can_edit_application_type_properties': 'canEditApplicationTypeProperties'
}
def __init__(self, can_create_new_application_types=None, can_delete_application_types=None, can_edit_application_type_properties=None): # noqa: E501
"""ApplicationTypeRights - a model defined in Swagger""" # noqa: E501
self._can_create_new_application_types = None
self._can_delete_application_types = None
self._can_edit_application_type_properties = None
self.discriminator = None
if can_create_new_application_types is not None:
self.can_create_new_application_types = can_create_new_application_types
if can_delete_application_types is not None:
self.can_delete_application_types = can_delete_application_types
if can_edit_application_type_properties is not None:
self.can_edit_application_type_properties = can_edit_application_type_properties
@property
def can_create_new_application_types(self):
"""Gets the can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
Right to create new application types. # noqa: E501
:return: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_create_new_application_types
@can_create_new_application_types.setter
def can_create_new_application_types(self, can_create_new_application_types):
"""Sets the can_create_new_application_types of this ApplicationTypeRights.
Right to create new application types. # noqa: E501
:param can_create_new_application_types: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_create_new_application_types = can_create_new_application_types
@property
def can_delete_application_types(self):
"""Gets the can_delete_application_types of this ApplicationTypeRights. # noqa: E501
Right to delete application types. # noqa: E501
:return: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_delete_application_types
@can_delete_application_types.setter
def can_delete_application_types(self, can_delete_application_types):
"""Sets the can_delete_application_types of this ApplicationTypeRights.
Right to delete application types. # noqa: E501
:param can_delete_application_types: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_delete_application_types = can_delete_application_types
@property
def can_edit_application_type_properties(self):
"""Gets the can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
Right to edit application type properties. # noqa: E501
:return: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_edit_application_type_properties
@can_edit_application_type_properties.setter
def can_edit_application_type_properties(self, can_edit_application_type_properties):
"""Sets the can_edit_application_type_properties of this ApplicationTypeRights.
Right to edit application type properties. # noqa: E501
:param can_edit_application_type_properties: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_edit_application_type_properties = can_edit_application_type_properties
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApplicationTypeRights, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplicationTypeRights):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
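
# --- Hypothetical usage sketch (not part of the original file) ---
# Field values below are illustrative only.
if __name__ == '__main__':
    rights = ApplicationTypeRights(can_create_new_application_types=True,
                                   can_delete_application_types=False)
    print(rights.to_str())
    print(rights == ApplicationTypeRights(
        can_create_new_application_types=True,
        can_delete_application_types=False))  # True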
| 38.571429
| 311
| 0.663556
| 776
| 6,750
| 5.439433
| 0.189433
| 0.159204
| 0.099502
| 0.124378
| 0.627103
| 0.557451
| 0.498223
| 0.405117
| 0.336887
| 0.268183
| 0
| 0.015207
| 0.269333
| 6,750
| 174
| 312
| 38.793103
| 0.840633
| 0.349778
| 0
| 0.065789
| 0
| 0
| 0.085025
| 0.075974
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.039474
| 0
| 0.355263
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60254d5cf06d095bd8f90781b32cfb0d4a95c6e4
| 3,900
|
py
|
Python
|
code-samples/aws_neptune.py
|
hardikvasa/database-journal
|
7932b5a7fe909f8adb3a909183532b43d450da7b
|
[
"MIT"
] | 45
|
2019-06-07T07:12:09.000Z
|
2022-03-20T19:58:53.000Z
|
code-samples/aws_neptune.py
|
hardikvasa/database-journal
|
7932b5a7fe909f8adb3a909183532b43d450da7b
|
[
"MIT"
] | 1
|
2019-06-09T17:23:05.000Z
|
2019-06-10T18:36:20.000Z
|
code-samples/aws_neptune.py
|
hardikvasa/database-journal
|
7932b5a7fe909f8adb3a909183532b43d450da7b
|
[
"MIT"
] | 15
|
2019-06-07T07:12:12.000Z
|
2022-01-02T01:09:53.000Z
|
from __future__ import print_function # Python 2/3 compatibility
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
#initializing the graph object
graph = Graph()
#creating connection with the remote
remoteConn = DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g')
# reuse the same remote connection so that remoteConn.close() below tears it down
g = graph.traversal().withRemote(remoteConn)
print('Connection created.')
#clearing out all the vertices to start fresh
g.V().drop().iterate()
print('Deleting everything and starting clean.')
#Adding some vertices (nodes)
gerald = g.addV('person').property('age','81').property('first_name','Gerald').property('stays_in','Portland').next()
edith = g.addV('person').property('age','78').property('first_name','Edith').property('stays_in','Portland').next()
shane = g.addV('person').property('age','52').property('first_name','Shane').property('stays_in','Seattle').next()
mary = g.addV('person').property('age','50').property('first_name','Mary').property('stays_in','Seattle').next()
betty = g.addV('person').property('age','19').property('first_name','Betty').property('stays_in','Chicago').next()
print('Added some vertices (nodes).')
#Adding relationships (edges)
edge = g.V().has('first_name', 'Gerald').addE('husband_of').to(g.V().has('first_name', 'Edith')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Edith').addE('wife_of').to(g.V().has('first_name', 'Gerald')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Gerald')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Gerald').addE('father_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Edith')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Edith').addE('mother_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('husband_of').to(g.V().has('first_name', 'Mary')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Mary').addE('wife_of').to(g.V().has('first_name', 'Shane')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Shane').addE('father_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Shane')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Mary').addE('mother_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Mary')).property('known_since','1991').next()
#print out all the node's first names
print('\n Printing first name from all nodes:')
print(g.V().first_name.toList())
#print out all the properties of the person whose first name is Shane
print('\n Printing all properties of person whose first name is Shane:')
print(g.V().has('person','first_name','Shane').valueMap().next())
#traversing the graph starting with Betty to then Shane to then Edith
print('\n Finding Betty and then looking up her parents:')
print(g.V().has('first_name', 'Betty').out('daughter_of').out('son_of').valueMap().toList())
#Print out all the nodes
print('\n Printing out all the nodes:')
people = g.V().valueMap().toList()
print(people)
#Print out all the connections (edges)
print('\n Print out all the connections (edges):')
connections = g.E().valueMap().toList()
print(connections)
#Closing the connection
remoteConn.close()
print('Connection closed!')
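# --- Hedged examples (not in the original walkthrough) ---
# A couple of extra traversals that could be run before remoteConn.close(),
# shown as comments; they use only standard Gremlin steps.
#
# count all 'person' vertices:
#     print(g.V().hasLabel('person').count().next())
# find everyone staying in Seattle:
#     print(g.V().has('stays_in', 'Seattle').values('first_name').toList())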
| 57.352941
| 136
| 0.704615
| 590
| 3,900
| 4.525424
| 0.19661
| 0.117978
| 0.048689
| 0.093633
| 0.543446
| 0.428839
| 0.365543
| 0.365543
| 0.354307
| 0.296255
| 0
| 0.018702
| 0.067692
| 3,900
| 68
| 137
| 57.352941
| 0.715622
| 0.112821
| 0
| 0
| 0
| 0
| 0.375
| 0.016821
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0.340909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6025b1cfb25bd8e7710a10ffd3f52c87c8e4a3b7
| 15,045
|
py
|
Python
|
kits19cnn/io/preprocess_train.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | null | null | null |
kits19cnn/io/preprocess_train.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | null | null | null |
kits19cnn/io/preprocess_train.py
|
Ramsha04/kits19-2d-reproduce
|
66678f1eda3688d6dc64389e9a80ae0b754a3052
|
[
"Apache-2.0"
] | null | null | null |
import os
from os.path import join, isdir
from pathlib import Path
from collections import defaultdict
from tqdm import tqdm
import nibabel as nib
import numpy as np
import json
from .resample import resample_patient
from .custom_augmentations import resize_data_and_seg, crop_to_bbox
class Preprocessor(object):
"""
Preprocesses the original dataset (interpolated).
Procedures:
* Resampled all volumes to have a thickness of 3mm.
* Clipped to [-30, 300] HU
* z-score standardization (zero mean and unit variance)
* Standardization per 3D image instead of ACROSS THE WHOLE
TRAINING SET
* save as .npy array
* imaging.npy
* segmentation.npy (if with_masks)
"""
def __init__(self, in_dir, out_dir, cases=None, kits_json_path=None,
bbox_json_path=None, clip_values=[-30, 300], with_mask=True,
fg_classes=[0, 1, 2], resize_xy_shape=(256, 256)):
"""
Attributes:
in_dir (str): directory with the input data. Should be the
kits19/data directory.
out_dir (str): output directory where you want to save each case
cases: list of case folders to preprocess
kits_json_path (str): path to the kits.json file in the kits19/data
directory. This should only be specified if you're resampling.
Defaults to None.
bbox_json_path (str): path to the bbox_stage1.json file made from
stage1 post-processing. Triggers cropping to the bboxes.
Defaults to None.
clip_values (list, tuple): values you want to clip CT scans to.
Defaults to None for no clipping.
with_mask (bool): whether or not to preprocess with masks or no
masks. Applicable to preprocessing test set (no labels
available).
fg_classes (list): of foreground class indices
if None, doesn't gather fg class stats.
"""
self.in_dir = in_dir
self.out_dir = out_dir
self._load_kits_json(kits_json_path)
self._load_bbox_json(bbox_json_path)
self.clip_values = clip_values
self.with_mask = with_mask
self.fg_classes = fg_classes
if not self.with_mask:
assert self.fg_classes is None, \
"When with_mask is False, fg_classes must be None."
self.cases = cases
# automatically collecting all of the case folder names
if self.cases is None:
self.cases = [os.path.join(self.in_dir, case) \
for case in os.listdir(self.in_dir) \
if case.startswith("case")]
self.cases = sorted(self.cases)
assert len(self.cases) > 0, \
"Please make sure that in_dir refers to the proper directory."
# making directory if out_dir doesn't exist
if not isdir(out_dir):
os.mkdir(out_dir)
print("Created directory: {0}".format(out_dir))
self.resize_xy_shape = tuple(resize_xy_shape)
def gen_data(self, save_fnames=["imaging", "segmentation"]):
"""
Generates and saves preprocessed data as numpy arrays (n, x, y).
Args:
save_fnames (List[str]): save names for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
Returns:
None
"""
# Generating data and saving them recursively
for case in tqdm(self.cases):
x_path, y_path = join(case, "imaging.nii.gz"), join(case, "segmentation.nii.gz")
image = nib.load(x_path).get_fdata()[None]
label = nib.load(y_path).get_fdata()[None] if self.with_mask \
else None
preprocessed_img, preprocessed_label = self.preprocess(image,
label,
case)
if self.bbox_dict is not None:
preprocessed_img, preprocessed_label = self.crop_case_to_bbox(preprocessed_img,
preprocessed_label,
case)
self.save_imgs(preprocessed_img, preprocessed_label, case,
save_fnames=save_fnames)
def preprocess(self, image, mask, case=None):
"""
Clipping, cropping, and resampling.
Args:
image: numpy array
shape (c, n, x, y)
mask: numpy array or None
shape (c, n, x, y)
case (str): path to a case folder
Returns:
tuple of:
- preprocessed image
shape: (n, x, y)
- preprocessed mask or None
shape: (n, x, y)
"""
raw_case = Path(case).name # raw case name, i.e. case_00000
# resampling
if self.kits_json is not None:
for info_dict in self.kits_json:
# guaranteeing that the info is corresponding to the right
# case
if info_dict["case_id"] == raw_case:
case_info_dict = info_dict
break
# resampling the slices axis to 3mm
orig_spacing = (case_info_dict["captured_slice_thickness"],
case_info_dict["captured_pixel_width"],
case_info_dict["captured_pixel_width"])
target_spacing = (3,) + orig_spacing[1:]
image, mask = resample_patient(image, mask, np.array(orig_spacing),
target_spacing=np.array(target_spacing))
if self.clip_values is not None:
image = np.clip(image, self.clip_values[0], self.clip_values[1])
if self.resize_xy_shape is not None:
# image coming in : shape (c, n, h, w); mask is same shape
zdim_size = image.shape[1]
resize_xy_shape = (zdim_size,) + self.resize_xy_shape
image, mask = resize_data_and_seg(image, size=resize_xy_shape,
seg=mask)
image = standardize_per_image(image)
mask = mask.squeeze() if mask is not None else mask
return (image.squeeze(), mask)
def save_imgs(self, image, mask, case,
save_fnames=["imaging", "segmentation"]):
"""
Saves an image and mask pair as .npy arrays in the KiTS19 file structure
Args:
image: numpy array
mask: numpy array
case: path to a case folder (each element of self.cases)
save_fnames (List[str]): save names for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
"""
for fname in save_fnames:
assert not ".npy" in fname, \
"Filenames in save_fnames should not include .npy in the name."
# saving the generated dataset
# output dir in KiTS19 format
# extracting the raw case folder name
case_raw = Path(case).name # extracting the raw case folder name
out_case_dir = join(self.out_dir, case_raw)
# checking to make sure that the output directories exist
if not isdir(out_case_dir):
os.mkdir(out_case_dir)
np.save(os.path.join(out_case_dir, f"{save_fnames[0]}.npy"), image)
if mask is not None:
np.save(os.path.join(out_case_dir, f"{save_fnames[1]}.npy"), mask)
def save_dir_as_2d(self, base_fnames=["imaging", "segmentation"],
delete3dcase=False):
"""
Takes preprocessed 3D numpy arrays and saves them as slices
in the same directory.
Arrays must have shape (n, h, w).
Args:
base_fnames (List[str]): names to read for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
delete3dcase (bool): whether or not to delete the 3D volume after
saving the 2D sliced versions
"""
for fname in base_fnames:
assert not ".npy" in fname, \
"Filenames in base_fnames should not include .npy in the name."
self.pos_per_class_dict = {} # saves slices per class
self.pos_per_slice_dict = defaultdict(list) # saves classes per slice
# Generating data and saving them recursively
for case in tqdm(self.cases):
# output dir in KiTS19 format
case_raw = Path(case).name # extracting the raw case folder name
out_case_dir = join(self.out_dir, case_raw)
# checking to make sure that the output directories exist
if not isdir(out_case_dir):
os.mkdir(out_case_dir)
# assumes the .npy files have shape: (d, h, w)
paths = [join(out_case_dir, f"{base_fnames[0]}.npy"),
join(out_case_dir, f"{base_fnames[1]}.npy")]
image, label = np.load(paths[0]), np.load(paths[1])
self.save_3d_as_2d(image, label, case_raw, out_case_dir)
# to deal with colaboratory storage limitations
if delete3dcase:
os.remove(paths[0]), os.remove(paths[1])
if self.fg_classes is not None:
self._save_pos_slice_dict()
def save_3d_as_2d(self, image, mask, case_raw, out_case_dir):
"""
Saves a 3D volume as separate 2D arrays for each slice across the
axial axis. The naming convention is as follows:
imaging_{parsed_slice_idx}.npy
segmentation_{parsed_slice_idx}.npy
where parsed_slice_idx is just the slice index but filled with
        zeros until it hits 3 digits (so sorting is easier), matching
        parse_slice_idx_to_str.
Args:
image: numpy array
mask: numpy array
case: raw case folder name
"""
# saving the generated dataset
# iterates through all slices and saves them individually as 2D arrays
assert len(image.shape) == 3, \
"Image shape should be (n, h, w)"
slice_idx_per_class = defaultdict(list)
for slice_idx in range(image.shape[0]):
# naming
slice_idx_str = parse_slice_idx_to_str(slice_idx)
case_str = f"{case_raw}_{slice_idx_str}"
if mask is not None:
label_slice = mask[slice_idx]
# appending fg slice indices
if self.fg_classes is not None:
for label_idx in self.fg_classes:
if label_idx != 0 and (label_slice == label_idx).any():
slice_idx_per_class[label_idx].append(slice_idx)
self.pos_per_slice_dict[case_str].append(label_idx)
elif label_idx == 0 and np.sum(label_slice) == 0:
# for completely blank labels
slice_idx_per_class[label_idx].append(slice_idx)
self.pos_per_slice_dict[case_str].append(label_idx)
self._save_slices(image, mask, out_case_dir=out_case_dir,
slice_idx=slice_idx, slice_idx_str=slice_idx_str)
if self.fg_classes is not None:
self.pos_per_class_dict[case_raw] = slice_idx_per_class
def _save_pos_slice_dict(self):
"""
Saves the foreground (positive) class dictionaries:
- slice_indices.json
saves the slice indices per class
{
case: {fg_class1: [slice indices...],
fg_class2: [slice indices...],
...}
}
- classes_per_slice.json
the keys are not cases, but the actual filenames that are
being read.
{
case_slice_idx_str: [classes_in_slice],
case_slice_idx_str2: [classes_in_slice],
}
"""
save_path_per_slice = join(self.out_dir, "classes_per_slice.json")
# saving the dictionaries
print(f"Logged the classes in {self.fg_classes} for each slice at",
f"{save_path_per_slice}.")
with open(save_path_per_slice, "w") as fp:
json.dump(self.pos_per_slice_dict, fp)
save_path = join(self.out_dir, "slice_indices.json")
# saving the dictionaries
print(f"Logged the slice indices for each class in {self.fg_classes} at",
f"{save_path}.")
with open(save_path, "w") as fp:
json.dump(self.pos_per_class_dict, fp)
def _save_slices(self, image, mask, out_case_dir, slice_idx,
slice_idx_str):
"""
For saving the slices in self.save_3d_as_2d()
"""
np.save(join(out_case_dir, f"imaging_{slice_idx_str}.npy"),
image[slice_idx])
if mask is not None:
label_slice = mask[slice_idx]
np.save(join(out_case_dir, f"segmentation_{slice_idx_str}.npy"),
label_slice)
def _load_kits_json(self, json_path):
"""
Loads the kits.json file into `self.kits_json`
"""
        if json_path is None:
            self.kits_json = None
            print("kits_json_path is empty, so not resampling.")
        else:
            with open(json_path, "r") as fp:
                self.kits_json = json.load(fp)
def _load_bbox_json(self, json_path):
"""
Loads the kits.json file into `self.kits_json`
"""
if json_path is None:
self.bbox_dict = None
print("bbox_json_path, so not cropping volumes to their bbox.")
else:
with open(json_path, "r") as fp:
self.bbox_dict = json.load(fp)
def crop_case_to_bbox(self, image, label, case):
"""
Crops a 3D image and 3D label to the corresponding bounding box.
"""
        bbox_coord = self.bbox_dict[case]
        return (crop_to_bbox(image, bbox_coord), crop_to_bbox(label, bbox_coord))
def standardize_per_image(image):
"""
Z-score standardization per image.
"""
mean, stddev = image.mean(), image.std()
return (image - mean) / stddev
def parse_slice_idx_to_str(slice_idx):
"""
Parse the slice index to a three digit string for saving and reading the
2D .npy files generated by io.preprocess.Preprocessor.
Naming convention: {type of slice}_{case}_{slice_idx}
* adding 0s to slice_idx until it reaches 3 digits,
* so sorting files is easier when stacking
"""
return f"{slice_idx:03}"
| 43.482659
| 97
| 0.571685
| 1,925
| 15,045
| 4.263377
| 0.16987
| 0.031193
| 0.020714
| 0.010235
| 0.296454
| 0.237602
| 0.212014
| 0.185208
| 0.130864
| 0.125746
| 0
| 0.008793
| 0.349884
| 15,045
| 345
| 98
| 43.608696
| 0.830283
| 0.339116
| 0
| 0.178571
| 0
| 0
| 0.103898
| 0.017038
| 0
| 0
| 0
| 0
| 0.029762
| 1
| 0.077381
| false
| 0
| 0.059524
| 0
| 0.166667
| 0.029762
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6026a153525e13fa3c171bca805b17cf817349e3
| 1,558
|
py
|
Python
|
setup.py
|
opywan/calm-dsl
|
1d89436d039a39265a0ae806022be5b52e757ac0
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
opywan/calm-dsl
|
1d89436d039a39265a0ae806022be5b52e757ac0
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
opywan/calm-dsl
|
1d89436d039a39265a0ae806022be5b52e757ac0
|
[
"Apache-2.0"
] | null | null | null |
import sys
import setuptools
from setuptools.command.test import test as TestCommand
def read_file(filename):
with open(filename, "r", encoding='utf8') as f:
return f.read()
class PyTest(TestCommand):
"""PyTest"""
def finalize_options(self):
"""finalize_options"""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""run_tests"""
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setuptools.setup(
name="calm.dsl",
version="0.9.0-alpha",
author="Nutanix",
author_email="nucalm@nutanix.com",
description="Calm DSL for blueprints",
long_description=read_file("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/nutanix/calm-dsl",
packages=setuptools.find_namespace_packages(include=["calm.*"]),
namespace_packages=["calm"],
install_requires=read_file("requirements.txt"),
tests_require=read_file("dev-requirements.txt"),
cmdclass={"test": PyTest},
zip_safe=False,
include_package_data=True,
entry_points={"console_scripts": ["calm=calm.dsl.cli:main"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
],
)
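# --- Hedged usage note (not part of the original setup.py) ---
# With PyTest wired into cmdclass above, the test suite can be invoked
# through setuptools:
#     python setup.py test
# and an editable install for development would typically be:
#     pip install -e .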
| 28.327273
| 68
| 0.649551
| 176
| 1,558
| 5.596591
| 0.579545
| 0.032487
| 0.038579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005691
| 0.210526
| 1,558
| 54
| 69
| 28.851852
| 0.795122
| 0.021181
| 0
| 0
| 0
| 0
| 0.296885
| 0.014579
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.095238
| 0
| 0.214286
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60285f227b486baa95c5fb739b65a5f1c6ce6e02
| 3,364
|
py
|
Python
|
third_party/webrtc/src/chromium/src/tools/swarming_client/tests/logging_utils_test.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 8
|
2016-02-08T11:59:31.000Z
|
2020-05-31T15:19:54.000Z
|
third_party/webrtc/src/chromium/src/tools/swarming_client/tests/logging_utils_test.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 1
|
2021-05-05T11:11:31.000Z
|
2021-05-05T11:11:31.000Z
|
third_party/webrtc/src/chromium/src/tools/swarming_client/tests/logging_utils_test.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 7
|
2016-02-09T09:28:14.000Z
|
2020-07-25T19:03:36.000Z
|
#!/usr/bin/env python
# Copyright 2015 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import logging
import os
import subprocess
import sys
import tempfile
import shutil
import unittest
import re
THIS_FILE = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(THIS_FILE)))
from utils import logging_utils
# PID YYYY-MM-DD HH:MM:SS.MMM
_LOG_HEADER = r'^%d \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d' % os.getpid()
_LOG_HEADER_PID = r'^\d+ \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d'
_PHASE = 'LOGGING_UTILS_TESTS_PHASE'
def call(phase, cwd):
"""Calls itself back."""
env = os.environ.copy()
env[_PHASE] = phase
return subprocess.call([sys.executable, '-u', THIS_FILE], env=env, cwd=cwd)
class Test(unittest.TestCase):
def setUp(self):
super(Test, self).setUp()
self.tmp = tempfile.mkdtemp(prefix='logging_utils')
def tearDown(self):
try:
shutil.rmtree(self.tmp)
finally:
super(Test, self).tearDown()
def test_capture(self):
root = logging.RootLogger(logging.DEBUG)
with logging_utils.CaptureLogs('foo', root) as log:
root.debug('foo')
result = log.read()
expected = _LOG_HEADER + ': DEBUG foo\n$'
if sys.platform == 'win32':
expected = expected.replace('\n', '\r\n')
self.assertTrue(re.match(expected, result), (expected, result))
def test_prepare_logging(self):
root = logging.RootLogger(logging.DEBUG)
filepath = os.path.join(self.tmp, 'test.log')
logging_utils.prepare_logging(filepath, root)
root.debug('foo')
with open(filepath, 'rb') as f:
result = f.read()
# It'd be nice to figure out a way to ensure it's properly in UTC but it's
# tricky to do reliably.
expected = _LOG_HEADER + ' D: foo\n$'
self.assertTrue(re.match(expected, result), (expected, result))
def test_rotating(self):
# Create a rotating log. Create a subprocess then delete the file. Make sure
# nothing blows up.
# Everything is done in a child process because the called functions mutate
# the global state.
self.assertEqual(0, call('test_rotating_phase_1', cwd=self.tmp))
self.assertEqual({'shared.1.log'}, set(os.listdir(self.tmp)))
with open(os.path.join(self.tmp, 'shared.1.log'), 'rb') as f:
lines = f.read().splitlines()
expected = [
r' I: Parent1',
r' I: Child1',
r' I: Child2',
r' I: Parent2',
]
for e, l in zip(expected, lines):
ex = _LOG_HEADER_PID + e + '$'
self.assertTrue(re.match(ex, l), (ex, l))
self.assertEqual(len(expected), len(lines))
def test_rotating_phase_1():
logging_utils.prepare_logging('shared.log')
logging.info('Parent1')
r = call('test_rotating_phase_2', None)
logging.info('Parent2')
return r
def test_rotating_phase_2():
# Simulate rotating the log.
logging_utils.prepare_logging('shared.log')
logging.info('Child1')
os.rename('shared.log', 'shared.1.log')
logging.info('Child2')
return 0
def main():
phase = os.environ.get(_PHASE)
if phase:
return getattr(sys.modules[__name__], phase)()
verbose = '-v' in sys.argv
logging.basicConfig(level=logging.DEBUG if verbose else logging.ERROR)
unittest.main()
if __name__ == '__main__':
sys.exit(main())
| 28.508475
| 80
| 0.67063
| 514
| 3,364
| 4.268482
| 0.344358
| 0.030994
| 0.043756
| 0.054695
| 0.175023
| 0.144941
| 0.111212
| 0.111212
| 0.06928
| 0.06928
| 0
| 0.009434
| 0.180737
| 3,364
| 117
| 81
| 28.752137
| 0.786647
| 0.162307
| 0
| 0.098765
| 0
| 0.024691
| 0.133524
| 0.039629
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.283951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
602b781497fe10bfa361f38ffbff943242a02399
| 3,392
|
py
|
Python
|
2021/d8b_bits.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | 4
|
2018-12-04T23:33:46.000Z
|
2021-12-07T17:33:27.000Z
|
2021/d8b_bits.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | 17
|
2018-12-12T23:32:09.000Z
|
2020-01-04T15:50:31.000Z
|
2021/d8b_bits.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pytest
import fileinput
from os.path import splitext, abspath
F_NAME = 'd8'
#implement day8 using bits
def find_ones(d):
'''count number of ones in binary number'''
ones = 0
while d > 0:
ones += d & 1
d >>= 1
return ones
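# Hedged worked example (added for illustration): 0b1011 has three set bits,
# and zero has none.
assert find_ones(0b1011) == 3
assert find_ones(0) == 0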
# Assign each segment a 'wire'.
lut = {
'a':0b0000001,
'b':0b0000010,
'c':0b0000100,
'd':0b0001000,
'e':0b0010000,
'f':0b0100000,
'g':0b1000000,
}
def solve_line(line):
def solve_output_val(output_values):
'''Look up each output val in binary repr in the mapping and add them together shifting each digit to the left.'''
output = 0
for o in output_values:
b_val = sum(lut[c] for c in o)
for k,v in mapping.items():
if v == b_val:
output = output*10 + k
break
else:
raise Exception(b_val, 'not found')
return output
def found(digit, bit_pattern):
mapping[digit] = bit_pattern
bpatterns.remove(bit_pattern)
signal_pattern, output_value = line.split(' | ')
# Convert letter string to binary pattern
bpatterns = {
sum(lut[c] for c in p)
for p in signal_pattern.split()
}
## Search for each digit and if found, remove it from bpatterns and add the digit to the mapping.
######################################
mapping = {}
# 1,4,7,8 all have a unique count of segments. Find them.
for bp in list(bpatterns):
if find_ones(bp) == 2:
found(1, bp)
elif find_ones(bp) == 4:
found(4, bp)
elif find_ones(bp) == 3:
found(7, bp)
elif find_ones(bp) == 7:
found(8, bp)
# Find 0, 6, 9. All have 6 segments
for bp in list(bpatterns):
if find_ones(bp) != 6:
continue
        # if 4 is contained within bp, then it is 9
if mapping[4] & bp >= mapping[4]:
found(9, bp)
        # if 1 is contained within bp, then it is 0
elif mapping[1] & bp >= mapping[1]:
found(0, bp)
else: # 6 is left
found(6, bp)
    # if bp is contained within 6, then it is 5
for bp in bpatterns:
if mapping[6] & bp >= bp:
found(5, bp)
break
    # if bp is contained within 9, and it is not 8 or 5, then it is 3
for bp in bpatterns:
if mapping[9] & bp >= bp:
found(3, bp)
break
assert len(bpatterns) == 1, bpatterns
#what is left is 2
for bp in bpatterns:
found(2, bp)
break
assert len(bpatterns) == 0, bpatterns
return solve_output_val(output_value.split())
def answer(lines):
return sum(solve_line(line) for line in map(str.strip, lines))
@pytest.fixture
def example_input1():
return fileinput.input(F_NAME + '.test.1')
def test_answer1(example_input1):
assert answer(example_input1) == 5353
@pytest.fixture
def example_input():
return fileinput.input(F_NAME + '.test')
def test_answer(example_input):
assert answer(example_input) == 61229
if __name__ == '__main__':
import timeit
start = timeit.default_timer()
filename = fileinput.input(F_NAME + '.input')
ans = answer(filename)
print('Answer:', ans)
duration = timeit.default_timer()-start
print(f'Execution time: {duration:.3f} s')
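# --- Hedged note (added for illustration) ---
# The pytest fixtures above pin the expected answers: the single-line example
# in F_NAME + '.test.1' decodes to 5353 and the larger example file sums to
# 61229, which is exactly what test_answer1/test_answer check via answer().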
| 26.294574
| 122
| 0.571934
| 477
| 3,392
| 3.97065
| 0.301887
| 0.025343
| 0.018479
| 0.022175
| 0.181626
| 0.129884
| 0.033791
| 0.033791
| 0.033791
| 0
| 0
| 0.053128
| 0.31191
| 3,392
| 128
| 123
| 26.5
| 0.758355
| 0.190153
| 0
| 0.141304
| 0
| 0
| 0.032042
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.097826
| false
| 0
| 0.043478
| 0.032609
| 0.206522
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
602c28a9205e1c1670c905a216255ec8e326af0a
| 8,931
|
py
|
Python
|
frame_dataloader/spatial_dataloader.py
|
rizkiailham/two-stream-action-recognition-1
|
01221f668e62eb26e3593f4ecd3f257b6b6979ab
|
[
"Apache-2.0"
] | 67
|
2019-01-02T11:42:44.000Z
|
2022-03-24T02:46:39.000Z
|
frame_dataloader/spatial_dataloader.py
|
rizkiailham/two-stream-action-recognition-1
|
01221f668e62eb26e3593f4ecd3f257b6b6979ab
|
[
"Apache-2.0"
] | 10
|
2019-02-06T17:12:23.000Z
|
2021-11-10T08:05:27.000Z
|
frame_dataloader/spatial_dataloader.py
|
rizkiailham/two-stream-action-recognition-1
|
01221f668e62eb26e3593f4ecd3f257b6b6979ab
|
[
"Apache-2.0"
] | 25
|
2019-04-03T19:25:41.000Z
|
2021-11-22T16:34:15.000Z
|
"""
********************************
* Created by mohammed-alaa *
********************************
Spatial Dataloader implementing sequence api from keras (defines how to load a single item)
this loads batches of images; for each iteration it returns [batch_size, height, width, 3] ndarrays
"""
import copy
import random
import cv2
import numpy as np
import tensorflow.keras as keras
from .UCF_splitting_kernel import *
from .helpers import get_training_augmenter, get_validation_augmenter
class SpatialSequence(keras.utils.Sequence):
def __init__(self, data_to_load, data_root_path, batch_size, is_training, augmenter):
"""get data structure to load data"""
# list of (video names,frame/max_frame,label)
self.data_to_load = copy.deepcopy(data_to_load)
self.batch_size = batch_size
self.is_training = is_training
self.augmenter = copy.deepcopy(augmenter)
self.data_root_path = data_root_path
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # three lists
def __len__(self):
"""Denotes the number of batches per epoch"""
return (len(self.video_names) + self.batch_size - 1) // self.batch_size # ceiling div
def get_actual_length(self):
"""Denotes the total number of samples"""
return len(self.video_names)
def __getitem__(self, batch_start):
"""Gets one batch"""
batch_video_names = self.video_names[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_frames = self.frames[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_y = np.array(self.labels[batch_start * self.batch_size:(batch_start + 1) * self.batch_size])
batch_x = [] # could be less or equal batch size
#
for vid_id, _ in enumerate(batch_y):
if self.is_training: # max frame is given
frame_id = random.randint(1, batch_frames[vid_id]) # random frame (one based)
else:
frame_id = batch_frames[vid_id] # just as selected
batch_x.append(
cv2.cvtColor(cv2.imread(os.path.join(self.data_root_path, "v_" + batch_video_names[vid_id], 'frame{}'.format(str(frame_id).zfill(6)) + '.jpg')), cv2.COLOR_BGR2RGB)
)
if self.is_training:
return np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0, batch_y
else:
# no label needed since (test_video_to_label mapping) (dictionary of name to label) is returned
return batch_video_names, np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0
def shuffle_and_reset(self):
"""
new data for the next epoch
"""
random.shuffle(self.data_to_load)
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # shuffle all
class SpatialDataLoader:
def __init__(self, batch_size, testing_samples_per_video, width, height, log_stream=open("/tmp/null.log", "w"), augmenter_level=1, data_root_path='./jpegs_256/', ucf_list_path='./UCF_list/', ucf_split='01'):
"""
get the mapping and initialize the augmenter
"""
self.batch_size = batch_size
self.width, self.height = width, height
self.data_root_path = data_root_path
self.testing_samples_per_video = testing_samples_per_video
self.log_stream = log_stream
# split the training and testing videos
data_util_ = DataUtil(path=ucf_list_path, split=ucf_split)
self.train_video_to_label, self.test_video_to_label = data_util_.get_train_test_video_to_label_mapping() # name without v_ or .avi and small s .. name to numeric label starts at 0
# get video frames
self.video_frame_count = data_util_.get_video_frame_count() # name without v_ or .avi and small s
self.augmenter_level = augmenter_level
def run(self):
"""
get the data structure for training and validation
"""
train_loader = self.get_training_loader()
val_loader = self.get_testing_loader()
return train_loader, val_loader, self.test_video_to_label
def get_training_data_structure(self):
"""
get the data structure for training
"""
training_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.train_video_to_label: # sample from the whole video frames
training_data_structure.append((video_name, self.video_frame_count[video_name], self.train_video_to_label[video_name]))
return training_data_structure
def get_testing_data_structure(self):
"""
get the data structure for validation
"""
test_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.test_video_to_label:
nb_frame = self.video_frame_count[video_name]
interval = nb_frame // self.testing_samples_per_video
if interval == 0: # for videos shorter than self.testing_samples_per_video
interval = 1
# range is exclusive add one to be inclusive
# 1 > self.testing_samples_per_video * interval
for frame_idx in range(1, min(self.testing_samples_per_video * interval, nb_frame) + 1, interval):
test_data_structure.append((video_name, frame_idx, self.test_video_to_label[video_name]))
return test_data_structure
def get_training_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_training_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=True,
augmenter=get_training_augmenter(height=self.height, width=self.width, augmenter_level=self.augmenter_level),
)
print('==> Training data :', len(loader.data_to_load), 'videos', file=self.log_stream)
print('==> Training data :', len(loader.data_to_load), 'videos')
return loader
def get_testing_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_testing_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=False,
augmenter=get_validation_augmenter(height=self.height, width=self.width),
)
print('==> Validation data :', len(loader.data_to_load), 'frames', file=self.log_stream)
print('==> Validation data :', len(loader.data_to_load), 'frames')
return loader
if __name__ == '__main__':
    # use_multiprocessing/num_workers are fit()-time options in Keras, not
    # constructor arguments of SpatialDataLoader, so they are dropped here
    data_loader = SpatialDataLoader(batch_size=64,  # data_root_path="data",
                                    ucf_split='01',
                                    testing_samples_per_video=19, width=224, height=224)
    train_loader, test_loader, test_video_level_label = data_loader.run()
    print(len(train_loader))
    print(len(test_loader))
    print(train_loader.get_actual_length())
    print(test_loader.get_actual_length())
    print(train_loader[0][0].shape, train_loader[0][1].shape)
# import tqdm
# progress = tqdm.tqdm(train_loader.get_epoch_generator(), total=len(train_loader))
# for (sampled_frame, label) in progress:
# pass
import matplotlib.pyplot as plt
# preview raw data
def preview(data, labels):
# 3 channels
fig, axeslist = plt.subplots(ncols=8, nrows=8, figsize=(10, 10))
for i, sample in enumerate(data):
axeslist.ravel()[i].imshow(data[i])
axeslist.ravel()[i].set_title(labels[i])
axeslist.ravel()[i].set_axis_off()
        plt.subplots_adjust(wspace=.4, hspace=.4)
        plt.show()  # display the grid when running as a script
print("train sample")
    # SpatialSequence has no get_epoch_generator; index it like a keras Sequence
    for batch in (train_loader[i] for i in range(len(train_loader))):
print(batch[0].shape, batch[1].shape)
print(batch[1])
preview(batch[0], batch[1])
break
print("test sample") # same name will be displayed testing_samples_per_video with no shuffling
for batch in test_loader.get_epoch_generator():
print(batch[1].shape, batch[2].shape)
print(batch[0], batch[2])
preview(batch[1], batch[2])
break
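# --- Hedged usage note (not part of the original demo) ---
# Because SpatialSequence subclasses keras.utils.Sequence, the training loader
# can be handed straight to Keras, roughly:
#     model.fit(train_loader, epochs=10)  # 'model' is a hypothetical compiled keras.Model
# and calling train_loader.shuffle_and_reset() between epochs re-shuffles the data.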
| 42.127358
| 211
| 0.647744
| 1,174
| 8,931
| 4.632879
| 0.204429
| 0.034749
| 0.031072
| 0.036404
| 0.432616
| 0.337194
| 0.276521
| 0.254459
| 0.213275
| 0.181651
| 0
| 0.011505
| 0.250588
| 8,931
| 211
| 212
| 42.327014
| 0.801136
| 0.197962
| 0
| 0.15
| 0
| 0
| 0.027206
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.066667
| 0
| 0.258333
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
602c73ce30543054207480d8bbb3a3dcd0069abc
| 2,762
|
py
|
Python
|
day02/puzzle2.py
|
jack-beach/AdventOfCode2019
|
a8ac53eaf03cd7595deb2a9aa798a2d17c21c513
|
[
"MIT"
] | null | null | null |
day02/puzzle2.py
|
jack-beach/AdventOfCode2019
|
a8ac53eaf03cd7595deb2a9aa798a2d17c21c513
|
[
"MIT"
] | 1
|
2019-12-05T19:21:46.000Z
|
2019-12-05T19:21:46.000Z
|
day02/puzzle2.py
|
jack-beach/AdventOfCode2019
|
a8ac53eaf03cd7595deb2a9aa798a2d17c21c513
|
[
"MIT"
] | 1
|
2019-12-05T18:05:54.000Z
|
2019-12-05T18:05:54.000Z
|
# stdlib imports
import copy
# vendor imports
import click
@click.command()
@click.argument("input_file", type=click.File("r"))
def main(input_file):
"""Put your puzzle execution code here"""
# Convert the comma-delimited string of numbers into a list of ints
masterRegister = list(
map(lambda op: int(op), input_file.read().strip().split(","))
)
def execute(noun, verb):
# Create a local copy of the register for this execution
register = copy.deepcopy(masterRegister)
# Inject the noun and verb
register[1] = noun
register[2] = verb
# We will start reading the opcodes at position 0
pointer = 0
# Loop infinitely until we reach the termination instruction
while True:
# Get the code at the current read position
code = register[pointer]
# Code 99 means immediate termination
if code == 99:
break
# Code 1 is addition
elif code == 1:
# Get register addresses
addendAPointer = register[pointer + 1]
addendBPointer = register[pointer + 2]
sumPointer = register[pointer + 3]
# Perform the addition
register[sumPointer] = (
register[addendAPointer] + register[addendBPointer]
)
# Advance the code position by 4
pointer += 4
# Code 2 is multiplication
elif code == 2:
# Get register addresses
factorAPointer = register[pointer + 1]
factorBPointer = register[pointer + 2]
productPointer = register[pointer + 3]
                # Perform the multiplication
register[productPointer] = (
register[factorAPointer] * register[factorBPointer]
)
# Advance the code position by 4
pointer += 4
# Unknown opcode means there was an error
else:
raise RuntimeError(
f"Unknown opcode {code} at position {pointer}"
)
# Return the result
return register[0]
# Iterate through all the possible combinations until the target is found
target = 19690720
found = None
for noun in range(100):
for verb in range(100):
result = execute(noun, verb)
if result == target:
found = (noun, verb)
break
if found:
break
# Calculate the final result
print("RESULT:", 100 * found[0] + found[1])
# Execute cli function on main
if __name__ == "__main__":
main()
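# --- Hedged worked example (added for illustration) ---
# Tracing the opcode loop above on the tiny program [1, 0, 0, 0, 99]:
#   opcode 1 at position 0 adds register[0] + register[0] (1 + 1 = 2)
#   and stores the sum at position 0, then opcode 99 halts,
#   leaving the register as [2, 0, 0, 0, 99].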
| 29.073684
| 77
| 0.542723
| 282
| 2,762
| 5.276596
| 0.43617
| 0.070565
| 0.020161
| 0.030914
| 0.100806
| 0.100806
| 0.100806
| 0.044355
| 0
| 0
| 0
| 0.024779
| 0.386314
| 2,762
| 94
| 78
| 29.382979
| 0.853097
| 0.281318
| 0
| 0.098039
| 0
| 0
| 0.035751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.039216
| 0
| 0.098039
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
602e5a99d805700346d56a51e68cf804e5858e7b
| 6,174
|
py
|
Python
|
oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py
|
devendermishrajio/oslo.messaging
|
9e5fb5697d3f7259f01e3416af0582090d20859a
|
[
"Apache-1.1"
] | 1
|
2021-02-17T15:30:45.000Z
|
2021-02-17T15:30:45.000Z
|
oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py
|
devendermishrajio/oslo.messaging
|
9e5fb5697d3f7259f01e3416af0582090d20859a
|
[
"Apache-1.1"
] | null | null | null |
oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py
|
devendermishrajio/oslo.messaging
|
9e5fb5697d3f7259f01e3416af0582090d20859a
|
[
"Apache-1.1"
] | 2
|
2015-11-03T03:21:55.000Z
|
2015-12-01T08:56:14.000Z
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_messaging._drivers.zmq_driver.client.publishers\
import zmq_publisher_base
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LI, _LW
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class DealerPublisher(zmq_publisher_base.PublisherMultisend):
def __init__(self, conf, matchmaker):
super(DealerPublisher, self).__init__(conf, matchmaker, zmq.DEALER)
def send_request(self, request):
self._check_request_pattern(request)
dealer_socket, hosts = self._check_hosts_connections(request.target)
if not dealer_socket.connections:
# NOTE(ozamiatin): Here we can provide
# a queue for keeping messages to send them later
# when some listener appears. However such approach
# being more reliable will consume additional memory.
LOG.warning(_LW("Request %s was dropped because no connection")
% request.msg_type)
return
if request.msg_type in zmq_names.MULTISEND_TYPES:
for _ in range(dealer_socket.connections_count()):
self._send_request(dealer_socket, request)
else:
self._send_request(dealer_socket, request)
def _check_request_pattern(self, request):
if request.msg_type == zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
def _send_request(self, socket, request):
socket.send(b'', zmq.SNDMORE)
socket.send_pyobj(request)
LOG.info(_LI("Sending message_id %(message)s to a target %(target)s")
% {"message": request.message_id,
"target": request.target})
def cleanup(self):
super(DealerPublisher, self).cleanup()
class DealerPublisherLight(zmq_publisher_base.PublisherBase):
def __init__(self, conf, address):
super(DealerPublisherLight, self).__init__(conf)
self.socket = self.zmq_context.socket(zmq.DEALER)
self.socket.connect(address)
def send_request(self, request):
if request.msg_type == zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
envelope = request.create_envelope()
self.socket.send(b'', zmq.SNDMORE)
self.socket.send_pyobj(envelope, zmq.SNDMORE)
self.socket.send_pyobj(request)
def cleanup(self):
self.socket.setsockopt(zmq.LINGER, 0)
self.socket.close()
class DealerPublisherProxy(DealerPublisher):
def __init__(self, conf, matchmaker, reply_receiver):
super(DealerPublisherProxy, self).__init__(conf, matchmaker)
self.reply_receiver = reply_receiver
def send_request(self, multipart_message):
envelope = multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE]
LOG.info(_LI("Envelope: %s") % envelope)
target = envelope[zmq_names.FIELD_TARGET]
dealer_socket, hosts = self._check_hosts_connections(target)
if not dealer_socket.connections:
# NOTE(ozamiatin): Here we can provide
# a queue for keeping messages to send them later
# when some listener appears. However such approach
# being more reliable will consume additional memory.
LOG.warning(_LW("Request %s was dropped because no connection")
% envelope[zmq_names.FIELD_MSG_TYPE])
return
self.reply_receiver.track_socket(dealer_socket.handle)
LOG.info(_LI("Sending message %(message)s to a target %(target)s")
% {"message": envelope[zmq_names.FIELD_MSG_ID],
"target": envelope[zmq_names.FIELD_TARGET]})
if envelope[zmq_names.FIELD_MSG_TYPE] in zmq_names.MULTISEND_TYPES:
for _ in range(dealer_socket.connections_count()):
self._send_request(dealer_socket, multipart_message)
else:
self._send_request(dealer_socket, multipart_message)
def _send_request(self, socket, multipart_message):
socket.send(b'', zmq.SNDMORE)
socket.send_pyobj(
multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE],
zmq.SNDMORE)
socket.send(multipart_message[zmq_names.MULTIPART_IDX_BODY])
class ReplyReceiver(object):
def __init__(self, poller):
self.poller = poller
LOG.info(_LI("Reply waiter created in broker"))
def _receive_reply(self, socket):
return socket.recv_multipart()
def track_socket(self, socket):
self.poller.register(socket, self._receive_reply)
def cleanup(self):
self.poller.close()
class AcknowledgementReceiver(object):
def __init__(self):
self.poller = zmq_async.get_poller()
self.thread = zmq_async.get_executor(self.poll_for_acknowledgements)
self.thread.execute()
def _receive_acknowledgement(self, socket):
empty = socket.recv()
assert empty == b"", "Empty delimiter expected"
ack_message = socket.recv_pyobj()
return ack_message
def track_socket(self, socket):
self.poller.register(socket, self._receive_acknowledgement)
def poll_for_acknowledgements(self):
ack_message, socket = self.poller.poll()
LOG.info(_LI("Message %s acknowledged")
% ack_message[zmq_names.FIELD_ID])
def cleanup(self):
self.thread.stop()
self.poller.close()
| 34.49162
| 78
| 0.679462
| 742
| 6,174
| 5.392183
| 0.264151
| 0.027993
| 0.020995
| 0.022494
| 0.485379
| 0.430892
| 0.35816
| 0.300425
| 0.245939
| 0.245939
| 0
| 0.002326
| 0.234046
| 6,174
| 178
| 79
| 34.685393
| 0.84373
| 0.154357
| 0
| 0.283019
| 0
| 0
| 0.058891
| 0
| 0
| 0
| 0
| 0
| 0.009434
| 1
| 0.188679
| false
| 0
| 0.056604
| 0.009434
| 0.330189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
602e5ff210d9605bb2e8229e3fbf0370c704bfb0
| 25,175
|
py
|
Python
|
coba/environments/filters.py
|
mrucker/banditbenchmark
|
0365291b3a0cf1d862d294e0386d0ccad3f360f1
|
[
"BSD-3-Clause"
] | null | null | null |
coba/environments/filters.py
|
mrucker/banditbenchmark
|
0365291b3a0cf1d862d294e0386d0ccad3f360f1
|
[
"BSD-3-Clause"
] | null | null | null |
coba/environments/filters.py
|
mrucker/banditbenchmark
|
0365291b3a0cf1d862d294e0386d0ccad3f360f1
|
[
"BSD-3-Clause"
] | null | null | null |
import pickle
import warnings
import collections.abc
from math import isnan
from statistics import mean, median, stdev, mode
from abc import abstractmethod, ABC
from numbers import Number
from collections import defaultdict
from itertools import islice, chain
from typing import Hashable, Optional, Sequence, Union, Iterable, Dict, Any, List, Tuple, Callable, Mapping
from coba.backports import Literal
from coba import pipes
from coba.random import CobaRandom
from coba.exceptions import CobaException
from coba.statistics import iqr
from coba.pipes import Flatten
from coba.environments.primitives import Interaction
from coba.environments.logged.primitives import LoggedInteraction
from coba.environments.simulated.primitives import SimulatedInteraction
class EnvironmentFilter(pipes.Filter[Iterable[Interaction],Iterable[Interaction]], ABC):
"""A filter that can be applied to an Environment."""
@abstractmethod
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
"""Apply a filter to an Environment's interactions."""
...
class Identity(pipes.Identity, EnvironmentFilter):
"""Return whatever interactions are given to the filter."""
pass
class Take(pipes.Take, EnvironmentFilter):
"""Take a fixed number of interactions from an Environment."""
pass
class Shuffle(pipes.Shuffle, EnvironmentFilter):
"""Shuffle a sequence of Interactions in an Environment."""
pass
class Reservoir(pipes.Reservoir, EnvironmentFilter):
"""Take a fixed number of random Interactions from an Environment."""
pass
class Scale(EnvironmentFilter):
"""Shift and scale features to precondition them before learning."""
def __init__(self,
shift: Union[Number,Literal["min","mean","med"]] = 0,
scale: Union[Number,Literal["minmax","std","iqr","maxabs"]] = "minmax",
target: Literal["features","rewards"] = "features",
using: Optional[int] = None):
"""Instantiate a Scale filter.
Args:
shift: The statistic to use to shift each context feature.
scale: The statistic to use to scale each context feature.
target: The target data we wish to scale in the environment.
using: The number of interactions to use when calculating the necessary statistics.
"""
assert isinstance(shift,Number) or shift in ["min","mean","med"]
assert isinstance(scale,Number) or scale in ["minmax","std","iqr","maxabs"]
self._shift = shift
self._scale = scale
self._using = using
self._target = target
@property
def params(self) -> Dict[str, Any]:
return {
"scale_shift": self._shift,
"scale_scale": self._scale,
"scale_using": self._using,
"scale_target": self._target
}
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
iter_interactions = iter(interactions)
fitting_interactions = list(islice(iter_interactions,self._using))
shifts : Dict[Hashable,float] = defaultdict(lambda:0)
scales : Dict[Hashable,float] = defaultdict(lambda:1)
unscaled: Dict[Hashable,List[Any]] = defaultdict(list)
if any([isinstance(i.context,dict) for i in fitting_interactions]) and self._shift != 0:
raise CobaException("Shift is required to be 0 for sparse environments. Otherwise the environment will become dense.")
mixed = set()
had_non_numeric = set()
for interaction in fitting_interactions:
if self._target == "features":
for name,value in self._feature_pairs(interaction.context):
if name in mixed: continue
is_numeric = isinstance(value,Number)
is_nan = is_numeric and isnan(value)
if is_nan:
pass
elif (not is_numeric and name in unscaled) or (is_numeric and name in had_non_numeric):
mixed.add(name)
if name in unscaled: del unscaled[name]
if name in had_non_numeric: had_non_numeric.remove(name)
elif not is_numeric:
had_non_numeric.add(name)
elif is_numeric and not is_nan:
unscaled[name].append(value)
if self._target == "rewards":
unscaled["rewards"].extend(interaction.rewards)
if mixed: warnings.warn(f"Some features were not scaled due to having mixed types: {mixed}. ")
has_sparse_zero = set()
for interaction in fitting_interactions:
if isinstance(interaction.context,dict):
has_sparse_zero |= unscaled.keys() - interaction.context.keys() - {"rewards"}
for key in has_sparse_zero:
unscaled[key].append(0)
for name, values in unscaled.items():
if isinstance(self._shift, Number):
shift = self._shift
if self._shift == "min":
shift = min(values)
if self._shift == "mean":
shift = mean(values)
if self._shift == "med":
shift = median(values)
if isinstance(self._scale, Number):
scale_num = self._scale
scale_den = 1
if self._scale == "std":
scale_num = 1
scale_den = stdev(values)
if self._scale == "minmax":
scale_num = 1
scale_den = max(values)-min(values)
if self._scale == "iqr":
scale_num = 1
scale_den = iqr(values)
if self._scale == "maxabs":
scale_num = 1
scale_den = max([abs(v-shift) for v in values])
shifts[name] = shift
scales[name] = scale_num/scale_den if round(scale_den,10) != 0 else 1
for interaction in chain(fitting_interactions, iter_interactions):
scaled_values = {}
final_context = interaction.context
final_rewards = None
final_kwargs = interaction.kwargs.copy()
if self._target == "features":
for name,value in self._feature_pairs(interaction.context):
if isinstance(value,Number):
scaled_values[name] = (value-shifts[name])*scales[name]
else:
scaled_values[name] = value
if interaction.context is None:
final_context = None
elif isinstance(interaction.context,dict):
final_context = scaled_values
elif isinstance(interaction.context,tuple):
final_context = tuple(scaled_values[k] for k,_ in self._feature_pairs(interaction.context))
else:
final_context = scaled_values[1]
if self._target == "rewards":
final_rewards = [ (r-shifts['rewards'])*scales['rewards'] for r in interaction.rewards ]
if isinstance(interaction, SimulatedInteraction):
yield SimulatedInteraction(
final_context,
interaction.actions,
final_rewards or interaction.rewards,
**interaction.kwargs
)
elif isinstance(interaction, LoggedInteraction):
yield LoggedInteraction(
final_context,
interaction.action,
interaction.reward,
interaction.probability,
interaction.actions,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Scale.")
def _feature_pairs(self,context) -> Sequence[Tuple[Hashable,Any]]:
if isinstance(context,dict ): return context.items()
if isinstance(context,tuple): return enumerate(context)
if context is not None : return [(1,context)]
return []
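# --- Hedged worked example for Scale (added for illustration) ---
# With feature values [2, 4, 10], shift="min" and scale="minmax":
#   shift = min(values) = 2, scale = 1 / (max - min) = 1/8,
#   so 2 -> 0.0, 4 -> 0.25, and 10 -> 1.0 after (value - shift) * scale.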
class Impute(EnvironmentFilter):
"""Impute missing values (nan) in Interaction contexts."""
def __init__(self,
stat : Literal["mean","median","mode"] = "mean",
using: Optional[int] = None):
"""Instantiate an Impute filter.
Args:
            stat: The statistic to use for imputation.
using: The number of interactions to use to calculate the imputation statistics.
"""
assert stat in ["mean","median","mode"]
self._stat = stat
self._using = using
@property
def params(self) -> Dict[str, Any]:
return { "impute_stat": self._stat, "impute_using": self._using }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
iter_interactions = iter(interactions)
train_interactions = list(islice(iter_interactions,self._using))
test_interactions = chain.from_iterable([train_interactions, iter_interactions])
stats : Dict[Hashable,float] = defaultdict(int)
features: Dict[Hashable,List[Number]] = defaultdict(list)
for interaction in train_interactions:
for name,value in self._context_as_name_values(interaction.context):
if isinstance(value,Number) and not isnan(value):
features[name].append(value)
for feat_name, feat_numeric_values in features.items():
if self._stat == "mean":
stats[feat_name] = mean(feat_numeric_values)
if self._stat == "median":
stats[feat_name] = median(feat_numeric_values)
if self._stat == "mode":
stats[feat_name] = mode(feat_numeric_values)
for interaction in test_interactions:
kv_imputed_context = {}
for name,value in self._context_as_name_values(interaction.context):
kv_imputed_context[name] = stats[name] if isinstance(value,Number) and isnan(value) else value
if interaction.context is None:
final_context = None
elif isinstance(interaction.context,dict):
final_context = kv_imputed_context
elif isinstance(interaction.context,tuple):
final_context = tuple(kv_imputed_context[k] for k,_ in self._context_as_name_values(interaction.context))
else:
final_context = kv_imputed_context[1]
if isinstance(interaction, SimulatedInteraction):
yield SimulatedInteraction(
final_context,
interaction.actions,
interaction.rewards,
**interaction.kwargs
)
elif isinstance(interaction, LoggedInteraction):
yield LoggedInteraction(
final_context,
interaction.action,
interaction.reward,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Impute.")
def _context_as_name_values(self,context) -> Sequence[Tuple[Hashable,Any]]:
if isinstance(context,dict ): return context.items()
if isinstance(context,tuple): return enumerate(context)
if context is not None : return [(1,context)]
return []
class Sparse(EnvironmentFilter):
"""Sparsify an environment's feature representation.
This has little utility beyond debugging.
"""
def __init__(self, context:bool = True, action:bool = False):
"""Instantiate a Sparse filter.
Args:
context: If True then contexts should be made sparse otherwise leave them alone.
action: If True then actions should be made sparse otherwise leave them alone.
"""
self._context = context
self._action = action
@property
def params(self) -> Dict[str, Any]:
return { "sparse_C": self._context, "sparse_A": self._action }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
for interaction in interactions:
sparse_context = self._make_sparse(interaction.context) if self._context else interaction.context
if isinstance(interaction, SimulatedInteraction):
sparse_actions = list(map(self._make_sparse,interaction.actions)) if self._action else interaction.actions
yield SimulatedInteraction(
sparse_context,
sparse_actions,
interaction.rewards
)
elif isinstance(interaction, LoggedInteraction):
sparse_action = self._make_sparse(interaction.action) if self._action else interaction.action
yield LoggedInteraction(
sparse_context,
sparse_action,
interaction.reward,
interaction.probability,
interaction.actions,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Sparse.")
def _make_sparse(self, value) -> Optional[dict]:
if isinstance(value,dict) or value is None:
return value
if isinstance(value,(list,tuple)):
return dict(enumerate(value))
return {0:value}
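# Hedged worked example (added for illustration): _make_sparse maps a dense
# tuple to an index-keyed dict, e.g. (3, 5) -> {0: 3, 1: 5}, and a scalar
# context 7 -> {0: 7}; dicts and None pass through unchanged.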
class Cycle(EnvironmentFilter):
"""Cycle all rewards associated with actions by one place.
This filter is useful for testing an algorithms response to a non-stationary shock.
"""
def __init__(self, after:int = 0):
"""Instantiate a Cycle filter.
Args:
after: How many interactions should be seen before applying the cycle filter.
"""
self._after = after
@property
def params(self) -> Dict[str, Any]:
return { "cycle_after": self._after }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
underlying_iterable = iter(interactions)
sans_cycle_interactions = islice(underlying_iterable, self._after)
with_cycle_interactions = underlying_iterable
for interaction in sans_cycle_interactions:
yield interaction
try:
first_interaction = next(with_cycle_interactions)
action_set = set(first_interaction.actions)
n_actions = len(action_set)
featureless_actions = [tuple([0]*n+[1]+[0]*(n_actions-n-1)) for n in range(n_actions)]
with_cycle_interactions = chain([first_interaction], with_cycle_interactions)
            if len(action_set & set(featureless_actions)) != len(action_set):
warnings.warn("Cycle only works for environments without action features. It will be ignored in this case.")
for interaction in with_cycle_interactions:
yield interaction
else:
for interaction in with_cycle_interactions:
rewards = interaction.rewards[-1:] + interaction.rewards[:-1]
yield SimulatedInteraction(interaction.context, interaction.actions, rewards, **interaction.kwargs)
except StopIteration:
pass
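def _cycle_example():
    # A short sketch of Cycle's effect, assuming one-hot (featureless) actions as
    # constructed above: after `after` interactions every reward list is rotated
    # one place to the right, so [0,0,1] becomes [1,0,0].
    one_hots = [(1,0,0), (0,1,0), (0,0,1)]
    interactions = [SimulatedInteraction(None, one_hots, [0,0,1]) for _ in range(4)]
    cycled = list(Cycle(after=2).filter(interactions))
    assert [list(i.rewards) for i in cycled] == [[0,0,1], [0,0,1], [1,0,0], [1,0,0]]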
class Binary(EnvironmentFilter):
"""Binarize all rewards to either 1 (max rewards) or 0 (all others)."""
@property
def params(self) -> Dict[str, Any]:
return { "binary": True }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
for interaction in interactions:
max_rwd = max(interaction.rewards)
rewards = [int(r==max_rwd) for r in interaction.rewards]
yield SimulatedInteraction(interaction.context, interaction.actions, rewards, **interaction.kwargs)
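def _binary_example():
    # A minimal sketch: Binary gives 1 to every reward equal to the max (so ties
    # all receive 1) and 0 to the rest, per the comprehension above.
    interaction = SimulatedInteraction(None, ['a','b','c'], [0.2, 0.9, 0.9])
    assert list(next(iter(Binary().filter([interaction]))).rewards) == [0, 1, 1]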
class Sort(EnvironmentFilter):
"""Sort a sequence of Interactions in an Environment."""
def __init__(self, *keys: Union[str,int,Sequence[Union[str,int]]]) -> None:
"""Instantiate a Sort filter.
Args:
*keys: The context items that should be sorted on.
"""
self._keys = list(Flatten().filter([list(keys)]))[0]
@property
def params(self) -> Dict[str, Any]:
return { "sort": self._keys or '*' }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
full_sorter = lambda interaction: tuple(interaction.context )
list_sorter = lambda interaction: tuple(interaction.context[key] for key in self._keys)
dict_sorter = lambda interaction: tuple(interaction.context.get(key,0) for key in self._keys)
interactions = list(interactions)
is_sparse = isinstance(interactions[0].context,dict)
sorter = full_sorter if not self._keys else dict_sorter if is_sparse else list_sorter
return sorted(interactions, key=sorter)
class Where(EnvironmentFilter):
"""Define Environment selection criteria for an Environments pipe."""
def __init__(self, *, n_interactions: Union[int,Tuple[Optional[int],Optional[int]]] = None) -> None:
"""Instantiate a Where filter.
Args:
n_interactions: The minimum, maximum or exact number of interactions Environments must have.
"""
self._n_interactions = n_interactions
@property
def params(self) -> Dict[str, Any]:
params = {}
if self._n_interactions is not None:
params["where_n_interactions"] = self._n_interactions
return params
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
interactions = iter(interactions)
if self._n_interactions is None or self._n_interactions == (None,None):
min_interactions = None
max_interactions = None
take_interactions = 0
elif isinstance(self._n_interactions, int):
min_interactions = self._n_interactions
max_interactions = self._n_interactions
take_interactions = self._n_interactions+1
else:
min_interactions = self._n_interactions[0]
max_interactions = self._n_interactions[1]
take_interactions = max(filter(lambda x: x is not None, list(self._n_interactions)))+1
taken_interactions = list(islice(interactions, take_interactions))
if max_interactions is not None and len(taken_interactions) > max_interactions:
return []
if min_interactions is not None and len(taken_interactions) < min_interactions:
return []
return chain(taken_interactions, interactions)
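def _where_example():
    # A sketch of Where's selection semantics: an environment passes through
    # untouched when its interaction count satisfies the (min, max) bounds and
    # yields nothing otherwise.
    ten = [SimulatedInteraction(None, [0,1], [0,1]) for _ in range(10)]
    assert len(list(Where(n_interactions=(5, None)).filter(ten))) == 10  # within bounds
    assert list(Where(n_interactions=(11, None)).filter(ten)) == []      # too few: dropped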
class Warm(EnvironmentFilter):
"""Turn a SimulatedEnvironment into a WarmStartEnvironment."""
def __init__(self, n_warm:int, seed:int = 1):
"""Instantiate a Warm filter.
Args:
n_warm: The number of interactions that should be turned into LoggedInteractions.
seed: The random number seed that determines the random logging policy for LoggedInteractions.
"""
self._n_warm = n_warm
self._seed = seed
@property
def params(self) -> Dict[str, Any]:
return { "n_warm": self._n_warm }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[Interaction]:
self._rng = CobaRandom(self._seed)
underlying_iterable = iter(interactions)
logged_interactions = map(self._to_logged_interaction, islice(underlying_iterable, self._n_warm))
simulated_interactions = underlying_iterable
return chain(logged_interactions, simulated_interactions)
def _to_logged_interaction(self, interaction: SimulatedInteraction) -> LoggedInteraction:
num_actions = len(interaction.actions)
probabilities = [1/num_actions] * num_actions
idx = self._rng.choice(list(range(num_actions)), probabilities)
actions = interaction.actions
action = interaction.actions[idx]
prob = probabilities[idx]
reward = interaction.rewards[idx]
return LoggedInteraction(interaction.context, action, reward, prob, actions)
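def _warm_example():
    # A sketch of Warm: the first n_warm simulated interactions are converted to
    # logged interactions under a uniform random logging policy (seeded as above),
    # and the remainder pass through as simulated interactions.
    sims = [SimulatedInteraction(None, ['a','b'], [1,0]) for _ in range(5)]
    warmed = list(Warm(n_warm=2, seed=1).filter(sims))
    assert isinstance(warmed[0], LoggedInteraction)
    assert isinstance(warmed[2], SimulatedInteraction)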
class Riffle(EnvironmentFilter):
"""Riffle shuffle Interactions by taking actions from the end and evenly distributing into the beginning."""
def __init__(self, spacing: int = 3, seed=1) -> None:
"""Instantiate a Riffle filter.
Args:
spacing: The number of interactions from the beginning between each interaction shuffled in from the end.
seed: The seed used to determine the location of each ending interaction when placed within its beginning space.
"""
self._spacing = spacing
self._seed = seed
@property
def params(self) -> Dict[str, Any]:
return {"riffle_spacing": self._spacing, "riffle_seed": self._seed}
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
rng = CobaRandom(self._seed)
interactions = list(interactions)
for i in range(int(len(interactions)/(self._spacing+1))):
interactions.insert(i*self._spacing+rng.randint(0,self._spacing), interactions.pop())
return interactions
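def _riffle_example():
    # A sketch of Riffle: interactions are popped from the end and re-inserted
    # near the beginning, one per block of (spacing+1) output positions, so the
    # result is a permutation of the input.
    shuffled = Riffle(spacing=3, seed=1).filter(
        SimulatedInteraction(c, [0,1], [0,1]) for c in range(8))
    assert sorted(i.context for i in shuffled) == list(range(8))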
class Noise(EnvironmentFilter):
"""Introduce noise to an environment."""
def __init__(self,
context: Callable[[float,CobaRandom], float] = None,
action : Callable[[float,CobaRandom], float] = None,
reward : Callable[[float,CobaRandom], float] = None,
seed : int = 1) -> None:
"""Instantiate a Noise EnvironmentFilter.
Args:
context: A noise generator for context features.
action : A noise generator for action features.
reward : A noise generator for rewards.
seed : The seed initializing the random state of the noise generators.
"""
self._args = (context,action,reward,seed)
self._no_noise = lambda x, _: x
if context is None and action is None and reward is None:
context = lambda x, rng: x+rng.gauss(0,1)
self._context_noise = context or self._no_noise
self._action_noise = action or self._no_noise
self._reward_noise = reward or self._no_noise
self._seed = seed
def __reduce__(self) -> tuple:
try:
pickle.dumps(self._args)
except Exception:
message = (
"We were unable to pickle the Noise filter. This is likely due to using lambda functions for noise generation. "
"To work around this we recommend you first define your lambda functions as a named function and then pass the "
"named function to Noise."
)
raise CobaException(message)
else:
return (Noise, self._args)
@property
def params(self) -> Dict[str, Any]:
params = {}
if self._context_noise != self._no_noise: params['context_noise'] = True
if self._action_noise != self._no_noise : params['action_noise' ] = True
if self._reward_noise != self._no_noise : params['reward_noise' ] = True
params['noise_seed'] = self._seed
return params
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
rng = CobaRandom(self._seed)
for interaction in interactions:
if isinstance(interaction, LoggedInteraction):
raise CobaException("We do not currently support adding noise to a LoggedInteraction.")
noisy_context = self._noises(interaction.context, rng, self._context_noise)
noisy_actions = [ self._noises(a, rng, self._action_noise) for a in interaction.actions ]
noisy_rewards = [ self._noises(r, rng, self._reward_noise) for r in interaction.rewards ]
yield SimulatedInteraction(noisy_context, noisy_actions, noisy_rewards, **interaction.kwargs)
def _noises(self, value:Union[None,float,str,Mapping,Sequence], rng: CobaRandom, noiser: Callable[[float,CobaRandom], float]):
if isinstance(value, collections.abc.Mapping):
#we sort so that noise generation is deterministic with respect to seed
return { k:self._noise(v, rng, noiser) for k,v in sorted(value.items()) }
if isinstance(value, collections.abc.Sequence) and not isinstance(value, str):
return [ self._noise(v, rng, noiser) for v in value ]
return self._noise(value, rng, noiser)
def _noise(self, value:Union[None,float,str], rng: CobaRandom, noiser: Callable[[float,CobaRandom], float]) -> float:
return value if not isinstance(value,(int,float)) else noiser(value, rng)
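def _gaussian_context_noise(x, rng):
    # A named noise generator (rather than a lambda) keeps the Noise filter
    # picklable, per the __reduce__ note above.
    return x + rng.gauss(0, 1)
def _noise_example():
    # A sketch applying context noise only; action and reward noise default to
    # the identity, so actions and rewards pass through unchanged.
    interaction = SimulatedInteraction((1.0, 2.0), ['a','b'], [0, 1])
    noised = next(iter(Noise(context=_gaussian_context_noise, seed=1).filter([interaction])))
    assert len(noised.context) == 2 and list(noised.rewards) == [0, 1]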
| 38.259878
| 130
| 0.623952
| 2,718
| 25,175
| 5.623252
| 0.128403
| 0.028265
| 0.01446
| 0.017993
| 0.355404
| 0.290762
| 0.249673
| 0.212641
| 0.140605
| 0.129482
| 0
| 0.00257
| 0.289057
| 25,175
| 657
| 131
| 38.318113
| 0.85138
| 0.11718
| 0
| 0.345324
| 0
| 0.002398
| 0.050797
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 1
| 0.088729
| false
| 0.016787
| 0.045564
| 0.021583
| 0.235012
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
602f71483df50285674a0fe43ba737fee526a84e
| 6,553
|
py
|
Python
|
python/cuml/preprocessing/LabelEncoder.py
|
egoolish/cuml
|
5320eff78890b3e9129e04e13437496c0424820d
|
[
"Apache-2.0"
] | 7
|
2019-02-26T10:41:09.000Z
|
2020-06-17T06:08:57.000Z
|
python/cuml/preprocessing/LabelEncoder.py
|
danielhanchen/cuml
|
fab74ca94fdbc5b49281660ce32a48cfd3d66f46
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/preprocessing/LabelEncoder.py
|
danielhanchen/cuml
|
fab74ca94fdbc5b49281660ce32a48cfd3d66f46
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import nvcategory
from librmm_cffi import librmm
import numpy as np
def _enforce_str(y: cudf.Series) -> cudf.Series:
''' Ensure that nvcategory is being given strings
'''
if y.dtype != "object":
return y.astype("str")
return y
def _enforce_npint32(y: cudf.Series) -> cudf.Series:
if y.dtype != np.int32:
return y.astype(np.int32)
return y
class LabelEncoder(object):
"""
An nvcategory based implementation of ordinal label encoding
Examples
--------
Converting a categorical implementation to a numerical one
.. code-block:: python
from cudf import DataFrame, Series
data = DataFrame({'category': ['a', 'b', 'c', 'd']})
# There are two functionally equivalent ways to do this
le = LabelEncoder()
le.fit(data.category) # le = le.fit(data.category) also works
encoded = le.transform(data.category)
print(encoded)
# This method is preferred
le = LabelEncoder()
encoded = le.fit_transform(data.category)
print(encoded)
# We can assign this to a new column
data = data.assign(encoded=encoded)
print(data.head())
# We can also encode more data
test_data = Series(['c', 'a'])
encoded = le.transform(test_data)
print(encoded)
        # After training, ordinal labels can be inverse_transform()'d back
        # to string labels
        ord_label = cudf.Series([0, 0, 1, 2, 1])
        str_label = le.inverse_transform(ord_label)
print(str_label)
Output:
.. code-block:: python
0 0
1 1
2 2
3 3
dtype: int64
0 0
1 1
2 2
3 3
dtype: int32
category encoded
0 a 0
1 b 1
2 c 2
3 d 3
0 2
1 0
dtype: int64
0 a
1 a
2 b
3 c
4 b
dtype: object
"""
def __init__(self, *args, **kwargs):
self._cats: nvcategory.nvcategory = None
self._dtype = None
self._fitted: bool = False
def _check_is_fitted(self):
if not self._fitted:
raise RuntimeError("Model must first be .fit()")
def fit(self, y: cudf.Series) -> "LabelEncoder":
"""
Fit a LabelEncoder (nvcategory) instance to a set of categories
Parameters
        ----------
        y : cudf.Series
            Series containing the categories to be encoded. Its elements
            may or may not be unique.
Returns
-------
self : LabelEncoder
A fitted instance of itself to allow method chaining
"""
self._dtype = y.dtype
y = _enforce_str(y)
self._cats = nvcategory.from_strings(y.data)
self._fitted = True
return self
def transform(self, y: cudf.Series) -> cudf.Series:
"""
Transform an input into its categorical keys.
This is intended for use with small inputs relative to the size of the
dataset. For fitting and transforming an entire dataset, prefer
`fit_transform`.
Parameters
----------
y : cudf.Series
Input keys to be transformed. Its values should match the
categories given to `fit`
Returns
        -------
encoded : cudf.Series
The ordinally encoded input series
Raises
------
KeyError
if a category appears that was not seen in `fit`
"""
self._check_is_fitted()
y = _enforce_str(y)
encoded = cudf.Series(
nvcategory.from_strings(y.data)
.set_keys(self._cats.keys())
.values()
)
if -1 in encoded:
raise KeyError("Attempted to encode unseen key")
return encoded
def fit_transform(self, y: cudf.Series) -> cudf.Series:
"""
Simultaneously fit and transform an input
This is functionally equivalent to (but faster than)
`LabelEncoder().fit(y).transform(y)`
"""
self._dtype = y.dtype
# Convert y to nvstrings series, if it isn't one
y = _enforce_str(y)
# Bottleneck is here, despite everything being done on the device
self._cats = nvcategory.from_strings(y.data)
self._fitted = True
arr: librmm.device_array = librmm.device_array(
y.data.size(), dtype=np.int32
)
self._cats.values(devptr=arr.device_ctypes_pointer.value)
return cudf.Series(arr)
def inverse_transform(self, y: cudf.Series) -> cudf.Series:
''' Revert ordinal label to original label
Parameters
----------
y : cudf.Series, dtype=int32
Ordinal labels to be reverted
Returns
-------
reverted : cudf.Series
Reverted labels
'''
# check LabelEncoder is fitted
self._check_is_fitted()
# check input type is cudf.Series
if not isinstance(y, cudf.Series):
raise TypeError(
'Input of type {} is not cudf.Series'.format(type(y)))
# check if y's dtype is np.int32, otherwise convert it
y = _enforce_npint32(y)
# check if ord_label out of bound
ord_label = y.unique()
category_num = len(self._cats.keys())
for ordi in ord_label:
if ordi < 0 or ordi >= category_num:
raise ValueError(
'y contains previously unseen label {}'.format(ordi))
# convert ordinal label to string label
reverted = cudf.Series(self._cats.gather_strings(
y.data.mem.device_ctypes_pointer.value, len(y)))
return reverted
| 27.649789
| 78
| 0.574546
| 806
| 6,553
| 4.583127
| 0.301489
| 0.062263
| 0.029778
| 0.020303
| 0.096914
| 0.060639
| 0.060639
| 0.033027
| 0.033027
| 0.025988
| 0
| 0.015921
| 0.338624
| 6,553
| 236
| 79
| 27.766949
| 0.83641
| 0.531512
| 0
| 0.209677
| 0
| 0
| 0.060916
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.064516
| 0
| 0.33871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6031df65367df99733ce016cb9fcdddefa51c5dc
| 3,951
|
py
|
Python
|
examples/python-guide/cross_validation_example.py
|
StatMixedML/GPBoost
|
786d8be61c5c28da0690e167af636a6d777bf9e1
|
[
"Apache-2.0"
] | 2
|
2020-04-12T06:12:17.000Z
|
2020-04-12T15:34:01.000Z
|
examples/python-guide/cross_validation_example.py
|
StatMixedML/GPBoost
|
786d8be61c5c28da0690e167af636a6d777bf9e1
|
[
"Apache-2.0"
] | null | null | null |
examples/python-guide/cross_validation_example.py
|
StatMixedML/GPBoost
|
786d8be61c5c28da0690e167af636a6d777bf9e1
|
[
"Apache-2.0"
] | 1
|
2020-04-12T15:34:12.000Z
|
2020-04-12T15:34:12.000Z
|
# coding: utf-8
# pylint: disable = invalid-name, C0111
import gpboost as gpb
import numpy as np
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#--------------------Cross validation for tree-boosting without GP or random effects----------------
print('Simulating data...')
# Simulate and create your dataset
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
def sim_data(n):
"""Function that simulates data. Two covariates of which only one has an effect"""
X = np.random.rand(n, 2)
# mean function plus noise
y = f1d(X[:, 0]) + np.random.normal(scale=0.1, size=n)
return ([X, y])
# Simulate data
n = 1000
data = sim_data(2 * n)
# create dataset for gpb.train
data_train = gpb.Dataset(data[0][0:n, :], data[1][0:n])
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'metric': {'l2', 'l1'},
'learning_rate': 0.1,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# --------------------Combine tree-boosting and grouped random effects model----------------
print('Simulating data...')
# Simulate data
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.figure("Mean function")
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
n = 1000 # number of samples
np.random.seed(1)
X = np.random.rand(n, 2)
F = f1d(X[:, 0])
# Simulate grouped random effects
m = 25 # number of categories / levels for grouping variable
group = np.arange(n) # grouping variable
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
# incidence matrix relating grouped random effects to samples
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2 # random effect variance
sigma2 = 0.1 ** 2 # error variance
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m) # simulate random effects
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n) # simulate error term
y = F + eps + xi # observed data
# define GPModel
gp_model = gpb.GPModel(group_data=group)
gp_model.set_optim_params(params={"optimizer_cov": "fisher_scoring"})
# create dataset for gpb.train
data_train = gpb.Dataset(X, y)
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'learning_rate': 0.05,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=False,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# Include random effect predictions for validation (observe the lower test error)
gp_model = gpb.GPModel(group_data=group)
print("Running cross validation for GPBoost model and use_gp_model_for_validation = TRUE")
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=True,
num_boost_round=100, early_stopping_rounds=5,
               nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# Note: gpb.cv returns a dict of metric lists here (it is indexed with
# cvbst['l2-mean'] above), so it has no best_iteration attribute; the best
# iteration is the np.argmin printed above.
| 35.276786
| 100
| 0.665148
| 616
| 3,951
| 4.152597
| 0.297078
| 0.027365
| 0.017592
| 0.018765
| 0.528538
| 0.519156
| 0.495309
| 0.471071
| 0.471071
| 0.437451
| 0
| 0.037082
| 0.174133
| 3,951
| 111
| 101
| 35.594595
| 0.746859
| 0.258162
| 0
| 0.555556
| 0
| 0
| 0.170588
| 0.009343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.049383
| 0
| 0.123457
| 0.098765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60321018f94dd63905027338dadab96fc7adf06f
| 2,230
|
py
|
Python
|
synapse/rest/synapse/client/unsubscribe.py
|
Florian-Sabonchi/synapse
|
c95b04bb0e719d3f5de1714b442f95a39c6e3634
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/synapse/client/unsubscribe.py
|
Florian-Sabonchi/synapse
|
c95b04bb0e719d3f5de1714b442f95a39c6e3634
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/synapse/client/unsubscribe.py
|
Florian-Sabonchi/synapse
|
c95b04bb0e719d3f5de1714b442f95a39c6e3634
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from synapse.api.errors import StoreError
from synapse.http.server import DirectServeHtmlResource, respond_with_html_bytes
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
if TYPE_CHECKING:
from synapse.server import HomeServer
class UnsubscribeResource(DirectServeHtmlResource):
"""
    To allow a pusher to be deleted by clicking a link (i.e. a GET request)
"""
SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.notifier = hs.get_notifier()
self.auth = hs.get_auth()
self.pusher_pool = hs.get_pusherpool()
self.macaroon_generator = hs.get_macaroon_generator()
async def _async_render_GET(self, request: SynapseRequest) -> None:
token = parse_string(request, "access_token", required=True)
app_id = parse_string(request, "app_id", required=True)
pushkey = parse_string(request, "pushkey", required=True)
user_id = self.macaroon_generator.verify_delete_pusher_token(
token, app_id, pushkey
)
try:
await self.pusher_pool.remove_pusher(
app_id=app_id, pushkey=pushkey, user_id=user_id
)
except StoreError as se:
            # A 404 means they are already unsubscribed, which is fine
            if se.code != 404:
                raise
self.notifier.on_new_replication_data()
respond_with_html_bytes(
request,
200,
UnsubscribeResource.SUCCESS_HTML,
)
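# A hypothetical request shape (the query parameter names match the
# parse_string calls above; the mount path is an assumption about how synapse
# registers this resource):
#   GET /_synapse/client/unsubscribe?access_token=...&app_id=...&pushkey=...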
| 34.307692
| 80
| 0.689686
| 288
| 2,230
| 5.166667
| 0.486111
| 0.040323
| 0.030242
| 0.021505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008178
| 0.232287
| 2,230
| 64
| 81
| 34.84375
| 0.860981
| 0.304484
| 0
| 0
| 0
| 0
| 0.056505
| 0.016426
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.171429
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
603213c5e7e394368a3f594930adb85245cbf3c3
| 4,859
|
py
|
Python
|
pyhanabi/act_group.py
|
ravihammond/hanabi-convention-adaptation
|
5dafa91742de8e8d5810e8213e0e2771818b2f54
|
[
"MIT"
] | 1
|
2022-03-24T19:41:22.000Z
|
2022-03-24T19:41:22.000Z
|
pyhanabi/act_group.py
|
ravihammond/hanabi-convention-adaptation
|
5dafa91742de8e8d5810e8213e0e2771818b2f54
|
[
"MIT"
] | null | null | null |
pyhanabi/act_group.py
|
ravihammond/hanabi-convention-adaptation
|
5dafa91742de8e8d5810e8213e0e2771818b2f54
|
[
"MIT"
] | null | null | null |
import set_path
import sys
import torch
set_path.append_sys_path()
import rela
import hanalearn
import utils
assert rela.__file__.endswith(".so")
assert hanalearn.__file__.endswith(".so")
class ActGroup:
def __init__(
self,
devices,
agent,
partner_weight,
seed,
num_thread,
num_game_per_thread,
num_player,
explore_eps,
trinary,
replay_buffer,
max_len,
gamma,
convention,
convention_act_override,
):
self.devices = devices.split(",")
self.seed = seed
self.num_thread = num_thread
self.num_player = num_player
self.num_game_per_thread = num_game_per_thread
self.explore_eps = explore_eps
self.trinary = trinary
self.replay_buffer = replay_buffer
self.max_len = max_len
self.gamma = gamma
self.load_partner_model(partner_weight)
self.model_runners = []
for dev in self.devices:
runner = rela.BatchRunner(agent.clone(dev), dev)
runner.add_method("act", 5000)
runner.add_method("compute_priority", 100)
runner.add_method("compute_target", 5000)
partner_runner = rela.BatchRunner(
self._partner_agent.clone(dev), dev)
partner_runner.add_method("act", 5000)
self.model_runners.append([runner, partner_runner])
self.num_runners = len(self.model_runners)
self.convention = convention
self.convention_act_override = convention_act_override
self.create_r2d2_actors()
def load_partner_model(self, weight_file):
try:
state_dict = torch.load(weight_file)
        except Exception:
sys.exit(f"weight_file {weight_file} can't be loaded")
overwrite = {}
overwrite["vdn"] = False
overwrite["device"] = "cuda:0"
overwrite["boltzmann_act"] = False
if "fc_v.weight" in state_dict.keys():
agent, cfg = utils.load_agent(weight_file, overwrite)
self._partner_sad = cfg["sad"] if "sad" in cfg else cfg["greedy_extra"]
self._partner_hide_action = bool(cfg["hide_action"])
else:
agent = utils.load_supervised_agent(weight_file, "cuda:0")
self._partner_sad = False
self._partner_hide_action = False
agent.train(False)
self._partner_agent = agent
def create_r2d2_actors(self):
convention_act_override = [0, 0]
convention_sender = [1, 0]
if self.convention_act_override:
convention_act_override = [0, 1]
convention_sender = [1, 0]
actors = []
for i in range(self.num_thread):
thread_actors = []
for j in range(self.num_game_per_thread):
game_actors = []
actor = hanalearn.R2D2Actor(
self.model_runners[i % self.num_runners][0],
self.seed,
self.num_player,
0,
self.explore_eps,
[0], # boltzmann_act
False,
0, # sad
0, # shuffle_color
0, # hide_action
self.trinary,
self.replay_buffer,
1, # multi-step
self.max_len,
self.gamma,
self.convention,
1,
0,
True, # convention_fict_act_override
True, # use_experience
)
game_actors.append(actor)
self.seed += 1
actor = hanalearn.R2D2Actor(
self.model_runners[i % self.num_runners][1], # runner
self.num_player, # numPlayer
1, # playerIdx
False, # vdn
self._partner_sad, # sad
self._partner_hide_action, # hideAction
self.convention, # convention
0, # conventionSender
1) # conventionOverride
game_actors.append(actor)
for k in range(self.num_player):
partners = game_actors[:]
partners[k] = None
game_actors[k].set_partners(partners)
thread_actors.append(game_actors)
actors.append(thread_actors)
self.actors = actors
print("ActGroup created")
def start(self):
for runners in self.model_runners:
for runner in runners:
runner.start()
def update_model(self, agent):
for runner in self.model_runners:
runner[0].update_model(agent)
| 31.967105
| 83
| 0.537148
| 506
| 4,859
| 4.869565
| 0.231225
| 0.03125
| 0.045455
| 0.025974
| 0.131899
| 0.081169
| 0.081169
| 0.043831
| 0.043831
| 0.043831
| 0
| 0.015963
| 0.381148
| 4,859
| 151
| 84
| 32.178808
| 0.803459
| 0.039309
| 0
| 0.174242
| 0
| 0
| 0.037411
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 1
| 0.037879
| false
| 0
| 0.045455
| 0
| 0.090909
| 0.007576
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
603237057511914da74cfc53cec432cce1013ccc
| 1,128
|
py
|
Python
|
A_source_code/carbon/code/make_mask.py
|
vanHoek-dgnm/CARBON-DISC
|
3ecd5f4efba5e032d43679ee977064d6b25154a9
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
A_source_code/carbon/code/make_mask.py
|
vanHoek-dgnm/CARBON-DISC
|
3ecd5f4efba5e032d43679ee977064d6b25154a9
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
A_source_code/carbon/code/make_mask.py
|
vanHoek-dgnm/CARBON-DISC
|
3ecd5f4efba5e032d43679ee977064d6b25154a9
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ******************************************************
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
from netCDF4 import Dataset
import numpy as np
import general_path
import accuflux
import ascraster
import get_surrounding_cells
import make_np_grid
def do(mask_asc_fn, mask_id, dum_asc, logical = "EQ", mask_type='np_grid'):
dum_mask = ascraster.create_mask(mask_asc_fn, mask_id, logical = logical, numtype=int)
mask=[]
if mask_type=="rowcol":
for i in dum_mask:
mask.append(dum_asc.get_row_col_from_index(i))
elif mask_type=="index":
for i in dum_mask:
mask.append(i)
elif mask_type=="latlon":
for i in dum_mask:
mask.append(dum_asc.get_coord_from_index(i))
elif mask_type=="np_grid":
mask = np.zeros((dum_asc.nrows, dum_asc.ncols), dtype=bool)
mask[:,:] = True
for i in dum_mask:
row, col = dum_asc.get_row_col_from_index(i)
mask[row,col]=False
return mask
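def _np_grid_mask_sketch():
    # A sketch of the 'np_grid' branch above, assuming dum_mask held the flat
    # indices of two matching cells: every cell starts masked (True) and the
    # matching row/col positions are unmasked (False).
    mask = np.zeros((2, 3), dtype=bool)
    mask[:, :] = True
    for row, col in [(0, 1), (1, 2)]:  # hypothetical matching cells
        mask[row, col] = False
    assert int(mask.sum()) == 4  # 6 cells minus the 2 unmasked matches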
| 31.333333
| 90
| 0.62766
| 159
| 1,128
| 4.194969
| 0.421384
| 0.053973
| 0.035982
| 0.053973
| 0.307346
| 0.242879
| 0.191904
| 0.157421
| 0.095952
| 0.095952
| 0
| 0.00655
| 0.187943
| 1,128
| 35
| 91
| 32.228571
| 0.721616
| 0.218972
| 0
| 0.153846
| 0
| 0
| 0.037757
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.269231
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6032a6052ffc5ac0129ff8a333fbe0b572cb530c
| 7,309
|
py
|
Python
|
Code/Dataset.py
|
gitFloyd/AAI-Project-2
|
c6bb4d389248c3385e58a0c399343322a6dd887f
|
[
"MIT"
] | null | null | null |
Code/Dataset.py
|
gitFloyd/AAI-Project-2
|
c6bb4d389248c3385e58a0c399343322a6dd887f
|
[
"MIT"
] | null | null | null |
Code/Dataset.py
|
gitFloyd/AAI-Project-2
|
c6bb4d389248c3385e58a0c399343322a6dd887f
|
[
"MIT"
] | null | null | null |
from io import TextIOWrapper
import math
from typing import TypeVar
import random
import os
from Settings import Settings
class Dataset:
DataT = TypeVar('DataT')
WIN_NL = "\r\n"
LINUX_NL = "\n"
def __init__(self, path:str, filename:str, newline:str = WIN_NL) -> None:
self.path_ = path
self.filename_ = filename
self.loaded_ = False
self.parsed_ = False
self.data_ = None
self.nl = newline
self.classes_ = set()
self.attributes_ = []
self.types_ = []
self.data_ = []
def Data(self) -> list:
return self.data_
def Attributes(self) -> list:
return self.attributes_
def Types(self) -> list:
return self.types_
def Classes(self) -> list:
return self.classes_
def Load(self, reload:bool = False) -> DataT:
if not self.loaded_ or reload:
self.file_ = open(os.sep.join([self.path_, self.filename_]))
self.loaded_ = True
# If we reload, then we want to reparse as well.
return self.Parse_(reload)
def Parse_(self, reparse:bool = False) -> DataT:
if not self.loaded_:
# Silently return instead of raising an exception because
# this method is not intended to be used outside of the
# class. Although, it can be used that way if needed.
return
if not self.parsed_ or reparse:
self.Parse_Hook_(self.file_.read())
return self.data_
def Parse_Hook_(self, data:str) -> None:
self.data_ = data
def __del__(self):
if self.loaded_:
self.file_.close()
class ArffRow:
ATTR_LABEL = '@ATTRIBUTE ' # need the space at the end here
DATA_LABEL = '@DATA'
ATTR_LEN = len(ATTR_LABEL)
DATA_LEN = len(DATA_LABEL)
Attributes = []
Types = []
Data = []
Classes = set()
IsCollecting_ = False
@classmethod
def Reset(cls):
cls.Attributes = []
cls.Types = []
cls.Data = []
cls.Classes = set()
cls.IsCollecting_ = False
def __init__(self, line:str, nl:str) -> None:
self.line_ = line
self.len_ = len(line)
self.nl_ = nl
def Len(self) -> str:
return self.len_
def HasAttributeLabel(self) -> bool:
return self.len_ >= ArffRow.ATTR_LEN and self.line_[0:ArffRow.ATTR_LEN] == ArffRow.ATTR_LABEL
def HasDataLabel(self) -> bool:
return self.len_ >= ArffRow.DATA_LEN and self.line_[0:ArffRow.DATA_LEN] == ArffRow.DATA_LABEL
def GetAttributeData(self) -> tuple[str, str]:
namePosition = 0
for (i, char) in enumerate(self.line_[ArffRow.ATTR_LEN:]):
if char == '\t':
namePosition = i + ArffRow.ATTR_LEN
break
return (self.line_[ArffRow.ATTR_LEN:namePosition], self.line_[namePosition + 1:])
def Parse(self):
if ArffRow.IsCollecting_ and self.len_ > 1:
ArffRow.Data.append(self.line_.split(','))
ArffRow.Classes.add(ArffRow.Data[-1][-1])
elif self.HasDataLabel():
ArffRow.IsCollecting_ = True
elif self.HasAttributeLabel():
attrData = self.GetAttributeData()
ArffRow.Attributes.append(attrData[0])
ArffRow.Types.append(attrData[1])
class ArffDataset(Dataset):
# ARFF (Attribute-Relation File Format)
#def __init__(self, path:str, filename:str, newline:str = Dataset.WIN_NL) -> None:
# super().__init__(path, filename, newline)
#
# self.parser_ = {
# 'attributesLoaded': False,
# }
def Parse_Hook_(self, data:str) -> None:
ArffRow.Reset()
rows = [ArffRow(line, self.nl) for line in data.split(self.nl)]
for row in rows:
row.Parse()
for attribute in ArffRow.Attributes:
self.attributes_.append(attribute)
for typeName in ArffRow.Types:
self.types_.append(typeName)
for datum in ArffRow.Data:
self.data_.append(datum)
self.classes_ = self.classes_.union(ArffRow.Classes)
classes = list(self.classes_)
attribute_maxes = {}
for row in self.data_:
classIndex = classes.index(row[-1])
row[-1] = [1 if i == classIndex else 0 for (i, value) in enumerate(classes)]
for i in range(len(row)):
if self.types_[i] == 'REAL':
row[i] = float(row[i])
elif self.types_[i] == 'INTEGER':
row[i] = int(row[i])
else:
continue
if i not in attribute_maxes:
attribute_maxes[i] = 0
                if abs(row[i]) > attribute_maxes[i]:
                    attribute_maxes[i] = abs(row[i])  # track the magnitude so the later division preserves sign
for i in range(len(row)):
if self.types_[i] == 'REAL' or self.types_[i] == 'INTEGER':
row[i] = row[i] / attribute_maxes[i]
self.data_ = self.RowSort(self.data_)
def LexOrder(self, item1, item2):
num_fields = len(item1)
for i in range(num_fields):
if item1[i] != item2[i]:
if item1[i] < item2[i]:
return -1
else:
return 1
return 0
def RowSort(self, rows):
rows_len = len(rows)
if rows_len > 2:
result1 = self.RowSort(rows[0: math.floor(rows_len * 0.5)])
result2 = self.RowSort(rows[math.floor(rows_len * 0.5):])
sorted_rows = []
item1 = None
item2 = None
while len(result1) > 0 or len(result2) > 0:
if len(result1) > 0 and len(result2) > 0 and item1 == None and item2 == None:
item1 = result1.pop(0)
item2 = result2.pop(0)
elif len(result1) > 0 and item1 == None:
item1 = result1.pop(0)
elif len(result2) > 0 and item2 == None:
item2 = result2.pop(0)
order = 0
if item1 == None and item2 != None:
order = 1
elif item1 != None and item2 == None:
order = -1
else:
order = self.LexOrder(item1, item2)
if order == -1:
sorted_rows.append(item1)
item1 = None
elif order == 1:
sorted_rows.append(item2)
item2 = None
else:
sorted_rows.append(item1)
sorted_rows.append(item2)
item1 = None
item2 = None
if item1 != None:
sorted_rows.append(item1)
if item2 != None:
sorted_rows.append(item2)
return sorted_rows
        elif rows_len <= 1:
            # zero or one rows are already sorted (also guards the indexing below)
            return rows
else:
order = self.LexOrder(rows[0], rows[1])
if order == 1:
rows.reverse()
return rows
def Fetch(self, *fields:list[str], limit:int = None, offset:int = 0):
cols = []
data = []
# iterate over the field names and find the column indices
# for names that match the requested field names
for (i, field) in enumerate(fields):
try:
cols.append(self.attributes_.index(field))
except ValueError:
pass
end = limit
if limit != None:
end += offset
for row in self.data_[offset:end]:
data.append([row[i] for i in cols])
return data
def FetchFilter_(self, i, value):
# Not used any more
#if self.types_[i] == 'REAL':
# return float(value)
#elif self.types_[i] == 'INTEGER':
# return int(value)
#else:
# return value
pass
def Size(self):
length = len(self.data_)
if length == 0:
return (len(self.data_), None)
return (len(self.data_), len(self.data_[0]))
def Shuffle(self):
random.shuffle(self.data_)
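def _rowsort_sketch():
    # A sketch (not part of the original source): RowSort is a recursive merge
    # sort driven by LexOrder, so on comparable rows it agrees with sorted().
    ds = ArffDataset(path='', filename='unused.arff')  # no file is opened until Load()
    rows = [[2, 'b'], [1, 'a'], [1, 'c'], [2, 'a']]
    assert ds.RowSort(list(rows)) == sorted(rows)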
class Pistachio(ArffDataset):
SettingsKey = 'PistachioDataset'
def __init__(self, newline:str = Dataset.WIN_NL) -> None:
settings = Settings.Data()
super().__init__(
path = settings[Pistachio.SettingsKey]['Path'],
filename = settings[Pistachio.SettingsKey]['FileName'],
newline = newline
)
#pist = Pistachio(Dataset.LINUX_NL)
#
#for row in pist.Load()[0:10]:
# print(row)
| 24.363333
| 96
| 0.629498
| 1,000
| 7,309
| 4.457
| 0.184
| 0.030514
| 0.013462
| 0.016154
| 0.185775
| 0.118914
| 0.069554
| 0.032309
| 0.032309
| 0.014808
| 0
| 0.017167
| 0.242851
| 7,309
| 299
| 97
| 24.444816
| 0.788218
| 0.103981
| 0
| 0.148325
| 0
| 0
| 0.012856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114833
| false
| 0.009569
| 0.028708
| 0.033493
| 0.320574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6037477e26e980cdc81f047c4b3c12fc1cbcec38
| 2,321
|
py
|
Python
|
mars/tensor/base/flip.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | 2
|
2019-03-29T04:11:10.000Z
|
2020-07-08T10:19:54.000Z
|
mars/tensor/base/flip.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/base/flip.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..datasource import tensor as astensor
def flip(m, axis):
"""
Reverse the order of elements in a tensor along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input tensor.
axis : integer
Axis in tensor, which entries are reversed.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip a tensor vertically (axis=0).
fliplr : Flip a tensor horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
Examples
--------
>>> import mars.tensor as mt
>>> A = mt.arange(8).reshape((2,2,2))
>>> A.execute()
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> mt.flip(A, 0).execute()
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> mt.flip(A, 1).execute()
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> A = mt.random.randn(3,4,5)
>>> mt.all(mt.flip(A,2) == A[:,:,::-1,...]).execute()
True
"""
m = astensor(m)
sl = [slice(None)] * m.ndim
try:
sl[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input tensor"
% (axis, m.ndim))
return m[tuple(sl)]
| 25.228261
| 81
| 0.561827
| 326
| 2,321
| 3.993865
| 0.453988
| 0.046083
| 0.016129
| 0.024578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033153
| 0.285222
| 2,321
| 91
| 82
| 25.505495
| 0.751658
| 0.75657
| 0
| 0
| 0
| 0
| 0.14876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6037a51c2f59285acb270192ab5e41f437b7c589
| 1,876
|
py
|
Python
|
tests/test_ops/test_upfirdn2d.py
|
imabackstabber/mmcv
|
b272c09b463f00fd7fdd455f7bd4a055f9995521
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ops/test_upfirdn2d.py
|
imabackstabber/mmcv
|
b272c09b463f00fd7fdd455f7bd4a055f9995521
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ops/test_upfirdn2d.py
|
imabackstabber/mmcv
|
b272c09b463f00fd7fdd455f7bd4a055f9995521
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
_USING_PARROTS = True
try:
from parrots.autograd import gradcheck
except ImportError:
from torch.autograd import gradcheck, gradgradcheck
_USING_PARROTS = False
class TestUpFirDn2d:
"""Unit test for UpFirDn2d.
    Here, we just test the basic case of the upsample version. More general tests
will be included in other unit test for UpFirDnUpsample and
UpFirDnDownSample modules.
"""
@classmethod
def setup_class(cls):
kernel_1d = torch.tensor([1., 3., 3., 1.])
cls.kernel = kernel_1d[:, None] * kernel_1d[None, :]
cls.kernel = cls.kernel / cls.kernel.sum()
cls.factor = 2
pad = cls.kernel.shape[0] - cls.factor
cls.pad = ((pad + 1) // 2 + cls.factor - 1, pad // 2)
cls.input_tensor = torch.randn((2, 3, 4, 4), requires_grad=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_upfirdn2d(self):
from mmcv.ops import upfirdn2d
if _USING_PARROTS:
gradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
delta=1e-4,
pt_atol=1e-3)
else:
gradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
eps=1e-4,
atol=1e-3)
gradgradcheck(
upfirdn2d,
(self.input_tensor.cuda(),
self.kernel.type_as(
self.input_tensor).cuda(), self.factor, 1, self.pad),
eps=1e-4,
atol=1e-3)
| 31.79661
| 78
| 0.55597
| 221
| 1,876
| 4.61086
| 0.393665
| 0.075564
| 0.088322
| 0.111874
| 0.281649
| 0.281649
| 0.281649
| 0.281649
| 0.281649
| 0.281649
| 0
| 0.03135
| 0.336887
| 1,876
| 58
| 79
| 32.344828
| 0.787781
| 0.1242
| 0
| 0.409091
| 0
| 0
| 0.008025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6038e029f5aa9016bb06dc0180b3e06aac57209e
| 852
|
py
|
Python
|
dataset_creation/description_task2.py
|
rmorain/kirby
|
ef115dbaed4acd1b23c3e10ca3b496f05b9a2382
|
[
"Apache-2.0"
] | 1
|
2021-08-30T11:46:20.000Z
|
2021-08-30T11:46:20.000Z
|
dataset_creation/description_task2.py
|
rmorain/kirby
|
ef115dbaed4acd1b23c3e10ca3b496f05b9a2382
|
[
"Apache-2.0"
] | 36
|
2020-11-18T20:19:33.000Z
|
2021-08-03T23:31:12.000Z
|
dataset_creation/description_task2.py
|
rmorain/kirby
|
ef115dbaed4acd1b23c3e10ca3b496f05b9a2382
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from tqdm import tqdm
data_list = []
def get_questions(row):
global data_list
random_samples = df.sample(n=num_choices - 1)
distractors = random_samples["description"].tolist()
data = {
"question": "What is " + row["label"] + "?",
"correct": row["description"],
"distractors": distractors,
"knowledge": "{" + row["label"] + " : " + row["description"] + "}",
}
data_list.append(data)
debug = False
num_choices = 4
tqdm.pandas(desc="Progress")
df = pd.read_pickle("data/augmented_datasets/pickle/label_description.pkl")
if debug:
df = df.iloc[:10]
df = df.progress_apply(get_questions, axis=1)
new_df = pd.DataFrame(data_list)
if not debug:
new_df.to_pickle("data/augmented_datasets/pickle/description_qa_knowledge.pkl")
else:
__import__("pudb").set_trace()
| 24.342857
| 83
| 0.664319
| 110
| 852
| 4.918182
| 0.5
| 0.05915
| 0.07024
| 0.099815
| 0.121996
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007184
| 0.183099
| 852
| 34
| 84
| 25.058824
| 0.770115
| 0
| 0
| 0
| 0
| 0
| 0.252347
| 0.130282
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
603b2fa764ceaa795942b2f9977849ffd27b7101
| 2,776
|
py
|
Python
|
scarab/commands/attach.py
|
gonzoua/scarab
|
b86474527b7b2ec30710ae79ea3f1cf5b7a93005
|
[
"BSD-2-Clause"
] | 5
|
2018-09-01T01:42:43.000Z
|
2019-01-04T21:32:55.000Z
|
scarab/commands/attach.py
|
gonzoua/scarab
|
b86474527b7b2ec30710ae79ea3f1cf5b7a93005
|
[
"BSD-2-Clause"
] | 1
|
2019-09-18T17:06:11.000Z
|
2019-11-29T18:35:08.000Z
|
scarab/commands/attach.py
|
gonzoua/scarab
|
b86474527b7b2ec30710ae79ea3f1cf5b7a93005
|
[
"BSD-2-Clause"
] | null | null | null |
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
'attach' command implementation
"""
from base64 import b64encode
import argparse
import magic
from ..bugzilla import BugzillaError
from ..context import bugzilla_instance
from .. import ui
from .base import Base
class Command(Base):
"""Attach file to the existing PR"""
def register(self, subparsers):
"""Register 'attach' parser"""
parser = subparsers.add_parser('attach')
parser.set_defaults(func=self.run)
parser.add_argument('attachment', type=str, help='path to the attachment')
parser.add_argument('pr', type=int, help='PR number')
parser.add_argument('-b', '--batch', action='store_true', \
help='batch mode, only print newly created attachment\'s id')
parser.add_argument('-s', '--summary', dest='summary', help='summary for the attachment')
comment_group = parser.add_mutually_exclusive_group()
comment_group.add_argument('-c', '--comment', dest='comment', help='comment text')
comment_group.add_argument('-F', '--comment-file', dest='comment_file', \
type=argparse.FileType('r'), help='file with comment text')
parser.add_argument('-t', '--content-type', dest='content_type', help='file content type')
def run(self, args):
"""Run 'attach' command"""
bugzilla = bugzilla_instance()
content_type = args.content_type
# Read data and encode it to base64
try:
with open(args.attachment, 'rb') as attach_file:
data = attach_file.read()
except IOError as ex:
ui.fatal('error reading file: {}'.format(str(ex)))
comment = args.comment
if comment is None:
if args.comment_file:
comment = args.comment_file.read()
if comment is None:
if args.batch:
comment = ''
else:
comment = ui.edit_message()
# Try and guess file content type
if content_type is None:
mime = magic.Magic(mime=True)
content_type = mime.from_file(args.attachment)
try:
attachment = bugzilla.add_attachment(args.pr, args.attachment, data, \
summary=args.summary, comment=comment, content_type=content_type)
except BugzillaError as ex:
ui.fatal('Bugzilla error: {}'.format(ex.message))
if args.batch:
ui.output('{}'.format(attachment))
else:
ui.output('New attachment {} has been added to bug {}'.format(attachment, args.pr))
ui.output('Attachment URL: {}'.format(bugzilla.attachment_url(attachment)))
ui.output('Bug URL: {}'.format(bugzilla.bug_url(args.pr)))
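# A hypothetical invocation (the 'scarab' entry point name is an assumption;
# the flags match the argparse definitions above):
#   scarab attach --summary 'patch v2' --comment 'fixes the crash' fix.patch 12345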
| 39.098592
| 98
| 0.617075
| 331
| 2,776
| 5.069486
| 0.314199
| 0.065554
| 0.050656
| 0.027414
| 0.02503
| 0.02503
| 0
| 0
| 0
| 0
| 0
| 0.004356
| 0.255764
| 2,776
| 70
| 99
| 39.657143
| 0.807841
| 0.082853
| 0
| 0.153846
| 0
| 0
| 0.159064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.134615
| 0
| 0.192308
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
603b6cd04fd9a00dac3c017eb36ba4659fec0677
| 330
|
py
|
Python
|
LeetCode/python3/287.py
|
ZintrulCre/LeetCode_Archiver
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 279
|
2019-02-19T16:00:32.000Z
|
2022-03-23T12:16:30.000Z
|
LeetCode/python3/287.py
|
ZintrulCre/LeetCode_Archiver
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 2
|
2019-03-31T08:03:06.000Z
|
2021-03-07T04:54:32.000Z
|
LeetCode/python3/287.py
|
ZintrulCre/LeetCode_Crawler
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 12
|
2019-01-29T11:45:32.000Z
|
2019-02-04T16:31:46.000Z
|
from typing import List
class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        # Floyd's tortoise-and-hare cycle detection: treat nums as a linked list
        # where index i points to nums[i]; the duplicate value is the cycle's entry.
        p1, p2 = nums[0], nums[nums[0]]
        while nums[p1] != nums[p2]:
            p1 = nums[p1]        # tortoise: one step
            p2 = nums[nums[p2]]  # hare: two steps
        # Phase 2: restart one pointer at the head; stepping both at the same
        # speed, they meet at the cycle entrance, i.e. the duplicated value.
        p2 = 0
        while nums[p1] != nums[p2]:
            p1 = nums[p1]
            p2 = nums[p2]
        return nums[p1]
| 27.5
| 52
| 0.454545
| 44
| 330
| 3.409091
| 0.295455
| 0.2
| 0.16
| 0.16
| 0.426667
| 0.426667
| 0.426667
| 0.426667
| 0.426667
| 0.426667
| 0
| 0.095
| 0.393939
| 330
| 11
| 53
| 30
| 0.655
| 0
| 0
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
603c4a28289b42faa48ea562130b7e8125179bd8
| 2,327
|
py
|
Python
|
modules/google-earth-engine/docker/src/sepalinternal/gee.py
|
BuddyVolly/sepal
|
6a2356a88940a36568b1d83ba3aeaae4283d5445
|
[
"MIT"
] | 153
|
2015-10-23T09:00:08.000Z
|
2022-03-19T03:24:04.000Z
|
modules/google-earth-engine/docker/src/sepalinternal/gee.py
|
BuddyVolly/sepal
|
6a2356a88940a36568b1d83ba3aeaae4283d5445
|
[
"MIT"
] | 165
|
2015-09-24T09:53:06.000Z
|
2022-03-31T09:55:06.000Z
|
modules/google-earth-engine/docker/src/sepalinternal/gee.py
|
BuddyVolly/sepal
|
6a2356a88940a36568b1d83ba3aeaae4283d5445
|
[
"MIT"
] | 46
|
2016-07-10T10:40:09.000Z
|
2021-11-14T01:07:33.000Z
|
import json
from threading import Semaphore
import ee
from flask import request
from google.auth import crypt
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
import logging
service_account_credentials = None
export_semaphore = Semaphore(5)
get_info_semaphore = Semaphore(2)
def init_service_account_credentials(args):
global service_account_credentials
with open(args['gee_key_path'], 'r') as file_:
key_data = file_.read()
signer = crypt.RSASigner.from_string(key_data)
service_account_credentials = service_account.Credentials(
signer=signer,
service_account_email=args['gee_email'],
token_uri=ee.oauth.TOKEN_URI,
scopes=ee.oauth.SCOPES + ['https://www.googleapis.com/auth/drive']
)
def init_ee():
credentials = service_account_credentials
if 'sepal-user' in request.headers:
user = json.loads(request.headers['sepal-user'])
googleTokens = user.get('googleTokens', None)
if googleTokens:
credentials = Credentials(googleTokens['accessToken'])
ee.InitializeThread(credentials)
def to_asset_id(asset_path):
asset_roots = ee.data.getAssetRoots()
if not asset_roots:
raise Exception('User has no GEE asset roots')
return asset_roots[0]['id'] + '/' + asset_path
def delete_asset_collection(asset_id):
logging.info('Recursively deleting ' + asset_id)
if ee.data.getInfo(asset_id):
images = ee.data.getList({
'id': asset_id,
'fields': 'id'
})
for image in images:
ee.data.deleteAsset(image['id'])
logging.info('Deleted ' + image['id'])
ee.data.deleteAsset(asset_id)
logging.info('Deleted ' + asset_id)
def create_asset_image_collection(asset_id):
delete_asset_collection(asset_id)
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_IMAGE_COLL,
mk_parents=True
)
def create_asset_folder(asset_id):
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_FOLDER,
mk_parents=True
)
def get_info(ee_object):
try:
get_info_semaphore.acquire()
return ee_object.getInfo()
finally:
get_info_semaphore.release()
| 27.376471
| 74
| 0.685862
| 293
| 2,327
| 5.187713
| 0.327645
| 0.055263
| 0.098684
| 0.071053
| 0.116447
| 0.084211
| 0.084211
| 0.084211
| 0.084211
| 0.084211
| 0
| 0.002735
| 0.214439
| 2,327
| 84
| 75
| 27.702381
| 0.828775
| 0
| 0
| 0.090909
| 0
| 0
| 0.078642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106061
| false
| 0
| 0.121212
| 0
| 0.257576
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
603e1db8585ef18d062d93564593d2084f744fc9
| 14,585
|
py
|
Python
|
PyIK/src/litearm.py
|
AliShug/EvoArm
|
a5dea204914ee1e25867e4412e88d245329316f2
|
[
"CC-BY-3.0"
] | 110
|
2017-01-13T17:19:18.000Z
|
2022-02-20T06:50:03.000Z
|
PyIK/src/litearm.py
|
igcxl/EvoArm
|
a5dea204914ee1e25867e4412e88d245329316f2
|
[
"CC-BY-3.0"
] | 1
|
2018-08-30T07:27:56.000Z
|
2018-08-30T07:27:56.000Z
|
PyIK/src/litearm.py
|
igcxl/EvoArm
|
a5dea204914ee1e25867e4412e88d245329316f2
|
[
"CC-BY-3.0"
] | 47
|
2017-03-10T20:34:01.000Z
|
2021-11-18T03:44:06.000Z
|
from __future__ import print_function
import numpy as np
import struct
import solvers
import pid
from util import *
MOTORSPEED = 0.9
MOTORMARGIN = 1
MOTORSLOPE = 30
ERRORLIM = 5.0
class ArmConfig:
"""Holds an arm's proportions, limits and other configuration data"""
def __init__(self,
main_length = 148.4,
forearm_length = 160,
linkage_length = 155,
lower_actuator_length = 65,
upper_actuator_length = 54.4,
wrist_length = 90.52,
shoulder_offset = [-9.7, 18.71]):
self.main_length = main_length
self.forearm_length = forearm_length
self.linkage_length = linkage_length
self.lower_actuator_length = lower_actuator_length
self.upper_actuator_length = upper_actuator_length
        self.wrist_length = wrist_length
self.shoulder_offset = shoulder_offset
class ArmPose:
"""
Defines a physical configuration of a LiteArm robot arm.
Internal angles are relative to vertical (elevator/actuator) or straight
forward (swing), and are stored in radians. Extracted servo angles range
0-300 and are measured in degrees.
Provides methods for:
- finding the required servo angles to reach the pose
- checking the validity of the pose
"""
structFormat = 'fffff'
@staticmethod
def calcElevatorAngle(servoAngle):
return radians(178.21 - servoAngle)
@staticmethod
def calcSwingAngle(servoAngle):
return radians(150.0 - servoAngle)
@staticmethod
def calcActuatorAngle(servoAngle):
return radians(servoAngle - 204.78)
@staticmethod
def calcWristXAngle(servoAngle):
return radians(150.0 - servoAngle)
@staticmethod
def calcWristYAngle(servoAngle):
return radians(servoAngle - 147.0)
def __init__(self,
arm_config,
swing_angle,
shoulder_angle,
actuator_angle,
elbow_angle,
elbow2D,
wrist2D,
effector2D,
effector,
wrist_x,
wrist_y):
self.cfg = arm_config
self.swing_angle = swing_angle
self.shoulder_angle = shoulder_angle
self.actuator_angle = actuator_angle
self.elbow_angle = elbow_angle
# Joints in the arm
shoulder = rotate(self.cfg.shoulder_offset, swing_angle)
self.shoulder2D = [self.cfg.shoulder_offset[1], 0]
self.shoulder = [shoulder[0], 0, shoulder[1]]
self.wrist2D = wrist2D
self.effector2D = effector2D
self.effector = effector
# Construct the 3D elbow & wrist positions from the 2D (planar) IK
# solution
arm_vec = effector - self.shoulder
arm_vec[1] = 0
self.elbow2D = elbow2D
self.elbow = self.shoulder + normalize(arm_vec)*elbow2D[0]
self.elbow[1] = elbow2D[1]
self.wrist = self.effector - normalize(arm_vec)*arm_config.wrist_length
# Wrist pose
self.wristXAngle = wrist_x
self.wristYAngle = wrist_y
def getServoElevator(self):
return 178.21 - degrees(self.shoulder_angle)
def getServoActuator(self):
return degrees(self.actuator_angle) + 204.78
def getServoSwing(self):
return 150 - degrees(self.swing_angle)
def getServoWristX(self):
return 150 - degrees(self.wristXAngle)
def getServoWristY(self):
return 147 + degrees(self.wristYAngle)
def armDiffAngle(self):
return degrees(self.shoulder_angle - self.actuator_angle)
def checkActuator(self):
angle = self.getServoActuator()
return angle >= 95 and angle <= 250
def checkDiff(self):
angle = self.armDiffAngle()
return angle >= 44 and angle <= 175
def checkElevator(self):
angle = self.getServoElevator()
return angle >= 60 and angle <= 210
def checkForearm(self):
angle = degrees(self.elbow_angle + self.shoulder_angle)
return angle < 200 and angle > 80
def checkSwing(self):
angle = self.getServoSwing()
return angle >= 60 and angle <= 240
def checkWristX(self):
angle = self.getServoWristX()
return angle >= 60 and angle <= 240
def checkWristY(self):
angle = self.getServoWristY()
return angle >= 60 and angle <= 160
def checkPositioning(self):
# When Y>0 Forearm always faces outwards
if self.wrist2D[1] > 0 and self.wrist2D[0] < self.elbow2D[0]:
return False
# No valid positions X<=0
if self.wrist2D[0] <= 0:
return False
# Effector height range
if self.effector[1] > 180 or self.effector[1] < -200:
return False
return True
def checkClearance(self):
return (self.checkDiff() and self.checkActuator() and
self.checkElevator() and self.checkSwing() and
self.checkWristX() and self.checkWristY() and
self.checkPositioning() and self.checkForearm())
def serialize(self):
"""Returns a packed struct holding the pose information"""
return struct.pack(
ArmPose.structFormat,
self.swing_angle,
self.shoulder_angle,
self.elbow_angle,
self.wristXAngle,
self.wristYAngle
)
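def _armpose_convention_sketch():
    # A minimal sketch of the servo-angle conventions above: a swing servo at
    # 150 degrees is straight ahead (0 rad), and larger servo angles map to
    # negative swing.
    assert abs(ArmPose.calcSwingAngle(150.0)) < 1e-9
    assert ArmPose.calcSwingAngle(160.0) < 0 < ArmPose.calcSwingAngle(140.0)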
class ArmController:
def __init__(self,
servo_swing,
servo_shoulder,
servo_elbow,
servo_wrist_x,
servo_wrist_y,
arm_config,
motion_enable = False):
# Solvers are responsible for calculating the target servo positions to
# reach a given goal position
self.ik = solvers.IKSolver(
arm_config.main_length,
arm_config.forearm_length,
arm_config.wrist_length,
arm_config.shoulder_offset)
self.physsolver = solvers.PhysicalSolver(
arm_config.main_length,
arm_config.linkage_length,
arm_config.lower_actuator_length,
arm_config.upper_actuator_length)
# Servos
self.servos = {}
self.servos["swing"] = servo_swing
self.servos["shoulder"] = servo_shoulder
self.servos["elbow"] = servo_elbow
self.servos["wrist_x"] = servo_wrist_x
self.servos["wrist_y"] = servo_wrist_y
        for key, servo in self.servos.items():
            if servo is None:
                print("Warning: {0} servo not connected".format(key))
else:
# Initialise a PID controller for the servo
if servo.protocol == 1:
servo.setGoalSpeed(-MOTORSPEED)
servo.data['pid'] = pid.PIDControl(2.4, 0, 0.4)
else:
servo.setGoalSpeed(0)
servo.data['error'] = 0.0
# Make sure the goal speed is set
servo.setTorqueEnable(1)
if servo.protocol == 1:
print("Setting slope")
servo.setCWMargin(MOTORMARGIN)
servo.setCCWMargin(MOTORMARGIN)
servo.setCWSlope(MOTORSLOPE)
servo.setCCWSlope(MOTORSLOPE)
        # Start with motion disabled (enableMovement only acts on a state
        # change, so initialise the flag to True before forcing it off)
        self.motion_enable = True
        self.enableMovement(False)
        # Store parameters
        self.cfg = arm_config
# Dirty flags for stored poses
self.ik_pose = None
self.ik_dirty = True
self.real_pose = None
self.real_dirty = True
# Current target pose
self.target_pose = None
def enableMovement(self, enable):
changed = False
if enable and not self.motion_enable:
print ("Warning: Arm enabled")
self.motion_enable = True
changed = True
elif not enable:
self.motion_enable = False
changed = True
        if changed:
            # Toggle torque on every connected servo
            for servo in self.servos.values():
                if servo is not None:
                    servo.setTorqueEnable(self.motion_enable)
def setWristGoalPosition(self, pos):
self.ik.setGoal(pos)
self.ik_dirty = True
def setWristGoalDirection(self, normal):
self.ik.setWristDir(normal)
self.ik_dirty = True
def getIKPose(self):
if self.ik_dirty and self.ik.valid:
# Construct geometry of arm from IK state
main_arm = self.ik.elbow - self.ik.originpl
arm_vert_angle = sigangle(main_arm, vertical)
forearm = self.ik.wristpl - self.ik.elbow
elbow_angle = angle_between(main_arm, forearm)
# Solve actuator angle for given elbow angle
# Base angle is between the main arm and actuator
base_angle = self.physsolver.inverse_forearm(elbow_angle)
actuator_angle = arm_vert_angle - base_angle
self.ik_pose = ArmPose(
self.cfg,
swing_angle = self.ik.swing,
# angles from vertical
shoulder_angle = arm_vert_angle,
actuator_angle = actuator_angle,
# angle between the main arm and forearm
elbow_angle = elbow_angle,
elbow2D = self.ik.elbow,
wrist2D = self.ik.wristpl,
effector2D = self.ik.goalpl,
effector = self.ik.goal,
wrist_x = self.ik.wrist_x,
wrist_y = self.ik.wrist_y
)
return self.ik_pose
def pollServos(self):
"""Poll the real-world servo positions"""
        for servo in self.servos.values():
            if servo is not None:
                newPos = servo.getPosition()
                if isinstance(newPos, float):
                    servo.data['pos'] = newPos
def clearPositionError(self):
"""Clears the servo's position-error accumulators"""
        for servo in self.servos.values():
if servo is not None and servo.protocol == 1:
servo.data['error'] = 0.0
def getRealPose(self):
"""Retrieve the real-world arm pose, or None if not all servos are
connected.
"""
        if any(servo is None for servo in self.servos.values()):
return None
# This whole function is essentially just FK based on the known servo
# angles
swing_servo = self.servos['swing'].data['pos']
elevator_servo = self.servos['shoulder'].data['pos']
actuator_servo = self.servos['elbow'].data['pos']
wrist_x_servo = self.servos['wrist_x'].data['pos']
wrist_y_servo = self.servos['wrist_y'].data['pos']
# Find the internal arm-pose angles for the given servo positions
swing_angle = ArmPose.calcSwingAngle(swing_servo)
elevator_angle = ArmPose.calcElevatorAngle(elevator_servo)
actuator_angle = ArmPose.calcActuatorAngle(actuator_servo)
wrist_x_angle = ArmPose.calcWristXAngle(wrist_x_servo)
wrist_y_angle = ArmPose.calcWristYAngle(wrist_y_servo)
# Solve elbow angle for given actuator and elevator angles
# (this is the angle from the elevator arm's direction to the forearm's)
elbow_angle = self.physsolver.solve_forearm(elevator_angle, actuator_angle)
# FK positions from config and angles
offset = self.cfg.shoulder_offset
shoulder2D = np.array([offset[1], 0])
elbow2D = shoulder2D + rotate(vertical, elevator_angle)*self.cfg.main_length
wrist2D = elbow2D + rotate(vertical, elevator_angle + elbow_angle)*self.cfg.forearm_length
effector2D = wrist2D + [self.cfg.wrist_length, 0]
# 3D Effector calculation is a little more involved
td = rotate([offset[0], effector2D[0]], swing_angle)
effector = np.array([td[0], effector2D[1], td[1]])
pose = ArmPose(
self.cfg,
swing_angle, elevator_angle, actuator_angle,
elbow_angle, elbow2D, wrist2D, effector2D,
effector, wrist_x_angle, wrist_y_angle)
return pose
def setTargetPose(self, new_pose):
self.target_pose = new_pose
def tick(self):
if self.target_pose is not None:
if self.motion_enable:
# Drive servos
gain = 0.1
if self.servos['swing'] is not None:
s = self.servos['swing']
pos = s.data['pos']
target = self.target_pose.getServoSwing()
# err = min(10, pos-target)
# s.data['error'] += err*gain
s.setGoalPosition(target)
if self.servos['shoulder'] is not None:
s = self.servos['shoulder']
# cumulative error
pos = s.data['pos']
target = self.target_pose.getServoElevator()
err = min(10, pos-target)
s.data['error'] += err*gain
s.data['error'] = np.clip(s.data['error'], -ERRORLIM, ERRORLIM)
s.setGoalPosition(target - s.data['error'])
if self.servos['elbow'] is not None:
s = self.servos['elbow']
pos = s.data['pos']
target = self.target_pose.getServoActuator()
err = min(10, pos-target)
s.data['error'] += err*gain
s.data['error'] = np.clip(s.data['error'], -ERRORLIM, ERRORLIM)
s.setGoalPosition(target - s.data['error'])
if self.servos['wrist_x'] is not None:
self.servos['wrist_x'].setGoalPosition(self.target_pose.getServoWristX())
if self.servos['wrist_y'] is not None:
self.servos['wrist_y'].setGoalPosition(self.target_pose.getServoWristY())
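# Hedged usage sketch (servo objects and arm_config construction are assumed
# to come from elsewhere in this codebase; goal coordinates are illustrative):
#
#   controller = ArmController(sw, sh, el, wx, wy, arm_config)
#   controller.setWristGoalPosition(np.array([150.0, 40.0, 0.0]))
#   pose = controller.getIKPose()
#   if pose is not None and pose.checkClearance():
#       controller.setTargetPose(pose)
#       controller.enableMovement(True)
#   controller.tick()        # call periodically; drives servos toward the target
#   controller.pollServos()  # refresh servo.data['pos'] between ticks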
# ---- flux/migrations/versions/9ba67b798fa_add_request_system.py · repo siq/flux · Linux-OpenIB · Python, 2,867 bytes ----
"""add_request_system
Revision: 9ba67b798fa
Revises: 31b92bf6506d
Created: 2013-07-23 02:49:09.342814
"""
revision = '9ba67b798fa'
down_revision = '31b92bf6506d'
from alembic import op
from spire.schema.fields import *
from spire.mesh import SurrogateType
from sqlalchemy import (Column, ForeignKey, ForeignKeyConstraint, PrimaryKeyConstraint,
CheckConstraint, UniqueConstraint)
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table('request',
Column('id', UUIDType(), nullable=False),
Column('name', TextType(), nullable=False),
Column('status', EnumerationType(), nullable=False),
Column('originator', TokenType(), nullable=False),
Column('assignee', TokenType(), nullable=False),
PrimaryKeyConstraint('id'),
UniqueConstraint('name'),
)
op.create_table('request_slot',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=False),
Column('title', TextType(), nullable=True),
Column('slot', TokenType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
UniqueConstraint('request_id','token'),
)
op.create_table('request_attachment',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=True),
Column('title', TextType(), nullable=True),
Column('attachment', SurrogateType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
)
op.create_table('request_product',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('token', TokenType(), nullable=False),
Column('title', TextType(), nullable=True),
Column('product', SurrogateType(), nullable=False),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
UniqueConstraint('request_id','token'),
)
op.create_table('message',
Column('id', UUIDType(), nullable=False),
Column('request_id', UUIDType(), nullable=False),
Column('author', TokenType(), nullable=False),
Column('occurrence', DateTimeType(timezone=True), nullable=False),
Column('message', TextType(), nullable=True),
ForeignKeyConstraint(['request_id'], ['request.id'], ondelete='CASCADE'),
PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('message')
op.drop_table('request_product')
op.drop_table('request_attachment')
op.drop_table('request_slot')
op.drop_table('request')
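# Hedged usage note: with a configured alembic environment, this revision is
# applied with `alembic upgrade 9ba67b798fa` and rolled back with
# `alembic downgrade 31b92bf6506d`.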
# ---- fsspec/tests/test_mapping.py · repo sodre/filesystem_spec · BSD-3-Clause · Python, 1,418 bytes ----
import os
import fsspec
from fsspec.implementations.memory import MemoryFileSystem
import pickle
import pytest
def test_mapping_prefix(tmpdir):
tmpdir = str(tmpdir)
os.makedirs(os.path.join(tmpdir, "afolder"))
    with open(os.path.join(tmpdir, "afile"), "w") as f:
        f.write("test")
    with open(os.path.join(tmpdir, "afolder", "anotherfile"), "w") as f:
        f.write("test2")
m = fsspec.get_mapper("file://" + tmpdir)
assert "afile" in m
assert m["afolder/anotherfile"] == b"test2"
fs = fsspec.filesystem("file")
m2 = fs.get_mapper(tmpdir)
m3 = fs.get_mapper("file://" + tmpdir)
assert m == m2 == m3
def test_ops():
MemoryFileSystem.store.clear()
m = fsspec.get_mapper("memory://")
assert not m
assert list(m) == []
with pytest.raises(KeyError):
m["hi"]
assert m.pop("key", 0) == 0
m["key0"] = b"data"
assert list(m) == ["key0"]
assert m["key0"] == b"data"
m.clear()
assert list(m) == []
def test_pickle():
m = fsspec.get_mapper("memory://")
assert isinstance(m.fs, MemoryFileSystem)
m["key"] = b"data"
m2 = pickle.loads(pickle.dumps(m))
assert list(m) == list(m2)
def test_keys_view():
# https://github.com/intake/filesystem_spec/issues/186
m = fsspec.get_mapper("memory://")
m["key"] = b"data"
keys = m.keys()
assert len(keys) == 1
# check that we don't consume the keys
assert len(keys) == 1
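# Hedged sketch: get_mapper exposes any fsspec filesystem as a MutableMapping,
# so dict-style consumers can sit directly on top of it:
#
#   m = fsspec.get_mapper("memory://demo")
#   m["a/b"] = b"1"
#   assert m["a/b"] == b"1" and "a/b" in m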
# ---- testedome/questions/quest_5.py · repo EderReisS/pythonChallenges · MIT · Python, 990 bytes ----
"""
A
/ |
B C
'B, C'
"""
class CategoryTree:
def __init__(self):
self.root = {}
self.all_categories = []
def add_category(self, category, parent):
if category in self.all_categories:
raise KeyError(f"{category} exists")
if parent is None:
self.root[category] = set()
if parent:
if parent not in self.root:
raise KeyError(f"{parent} invalid")
self.root[category] = set()
self.root[parent].add(category)
self.all_categories.append(category)
def get_children(self, parent):
if parent and parent not in self.root:
raise KeyError(f"{parent} invalid")
return list(self.root[parent])
if __name__ == "__main__":
c = CategoryTree()
c.add_category('A', None)
c.add_category('B', 'A')
c.add_category('C', 'A')
print(','.join(c.get_children('A') or []))
print(','.join(c.get_children('E') or []))
# ---- scripts/multiplayer/server.py · repo AgnirudraSil/tetris · MIT · Python, 1,397 bytes ----
import pickle
import socket
import _thread
from scripts.multiplayer import game, board, tetriminos
server = "192.168.29.144"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((server, port))
except socket.error as e:
print(e)
s.listen()
print("Waiting for connection")
connected = set()
games = {}
idCount = 0
def threaded_client(conn, p, gameId):
global idCount
conn.send(str.encode(str(p)))
reply = ""
while True:
try:
data = conn.recv(4096).decode()
            if gameId in games:
                current_game = games[gameId]  # local name avoids shadowing the imported `game` module
                if not data:
                    break
                else:
                    current_game.update(p, data)
                    reply = current_game
                    conn.sendall(pickle.dumps(reply))
            else:
                break
        except Exception:
            break
print("Lost Connection!")
    try:
        del games[gameId]
        print("Closing Game", gameId)
    except KeyError:
        pass
idCount -= 1
conn.close()
while True:
conn, addr = s.accept()
print("Connected to: ", addr)
idCount += 1
p = 0
game_id = (idCount - 1) // 2
if idCount % 2 == 1:
games[game_id] = game.Game((0, 0, 0), None, board)
else:
games[game_id].ready = True
p = 1
_thread.start_new_thread(threaded_client, (conn, p, game_id))
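# Hedged client-side sketch (protocol inferred from the loop above: the server
# sends this client's player number as a string on connect, then answers every
# request with a pickled Game object):
#
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect((server, port))
#   player = int(client.recv(64).decode())
#   client.send(b"get")
#   state = pickle.loads(client.recv(4096))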
# ---- AdventOfCode/2018/src/day-03/app.py · repo AustinTSchaffer/DailyProgrammer · MIT · Python, 2,311 bytes ----
import os
import re
from collections import defaultdict
from typing import Iterator
class Claim(object):
def __init__(self, data_row):
match = re.match(r'#(\d+) @ (\d+),(\d+): (\d+)x(\d+)', data_row)
self.id = int(match[1])
self.x = int(match[2])
self.y = int(match[3])
self.width = int(match[4])
self.height = int(match[5])
def all_locations(self):
for x in range(self.width):
for y in range(self.height):
yield (self.x + x, self.y + y)
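# Hedged mini-example: Claim('#1 @ 3,2: 2x2').all_locations() yields
# (3, 2), (3, 3), (4, 2), (4, 3) -- the four cells of a 2x2 patch whose
# top-left corner sits at x=3, y=2.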
CURRENT_DIR, _ = os.path.split(__file__)
DATA_FILE = os.path.join(CURRENT_DIR, 'data.txt')
def data_file_iter(data_file) -> Iterator[Claim]:
    with open(data_file, 'r') as data:
        for claim in data:
            claim = claim.strip()
            if claim:
                yield Claim(claim)
def part1(claims):
"""
This is basically a single-threaded collision detection method,
implemented in pure python. Computation complexity is obviously
not a consideration.
"""
# Determines how many times each locations was claimed
claimed_space_registry = defaultdict(int)
for claim in claims:
for location in claim.all_locations():
claimed_space_registry[location] += 1
# Generates the set of all locations that were claimed more than once
multi_claimed_spaces = {
location
for location,count in claimed_space_registry.items()
if count > 1
}
# Prints the number of locations that are claimed more than once
# and returns the set of locations that were claimed more than once
print('Multi-Claimed Spaces:', len(multi_claimed_spaces))
return multi_claimed_spaces
def part2(claims, multi_claimed_spaces):
"""
Might not be the optimal solution, but it runs fast enough, and uses
components that were already calculated in part 1.
"""
for claim in claims:
all_locations_are_non_overlapping = all(map(
lambda loc: loc not in multi_claimed_spaces,
claim.all_locations()
))
if all_locations_are_non_overlapping:
print('Non-overlapping claim:', claim.id)
return claim
if __name__ == '__main__':
    claims = list(data_file_iter(DATA_FILE))
mcs = part1(claims)
santas_suit_material = part2(claims, mcs)
# ---- tests/views/test_admin_committee_questions.py · repo Lunga001/pmg-cms-2 · Apache-2.0 · Python, 8,479 bytes ----
import os
from urllib.parse import urlparse, parse_qs
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee, CommitteeQuestion
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from flask import escape
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super().tearDown()
def test_upload_committee_question_document_with_old_format(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW190-200303.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertEqual(
question.question,
"Whether her Office has initiated the drafting of a Bill that seeks to protect and promote the rights of persons with disabilities; if not, (a) why not and (b) what steps does her Office intend taking in this regard; if so, on what date does she envisage that the Bill will be introduced in the National Assembly?",
)
self.assertEqual(
question.minister.name,
"Minister in The Presidency for Women, Youth and Persons with Disabilities",
)
self.assertEqual(question.asked_by_name, "Mr S Ngcobo")
self.assertEqual(
question.answer,
"<p>Yes</p><p>(b) The Department is in the process of preparing the drafting of a Bill which will be submitted to Cabinet for approval before it will be tabled in Parliament during the 2021/2022 financial year.</p>",
)
self.assertEqual(question.code, "NW190")
# Delete the question that was created
self.created_objects.append(question)
def test_upload_committee_question_document_with_new_format(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW104-2020-02-28.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW104-2020-02-28.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/admin/committee-question/"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertEqual(
question.question,
"What (a) is the number of (i) residential properties, (ii) business erven’, (iii) government buildings and (iv) agricultural properties owned by her department in the Lephalale Local Municipality which are (aa) vacant, (bb) occupied and (cc) earmarked for disposal and (b) total amount does her department owe the municipality in outstanding rates and services?",
)
self.assertEqual(
question.minister.name, "Minister of Public Works and Infrastructure",
)
self.assertEqual(question.asked_by_name, "Ms S J Graham")
self.assertEqual(
question.answer,
"<p><strong>The Minister of Public Works and</strong><strong> Infrastructure: </strong></p><ol><li>The Department of Public Works and Infrastructure (DPWI) has informed me that in the Lephalale Local Municipality the Department owns (i) 183 residential properties (ii) one business erven (iii) 132 government buildings and (iv) 5 agricultural properties. DPWI informed me that (aa) 8 land parcels are vacant and (bb) only one property is unutilised. </li></ol><p>(cc) DPWI has not earmarked any properties for disposal in the Lephalale Local Municipality.</p><ol><li>In August 2019 the Department started a Government Debt Project engaging directly with municipalities and Eskom to verify and reconcile accounts and the project. DPWI, on behalf of client departments, owed the Lephalale Local Municipality, as per accounts received on 17 February 2020, R 334,989.69 which relates current consumption. </li></ol>",
)
self.assertEqual(question.code, "NW104")
# Delete the question that was created
self.created_objects.append(question)
def test_upload_committee_question_document_with_navigable_string_error(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path(
"../data/committee_questions/RNW1153-200619.docx"
)
with open(path, "rb") as f:
data["file"] = (f, "RNW1153-200619.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/admin/committee-question/"},
content_type="multipart/form-data",
)
self.assertEqual(302, response.status_code)
response_url = urlparse(response.location)
response_query = parse_qs(response_url.query)
self.assertIn("id", response_query, "Question ID must be in response query")
created_question_id = int(response_query["id"][0])
response = self.make_request(
"%s?%s" % (response_url.path, response_url.query),
self.user,
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# Test that the question that was created contains the correct data
question = CommitteeQuestion.query.get(created_question_id)
self.assertIn(
"(1)Whether, with reference to her reply to question 937 on 4 June 2020",
question.question,
)
self.assertEqual(
question.minister.name,
"Minister in The Presidency for Women, Youth and Persons with Disabilities",
)
self.assertEqual(question.asked_by_name, "Ms T Breedt")
self.assertIn(
"There were no deviations from the standard supply chain management procedures",
question.answer,
)
self.assertEqual(question.code, "NW1153")
# Delete the question that was created
self.created_objects.append(question)
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
# ---- syloga/transform/evaluation.py · repo xaedes/python-symbolic-logic-to-gate · MIT · Python, 4,866 bytes ----
from syloga.core.map_expression_args import map_expression_args
from syloga.utils.identity import identity
from syloga.ast.BooleanNot import BooleanNot
from syloga.ast.BooleanValue import BooleanValue
from syloga.ast.BooleanOr import BooleanOr
from syloga.ast.BooleanAnd import BooleanAnd
from syloga.ast.BooleanNand import BooleanNand
from syloga.ast.BooleanNor import BooleanNor
from syloga.ast.BooleanXor import BooleanXor
from syloga.ast.BreakOut import BreakOut
# from syloga.core.assert_equality_by_table import assert_equality_by_table
def evaluate_expr(expression):
recurse = evaluate_expr
# result = assert_equality_by_table
result = identity
    def arg_is_value(arg):
        # True only for raw bools and BooleanValue leaves (exact type match,
        # so expression subclasses are not treated as values); the lambda
        # previously defined here was dead code, shadowed by this def
        return type(arg) in [BooleanValue, bool]
args_are_values = lambda args: all(map(arg_is_value, args))
get_value = lambda arg: arg if type(arg) == bool else arg.value
is_true = lambda val: val == True
is_false = lambda val: val == False
#print("looking at " + str(type(expression)))
if type(expression) == BooleanNot:
        assert len(expression.args) == 1
        arg = recurse(expression.args[0])
if arg_is_value(arg):
return result(BooleanValue(not get_value(arg)))
else:
return result(BooleanNot(arg))
elif type(expression) == BooleanOr:
args = list(map(recurse, expression.args))
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
args_wo_neutral = list(filter(lambda x: not(arg_is_value(x) and is_false(get_value(x))),args))
if args_are_values(args):
return result(BooleanValue(any(arg_values)))
elif any(map(is_true,arg_values)):
return result(BooleanValue(True))
elif len(args) == 1:
return result(recurse(args[0]))
elif len(args_wo_neutral) < len(args):
return result(recurse(BooleanOr(*args_wo_neutral)))
else:
return result(BooleanOr(*args))
elif type(expression) == BooleanAnd:
args = list(map(recurse, expression.args))
#print(expression.args)
#print(args)
#negated_atom_values = [not get_value(arg) for arg in args if arg_is_value(arg)]
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
args_wo_neutral = list(filter(lambda x: not(arg_is_value(x) and is_true(get_value(x))),args))
#print(arg_values)
if args_are_values(args):
return result(BooleanValue(all(map(is_true,arg_values))))
elif any(map(is_false,arg_values)):
return result(BooleanValue(False))
elif len(args) == 1:
return result(recurse(args[0]))
elif len(args_wo_neutral) < len(args):
return result(recurse(BooleanAnd(*args_wo_neutral)))
else:
return result(BooleanAnd(*args))
elif type(expression) == BooleanNand:
return result(recurse(BooleanNot(BooleanAnd(*expression.args))))
elif type(expression) == BooleanNor:
return result(recurse(BooleanNot(BooleanOr(*expression.args))))
elif type(expression) == BooleanXor:
args = list(map(recurse, expression.args))
arg_values = [get_value(arg) for arg in args if arg_is_value(arg)]
non_value_args = [arg for arg in args if not arg_is_value(arg)]
if len(args) == 0:
raise ValueError("args are missing")
elif len(args) == 1:
return result(args[0])
elif len(arg_values) == 0:
return result(BooleanXor(*non_value_args))
elif len(arg_values) == 1:
if is_true(arg_values[0]):
return result(BooleanXor(arg_values[0], *non_value_args))
else:
return result(recurse(BooleanXor(*non_value_args)))
elif len(arg_values) > 1:
evaluated = is_true(arg_values[0])
for a in arg_values[1:]:
evaluated ^= is_true(a)
evaluated = bool(evaluated)
return result(recurse(BooleanXor(evaluated, *non_value_args)))
elif type(expression) == BreakOut:
expr = recurse(expression.expr)
if arg_is_value(expr):
return result(BooleanValue(expr))
else:
return result(BreakOut(expr))
else:
return result(map_expression_args(recurse, expression, recurse_collection=True))
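# Hedged usage sketch (variadic constructors assumed, as used above):
#
#   expr = BooleanAnd(BooleanValue(True), BooleanNot(BooleanValue(False)))
#   evaluate_expr(expr)  # -> BooleanValue(True)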
# ---- oscar/apps/customer/mixins.py · repo Idematica/django-oscar · BSD-3-Clause · Python, 2,753 bytes ----
from django.conf import settings
from django.contrib.auth import authenticate, login as auth_login
from django.contrib.sites.models import get_current_site
from django.db.models import get_model
from oscar.apps.customer.signals import user_registered
from oscar.core.loading import get_class
from oscar.core.compat import get_user_model
User = get_user_model()
CommunicationEventType = get_model('customer', 'CommunicationEventType')
Dispatcher = get_class('customer.utils', 'Dispatcher')
class PageTitleMixin(object):
"""
Passes page_title and active_tab into context, which makes it quite useful
for the accounts views.
Dynamic page titles are possible by overriding get_page_title.
"""
page_title = None
active_tab = None
# Use a method that can be overridden and customised
def get_page_title(self):
return self.page_title
def get_context_data(self, **kwargs):
ctx = super(PageTitleMixin, self).get_context_data(**kwargs)
ctx.setdefault('page_title', self.get_page_title())
ctx.setdefault('active_tab', self.active_tab)
return ctx
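# Hedged usage sketch: mixed into a Django class-based view, e.g.
#
#   class OrderHistoryView(PageTitleMixin, ListView):
#       page_title = 'Order history'
#       active_tab = 'orders'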
class RegisterUserMixin(object):
communication_type_code = 'REGISTRATION'
def register_user(self, form):
"""
Create a user instance and send a new registration email (if configured
to).
"""
user = form.save()
if getattr(settings, 'OSCAR_SEND_REGISTRATION_EMAIL', True):
self.send_registration_email(user)
# Raise signal
user_registered.send_robust(sender=self, user=user)
# We have to authenticate before login
try:
user = authenticate(
username=user.email,
password=form.cleaned_data['password1'])
except User.MultipleObjectsReturned:
# Handle race condition where the registration request is made
# multiple times in quick succession. This leads to both requests
# passing the uniqueness check and creating users (as the first one
# hasn't committed when the second one runs the check). We retain
# the first one and delete the dupes.
users = User.objects.filter(email=user.email)
user = users[0]
for u in users[1:]:
u.delete()
auth_login(self.request, user)
return user
def send_registration_email(self, user):
code = self.communication_type_code
ctx = {'user': user,
'site': get_current_site(self.request)}
messages = CommunicationEventType.objects.get_and_render(
code, ctx)
if messages and messages['body']:
Dispatcher().dispatch_user_messages(user, messages)
# ---- plot_integral.py · repo vfloeser/TumorDelivery · Apache-2.0 · Python, 2,421 bytes ----
from parameters import *
from library_time import *
from paths import *
import numpy as np
import pylab as plt
import matplotlib.pyplot as mplt
mplt.rc('text', usetex=True)
mplt.rcParams.update({'font.size': 16})
import logging, getopt, sys
import time
import os
##########################################################################################
# C O N F I G U R A T I O N
##########################################################################################
# activate ylim for w
var1 = w1
var3 = w3
var5 = w5
var10 = w10
var25 = w25
mode = "w" # u or w
##########################################################################################
# M A I N
##########################################################################################
if __name__ == "__main__":
if not os.path.exists('plots'):
os.makedirs('plots')
print('Created folder plots!')
if not os.path.exists('plots/integral'):
os.makedirs('plots/integral')
print('Created folder plots/integral!')
t = np.linspace(tmin, tmax, Nt)
r = np.linspace(0,R,Nr)
Ivar1 = np.zeros(Nt)
Ivar3 = np.zeros(Nt)
Ivar5 = np.zeros(Nt)
Ivar10 = np.zeros(Nt)
Ivar25 = np.zeros(Nt)
for i in range(Nt):
# /1000000 because of units
Ivar1[i] = integrate(var1, i,r, Nt)/1000000
Ivar3[i] = integrate(var3, i,r, Nt)/1000000
Ivar5[i] = integrate(var5, i,r, Nt)/1000000
Ivar10[i] = integrate(var10, i,r, Nt)/1000000
Ivar25[i] = integrate(var25, i,r, Nt)/1000000
mplt.plot(t, Ivar1, label=r'$\alpha = 1$')
mplt.plot(t, Ivar3, label=r'$\alpha = 3$')
mplt.plot(t, Ivar5, label=r'$\alpha = 5$')
mplt.plot(t, Ivar10, label=r'$\alpha = 10$')
mplt.plot(t, Ivar25, label=r'$\alpha = 25$')
mplt.xlim(tmin, tmax)
mplt.yscale('log')
mplt.xlabel(r'$t\quad [h]$')
mplt.ylabel(r'$\bar{'+mode+'}\quad [\mu mol]$')
##########################################################################################
# clamp the y-range for w, because some values don't make sense
mplt.ylim(1e-11, 3e2)
##########################################################################################
mplt.legend(loc=1, bbox_to_anchor=(1, 0.9))
mplt.tight_layout()
mplt.savefig('plots/integral/int'+mode+'.pdf', format='pdf')
mplt.show()
# ---- main.py · repo Dr3xler/CookieConsentChecker · MIT · Python, 686 bytes ----
from core import file_handling as file_h, driver_handling as driver_h
from website_handling import website_check as wc
from cookie_handling import cookie_compare
websites = file_h.website_reader()
driver = driver_h.webdriver_setup()
try:
    wc.load_with_addon(driver, websites)
except Exception as err:
    print('ERROR: IN FIREFOX USAGE WITH ADDONS:', err)
finally:
    wc.close_driver_session(driver)
# driver need to be reloaded because we need a new session without addons
driver = driver_h.webdriver_setup()
try:
    wc.load_without_addon(driver, websites)
except Exception as err:
    print('ERROR: IN VANILLA FIREFOX VERSION:', err)
finally:
    wc.close_driver_session(driver)
cookie_compare.compare(websites)
# ---- PyPBEC/OpticalMedium.py · repo photonbec/PyPBEC · MIT · Python, 4,788 bytes ----
import numpy as np
from scipy import constants as sc
from scipy.interpolate import interp1d
from pathlib import Path
from scipy.special import erf as Erf
import pandas as pd
import os
class OpticalMedium():
available_media = list()
available_media.append("Rhodamine6G")
def __init__(self, optical_medium):
"""
        Initializes an optical medium object.
Parameters:
optical_medium (str): Optical medium
"""
        if not isinstance(optical_medium, str):
            raise Exception("optical_medium is expected to be a string")
        if optical_medium not in self.available_media:
            raise Exception(optical_medium + " is an unknown optical medium")
if optical_medium == "Rhodamine6G":
self.medium = Rhodamine6G()
def get_rates(self, lambdas, **kwargs):
"""
Calculates the rates of absorption and emission, for a specific optical medium.
Parameters:
lambdas (list, or other iterable): Wavelength points where the rates are to be calculated. Wavelength is in meters
other medium specific arguments
"""
return self.medium.get_rates(lambdas=lambdas, **kwargs)
class Rhodamine6G(OpticalMedium):
def __init__(self):
pass
def get_rates(self, lambdas, dye_concentration, n):
"""
Rates for Rhodamine 6G
Parameters:
lambdas (list, or other iterable): Wavelength points where the rates are to be calculated. Wavelength is in meters
dye_concentration (float): In mM (milimolar) 1 mM = 1 mol / m^3
n (float): index of refraction
"""
# absorption data
min_wavelength = 480
max_wavelength = 650
absorption_spectrum_datafile = Path("data") / 'absorption_cross_sections_R6G_in_EthyleneGlycol_corrected.csv'
absorption_spectrum_datafile = Path(os.path.dirname(os.path.abspath(__file__))) / absorption_spectrum_datafile
raw_data2 = pd.read_csv(absorption_spectrum_datafile)
initial_index = raw_data2.iloc[(raw_data2['wavelength (nm)']-min_wavelength).abs().argsort()].index[0]
raw_data2 = raw_data2.iloc[initial_index:].reset_index(drop=True)
final_index = raw_data2.iloc[(raw_data2['wavelength (nm)']-max_wavelength).abs().argsort()].index[0]
raw_data2 = raw_data2.iloc[:final_index].reset_index(drop=True)
absorption_data = raw_data2
absorption_data_normalized = absorption_data['absorption cross-section (m^2)'].values / np.max(absorption_data['absorption cross-section (m^2)'].values)
absorption_spectrum = np.squeeze(np.array([[absorption_data['wavelength (nm)'].values], [absorption_data_normalized]], dtype=float))
interpolated_absorption_spectrum = interp1d(absorption_spectrum[0,:], absorption_spectrum[1,:], kind='cubic')
# emission data
fluorescence_spectrum_datafile = Path("data") / 'fluorescence_spectrum_R6G_in_EthyleneGlycol_corrected.csv'
fluorescence_spectrum_datafile = Path(os.path.dirname(os.path.abspath(__file__))) / fluorescence_spectrum_datafile
raw_data = pd.read_csv(fluorescence_spectrum_datafile)
initial_index = raw_data.iloc[(raw_data['wavelength (nm)']-min_wavelength).abs().argsort()].index[0]
raw_data = raw_data.iloc[initial_index:].reset_index(drop=True)
final_index = raw_data.iloc[(raw_data['wavelength (nm)']-max_wavelength).abs().argsort()].index[0]
raw_data = raw_data.iloc[:final_index].reset_index(drop=True)
fluorescence_data = raw_data
fluorescence_data_normalized = fluorescence_data['fluorescence (arb. units)'].values / np.max(fluorescence_data['fluorescence (arb. units)'].values)
emission_spectrum = np.squeeze(np.array([[fluorescence_data['wavelength (nm)'].values], [fluorescence_data_normalized]], dtype=float))
interpolated_emission_spectrum = interp1d(emission_spectrum[0,:], emission_spectrum[1,:], kind='cubic')
# Uses both datasets
if np.min(1e9*np.array(lambdas)) < 480 or np.max(1e9*np.array(lambdas)) > 650:
raise Exception('*** Restrict wavelength to the range between 480 and 650 nm ***')
temperature = 300
lamZPL = 545e-9
n_mol_per_vol= dye_concentration*sc.Avogadro
peak_Xsectn = 2.45e-20*n_mol_per_vol*sc.c/n
wpzl = 2*np.pi*sc.c/lamZPL/1e12
def freq(wl):
return 2*np.pi*sc.c/wl/1e12
def single_exp_func(det):
f_p = 2*np.pi*sc.c/(wpzl+det)*1e-3
f_m = 2*np.pi*sc.c/(wpzl-det)*1e-3
return (0.5*interpolated_absorption_spectrum(f_p)) + (0.5*interpolated_emission_spectrum(f_m))
def Err(det):
return Erf(det*1e12)
def single_adjust_func(det):
return ((1+Err(det))/2.0*single_exp_func(det)) + ((1-Err(det))/2.0*single_exp_func(-1.0*det)*np.exp(sc.h/(2*np.pi*sc.k*temperature)*det*1e12))
emission_rates = np.array([single_adjust_func(-1.0*freq(a_l)+wpzl) for a_l in lambdas])*peak_Xsectn
absorption_rates = np.array([single_adjust_func(freq(a_l)-wpzl) for a_l in lambdas])*peak_Xsectn
return absorption_rates, emission_rates
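# Hedged usage sketch (units as documented above: wavelengths in metres within
# the 480-650 nm window, dye_concentration in mM, n is the refractive index):
#
#   medium = OpticalMedium("Rhodamine6G")
#   absorption, emission = medium.get_rates(
#       lambdas=[500e-9, 545e-9, 600e-9], dye_concentration=1.0, n=1.44)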
# ---- corehq/apps/appstore/urls.py · repo dslowikowski/commcare-hq · BSD-3-Clause · Python, 1,251 bytes ----
from django.conf.urls.defaults import url, include, patterns
from corehq.apps.appstore.dispatcher import AppstoreDispatcher
store_urls = patterns('corehq.apps.appstore.views',
url(r'^$', 'appstore_default', name="appstore_interfaces_default"),
AppstoreDispatcher.url_pattern(),
)
urlpatterns = patterns('corehq.apps.appstore.views',
url(r'^$', 'appstore', name='appstore'),
url(r'^api/', 'appstore_api', name='appstore_api'),
url(r'^store/', include(store_urls)),
url(r'^(?P<domain>[\w\.-]+)/info/$', 'project_info', name='project_info'),
url(r'^deployments/$', 'deployments', name='deployments'),
url(r'^deployments/api/$', 'deployments_api', name='deployments_api'),
url(r'^deployments/(?P<domain>[\w\.-]+)/info/$', 'deployment_info', name='deployment_info'),
url(r'^(?P<domain>[\w\.-]+)/approve/$', 'approve_app', name='approve_appstore_app'),
url(r'^(?P<domain>[\w\.-]+)/copy/$', 'copy_snapshot', name='domain_copy_snapshot'),
url(r'^(?P<domain>[\w\.-]+)/importapp/$', 'import_app', name='import_app_from_snapshot'),
url(r'^(?P<domain>[\w\.-]+)/image/$', 'project_image', name='appstore_project_image'),
url(r'^(?P<domain>[\w\.-]+)/multimedia/$', 'media_files', name='media_files'),
)
# ---- faster-rcnn-vgg16-fpn/model/fpn.py · repo fengkaibit/faster-rcnn_vgg16_fpn · MIT · Python, 2,667 bytes ----
from __future__ import absolute_import
import torch
from torch.nn import functional
class FPN(torch.nn.Module):
def __init__(self, out_channels):
super(FPN, self).__init__()
self.out_channels = out_channels
self.P5 = torch.nn.MaxPool2d(kernel_size=1, stride=2, padding=0)
self.P4_conv1 = torch.nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P4_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
self.P3_conv1 = torch.nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P3_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
self.P2_conv1 = torch.nn.Conv2d(256, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P2_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
normal_init(self.P4_conv1, 0, 0.01)
normal_init(self.P4_conv2, 0, 0.01)
normal_init(self.P3_conv1, 0, 0.01)
normal_init(self.P3_conv2, 0, 0.01)
normal_init(self.P2_conv1, 0, 0.01)
normal_init(self.P2_conv2, 0, 0.01)
def forward(self, C2, C3, C4):
p4_out = self.P4_conv1(C4)
p5_out = self.P5(p4_out)
p3_out = self._upsample_add(p4_out, self.P3_conv1(C3))
p2_out = self._upsample_add(p3_out, self.P2_conv1(C2))
p4_out = self.P4_conv2(p4_out)
p3_out = self.P3_conv2(p3_out)
p2_out = self.P2_conv2(p2_out)
return p2_out, p3_out, p4_out, p5_out
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
        _, _, H, W = y.size()
        # align_corners=False matches the default behaviour and silences the
        # ambiguity warning newer PyTorch versions emit for bilinear mode
        return functional.interpolate(x, size=(H, W), mode='bilinear', align_corners=False) + y
def normal_init(m, mean, stddev, truncated=False):
"""
weight initalizer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
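# Hedged shape sketch (channel counts as wired above: C2 carries 256 channels,
# C3 and C4 carry 512):
#
#   fpn = FPN(out_channels=256)
#   C2 = torch.randn(1, 256, 64, 64)
#   C3 = torch.randn(1, 512, 32, 32)
#   C4 = torch.randn(1, 512, 16, 16)
#   p2, p3, p4, p5 = fpn(C2, C3, C4)  # 64x64, 32x32, 16x16, 8x8 feature maps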
# ---- test/setups/finders/finders_test.py · repo bowlofstew/client · MIT · Python, 2,314 bytes ----
import unittest
from biicode.common.settings.version import Version
from mock import patch
from biicode.client.setups.finders.finders import gnu_version
from biicode.client.setups.rpi_cross_compiler import find_gnu_arm
from biicode.client.workspace.bii_paths import get_biicode_env_folder_path
GCC_VERSION_MAC = '''Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/usr/include/c++/4.2.1
Apple LLVM version 5.1 (clang-503.0.38) (based on LLVM 3.4svn)
Target: x86_64-apple-darwin13.1.0
Thread model: posix'''
GCC_VERSION_UBUNTU = '''gcc (Ubuntu/Linaro 4.8.1-10ubuntu9) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
'''
GCC_VERSION_WIN = '''gcc (GCC) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'''
class FindersTest(unittest.TestCase):
@patch('biicode.client.setups.finders.finders.execute')
def test_gnu_version_detection(self, execute_mock):
execute_mock.return_value = ("", GCC_VERSION_MAC)
self.assertEquals(gnu_version('gnu'), Version('4.2.1'))
execute_mock.return_value = ("", GCC_VERSION_UBUNTU)
self.assertEquals(gnu_version('gnu'), Version('4.8.1'))
execute_mock.return_value = ("", GCC_VERSION_WIN)
self.assertEquals(gnu_version('gnu'), Version('4.8.1'))
@patch('os.path.exists')
def test_find_gnu_arm(self, exists):
exists.return_value = False
self.assertEqual((None, None), find_gnu_arm())
exists.return_value = True
c_path, cpp_path = find_gnu_arm()
inst_path = get_biicode_env_folder_path().replace('\\', '/')
c_path = c_path.replace('\\', '/')
cpp_path = cpp_path.replace('\\', '/')
inst_path = '%s/raspberry_cross_compilers/arm-bcm2708/'\
'arm-bcm2708hardfp-linux-gnueabi/bin/'\
'arm-bcm2708hardfp-linux-gnueabi' % inst_path
self.assertTrue(cpp_path.endswith('%s-g++' % inst_path))
self.assertTrue(c_path.endswith('%s-gcc' % inst_path))
# ---- utilities.py · repo ameldocena/StratifiedAggregation · MIT · Python, 5,152 bytes ----
import random
import numpy
#import tensorflow as tf
#import torch
from abc import abstractmethod
from sklearn.decomposition import PCA
from aggregators import FedAvg, MultiKrum, AlignedAvg, TrimmedMean, Median, StratifiedAggr
class SelectionStrategy:
# Unchanged from original work
@abstractmethod
def select_round_workers(self, workers, poisoned_workers, kwargs):
"""
:param workers: list(int). All workers available for learning
:param poisoned_workers: list(int). All workers that are poisoned
:param kwargs: dict
"""
raise NotImplementedError("select_round_workers() not implemented")
class RandomSelectionStrategy(SelectionStrategy):
# Unchanged from original work
"""
Randomly selects workers out of the list of all workers
"""
def select_round_workers(self, workers, poisoned_workers, kwargs):
#The poisoned_workers here are not used
return random.sample(workers, kwargs["NUM_WORKERS_PER_ROUND"])
#returns a list of sampled worker ids
# class StratifiedRandomSelection(SelectionStrategy):
# #We first stratify: Each stratum will be a list of workers
# #Then within each stratum, we randomly select
# #We would need the list of workers and the information about their skews
def select_aggregator(args, name, KWARGS=None):
# Creates an Aggregator object as selected
if KWARGS is None:
KWARGS = {}  # avoid a shared mutable default; the AlignedAvgImpute branch mutates KWARGS below
if name == "FedAvg":
return FedAvg(args, name, KWARGS)
elif name == "AlignedAvg":
return AlignedAvg(args, name, KWARGS)
elif name == "AlignedAvgImpute":
KWARGS.update({"use_impute":"filter","align":"fusion"})
return AlignedAvg(args, name, **KWARGS)
elif name == "MultiKrum":
return MultiKrum(args, name, KWARGS)
elif name == "TrimmedMean":
return TrimmedMean(args, name, KWARGS)
elif name == "Median":
return Median(args, name, KWARGS)
elif (name == "StratKrum") or (name == "StratTrimMean") or (name == "StratMedian") or (name == "StratFedAvg"):
#We may have to change the class name to StratifiedAggregation
return StratifiedAggr(args, name, KWARGS)
else:
raise NotImplementedError(f"Unrecognized Aggregator Name: {name}")
def calculate_pca_of_gradients(logger, gradients, num_components):
# Unchanged from original work
pca = PCA(n_components=num_components)
logger.info("Computing {}-component PCA of gradients".format(num_components))
return pca.fit_transform(gradients)
#So this is here after all
def calculate_model_gradient( model_1, model_2):
# Minor change from original work
"""
Calculates the gradient (parameter difference) between two Torch models.
:param logger: loguru.logger (NOW REMOVED)
:param model_1: torch.nn
:param model_2: torch.nn
"""
model_1_parameters = list(dict(model_1.state_dict()).values())
model_2_parameters = list(dict(model_2.state_dict()).values())
return calculate_parameter_gradients(model_1_parameters, model_2_parameters)
def calculate_parameter_gradients(params_1, params_2):
# Minor change from original work
"""
Calculates the gradient (parameter difference) between two sets of Torch parameters.
:param logger: loguru.logger (NOW REMOVED)
:param params_1: dict
:param params_2: dict
"""
#logger.debug("Shape of model_1_parameters: {}".format(str(len(params_1))))
#logger.debug("Shape of model_2_parameters: {}".format(str(len(params_2))))
return numpy.array([x for x in numpy.subtract(params_1, params_2)])
# #Inserted
# def convert2TF(torch_tensor):
# # Converts a pytorch tensor into a Tensorflow.
# # We first convert torch into numpy, then to tensorflow.
# # Arg: torch_tensor - a Pytorch tensor object
# np_tensor = torch_tensor.numpy().astype(float)
# return tf.convert_to_tensor(np_tensor)
#
# def convert2Torch(tf_tensor):
# #Converts a TF tensor to Torch
# #Arg: tf_tensor - a TF tensor
# np_tensor = tf.make_ndarray(tf_tensor)
# return torch.from_numpy(np_tensor)
def count_poisoned_stratum(stratified_workers, poisoned_workers):
if len(poisoned_workers) > 0:
print("\nPoisoned workers:", len(poisoned_workers), poisoned_workers)
for stratum in stratified_workers:
intersect = list(set(stratified_workers[stratum]).intersection(poisoned_workers))
print("Count poisoned workers per stratum:", len(intersect), intersect)
print("Stratum: {}. Propn to total poisoned: {}. Propn to subpopn in stratum: {}".format(stratum, len(intersect)/len(poisoned_workers),
len(intersect)/len(stratified_workers[stratum])))
else:
print("No poisoned workers")
def generate_uniform_weights(random_workers):
"""
This function generates uniform weights for each stratum in random_workers
:param random_workers:
:return:
"""
strata_weights = dict()
weight = 1.0 / len(list(random_workers.keys()))
for stratum in random_workers:
strata_weights[stratum] = weight
return strata_weights
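# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of how the helpers above fit together. The strata layout
# and poisoned-worker ids are hypothetical, and passing args=None assumes
# the FedAvg constructor tolerates a placeholder namespace.
example_strata = {0: [1, 2, 3], 1: [4, 5], 2: [6]}
count_poisoned_stratum(example_strata, poisoned_workers=[2, 4])
print(generate_uniform_weights(example_strata))  # each stratum weighted 1/3
aggregator = select_aggregator(args=None, name="FedAvg")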
| 39.030303
| 147
| 0.695652
| 627
| 5,152
| 5.567783
| 0.295056
| 0.051561
| 0.032083
| 0.030937
| 0.210828
| 0.118591
| 0.118591
| 0.07505
| 0.07505
| 0.044686
| 0
| 0.006144
| 0.21021
| 5,152
| 132
| 148
| 39.030303
| 0.851806
| 0.367818
| 0
| 0.071429
| 0
| 0
| 0.132491
| 0.013929
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.089286
| 0.017857
| 0.482143
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
605ad59a9efe4d2c5632efa0fb33e3ddefc540bb
| 1,301
|
py
|
Python
|
game/player.py
|
b1naryth1ef/mmo
|
400f66b0ac76896af2d7108ff3540c42614a32f0
|
[
"BSD-2-Clause"
] | 7
|
2015-09-29T13:32:36.000Z
|
2021-06-22T19:24:01.000Z
|
game/player.py
|
b1naryth1ef/mmo
|
400f66b0ac76896af2d7108ff3540c42614a32f0
|
[
"BSD-2-Clause"
] | null | null | null |
game/player.py
|
b1naryth1ef/mmo
|
400f66b0ac76896af2d7108ff3540c42614a32f0
|
[
"BSD-2-Clause"
] | 1
|
2019-03-03T23:24:28.000Z
|
2019-03-03T23:24:28.000Z
|
from sprites import PlayerSprite
import time
class Player(object):
def __init__(self, name, game):
self.name = name
self.pos = [50, 50]
self.do_blit = False
self.game = game
self.surf = game.SCREEN
self.lastMove = 99999999999
self.velo_def = [0, 0]
self.velo_x = 0
self.velo_y = 0
self.sprite = PlayerSprite(self)
self.moving = [False, False, False, False]
def tick(self):
if self.do_blit:
self.game.reDraw = True
self.sprite.display(self.surf.screen)
#self.surface.screen.blit(self.image, self.pos)
self.do_blit = False
# print(self.lastMove - time.time())
if True in self.moving and abs(self.lastMove - time.time()) >= .08:
self.lastMove = time.time()
if self.moving[0]: self.move(x=-1)  # up
if self.moving[1]: self.move(x=1)  # down
if self.moving[2]: self.move(y=-1)  # left
if self.moving[3]: self.move(y=1)  # right
def move(self, x=0, y=0):
self.pos[1]+=x*10
self.pos[0]+=y*10
self.do_blit = True
if y < 0 and self.sprite.dir == 1:
self.sprite.flip()
elif y > 0 and self.sprite.dir == -1:
self.sprite.flip()
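# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of the intended game-loop interaction. FakeGame is a
# hypothetical stand-in; constructing a real Player still needs the sprites
# module and a drawable surface, so the lines stay commented out.
# class FakeGame(object):
#     SCREEN = None
#     reDraw = False
# player = Player("hero", FakeGame())
# player.moving[3] = True   # hold "right"
# player.tick()             # shifts pos[0] by +10 once 80 ms have elapsed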
| 30.255814
| 75
| 0.544965
| 186
| 1,301
| 3.752688
| 0.274194
| 0.08596
| 0.057307
| 0.08596
| 0.157593
| 0.094556
| 0.094556
| 0.094556
| 0.094556
| 0.094556
| 0
| 0.046591
| 0.323597
| 1,301
| 43
| 76
| 30.255814
| 0.746591
| 0.071483
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.058824
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
605b1532a73c491b1c591dcd0c51687f13109748
| 1,019
|
py
|
Python
|
toys/layers/pool.py
|
cbarrick/toys
|
0368036ddb7594c0b6e7cdc704aeec918786e58a
|
[
"MIT"
] | 1
|
2018-04-28T18:29:37.000Z
|
2018-04-28T18:29:37.000Z
|
toys/layers/pool.py
|
cbarrick/csb
|
0368036ddb7594c0b6e7cdc704aeec918786e58a
|
[
"MIT"
] | null | null | null |
toys/layers/pool.py
|
cbarrick/csb
|
0368036ddb7594c0b6e7cdc704aeec918786e58a
|
[
"MIT"
] | null | null | null |
from typing import Sequence
import torch
from torch import nn
class MaxPool2d(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size,
stride=stride, padding=padding, dilation=dilation,
return_indices=return_indices, ceil_mode=ceil_mode)
def forward(self, x):
(*batch, height, width, channels) = x.shape
x = x.view(-1, height, width, channels)
x = torch.einsum('nhwc->nchw', [x])
x = self.pool(x)
x = torch.einsum('nchw->nhwc', [x])
(_, new_height, new_width, _) = x.shape
x = x.contiguous()
x = x.view(*batch, new_height, new_width, channels)
return x
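# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example showing the channels-last contract of the wrapper above:
# tensors are (*batch, height, width, channels) on both sides, unlike the
# channels-first nn.MaxPool2d it delegates to internally.
pool = MaxPool2d(kernel_size=2)
x_in = torch.randn(8, 32, 32, 3)      # batch of 8 HWC feature maps
x_out = pool(x_in)
assert x_out.shape == (8, 16, 16, 3)  # spatial dims halved, channels kept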
| 33.966667
| 67
| 0.62316
| 125
| 1,019
| 4.88
| 0.328
| 0.131148
| 0.062295
| 0.065574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006527
| 0.248283
| 1,019
| 29
| 68
| 35.137931
| 0.789817
| 0
| 0
| 0
| 0
| 0
| 0.062807
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.125
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
605ed3488c51cb7e0a5749161c5e9f3896da6586
| 1,792
|
py
|
Python
|
fastseg/model/utils.py
|
SeockHwa/Segmentation_mobileV3
|
01d90eeb32232346b8ed071eaf5d03322049be11
|
[
"MIT"
] | 274
|
2020-08-12T00:29:30.000Z
|
2022-03-29T18:24:40.000Z
|
fastseg/model/utils.py
|
dcmartin/fastseg
|
c30759e07a52c7370eda11a93396c79f2b141778
|
[
"MIT"
] | 10
|
2020-08-13T06:15:14.000Z
|
2021-03-30T16:12:31.000Z
|
fastseg/model/utils.py
|
dcmartin/fastseg
|
c30759e07a52c7370eda11a93396c79f2b141778
|
[
"MIT"
] | 27
|
2020-08-12T00:29:21.000Z
|
2021-12-09T02:32:36.000Z
|
import torch.nn as nn
from .efficientnet import EfficientNet_B4, EfficientNet_B0
from .mobilenetv3 import MobileNetV3_Large, MobileNetV3_Small
def get_trunk(trunk_name):
"""Retrieve the pretrained network trunk and channel counts"""
if trunk_name == 'efficientnet_b4':
backbone = EfficientNet_B4(pretrained=True)
s2_ch = 24
s4_ch = 32
high_level_ch = 1792
elif trunk_name == 'efficientnet_b0':
backbone = EfficientNet_B0(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 1280
elif trunk_name == 'mobilenetv3_large':
backbone = MobileNetV3_Large(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 960
elif trunk_name == 'mobilenetv3_small':
backbone = MobileNetV3_Small(pretrained=True)
s2_ch = 16
s4_ch = 16
high_level_ch = 576
else:
raise ValueError('unknown backbone {}'.format(trunk_name))
return backbone, s2_ch, s4_ch, high_level_ch
class ConvBnRelu(nn.Module):
"""Convenience layer combining a Conv2d, BatchNorm2d, and a ReLU activation.
Original source of this code comes from
https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py
"""
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
norm_layer=nn.BatchNorm2d):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=padding, bias=False)
self.bn = norm_layer(out_planes, eps=1e-5)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
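# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of wiring the helpers above together; it assumes the
# pretrained MobileNetV3 weights can be downloaded in your environment.
backbone, s2_ch, s4_ch, high_level_ch = get_trunk('mobilenetv3_large')
fuse = ConvBnRelu(in_planes=s2_ch, out_planes=64, kernel_size=3, padding=1)
# fuse maps (N, 16, H, W) low-level features to (N, 64, H, W)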
| 35.137255
| 84
| 0.651786
| 235
| 1,792
| 4.719149
| 0.408511
| 0.048693
| 0.049594
| 0.064923
| 0.13706
| 0.13706
| 0.088368
| 0.066727
| 0.066727
| 0.066727
| 0
| 0.045865
| 0.257813
| 1,792
| 50
| 85
| 35.84
| 0.78797
| 0.140625
| 0
| 0.125
| 0
| 0
| 0.054749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.075
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
606417a48449b07f2cec077fb5c3441648a8cb09
| 30,091
|
py
|
Python
|
echopype/model/modelbase.py
|
leewujung/echopype-lfs-test
|
b76dcf42631d0ac9cef0efeced9be4afdc15e659
|
[
"Apache-2.0"
] | null | null | null |
echopype/model/modelbase.py
|
leewujung/echopype-lfs-test
|
b76dcf42631d0ac9cef0efeced9be4afdc15e659
|
[
"Apache-2.0"
] | null | null | null |
echopype/model/modelbase.py
|
leewujung/echopype-lfs-test
|
b76dcf42631d0ac9cef0efeced9be4afdc15e659
|
[
"Apache-2.0"
] | null | null | null |
"""
echopype data model that keeps tracks of echo data and
its connection to data files.
"""
import os
import warnings
import datetime as dt
from echopype.utils import uwa
import numpy as np
import xarray as xr
class ModelBase(object):
"""Class for manipulating echo data that is already converted to netCDF."""
def __init__(self, file_path=""):
self.file_path = file_path # this passes the input through file name test
self.noise_est_range_bin_size = 5 # meters per tile for noise estimation
self.noise_est_ping_size = 30 # number of pings per tile for noise estimation
self.MVBS_range_bin_size = 5 # meters per tile for MVBS
self.MVBS_ping_size = 30 # number of pings per tile for MVBS
self.Sv = None # calibrated volume backscattering strength
self.Sv_path = None # path to save calibrated results
self.Sv_clean = None # denoised volume backscattering strength
self.TS = None # calibrated target strength
self.TS_path = None # path to save TS calculation results
self.MVBS = None # mean volume backscattering strength
self._salinity = None
self._temperature = None
self._pressure = None
self._sound_speed = None
self._sample_thickness = None
self._range = None
self._seawater_absorption = None
@property
def salinity(self):
return self._salinity
@salinity.setter
def salinity(self, sal):
self._salinity = sal
@property
def pressure(self):
return self._pressure
@pressure.setter
def pressure(self, pres):
self._pressure = pres
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, t):
self._temperature = t
@property
def sample_thickness(self):
return self._sample_thickness
@sample_thickness.setter
def sample_thickness(self, sth):
self._sample_thickness = sth
@property
def range(self):
return self._range
@range.setter
def range(self, rr):
self._range = rr
@property
def seawater_absorption(self):
return self._seawater_absorption
@seawater_absorption.setter
def seawater_absorption(self, absorption):
self._seawater_absorption.values = absorption
@property
def sound_speed(self):
return self._sound_speed
@sound_speed.setter
def sound_speed(self, ss):
if isinstance(self._sound_speed, xr.DataArray):
self._sound_speed.values = ss
else:
self._sound_speed = ss
@property
def file_path(self):
return self._file_path
@file_path.setter
def file_path(self, p):
self._file_path = p
# Load netCDF groups if file format is correct
pp = os.path.basename(p)
_, ext = os.path.splitext(pp)
supported_ext_list = ['.raw', '.01A']
if ext in supported_ext_list:
print('Data file in manufacturer format, please convert to .nc first.')
elif ext == '.nc':
self.toplevel = xr.open_dataset(self.file_path)
# Get .nc filenames for storing processed data if computation is performed
self.Sv_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv.nc')
self.Sv_clean_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv_clean.nc')
self.TS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_TS.nc')
self.MVBS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_MVBS.nc')
# Raise error if the file format convention does not match
if self.toplevel.sonar_convention_name != 'SONAR-netCDF4':
raise ValueError('netCDF file convention not recognized.')
self.toplevel.close()
else:
raise ValueError('Data file format not recognized.')
def calc_sound_speed(self, src='file'):
"""Base method to be overridden for calculating sound_speed for different sonar models
"""
# issue warning when subclass methods not available
print("Sound speed calculation has not been implemented for this sonar model!")
def calc_seawater_absorption(self, src='file'):
"""Base method to be overridden for calculating seawater_absorption for different sonar models
"""
# issue warning when subclass methods not available
print("Seawater absorption calculation has not been implemented for this sonar model!")
def calc_sample_thickness(self):
"""Base method to be overridden for calculating sample_thickness for different sonar models.
"""
# issue warning when subclass methods not available
print('Sample thickness calculation has not been implemented for this sonar model!')
def calc_range(self):
"""Base method to be overridden for calculating range for different sonar models.
"""
# issue warning when subclass methods not available
print('Range calculation has not been implemented for this sonar model!')
def recalculate_environment(self, ss=True, sa=True, st=True, r=True):
""" Recalculates sound speed, seawater absorption, sample thickness, and range using
salinity, temperature, and pressure
Parameters
----------
ss : bool
Whether to calculate sound speed. Defaults to `True`
sa : bool
Whether to calculate seawater absorption. Defaults to `True`
st : bool
Whether to calculate sample thickness. Defaults to `True`
r : bool
Whether to calculate range. Defaults to `True`
"""
s, t, p = self.salinity, self.temperature, self.pressure
if s is not None and t is not None and p is not None:
if ss:
self.sound_speed = self.calc_sound_speed(src='user')
if sa:
self.seawater_absorption = self.calc_seawater_absorption(src='user')
if st:
self.sample_thickness = self.calc_sample_thickness()
if r:
self.range = self.calc_range()
elif s is None:
print("Salinity was not provided. Environment was not recalculated")
elif t is None:
print("Temperature was not provided. Environment was not recalculated")
else:
print("Pressure was not provided. Environment was not recalculated")
def calibrate(self):
"""Base method to be overridden for volume backscatter calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Calibration has not been implemented for this sonar model!')
def calibrate_TS(self):
"""Base method to be overridden for target strength calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Target strength calibration has not been implemented for this sonar model!')
def validate_path(self, save_path, save_postfix):
"""Creates a directory if it doesnt exist. Returns a valid save path.
"""
def _assemble_path():
file_in = os.path.basename(self.file_path)
file_name, file_ext = os.path.splitext(file_in)
return file_name + save_postfix + file_ext
if save_path is None:
save_dir = os.path.dirname(self.file_path)
file_out = _assemble_path()
else:
path_ext = os.path.splitext(save_path)[1]
# If given save_path is file, split into directory and file
if path_ext != '':
save_dir, file_out = os.path.split(save_path)
if save_dir == '': # save_path is only a filename without directory
save_dir = os.path.dirname(self.file_path) # use directory from input file
# If given save_path is a directory, get a filename from input .nc file
else:
save_dir = save_path
file_out = _assemble_path()
# Create folder if not already exists
if save_dir == '':
# TODO: should we use '.' instead of os.getcwd()?
save_dir = os.getcwd() # explicit about path to current directory
if not os.path.exists(save_dir):
os.mkdir(save_dir)
return os.path.join(save_dir, file_out)
@staticmethod
def get_tile_params(r_data_sz, p_data_sz, r_tile_sz, p_tile_sz, sample_thickness):
"""Obtain ping_time and range_bin parameters associated with groupby and groupby_bins operations.
These parameters are used in methods remove_noise(), noise_estimates(), get_MVBS().
Parameters
----------
r_data_sz : int
number of range_bin entries in data
p_data_sz : int
number of ping_time entries in data
r_tile_sz : float
tile size along the range_bin dimension [m]
p_tile_sz : int
tile size along the ping_time dimension [number of pings]
sample_thickness : float
thickness of each data sample, determined by sound speed and pulse duration
Returns
-------
r_tile_sz : int
modified tile size along the range dimension [m], determined by sample_thickness
r_tile_bin_edge : list of int
bin edges along the range_bin dimension for :py:func:`xarray.DataArray.groupby_bins` operation
p_tile_bin_edge : list of int
bin edges along the ping_time dimension for :py:func:`xarray.DataArray.groupby_bins` operation
"""
# Adjust noise_est_range_bin_size because range_bin_size may be an inconvenient value
num_r_per_tile = np.round(r_tile_sz / sample_thickness).astype(int) # num of range_bin per tile
r_tile_sz = num_r_per_tile * sample_thickness
# Total number of range_bin and ping tiles
num_tile_range_bin = np.ceil(r_data_sz / num_r_per_tile).astype(int)
if np.mod(p_data_sz, p_tile_sz) == 0:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int) + 1
else:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int)
# Tile bin edges along range
# ... -1 to make sure each bin has the same size because of the right-inclusive and left-exclusive bins
r_tile_bin_edge = [np.arange(x.values + 1) * y.values - 1 for x, y in zip(num_tile_range_bin, num_r_per_tile)]
p_tile_bin_edge = np.arange(num_tile_ping + 1) * p_tile_sz - 1
return r_tile_sz, r_tile_bin_edge, p_tile_bin_edge
def _get_proc_Sv(self, source_path=None, source_postfix='_Sv'):
"""Private method to return calibrated Sv either from memory or _Sv.nc file.
This method is called by remove_noise(), noise_estimates() and get_MVBS().
"""
if self.Sv is None: # calibration not yet performed
Sv_path = self.validate_path(save_path=source_path, # wrangle _Sv path
save_postfix=source_postfix)
if os.path.exists(Sv_path): # _Sv exists
self.Sv = xr.open_dataset(Sv_path) # load _Sv file
else:
# if a path specification was given but the file does not exist:
if (source_path is not None) or (source_postfix != '_Sv'):
print('%s no calibrated data found in specified path: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), Sv_path))
else:
print('%s data has not been calibrated. ' % dt.datetime.now().strftime('%H:%M:%S'))
print(' performing calibration now and operate from Sv in memory.')
self.calibrate() # calibrate, have Sv in memory
return self.Sv
def remove_noise(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None,
SNR=0, Sv_threshold=None,
save=False, save_postfix='_Sv_clean', save_path=None):
"""Remove noise by using noise estimates obtained from the minimum mean calibrated power level
along each column of tiles.
See method noise_estimates() for details of noise estimation.
Reference: De Robertis & Higginbottom, 2017, ICES Journal of Marine Sciences
Parameters
----------
source_postfix : str
postfix of the Sv file used to remove noise from, default to '_Sv'
source_path : str
path of Sv file used to remove noise from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float, optional
Meters per tile for noise estimation [m]
noise_est_ping_size : int, optional
Number of pings per tile for noise estimation
SNR : int, optional
Minimum signal-to-noise ratio (remove values below this after general noise removal).
Sv_threshold : int, optional
Minimum Sv threshold [dB] (remove values below this after general noise removal)
save : bool, optional
Whether to save the denoised Sv (``Sv_clean``) into a new .nc file.
Default to ``False``.
save_postfix : str
Filename postfix, default to '_Sv_clean'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_Sv_clean.nc default
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Get calibrated Sv
if self.Sv is not None:
print('%s Remove noise from Sv stored in memory.' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
print('%s Remove noise from Sv stored in: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Get TVG and ABS for compensating for transmission loss
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Function for use with apply
def remove_n(x, rr):
p_c_lin = 10 ** ((x.Sv - x.ABS - x.TVG) / 10)
nn = 10 * np.log10(p_c_lin.mean(dim='ping_time').groupby_bins('range_bin', rr).mean().min(
dim='range_bin_bins')) + x.ABS + x.TVG
# Return values where signal is [SNR] dB above noise and at least [Sv_threshold] dB
if not Sv_threshold:
return x.Sv.where(x.Sv > (nn + SNR), other=np.nan)
else:
return x.Sv.where((x.Sv > (nn + SNR)) & (x > Sv_threshold), other=np.nan)
# Groupby noise removal operation
proc_data.coords['ping_idx'] = ('ping_time', np.arange(proc_data.Sv['ping_time'].size))
ABS.name = 'ABS'
TVG.name = 'TVG'
pp = xr.merge([proc_data, ABS])
pp = xr.merge([pp, TVG])
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
Sv_clean = pp.groupby_bins('ping_idx', ping_tile_bin_edge).\
map(remove_n, rr=range_bin_tile_bin_edge[0])
Sv_clean = Sv_clean.drop_vars(['ping_idx'])
else:
tmp_clean = []
cnt = 0
for key, val in pp.groupby('frequency'): # iterate over different frequency channel
tmp = val.groupby_bins('ping_idx', ping_tile_bin_edge). \
map(remove_n, rr=range_bin_tile_bin_edge[cnt])
cnt += 1
tmp_clean.append(tmp)
clean_val = np.array([zz.values for zz in xr.align(*tmp_clean, join='outer')])
Sv_clean = xr.DataArray(clean_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_clean[0]['ping_time'].values,
'range_bin': tmp_clean[0]['range_bin'].values},
dims=['frequency', 'ping_time', 'range_bin'])
# Set up DataSet
Sv_clean.name = 'Sv'
Sv_clean = Sv_clean.to_dataset()
Sv_clean['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
Sv_clean.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Attach calculated range into data set
Sv_clean['range'] = (('frequency', 'range_bin'), self.range.T)
# Save results as an object attribute (and optionally to a netCDF file below)
self.Sv_clean = Sv_clean
# TODO: now adding the below so that MVBS can be calculated directly
# from the cleaned Sv without saving and loading Sv_clean from disk.
# However this is not explicit to the user. A better way to do this
# is to change get_MVBS() to first check existence of self.Sv_clean
# when `_Sv_clean` is specified as the source_postfix.
if not print_src: # remove noise from Sv stored in memory
self.Sv = Sv_clean.copy()
if save:
self.Sv_clean_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving denoised Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_clean_path))
Sv_clean.to_netcdf(self.Sv_clean_path)
# Close opened resources
proc_data.close()
def noise_estimates(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None):
"""Obtain noise estimates from the minimum mean calibrated power level along each column of tiles.
The tiles here are defined by class attributes noise_est_range_bin_size and noise_est_ping_size.
This method contains redundant pieces of code that also appear in method remove_noise(),
but this method can be used separately to determine the exact tile size for noise removal before
noise removal is actually performed.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate noise estimates from, default to '_Sv'
source_path : str
path of Sv file used to calculate noise estimates from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float
meters per tile for noise estimation [m]
noise_est_ping_size : int
number of pings per tile for noise estimation
Returns
-------
noise_est : xarray DataSet
noise estimates as a DataArray with dimension [ping_time x range_bin]
ping_time and range_bin are taken from the first element of each tile along each of the dimensions
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Use calibrated data to calculate noise removal
proc_data = self._get_proc_Sv()
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Values for noise estimates
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Noise estimates
proc_data['power_cal'] = 10 ** ((proc_data.Sv - ABS - TVG) / 10)
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
noise_est = 10 * np.log10(proc_data['power_cal'].coarsen(
ping_time=self.noise_est_ping_size,
range_bin=int(np.unique(self.noise_est_range_bin_size / self.sample_thickness)),
boundary='pad').mean().min(dim='range_bin'))
else:
range_bin_coarsen_idx = (self.noise_est_range_bin_size / self.sample_thickness).astype(int)
tmp_noise = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(proc_data['power_cal'].sel(frequency=freq).coarsen(
ping_time=self.noise_est_ping_size,
range_bin=r_bin.values,
boundary='pad').mean().min(dim='range_bin'))
tmp_da.name = 'noise_est'
tmp_noise.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
noise_val = np.array([zz.values for zz in xr.align(*tmp_noise, join='outer')])
noise_est = xr.DataArray(noise_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_noise[0]['ping_time'].values},
dims=['frequency', 'ping_time'])
noise_est = noise_est.to_dataset(name='noise_est')
noise_est['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
noise_est.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Close opened resources
proc_data.close()
return noise_est
def get_MVBS(self, source_postfix='_Sv', source_path=None,
MVBS_range_bin_size=None, MVBS_ping_size=None,
save=False, save_postfix='_MVBS', save_path=None):
"""Calculate Mean Volume Backscattering Strength (MVBS).
The calculation uses class attributes MVBS_ping_size and MVBS_range_bin_size to
calculate and save MVBS as a new attribute to the calling EchoData instance.
MVBS is an xarray DataArray with dimensions ``ping_time`` and ``range_bin``
that are from the first elements of each tile along the corresponding dimensions
in the original Sv or Sv_clean DataArray.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate MVBS, default to '_Sv'
source_path : str
path of Sv file used to calculate MVBS, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
MVBS_range_bin_size : float, optional
meters per tile for calculating MVBS [m]
MVBS_ping_size : int, optional
number of pings per tile for calculating MVBS
save : bool, optional
whether to save the calculated MVBS into a new .nc file, default to ``False``
save_postfix : str
Filename postfix, default to '_MVBS'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_MVBS.nc default
"""
# Check params
if (MVBS_range_bin_size is not None) and (self.MVBS_range_bin_size != MVBS_range_bin_size):
self.MVBS_range_bin_size = MVBS_range_bin_size
if (MVBS_ping_size is not None) and (self.MVBS_ping_size != MVBS_ping_size):
self.MVBS_ping_size = MVBS_ping_size
# Get Sv by validating path and calibrate if not already done
if self.Sv is not None:
print('%s use Sv stored in memory to calculate MVBS' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
if self.Sv_path is not None:
print('%s Sv source used to calculate MVBS: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
else:
print('%s Sv source used to calculate MVBS: memory' %
dt.datetime.now().strftime('%H:%M:%S'))
# Get tile indexing parameters
self.MVBS_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.MVBS_range_bin_size,
p_tile_sz=self.MVBS_ping_size,
sample_thickness=self.sample_thickness)
# Calculate MVBS
Sv_linear = 10 ** (proc_data.Sv / 10) # convert to linear domain before averaging
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
MVBS = 10 * np.log10(Sv_linear.coarsen(
ping_time=self.MVBS_ping_size,
range_bin=int(np.unique(self.MVBS_range_bin_size / self.sample_thickness)),
boundary='pad').mean())
MVBS.coords['range_bin'] = ('range_bin', np.arange(MVBS['range_bin'].size))
else:
range_bin_coarsen_idx = (self.MVBS_range_bin_size / self.sample_thickness).astype(int)
tmp_MVBS = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(Sv_linear.sel(frequency=freq).coarsen(
ping_time=self.MVBS_ping_size,
range_bin=r_bin.values,
boundary='pad').mean())
tmp_da.coords['range_bin'] = ('range_bin', np.arange(tmp_da['range_bin'].size))
tmp_da.name = 'MVBS'
tmp_MVBS.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
MVBS_val = np.array([zz.values for zz in xr.align(*tmp_MVBS, join='outer')])
MVBS = xr.DataArray(MVBS_val,
coords={'frequency': Sv_linear['frequency'].values,
'ping_time': tmp_MVBS[0]['ping_time'].values,
'range_bin': np.arange(MVBS_val.shape[2])},
dims=['frequency', 'ping_time', 'range_bin']).dropna(dim='range_bin', how='all')
# Set MVBS attributes
MVBS.name = 'MVBS'
MVBS = MVBS.to_dataset()
MVBS['MVBS_range_bin_size'] = ('frequency', self.MVBS_range_bin_size)
MVBS.attrs['MVBS_ping_size'] = self.MVBS_ping_size
# Save results in object and as a netCDF file
self.MVBS = MVBS
if save:
self.MVBS_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving MVBS to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.MVBS_path))
MVBS.to_netcdf(self.MVBS_path)
# Close opened resources
proc_data.close()
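# --- Illustrative usage sketch (not part of the original file) ---
# A hedged outline of the intended workflow on a concrete subclass of
# ModelBase (one that overrides the calc_* methods); the class and file
# names below are hypothetical.
# ed = ModelEK60('./converted_data.nc')      # .nc produced by a prior conversion
# ed.calibrate()                             # populates ed.Sv
# ed.remove_noise(save=True)                 # writes RAWFILENAME_Sv_clean.nc
# ed.get_MVBS(MVBS_ping_size=20, save=True)  # writes RAWFILENAME_MVBS.nc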
| 47.763492
| 123
| 0.618557
| 4,041
| 30,091
| 4.377629
| 0.112348
| 0.042058
| 0.032561
| 0.024421
| 0.548785
| 0.501583
| 0.481402
| 0.439288
| 0.396099
| 0.364613
| 0
| 0.003939
| 0.299691
| 30,091
| 629
| 124
| 47.839428
| 0.835524
| 0.329534
| 0
| 0.269341
| 0
| 0
| 0.102349
| 0.002517
| 0
| 0
| 0
| 0.006359
| 0
| 1
| 0.091691
| false
| 0
| 0.017192
| 0.022923
| 0.154728
| 0.077364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60648a56773ceecb201aec8a10a45d6b2f493b08
| 2,755
|
py
|
Python
|
Python/face_detect_camera/managers.py
|
abondar24/OpenCVBase
|
9b23e3b31304e77ad1135d90efb41e3dc069194a
|
[
"Apache-2.0"
] | null | null | null |
Python/face_detect_camera/managers.py
|
abondar24/OpenCVBase
|
9b23e3b31304e77ad1135d90efb41e3dc069194a
|
[
"Apache-2.0"
] | null | null | null |
Python/face_detect_camera/managers.py
|
abondar24/OpenCVBase
|
9b23e3b31304e77ad1135d90efb41e3dc069194a
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
import time
class CaptureManager(object):
def __init__(self, capture, preview_window_manager=None, should_mirror_preview = False):
self.preview_window_manager = preview_window_manager
self.should_mirror_preview = should_mirror_preview
self._capture = capture
self._channel = 0
self._entered_frame = False
self._frame = None
self._frames_elapsed = 0  # plain int; Python 3 removed long()
self._fps_est = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def frame(self):
if self._entered_frame and self._frame is None:
_, self._frame = self._capture.retrieve(channel=self.channel)
return self._frame
def enter_frame(self):
# capture the next frame
assert not self._entered_frame, 'previous enter_frame() had no matching exit_frame()'
if self._capture is not None:
self._entered_frame = self._capture.grab()
def exit_frame(self):
# draw to window, write to files, release the frame
# frame is retrievable or not
if self.frame is None:
self._entered_frame = False
return
if self._frames_elapsed == 0:
self._start_time = time.time()
else:
time_elapsed = time.time() - self._start_time
self._fps_est = self._frames_elapsed / time_elapsed
self._frames_elapsed += 1
# draw
if self.preview_window_manager is not None:
if self.should_mirror_preview:
mirrored_frame = np.fliplr(self._frame).copy()
self.preview_window_manager.show(mirrored_frame)
else:
self.preview_window_manager.show(self._frame)
# release the frame
self._frame = None
self._entered_frame = False
class WindowManager(object):
def __init__(self, window_name, keypress_callback = None):
self.keypress_callback = keypress_callback
self._window_name = window_name
self._is_window_created = False
@property
def is_window_created(self):
return self._is_window_created
def create_window(self):
cv2.namedWindow(self._window_name)
self._is_window_created = True
def show(self, frame):
cv2.imshow(self._window_name, frame)
def destroy_window(self):
cv2.destroyWindow(self._window_name)
self._is_window_created = False
def process_events(self):
keykode = cv2.waitKey(1)
if self.keypress_callback is not None and keykode != -1:
keykode &= 0xFF
self.keypress_callback(keykode)
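# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of the enter/exit frame protocol defined above, wired to
# the default webcam; ESC closes the preview window.
if __name__ == '__main__':
    window = WindowManager(
        'preview',
        keypress_callback=lambda k: window.destroy_window() if k == 27 else None)
    window.create_window()
    capture = CaptureManager(cv2.VideoCapture(0), window, should_mirror_preview=True)
    while window.is_window_created:
        capture.enter_frame()
        capture.exit_frame()    # shows the mirrored frame, updates the FPS estimate
        window.process_events()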
| 28.697917
| 93
| 0.642468
| 334
| 2,755
| 4.967066
| 0.230539
| 0.048825
| 0.072333
| 0.057866
| 0.18264
| 0.100663
| 0.063291
| 0
| 0
| 0
| 0
| 0.006082
| 0.283848
| 2,755
| 95
| 94
| 29
| 0.834769
| 0.044646
| 0
| 0.238806
| 0
| 0
| 0.019421
| 0
| 0
| 0
| 0.001523
| 0
| 0.014925
| 1
| 0.179104
| false
| 0
| 0.044776
| 0.044776
| 0.328358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6064dc0f50d6a2d8e20ae50d87b6b6f9606110f6
| 6,937
|
py
|
Python
|
ELLA/ELLA.py
|
micaelverissimo/lifelong_ringer
|
d2e7173ce08d1c087e811f6451cae1cb0e381076
|
[
"MIT"
] | null | null | null |
ELLA/ELLA.py
|
micaelverissimo/lifelong_ringer
|
d2e7173ce08d1c087e811f6451cae1cb0e381076
|
[
"MIT"
] | null | null | null |
ELLA/ELLA.py
|
micaelverissimo/lifelong_ringer
|
d2e7173ce08d1c087e811f6451cae1cb0e381076
|
[
"MIT"
] | null | null | null |
""" Alpha version of a version of ELLA that plays nicely with sklearn
@author: Paul Ruvolo
"""
from math import log
import numpy as np
from scipy.special import logsumexp
from scipy.linalg import sqrtm, inv, norm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression, Lasso
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, explained_variance_score
class ELLA(object):
""" The ELLA model """
def __init__(self, d, k, base_learner, base_learner_kwargs = {}, mu = 1, lam = 1, k_init = False):
""" Initializes a new model for the given base_learner.
d: the number of parameters for the base learner
k: the number of latent model components
base_learner: the base learner to use (currently can only be
LinearRegression, Ridge, or LogisticRegression).
base_learner_kwargs: keyword arguments to base learner (for instance to
specify regularization strength)
mu: the L_1 sparsity penalty on the task-specific coefficients
lam: the L_2 penalty on the latent basis L
k_init: if True, each of the first k tasks initializes one latent component
NOTE: currently only binary logistic regression is supported
"""
self.d = d
self.k = k
self.L = np.random.randn(d,k)
self.A = np.zeros((d * k, d * k))
self.b = np.zeros((d * k, 1))
self.S = np.zeros((k, 0))
self.T = 0
self.mu = mu
self.lam = lam
self.k_init = k_init
if base_learner in [LinearRegression, Ridge]:
self.perf_metric = explained_variance_score
elif base_learner in [LogisticRegression]:
self.perf_metric = accuracy_score
else:
raise Exception("Unsupported Base Learner")
self.base_learner = base_learner
self.base_learner_kwargs = base_learner_kwargs
def fit(self, X, y, task_id):
""" Fit the model to a new batch of training data. The task_id must
start at 0 and increase by one each time this function is called.
Currently you cannot add new data to old tasks.
X: the training data
y: the training labels
task_id: the id of the task
"""
self.T += 1
single_task_model = self.base_learner(fit_intercept = False, **self.base_learner_kwargs).fit(X, y)
D_t = self.get_hessian(single_task_model, X, y)
D_t_sqrt = sqrtm(D_t)
theta_t = single_task_model.coef_
sparse_encode = Lasso(alpha = self.mu / (X.shape[0] * 2.0),
fit_intercept = False, tol=1e9, max_iter=50000).fit(D_t_sqrt.dot(self.L),
D_t_sqrt.dot(theta_t.T))
if self.k_init and task_id < self.k:
sparse_coeffs = np.zeros((self.k,))
sparse_coeffs[task_id] = 1.0
else:
sparse_coeffs = sparse_encode.coef_
self.S = np.hstack((self.S, np.matrix(sparse_coeffs).T))
self.A += np.kron(self.S[:,task_id].dot(self.S[:,task_id].T), D_t)
self.b += np.kron(self.S[:,task_id].T, np.mat(theta_t).dot(D_t)).T
L_vectorized = inv(self.A / self.T + self.lam * np.eye(self.d * self.k, self.d * self.k)).dot(self.b) / self.T
self.L = L_vectorized.reshape((self.k, self.d)).T
self.revive_dead_components()
def revive_dead_components(self):
""" re-initailizes any components that have decayed to 0 """
for i,val in enumerate(np.sum(self.L, axis = 0)):
if abs(val) < 10 ** -8:
self.L[:, i] = np.random.randn(self.d,)
def predict(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. If using a continuous model (Ridge and LinearRegression)
the result is the prediction. If using a classification model
(LogisticRegression) the output is a thresholded class prediction.
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
return X.dot(self.L.dot(self.S[:, task_id]))
elif self.base_learner == LogisticRegression:
return 1. / (1.0 + np.exp(-X.dot(self.L.dot(self.S[:, task_id])))) > 0.5
def predict_probs(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. Continuous models (Ridge and LinearRegression) are not
supported here; for a classification model (LogisticRegression) the
output is a probability.
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
raise Exception("This base learner does not support predicting probabilities")
elif self.base_learner == LogisticRegression:
return np.exp(self.predict_logprobs(X, task_id))
def predict_logprobs(self, X, task_id):
""" Output ELLA's predictions for the specified data on the specified
task_id. Continuous models (Ridge and LinearRegression) are not
supported here; for a classification model (LogisticRegression) the
output is a log probability.
"""
if self.base_learner == LinearRegression or self.base_learner == Ridge:
raise Exception("This base learner does not support predicting probabilities")
elif self.base_learner == LogisticRegression:
return -logsumexp(np.hstack((np.zeros((X.shape[0], 1)), -X.dot(self.L.dot(self.S[:, task_id])))), axis = 1)
def score(self, X, y, task_id):
""" Output the score for ELLA's model on the specified testing data.
If using a continuous model (Ridge and LinearRegression)
the score is explained variance. If using a classification model
(LogisticRegression) the score is accuracy.
"""
return self.perf_metric(self.predict(X, task_id), y)
def get_hessian(self, model, X, y):
""" ELLA requires that each single task learner provide the Hessian
of the loss function evaluated around the optimal single task
parameters. This function implements this for the base learners
that are currently supported """
theta_t = model.coef_
if self.base_learner == LinearRegression:
return X.T.dot(X)/(2.0 * X.shape[0])
elif self.base_learner == Ridge:
return X.T.dot(X)/(2.0 * X.shape[0]) + model.alpha * np.eye(self.d, self.d)
elif self.base_learner == LogisticRegression:
preds = 1. / (1.0 + np.exp(-X.dot(theta_t.T)))
base = np.tile(preds * (1 - preds), (1, X.shape[1]))
hessian = (np.multiply(X, base)).T.dot(X) / (2.0 * X.shape[0])
return hessian + np.eye(self.d,self.d) / (2.0 * model.C)
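# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of lifelong learning with the class above on synthetic
# regression tasks; dimensions, task count, and noise level are arbitrary,
# and exact scores depend on your scipy/sklearn versions.
if __name__ == '__main__':
    d, k = 10, 3
    ella = ELLA(d, k, LinearRegression)
    for t in range(5):
        X = np.random.randn(100, d)
        y = X.dot(np.random.randn(d)) + 0.1 * np.random.randn(100)
        ella.fit(X, y, task_id=t)
        print('task %d explained variance: %.3f' % (t, ella.score(X, y, t)))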
| 49.198582
| 119
| 0.618135
| 974
| 6,937
| 4.285421
| 0.213552
| 0.081696
| 0.057499
| 0.015812
| 0.376378
| 0.327743
| 0.295879
| 0.291088
| 0.287734
| 0.259224
| 0
| 0.010459
| 0.283264
| 6,937
| 141
| 120
| 49.198582
| 0.829043
| 0.313824
| 0
| 0.139241
| 0
| 0
| 0.03324
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101266
| false
| 0
| 0.088608
| 0
| 0.303797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60652bf0a5fb58eb37f612eac700378eff72f02f
| 1,970
|
py
|
Python
|
webhook/utils.py
|
Myst1c-a/phen-cogs
|
672f9022ddbbd9a84b0a05357347e99e64a776fc
|
[
"MIT"
] | null | null | null |
webhook/utils.py
|
Myst1c-a/phen-cogs
|
672f9022ddbbd9a84b0a05357347e99e64a776fc
|
[
"MIT"
] | null | null | null |
webhook/utils.py
|
Myst1c-a/phen-cogs
|
672f9022ddbbd9a84b0a05357347e99e64a776fc
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
import discord
from redbot.core.commands import Context
USER_MENTIONS = discord.AllowedMentions.none()
USER_MENTIONS.users = True
WEBHOOK_RE = re.compile(
r"discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,21})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})"
)
async def _monkeypatch_send(ctx: Context, content: str = None, **kwargs) -> discord.Message:
self = ctx.bot.get_cog("Webhook")
original_kwargs = kwargs.copy()
try:
webhook = await self.get_webhook(ctx=ctx)
kwargs["username"] = ctx.author.display_name
kwargs["avatar_url"] = ctx.author.avatar_url
kwargs["wait"] = True
return await webhook.send(content, **kwargs)
except Exception:
return await super(Context, ctx).send(content, **original_kwargs)
class FakeResponse:
def __init__(self):
self.status = 403
self.reason = "Forbidden"
| 35.818182
| 96
| 0.740102
| 287
| 1,970
| 5.020906
| 0.56446
| 0.061069
| 0.018043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012891
| 0.173096
| 1,970
| 54
| 97
| 36.481481
| 0.8717
| 0.545178
| 0
| 0
| 0
| 0.043478
| 0.143018
| 0.100225
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.130435
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6066634d419973bf2d50293d1e8b24d66fea6c84
| 3,554
|
py
|
Python
|
feed/tests/test_consts.py
|
cul-it/arxiv-rss
|
40c0e859528119cc8ba3700312cb8df095d95cdd
|
[
"MIT"
] | 4
|
2020-06-29T15:05:37.000Z
|
2022-02-02T10:28:28.000Z
|
feed/tests/test_consts.py
|
arXiv/arxiv-feed
|
82923d062e2524df94c22490cf936a988559ce66
|
[
"MIT"
] | 12
|
2020-03-06T16:45:00.000Z
|
2022-03-02T15:36:14.000Z
|
feed/tests/test_consts.py
|
cul-it/arxiv-rss
|
40c0e859528119cc8ba3700312cb8df095d95cdd
|
[
"MIT"
] | 2
|
2020-12-06T16:30:06.000Z
|
2021-11-05T12:29:08.000Z
|
import pytest
from feed.consts import FeedVersion
from feed.utils import randomize_case
from feed.errors import FeedVersionError
# FeedVersion.supported
def test_feed_version_supported():
assert FeedVersion.supported() == {
FeedVersion.RSS_2_0,
FeedVersion.ATOM_1_0,
}
# FeedVersion.get
def test_feed_version_get_supported():
# RSS full version
assert (
FeedVersion.get(randomize_case(FeedVersion.RSS_2_0.lower()))
== FeedVersion.RSS_2_0
)
# RSS only number
assert FeedVersion.get("2.0") == FeedVersion.RSS_2_0
# Atom full version
assert (
FeedVersion.get(randomize_case(FeedVersion.ATOM_1_0.lower()))
== FeedVersion.ATOM_1_0
)
# Atom only number
assert FeedVersion.get("1.0", atom=True) == FeedVersion.ATOM_1_0
def test_feed_version_get_unsupported():
# RSS 0.91 full version
rss_0_91 = randomize_case(FeedVersion.RSS_0_91)
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(rss_0_91)
ex: FeedVersionError = excinfo.value
assert ex.version == rss_0_91
assert ex.supported == FeedVersion.supported()
# RSS 0.91 only number
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get("0.91")
ex: FeedVersionError = excinfo.value
assert ex.version == "RSS 0.91"
assert ex.supported == FeedVersion.supported()
# RSS 1.0 full version
rss_1_0 = randomize_case(FeedVersion.RSS_1_0)
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(rss_1_0)
ex: FeedVersionError = excinfo.value
assert ex.version == rss_1_0
assert ex.supported == FeedVersion.supported()
# RSS 1.0 only number
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get("1.0")
ex: FeedVersionError = excinfo.value
assert ex.version == "RSS 1.0"
assert ex.supported == FeedVersion.supported()
def test_feed_version_get_invalid():
# RSS
for version, test in [
("RSS 3.3", "3.3"),
("RSS 0.1", "0.1"),
("RSS 1.1", "RSS 1.1"),
("RSS 2.1", "RSS 2.1"),
]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(test)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
# Atom
for version, test, prefer in [
("Atom 0.1", "0.1", True),
("Atom 0.91", "0.91", True),
("Atom 2.0", "2.0", True),
("Atom 0.1", "Atom 0.1", False),
("Atom 0.91", "Atom 0.91", False),
("Atom 2.0", "Atom 2.0", False),
]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(test, atom=prefer)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
# Nonsense
for version in ["foo", "bar", "baz"]:
with pytest.raises(FeedVersionError) as excinfo:
FeedVersion.get(version)
ex: FeedVersionError = excinfo.value
assert ex.version == version
assert ex.supported == FeedVersion.supported()
def test_is_property():
# RSS
assert FeedVersion.RSS_0_91.is_rss
assert FeedVersion.RSS_1_0.is_rss
assert FeedVersion.RSS_2_0.is_rss
assert not FeedVersion.RSS_0_91.is_atom
assert not FeedVersion.RSS_1_0.is_atom
assert not FeedVersion.RSS_2_0.is_atom
# Atom
assert FeedVersion.ATOM_1_0.is_atom
assert not FeedVersion.ATOM_1_0.is_rss
| 27.765625
| 69
| 0.652786
| 469
| 3,554
| 4.780384
| 0.102345
| 0.016949
| 0.024086
| 0.099911
| 0.694023
| 0.58876
| 0.552632
| 0.521409
| 0.444692
| 0.340321
| 0
| 0.044936
| 0.236072
| 3,554
| 127
| 70
| 27.984252
| 0.780847
| 0.060777
| 0
| 0.337349
| 0
| 0
| 0.051174
| 0
| 0
| 0
| 0
| 0
| 0.325301
| 1
| 0.060241
| false
| 0
| 0.048193
| 0
| 0.108434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
606899616433fe3e3273bc5fab025d75f1c9d731
| 3,953
|
py
|
Python
|
turorials/Google/projects/01_02_TextClassification/01_02_main.py
|
Ubpa/LearnTF
|
2c9f5d790a9911a860da1e0db4c7bb56a9eee5cb
|
[
"MIT"
] | null | null | null |
turorials/Google/projects/01_02_TextClassification/01_02_main.py
|
Ubpa/LearnTF
|
2c9f5d790a9911a860da1e0db4c7bb56a9eee5cb
|
[
"MIT"
] | null | null | null |
turorials/Google/projects/01_02_TextClassification/01_02_main.py
|
Ubpa/LearnTF
|
2c9f5d790a9911a860da1e0db4c7bb56a9eee5cb
|
[
"MIT"
] | null | null | null |
#----------------
# 01_02 Text Classification
#----------------
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# TensorFlow's version : 1.12.0
print('TensorFlow\'s version : ', tf.__version__)
#----------------
# 1 Download the IMDB dataset
#----------------
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
#----------------
# 2 Explore the data
#----------------
# Training entries: 25000, labels: 25000
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
print(train_data[0])
# (218, 189)
print(len(train_data[0]), len(train_data[1]))
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
decode_review(train_data[0])
#----------------
# 3 Prepare the data
#----------------
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
# (256, 256)
print((len(train_data[0]), len(train_data[1])))
print(train_data[0])
#----------------
# 4 Build the model
#----------------
# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='binary_crossentropy',
metrics=['accuracy'])
#----------------
# 5 Create a validation set
#----------------
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
#----------------
# 6 Train the model
#----------------
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
#----------------
# 7 Evaluate the model
#----------------
results = model.evaluate(test_data, test_labels)
print(results)
#----------------
# 8 Plot accuracy and loss over time
#----------------
history_dict = history.history
# dict_keys(['loss', 'val_loss', 'val_acc', 'acc'])
print(history_dict.keys())
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# loss
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# acc
plt.clf() # clear figure
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
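#----------------
# 9 Illustrative inference sketch (not part of the original tutorial)
#----------------
# A hedged addition: score one held-out review with the trained model.
sample = test_data[:1]                  # already padded to length 256
prob = model.predict(sample)[0][0]      # sigmoid output in [0, 1]
print('positive sentiment probability: %.3f' % prob)
print(decode_review(test_data[0]))      # recover the words behind the indices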
| 24.251534 | 86 | 0.583607 | 491 | 3,953 | 4.527495 | 0.336049 | 0.052632 | 0.026991 | 0.034188 | 0.17184 | 0.102564 | 0.064777 | 0.064777 | 0.064777 | 0 | 0 | 0.033773 | 0.213509 | 3,953 | 162 | 87 | 24.401235 | 0.681248 | 0.200354 | 0 | 0.184211 | 0 | 0 | 0.093329 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.052632 | 0.013158 | 0.078947 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
6068adea6b26bf93c6fc76af394fa54701dacddb | 6,134 | py | Python
backend/api/urls.py | 12xiaoni/text-label | 7456c5e73d32bcfc81a02be7e0d748f162934d35 | ["MIT"] | null | null | null
backend/api/urls.py | 12xiaoni/text-label | 7456c5e73d32bcfc81a02be7e0d748f162934d35 | ["MIT"] | null | null | null
backend/api/urls.py | 12xiaoni/text-label | 7456c5e73d32bcfc81a02be7e0d748f162934d35 | ["MIT"] | null | null | null
from django.urls import include, path
from .views import (annotation, auto_labeling, comment, example, example_state,
health, label, project, tag, task)
from .views.tasks import category, relation, span, text
urlpatterns_project = [
path(
route='category-types',
view=label.CategoryTypeList.as_view(),
name='category_types'
),
path(
route='category-types/<int:label_id>',
view=label.CategoryTypeDetail.as_view(),
name='category_type'
),
path(
route='span-types',
view=label.SpanTypeList.as_view(),
name='span_types'
),
path(
route='span-types/<int:label_id>',
view=label.SpanTypeDetail.as_view(),
name='span_type'
),
path(
route='category-type-upload',
view=label.CategoryTypeUploadAPI.as_view(),
name='category_type_upload'
),
path(
route='span-type-upload',
view=label.SpanTypeUploadAPI.as_view(),
name='span_type_upload'
),
path(
route='examples',
view=example.ExampleList.as_view(),
name='example_list'
),
path(
route='examples/<int:example_id>',
view=example.ExampleDetail.as_view(),
name='example_detail'
),
path(
route='relation_types',
view=label.RelationTypeList.as_view(),
name='relation_types_list'
),
path(
route='relation_type-upload',
view=label.RelationTypeUploadAPI.as_view(),
name='relation_type-upload'
),
path(
route='relation_types/<int:relation_type_id>',
view=label.RelationTypeDetail.as_view(),
name='relation_type_detail'
),
path(
route='annotation_relations',
view=relation.RelationList.as_view(),
name='annotation_relation_list'
),
path(
route='annotation_relation-upload',
view=relation.RelationUploadAPI.as_view(),
name='annotation_relation-upload'
),
path(
route='annotation_relations/<int:annotation_relation_id>',
view=relation.RelationDetail.as_view(),
name='annotation_relation_detail'
),
path(
route='approval/<int:example_id>',
view=annotation.ApprovalAPI.as_view(),
name='approve_labels'
),
path(
route='examples/<int:example_id>/categories',
view=category.CategoryListAPI.as_view(),
name='category_list'
),
path(
route='examples/<int:example_id>/categories/<int:annotation_id>',
view=category.CategoryDetailAPI.as_view(),
name='category_detail'
),
path(
route='examples/<int:example_id>/spans',
view=span.SpanListAPI.as_view(),
name='span_list'
),
path(
route='examples/<int:example_id>/spans/<int:annotation_id>',
view=span.SpanDetailAPI.as_view(),
name='span_detail'
),
path(
route='examples/<int:example_id>/texts',
view=text.TextLabelListAPI.as_view(),
name='text_list'
),
path(
route='examples/<int:example_id>/texts/<int:annotation_id>',
view=text.TextLabelDetailAPI.as_view(),
name='text_detail'
),
path(
route='tags',
view=tag.TagList.as_view(),
name='tag_list'
),
path(
route='tags/<int:tag_id>',
view=tag.TagDetail.as_view(),
name='tag_detail'
),
path(
route='examples/<int:example_id>/comments',
view=comment.CommentListDoc.as_view(),
name='comment_list_doc'
),
path(
route='comments',
view=comment.CommentListProject.as_view(),
name='comment_list_project'
),
path(
route='examples/<int:example_id>/comments/<int:comment_id>',
view=comment.CommentDetail.as_view(),
name='comment_detail'
),
path(
route='examples/<int:example_id>/states',
view=example_state.ExampleStateList.as_view(),
name='example_state_list'
),
path(
route='auto-labeling-templates',
view=auto_labeling.AutoLabelingTemplateListAPI.as_view(),
name='auto_labeling_templates'
),
path(
route='auto-labeling-templates/<str:option_name>',
view=auto_labeling.AutoLabelingTemplateDetailAPI.as_view(),
name='auto_labeling_template'
),
path(
route='auto-labeling-configs',
view=auto_labeling.AutoLabelingConfigList.as_view(),
name='auto_labeling_configs'
),
path(
route='auto-labeling-configs/<int:config_id>',
view=auto_labeling.AutoLabelingConfigDetail.as_view(),
name='auto_labeling_config'
),
path(
route='auto-labeling-config-testing',
view=auto_labeling.AutoLabelingConfigTest.as_view(),
name='auto_labeling_config_test'
),
path(
route='examples/<int:example_id>/auto-labeling',
view=auto_labeling.AutoLabelingAnnotation.as_view(),
name='auto_labeling_annotation'
),
path(
route='auto-labeling-parameter-testing',
view=auto_labeling.AutoLabelingConfigParameterTest.as_view(),
name='auto_labeling_parameter_testing'
),
path(
route='auto-labeling-template-testing',
view=auto_labeling.AutoLabelingTemplateTest.as_view(),
name='auto_labeling_template_test'
),
path(
route='auto-labeling-mapping-testing',
view=auto_labeling.AutoLabelingMappingTest.as_view(),
name='auto_labeling_mapping_test'
)
]
urlpatterns = [
path(
route='health',
view=health.Health.as_view(),
name='health'
),
path(
route='projects',
view=project.ProjectList.as_view(),
name='project_list'
),
path(
route='tasks/status/<task_id>',
view=task.TaskStatus.as_view(),
name='task_status'
),
path(
route='projects/<int:project_id>',
view=project.ProjectDetail.as_view(),
name='project_detail'
),
path('projects/<int:project_id>/', include(urlpatterns_project))
]
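# --- editorial sketch: resolving one of the route names above with
# django.urls.reverse. Assumes this urlconf is included without a namespace;
# the leading prefix depends on where it is mounted in the project.
#   from django.urls import reverse
#   reverse('example_detail', kwargs={'project_id': 1, 'example_id': 42})
#   # -> '.../projects/1/examples/42'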
| 28.798122 | 79 | 0.616074 | 637 | 6,134 | 5.698587 | 0.153846 | 0.099174 | 0.110193 | 0.060606 | 0.302755 | 0.179339 | 0.126171 | 0.019835 | 0 | 0 | 0 | 0 | 0.250408 | 6,134 | 212 | 80 | 28.933962 | 0.789474 | 0 | 0 | 0.38756 | 0 | 0 | 0.289208 | 0.194327 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.014354 | 0 | 0.014354 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
6068f5688ef9ffee1272e2ce66f9f86a9888991e | 4,855 | py | Python
nwbwidgets/test/test_base.py | d-sot/nwb-jupyter-widgets | f9bf5c036c39f29e26b3cdb78198cccfa1b13cef | ["BSD-3-Clause-LBNL"] | null | null | null
nwbwidgets/test/test_base.py | d-sot/nwb-jupyter-widgets | f9bf5c036c39f29e26b3cdb78198cccfa1b13cef | ["BSD-3-Clause-LBNL"] | null | null | null
nwbwidgets/test/test_base.py | d-sot/nwb-jupyter-widgets | f9bf5c036c39f29e26b3cdb78198cccfa1b13cef | ["BSD-3-Clause-LBNL"] | null | null | null
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pynwb import TimeSeries
from datetime import datetime
from dateutil.tz import tzlocal
from pynwb import NWBFile
from ipywidgets import widgets
from pynwb.core import DynamicTable
from pynwb.file import Subject
from nwbwidgets.view import default_neurodata_vis_spec
from pynwb import ProcessingModule
from pynwb.behavior import Position, SpatialSeries
from nwbwidgets.base import show_neurodata_base,processing_module, nwb2widget, show_text_fields, \
fig2widget, vis2widget, show_fields, show_dynamic_table, df2accordion, lazy_show_over_data
import unittest
import pytest
def test_show_neurodata_base():
start_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())
create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
nwbfile = NWBFile(session_description='demonstrate NWBFile basics',
identifier='NWB123',
session_start_time=start_time,
file_create_date=create_date,
related_publications='https://doi.org/10.1088/1741-2552/aaa904',
experimenter='Dr. Pack')
assert isinstance(show_neurodata_base(nwbfile,default_neurodata_vis_spec), widgets.Widget)
def test_show_text_fields():
data = np.random.rand(160,3)
ts = TimeSeries(name='test_timeseries', data=data, unit='m', starting_time=0.0, rate=1.0)
assert isinstance(show_text_fields(ts), widgets.Widget)
class ProcessingModuleTestCase(unittest.TestCase):
def setUp(self):
spatial_series = SpatialSeries(name='position',
data=np.linspace(0, 1, 20),
rate=50.,
reference_frame='starting gate')
self.position = Position(spatial_series=spatial_series)
def test_processing_module(self):
start_time = datetime(2020, 1, 29, 11, tzinfo=tzlocal())
nwbfile = NWBFile(session_description='Test Session',
identifier='NWBPM',
session_start_time=start_time)
behavior_module = ProcessingModule(name='behavior',
description='preprocessed behavioral data')
nwbfile.add_processing_module(behavior_module)
nwbfile.processing['behavior'].add(self.position)
processing_module(nwbfile.processing['behavior'], default_neurodata_vis_spec)
def test_nwb2widget(self):
nwb2widget(self.position, default_neurodata_vis_spec)
def test_fig2widget():
data = np.random.rand(160, 3)
fig = plt.figure(figsize=(10, 5))
plt.plot(data)
assert isinstance(fig2widget(fig), widgets.Widget)
class Test_vis2widget:
def test_vis2widget_input_widget(self):
wg = widgets.IntSlider(
value=7,
min=0,
max=10,
step=1,
description='Test:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
assert isinstance(vis2widget(wg), widgets.Widget)
def test_vis2widget_input_figure(self):
data = np.random.rand(160,3)
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
assert isinstance(vis2widget(fig), widgets.Widget)
def test_vis2widget_input_other(self):
data = np.random.rand(160,3)
with pytest.raises(ValueError, match="unsupported vis type"):
vis2widget(data)
def test_show_subject():
node = Subject(age='8', sex='m', species='macaque')
show_fields(node)
def test_show_dynamic_table():
d = {'col1': [1, 2], 'col2': [3, 4]}
DT = DynamicTable.from_dataframe(df=pd.DataFrame(data=d),
name='Test Dtable',
table_description='no description')
show_dynamic_table(DT)
def test_df2accordion():
df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
columns=['a', 'b', 'c'])
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
df2accordion(df=df,by='a',func=func_fig)
def test_df2accordion_single():
df = pd.DataFrame(np.array([1]),
columns=['a'])
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
df2accordion(df=df,by='a',func=func_fig)
def test_lazy_show_over_data():
list_ = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
def func_fig(data):
fig=plt.figure(figsize=(10, 5))
plt.plot(data)
return fig
assert isinstance(lazy_show_over_data(list_=list_,func_=func_fig),widgets.Widget)
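# --- editorial sketch: the round trip these tests exercise, reusing the same
# imports; any supported NWB container can be wrapped with the default spec.
#   ts = TimeSeries(name='demo', data=np.arange(10.0), unit='m', starting_time=0.0, rate=1.0)
#   widget = nwb2widget(ts, default_neurodata_vis_spec)  # returns an ipywidgets.Widget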
| 32.366667 | 98 | 0.622039 | 575 | 4,855 | 5.069565 | 0.297391 | 0.031218 | 0.020583 | 0.03259 | 0.273413 | 0.242539 | 0.146141 | 0.136535 | 0.136535 | 0.13036 | 0 | 0.038701 | 0.270855 | 4,855 | 149 | 99 | 32.583893 | 0.784746 | 0 | 0 | 0.2 | 0 | 0 | 0.055613 | 0 | 0 | 0 | 0 | 0 | 0.054545 | 1 | 0.154545 | false | 0 | 0.145455 | 0 | 0.345455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
606923d815b75242b92321d08cd16583deeb515a | 7,987 | py | Python
subliminal/video.py | orikad/subliminal | 5bd87a505f7a4cad2a3a872128110450c69da4f0 | ["MIT"] | null | null | null
subliminal/video.py | orikad/subliminal | 5bd87a505f7a4cad2a3a872128110450c69da4f0 | ["MIT"] | null | null | null
subliminal/video.py | orikad/subliminal | 5bd87a505f7a4cad2a3a872128110450c69da4f0 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
from __future__ import division
from datetime import datetime, timedelta
import logging
import os
from guessit import guessit
logger = logging.getLogger(__name__)
#: Video extensions
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
'.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv', '.omf',
'.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo',
'.vob', '.vro', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
class Video(object):
"""Base class for videos.
Represent a video, existing or not.
:param str name: name or path of the video.
:param str format: format of the video (HDTV, WEB-DL, BluRay, ...).
:param str release_group: release group of the video.
:param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i).
:param str video_codec: codec of the video stream.
:param str audio_codec: codec of the main audio stream.
:param str imdb_id: IMDb id of the video.
:param dict hashes: hashes of the video file by provider names.
:param int size: size of the video file in bytes.
:param set subtitle_languages: existing subtitle languages.
"""
def __init__(self, name, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None,
imdb_id=None, hashes=None, size=None, subtitle_languages=None):
#: Name or path of the video
self.name = name
#: Format of the video (HDTV, WEB-DL, BluRay, ...)
self.format = format
#: Release group of the video
self.release_group = release_group
#: Resolution of the video stream (480p, 720p, 1080p or 1080i)
self.resolution = resolution
#: Codec of the video stream
self.video_codec = video_codec
#: Codec of the main audio stream
self.audio_codec = audio_codec
#: IMDb id of the video
self.imdb_id = imdb_id
#: Hashes of the video file by provider names
self.hashes = hashes or {}
#: Size of the video file in bytes
self.size = size
#: Existing subtitle languages
self.subtitle_languages = subtitle_languages or set()
@property
def exists(self):
"""Test whether the video exists"""
return os.path.exists(self.name)
@property
def age(self):
"""Age of the video"""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta()
@classmethod
def fromguess(cls, name, guess):
"""Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`.
:param str name: name of the video.
:param dict guess: guessed data.
:raise: :class:`ValueError` if the `type` of the `guess` is invalid
"""
if guess['type'] == 'episode':
return Episode.fromguess(name, guess)
if guess['type'] == 'movie':
return Movie.fromguess(name, guess)
raise ValueError('The guess must be an episode or a movie guess')
@classmethod
def fromname(cls, name, options=None):
"""Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`.
:param str name: name of the video.
"""
if options is not None:
return cls.fromguess(name, guessit(name, options=options))
else:
return cls.fromguess(name, guessit(name))
def __repr__(self):
return '<%s [%r]>' % (self.__class__.__name__, self.name)
def __hash__(self):
return hash(self.name)
class Episode(Video):
"""Episode :class:`Video`.
:param str series: series of the episode.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:param str title: title of the episode.
:param int year: year of the series.
:param bool original_series: whether the series is the first with this name.
:param int tvdb_id: TVDB id of the episode.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None,
series_tvdb_id=None, series_imdb_id=None, **kwargs):
super(Episode, self).__init__(name, **kwargs)
#: Series of the episode
self.series = series
#: Season number of the episode
self.season = season
#: Episode number of the episode
self.episode = episode
#: Title of the episode
self.title = title
#: Year of series
self.year = year
#: The series is the first with this name
self.original_series = original_series
#: TVDB id of the episode
self.tvdb_id = tvdb_id
#: TVDB id of the series
self.series_tvdb_id = series_tvdb_id
#: IMDb id of the series
self.series_imdb_id = series_imdb_id
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'episode':
raise ValueError('The guess must be an episode guess')
if 'title' not in guess or 'episode' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], guess.get('season', 1), guess['episode'], title=guess.get('episode_title'),
year=guess.get('year'), format=guess.get('format'), original_series='year' not in guess,
release_group=guess.get('release_group'), resolution=guess.get('screen_size'),
video_codec=guess.get('video_codec'), audio_codec=guess.get('audio_codec'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'episode'}))
def __repr__(self):
if self.year is None:
return '<%s [%r, %dx%d]>' % (self.__class__.__name__, self.series, self.season, self.episode)
return '<%s [%r, %d, %dx%d]>' % (self.__class__.__name__, self.series, self.year, self.season, self.episode)
class Movie(Video):
"""Movie :class:`Video`.
:param str title: title of the movie.
:param int year: year of the movie.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, title, year=None, **kwargs):
super(Movie, self).__init__(name, **kwargs)
#: Title of the movie
self.title = title
#: Year of the movie
self.year = year
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'movie':
raise ValueError('The guess must be a movie guess')
if 'title' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('release_group'),
resolution=guess.get('screen_size'), video_codec=guess.get('video_codec'),
audio_codec=guess.get('audio_codec'), year=guess.get('year'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'movie'}))
def __repr__(self):
if self.year is None:
return '<%s [%r]>' % (self.__class__.__name__, self.title)
return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year)
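# --- editorial sketch: the fromname() shortcuts above in action; the release
# names are invented examples and guessit does the actual parsing.
#   ep = Video.fromname('The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4')
#   assert isinstance(ep, Episode) and (ep.season, ep.episode) == (5, 18)
#   mv = Video.fromname('Man.of.Steel.2013.720p.BluRay.x264-Felony.mkv')
#   assert isinstance(mv, Movie) and mv.year == 2013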
| 35.497778 | 116 | 0.597471 | 1,023 | 7,987 | 4.526882 | 0.203324 | 0.042108 | 0.041028 | 0.018355 | 0.495141 | 0.400561 | 0.315267 | 0.271216 | 0.199957 | 0.180091 | 0 | 0.00883 | 0.262677 | 7,987 | 224 | 117 | 35.65625 | 0.777551 | 0.287592 | 0 | 0.235294 | 0 | 0 | 0.149068 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.04902 | 0.039216 | 0.392157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
606d3133565f3d0c7f55e0387f7f06dca6adb6f2 | 7,097 | py | Python
shadowsocksr_cli/main.py | MaxSherry/ssr-command-client | e52ea0a74e2a1bbdd7e816e0e2670d66ebdbf159 | ["MIT"] | null | null | null
shadowsocksr_cli/main.py | MaxSherry/ssr-command-client | e52ea0a74e2a1bbdd7e816e0e2670d66ebdbf159 | ["MIT"] | null | null | null
shadowsocksr_cli/main.py | MaxSherry/ssr-command-client | e52ea0a74e2a1bbdd7e816e0e2670d66ebdbf159 | ["MIT"] | null | null | null
"""
@author: tyrantlucifer
@contact: tyrantlucifer@gmail.com
@blog: https://tyrantlucifer.com
@file: main.py
@time: 2021/2/18 21:36
@desc: entry point for shadowsocksr-cli
"""
import argparse
import traceback
from shadowsocksr_cli.functions import *
def get_parser():
parser = argparse.ArgumentParser(description=color.blue("The shadowsocksr command client based Python."),
epilog=color.yellow('Powered by ') + color.green('tyrantlucifer') + color.yellow(
". If you have any questions,you can send e-mails to ") + color.green(
"tyrantlucifer@gmail.com"))
parser.add_argument("-l", "--list", action="store_true", help="show ssr list")
parser.add_argument("-p", "--port", default=1080, metavar="local_port", type=int,
help="assign local proxy port,use with -s")
parser.add_argument("-s", "--start", metavar="ssr_id", type=int, help="start ssr proxy")
parser.add_argument("-S", "--stop", nargs='?', const=-1, metavar="ssr_id", type=int, help="stop ssr proxy")
parser.add_argument("-u", "--update", action="store_true", help="update ssr list")
parser.add_argument("-v", "--version", action="store_true", help="display version")
parser.add_argument("--generate-clash", action="store_true", help="generate clash config yaml")
parser.add_argument("--display-json", metavar="ssr_id", type=int, help="display ssr json info")
parser.add_argument("--test-speed", type=int, metavar="ssr_id", help="test ssr nodes download and upload speed")
parser.add_argument("--fast-node", action="store_true", help="find most fast by delay and start ssr proxy")
parser.add_argument("--setting-url", metavar="ssr_subscribe_url", help="setting ssr subscribe url")
parser.add_argument("--setting-address", metavar="ssr_local_address", help="setting ssr local address")
parser.add_argument("--list-url", action="store_true", help="list ssr subscribe url")
parser.add_argument("--add-url", metavar="ssr_subscribe_url", help="add ssr subscribe url")
parser.add_argument("--remove-url", metavar="ssr_subscribe_url", help="remove ssr subscribe url")
parser.add_argument("--list-address", action="store_true", help="list ssr local address")
parser.add_argument("--parse-url", metavar="ssr_url", help="pares ssr url")
parser.add_argument("--append-ssr", metavar="ssr_file_path", help="append ssr nodes from file")
parser.add_argument("-b", action="store_true", help="append_ssr file is base64")
parser.add_argument("--clear-ssr", metavar="ssr_id", nargs="?", const="fail",
help="if ssr_id is not empty, clear ssr node by ssr_id, else clear fail nodes")
parser.add_argument("-all", action="store_true", help="clear all ssr node")
parser.add_argument("--add-ssr", metavar="ssr_url", help="add ssr node")
parser.add_argument("--test-again", metavar="ssr_node_id", type=int, help="test ssr node again")
parser.add_argument("--print-qrcode", metavar="ssr_node_id", type=int, help="print ssr node qrcode")
parser.add_argument("--http", metavar="action[start stop status]", help="Manager local http server")
parser.add_argument("--http-port", metavar="http server port", default=80, type=int,
help="assign local http server port")
parser.add_argument("--setting-global-proxy", action="store_true",
help="setting system global proxy,only support on " + color.red('Ubuntu Desktop'))
parser.add_argument("--setting-pac-proxy", action="store_true",
help="setting system pac proxy,only support on " + color.red('Ubuntu Desktop'))
parser.add_argument("--close-system-proxy", action="store_true",
help="close system proxy,only support on " + color.red('Ubuntu Desktop'))
return parser
def main():
parser = get_parser()
args = parser.parse_args()
if args.list:
DisplayShadowsocksr.display_shadowsocksr_list()
elif args.update:
UpdateConfigurations.update_subscribe()
elif args.fast_node:
HandleShadowsocksr.select_fast_node(args.port)
elif args.start is not None:
HandleShadowsocksr.start(ssr_id=args.start, local_port=args.port)
elif args.stop is not None:
HandleShadowsocksr.stop(ssr_id=args.stop, local_port=args.port)
elif args.version:
DisplayShadowsocksr.display_version()
elif args.setting_url:
UpdateConfigurations.reset_subscribe_url(args.setting_url)
elif args.append_ssr:
if not os.path.isfile(args.append_ssr):
logger.error(f'append_ssr file {args.append_ssr} does not exist')
return
with open(args.append_ssr, 'r', encoding='UTF-8') as f:
txt = f.read()
if args.b:
txt = ParseShadowsocksr.base64_decode(txt)
ssr_set = set()
for line in txt.splitlines():
for ssr in re.findall(r'ssr://[0-9a-zA-Z=\-_/+]+', line):  # escape '-' so it is a literal, not a range
ssr_set.add(ssr)
for ssr in ssr_set:
try:
UpdateConfigurations.append_ssr_node(ssr)
except Exception as e:
logger.error(f'add ssr node error {ssr}')
logger.error(traceback.format_exc())
elif args.clear_ssr:
UpdateConfigurations.clear_ssr_nodes(args.clear_ssr, args.all)
elif args.setting_address:
UpdateConfigurations.update_local_address(args.setting_address)
elif args.list_url:
DisplayShadowsocksr.display_subscribe_url()
elif args.add_url:
UpdateConfigurations.add_subscribe_url(args.add_url)
elif args.remove_url:
UpdateConfigurations.remove_subscribe_url(args.remove_url)
elif args.list_address:
DisplayShadowsocksr.display_local_address()
elif args.parse_url:
DisplayShadowsocksr.display_shadowsocksr_json_by_url(args.parse_url)
elif args.add_ssr:
UpdateConfigurations.add_shadowsocksr_by_url(args.add_ssr)
elif args.test_again is not None:
UpdateConfigurations.update_shadowsocksr_connect_status(ssr_id=args.test_again)
elif args.print_qrcode is not None:
DisplayShadowsocksr.display_qrcode(ssr_id=args.print_qrcode)
elif args.setting_global_proxy:
UpdateSystemProxy.open_global_proxy(args.port, args.http_port)
elif args.setting_pac_proxy:
UpdateSystemProxy.open_pac_proxy(args.port, args.http_port)
elif args.close_system_proxy:
UpdateSystemProxy.close_proxy(args.port, args.http_port)
elif args.test_speed is not None:
DisplayShadowsocksr.display_shadowsocksr_speed(ssr_id=args.test_speed)
elif args.display_json is not None:
DisplayShadowsocksr.display_shadowsocksr_json(ssr_id=args.display_json)
elif args.generate_clash:
GenerateClashConfig.generate_clash_config()
elif args.http:
HandleHttpServer.handle_http_server(args.http, args.port, args.http_port)
else:
parser.print_help()
if __name__ == "__main__":
main()
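# --- editorial note: typical invocations of the parser defined above, assuming
# the module is run directly (an installed console-script name may differ):
#   python main.py -u             # update the ssr list from the subscribe url
#   python main.py -l             # show the ssr node list
#   python main.py -s 3 -p 1080   # start node 3 as a local proxy on port 1080
#   python main.py -S             # stop the running proxy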
| 52.962687 | 118 | 0.683106 | 927 | 7,097 | 5.035599 | 0.200647 | 0.055913 | 0.105613 | 0.048843 | 0.261354 | 0.209083 | 0.080977 | 0.053556 | 0.023993 | 0.023993 | 0 | 0.004358 | 0.19163 | 7,097 | 133 | 119 | 53.360902 | 0.809308 | 0.02184 | 0 | 0 | 0 | 0 | 0.246322 | 0.009807 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017094 | false | 0 | 0.025641 | 0 | 0.059829 | 0.034188 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
606de86a65d9bb68c662f132a9f86b56feda8791 | 672 | py | Python
zad1.py | nadkkka/H8PW | 21b5d28bb42af163e7dad43368d21b550ae66618 | ["MIT"] | 6 | 2019-10-20T18:25:28.000Z | 2019-11-17T12:21:42.000Z
zad1.py | nadkkka/H8PW | 21b5d28bb42af163e7dad43368d21b550ae66618 | ["MIT"] | null | null | null
zad1.py | nadkkka/H8PW | 21b5d28bb42af163e7dad43368d21b550ae66618 | ["MIT"] | 4 | 2019-10-20T18:25:28.000Z | 2019-11-30T19:33:47.000Z
def repleace_pattern(t,s,r):
assert len(t) > 0
assert len(s) > 0
assert len(r) > 0
assert len(t) >= len(s)
n = len(t)
m = len(s)
k = len(r)
idx = -1
for i in range(0, n - m + 1):  # stop early so the window t[i:i+m] stays inside t
if t[i] == s[0]:
pattern = True
for j in range(1,m):
if t[i+j] != s[j]:
pattern = False
break
if(pattern):
idx=i
break
result = t
print(idx)
if(idx!=-1):
result = [*t[0:idx],*r,*t[idx+m:n]]
return result
print (repleace_pattern([1,2,3,1,2,3,4],[1,2,3,4],[9,0]))
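# --- editorial trace of the call above: the pattern [1,2,3,4] first matches at
# index 3, so the function prints 3 and splices in [9,0]:
assert repleace_pattern([1,2,3,1,2,3,4],[1,2,3,4],[9,0]) == [1, 2, 3, 9, 0]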
| 18.162162 | 58 | 0.383929 | 101 | 672 | 2.534653 | 0.29703 | 0.140625 | 0.117188 | 0.03125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06044 | 0.458333 | 672 | 36 | 59 | 18.666667 | 0.642857 | 0 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 1 | 0.04 | false | 0 | 0 | 0 | 0.08 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
606dfbab5706a842277bbd2a3b9198129d579201 | 2,249 | py | Python
mycroft/client/enclosure/weather.py | Matjordan/mycroft-core | 8b64930f3b3dae671535fc3b096ce9d846c54f6d | ["Apache-2.0"] | null | null | null
mycroft/client/enclosure/weather.py | Matjordan/mycroft-core | 8b64930f3b3dae671535fc3b096ce9d846c54f6d | ["Apache-2.0"] | null | null | null
mycroft/client/enclosure/weather.py | Matjordan/mycroft-core | 8b64930f3b3dae671535fc3b096ce9d846c54f6d | ["Apache-2.0"] | null | null | null
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EnclosureWeather:
"""
Listens for Enclosure API commands to display indicators of the weather.
Performs the associated command on Arduino by writing on the Serial port.
"""
def __init__(self, bus, writer):
self.bus = bus
self.writer = writer
self.__init_events()
def __init_events(self):
self.bus.on('enclosure.weather.display', self.display)
def display(self, event=None):
if event and event.data:
# Convert img_code to icon
img_code = event.data.get("img_code", None)
icon = None
if img_code == 0:
# sunny
icon = "IICEIBMDNLMDIBCEAA"
elif img_code == 1:
# partly cloudy
icon = "IIEEGBGDHLHDHBGEEA"
elif img_code == 2:
# cloudy
icon = "IIIBMDMDODODODMDIB"
elif img_code == 3:
# light rain
icon = "IIMAOJOFPBPJPFOBMA"
elif img_code == 4:
# raining
icon = "IIMIOFOBPFPDPJOFMA"
elif img_code == 5:
# storming
icon = "IIAAIIMEODLBJAAAAA"
elif img_code == 6:
# snowing
icon = "IIJEKCMBPHMBKCJEAA"
elif img_code == 7:
# wind/mist
icon = "IIABIBIBIJIJJGJAGA"
temp = event.data.get("temp", None)
if icon is not None and temp is not None:
icon = "x=2," + icon
msg = "weather.display=" + str(temp) + "," + str(icon)
self.writer.write(msg)
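# --- editorial sketch: a bus message that would drive the handler above,
# assuming a standard Mycroft message bus client:
#   from mycroft.messagebus.message import Message
#   bus.emit(Message('enclosure.weather.display', {'img_code': 2, 'temp': 18}))
# display() maps img_code 2 to the "cloudy" icon and writes
# "weather.display=18,x=2,IIIBMDMDODODODMDIB" to the serial port.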
| 34.075758 | 77 | 0.56514 | 257 | 2,249 | 4.863813 | 0.48249 | 0.0616 | 0.0616 | 0.0256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011716 | 0.354824 | 2,249 | 65 | 78 | 34.6 | 0.849759 | 0.354824 | 0 | 0 | 0 | 0 | 0.143262 | 0.017731 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
606ffbed507972fed40e1f7c61ad9e16979a735d | 1,501 | py | Python
a_other_video/MCL-Motion-Focused-Contrastive-Learning/sts/motion_sts.py | alisure-fork/Video-Swin-Transformer | aa0a31bd4df0ad2cebdcfb2ad53df712fce79809 | ["Apache-2.0"] | null | null | null
a_other_video/MCL-Motion-Focused-Contrastive-Learning/sts/motion_sts.py | alisure-fork/Video-Swin-Transformer | aa0a31bd4df0ad2cebdcfb2ad53df712fce79809 | ["Apache-2.0"] | null | null | null
a_other_video/MCL-Motion-Focused-Contrastive-Learning/sts/motion_sts.py | alisure-fork/Video-Swin-Transformer | aa0a31bd4df0ad2cebdcfb2ad53df712fce79809 | ["Apache-2.0"] | null | null | null
import cv2
import numpy as np
from scipy import ndimage
def compute_motion_boudary(flow_clip):
mx = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
my = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
dx_all = []
dy_all = []
mb_x = 0
mb_y = 0
for flow_img in flow_clip:
d_x = ndimage.convolve(flow_img, mx)
d_y = ndimage.convolve(flow_img, my)
dx_all.append(d_x)
dy_all.append(d_y)
mb_x += d_x
mb_y += d_y
dx_all = np.array(dx_all)
dy_all = np.array(dy_all)
return dx_all, dy_all, mb_x, mb_y
def zero_boundary(frame_mag):
frame_mag[:8, :] = 0
frame_mag[:, :8] = 0
frame_mag[-8:, :] = 0
frame_mag[:, -8:] = 0
return frame_mag
def motion_mag_downsample(mag, size, input_size):
block_size = input_size // size
mask = np.zeros((size,size))
for i in range(size):
for j in range(size):
x_start = i * block_size
x_end = x_start + block_size
y_start = j * block_size
y_end = y_start + block_size
tmp_block = mag[x_start:x_end, y_start:y_end]
block_mean = np.mean(tmp_block)
mask[i, j] = block_mean
return mask
def motion_sts(flow_clip, size, input_size):
dx_all, dy_all, dx_sum, dy_sum = compute_motion_boudary(flow_clip)
mag, ang = cv2.cartToPolar(dx_sum, dy_sum, angleInDegrees=True)
mag_down = motion_mag_downsample(mag, size, input_size)
return mag_down
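# --- editorial sketch: running the functions above on synthetic flow data; the
# clip length and frame size are assumptions.
if __name__ == '__main__':
    flow_clip = [np.random.randn(224, 224).astype(np.float32) for _ in range(8)]
    sts_map = motion_sts(flow_clip, size=7, input_size=224)
    print(sts_map.shape)  # (7, 7): block-averaged motion magnitude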
| 24.209677 | 70 | 0.592938 | 250 | 1,501 | 3.256 | 0.228 | 0.014742 | 0.034398 | 0.04914 | 0.246929 | 0.178133 | 0.135135 | 0.04914 | 0.04914 | 0.04914 | 0 | 0.027907 | 0.283811 | 1,501 | 61 | 71 | 24.606557 | 0.729302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.068182 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
6070e1255db727d4dd3901174951be184b84e950 | 2,691 | py | Python
Student Database/input_details.py | manas1410/Miscellaneous-Development | 8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01 | ["MIT"] | null | null | null
Student Database/input_details.py | manas1410/Miscellaneous-Development | 8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01 | ["MIT"] | null | null | null
Student Database/input_details.py | manas1410/Miscellaneous-Development | 8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01 | ["MIT"] | null | null | null
from tkinter import *
import tkinter.font as font
import sqlite3
name2=''
regis2=''
branch2=''
def main():
inp=Tk()
inp.geometry("430x300")
inp.title("Enter The Details")
inp.iconbitmap("logo/spectrumlogo.ico")
f=font.Font(family='Bookman Old Style',size=15,weight='bold')
f1=font.Font(family='Bookman Old Style',size=20,weight='bold')
global n2
global reg2
global b2
det=Label(inp,text=" Enter The Details\n",font=f1,fg='magenta')
det.grid(row=0,column=0,columnspan=2)
n1=Label(inp,text=" Name:",font=f)
n1.grid(row=1,column=0)
n2=Entry(inp,width=40)
n2.grid(row=1,column=1)
reg1=Label(inp,text="Registration ID:",font=f)
reg1.grid(row=2,column=0)
reg2=Entry(inp,width=40)
reg2.grid(row=2,column=1)
b1=Label(inp,text=" Branch:",font=f)
b1.grid(row=3,column=0)
b2=Entry(inp,width=40)
b2.grid(row=3,column=1)
invalid=Label(inp,text=' ',fg='red')
invalid.grid(row=4,columnspan=2)
def submit():
name2=n2.get()
regis2=reg2.get()
branch2=b2.get()
l=[name2,regis2,branch2]
if (None in l or "" in l):
invalid['text']="Please fill all the fields"
else:
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#insert into tables
c.execute("""UPDATE mark_list SET name=? WHERE name=?""",(name2,' '))
c.execute("""UPDATE mark_list SET registration_no=? WHERE registration_no=?""",(regis2,' '))
c.execute("""UPDATE mark_list SET branch=? WHERE branch=?""",(branch2,' '))
#commit_changes
db.commit()
#close connection
db.close()
inp.destroy()
import subject
subject.main()
def back():
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
c.execute("""DELETE from mark_list where name=' '""")
#commit_changes
db.commit()
#close connection
db.close()
inp.destroy()
import welcome
welcome.main()
#buttons
sub1=Button(inp,text="Submit",borderwidth=3,padx=40,font=f,bg='green',command=submit)
sub1.grid(row=5,column=0,columnspan=2)
back1=Button(inp,text="Back",borderwidth=3,padx=20,font=f,bg='red',command=back)
back1.grid(row=6,column=0,columnspan=2)
inp.mainloop()
if __name__=='__main__':
main()
| 25.149533 | 106 | 0.536603 | 339 | 2,691 | 4.20649 | 0.330383 | 0.049088 | 0.042076 | 0.037868 | 0.23913 | 0.23913 | 0.186536 | 0.140252 | 0.140252 | 0.140252 | 0 | 0.045479 | 0.313638 | 2,691 | 106 | 107 | 25.386792 | 0.726584 | 0.036046 | 0 | 0.151515 | 0 | 0 | 0.178773 | 0.008475 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.075758 | 0 | 0.121212 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
60720921ca305f9abf0188d61f032d72e5cdb0ce | 16,036 | py | Python
IQS5xx/IQS5xx.py | jakezimmerTHT/py_IQS5xx | 5f90be17ea0429eeeb3726c7647f0b7ad1fb7b06 | ["MIT"] | 1 | 2019-02-26T11:56:26.000Z | 2019-02-26T11:56:26.000Z
IQS5xx/IQS5xx.py | jakezimmerTHT/py_IQS5xx | 5f90be17ea0429eeeb3726c7647f0b7ad1fb7b06 | ["MIT"] | null | null | null
IQS5xx/IQS5xx.py | jakezimmerTHT/py_IQS5xx | 5f90be17ea0429eeeb3726c7647f0b7ad1fb7b06 | ["MIT"] | 1 | 2022-02-22T19:47:26.000Z | 2022-02-22T19:47:26.000Z
import unittest
import time
import logging
logging.basicConfig()
from intelhex import IntelHex
import Adafruit_GPIO.I2C as i2c
from gpiozero import OutputDevice
from gpiozero import DigitalInputDevice
from ctypes import c_uint8, c_uint16, c_uint32, cast, pointer, POINTER
from ctypes import create_string_buffer, Structure
from fcntl import ioctl
import struct
import Adafruit_PureIO.smbus as smbus
from Adafruit_PureIO.smbus import make_i2c_rdwr_data
from IQS5xx_Defs import *
def bytesToHexString(bytes):
if isinstance(bytes, basestring):
return ''.join('{:02x} '.format(ord(c)) for c in bytes)
if isinstance(bytes, bytearray):
return ''.join('{:02x} '.format(b) for b in bytes)
raise ValueError("Must pass bytesToHexString() a string or bytearray")
IQS5xx_DEFAULT_ADDRESS = 0x74
IQS5xx_MAX_ADDRESS = 0x78
CHECKSUM_DESCRIPTOR_START = 0x83C0
CHECKSUM_DESCRIPTOR_END = 0x83FF
APP_START_ADDRESS = 0x8400
APP_END_ADDRESS = 0xBDFF #inclusive
NV_SETTINGS_START = 0xBE00
NV_SETTINGS_END = 0xBFFF #inclusive
FLASH_PADDING = 0x00
BLOCK_SIZE = 64
APP_SIZE_BLOCKS = (((APP_END_ADDRESS+1) - APP_START_ADDRESS) / BLOCK_SIZE)
NV_SETTINGS_SIZE_BLOCKS = (((NV_SETTINGS_END+1) - NV_SETTINGS_START) / BLOCK_SIZE)
BL_CMD_READ_VERSION = 0x00
BL_CMD_READ_64_BYTES = 0x01
BL_CMD_EXECUTE_APP = 0x02 # Write only, 0 bytes
BL_CMD_RUN_CRC = 0x03
BL_CRC_FAIL = 0x01
BL_CRC_PASS = 0x00
BL_VERSION = 0x0200
def swapEndianess(uint16):
return ((uint16 & 0xFF) << 8) | ((uint16 & 0xFF00) >> 8)
def writeBytes(self, data):
self._bus.write_bytes(self._address, bytes(data))
i2c.Device.writeBytes = writeBytes
def readBytes(self, data):
return self._bus.read_bytes(self._address, data)
i2c.Device.readBytes = readBytes
def writeRawListReadRawList(self, data, readLength):
self.writeBytes(data)
# This isn't using a repeat start
return self.readBytes(readLength)
i2c.Device.writeRawListReadRawList = writeRawListReadRawList
def writeBytes_16BitAddress(self, address, data):
addressBytes = struct.pack('>H', address)
dataBytes = bytearray(data)
bytes = addressBytes + dataBytes
self.writeBytes(bytes)
i2c.Device.writeBytes_16BitAddress = writeBytes_16BitAddress
def readBytes_16BitAddress(self, address, length):
assert self._bus._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint16(swapEndianess(address))
result = create_string_buffer(length)
# Build ioctl request.
request = make_i2c_rdwr_data([
(self._address, 0, 2, cast(pointer(reg), POINTER(c_uint8))), # Write cmd register.
(self._address, smbus.I2C_M_RD, length, cast(result, POINTER(c_uint8))) # Read data.
])
# Make ioctl call and return result data.
ioctl(self._bus._device.fileno(), smbus.I2C_RDWR, request)
return bytearray(result.raw) # Use .raw instead of .value which will stop at a null byte!
i2c.Device.readBytes_16BitAddress = readBytes_16BitAddress
def readByte_16BitAddress(self, address):
result = self.readBytes_16BitAddress(address, 1)
result = struct.unpack('>B', result)[0]
return result
i2c.Device.readByte_16BitAddress = readByte_16BitAddress
def writeByte_16BitAddress(self, address, value, mask=0xFF):
if mask != 0xFF:
register = self.readByte_16BitAddress(address)
register &= ~mask
register |= (value & mask)
value = register
format = '>HB' if (value > 0) else '>Hb'
bytes = struct.pack(format, address, value)
self.writeBytes(bytes)
i2c.Device.writeByte_16BitAddress = writeByte_16BitAddress
class IQS5xx(object):
def __init__(self, resetPin, readyPin, address=IQS5xx_DEFAULT_ADDRESS):
self.address = address
self._resetPinNum = resetPin
self._readyPinNum = readyPin
self._resetPin = OutputDevice(pin=self._resetPinNum, active_high=False, initial_value=True)
self._readypin = DigitalInputDevice(pin=self._readyPinNum, active_state=True, pull_up=None)
def begin(self):
self.releaseReset()
time.sleep(0.01)
self.waitUntilReady()
self.acknowledgeReset()
time.sleep(0.01)
self.acknowledgeReset()
time.sleep(0.01)
self.endSession()
time.sleep(0.020)
@property
def address(self):
return self.__address
@address.setter
def address(self, value):
if (value < IQS5xx_DEFAULT_ADDRESS) or (value > IQS5xx_MAX_ADDRESS):
raise ValueError("Invalid I2C Address. Use something in the range [%x, %x]" %(IQS5xx_DEFAULT_ADDRESS, IQS5xx_MAX_ADDRESS))
self.__address = value
self._device = i2c.get_i2c_device(value)
self._logger = logging.getLogger('IQS5xx.Address.{0:#0X}'.format(value))
def readUniqueID(self):
return bytesToHexString(self._device.readBytes_16BitAddress(0xF000, 12))
def setupComplete(self):
self._device.writeByte_16BitAddress(SystemConfig0_adr, SETUP_COMPLETE, SETUP_COMPLETE)
def setManualControl(self):
self._device.writeByte_16BitAddress(SystemConfig0_adr, MANUAL_CONTROL, MANUAL_CONTROL)
self._device.writeByte_16BitAddress(SystemControl0_adr, 0x00, 0x07) # active mode
def setTXPinMappings(self, pinList):
assert isinstance(pinList, list), "TX pinList must be a list of integers"
assert 0 <= len(pinList) <= 15, "TX pinList must be between 0 and 15 long"
self._device.writeBytes_16BitAddress(TxMapping_adr, pinList)
self._device.writeByte_16BitAddress(TotalTx_adr, len(pinList))
def setRXPinMappings(self, pinList):
assert isinstance(pinList, list), "RX pinList must be a list of integers"
assert 0 <= len(pinList) <= 10, "RX pinList must be between 0 and 15 long"
self._device.writeBytes_16BitAddress(RxMapping_adr, pinList)
self._device.writeByte_16BitAddress(TotalRx_adr, len(pinList))
def enableChannel(self, txChannel, rxChannel, enabled):
assert 0 <= txChannel < 15, "txChannel must be less than 15"
assert 0 <= rxChannel < 10, "rxChannel must be less than 10"
registerAddy = ActiveChannels_adr + (txChannel * 2)
if rxChannel >= 8:
mask = 1 << (rxChannel - 8)
else:
registerAddy += 1
mask = 1 << rxChannel
value = mask if enabled else 0x00
self._device.writeByte_16BitAddress(registerAddy, value, mask)
def setTXRXChannelCount(self, tx_count, rx_count):
    assert 0 <= tx_count <= 15, "tx_count must be less than or equal to 15"
    assert 0 <= rx_count <= 10, "rx_count must be less than or equal to 10"
    self._device.writeByte_16BitAddress(TotalTx_adr, tx_count)
    self._device.writeByte_16BitAddress(TotalRx_adr, rx_count)
def swapXY(self, swapped):
value = SWITCH_XY_AXIS if swapped else 0x00
self._device.writeByte_16BitAddress(XYConfig0_adr, value, SWITCH_XY_AXIS)
def setAtiGlobalC(self, globalC):
self._device.writeByte_16BitAddress(GlobalATIC_adr, globalC)
def setChannel_ATI_C_Adjustment(self, txChannel, rxChannel, adjustment):
assert 0 <= txChannel < 15, "txChannel must be less than 15"
assert 0 <= rxChannel < 10, "rxChannel must be less than 10"
registerAddy = ATICAdjust_adr + (txChannel * 10) + rxChannel
self._device.writeByte_16BitAddress(registerAddy, adjustment)
def setTouchMultipliers(self, set, clear):
self._device.writeByte_16BitAddress(GlobalTouchSet_adr, set)
self._device.writeByte_16BitAddress(GlobalTouchClear_adr, clear)
def rxFloat(self, floatWhenInactive):
value = RX_FLOAT if floatWhenInactive else 0x00
self._device.writeByte_16BitAddress(HardwareSettingsA_adr, value, RX_FLOAT)
def runAtiAlgorithm(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, AUTO_ATI, AUTO_ATI)
def acknowledgeReset(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, ACK_RESET, ACK_RESET)
def atiErrorDetected(self):
reg = self._device.readByte_16BitAddress(SystemInfo0_adr)
return bool(reg & ATI_ERROR)
def reseed(self):
self._device.writeByte_16BitAddress(SystemControl0_adr, RESEED, RESEED)
def endSession(self):
self._device.writeByte_16BitAddress(EndWindow_adr, 0x00)
time.sleep(0.001)
def readVersionNumbers(self):
bytes = self._device.readBytes_16BitAddress(ProductNumber_adr, 6)
fields = struct.unpack(">HHBB",bytes)
return {"product":fields[0], "project":fields[1], "major":fields[2], "minor":fields[3]}
def bootloaderAvailable(self):
BOOTLOADER_AVAILABLE = 0xA5
NO_BOOTLOADER = 0xEE
result = self._device.readByte_16BitAddress(BLStatus_adr)
# result = ord(result)
if result == BOOTLOADER_AVAILABLE:
return True
elif result == NO_BOOTLOADER:
return False
else:
raise ValueError("Unexpected value returned for bootloader status: {0:#0X}".format(result))
def holdReset(self, millis=None):
self._resetPin.on()
if millis != None:
time.sleep(millis/1000.0)
self.releaseReset()
def releaseReset(self):
self._resetPin.off()
def isReady(self):
return self._readypin.is_active
def waitUntilReady(self, timeout=None):
self._readypin.wait_for_active(timeout)
def updateFirmware(self, hexFilePath, newDeviceAddress=None):
hexFile = IntelHex(source = hexFilePath)
hexFile.padding = FLASH_PADDING
appBinary = hexFile.tobinarray(start=APP_START_ADDRESS, end=NV_SETTINGS_END)
crcBinary = hexFile.tobinarray(start=CHECKSUM_DESCRIPTOR_START, end=CHECKSUM_DESCRIPTOR_END)
if newDeviceAddress:
self._logger.debug("Modifying the last byte in NV settings to change Device I2C Addrress to {0:#0X}".format(newDeviceAddress))
if (newDeviceAddress < IQS5xx_DEFAULT_ADDRESS) or (newDeviceAddress > IQS5xx_MAX_ADDRESS):
raise ValueError("Invalid I2C Address. Use something in the range [%x, %x]" %(IQS5xx_DEFAULT_ADDRESS, IQS5xx_MAX_ADDRESS))
appBinary[-1] = newDeviceAddress
# Step 1 - Enter Bootloader
self._logger.debug("Entering Bootloader")
bootloaderAddress = 0x40 ^ self.address
bootloaderDevice = i2c.get_i2c_device(bootloaderAddress)
self.holdReset(100)
bootloaderEntered = False
for i in range(10):
    try:
        version = bootloaderDevice.readU16(BL_CMD_READ_VERSION, little_endian=False)
        bootloaderEntered = True
        break  # stop polling once the bootloader answers
    except:
        pass
if not bootloaderEntered:
raise IOError("Timeout while trying to enter bootlaoder")
self._logger.debug("Bootloader entered successfully")
# Step 2 - Read and verify the bootloader version number
self._logger.debug("Reading Bootloader version")
if version != BL_VERSION:
raise Exception("Incompatible bootloader version detected: {0:#0X}".format(version))
self._logger.debug("Bootloader version is compatible: 0x%02X",version)
# Step 3 - Write the new application firmware and settings
self._logger.debug("Starting to write Application and NV settings")
for blockNum in range(APP_SIZE_BLOCKS + NV_SETTINGS_SIZE_BLOCKS):
blockAddress = APP_START_ADDRESS + (blockNum * BLOCK_SIZE)
self._logger.debug('Writing 64-byte block {0}/{1} at address {2:#0X}'.format(blockNum+1, APP_SIZE_BLOCKS + NV_SETTINGS_SIZE_BLOCKS ,blockAddress))
data = bytearray(BLOCK_SIZE + 2)
data[0] = (blockAddress >> 8) & 0xFF
data[1] = blockAddress & 0xFF
data[2:] = appBinary[blockNum*BLOCK_SIZE : (blockNum+1)*BLOCK_SIZE]
bootloaderDevice.writeBytes(data)
time.sleep(.010) # give the device time to write to flash
# Step 4 - Write the checksum descriptor section
self._logger.debug("Writing CRC section")
blockAddress = CHECKSUM_DESCRIPTOR_START
data = bytearray(BLOCK_SIZE + 2)
data[0] = (blockAddress >> 8) & 0xFF
data[1] = blockAddress & 0xFF
data[2:] = crcBinary[0:]
bootloaderDevice.writeBytes(data)
time.sleep(0.010) # give the device time to write to flash
# Step 5 - Perform CRC and read back settings section
time.sleep(0.1)
self._logger.debug("Performing CRC calculation")
bootloaderDevice.writeRaw8(BL_CMD_RUN_CRC)
time.sleep(0.2)
crcStatus = bootloaderDevice.readRaw8()
if crcStatus != BL_CRC_PASS:
raise Exception("CRC Failure")
self._logger.debug("CRC Success")
self._logger.debug("Reading back NV settings and comparing")
for blockNum in range(NV_SETTINGS_SIZE_BLOCKS):
blockAddress = NV_SETTINGS_START + (blockNum * BLOCK_SIZE)
self._logger.debug('Reading 64-byte block {0}/{1} at address {2:#0X}'.format(blockNum+1, NV_SETTINGS_SIZE_BLOCKS, blockAddress))
data = bytearray(3)
data[0] = BL_CMD_READ_64_BYTES
data[1] = (blockAddress >> 8) & 0xFF
data[2] = blockAddress & 0xFF
reply = bootloaderDevice.writeRawListReadRawList(data, BLOCK_SIZE)
expectedReply = appBinary[(APP_SIZE_BLOCKS+blockNum)*BLOCK_SIZE : (APP_SIZE_BLOCKS+blockNum+1)*BLOCK_SIZE].tostring()
if reply != expectedReply:
raise Exception("Unexpected values while reading back NV Setting: {0} \nExpected values: {1}".format(bytesToHexString(reply), bytesToHexString(expectedReply)))
self._logger.debug("NV Settings match expected values")
# Step 6 - Execute application
self._logger.debug("Execute Application")
bootloaderDevice.writeRaw8(BL_CMD_EXECUTE_APP)
if newDeviceAddress:
self.address = newDeviceAddress
class TestIQS5xx(unittest.TestCase):
hexFile = "IQS550_B000_Trackpad_40_15_2_2_BL.HEX"
possibleAddresses = [0x74, 0x75, 0x76, 0x77]
desiredAddress = 0x74
device = None
def setUp(self):
if not self.__class__.device:
self.__class__.device = IQS5xx(17, 27)
for address in self.__class__.possibleAddresses:
self.__class__.device.address = address
self.__class__.device._logger.setLevel(logging.DEBUG)
try:
self.__class__.device.waitUntilReady(1)
self.__class__.device.bootloaderAvailable()
break
except:
if address == self.__class__.possibleAddresses[-1]:
raise IOError("Couldn't communicate with the controller")
if self.__class__.device.address != self.__class__.desiredAddress:
self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=self.__class__.desiredAddress)
def tearDown(self):
if self.__class__.device.address != self.__class__.desiredAddress:
print("Cleaning up by reprogramming the controller to the default address")
self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=self.__class__.desiredAddress)
def test_bootloaderAvailable(self):
self.assertTrue(self.__class__.device.bootloaderAvailable())
# @unittest.skip
# def test_update(self):
# self.__class__.device.updateFirmware(self.__class__.hexFile)
#
# @unittest.skip
# def test_update_and_changeaddress(self):
# newAddy = 0x77
# self.__class__.device.updateFirmware(self.__class__.hexFile, newDeviceAddress=newAddy)
# self.assertEqual(self.__class__.device.address, newAddy)
# time.sleep(0.1)
# self.assertTrue(self.__class__.device.bootloaderAvailable())
if __name__ == '__main__':
unittest.main()
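# --- editorial sketch: bringing up a controller with the class above; the GPIO
# pin numbers are taken from the test fixture and are assumptions.
#   device = IQS5xx(resetPin=17, readyPin=27)
#   device.begin()
#   print(device.readVersionNumbers())  # e.g. {'product': ..., 'project': ..., 'major': ..., 'minor': ...}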
| 41.760417 | 175 | 0.692816 | 1,876 | 16,036 | 5.686567 | 0.216951 | 0.023435 | 0.048088 | 0.052306 | 0.257218 | 0.219723 | 0.164886 | 0.125891 | 0.102925 | 0.102925 | 0 | 0.034518 | 0.215952 | 16,036 | 383 | 176 | 41.869452 | 0.813966 | 0.066725 | 0 | 0.118243 | 0 | 0 | 0.101038 | 0.00395 | 0 | 0 | 0.012186 | 0 | 0.040541 | 1 | 0.135135 | false | 0.013514 | 0.047297 | 0.016892 | 0.25 | 0.003378 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
60736b2c5b81bc8177746e07c1771f09afc46a66 | 2,214 | py | Python
program.py | siddhi117/ADB_Homework | 1751b3cc2d5ec1584efdf7f8961507bc29179e49 | ["MIT"] | null | null | null
program.py | siddhi117/ADB_Homework | 1751b3cc2d5ec1584efdf7f8961507bc29179e49 | ["MIT"] | null | null | null
program.py | siddhi117/ADB_Homework | 1751b3cc2d5ec1584efdf7f8961507bc29179e49 | ["MIT"] | null | null | null
import sqlite3
from bottle import route, run,debug,template,request,redirect
@route('/todo')
def todo_list():
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT id, task FROM todo WHERE status LIKE '1'")
result = c.fetchall()
c.close()
output = template('make_table', rows=result)
return output
@route('/new', method='GET')
def new_item():
if request.GET.save:
new = request.GET.task.strip()
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("INSERT INTO todo (task,status) VALUES (?,?)", (new,1))
new_id = c.lastrowid
conn.commit()
c.close()
redirect('/todo')
#return '<p>The new task was inserted into the database, the ID is %s</p>' % new_id
else:
return template('new_task.tpl')
@route('/do_insert' , method='GET')
def get_id():
redirect('/new')
@route('/edit/<no:int>', method='GET')
def edit_item(no):
if request.GET.save:
edit = request.GET.task.strip()
status = request.GET.status.strip()
if status == 'open':
status = 1
else:
status = 0
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("UPDATE todo SET task = ?, status = ? WHERE id LIKE ?", (edit, status, no))
conn.commit()
return '<p>The item number %s was successfully updated</p>' % no
else:
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT task FROM todo WHERE id LIKE ?", (str(no)))
cur_data = c.fetchone()
return template('edit_task', old=cur_data, no=no)
@route('/find_edit' , method='GET')
def get_edit_id():
id_edit = request.GET.editdata.strip()
redirect('/edit/' + id_edit)
@route('/delete/<no:int>', method='GET')
def delete_item(no):
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("DELETE FROM todo WHERE id LIKE ?", (str(no)))
conn.commit()
redirect('/todo')
@route('/find_delete' , method='GET')
def get_delete_id():
id_delete = request.GET.deletedata.strip()
redirect('/delete/' + id_delete)
debug(True)
run(reloader=True)
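# --- editorial note: the handlers above presuppose an existing todo.db with a
# "todo" table; a sketch of creating it (column types inferred from the queries):
#   import sqlite3
#   conn = sqlite3.connect('todo.db')
#   conn.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL, status bool NOT NULL)")
#   conn.commit()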
| 27.333333 | 93 | 0.584914 | 297 | 2,214 | 4.289562 | 0.259259 | 0.054945 | 0.056515 | 0.086342 | 0.285714 | 0.245683 | 0.215856 | 0.178179 | 0.178179 | 0.178179 | 0 | 0.006031 | 0.251129 | 2,214 | 80 | 94 | 27.675 | 0.762364 | 0.037037 | 0 | 0.390625 | 0 | 0 | 0.21023 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109375 | false | 0 | 0.03125 | 0 | 0.203125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
60761440b9fc5de896572a3624d9ef4c6e6c7759 | 3,246 | py | Python
pipeline/metadata/maxmind.py | censoredplanet/censoredplanet-analysis | f5e5d82f890e47599bc0baa9a9390f3c5147a6f7 | ["Apache-2.0"] | 6 | 2021-06-02T11:15:12.000Z | 2022-03-04T12:09:35.000Z
pipeline/metadata/maxmind.py | censoredplanet/censoredplanet-analysis | f5e5d82f890e47599bc0baa9a9390f3c5147a6f7 | ["Apache-2.0"] | 24 | 2021-04-13T18:07:29.000Z | 2022-03-25T20:26:27.000Z
pipeline/metadata/maxmind.py | censoredplanet/censoredplanet-analysis | f5e5d82f890e47599bc0baa9a9390f3c5147a6f7 | ["Apache-2.0"] | 2 | 2021-06-02T11:30:21.000Z | 2021-08-20T12:17:12.000Z
"""Module to initialize Maxmind databases and lookup IP metadata."""
import logging
import os
from typing import Optional, Tuple, NamedTuple
import geoip2.database
from pipeline.metadata.mmdb_reader import mmdb_reader
MAXMIND_CITY = 'GeoLite2-City.mmdb'
MAXMIND_ASN = 'GeoLite2-ASN.mmdb'
# Tuple(netblock, asn, as_name, country)
# ex: ("1.0.0.1/24", 13335, "CLOUDFLARENET", "AU")
MaxmindReturnValues = NamedTuple('MaxmindReturnValues',
[('netblock', Optional[str]), ('asn', int),
('as_name', Optional[str]),
('country', Optional[str])])
class MaxmindIpMetadata():
"""Lookup database for Maxmind ASN and country metadata."""
def __init__(self, maxmind_folder: str) -> None:
"""Create a Maxmind Database.
Args:
maxmind_folder: a folder containing maxmind files.
Either a gcs filepath or a local system folder.
"""
maxmind_city_path = os.path.join(maxmind_folder, MAXMIND_CITY)
maxmind_asn_path = os.path.join(maxmind_folder, MAXMIND_ASN)
self.maxmind_city = mmdb_reader(maxmind_city_path)
self.maxmind_asn = mmdb_reader(maxmind_asn_path)
def lookup(self, ip: str) -> MaxmindReturnValues:
"""Lookup metadata infomation about an IP.
Args:
ip: string of the format 1.1.1.1 (ipv4 only)
Returns: MaxmindReturnValues
Raises:
KeyError: when the IP's ASN can't be found
"""
(asn, as_name, netblock) = self._get_maxmind_asn(ip)
country = self._get_country_code(ip)
if not asn:
raise KeyError(f"No Maxmind entry for {ip}")
return MaxmindReturnValues(netblock, asn, as_name, country)
def _get_country_code(self, vp_ip: str) -> Optional[str]:
"""Get country code for IP address.
Args:
vp_ip: IP address of vantage point (as string)
Returns:
2-letter ISO country code
"""
try:
vp_info = self.maxmind_city.city(vp_ip)
return vp_info.country.iso_code
except (ValueError, geoip2.errors.AddressNotFoundError) as e:
logging.warning('Maxmind: %s\n', e)
return None
def _get_maxmind_asn(
self, vp_ip: str) -> Tuple[Optional[int], Optional[str], Optional[str]]:
"""Get ASN information for IP address.
Args:
vp_ip: IP address of vantage point (as string)
Returns:
Tuple containing AS num, AS org, and netblock
"""
try:
vp_info = self.maxmind_asn.asn(vp_ip)
asn = vp_info.autonomous_system_number
as_name = vp_info.autonomous_system_organization
if vp_info.network:
netblock: Optional[str] = vp_info.network.with_prefixlen
else:
netblock = None
return asn, as_name, netblock
except (ValueError, geoip2.errors.AddressNotFoundError) as e:
logging.warning('Maxmind: %s\n', e)
return None, None, None
class FakeMaxmindIpMetadata(MaxmindIpMetadata):
"""A fake lookup table for testing MaxmindIpMetadata."""
# pylint: disable=super-init-not-called
def __init__(self) -> None:
pass
# pylint: disable=no-self-use
def lookup(self, _: str) -> MaxmindReturnValues:
return MaxmindReturnValues('101.103.0.0/16', 1221, 'ASN-TELSTRA', 'AU')
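# --- editorial sketch: the fake class above shows the return shape without any
# .mmdb files on disk:
#   values = FakeMaxmindIpMetadata().lookup('1.1.1.1')
#   values.netblock, values.asn, values.as_name, values.country
#   # -> ('101.103.0.0/16', 1221, 'ASN-TELSTRA', 'AU')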
| 30.336449 | 78 | 0.663586 | 417 | 3,246 | 4.997602 | 0.306954 | 0.043186 | 0.017274 | 0.020154 | 0.212092 | 0.169866 | 0.169866 | 0.137236 | 0.137236 | 0.137236 | 0 | 0.014412 | 0.230437 | 3,246 | 106 | 79 | 30.622642 | 0.819856 | 0.282193 | 0 | 0.122449 | 0 | 0 | 0.072184 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122449 | false | 0.020408 | 0.102041 | 0.020408 | 0.387755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
60765090bf7eb3ddb56eaccfac94b3add8ca8a04 | 844 | py | Python
nas_big_data/combo/best/combo_4gpu_8_agebo/predict.py | deephyper/NASBigData | 18f083a402b80b1d006eada00db7287ff1802592 | ["BSD-2-Clause"] | 3 | 2020-08-07T12:05:12.000Z | 2021-04-05T19:38:37.000Z
nas_big_data/combo/best/combo_2gpu_8_agebo/predict.py | deephyper/NASBigData | 18f083a402b80b1d006eada00db7287ff1802592 | ["BSD-2-Clause"] | 2 | 2020-07-17T14:44:12.000Z | 2021-04-04T14:52:11.000Z
nas_big_data/combo/best/combo_4gpu_8_agebo/predict.py | deephyper/NASBigData | 18f083a402b80b1d006eada00db7287ff1802592 | ["BSD-2-Clause"] | 1 | 2021-03-28T01:49:21.000Z | 2021-03-28T01:49:21.000Z
import os
import numpy as np
import tensorflow as tf
from nas_big_data.combo.load_data import load_data_npz_gz
from deephyper.nas.run.util import create_dir
from deephyper.nas.train_utils import selectMetric
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in range(4)])
HERE = os.path.dirname(os.path.abspath(__file__))
fname = HERE.split("/")[-1]
output_dir = "logs"
create_dir(output_dir)
X_test, y_test = load_data_npz_gz(test=True)
dependencies = {
"r2":selectMetric("r2")
}
model = tf.keras.models.load_model(f"best_model_{fname}.h5", custom_objects=dependencies)
model.compile(
metrics=["mse", "mae", selectMetric("r2")]
)
score = model.evaluate(X_test, y_test)
score_names = ["loss", "mse", "mae", "r2"]
print("score:")
output = " ".join([f"{sn}:{sv:.3f}" for sn,sv in zip(score_names, score)])
print(output)
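# Note for running this script: it loads best_model_<dirname>.h5, where
# <dirname> is the name of the folder containing the script (see fname above),
# so the weights file must sit next to it.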
| 26.375
| 89
| 0.722749
| 135
| 844
| 4.296296
| 0.525926
| 0.041379
| 0.037931
| 0.044828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010667
| 0.111374
| 844
| 32
| 90
| 26.375
| 0.762667
| 0
| 0
| 0
| 0
| 0
| 0.107692
| 0.024852
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6077a342275cffb372223916fb877af7a30c823c
| 14,744
|
py
|
Python
|
ship/utils/utilfunctions.py
|
duncan-r/SHIP
|
2c4c22c77f9c18ea545d3bce70a36aebbd18256a
|
[
"MIT"
] | 6
|
2016-04-10T17:32:44.000Z
|
2022-03-13T18:41:21.000Z
|
ship/utils/utilfunctions.py
|
duncan-r/SHIP
|
2c4c22c77f9c18ea545d3bce70a36aebbd18256a
|
[
"MIT"
] | 19
|
2017-06-23T08:21:53.000Z
|
2017-07-26T08:23:03.000Z
|
ship/utils/utilfunctions.py
|
duncan-r/SHIP
|
2c4c22c77f9c18ea545d3bce70a36aebbd18256a
|
[
"MIT"
] | 6
|
2016-10-26T16:04:38.000Z
|
2019-04-25T23:55:06.000Z
|
"""
Summary:
Utility Functions that could be helpful in any part of the API.
All functions that are likely to be called across a number of classes
and functions in the API should be grouped here for convenience.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO: This module, like a lot of others probably, needs reviewing for how
'Pythonic' it is. There are a lot of places where generators,
comprehensions, maps, etc should be used to speed things up and make
them a bit clearer.
More importantly there are a lot of places using '==' compare that
should be using 'in' etc. This could cause bugs and must be fixed
soon.
Updates:
"""
from __future__ import unicode_literals
import re
import os
import operator
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
# def resolveSeDecorator(se_vals, path):
# """Decorator function for replacing Scen/Evt placholders.
#
# Checks fro scenario and event placeholders in the return value of a
# function and replaces them with corresponding values if found.
#
# Args:
# se_vals(dict): standard scenario/event dictionary in the format:
# {'scenario': {
# """
# def seDecorator(func):
# def seWrapper(*args, **kwargs):
# result = func(*args, **kwargs)
#
# if '~' in result:
# # Check for scenario stuff
# for key, val in self.se_vals['scenario'].items():
# temp = '~' + key + '~'
# if temp in result:
# result = result.replace(temp, val)
# # Check for event stuff
# for key, val in self.se_vals['event'].items():
# temp = '~' + key + '~'
# if temp in result:
# result = result.replace(temp, val)
# return result
# return seWrapper
# return seDecorator
def formatFloat(value, no_of_dps, ignore_empty_str=True):
"""Format a float as a string to given number of decimal places.
Args:
value(float): the value to format.
no_of_dps(int): number of decimal places to format to.
ignore_empty_str(bool): if True, a blank-string value is returned stripped instead of raising ValueError.
Return:
str - the formatted float.
Raises:
ValueError - if value param is not type float.
"""
if ignore_empty_str and not isNumeric(value) and str(value).strip() == '':
return str(value).strip()
if not isNumeric(value):
raise ValueError
decimal_format = '%0.' + str(no_of_dps) + 'f'
value = decimal_format % float(value)
return value
def checkFileType(file_path, ext):
"""Checks a file to see that it has the right extension.
Args:
file_path (str): The file path to check.
ext (List): list containing the extension types to match the file
against.
Returns:
True if the extension matches the ext variable given or False if not.
"""
file_ext = os.path.splitext(file_path)[1]
logger.info('File ext = ' + file_ext)
# Accept the file if its extension appears anywhere in the allowed list.
return file_ext in ext
def isNumeric(s):
"""Tests if string is a number or not.
Simply tries to convert it and catches the error if launched.
Args:
s (str): string to test number compatibility.
Returns:
Bool - True if number. False if not.
"""
try:
float(s)
return True
except (ValueError, TypeError):
return False
def encodeStr(value):
try:
value = unicode(value, "utf-8")
return value
except (UnicodeDecodeError, NameError, TypeError):
return value
def isString(value):
"""Tests a given value to see if it is an instance of basestring or not.
Note:
This function should be used whenever testing this as it accounts for
both Python 2.7+ and 3.2+ variations of string.
Args:
value: the variable to test.
Returns:
Bool - True if value is a unicode str (basestring type)
"""
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
# if not isinstance(value, basestring):
# return False
#
# return True
def isList(value):
"""Test a given value to see if it is a list or not.
Args:
value: the variable to test for list type.
Returns:
True if value is of type list; False otherwise.
"""
if not isinstance(value, list):
return False
return True
def arrayToString(str_array):
"""Convert a list to a String
Creates one string by adding each part of the array to one string using
', '.join()
Args:
str_array (List): to convert into single string.
Returns:
str - representation of the array joined together.
Raises:
ValueError: if the contents of the list are not all strings.
"""
if not isString(str_array[0]):
raise ValueError('Array values are not strings')
return ', '.join(str_array)
def findSubstringInList(substr, the_list):
"""Returns a list containing the indices that a substring was found at.
Uses a generator to quickly find all indices that str appears in.
Args:
substr (str): the sub string to search for.
the_list (List): a list containing the strings to search.
Returns:
tuple - containing:
* a list with the indices that the substring was found in
(this list can be empty if no matches were found).
* an integer containing the number of elements it was found in.
"""
indices = [i for i, s in enumerate(the_list) if substr in s]
return indices, len(indices)
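# Illustrative example (hypothetical inputs): searching ['abc', 'xyz', 'cab']
# for the substring 'ab' returns ([0, 2], 2) -- indices 0 and 2 match.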
def findMax(val1, val2):
"""Returns tuple containing min, max of two values
Args:
val1: first integer or float.
val2: second integer or float.
Returns:
tuple - containing:
* lower value
* higher value
* False if not same or True if the same.
"""
if val1 == val2:
return val1, val2, True
elif val1 > val2:
return val2, val1, False
else:
return val1, val2, False
def fileExtensionWithoutPeriod(filepath, name_only=False):
"""Extracts the extension without '.' from filepath.
The extension will always be converted to lower case before returning.
Args:
filepath (str): A full filepath if name_only=False. Otherwise a file
name with extension if name_only=True.
name_only (bool): True if filepath is only filename.extension.
"""
if name_only:
file, ext = os.path.splitext(filepath)
else:
path, filename = os.path.split(filepath)
file, ext = os.path.splitext(filename)
ext = ext[1:]
return ext.lower()
def findWholeWord(w):
"""Find a whole word amoungst a string."""
return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
def convertRunOptionsToSEDict(options):
"""Converts tuflow command line options to scenario/event dict.
Tuflow uses command line option (e.g. -s1 blah -e1 blah) to set scenario
values which can either be provided on the command line or through the
FMP run form. The TuflowLoader can use these arguments but requires a
slightly different setup.
This function converts the command line string into the scenario and
event dictionary expected by the TuflowLoader.
Args:
options(str): command line options.
Return:
dict - {'scenario': {'s1': blah}, 'event': {'e1': blah}}
Raises:
AttributeError: if both -s and -s1 or -e and -e1 occur in the options
string. -x and -x1 are treated as the same variable by tuflow and
one of the values would be ignored.
"""
if ' -s ' in options and ' -s1 ' in options:
raise AttributeError
if ' -e ' in options and ' -e1 ' in options:
raise AttributeError
outvals = {'scenario': {}, 'event': {}}
vals = options.split(" ")
for i in range(len(vals)):
if vals[i].startswith('-s'):
outvals['scenario'][vals[i][1:]] = vals[i + 1]
elif vals[i].startswith('-e'):
outvals['event'][vals[i][1:]] = vals[i + 1]
return outvals
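# Illustrative example (hypothetical option string):
# convertRunOptionsToSEDict('-s1 scen1 -e1 evt1')
# returns {'scenario': {'s1': 'scen1'}, 'event': {'e1': 'evt1'}}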
def getSEResolvedFilename(filename, se_vals):
"""Replace a tuflow placeholder filename with the scenario/event values.
Replaces all of the placeholder values (e.g. ~s1~_~e1~) in a tuflow
filename with the corresponding values provided in the run options string.
If the run options flags are not found in the filename their values will
be appended to the end of the string.
The setup of the returned filename is always the same:
- First replace all placeholders with corresponding flag values.
- s1 == s and e1 == e.
- Append additional e values to end with '_' before first and '+' before others.
- Append additional s values to end with '_' before first and '+' before others.
Args:
filename(str): the filename to update.
se_vals(dict): the scenario/event dictionary containing the 's' and
'e' flags and their corresponding values.
Return:
str - the updated filename.
"""
if 'scenario' not in se_vals:
se_vals['scenario'] = {}
if 'event' not in se_vals:
se_vals['event'] = {}
# Format the key value pairs into a list and combine the scenario and
# event list together and sort them into e, e1, e2, s, s1, s2 order.
scen_keys = ['-' + a for a in se_vals['scenario'].keys()]
scen_vals = se_vals['scenario'].values()
event_keys = ['-' + a for a in se_vals['event'].keys()]
event_vals = se_vals['event'].values()
scen = [list(a) for a in zip(scen_keys, scen_vals)]
event = [list(a) for a in zip(event_keys, event_vals)]
se_vals = scen + event
vals = sorted(se_vals, key=operator.itemgetter(0))
# Build a new filename by replacing or adding the flag values
outname = filename
in_e = False
for v in vals:
placeholder = ''.join(['~', v[0][1:], '~'])
if placeholder in filename:
outname = outname.replace(placeholder, v[1])
elif v[0] == '-e1' and '~e~' in filename and not '-e' in se_vals:
outname = outname.replace('~e~', v[1])
elif v[0] == '-s1' and '~s~' in filename and not '-s' in se_vals:
outname = outname.replace('~s~', v[1])
# DEBUG - CHECK THIS IS TRUE!
elif v[0] == '-e' and '~e1~' in filename:
outname = outname.replace('~e1~', v[1])
elif v[0] == '-s' and '~s1~' in filename:
outname = outname.replace('~s1~', v[1])
else:
if v[0].startswith('-e'):
if not in_e:
prefix = '_'
else:
prefix = '+'
in_e = True
elif v[0].startswith('-s'):
if in_e:
prefix = '_'
else:
prefix = '+'
in_e = False
outname += prefix + v[1]
return outname
def enum(*sequential, **named):
"""Creates a new enum using the values handed to it.
Taken from Alec Thomas on StackOverflow:
http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
Examples:
Can be created and accessed using:
>>> Numbers = enum('ZERO', 'ONE', 'TWO')
>>> Numbers.ZERO
0
>>> Numbers.ONE
1
Or reverse the process to get the name from the value:
>>> Numbers.reverse_mapping[0]
'ZERO'
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
return type(str('Enum'), (), enums)
class FileQueue(object):
"""Queueing class for storing data to go into the database
"""
def __init__(self):
self.items = []
def isEmpty(self):
"""Returns True if list is empty
"""
return self.items == []
def enqueue(self, item):
"""Add an item to the queue
"""
self.items.insert(0, item)
def dequeue(self):
"""Pop an item from the front of the queue.
"""
return self.items.pop()
def size(self):
"""Get the size of the queue
"""
return len(self.items)
class LoadStack(object):
"""Stack class for loading logic."""
def __init__(self, max_size=-1):
self.items = []
self.max_size = max_size
def isEmpty(self):
"""Return True if stack is empty."""
return self.items == []
def add(self, item):
"""Add an item to the stack.
Args:
item: the item to add to the stack.
Raises:
IndexError: if max_size has been set and adding another item would
make the stack bigger than max size.
"""
if self.max_size != -1:
if len(self.items) + 1 > self.max_size:
raise IndexError
self.items.append(item)
def pop(self):
"""Get an item From the stack.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty.
"""
if len(self.items) == 0:
raise IndexError
return self.items.pop()
def peek(self):
"""See what the next item on the stack is, but don't remove it.
Return:
item from the top of the stack.
Raises:
IndexError: if the stack is empty.
"""
if len(self.items) == 0:
raise IndexError
return self.items[-1]
def size(self):
"""Return the number of items in the stack."""
return len(self.items)
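# Illustrative usage sketch for the two containers above (values hypothetical):
# queue = FileQueue(); queue.enqueue('a.dat'); queue.dequeue()  # -> 'a.dat'
# stack = LoadStack(max_size=2); stack.add(1); stack.add(2)
# stack.peek()  # -> 2 without removing it; a third add() raises IndexError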
| 30.151329
| 89
| 0.573928
| 1,889
| 14,744
| 4.426681
| 0.220222
| 0.012916
| 0.00574
| 0.003348
| 0.147931
| 0.114327
| 0.074145
| 0.059555
| 0.048075
| 0.038268
| 0
| 0.009148
| 0.332746
| 14,744
| 488
| 90
| 30.213115
| 0.840821
| 0.51994
| 0
| 0.267081
| 0
| 0
| 0.040543
| 0
| 0
| 0
| 0
| 0.002049
| 0
| 1
| 0.15528
| false
| 0
| 0.031056
| 0
| 0.378882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6077b8cff40a612dbe4bda3b40ee9c7455ae0910
| 1,244
|
py
|
Python
|
FWCore/MessageService/test/u28_cerr_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
FWCore/MessageService/test/u28_cerr_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
FWCore/MessageService/test/u28_cerr_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
# u28_cerr_cfg.py:
#
# Non-regression test configuration file for MessageLogger service:
# distinct threshold level for linked destination, where
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
import FWCore.Framework.test.cmsExceptionsFatal_cff
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options
process.load("FWCore.MessageService.test.Services_cff")
process.MessageLogger = cms.Service("MessageLogger",
categories = cms.untracked.vstring('preEventProcessing'),
destinations = cms.untracked.vstring('cerr'),
statistics = cms.untracked.vstring('cerr_stats'),
cerr_stats = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING'),
output = cms.untracked.string('cerr')
),
u28_output = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
noTimeStamps = cms.untracked.bool(True),
preEventProcessing = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.source = cms.Source("EmptySource")
process.sendSomeMessages = cms.EDAnalyzer("UnitTestClient_A")
process.p = cms.Path(process.sendSomeMessages)
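# Illustrative invocation (assumes a configured CMSSW environment):
# cmsRun FWCore/MessageService/test/u28_cerr_cfg.py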
| 29.619048
| 70
| 0.728296
| 137
| 1,244
| 6.547445
| 0.445255
| 0.173913
| 0.071349
| 0.082497
| 0.185061
| 0.095875
| 0.095875
| 0
| 0
| 0
| 0
| 0.009488
| 0.152733
| 1,244
| 41
| 71
| 30.341463
| 0.841556
| 0.110129
| 0
| 0
| 0
| 0
| 0.118074
| 0.035422
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
607a424b8a6541dc8b215105306da525113497c5
| 2,001
|
py
|
Python
|
content/browse/utils.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | 2
|
2022-01-24T23:30:18.000Z
|
2022-01-26T00:21:22.000Z
|
content/browse/utils.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
content/browse/utils.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
"""
Created: 04 Mar. 2020
Author: Jordan Prechac
"""
from revibe._helpers import const
from administration.utils import retrieve_variable
from content.models import Song, Album, Artist
from content.serializers import v1 as cnt_ser_v1
# -----------------------------------------------------------------------------
# _DEFAULT_LIMIT = 50
# limit_variable = retrieve_variable()
# try:
# limit_variable = int(limit_variable)
# _DEFAULT_LIMIT = max(min(limit_variable, 100), 10)
# except ValueError as ve:
# print("Could not read browse section default limit variable")
# print(ve)
def _DEFAULT_LIMIT():
limit_variable = retrieve_variable("browse_section_default_limit", 50)
try:
limit_variable = int(limit_variable)
return max(min(limit_variable, 100), 10)
except ValueError as ve:
print("Could not read browse section default limit variable")
print(ve)
return 50
_name = "name"
_type = "type"
_results = "results"
_endpoint = "endpoint"
def _browse_song(annotation, limit=None, platform=const.REVIBE_STRING, **options):
limit = limit if limit else _DEFAULT_LIMIT()
songs = Song.display_objects.filter(platform=platform).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.SongSerializer(songs, many=True).data
return options
def _browse_album(annotation, limit=None, **options):
limit = limit if limit else _DEFAULT_LIMIT()
albums = Album.display_objects.filter(platform=const.REVIBE_STRING).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.AlbumSerializer(albums, many=True).data
return options
def _browse_artist(annotation, limit=None, **options):
limit = limit if limit else _DEFAULT_LIMIT()
artists = Artist.display_objects.filter(platform=const.REVIBE_STRING).annotate(count=annotation).order_by('-count')[:limit]
options[_results] = cnt_ser_v1.ArtistSerializer(artists, many=True).data
return options
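# Illustrative call (the annotation is a hypothetical Django aggregation; real
# callers supply their own, and the extra kwargs flow through **options):
# from django.db.models import Count
# section = _browse_song(Count('streams'), limit=10, name="Trending", type="songs")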
| 34.5
| 127
| 0.710645
| 248
| 2,001
| 5.504032
| 0.294355
| 0.095238
| 0.023443
| 0.054945
| 0.580952
| 0.562637
| 0.515751
| 0.465934
| 0.43663
| 0.43663
| 0
| 0.015836
| 0.147926
| 2,001
| 58
| 128
| 34.5
| 0.784751
| 0.192404
| 0
| 0.1875
| 0
| 0
| 0.075578
| 0.017489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.40625
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
607a5c3dca22f0d225966ee1a7f786fad681858e
| 4,433
|
py
|
Python
|
Segmentation/model.py
|
vasetrendafilov/ComputerVision
|
5fcbe57fb1609ef44733aed0fab8c69d71fae21f
|
[
"MIT"
] | null | null | null |
Segmentation/model.py
|
vasetrendafilov/ComputerVision
|
5fcbe57fb1609ef44733aed0fab8c69d71fae21f
|
[
"MIT"
] | null | null | null |
Segmentation/model.py
|
vasetrendafilov/ComputerVision
|
5fcbe57fb1609ef44733aed0fab8c69d71fae21f
|
[
"MIT"
] | null | null | null |
"""
Authors: Elena Vasileva, Zoran Ivanovski
E-mail: elenavasileva95@gmail.com, mars@feit.ukim.edu.mk
Course: Mashinski vid, FEEIT, Spring 2021
Date: 09.03.2021
Description: function library
model operations: construction, loading, saving
Python version: 3.6
"""
# python imports
from keras.layers import Conv2D, Conv2DTranspose, MaxPool2D, UpSampling2D, Input, Concatenate
from keras.models import Model, model_from_json
def load_model(model_path, weights_path):
"""
loads a pre-trained model configuration and calculated weights
:param model_path: path of the serialized model configuration file (.json) [string]
:param weights_path: path of the serialized model weights file (.h5) [string]
:return: model - keras model object
"""
# --- load model configuration ---
json_file = open(model_path, 'r')
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json) # load model architecture
model.load_weights(weights_path) # load weights
return model
def construct_model_unet_orig(input_shape):
"""
construct semantic segmentation model architecture (encoder-decoder)
:param input_shape: list of input dimensions (height, width, depth) [tuple]
:return: model - Keras model object
"""
input = Input(shape=input_shape)
# --- encoder ---
conv1 = Conv2D(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(input)
conv11 = Conv2D(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPool2D(pool_size=(2, 2))(conv11)
conv2 = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv22 = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPool2D(pool_size=(2, 2))(conv22)
conv3 = Conv2D(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv33 = Conv2D(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPool2D(pool_size=(2, 2))(conv33)
conv4 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv44 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
pool4 = MaxPool2D(pool_size=(2, 2))(conv44)
# --- decoder ---
conv5 = Conv2D(filters=1024, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv55 = Conv2D(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
up1 = UpSampling2D(size=(2, 2))(conv55)
merge1 = Concatenate(axis=3)([conv44, up1])
deconv1 = Conv2DTranspose(filters=512, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
deconv11 = Conv2DTranspose(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv1)
up2 = UpSampling2D(size=(2, 2))(deconv11)
merge2 = Concatenate(axis=3)([conv33, up2])
deconv2 = Conv2DTranspose(filters=256, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge2)
deconv22 = Conv2DTranspose(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv2)
up3 = UpSampling2D(size=(2, 2))(deconv22)
merge3 = Concatenate(axis=3)([conv22, up3])
deconv3 = Conv2DTranspose(filters=128, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
deconv33 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv3)
up4 = UpSampling2D(size=(2, 2))(deconv33)
merge4 = Concatenate(axis=3)([conv11, up4])
deconv4 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(merge4)
deconv44 = Conv2DTranspose(filters=64, kernel_size=3, activation='relu', padding='same', kernel_initializer='he_normal')(deconv4)
output = Conv2DTranspose(filters=input_shape[2], kernel_size=1, padding='same', activation='sigmoid')(deconv44)
# Keras 2 uses the plural inputs/outputs keyword arguments.
model = Model(inputs=input, outputs=output)
return model
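# Illustrative usage sketch (the input shape is a hypothetical example; the
# four pooling steps above need height and width divisible by 16):
# model = construct_model_unet_orig((256, 256, 1))
# model.summary()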
| 47.666667
| 134
| 0.723438
| 560
| 4,433
| 5.578571
| 0.248214
| 0.060819
| 0.06338
| 0.120999
| 0.514725
| 0.473111
| 0.455186
| 0.455186
| 0.455186
| 0.455186
| 0
| 0.058425
| 0.135123
| 4,433
| 92
| 135
| 48.184783
| 0.75639
| 0.187007
| 0
| 0.045455
| 0
| 0
| 0.089805
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
607a8097d7dacf319776f076e01a66d964066e6e
| 631
|
py
|
Python
|
Day24_Python/part1.py
|
Rog3rSm1th/PolyglotOfCode
|
a70f50b5c882139727cbdf75144a8346cb6c538b
|
[
"MIT"
] | 7
|
2021-03-23T14:08:01.000Z
|
2021-05-17T16:24:16.000Z
|
Day24_Python/part1.py
|
Rog3rSm1th/PolyglotOfCode
|
a70f50b5c882139727cbdf75144a8346cb6c538b
|
[
"MIT"
] | null | null | null |
Day24_Python/part1.py
|
Rog3rSm1th/PolyglotOfCode
|
a70f50b5c882139727cbdf75144a8346cb6c538b
|
[
"MIT"
] | 2
|
2021-04-29T22:03:02.000Z
|
2022-01-18T15:55:42.000Z
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from itertools import combinations
from functools import reduce  # reduce is not a builtin in Python 3
def solve(packages, groups):
total = sum(packages)
result = 9999999999999999
# we should use `for i in range(1, len(packages) - 2)` but it would
# make the computation significantly slower
for i in range(1, 7):
for c in combinations(packages, i):
if sum(c) == total / groups:
quantum_entanglement = reduce(lambda a, b: a * b, list(c))
result = min(result, quantum_entanglement)
return result
packages = [int(num) for num in open('input.txt')]
print(solve(packages, 3))
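# Illustrative example (hypothetical packages): solve([1, 2, 3, 4, 5, 6], 3)
# looks for subsets summing to 21 / 3 = 7 and returns 6, the smallest product
# ("quantum entanglement"), from the subset (1, 6).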
| 30.047619
| 74
| 0.62916
| 86
| 631
| 4.593023
| 0.651163
| 0.065823
| 0.03038
| 0.055696
| 0.060759
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048832
| 0.253566
| 631
| 21
| 75
| 30.047619
| 0.789809
| 0.239303
| 0
| 0
| 0
| 0
| 0.018868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
607b94247f45130f250e70bb74d679462531a2da
| 5,921
|
py
|
Python
|
generate-album.py
|
atomicparade/photo-album
|
437bc18bb00da5ce27216d03b48b78d60a0ad3fd
|
[
"CC0-1.0",
"Unlicense"
] | null | null | null |
generate-album.py
|
atomicparade/photo-album
|
437bc18bb00da5ce27216d03b48b78d60a0ad3fd
|
[
"CC0-1.0",
"Unlicense"
] | null | null | null |
generate-album.py
|
atomicparade/photo-album
|
437bc18bb00da5ce27216d03b48b78d60a0ad3fd
|
[
"CC0-1.0",
"Unlicense"
] | null | null | null |
import configparser
import math
import re
import urllib
from pathlib import Path
from PIL import Image
def get_images(image_directory, thumbnail_directory, thumbnail_size):
thumbnail_directory = Path(thumbnail_directory)
for file in [file for file in thumbnail_directory.glob('*')]:
file.unlink()
thumbnail_directory.mkdir(mode=0o755, exist_ok=True)
files = [file for file in Path(image_directory).glob('*')]
images = []
for file in files:
thumbnail_name = Path(thumbnail_directory, file.stem + '.jpg')
image = Image.open(file)
image.thumbnail(thumbnail_size)
top_left = (0, 0)
if image.width < thumbnail_size[0]:
top_left = (math.floor(abs(image.width - thumbnail_size[0]) / 2), top_left[1])
if image.height < thumbnail_size[1]:
top_left = (top_left[0], math.floor(abs(image.height - thumbnail_size[1]) / 2))
final_image = Image.new('RGB', thumbnail_size, (0, 0, 0))
final_image.paste(image, top_left)
final_image.save(thumbnail_name, 'jpeg')
if '_' in file.stem:
description = file.stem.split('_', maxsplit=1)[1]
else:
description = file.stem
images.append({
'path': str(file),
'thumbnail': thumbnail_name,
'description': description,
'stem': file.stem
})
def get_image_file_number(image):
if re.match(r'^(\d+)', image['stem']) is not None:
return int(re.split(r'^(\d+)', image['stem'])[1])
else:
return 999
images = sorted(images, key=get_image_file_number)
return images
def write_html(file, images, page_title, thumbnail_size):
file.write(f'''\
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{page_title}</title>
<link rel="stylesheet" type="text/css" href="album.css">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
</head>
<body>
<h1>{page_title}</h1>
<div id="album">
\
''')
# write thumbnails
for image, idx in zip(images, range(1, len(images) + 1)):
thumbnail_path = urllib.parse.quote(str(image['thumbnail']).replace('\\', '/'))
file.write(f'''\
<p id="thumbnail-{idx}" class="thumbnail"><img src="{thumbnail_path}" alt="{image['description']}" width="{thumbnail_size[0]}" height="{thumbnail_size[1]}"></p>\
''')
file.write(f'''\
<div id="large-view">
<p id="instructions" class="image">Hover over an image</p>
''')
# write images
for image, idx in zip(images, range(1, len(images) + 1)):
image_path = urllib.parse.quote(str(image['path']).replace('\\', '/'))
file.write(f'''\
<p id="image-{idx}" class="image"><img src="{image_path}" alt="{image['description']}"><br>{image['description']}</p>
''')
file.write(f'''\
</div>
</div>
</body>
</html>
''')
def write_css(file, images):
file.write('''\
@media print {
body {
font-family: sans-serif;
}
.thumbnail {
display: none;
}
#instructions {
display: none;
}
.image img {
max-width: 100%;
margin-bottom: 1em;
}
}
@media
screen and (max-width: 768px),
/* Tablets and smartphones */
screen and (hover: none)
{
body {
background: #333;
color: #eee;
font-family: sans-serif;
margin: 1em;
padding: 0;
}
h1 {
margin-top: 0;
}
.thumbnail {
display: none;
}
#instructions {
display: none;
}
.image:nth-child(2) img {
margin-top: 0;
}
.image img {
max-width: calc(100vw - 3em);
}
}
@media
screen and (min-width: 769px) and (hover: hover),
/* IE10 and IE11 (they don't support (hover: hover) */
screen and (min-width: 769px) and (-ms-high-contrast: none),
screen and (min-width: 769px) and (-ms-high-contrast: active)
{
body {
background: #333;
color: #eee;
font-family: sans-serif;
margin: 2em 60% 2em 4em;
padding: 0;
}
.album {
display: flex;
flex-direction: row;
flex-wrap: wrap;
}
.thumbnail {
display: inline-block;;
margin: 0 .5em .2em 0;
}
.image {
background: #333;
display: none;
position: fixed;
top: 2em;
left: 40%;
text-align: center;
height: 90vh;
width: calc(60% - 4em);
}
.image img {
display: block;
max-height: 92%;
max-width: 100%;
margin: 0 auto;
}
#instructions {
display: block;
top: 4em;
}
''')
if len(images) > 0:
for idx in range(1, len(images) + 1):
file.write(f'''\
#thumbnail-{idx}:hover ~ #large-view #image-{idx}\
''')
if idx < len(images):
file.write('''\
,
''')
file.write('''\
{
display: block;
}
''')
file.write('''\
}
''')
def main():
config = configparser.ConfigParser()
config.read('./config')
image_directory = config['settings']['image_directory']
output_css = config['settings']['output_css']
output_html = config['settings']['output_html']
page_title = config['settings']['page_title']
thumbnail_directory = config['settings']['thumbnail_directory']
thumbnail_width = int(config['settings']['thumbnail_width'])
thumbnail_height = int(config['settings']['thumbnail_height'])
thumbnail_size = (thumbnail_width, thumbnail_height)
out_html = open(output_html, 'w')
out_css = open(output_css, 'w')
images = get_images(image_directory, thumbnail_directory, thumbnail_size)
write_html(out_html, images, page_title, thumbnail_size)
write_css(out_css, images)
out_html.close()
out_css.close()
if __name__ == '__main__':
main()
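# Illustrative `./config` file read by main() above (all values are
# hypothetical; only the keys are taken from the code):
# [settings]
# image_directory = photos
# thumbnail_directory = thumbs
# thumbnail_width = 200
# thumbnail_height = 150
# output_html = index.html
# output_css = album.css
# page_title = My Album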
| 22.861004
| 161
| 0.570174
| 704
| 5,921
| 4.667614
| 0.261364
| 0.05143
| 0.018259
| 0.017346
| 0.236458
| 0.176202
| 0.139379
| 0.110164
| 0.077298
| 0.053561
| 0
| 0.022917
| 0.270394
| 5,921
| 258
| 162
| 22.949612
| 0.737731
| 0.004898
| 0
| 0.306931
| 0
| 0.009901
| 0.474444
| 0.049244
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024752
| false
| 0
| 0.029703
| 0
| 0.069307
| 0.004951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
607c6c12c8adb56d644bb03375a7a0619419eb1b
| 4,385
|
py
|
Python
|
tests/test_sne_truth.py
|
LSSTDESC/sims_TruthCatalog
|
348f5d231997eed387aaa6e3fd4218c126e14cdb
|
[
"BSD-3-Clause"
] | 2
|
2020-02-04T22:59:41.000Z
|
2020-03-19T00:17:09.000Z
|
tests/test_sne_truth.py
|
LSSTDESC/sims_TruthCatalog
|
348f5d231997eed387aaa6e3fd4218c126e14cdb
|
[
"BSD-3-Clause"
] | 7
|
2020-02-10T21:59:19.000Z
|
2021-04-27T16:31:26.000Z
|
tests/test_sne_truth.py
|
LSSTDESC/sims_TruthCatalog
|
348f5d231997eed387aaa6e3fd4218c126e14cdb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Unit tests for SNIa truth catalog code.
"""
import os
import unittest
import sqlite3
import numpy as np
import pandas as pd
from desc.sims_truthcatalog import SNeTruthWriter, SNSynthPhotFactory
class SNSynthPhotFactoryTestCase(unittest.TestCase):
"""
Test case class for SNIa synthetic photometry factory class.
"""
def test_SNSythPhotFactory(self):
"""
Test some flux calculations using the underlying SNObject
and SyntheticPhotometry classes.
"""
sp_factory = SNSynthPhotFactory(z=0.6322702169418335,
t0=61719.9950436545,
x0=4.2832710977804034e-06,
x1=-1.207738485943195,
c=-0.0069750402968899936,
snra=55.26407314527358,
sndec=-40.81575605788344)
mjds = (61689.150791, 61697.354470, 61712.258685)
bands = ('z', 'i', 'r')
fluxes = (2.6401569864737633, 71.18561504923377, 1048.0327802379868)
for mjd, band, flux in zip(mjds, bands, fluxes):
sp = sp_factory.create(mjd)
self.assertAlmostEqual(sp.calcFlux(band), flux)
class SNeTruthWriterTestCase(unittest.TestCase):
"""
Test case class for SNIa truth catalog generation class.
"""
def setUp(self):
self.outfile = 'test_sne_truth_cat.db'
self.data_dir = os.path.join(os.environ['SIMS_TRUTHCATALOG_DIR'],
'data')
sn_db_file = os.path.join(self.data_dir,
'sne_cosmoDC2_v1.1.4_MS_DDF_small.db')
self.sne_truth_writer = SNeTruthWriter(self.outfile, sn_db_file)
def tearDown(self):
if os.path.isfile(self.outfile):
os.remove(self.outfile)
def test_truth_summary(self):
"""Test that the truth_summary columns are filled out as expected."""
self.sne_truth_writer.write()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from truth_summary', conn)
zeros = np.zeros(len(df))
ones = np.ones(len(df))
np.testing.assert_equal(df['is_variable'], ones)
np.testing.assert_equal(df['is_pointsource'], ones)
for band in 'ugrizy':
flux_col = f'flux_{band}'
np.testing.assert_equal(df[flux_col], zeros)
flux_col += '_noMW'
np.testing.assert_equal(df[flux_col], zeros)
def test_auxiliary_truth(self):
"""
Test that the columns from the sne_params table are transcribed
correctly.
"""
self.sne_truth_writer.write_auxiliary_truth()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_auxiliary_info', conn)
np.testing.assert_equal(self.sne_truth_writer.sne_df['snid_in'],
df['id'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['galaxy_id'],
df['host_galaxy'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['snra_in'],
df['ra'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['t0_in'],
df['t0'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['z_in'],
df['redshift'].to_numpy())
def test_variability_truth(self):
"""
Test some expected values for a SNIa in the test SNe catalog
using a small opsim db table.
"""
opsim_db_file = os.path.join(self.data_dir,
'minion_1016_desc_dithered_v4_small.db')
self.sne_truth_writer.write_variability_truth(opsim_db_file,
max_rows=60)
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_variability_truth', conn)
my_object = 'MS_10195_1375'
self.assertIn(my_object, df['id'].to_list())
my_df = df.query(f'id == "{my_object}"')
for visit in (1425850, 1433860, 1495410):
self.assertIn(visit, my_df['obsHistID'].to_list())
if __name__ == '__main__':
unittest.main()
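# Illustrative invocation (requires the SIMS_TRUTHCATALOG_DIR environment
# variable to point at a checkout containing the data/ files used in setUp):
# python tests/test_sne_truth.py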
| 41.367925
| 77
| 0.580844
| 517
| 4,385
| 4.696325
| 0.346228
| 0.032949
| 0.044481
| 0.066722
| 0.312191
| 0.291186
| 0.250824
| 0.22117
| 0.170923
| 0.170923
| 0
| 0.083944
| 0.315393
| 4,385
| 105
| 78
| 41.761905
| 0.72485
| 0.109008
| 0
| 0.068493
| 0
| 0
| 0.097587
| 0.030231
| 0
| 0
| 0
| 0
| 0.164384
| 1
| 0.082192
| false
| 0
| 0.082192
| 0
| 0.191781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
607dfd1e508e9c983974056179f2dfae1594aa2a
| 10,053
|
py
|
Python
|
testsite/management/commands/load_test_transactions.py
|
gikoluo/djaodjin-saas
|
badd7894ac327191008a1b3a0ebd0d07b55908c3
|
[
"BSD-2-Clause"
] | null | null | null |
testsite/management/commands/load_test_transactions.py
|
gikoluo/djaodjin-saas
|
badd7894ac327191008a1b3a0ebd0d07b55908c3
|
[
"BSD-2-Clause"
] | null | null | null |
testsite/management/commands/load_test_transactions.py
|
gikoluo/djaodjin-saas
|
badd7894ac327191008a1b3a0ebd0d07b55908c3
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime, logging, random
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from django.template.defaultfilters import slugify
from django.utils.timezone import utc
from saas.backends.razorpay_processor import RazorpayBackend
from saas.models import Plan, Transaction, get_broker
from saas.utils import datetime_or_now
from saas.settings import PROCESSOR_ID
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Load the database with random transactions (testing purposes).
"""
USE_OF_SERVICE = 0
PAY_BALANCE = 1
REDEEM = 2
REFUND = 3
CHARGEBACK = 4
WRITEOFF = 5
FIRST_NAMES = (
'Anthony',
'Alexander',
'Alexis',
'Alicia',
'Ashley',
'Benjamin',
'Bruce',
'Chloe',
'Christopher',
'Daniel',
'David',
'Edward',
'Emily',
'Emma',
'Ethan',
'Grace',
'Isabella',
'Jacob',
'James',
'Jayden',
'Jennifer',
'John',
'Julia',
'Lily',
'Lucie',
'Luis',
'Matthew',
'Michael',
'Olivia',
'Ryan',
'Samantha',
'Samuel',
'Scott',
'Sophia',
'Williom',
)
LAST_NAMES = (
'Smith',
'Johnson',
'Williams',
'Jones',
'Brown',
'Davis',
'Miller',
'Wilson',
'Moore',
'Taylor',
'Anderson',
'Thomas',
'Jackson',
'White',
'Harris',
'Martin',
'Thompson',
'Garcia',
'Martinez',
'Robinson',
'Clark',
'Rogriguez',
'Lewis',
'Lee',
'Walker',
'Hall',
'Allen',
'Young',
'Hernandez',
'King',
'Wright',
'Lopez',
'Hill',
'Green',
'Baker',
'Gonzalez',
'Nelson',
'Mitchell',
'Perez',
'Roberts',
'Turner',
'Philips',
'Campbell',
'Parker',
'Collins',
'Stewart',
'Sanchez',
'Morris',
'Rogers',
'Reed',
'Cook',
'Bell',
'Cooper',
'Richardson',
'Cox',
'Ward',
'Peterson',
)
def add_arguments(self, parser):
parser.add_argument('--provider',
action='store', dest='provider',
default=settings.SAAS['BROKER']['GET_INSTANCE'],
help='create sample subscribers on this provider')
def handle(self, *args, **options):
#pylint: disable=too-many-locals,too-many-statements
from saas.managers.metrics import month_periods # avoid import loop
from saas.models import (Charge, ChargeItem, Organization, Plan,
Subscription)
RazorpayBackend.bypass_api = True
now = datetime.datetime.utcnow().replace(tzinfo=utc)
from_date = now
from_date = datetime.datetime(
year=from_date.year, month=from_date.month, day=1)
if args:
from_date = datetime.datetime.strptime(
args[0], '%Y-%m-%d')
# Create a set of 3 plans
broker = get_broker()
Plan.objects.get_or_create(
slug='basic',
defaults={
'title': "Basic",
'description': "Basic Plan",
'period_amount': 24900,
'broker_fee_percent': 0,
'period_type': 4,
'advance_discount': 1000,
'organization': broker,
'is_active': True
})
Plan.objects.get_or_create(
slug='medium',
defaults={
'title': "Medium",
'description': "Medium Plan",
'period_amount': 24900,
'broker_fee_percent': 0,
'period_type': 4,
'organization': broker,
'is_active': True
})
Plan.objects.get_or_create(
slug='premium',
defaults={
'title': "Premium",
'description': "Premium Plan",
'period_amount': 18900,
'broker_fee_percent': 0,
'period_type': 4,
'advance_discount': 81,
'organization': broker,
'is_active': True
})
# Create income transactions that represent a growing business.
provider = Organization.objects.get(slug=options['provider'])
processor = Organization.objects.get(pk=PROCESSOR_ID)
for end_period in month_periods(from_date=from_date):
nb_new_customers = random.randint(0, 9)
for _ in range(nb_new_customers):
queryset = Plan.objects.filter(
organization=provider, period_amount__gt=0)
plan = queryset[random.randint(0, queryset.count() - 1)]
created = False
trials = 0
while not created:
try:
first_name = self.FIRST_NAMES[random.randint(
0, len(self.FIRST_NAMES)-1)]
last_name = self.LAST_NAMES[random.randint(
0, len(self.LAST_NAMES)-1)]
full_name = '%s %s' % (first_name, last_name)
slug = slugify('demo%d' % random.randint(1, 1000))
customer, created = Organization.objects.get_or_create(
slug=slug, full_name=full_name)
#pylint: disable=catching-non-exception
except IntegrityError:
trials = trials + 1
if trials > 10:
raise RuntimeError(
'impossible to create a new customer after 10 trials.')
Organization.objects.filter(pk=customer.id).update(
created_at=end_period)
subscription = Subscription.objects.create(
organization=customer, plan=plan,
ends_at=now + datetime.timedelta(days=31))
Subscription.objects.filter(
pk=subscription.id).update(created_at=end_period)
# Insert some churn in %
churn_rate = 2
all_subscriptions = Subscription.objects.filter(
plan__organization=provider)
nb_churn_customers = (all_subscriptions.count()
* churn_rate // 100)
subscriptions = random.sample(list(all_subscriptions),
all_subscriptions.count() - nb_churn_customers)
for subscription in subscriptions:
nb_periods = random.randint(1, 6)
transaction_item = Transaction.objects.new_subscription_order(
subscription, nb_natural_periods=nb_periods,
created_at=end_period)
if transaction_item.dest_amount < 50:
continue
transaction_item.orig_amount = transaction_item.dest_amount
transaction_item.orig_unit = transaction_item.dest_unit
transaction_item.save()
charge = Charge.objects.create(
created_at=transaction_item.created_at,
amount=transaction_item.dest_amount,
customer=subscription.organization,
description='Charge for %d periods' % nb_periods,
last4=1241,
exp_date=datetime_or_now(),
processor=processor,
processor_key=str(transaction_item.pk),
# XXX We can't do that yet because of
# ``PROCESSOR_BACKEND.charge_distribution(self)``
# unit=transaction_item.dest_unit,
state=Charge.CREATED)
charge.created_at = transaction_item.created_at
charge.save()
ChargeItem.objects.create(
invoiced=transaction_item, charge=charge)
charge.payment_successful()
churned = all_subscriptions.exclude(
pk__in=[subscription.pk for subscription in subscriptions])
for subscription in churned:
subscription.ends_at = end_period
subscription.save()
self.stdout.write("%d new and %d churned customers at %s" % (
nb_new_customers, nb_churn_customers, end_period))
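# Illustrative invocation (Django management command; the provider slug is
# hypothetical):
# python manage.py load_test_transactions --provider my-broker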
| 34.90625
| 80
| 0.552372
| 979
| 10,053
| 5.52809
| 0.412666
| 0.036031
| 0.017554
| 0.013304
| 0.150776
| 0.110865
| 0.074649
| 0.074649
| 0.074649
| 0.063932
| 0
| 0.011879
| 0.355217
| 10,053
| 287
| 81
| 35.027875
| 0.823048
| 0.170496
| 0
| 0.105042
| 0
| 0
| 0.133864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008403
| false
| 0.004202
| 0.05042
| 0
| 0.096639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
607e7c80f119c1c9c55adbd71e291f8ae17a8b06
| 2,801
|
py
|
Python
|
seq2seq_pt/s2s/xutils.py
|
magic282/SEASS
|
b780bf45b47d15145a148e5992bcd157c119d338
|
[
"MIT"
] | 36
|
2018-05-25T01:09:21.000Z
|
2022-01-25T02:45:18.000Z
|
seq2seq_pt/s2s/xutils.py
|
magic282/SEASS
|
b780bf45b47d15145a148e5992bcd157c119d338
|
[
"MIT"
] | 11
|
2018-06-30T14:19:21.000Z
|
2021-03-19T01:27:09.000Z
|
seq2seq_pt/s2s/xutils.py
|
magic282/SEASS
|
b780bf45b47d15145a148e5992bcd157c119d338
|
[
"MIT"
] | 10
|
2018-06-06T03:15:51.000Z
|
2022-01-25T02:45:44.000Z
|
import sys
import struct
def save_sf_model(model):
name_dicts = {'encoder.word_lut.weight': 'SrcWordEmbed_Embed_W',
'encoder.forward_gru.linear_input.weight': 'EncGRUL2R_GRU_W',
'encoder.forward_gru.linear_input.bias': 'EncGRUL2R_GRU_B',
'encoder.forward_gru.linear_hidden.weight': 'EncGRUL2R_GRU_U',
'encoder.backward_gru.linear_input.weight': 'EncGRUR2L_GRU_W',
'encoder.backward_gru.linear_input.bias': 'EncGRUR2L_GRU_B',
'encoder.backward_gru.linear_hidden.weight': 'EncGRUR2L_GRU_U',
'decoder.word_lut.weight': 'TrgWordEmbed_Embed_W',
'decoder.rnn.layers.0.linear_input.weight': 'DecGRU_GRU_W',
'decoder.rnn.layers.0.linear_input.bias': 'DecGRU_GRU_B',
'decoder.rnn.layers.0.linear_hidden.weight': 'DecGRU_GRU_U',
'decoder.attn.linear_pre.weight': 'Alignment_ConcatAtt_W',
'decoder.attn.linear_pre.bias': 'Alignment_ConcatAtt_B',
'decoder.attn.linear_q.weight': 'Alignment_ConcatAtt_U',
'decoder.attn.linear_v.weight': 'Alignment_ConcatAtt_v',
'decoder.readout.weight': 'Readout_Linear_W',
'decoder.readout.bias': 'Readout_Linear_b',
'decIniter.initer.weight': 'DecInitial_Linear_W',
'decIniter.initer.bias': 'DecInitial_Linear_b',
'generator.0.weight': 'Scoring_Linear_W',
'generator.0.bias': 'Scoring_Linear_b', }
nParams = sum([p.nelement() for p in model.parameters()])
# logger.info('* number of parameters: %d' % nParams)
b_count = 0
of = open('model', 'wb')
for name, param in model.named_parameters():
# logger.info('[{0}] [{1}] [{2}]'.format(name, param.size(), param.nelement()))
SF_name = name_dicts[name]
# print(SF_name)
byte_name = bytes(SF_name, 'utf-16-le')
name_size = len(byte_name)
byte_name_size = name_size.to_bytes(4, sys.byteorder)
of.write(byte_name_size)
of.write(byte_name)
b_count += len(byte_name_size)
b_count += len(byte_name)
d = param.data.cpu()
if param.dim() == 1:
d = d.unsqueeze(0)
elif not SF_name.endswith('Embed_W'):
d = d.transpose(0, 1).contiguous()
for dim in d.size():
dim_byte = dim.to_bytes(4, sys.byteorder)
of.write(dim_byte)
b_count += len(dim_byte)
datas = d.view(-1).numpy().tolist()
float_array = struct.pack('f' * len(datas), *datas)
b_count += len(float_array)
of.write(float_array)
of.close()
# print('Total write {0} bytes'.format(b_count))
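# Sketch of the binary layout written per parameter above, inferred from the
# write calls: a 4-byte (sys.byteorder) name length, the UTF-16-LE encoded
# name, one 4-byte integer per tensor dimension, then the raw float32 data.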
| 47.474576
| 87
| 0.593717
| 349
| 2,801
| 4.481375
| 0.277937
| 0.035806
| 0.035806
| 0.044118
| 0.182225
| 0.108696
| 0.071611
| 0
| 0
| 0
| 0
| 0.012225
| 0.269904
| 2,801
| 58
| 88
| 48.293103
| 0.752567
| 0.06819
| 0
| 0
| 0
| 0
| 0.387716
| 0.254894
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.04
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
608009fe5a0b2fb99700c8345bb126060be7366e
| 4,219
|
py
|
Python
|
pml-services/pml_storage.py
|
Novartis/Project-Mona-Lisa
|
f8fcef5b434470e2a17e97fceaef46615eda1b31
|
[
"Apache-2.0"
] | 3
|
2017-10-17T14:49:54.000Z
|
2021-01-12T23:37:33.000Z
|
pml-services/pml_storage.py
|
Novartis/Project-Mona-Lisa
|
f8fcef5b434470e2a17e97fceaef46615eda1b31
|
[
"Apache-2.0"
] | 10
|
2019-12-16T20:37:22.000Z
|
2021-05-21T14:35:39.000Z
|
pml-services/pml_storage.py
|
Novartis/Project-Mona-Lisa
|
f8fcef5b434470e2a17e97fceaef46615eda1b31
|
[
"Apache-2.0"
] | 1
|
2018-09-12T17:06:18.000Z
|
2018-09-12T17:06:18.000Z
|
# Copyright 2017 Novartis Institutes for BioMedical Research Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import boto3
from boto3.dynamodb.conditions import Key
from random import randint
import os
import base64
class PMLStorage:
""" Project Mona Lisa Storage class.
"""
def __init__(self, storage_name):
self.storage_name = storage_name
def get_bucket(self):
"""
Returns:
(obj): The boto3 AWS S3 bucket object.
"""
s3 = boto3.resource('s3', region_name='TODO')
return s3.Bucket(self.storage_name)
def get_item_from_storage(self, item_key):
""" Get method for a image data in ML-PRJ image storage.
Args:
bucket_name (str): name for the storage.
item_key (str): key or filename for the item in storage.
Returns:
item (obj)
"""
# get the image data in the S3 bucket
img_obj = self.get_bucket().Object(item_key)
return str(img_obj.get()['Body'].read())
def post_item_in_storage(self, key, body, type='png'):
""" Posting collected image data in storage.
Args:
key (str): The unique key.
body (obj): the bulk data to be stored.
type (str): file suffix. The default is 'png'.
Returns:
bool: True if successful, otherwise, an error will
be thrown.
"""
self.get_bucket().put_object(
Key=key+str('.')+type,
Body=body,
ServerSideEncryption='AES256',
ContentType='img/'+type,
)
return True
def download_imgs(self, load_fns, save_dir):
""" Downloads all files in <load_fns> from storage to
the directory <save_dir>.
Args:
load_fns (list(str)): A list of strings of the filenames
of the files to be downloaded.
save_dir (str): A string of the source directory to
save the files. Formatted as:
/full/path/to/dir ... without a '/' character at
the end of the <save_dir>.
Returns:
bool: True if successful, otherwise, an error will
be thrown.
"""
print('downloading images from s3 . . .')
bucket = self.get_bucket()
pre_existing_fns = os.listdir(save_dir)
count = 0
for filename in load_fns:
count += 1
print(count)
if filename in pre_existing_fns:
continue
bucket.download_file(filename, save_dir + '/' + filename)
return True
def get_all_filenames(self):
""" Gets all filenames in storage.
Returns:
list(str): A list of all of the filenames
in the bucket, as a list of strings.
"""
iterobjs = self.get_bucket().objects.all()
filenames = [obj.key for obj in iterobjs]
return filenames
def remove_items(self, filenames):
""" Removes, from storage, all files from <filenames>.
Args:
filenames list(str): List of filenames, where
each filename is a string, of the filename contained
in the bucket.
Returns:
bool: True if successful, otherwise an error is thrown.
"""
bucket = self.get_bucket()
fn_objects = [{'Key': fn} for fn in filenames]
bucket.delete_objects(
Delete={
'Objects': fn_objects
}
)
return True
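# Illustrative usage sketch (bucket and file names are hypothetical):
# storage = PMLStorage('my-image-bucket')
# storage.post_item_in_storage('scan-001', png_bytes)  # stored as scan-001.png
# filenames = storage.get_all_filenames()
# storage.download_imgs(filenames, '/tmp/images')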
| 35.754237
| 584
| 0.576677
| 516
| 4,219
| 4.608527
| 0.337209
| 0.014718
| 0.027334
| 0.021447
| 0.076114
| 0.06434
| 0.06434
| 0.06434
| 0.046257
| 0.046257
| 0
| 0.009045
| 0.344868
| 4,219
| 117
| 585
| 36.059829
| 0.851302
| 0.471913
| 0
| 0.104167
| 0
| 0
| 0.038908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145833
| false
| 0
| 0.125
| 0
| 0.416667
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6081b2d6561823daafb84e5aca2d32f42f1d920c
| 3,196
|
py
|
Python
|
binan.py
|
Nightleaf0512/PythonCryptoCurriencyPriceChecker
|
9531d4a6978d280b4ca759d7ba24d3edf77fe5b2
|
[
"CC0-1.0"
] | null | null | null |
binan.py
|
Nightleaf0512/PythonCryptoCurriencyPriceChecker
|
9531d4a6978d280b4ca759d7ba24d3edf77fe5b2
|
[
"CC0-1.0"
] | null | null | null |
binan.py
|
Nightleaf0512/PythonCryptoCurriencyPriceChecker
|
9531d4a6978d280b4ca759d7ba24d3edf77fe5b2
|
[
"CC0-1.0"
] | null | null | null |
from binance.client import Client
import PySimpleGUI as sg
api_key = "your_binance_apikey"
secret_key = "your_binance_secretkey"
client = Client(api_key=api_key, api_secret=secret_key)
# price
def get_price(coin):
return round(float(client.get_symbol_ticker(symbol=f"{coin}USDT")['price']), 5)
def column_layout_price(coin):
col =[[sg.Text(f"{get_price(coin)}", font=("Arial", 9, 'bold'), size=(10,1), pad=(15,10), key=coin)]]
return col
# 24h percentchange
def price_change_24h(coin):
return round(float(client.get_ticker(symbol=f"{coin}USDT")["priceChangePercent"]), 2)
def column_layout_change(coin):
if price_change_24h(coin) == 0:
return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="black", key=f"{coin}change")]]
elif price_change_24h(coin) > 0:
return [[sg.Text(f"+{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="green", key=f"{coin}change")]]
return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="red", key=f"{coin}change")]]
def update_24h_change(coin):
if price_change_24h(coin) == 0:
window[f"{coin}change"].update(f"+{price_change_24h(coin)}%", text_color="black")
elif price_change_24h(coin) > 0:
window[f"{coin}change"].update(f"+{price_change_24h(coin)}%", text_color="green")
elif price_change_24h(coin) < 0:
window[f"{coin}change"].update(f"{price_change_24h(coin)}%", text_color="red")
# GUI
sg.theme('DefaultNoMoreNagging')
# Tabs
def tabs(coin):
tab_layout = [[sg.Image("{}.png".format(coin), size=(50,50)),
sg.Text("Price", font=("Arial", 10, 'bold'), size=(7,1), pad=(40,40)), sg.Text("24h change", font=("Arial", 10, 'bold'), size=(10,1), pad=(10,40))],
[sg.Text(f"{coin}/USDT", font=("Arial", 9, 'bold')), sg.Column(column_layout_price(coin)), sg.Column(column_layout_change(coin))]]
return tab_layout
# Layout
layout = [[sg.Text("Crypto Currencies", font=("Arial", 10, 'bold'))],
[sg.TabGroup([[sg.Tab("BTC", tabs("BTC"), border_width="18"), sg.Tab("XRP", tabs("XRP"), border_width="18"), sg.Tab("DOGE", tabs("DOGE"), border_width="18")]])]]
window = sg.Window("NightLeaf Crypto", layout)
def coin_window(*coins):
for coin in coins:
globals()[f"{coin}_last_price"] = 1
while True:
event,values = window.read(timeout=600)
if event == sg.WIN_CLOSED:
break
for coin in coins:
update_24h_change(coin)
price = get_price(coin)
if price != globals()[f"{coin}_last_price"]:
if price > globals()[f"{coin}_last_price"]:
window[f"{coin}"].update(f"{price} 🠕", text_color="green")
elif price < globals()[f"{coin}_last_price"]:
window[f"{coin}"].update(f"{price} 🠗", text_color="red")
globals()[f"{coin}_last_price"] = price
a_list =["BTC", "XRP", "DOGE"]
coin_window(*a_list)
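# Setup note: api_key and secret_key above are placeholders to replace, and
# tabs() expects BTC.png, XRP.png and DOGE.png icon files next to the script.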
| 42.613333
| 172
| 0.595432
| 453
| 3,196
| 4.028698
| 0.203091
| 0.043836
| 0.092055
| 0.118356
| 0.489315
| 0.387945
| 0.333699
| 0.31726
| 0.30411
| 0.30411
| 0
| 0.040173
| 0.205569
| 3,196
| 74
| 173
| 43.189189
| 0.677826
| 0.012203
| 0
| 0.115385
| 0
| 0
| 0.217095
| 0.056874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134615
| false
| 0
| 0.038462
| 0.038462
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
608211636fe7f97cd14766030248070475866342
| 1,026
|
py
|
Python
|
saleor/graphql/ushop/bulk_mutations.py
|
nlkhagva/saleor
|
0d75807d08ac49afcc904733724ac870e8359c10
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/ushop/bulk_mutations.py
|
nlkhagva/saleor
|
0d75807d08ac49afcc904733724ac870e8359c10
|
[
"CC-BY-4.0"
] | 1
|
2022-02-15T03:31:12.000Z
|
2022-02-15T03:31:12.000Z
|
saleor/graphql/ushop/bulk_mutations.py
|
nlkhagva/ushop
|
abf637eb6f7224e2d65d62d72a0c15139c64bb39
|
[
"CC-BY-4.0"
] | null | null | null |
import graphene
from ...unurshop.ushop import models
from ..core.mutations import BaseBulkMutation, ModelBulkDeleteMutation
class UshopBulkDelete(ModelBulkDeleteMutation):
class Arguments:
ids = graphene.List(
graphene.ID, required=True, description="List of ushop IDs to delete."
)
class Meta:
description = "Deletes shops."
model = models.Shop
permissions = ("page.manage_pages",)
class UshopBulkPublish(BaseBulkMutation):
class Arguments:
ids = graphene.List(
graphene.ID, required=True, description="List of ushop IDs to (un)publish."
)
is_published = graphene.Boolean(
required=True, description="Determine if ushops will be published or not."
)
class Meta:
description = "Publish ushops."
model = models.Shop
permissions = ("page.manage_pages",)
@classmethod
def bulk_action(cls, queryset, is_published):
queryset.update(is_published=is_published)
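# Usage sketch (assumes a configured Saleor/Django environment; the IDs below
# are hypothetical). The Base*Mutation machinery resolves the GraphQL IDs and
# checks the "page.manage_pages" permission before calling bulk_action:
#   qs = models.Shop.objects.filter(pk__in=[1, 2, 3])
#   UshopBulkPublish.bulk_action(qs, is_published=True)   # publish all three
#   UshopBulkPublish.bulk_action(qs, is_published=False)  # unpublish again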
| 28.5
| 87
| 0.660819
| 105
| 1,026
| 6.390476
| 0.485714
| 0.065574
| 0.102832
| 0.074516
| 0.354694
| 0.354694
| 0.354694
| 0.232489
| 0.232489
| 0.232489
| 0
| 0
| 0.251462
| 1,026
| 35
| 88
| 29.314286
| 0.873698
| 0
| 0
| 0.37037
| 0
| 0
| 0.164717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
608242d64b36989e754c60c8ef5cc3a72e5f5595
| 3,180
|
py
|
Python
|
src/main/NLP/STRING_MATCH/scopus_ha_module_match.py
|
alvinajacquelyn/COMP0016_2
|
fd57706a992e1e47af7c802320890e93a15fc0c7
|
[
"MIT"
] | null | null | null |
src/main/NLP/STRING_MATCH/scopus_ha_module_match.py
|
alvinajacquelyn/COMP0016_2
|
fd57706a992e1e47af7c802320890e93a15fc0c7
|
[
"MIT"
] | null | null | null |
src/main/NLP/STRING_MATCH/scopus_ha_module_match.py
|
alvinajacquelyn/COMP0016_2
|
fd57706a992e1e47af7c802320890e93a15fc0c7
|
[
"MIT"
] | null | null | null |
import sys
import json
from main.LOADERS.publication_loader import PublicationLoader
from main.MONGODB_PUSHERS.mongodb_pusher import MongoDbPusher
from main.NLP.PREPROCESSING.preprocessor import Preprocessor
class ScopusStringMatch_HAmodule():
def __init__(self):
self.loader = PublicationLoader()
self.mongodb_pusher = MongoDbPusher()
self.preprocessor = Preprocessor()
def __progress(self, count, total, custom_text, suffix=''):
"""
Visualises progress for a process given a current count and a total count
"""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '*' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s %s %s\r' %(bar, percents, '%', custom_text, suffix))
sys.stdout.flush()
def __read_keywords(self, data: dict) -> None:
"""
Given a set of publications in a dictionary, performs pre-processing for all string type data fields.
Performs look-up on HA keyword occurences in a document.
Results are pushed to MongoDB (backed-up in JSON file - scopus_matches.json).
"""
resulting_data = {}
counter = 0
keywords = self.preprocessor.preprocess_keywords("main/HA_KEYWORDS/HA_Keywords.csv")
num_publications = len(data)
num_keywords = len(keywords)
for doi, publication in data.items():
# visualise the progress on a commandline
self.__progress(counter, num_publications, "processing scopus_matches.json")
counter += 1
description = self.preprocessor.tokenize(publication["Description"])
            ha_occurrences = {}  # accumulator for HA keywords found in a given document
            for n in range(num_keywords):
                ha_num = n + 1
                ha = "HA " + str(ha_num) if ha_num < num_keywords else "Misc"  # label used when documenting occurrences
                ha_occurrences[ha] = {"Word_Found": []}
                for keyword in keywords[n]:
                    if keyword in description:
                        ha_occurrences[ha]["Word_Found"].append(keyword)
                if len(ha_occurrences[ha]["Word_Found"]) == 0:
                    ha_occurrences.pop(ha, None)  # clear out empty occurrences
            resulting_data[doi] = {"DOI": doi, "Related_HA": ha_occurrences}
print()
self.mongodb_pusher.matched_scopus(resulting_data) # push the processed data to MongoDB
print()
# Record the same data locally, acts as a backup
with open('main/NLP/STRING_MATCH/HA_MODULE_RESULTS/scopus_matches_modules.json', 'w') as outfile:
json.dump(resulting_data, outfile)
def run(self):
"""
Controller method for self class
Loads modules from a pre-loaded pickle file
"""
data = self.loader.load_all()
self.__read_keywords(data)
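# Typical invocation, as a minimal sketch (assumes the MongoDB connection and
# the HA keyword CSV referenced above are in place):
#   ScopusStringMatch_HAmodule().run()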
| 42.972973
| 136
| 0.610377
| 372
| 3,180
| 5.051075
| 0.38172
| 0.038318
| 0.00479
| 0.028739
| 0.036722
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004935
| 0.299057
| 3,180
| 74
| 137
| 42.972973
| 0.838044
| 0.203459
| 0
| 0.041667
| 0
| 0
| 0.090248
| 0.042344
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.145833
| 0
| 0.25
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60848af0afddec7b1d22b291ac9bd888d5799291
| 642
|
py
|
Python
|
tools/urls.py
|
Cyberdeep/archerysec
|
a4b1a0c4f736bd70bdea693c7a7c479a69bb0f7d
|
[
"BSD-3-Clause"
] | null | null | null |
tools/urls.py
|
Cyberdeep/archerysec
|
a4b1a0c4f736bd70bdea693c7a7c479a69bb0f7d
|
[
"BSD-3-Clause"
] | null | null | null |
tools/urls.py
|
Cyberdeep/archerysec
|
a4b1a0c4f736bd70bdea693c7a7c479a69bb0f7d
|
[
"BSD-3-Clause"
] | 1
|
2018-08-12T17:29:35.000Z
|
2018-08-12T17:29:35.000Z
|
# _
# /\ | |
# / \ _ __ ___| |__ ___ _ __ _ _
# / /\ \ | '__/ __| '_ \ / _ \ '__| | | |
# / ____ \| | | (__| | | | __/ | | |_| |
# /_/ \_\_| \___|_| |_|\___|_| \__, |
# __/ |
# |___/
# Copyright (C) 2017-2018 ArcherySec
# This file is part of ArcherySec Project.
from django.conf.urls import url
from tools import views
app_name = 'tools'
urlpatterns = [
url(r'^sslscan/$',
views.sslscan,
name='sslscan'),
url(r'^sslscan_result/$',
views.sslscan_result,
name='sslscan_result'),
]
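# Reverse-resolution sketch (the URL prefix depends on where this urlconf is
# included, so the paths below are assumptions):
#   reverse('tools:sslscan')         # -> '/sslscan/'
#   reverse('tools:sslscan_result')  # -> '/sslscan_result/'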
| 25.68
| 43
| 0.426791
| 43
| 642
| 5.046512
| 0.581395
| 0.179724
| 0.101382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020779
| 0.400312
| 642
| 24
| 44
| 26.75
| 0.542857
| 0.534268
| 0
| 0
| 0
| 0
| 0.196296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6084e8784a7c8fb58869bc711fe87b2383807ac6
| 7,484
|
py
|
Python
|
api/vm/base/utils.py
|
erigones/esdc-ce
|
2e39211a8f5132d66e574d3a657906c7d3c406fe
|
[
"Apache-2.0"
] | 97
|
2016-11-15T14:44:23.000Z
|
2022-03-13T18:09:15.000Z
|
api/vm/base/utils.py
|
erigones/esdc-ce
|
2e39211a8f5132d66e574d3a657906c7d3c406fe
|
[
"Apache-2.0"
] | 334
|
2016-11-17T19:56:57.000Z
|
2022-03-18T10:45:53.000Z
|
api/vm/base/utils.py
|
erigones/esdc-ce
|
2e39211a8f5132d66e574d3a657906c7d3c406fe
|
[
"Apache-2.0"
] | 33
|
2017-01-02T16:04:13.000Z
|
2022-02-07T19:20:24.000Z
|
from core.celery.config import ERIGONES_TASK_USER
from que.tasks import execute, get_task_logger
from vms.models import SnapshotDefine, Snapshot, BackupDefine, Backup, IPAddress
logger = get_task_logger(__name__)
def is_vm_missing(vm, msg):
"""
Check failed command output and return True if VM is not on compute node.
"""
check_str = vm.hostname + ': No such zone configured'
return check_str in msg
def vm_delete_snapshots_of_removed_disks(vm):
"""
This helper function deletes snapshots for VM with changing disk IDs. Bug #chili-363
++ Bug #chili-220 - removing snapshot and backup definitions for removed disks.
"""
removed_disk_ids = [Snapshot.get_real_disk_id(i) for i in vm.create_json_update_disks().get('remove_disks', [])]
if removed_disk_ids:
Snapshot.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
SnapshotDefine.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
Backup.objects.filter(vm=vm, disk_id__in=removed_disk_ids, last=True).update(last=False)
BackupDefine.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
return removed_disk_ids
def _reset_allowed_ip_usage(vm, ip):
"""Helper function used below. It sets the IP usage back to VM [1] only if other VMs, which use the address in
allowed_ips are in notcreated state."""
if all(other_vm.is_notcreated() for other_vm in ip.vms.exclude(uuid=vm.uuid)):
ip.usage = IPAddress.VM
ip.save()
def _is_ip_ok(ip_queryset, vm_ip, vm_network_uuid):
"""Helper function used below. Return True if vm_ip (string) is "dhcp" or is found in the IPAddress queryset
and has the expected usage flag and subnet uuid."""
if vm_ip == 'dhcp':
return True
return any(ip.ip == vm_ip and ip.subnet.uuid == vm_network_uuid and ip.usage == IPAddress.VM_REAL
for ip in ip_queryset)
def vm_update_ipaddress_usage(vm):
"""
    This helper function is responsible for updating IPAddress.usage and IPAddress.vm of server IPs (#chili-615,1029)
    by removing the association from IPs that are not set on any NIC, and:
        - when a VM is deleted, all IP usages are set to IPAddress.VM (in DB), and
        - when a VM is created or updated, all IP usages are set to IPAddress.VM_REAL (on hypervisor).
    Always call this function _only_ after vm.json_active is synced with vm.json!!!
    In order to properly understand this code you have to understand the association between the IPAddress and Vm models.
This function may raise a ValueError if the VM and IP address were not properly associated (e.g. via vm_define_nic).
"""
current_ips = set(vm.json_active_get_ips(primary_ips=True, allowed_ips=False))
current_ips.update(vm.json_get_ips(primary_ips=True, allowed_ips=False))
current_allowed_ips = set(vm.json_active_get_ips(primary_ips=False, allowed_ips=True))
current_allowed_ips.update(vm.json_get_ips(primary_ips=False, allowed_ips=True))
# Return old IPs back to IP pool, so they can be used again
vm.ipaddress_set.exclude(ip__in=current_ips).update(vm=None, usage=IPAddress.VM)
# Remove association of removed vm.allowed_ips
for ip in vm.allowed_ips.exclude(ip__in=current_allowed_ips):
ip.vms.remove(vm)
_reset_allowed_ip_usage(vm, ip)
if vm.is_notcreated():
# Server was deleted from hypervisor
vm.ipaddress_set.filter(usage=IPAddress.VM_REAL).update(usage=IPAddress.VM)
for ip in vm.allowed_ips.filter(usage=IPAddress.VM_REAL):
_reset_allowed_ip_usage(vm, ip)
return
# Server was updated or created
vm.ipaddress_set.filter(usage=IPAddress.VM).update(usage=IPAddress.VM_REAL)
vm.allowed_ips.filter(usage=IPAddress.VM).update(usage=IPAddress.VM_REAL)
# The VM configuration may be changed directly on the hypervisor, thus the VM could have
# new NICs and IP addresses which configuration bypassed our API - issue #168.
vm_ips = vm.ipaddress_set.select_related('subnet').filter(usage=IPAddress.VM_REAL)
vm_allowed_ips = vm.allowed_ips.select_related('subnet').filter(usage=IPAddress.VM_REAL)
# For issue #168 we have to check the VM<->IPAddress association in a loop for each NIC, because we need to
# match the NIC.network_uuid with a Subnet.
for nic_id, nic in enumerate(vm.json_active_get_nics(), 1):
network_uuid = nic.get('network_uuid', None)
if network_uuid:
ip = nic.get('ip', '')
allowed_ips = nic.get('allowed_ips', [])
if ip:
logger.debug('VM: %s | NIC ID: %s | NIC network: %s | IP address: %s', vm, nic_id, network_uuid, ip)
if not _is_ip_ok(vm_ips, ip, network_uuid):
raise ValueError('VM %s NIC ID %s IP address %s is not properly associated with VM!' %
(vm, nic_id, ip))
for ip in allowed_ips:
logger.debug('VM: %s | NIC ID: %s | NIC network: %s | IP address: %s', vm, nic_id, network_uuid, ip)
if not _is_ip_ok(vm_allowed_ips, ip, network_uuid):
raise ValueError('VM %s NIC ID %s allowed IP address %s is not properly associated with VM!' %
(vm, nic_id, ip))
else:
raise ValueError('VM %s NIC ID %s does not have a network uuid!' % (vm, nic_id))
def vm_deploy(vm, force_stop=False):
"""
Internal API call used for finishing VM deploy;
Actually cleaning the json and starting the VM.
"""
if force_stop: # VM is running without OS -> stop
cmd = 'vmadm stop %s -F >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
else: # VM is stopped and deployed -> start
cmd = 'vmadm start %s >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
msg = 'Deploy server'
lock = 'vmadm deploy ' + vm.uuid
meta = {
'output': {
'returncode': 'returncode',
'stderr': 'message',
'stdout': 'json'
},
'replace_stderr': ((vm.uuid, vm.hostname),),
'msg': msg, 'vm_uuid': vm.uuid
}
callback = ('api.vm.base.tasks.vm_deploy_cb', {'vm_uuid': vm.uuid})
return execute(ERIGONES_TASK_USER, None, cmd, meta=meta, lock=lock, callback=callback,
queue=vm.node.fast_queue, nolog=True, ping_worker=False, check_user_tasks=False)
def vm_reset(vm):
"""
Internal API call used for VM reboots in emergency situations.
"""
cmd = 'vmadm stop %s -F; vmadm start %s' % (vm.uuid, vm.uuid)
return execute(ERIGONES_TASK_USER, None, cmd, callback=False, queue=vm.node.fast_queue, nolog=True,
check_user_tasks=False)
def vm_update(vm):
"""
Internal API used for updating VM if there were changes in json detected.
"""
logger.info('Running PUT vm_manage(%s), because something (vnc port?) has changed', vm)
from api.vm.base.views import vm_manage
from api.utils.request import get_dummy_request
from api.utils.views import call_api_view
request = get_dummy_request(vm.dc, method='PUT', system_user=True)
res = call_api_view(request, 'PUT', vm_manage, vm.hostname)
if res.status_code == 201:
        logger.warning('PUT vm_manage(%s) was successful: %s', vm, res.data)
else:
logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm, res.status_code, res.status_text, res.data)
| 45.084337
| 120
| 0.67504
| 1,151
| 7,484
| 4.206777
| 0.220678
| 0.037175
| 0.039653
| 0.028914
| 0.332714
| 0.305865
| 0.276332
| 0.237092
| 0.188765
| 0.140851
| 0
| 0.004809
| 0.22194
| 7,484
| 165
| 121
| 45.357576
| 0.826722
| 0.266034
| 0
| 0.096774
| 0
| 0.043011
| 0.152031
| 0.005617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086022
| false
| 0
| 0.064516
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60855d2a5eca63351c8e4dd3352e1f4b94d4ebb3
| 1,133
|
py
|
Python
|
993_Cousins-in-Binary-Tree.py
|
Coalin/Daily-LeetCode-Exercise
|
a064dcdc3a82314be4571d342c4807291a24f69f
|
[
"MIT"
] | 3
|
2018-07-05T05:51:10.000Z
|
2019-05-04T08:35:44.000Z
|
993_Cousins-in-Binary-Tree.py
|
Coalin/Daily-LeetCode-Exercise
|
a064dcdc3a82314be4571d342c4807291a24f69f
|
[
"MIT"
] | null | null | null |
993_Cousins-in-Binary-Tree.py
|
Coalin/Daily-LeetCode-Exercise
|
a064dcdc3a82314be4571d342c4807291a24f69f
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node (normally supplied by LeetCode; defined
# here so the solution runs standalone).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
x_depth = None
x_parent = None
x_found = 0
y_depth = None
y_parent = None
y_found = 0
def dfs(node, parent, depth):
nonlocal x_depth, x_parent, x_found, y_depth, y_found, y_parent
if not node:
return
if node.val == x:
x_depth = depth
x_parent = parent
x_found = 1
elif node.val == y:
y_depth = depth
y_parent = parent
y_found = 1
if x_found and y_found:
return
dfs(node.left, node, depth+1)
if x_found and y_found:
return
dfs(node.right, node, depth+1)
dfs(root, None, 0)
return x_depth == y_depth and x_parent != y_parent
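# A small worked example, as a sketch: 4 and 5 sit at the same depth under
# different parents, so they are cousins, while siblings 2 and 3 are not.
if __name__ == "__main__":
    root = TreeNode(1, TreeNode(2, right=TreeNode(4)), TreeNode(3, right=TreeNode(5)))
    print(Solution().isCousins(root, 4, 5))  # True: same depth, different parents
    print(Solution().isCousins(root, 2, 3))  # False: they share a parent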
| 30.621622
| 75
| 0.485437
| 144
| 1,133
| 3.611111
| 0.236111
| 0.057692
| 0.046154
| 0.034615
| 0.119231
| 0.119231
| 0.119231
| 0.119231
| 0.119231
| 0.119231
| 0
| 0.012539
| 0.436893
| 1,133
| 36
| 76
| 31.472222
| 0.802508
| 0.157988
| 0
| 0.178571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6085668853e75c8ad16430458a509e22fa3b1078
| 5,433
|
py
|
Python
|
docker-images/taigav2/taiga-back/tests/integration/test_tasks_tags.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | 1
|
2017-05-29T19:01:06.000Z
|
2017-05-29T19:01:06.000Z
|
docker-images/taigav2/taiga-back/tests/integration/test_tasks_tags.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
docker-images/taigav2/taiga-back/tests/integration/test_tasks_tags.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2016 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
from collections import OrderedDict
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from .. import factories as f
import pytest
pytestmark = pytest.mark.django_db
def test_api_task_add_new_tags_with_error(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [],
"version": task.version
}
client.login(task.owner)
data["tags"] = [1]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [["back"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [["back", "#cccc"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
data["tags"] = [[1, "#ccc"]]
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400, response.data
assert "tags" in response.data
def test_api_task_add_new_tags_without_colors(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [
["back", None],
["front", None],
["ux", None]
],
"version": task.version
}
client.login(task.owner)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200, response.data
tags_colors = OrderedDict(project.tags_colors)
assert not tags_colors.keys()
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert "back" in tags_colors and "front" in tags_colors and "ux" in tags_colors
def test_api_task_add_new_tags_with_colors(client):
project = f.ProjectFactory.create()
task = f.create_task(project=project, status__project=project, milestone=None, user_story=None)
f.MembershipFactory.create(project=project, user=task.owner, is_admin=True)
url = reverse("tasks-detail", kwargs={"pk": task.pk})
data = {
"tags": [
["back", "#fff8e7"],
["front", None],
["ux", "#fabada"]
],
"version": task.version
}
client.login(task.owner)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200, response.data
tags_colors = OrderedDict(project.tags_colors)
assert not tags_colors.keys()
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert "back" in tags_colors and "front" in tags_colors and "ux" in tags_colors
assert tags_colors["back"] == "#fff8e7"
assert tags_colors["ux"] == "#fabada"
def test_api_create_new_task_with_tags(client):
project = f.ProjectFactory.create(tags_colors=[["front", "#aaaaaa"], ["ux", "#fabada"]])
status = f.TaskStatusFactory.create(project=project)
project.default_task_status = status
project.save()
f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
url = reverse("tasks-list")
data = {
"subject": "Test user story",
"project": project.id,
"tags": [
["back", "#fff8e7"],
["front", "#bbbbbb"],
["ux", None]
]
}
client.login(project.owner)
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201, response.data
task_tags_colors = OrderedDict(response.data["tags"])
assert task_tags_colors["back"] == "#fff8e7"
assert task_tags_colors["front"] == "#aaaaaa"
assert task_tags_colors["ux"] == "#fabada"
tags_colors = OrderedDict(project.tags_colors)
project.refresh_from_db()
tags_colors = OrderedDict(project.tags_colors)
assert tags_colors["back"] == "#fff8e7"
assert tags_colors["ux"] == "#fabada"
assert tags_colors["front"] == "#aaaaaa"
| 33.537037
| 99
| 0.679735
| 711
| 5,433
| 5.063291
| 0.2391
| 0.083333
| 0.035
| 0.031111
| 0.644167
| 0.615833
| 0.584722
| 0.54
| 0.513333
| 0.513333
| 0
| 0.017065
| 0.191055
| 5,433
| 161
| 100
| 33.745342
| 0.802048
| 0.175778
| 0
| 0.622642
| 0
| 0
| 0.080511
| 0
| 0
| 0
| 0
| 0
| 0.216981
| 1
| 0.037736
| false
| 0
| 0.056604
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6085b11c27e78fe767fa371d2b33135501f31145
| 679
|
py
|
Python
|
pytorchocr/postprocess/cls_postprocess.py
|
satchelwu/PaddleOCR2Pytorch
|
6941565cfd4c45470cc3bf9d434c8c32267a33ef
|
[
"Apache-2.0"
] | 3
|
2021-04-23T12:31:07.000Z
|
2021-11-17T04:39:38.000Z
|
pytorchocr/postprocess/cls_postprocess.py
|
satchelwu/PaddleOCR2Pytorch
|
6941565cfd4c45470cc3bf9d434c8c32267a33ef
|
[
"Apache-2.0"
] | null | null | null |
pytorchocr/postprocess/cls_postprocess.py
|
satchelwu/PaddleOCR2Pytorch
|
6941565cfd4c45470cc3bf9d434c8c32267a33ef
|
[
"Apache-2.0"
] | 1
|
2022-03-24T03:31:34.000Z
|
2022-03-24T03:31:34.000Z
|
import torch
class ClsPostProcess(object):
""" Convert between text-label and text-index """
def __init__(self, label_list, **kwargs):
super(ClsPostProcess, self).__init__()
self.label_list = label_list
def __call__(self, preds, label=None, *args, **kwargs):
if isinstance(preds, torch.Tensor):
preds = preds.numpy()
pred_idxs = preds.argmax(axis=1)
decode_out = [(self.label_list[idx], preds[i, idx])
for i, idx in enumerate(pred_idxs)]
if label is None:
return decode_out
label = [(self.label_list[idx], 1.0) for idx in label]
return decode_out, label
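# Minimal usage sketch with a hypothetical two-class label list: each row of
# `preds` is argmax-decoded into a (label, confidence) pair.
if __name__ == "__main__":
    post = ClsPostProcess(label_list=["0", "180"])
    preds = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
    print(post(preds))  # [('0', 0.9), ('180', 0.8)]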
| 33.95
| 62
| 0.60972
| 87
| 679
| 4.505747
| 0.45977
| 0.114796
| 0.132653
| 0.086735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.275405
| 679
| 20
| 63
| 33.95
| 0.79065
| 0.060383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6088008352658e01cb8328f084aae6c78e40e074
| 5,717
|
py
|
Python
|
inference_folder.py
|
aba-ai-learning/Single-Human-Parsing-LIP
|
b1c0c91cef34dabf598231127886b669838fc085
|
[
"MIT"
] | null | null | null |
inference_folder.py
|
aba-ai-learning/Single-Human-Parsing-LIP
|
b1c0c91cef34dabf598231127886b669838fc085
|
[
"MIT"
] | null | null | null |
inference_folder.py
|
aba-ai-learning/Single-Human-Parsing-LIP
|
b1c0c91cef34dabf598231127886b669838fc085
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import os
import argparse
import logging
import numpy as np
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torchvision import transforms
import cv2
import tqdm
from net.pspnet import PSPNet
models = {
'squeezenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='squeezenet'),
'densenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend='densenet'),
'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18'),
'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34'),
'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50'),
'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101'),
'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152')
}
parser = argparse.ArgumentParser(description="Pyramid Scene Parsing Network")
parser.add_argument('--models-path', type=str, default='./checkpoints',
help='Path for storing model snapshots')
parser.add_argument('--backend', type=str,
default='densenet', help='Feature extractor')
parser.add_argument('--num-classes', type=int,
default=20, help="Number of classes.")
args = parser.parse_args()
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
net = nn.DataParallel(net)
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
if not epoch == 'last':
epoch = int(epoch)
net.load_state_dict(torch.load(
snapshot, map_location=torch.device('cpu')))
logging.info(
"Snapshot for epoch {} loaded from {}".format(epoch, snapshot))
if torch.cuda.is_available():
net = net.cuda()
return net, epoch
def get_transform():
transform_image_list = [
# transforms.Resize((192, 256), 3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
return transforms.Compose(transform_image_list)
def show_image(img, pred):
fig, axes = plt.subplots(1, 2)
ax0, ax1 = axes
ax0.get_xaxis().set_ticks([])
ax0.get_yaxis().set_ticks([])
ax1.get_xaxis().set_ticks([])
ax1.get_yaxis().set_ticks([])
classes = np.array(('Background', # always index 0
'Hat', 'Hair', 'Glove', 'Sunglasses',
'UpperClothes', 'Dress', 'Coat', 'Socks',
'Pants', 'Jumpsuits', 'Scarf', 'Skirt',
'Face', 'Left-arm', 'Right-arm', 'Left-leg',
'Right-leg', 'Left-shoe', 'Right-shoe',))
colormap = [(0, 0, 0),
(1, 0.25, 0), (0, 0.25, 0), (0.5, 0, 0.25), (1, 1, 1),
(1, 0.75, 0), (0, 0, 0.5), (0.5, 0.25, 0), (0.75, 0, 0.25),
(1, 0, 0.25), (0, 0.5, 0), (0.5, 0.5, 0), (0.25, 0, 0.5),
(1, 0, 0.75), (0, 0.5, 0.5), (0.25, 0.5, 0.5), (1, 0, 0),
(1, 0.25, 0), (0, 0.75, 0), (0.5, 0.75, 0), ]
cmap = matplotlib.colors.ListedColormap(colormap)
bounds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
h, w, _ = pred.shape
def denormalize(img, mean, std):
c, _, _ = img.shape
for idx in range(c):
img[idx, :, :] = img[idx, :, :] * std[idx] + mean[idx]
return img
img = denormalize(img.cpu().numpy(), [0.485, 0.456, 0.406], [
0.229, 0.224, 0.225])
img = img.transpose(1, 2, 0).reshape((h, w, 3))
pred = pred.reshape((h, w))
# show image
ax0.set_title('img')
ax0.imshow(img)
ax1.set_title('pred')
mappable = ax1.imshow(pred, cmap=cmap, norm=norm)
# colorbar legend
cbar = plt.colorbar(mappable, ax=axes, shrink=0.7, )
cbar.ax.get_yaxis().set_ticks([])
for j, lab in enumerate(classes):
cbar.ax.text(2.3, (j + 0.45) / 20.0, lab, ha='left', va='center', )
plt.savefig(fname="./result.jpg")
print('result saved to ./result.jpg')
plt.show()
def main():
# --------------- model --------------- #
snapshot = os.path.join(args.models_path, args.backend, 'PSPNet_last')
net, starting_epoch = build_network(snapshot, args.backend)
net.eval()
# ------------ load image ------------ #
data_transform = get_transform()
imgfolder = 'ACGPN/ACGPN_testdata/test_img/'
savefolder = 'ACGPN/ACGPN_testdata/test_humanparse/'
if not os.path.exists(savefolder):
os.mkdir(savefolder)
imglist = os.listdir(imgfolder)
for imgname in tqdm.tqdm(imglist):
imgpath = os.path.join(imgfolder, imgname)
print(imgpath)
img = Image.open(imgpath)
img = data_transform(img)
if torch.cuda.is_available():
img = img.cuda()
with torch.no_grad():
pred, _ = net(img.unsqueeze(dim=0))
pred = pred.squeeze(dim=0)
pred = pred.cpu().numpy().transpose(1, 2, 0)
pred = np.asarray(np.argmax(pred, axis=2),
dtype=np.uint8).reshape((256, 192, 1))
        pred_3 = np.repeat(pred, 3, axis=2)
savepath = os.path.join(savefolder, imgname)
cv2.imwrite(savepath, pred_3)
if __name__ == '__main__':
main()
| 36.414013
| 113
| 0.575477
| 784
| 5,717
| 4.098214
| 0.315051
| 0.013694
| 0.009337
| 0.039216
| 0.171491
| 0.154373
| 0.151261
| 0.148148
| 0.131964
| 0.123249
| 0
| 0.079199
| 0.249082
| 5,717
| 156
| 114
| 36.647436
| 0.669229
| 0.034459
| 0
| 0.016129
| 0
| 0
| 0.10855
| 0.012162
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040323
| false
| 0
| 0.104839
| 0
| 0.169355
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
608859a6a315773abcd6cc904b0d54529fd39c40
| 870
|
py
|
Python
|
src/random_policy.py
|
shuvoxcd01/Policy-Evaluation
|
6bfdfdaa67e1dd67edb75fcf5b4664f2584345ac
|
[
"Apache-2.0"
] | null | null | null |
src/random_policy.py
|
shuvoxcd01/Policy-Evaluation
|
6bfdfdaa67e1dd67edb75fcf5b4664f2584345ac
|
[
"Apache-2.0"
] | null | null | null |
src/random_policy.py
|
shuvoxcd01/Policy-Evaluation
|
6bfdfdaa67e1dd67edb75fcf5b4664f2584345ac
|
[
"Apache-2.0"
] | null | null | null |
from src.gridworld_mdp import GridWorld
class EquiprobableRandomPolicy:
def __init__(self):
self.world_model = GridWorld()
def get_prob(self, selected_action, state):
assert state in self.world_model.states
assert selected_action in self.world_model.actions
num_all_possible_actions = 0
times_selected_action_chosen = 0
for next_state in self.world_model.states:
for action in self.world_model.actions:
if self.world_model.reward_fn(state, action, next_state) == -1:
num_all_possible_actions += 1
if action == selected_action:
times_selected_action_chosen += 1
if not num_all_possible_actions:
return 0
prob = times_selected_action_chosen / num_all_possible_actions
return prob
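# Usage sketch, assuming GridWorld exposes iterable `states` and `actions` as
# the loops above require: for any state with at least one valid action, the
# per-action probabilities should sum to 1.
if __name__ == "__main__":
    policy = EquiprobableRandomPolicy()
    state = next(iter(policy.world_model.states))
    print(sum(policy.get_prob(a, state) for a in policy.world_model.actions))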
| 31.071429
| 79
| 0.651724
| 106
| 870
| 4.981132
| 0.330189
| 0.102273
| 0.159091
| 0.121212
| 0.314394
| 0.212121
| 0
| 0
| 0
| 0
| 0
| 0.009788
| 0.295402
| 870
| 27
| 80
| 32.222222
| 0.85155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6088d8cdd329f8f2bb36a7c2566daad3bd603e75
| 6,013
|
py
|
Python
|
sktime/classification/feature_based/_summary_classifier.py
|
Rubiel1/sktime
|
2fd2290fb438224f11ddf202148917eaf9b73a87
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T14:24:52.000Z
|
2021-09-08T14:24:52.000Z
|
sktime/classification/feature_based/_summary_classifier.py
|
Rubiel1/sktime
|
2fd2290fb438224f11ddf202148917eaf9b73a87
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/classification/feature_based/_summary_classifier.py
|
Rubiel1/sktime
|
2fd2290fb438224f11ddf202148917eaf9b73a87
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Summary Classifier.
Pipeline classifier using the basic summary statistics and an estimator.
"""
__author__ = ["MatthewMiddlehurst"]
__all__ = ["SummaryClassifier"]
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sktime.base._base import _clone_estimator
from sktime.classification.base import BaseClassifier
from sktime.transformations.series.summarize import SummaryTransformer
class SummaryClassifier(BaseClassifier):
"""Summary statistic classifier.
This classifier simply transforms the input data using the SummaryTransformer
transformer and builds a provided estimator using the transformed data.
Parameters
----------
summary_functions : str, list, tuple, default=("mean", "std", "min", "max")
Either a string, or list or tuple of strings indicating the pandas
summary functions that are used to summarize each column of the dataset.
Must be one of ("mean", "min", "max", "median", "sum", "skew", "kurt",
"var", "std", "mad", "sem", "nunique", "count").
summary_quantiles : str, list, tuple or None, default=(0.25, 0.5, 0.75)
Optional list of series quantiles to calculate. If None, no quantiles
are calculated.
estimator : sklearn classifier, default=None
An sklearn estimator to be built using the transformed data. Defaults to a
Random Forest with 200 trees.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
random_state : int or None, default=None
Seed for random, integer.
Attributes
----------
n_classes_ : int
Number of classes. Extracted from the data.
classes_ : ndarray of shape (n_classes)
Holds the label for each class.
See Also
--------
SummaryTransformer
Examples
--------
>>> from sktime.classification.feature_based import SummaryClassifier
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
>>> clf = SummaryClassifier(estimator=RandomForestClassifier(n_estimators=10))
>>> clf.fit(X_train, y_train)
SummaryClassifier(...)
>>> y_pred = clf.predict(X_test)
"""
_tags = {
"capability:multivariate": True,
"capability:multithreading": True,
}
def __init__(
self,
summary_functions=("mean", "std", "min", "max"),
summary_quantiles=(0.25, 0.5, 0.75),
estimator=None,
n_jobs=1,
random_state=None,
):
self.summary_functions = summary_functions
self.summary_quantiles = summary_quantiles
self.estimator = estimator
self.n_jobs = n_jobs
self.random_state = random_state
self._transformer = None
self._estimator = None
self._transform_atts = 0
super(SummaryClassifier, self).__init__()
def _fit(self, X, y):
"""Fit a pipeline on cases (X,y), where y is the target variable.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
The training data.
y : array-like, shape = [n_instances]
The class labels.
Returns
-------
self :
Reference to self.
Notes
-----
Changes state by creating a fitted model that updates attributes
ending in "_" and sets is_fitted flag to True.
"""
self._transformer = SummaryTransformer(
summary_function=self.summary_functions,
quantiles=self.summary_quantiles,
)
self._estimator = _clone_estimator(
RandomForestClassifier(n_estimators=200)
if self.estimator is None
else self.estimator,
self.random_state,
)
m = getattr(self._estimator, "n_jobs", None)
if m is not None:
self._estimator.n_jobs = self._threads_to_use
X_t = self._transformer.fit_transform(X, y)
if X_t.shape[0] > len(y):
X_t = X_t.to_numpy().reshape((len(y), -1))
self._transform_atts = X_t.shape[1]
self._estimator.fit(X_t, y)
return self
def _predict(self, X):
"""Predict class values of n instances in X.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
The data to make predictions for.
Returns
-------
y : array-like, shape = [n_instances]
Predicted class labels.
"""
X_t = self._transformer.transform(X)
if X_t.shape[1] < self._transform_atts:
X_t = X_t.to_numpy().reshape((-1, self._transform_atts))
return self._estimator.predict(X_t)
def _predict_proba(self, X):
"""Predict class probabilities for n instances in X.
Parameters
----------
X : 3D np.array of shape = [n_instances, n_dimensions, series_length]
The data to make predict probabilities for.
Returns
-------
y : array-like, shape = [n_instances, n_classes_]
Predicted probabilities using the ordering in classes_.
"""
X_t = self._transformer.transform(X)
if X_t.shape[1] < self._transform_atts:
X_t = X_t.to_numpy().reshape((-1, self._transform_atts))
m = getattr(self._estimator, "predict_proba", None)
if callable(m):
return self._estimator.predict_proba(X_t)
else:
dists = np.zeros((X.shape[0], self.n_classes_))
preds = self._estimator.predict(X_t)
for i in range(0, X.shape[0]):
dists[i, self._class_dictionary[preds[i]]] = 1
return dists
| 32.327957
| 82
| 0.617163
| 727
| 6,013
| 4.903714
| 0.27923
| 0.009537
| 0.028612
| 0.025245
| 0.20533
| 0.190182
| 0.173072
| 0.136045
| 0.11641
| 0.11641
| 0
| 0.009887
| 0.276734
| 6,013
| 185
| 83
| 32.502703
| 0.809841
| 0.479461
| 0
| 0.089552
| 0
| 0
| 0.043055
| 0.017971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0
| 0.074627
| 0
| 0.223881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
608982b20a5decc4b9d80d0fe548b89804a688a8
| 705
|
py
|
Python
|
pypeln/thread/api/to_iterable_thread_test.py
|
quarckster/pypeln
|
f4160d0f4d4718b67f79a0707d7261d249459a4b
|
[
"MIT"
] | 1,281
|
2018-09-20T05:35:27.000Z
|
2022-03-30T01:29:48.000Z
|
pypeln/thread/api/to_iterable_thread_test.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 78
|
2018-09-18T20:38:12.000Z
|
2022-03-30T20:16:02.000Z
|
pypeln/thread/api/to_iterable_thread_test.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 88
|
2018-09-24T10:46:14.000Z
|
2022-03-28T09:34:50.000Z
|
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
import cytoolz as cz
MAX_EXAMPLES = 10
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_from_to_iterable(nums: tp.List[int]):
nums_pl = nums
nums_pl = pl.thread.from_iterable(nums_pl)
nums_pl = cz.partition_all(10, nums_pl)
nums_pl = pl.thread.map(sum, nums_pl)
nums_pl = pl.thread.to_iterable(nums_pl)
nums_pl = list(nums_pl)
nums_py = nums
nums_py = cz.partition_all(10, nums_py)
nums_py = map(sum, nums_py)
nums_py = list(nums_py)
assert nums_py == nums_pl
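# The same pipeline outside the property test, as a quick sketch (order is
# preserved here because map runs with its default single worker):
if __name__ == "__main__":
    stage = pl.thread.from_iterable(range(25))
    stage = cz.partition_all(10, stage)   # (0..9), (10..19), (20..24)
    stage = pl.thread.map(sum, stage)
    print(list(pl.thread.to_iterable(stage)))  # [45, 145, 110]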
| 23.5
| 46
| 0.721986
| 122
| 705
| 3.92623
| 0.311475
| 0.150313
| 0.125261
| 0.100209
| 0.242171
| 0.083507
| 0
| 0
| 0
| 0
| 0
| 0.010363
| 0.178723
| 705
| 29
| 47
| 24.310345
| 0.816926
| 0
| 0
| 0
| 0
| 0
| 0.001418
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.045455
| false
| 0
| 0.272727
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
608b4a53b9f9505194dcc39105a821f7c54562c8
| 12,881
|
py
|
Python
|
acq4/drivers/ThorlabsMFC1/tmcm.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 1
|
2020-06-04T17:04:53.000Z
|
2020-06-04T17:04:53.000Z
|
acq4/drivers/ThorlabsMFC1/tmcm.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 24
|
2016-09-27T17:25:24.000Z
|
2017-03-02T21:00:11.000Z
|
acq4/drivers/ThorlabsMFC1/tmcm.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 4
|
2016-10-19T06:39:36.000Z
|
2019-09-30T21:06:45.000Z
|
from __future__ import print_function
"""
Low-level serial communication for Trinamic TMCM-140-42-SE controller
(used internally for the Thorlabs MFC1)
"""
import serial, struct, time, collections
try:
# this is nicer because it provides deadlock debugging information
from acq4.util.Mutex import RecursiveMutex as RLock
except ImportError:
from threading import RLock
try:
from ..SerialDevice import SerialDevice, TimeoutError, DataError
except ValueError:
## relative imports not allowed when running from command prompt, so
## we adjust sys.path when running the script for testing
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from SerialDevice import SerialDevice, TimeoutError, DataError
def threadsafe(method):
# decorator for automatic mutex lock/unlock
def lockMutex(self, *args, **kwds):
with self.lock:
return method(self, *args, **kwds)
return lockMutex
COMMANDS = {
'rol': 2,
'ror': 1,
'mvp': 4,
'mst': 3,
'rfs': 13,
'sco': 30,
'cco': 32,
'gco': 31,
'sap': 5,
'gap': 6,
'stap': 7,
'rsap': 8,
'sgp': 9,
'ggp': 10,
'stgp': 11,
'rsgp': 12,
'sio': 14,
'gio': 15,
'calc': 19,
'comp': 20,
'jc': 21,
'ja': 22,
'csub': 23,
'rsub': 24,
'wait': 27,
'stop': 28,
'calcx': 33,
'aap': 34,
'agp': 35,
'aco': 39,
'sac': 29,
'stop_application': 128,
'run_application': 129,
'step_application': 130,
'reset_application': 131,
'start_download': 132,
'stop_download': 133,
'get_application_status': 135,
'get_firmware_version': 136,
'restore_factory_settings': 137,
}
PARAMETERS = { # negative values indicate read-only parameters
'target_position': 0,
'actual_position': 1,
'target_speed': 2,
'actual_speed': 3,
'maximum_speed': 4,
'maximum_acceleration': 5,
'maximum_current': 6,
'standby_current': 7,
'target_pos_reached': 8,
'ref_switch_status': 9,
'right_limit_switch_status': 10,
'left_limit_switch_status': 11,
'right_limit_switch_disable': 12,
'left_limit_switch_disable': 13,
'minimum_speed': -130,
'acceleration': -135,
'ramp_mode': 138,
'microstep_resolution': 140,
'soft_stop_flag': 149,
'ramp_divisor': 153,
'pulse_divisor': 154,
'referencing_mode': 193,
'referencing_search_speed': 194,
'referencing_switch_speed': 195,
'distance_end_switches': 196,
'mixed_decay_threshold': 203,
'freewheeling': 204,
'stall_detection_threshold': 205,
'actual_load_value': 206,
'driver_error_flags': -208,
'encoder_position': 209,
'encoder_prescaler': 210,
'fullstep_threshold': 211,
'maximum_encoder_deviation': 212,
'power_down_delay': 214,
'absolute_encoder_value': -215,
}
GLOBAL_PARAMETERS = {
'eeprom_magic': 64,
'baud_rate': 65,
'serial_address': 66,
'ascii_mode': 67,
'eeprom_lock': 73,
'auto_start_mode': 77,
'tmcl_code_protection': 81,
'coordinate_storage': 84,
'tmcl_application_status': 128,
'download_mode': 129,
'tmcl_program_counter': 130,
'tick_timer': 132,
'random_number': -133,
}
OPERATORS = {
'add': 0,
'sub': 1,
'mul': 2,
'div': 3,
'mod': 4,
'and': 5,
'or': 6,
'xor': 7,
'not': 8,
'load': 9,
'swap': 10,
}
CONDITIONS = {
'ze': 0,
'nz': 1,
'eq': 2,
'ne': 3,
'gt': 4,
'ge': 5,
'lt': 6,
'le': 7,
'eto': 8,
'eal': 9,
'esd': 12,
}
STATUS = {
1: "Wrong checksum",
2: "Invalid command",
3: "Wrong type",
4: "Invalid value",
5: "Configuration EEPROM locked",
6: "Command not available",
}
class TMCMError(Exception):
def __init__(self, status):
self.status = status
msg = STATUS[status]
Exception.__init__(self, msg)
class TMCM140(SerialDevice):
def __init__(self, port, baudrate=9600, module_addr=1):
"""
port: serial COM port (eg. COM3 or /dev/ttyACM0)
baudrate: 9600 by default
module_addr: 1 by default
"""
self.lock = RLock(debug=True)
self.port = port
assert isinstance(module_addr, int)
assert module_addr > 0
self.module_addr = module_addr
self.module_str = chr(module_addr+64)
self._waiting_for_reply = False
SerialDevice.__init__(self, port=self.port, baudrate=baudrate)
@threadsafe
def command(self, cmd, type, motor, value):
"""Send a command to the controller and return the reply.
If an error is returned from the controller then raise an exception.
"""
self._send_cmd(cmd, type, motor, value)
return self._get_reply()
def rotate(self, velocity):
"""Begin rotating motor.
velocity: -2047 to +2047
negative values turn left; positive values turn right.
"""
assert isinstance(velocity, int)
assert -2047 <= velocity <= 2047
if velocity < 0:
direction = 'l'
velocity = -velocity
else:
direction = 'r'
self.command('ro'+direction, 0, 0, velocity)
def stop(self):
"""Stop the motor.
Note: does not stop currently running programs.
"""
self.command('mst', 0, 0, 0)
def move(self, pos, relative=False, velocity=None):
"""Rotate until reaching *pos*.
pos: The target position
relative: If True, then *pos* is interpreted as relative to the current
position
velocity: Optionally set the target velocity before moving
"""
assert isinstance(pos, int)
assert -2**32 <= pos < 2**32
if velocity is not None:
assert isinstance(velocity, int)
assert 0 <= velocity < 2048
raise NotImplementedError()
type = 1 if relative else 0
self.command('mvp', type, 0, pos)
def get_param(self, param):
pnum = abs(PARAMETERS[param])
return self.command('gap', pnum, 0, 0)[4]
def __getitem__(self, param):
return self.get_param(param)
def set_param(self, param, value, **kwds):
"""Set a parameter value.
If valus is 'accum' then the parameter is set from the accumulator
register.
"""
pnum = PARAMETERS[param]
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if pnum in (PARAMETERS['maximum_current'], PARAMETERS['standby_current']) and value > 100:
if kwds.get('force', False) is not True:
raise Exception("Refusing to set current > 100 (this can damage the motor). "
"To override, use force=True.")
if value == 'accum':
self.command('aap', pnum, 0, 0)
else:
self.command('sap', pnum, 0, value)
@threadsafe
def set_params(self, **kwds):
"""Set multiple parameters.
The driver is thread-locked until all parameters are set.
"""
for param, value in kwds.items():
self.set_param(param, value)
def __setitem__(self, param, value):
return self.set_param(param, value)
def get_global(self, param):
"""Return a global parameter or copy global to accumulator.
Use param='gpX' to refer to general-purpose variables.
"""
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = abs(GLOBAL_PARAMETERS[param])
bank = 0
return self.command('ggp', pnum, bank, 0)[4]
def set_global(self, param, value):
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = GLOBAL_PARAMETERS[param]
bank = 0
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if value == 'accum':
self.command('agp', pnum, bank, 0)
else:
self.command('sgp', pnum, bank, value)
def stop_program(self):
"""Stop the currently running TMCL program.
"""
self.command('stop_application', 0, 0, 0)
def start_program(self, address=None):
"""Start running TMCL program code from the given address (in bytes?),
or from the current address if None.
"""
if address is None:
self.command('run_application', 0, 0, 0)
else:
self.command('run_application', 1, 0, address)
def start_download(self, address=0):
"""Begin loading TMCL commands into EEPROM .
"""
self.command('start_download', 0, 0, address)
def stop_download(self):
"""Finish loading TMCL commands into EEPROM.
"""
self.command('stop_download', 0, 0, 0)
def write_program(self, address=0):
return ProgramManager(self, address)
def program_status(self):
"""Return current program status:
0=stop, 1=run, 2=step, 3=reset
"""
return self.command('get_application_status', 0, 0, 0)[4]
def calc(self, op, value):
opnum = OPERATORS[op]
if opnum > 9:
raise TypeError("Operator %s invalid for calc" % op)
self.command('calc', opnum, 0, value)
def calcx(self, op):
opnum = OPERATORS[op]
self.command('calcx', opnum, 0, 0)
def comp(self, val):
self.command('comp', 0, 0, val)
def jump(self, *args):
"""Program jump to *addr* (instruction index).
Usage:
jump(address)
jump(cond, address)
Where *cond* may be ze, nz, eq, ne, gt, ge, lt, le, eto, eal, or esd.
"""
if len(args) == 1:
assert isinstance(args[0], int)
self.command('ja', 0, 0, args[0])
else:
cnum = CONDITIONS[args[0]]
self.command('jc', cnum, 0, args[1])
def _send_cmd(self, cmd, type, motor, value):
"""Send a command to the controller.
"""
if self._waiting_for_reply:
raise Exception("Cannot send command; previous reply has not been "
"received yet.")
cmd_num = COMMANDS[cmd]
assert isinstance(type, int)
assert isinstance(motor, int)
# Try packing the value first as unsigned, then signed. (the overlapping
# integer ranges have identical bit representation, so there is no
# ambiguity)
try:
cmd = struct.pack('>BBBBI', self.module_addr, cmd_num, type, motor, value)
except struct.error:
cmd = struct.pack('>BBBBi', self.module_addr, cmd_num, type, motor, value)
chksum = sum(bytearray(cmd)) % 256
out = cmd + struct.pack('B', chksum)
self.write(out)
self._waiting_for_reply = True
def _get_reply(self):
"""Read and parse a reply from the controller.
Raise an exception if an error was reported.
"""
if not self._waiting_for_reply:
raise Exception("No reply expected.")
try:
d = self.read(9)
finally:
self._waiting_for_reply = False
d2 = self.readAll()
if len(d2) > 0:
raise Exception("Error: extra data while reading reply.")
parts = struct.unpack('>BBBBiB', d)
reply_addr, module_addr, status, cmd_num, value, chksum = parts
if chksum != sum(bytearray(d[:-1])) % 256:
raise Exception("Invalid checksum reading from controller.")
if status < 100:
raise TMCMError(status)
return parts
class ProgramManager(object):
def __init__(self, mcm, start=0):
self.mcm = mcm
self.start = start
self.count = 0
def __enter__(self):
self.mcm.lock.acquire()
self.mcm.start_download(self.start)
return self
def __exit__(self, *args):
# insert an extra stop to ensure the program can't leak
# into previously written code.
self.mcm.command('stop', 0, 0, 0)
self.mcm.stop_download()
self.mcm.lock.release()
def __getattr__(self, name):
self.count += 1
return getattr(self.mcm, name)
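# Minimal interactive sketch (the port name is an assumption; adjust for your
# system). Spins the motor briefly, stops it, then reads back the speed.
if __name__ == "__main__":
    dev = TMCM140(port="/dev/ttyACM0", baudrate=9600, module_addr=1)
    dev.set_param("maximum_speed", 500)
    dev.rotate(500)              # turn right at velocity 500
    time.sleep(1.0)              # `time` is imported at the top of this module
    dev.stop()
    print(dev["actual_speed"])   # should fall back to 0 after the stop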
| 27.760776
| 98
| 0.559273
| 1,523
| 12,881
| 4.593565
| 0.313854
| 0.031447
| 0.002573
| 0.013579
| 0.133791
| 0.098914
| 0.066609
| 0.055174
| 0.055174
| 0.055174
| 0
| 0.042631
| 0.322568
| 12,881
| 463
| 99
| 27.820734
| 0.759111
| 0.158373
| 0
| 0.124601
| 0
| 0
| 0.175805
| 0.034439
| 0
| 0
| 0
| 0
| 0.035144
| 1
| 0.099042
| false
| 0
| 0.025559
| 0.009585
| 0.172524
| 0.003195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
608ba712fb9a01badbb4b12d5b51c50071dede95
| 981
|
py
|
Python
|
tests/generators/ios/test_core_data.py
|
brianleungwh/signals
|
d28d2722d681d390ebd21cd668d0b19f2f184451
|
[
"MIT"
] | 3
|
2016-02-04T22:58:03.000Z
|
2017-12-15T13:37:47.000Z
|
tests/generators/ios/test_core_data.py
|
brianleungwh/signals
|
d28d2722d681d390ebd21cd668d0b19f2f184451
|
[
"MIT"
] | 37
|
2015-08-28T20:17:23.000Z
|
2021-12-13T19:48:49.000Z
|
tests/generators/ios/test_core_data.py
|
brianleungwh/signals
|
d28d2722d681d390ebd21cd668d0b19f2f184451
|
[
"MIT"
] | 6
|
2016-01-12T18:51:27.000Z
|
2016-10-19T10:32:45.000Z
|
import unittest
from signals.generators.ios.core_data import get_current_version, get_core_data_from_folder
class CoreDataTestCase(unittest.TestCase):
def test_get_current_version(self):
version_name = get_current_version('./tests/files/doubledummy.xcdatamodeld')
self.assertEqual(version_name, 'dummy 2.xcdatamodel')
version_name = get_current_version('./tests/files/dummy.xcdatamodeld')
self.assertEqual(version_name, 'dummy.xcdatamodel')
def test_get_core_data_from_folder(self):
xcdatamodeld_path = './tests/files/doubledummy.xcdatamodeld'
contents_path = xcdatamodeld_path + '/dummy 2.xcdatamodel/contents'
self.assertEqual(get_core_data_from_folder(xcdatamodeld_path), contents_path)
xcdatamodeld_path = './tests/files/dummy.xcdatamodeld'
contents_path = xcdatamodeld_path + '/dummy.xcdatamodel/contents'
self.assertEqual(get_core_data_from_folder(xcdatamodeld_path), contents_path)
| 49.05
| 91
| 0.768603
| 115
| 981
| 6.2
| 0.252174
| 0.134642
| 0.095372
| 0.084151
| 0.645161
| 0.586255
| 0.339411
| 0.232819
| 0.232819
| 0.232819
| 0
| 0.00237
| 0.139653
| 981
| 19
| 92
| 51.631579
| 0.842417
| 0
| 0
| 0.133333
| 0
| 0
| 0.236493
| 0.192661
| 0
| 0
| 0
| 0
| 0.266667
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|