def check_1a(answer):
import sympy
return answer == sympy.Rational(1,3)
def check_2a(answer):
import sympy
return answer == sympy.Symbol('y')
def check_2b(answer):
import sympy
return answer == sympy.Symbol('chi')
def check_2c(answer):
import sympy
return answer == sympy.Symbol('alpha3')
def check_2d(answer):
import sympy
return answer == sympy.Symbol(r'\mathcal{L}')
def check_3a(answer):
import sympy
a = sympy.Symbol('a')
b = sympy.Symbol('b')
c = sympy.Symbol('c')
d = sympy.Symbol('d')
z = sympy.Symbol('z')
return answer == (a*z+b)/(c*z+d)
def check_3b(answer):
import sympy
n = sympy.Symbol('n')
return answer == 2**n-1
def check_3c(answer):
import sympy
temp = sympy.Rational(1)
for i in range(19):
temp = 1+1/temp
return answer == sympy.fraction(temp)[0]
def check_4a(answer):
import sympy
temp = (sympy.sqrt(3)+sympy.sqrt(2))/(sympy.sqrt(3)-sympy.sqrt(2))
temp = sympy.fraction(temp)
temp = (temp[0]*temp[0],temp[1]*temp[0])
temp = (temp[0].simplify(), temp[1].simplify())
temp = temp[0]/temp[1]
return answer == temp
def check_5a(answer):
import sympy
temp = 3+4*sympy.I
temp = temp**2
temp = sympy.im(temp)
return answer == temp
def check_6a(answer):
import sympy
x = sympy.Symbol('x')
rec = (3/x+x)/2
x_cur = 1
for i in range(9):
x_cur = rec.subs(x, x_cur)
return answer == x_cur
def check_8a(answer):
import sympy
return answer == (1+sympy.sqrt(5))/2
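# --- Usage sketch (added; not part of the original file) ---
# A minimal demonstration of how these checkers might be invoked,
# assuming only that sympy is installed; the answers below are the
# expected values encoded in the checks themselves.
if __name__ == '__main__':
    import sympy
    assert check_1a(sympy.Rational(1, 3))
    assert check_3b(2**sympy.Symbol('n') - 1)
    assert check_8a((1 + sympy.sqrt(5)) / 2)
    print('all sample checks passed')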
|
{"hexsha": "2b05499903b1966c2bcba46542fd89e6376b3b99", "size": 1674, "ext": "py", "lang": "Python", "max_stars_repo_path": "verify_1.py", "max_stars_repo_name": "bolverk/cyborg_math", "max_stars_repo_head_hexsha": "298224dadd4218ebcc266b0a135e15595a359343", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-02-25T22:29:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T21:49:08.000Z", "max_issues_repo_path": "verify_1.py", "max_issues_repo_name": "bolverk/cyborg_math", "max_issues_repo_head_hexsha": "298224dadd4218ebcc266b0a135e15595a359343", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "verify_1.py", "max_forks_repo_name": "bolverk/cyborg_math", "max_forks_repo_head_hexsha": "298224dadd4218ebcc266b0a135e15595a359343", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-14T21:02:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-05T19:12:11.000Z", "avg_line_length": 18.6, "max_line_length": 70, "alphanum_fraction": 0.5698924731, "include": true, "reason": "import sympy", "num_tokens": 529}
|
[STATEMENT]
lemma simple_integral_null_set:
assumes "simple_function M u" "\<And>x. 0 \<le> u x" and "N \<in> null_sets M"
shows "(\<integral>\<^sup>Sx. u x * indicator N x \<partial>M) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<integral>\<^sup>S x. u x * indicator N x \<partial>M = 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<integral>\<^sup>S x. u x * indicator N x \<partial>M = 0
[PROOF STEP]
have "AE x in M. indicator N x = (0 :: ennreal)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. AE x in M. indicator N x = 0
[PROOF STEP]
using \<open>N \<in> null_sets M\<close>
[PROOF STATE]
proof (prove)
using this:
N \<in> null_sets M
goal (1 subgoal):
1. AE x in M. indicator N x = 0
[PROOF STEP]
by (auto simp: indicator_def intro!: AE_I[of _ _ N])
[PROOF STATE]
proof (state)
this:
AE x in M. indicator N x = 0
goal (1 subgoal):
1. \<integral>\<^sup>S x. u x * indicator N x \<partial>M = 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
AE x in M. indicator N x = 0
[PROOF STEP]
have "(\<integral>\<^sup>Sx. u x * indicator N x \<partial>M) = (\<integral>\<^sup>Sx. 0 \<partial>M)"
[PROOF STATE]
proof (prove)
using this:
AE x in M. indicator N x = 0
goal (1 subgoal):
1. \<integral>\<^sup>S x. u x * indicator N x \<partial>M = \<integral>\<^sup>S x. 0 \<partial>M
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
AE x in M. indicator N x = 0
simple_function M u
0 \<le> u ?x
N \<in> null_sets M
goal (1 subgoal):
1. \<integral>\<^sup>S x. u x * indicator N x \<partial>M = \<integral>\<^sup>S x. 0 \<partial>M
[PROOF STEP]
apply (intro simple_integral_cong_AE)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>AE x in M. indicator N x = 0; simple_function M u; \<And>x. 0 \<le> u x; N \<in> null_sets M\<rbrakk> \<Longrightarrow> simple_function M (\<lambda>x. u x * indicator N x)
2. \<lbrakk>AE x in M. indicator N x = 0; simple_function M u; \<And>x. 0 \<le> u x; N \<in> null_sets M\<rbrakk> \<Longrightarrow> simple_function M (\<lambda>x. 0)
3. \<lbrakk>AE x in M. indicator N x = 0; simple_function M u; \<And>x. 0 \<le> u x; N \<in> null_sets M\<rbrakk> \<Longrightarrow> AE x in M. u x * indicator N x = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<integral>\<^sup>S x. u x * indicator N x \<partial>M = \<integral>\<^sup>S x. 0 \<partial>M
goal (1 subgoal):
1. \<integral>\<^sup>S x. u x * indicator N x \<partial>M = 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<integral>\<^sup>S x. u x * indicator N x \<partial>M = \<integral>\<^sup>S x. 0 \<partial>M
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<integral>\<^sup>S x. u x * indicator N x \<partial>M = \<integral>\<^sup>S x. 0 \<partial>M
goal (1 subgoal):
1. \<integral>\<^sup>S x. u x * indicator N x \<partial>M = 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<integral>\<^sup>S x. u x * indicator N x \<partial>M = 0
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1264, "file": null, "length": 13}
|
[STATEMENT]
lemma VLambda_eq_D2: "\<lbrakk>VLambda A f = VLambda A g; x \<in> elts A\<rbrakk> \<Longrightarrow> f x = g x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>VLambda A f = VLambda A g; x \<in> elts A\<rbrakk> \<Longrightarrow> f x = g x
[PROOF STEP]
by (metis beta)
|
{"llama_tokens": 124, "file": "ZFC_in_HOL_ZFC_Cardinals", "length": 1}
|
using JuMP, EAGO
m = Model()
EAGO.register_eago_operators!(m)
@variable(m, -1 <= x[i=1:4] <= 1)
@variable(m, -23.072345451695163 <= q <= 18.38163265505858)
add_NL_constraint(m, :(sigmoid(-0.6895881478464707 + 0.42824131807966515*sigmoid(-0.9657925733535908 + -0.20993433529810623*$(x[1]) + -0.9858887282433288*$(x[2]) + -0.7926811137744871*$(x[3]) + -0.8802726339564089*$(x[4])) + -0.8758217593243995*sigmoid(-0.29556778373435044 + 0.6695148008497993*$(x[1]) + 0.7620585252555783*$(x[2]) + 0.8362720647957991*$(x[3]) + 0.8751574333999192*$(x[4])) + -0.3819126296826152*sigmoid(-0.8627823044260601 + -0.3188988197195952*$(x[1]) + -0.18058802482760727*$(x[2]) + -0.7255181409249616*$(x[3]) + -0.8658172698743405*$(x[4])) + -0.7523924735752416*sigmoid(0.5506348394687617 + 0.9730675890304568*$(x[1]) + 0.0064031244486910666*$(x[2]) + -0.590531971380631*$(x[3]) + -0.964793293764588*$(x[4]))) + sigmoid(0.5903696565764314 + 0.6045010784464115*sigmoid(-0.9657925733535908 + -0.20993433529810623*$(x[1]) + -0.9858887282433288*$(x[2]) + -0.7926811137744871*$(x[3]) + -0.8802726339564089*$(x[4])) + 0.19992590084675577*sigmoid(-0.29556778373435044 + 0.6695148008497993*$(x[1]) + 0.7620585252555783*$(x[2]) + 0.8362720647957991*$(x[3]) + 0.8751574333999192*$(x[4])) + -0.13493555263327828*sigmoid(-0.8627823044260601 + -0.3188988197195952*$(x[1]) + -0.18058802482760727*$(x[2]) + -0.7255181409249616*$(x[3]) + -0.8658172698743405*$(x[4])) + 0.0007730145672497635*sigmoid(0.5506348394687617 + 0.9730675890304568*$(x[1]) + 0.0064031244486910666*$(x[2]) + -0.590531971380631*$(x[3]) + -0.964793293764588*$(x[4]))) + sigmoid(-0.987583954230804 + 0.6297203029880079*sigmoid(-0.9657925733535908 + -0.20993433529810623*$(x[1]) + -0.9858887282433288*$(x[2]) + -0.7926811137744871*$(x[3]) + -0.8802726339564089*$(x[4])) + -0.939181196020844*sigmoid(-0.29556778373435044 + 0.6695148008497993*$(x[1]) + 0.7620585252555783*$(x[2]) + 0.8362720647957991*$(x[3]) + 0.8751574333999192*$(x[4])) + 0.23747324715066886*sigmoid(-0.8627823044260601 + -0.3188988197195952*$(x[1]) + -0.18058802482760727*$(x[2]) + -0.7255181409249616*$(x[3]) + -0.8658172698743405*$(x[4])) + 0.2556344288953589*sigmoid(0.5506348394687617 + 0.9730675890304568*$(x[1]) + 0.0064031244486910666*$(x[2]) + -0.590531971380631*$(x[3]) + -0.964793293764588*$(x[4]))) + sigmoid(-0.44105426128556546 + -0.39452594471110514*sigmoid(-0.9657925733535908 + -0.20993433529810623*$(x[1]) + -0.9858887282433288*$(x[2]) + -0.7926811137744871*$(x[3]) + -0.8802726339564089*$(x[4])) + 0.16620534874676274*sigmoid(-0.29556778373435044 + 0.6695148008497993*$(x[1]) + 0.7620585252555783*$(x[2]) + 0.8362720647957991*$(x[3]) + 0.8751574333999192*$(x[4])) + 0.6245292013722641*sigmoid(-0.8627823044260601 + -0.3188988197195952*$(x[1]) + -0.18058802482760727*$(x[2]) + -0.7255181409249616*$(x[3]) + -0.8658172698743405*$(x[4])) + 0.9983459126930239*sigmoid(0.5506348394687617 + 0.9730675890304568*$(x[1]) + 0.0064031244486910666*$(x[2]) + -0.590531971380631*$(x[3]) + -0.964793293764588*$(x[4]))) - $q <= 0.0))
@objective(m, Min, q)
return m
|
{"hexsha": "ebec44cf9f464edb1a2ebce2f96078b2ba0b54b7", "size": 3242, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/41_sigmoid_4_2_4.jl", "max_stars_repo_name": "PSORLab/RSActivationFunctions", "max_stars_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/41_sigmoid_4_2_4.jl", "max_issues_repo_name": "PSORLab/RSActivationFunctions", "max_issues_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/41_sigmoid_4_2_4.jl", "max_forks_repo_name": "PSORLab/RSActivationFunctions", "max_forks_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 190.7058823529, "max_line_length": 2900, "alphanum_fraction": 0.6597779149, "num_tokens": 1496}
|
"""Usage:
evaluate --gold=GOLD_FILE (--pred=PRED_FILE | --allfactual) --default=DEFAULT_VALUE
Prints the Mean Squared Error between a predicted and gold factuality file, using a specified default value.
Assumes that both gold and predicted agree on the events to annotate. """
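# Example invocation (hypothetical file names, added for illustration):
#   python evaluate.py --gold=gold.tsv --pred=pred.tsv --default=3.0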
from docopt import docopt
from operator import itemgetter
from scipy.stats import pearsonr
from sklearn.metrics import mean_absolute_error
from collections import defaultdict
from annotate_factulity import is_annot
import numpy as np
import matplotlib.pyplot as plt
import string
import math
import logging
logging.basicConfig(level = logging.DEBUG)
class Factuality:
"""
Container for factuality functions, and main driver through the constructor function.
Stores metrics as instance attributes.
"""
def __init__(self, gold_fn, pred_fn, default_value):
"""
Calculate metrics for gold against predicted.
The predicted file may contain entries marked as DEFAULT --
to indicate the predictor didn't assign a value to this entry.
Evaluate will replace such entries with default_value (Float).
"""
self.default_value = default_value
self.evaluate(gold_fn, pred_fn)
def evaluate(self, gold_fn, pred_fn):
"""
Driver for evaluating gold vs. predicted factuality.
if pred_fn is None, will evaluate the factual baseline
"""
self.load_values(gold_fn, pred_fn)
self.compute_agreement()
def extract_word_and_vals(self, fn):
"""
Extract only indices, words and factuality values
from an annotation file
"""
self.original_lines = list(enumerate(open(fn).readlines()))
return filter(lambda (line_ind, (ind, word, val)): val != "_",
[(line_ind, line.strip().split('\t')[:3])
for line_ind, line in self.original_lines
if line.strip()])
def load_values(self, gold_fn, pred_fn):
"""
Stores gold and predicted values into class members
"""
self.gold_vals = self.extract_word_and_vals(gold_fn)
if pred_fn is not None:
self.pred_vals = self.extract_word_and_vals(pred_fn)
# Sanity check -- Make sure that gold and pred agree on the input
self.gold_vals, self.pred_vals = self.validate_gold_pred(self.gold_vals, self.pred_vals)
# Extract only numerical factuality values
self.pred_vals = map(lambda (ind, val): (ind, float(val) if val != "DEFAULT" else self.default_value),
[(ind, val) for (ind, (_, word, val)) in self.pred_vals])
else:
logging.info("Evaluating All-factual baseline")
# Factual baseline
self.pred_vals = self.generate_factual_baseline(self.gold_vals)
# Extract only numerical factuality values
self.gold_vals = map(lambda (ind, val): (ind, float(val) if val != "DEFAULT" else self.default_value),
[(ind, val) for (ind, (_, word, val)) in self.gold_vals])
# self.gold_vals = map(float,
# map(itemgetter(2), self.gold_vals))
def compute_agreement(self):
"""
Compute agreement values after loading them into member variables
"""
golds = map(itemgetter(1),
self.gold_vals)
preds = map(itemgetter(1),
self.pred_vals)
self.mse = self._mse(golds, preds)
self.mae = mean_absolute_error(golds, preds)
self.normalized_mae = self.mae / 6.0
self.pearson = pearsonr(golds, preds)
def generate_factual_baseline(self, gold_vals):
"""
Generate a baseline which always assigns 3.0
"""
return [(ind, 3.0) for (ind, _) in gold_vals]
def validate_gold_pred(self, gold, pred):
"""
Match gold and pred entries on index and word; return the aligned (gold, pred) pairs
"""
#assert len(gold) == len(pred), "Lists are of different lengths ({}, {})!".format(len(gold), len(pred))
d = defaultdict(lambda: defaultdict (lambda: []))
for i, x in gold:
ind, word = x[:2]
d[ind][word] = [(i, x)]
for i, y in pred:
ind, word = y[:2]
d[ind][word].append((i, y))
ret_gold = []
ret_pred = []
for ind, words in d.iteritems():
for word in words:
if len(d[ind][word]) == 2:
ret_gold.append(d[ind][word][0])
ret_pred.append(d[ind][word][1])
logging.debug("{} factuality annotations".format(len(ret_gold)))
return ret_gold, ret_pred
# return all([(x[0] == y[0]) and (x[1] == y[1]) for (x, y) in zip(gold, pred)])
def _mse(self, gold, pred):
"""
Compute the Mean Squared Error between two float lists
"""
n = len(gold) * 1.0
return sum([math.pow(x -y, 2) for (x, y) in zip(gold, pred)]) / n
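# Worked example (hypothetical values, added for illustration):
# _mse([1.0, 2.0], [1.0, 4.0]) = ((1-1)**2 + (2-4)**2) / 2 = 2.0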
def _mae(self, gold, pred):
"""
Compute the Mean Absolute Error between two float lists
"""
n = len(gold) * 1.0
return sum([abs(x -y) for (x, y) in zip(gold, pred)]) / n
def find_first_diff(self, gold, pred):
"""
Return the first index in which gold and pred differ in terms of word or index
"""
g = f.read_factuality(gold_fn)
p = f.read_factuality(pred_fn)
for i, ((id1, w1, _), (id2, w2, _)) in enumerate(zip(g, p)):
if (w1 != w2) or (id1 != id2):
return i
class Factuality_annotation:
"""
Load factuality annotations and calculate statistics about it
"""
def __init__(self, fn, default_value):
self.default_value = float(default_value)
self.vals = self.read_factuality(fn)
def hist(self, fn, title):
"""
Plot an histagram of the values stored at in this annotation
"""
hist, bin_edges = np.histogram(map(itemgetter(2), self.vals), bins = np.arange(start = -3, stop = 3.5, step = 0.5))
ind = np.arange(len(hist))
plt.xlim([-0.5, len(ind)])
plt.bar(ind, hist, width = 0.7)
plt.xticks(ind, bin_edges[:-1])
plt.xlabel('Label')
plt.ylabel('#Instances')
plt.title(title)
plt.savefig(fn)
return hist, bin_edges
def read_factuality(self, fact_fn):
"""
Reads factuality from file, assumes that each line is composed of tab separated token_id, word, factuality value
Ignores any other tab separated fields that might appear in the file.
Replaces "DEFAULT" labels with the default value.
Returns a list of (token_id, word, factuality value).
"""
ret = []
self.vals_per_sent = {}
cur_sent_fact = []
cur_sent = ''
for line in open(fact_fn):
line = line.strip()
if not line:
if cur_sent:
self.vals_per_sent[cur_sent] = cur_sent_fact
cur_sent = ''
cur_sent_fact = []
else:
data = line.split('\t')
token_id, word, fact_value, pos, head = data[:5]
if len(data) > 5:
rel = data[5]
else:
rel = 'punct'
cur_sent += word.translate(None, string.punctuation)
if is_annot(fact_value):
toAppend = [token_id, word, float(fact_value) if (fact_value != "DEFAULT") else self.default_value, pos, head, rel]
ret.append(toAppend)
cur_sent_fact.append(toAppend)
# Make sure that we flushed the last buffer of annotations
assert cur_sent == '', cur_sent
return ret
## Constants
# An annotation value for inspection purposes, used to indicate that this value should be
# removed from both train and test
IGNORE_DEFAULT = 4
if __name__ == '__main__':
logging.basicConfig(level = logging.DEBUG)
args = docopt(__doc__)
logging.error(args)
gold_fn = args['--gold']
pred_fn = args['--pred']
default_value = float(args['--default'])
f = Factuality(gold_fn, pred_fn, default_value)
logging.info("MAE:\t{}".format(f.mae))
logging.info("MSE:\t{}".format(f.mse))
logging.info("r:\t{}".format(f.pearson))
|
{"hexsha": "97a386a4aea10cba7daadfa0377f2aca1e23ece3", "size": 8431, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/evaluate.py", "max_stars_repo_name": "gabrielStanovsky/unified-factuality", "max_stars_repo_head_hexsha": "869fd23ec8fe71ab2b9f30389018615932d27cde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-08-01T07:42:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-11T12:51:42.000Z", "max_issues_repo_path": "src/evaluate.py", "max_issues_repo_name": "gabrielStanovsky/unified-factuality", "max_issues_repo_head_hexsha": "869fd23ec8fe71ab2b9f30389018615932d27cde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-08-11T02:37:05.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-13T20:52:30.000Z", "max_forks_repo_path": "src/evaluate.py", "max_forks_repo_name": "gabrielStanovsky/unified-factuality", "max_forks_repo_head_hexsha": "869fd23ec8fe71ab2b9f30389018615932d27cde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-18T10:23:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-14T08:29:13.000Z", "avg_line_length": 36.8165938865, "max_line_length": 135, "alphanum_fraction": 0.5874747954, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1947}
|
import pyredner
import numpy as np
import torch
import redner
# Optimize vertices of 2D meshes
# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())
# Setup camera: We place the camera at (0, 0, -1), with look vector
# (0, 0, 1). We also use an orthographic camera just to
# make the projection more "2D": the depth is only used
# for determining the order of the meshes.
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -1.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to be > 0
resolution = (256, 256),
camera_type = redner.CameraType.orthographic)
# The materials:
mat_quad = pyredner.Material(\
diffuse_reflectance = torch.tensor([0.75, 0.75, 0.25],
device = pyredner.get_device()))
mat_tri = pyredner.Material(\
diffuse_reflectance = torch.tensor([0.9, 0.35, 0.35],
device = pyredner.get_device()))
materials = [mat_quad, mat_tri]
# We'll have a quad and a triangle as our meshes.
# First we define the 2D coordinates. The size of the screen is
# from -1.0 to 1.0. Y is pointing up.
quad_vertices_2d = torch.tensor(\
[[-0.3, 0.5], [0.2, 0.6], [-0.5, -0.3], [0.5, -0.4]],
device = pyredner.get_device())
tri_vertices_2d = torch.tensor(\
[[-0.6, 0.3], [0.4, 0.5], [-0.1, -0.2]],
device = pyredner.get_device())
# We need to pad the depth coordinates for these vertices
# We'll assign depth = 1 for the quad, depth = 0 for the triangle,
# so the triangle will block the quad.
quad_vertices = torch.cat((quad_vertices_2d,
torch.ones(quad_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
tri_vertices = torch.cat((tri_vertices_2d,
torch.zeros(tri_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
quad_indices = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype = torch.int32, device = pyredner.get_device())
tri_indices = torch.tensor([[0, 1, 2]], dtype = torch.int32, device = pyredner.get_device())
shape_quad = pyredner.Shape(\
vertices = quad_vertices,
indices = quad_indices,
material_id = 0)
shape_tri = pyredner.Shape(\
vertices = tri_vertices,
indices = tri_indices,
material_id = 1)
shapes = [shape_quad, shape_tri]
# Setup the scene. We don't need lights.
scene = pyredner.Scene(camera = cam,
shapes = shapes,
materials = materials)
# Serialize the scene. We only output the diffuse reflectance channel, so no lighting is needed.
args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
# Set max bounces to 0, we don't need lighting.
max_bounces = 0,
# Use the diffuse color as the output
channels = [redner.channels.diffuse_reflectance])
# Render the scene as our target image.
render = pyredner.RenderFunction.apply
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.exr')
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/target.png')
target = pyredner.imread('results/two_d_mesh/target.exr')
if pyredner.get_use_gpu():
target = target.cuda(device = pyredner.get_device())
# Perturb the scene, this is our initial guess
quad_vertices_2d = torch.tensor(\
[[-0.5, 0.3], [0.3, 0.4], [-0.7, -0.2], [0.4, -0.3]],
device = pyredner.get_device(),
requires_grad = True)
tri_vertices_2d = torch.tensor(\
[[-0.5, 0.4], [0.4, 0.6], [-0.0, -0.3]],
device = pyredner.get_device(),
requires_grad = True)
# Need to redo the concatenation
shape_quad.vertices = torch.cat((quad_vertices_2d,
torch.ones(quad_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
shape_tri.vertices = torch.cat((tri_vertices_2d,
torch.zeros(tri_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
# Set max bounces to 0, we don't need lighting.
max_bounces = 0,
# Use the diffuse color as the output
channels = [redner.channels.diffuse_reflectance])
# Render the initial guess.
render = pyredner.RenderFunction.apply
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/init.exr')
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/init.png')
# Optimize for mesh vertices
optimizer = torch.optim.Adam([quad_vertices_2d, tri_vertices_2d], lr=4e-2)
for t in range(200):
print('iteration:', t)
optimizer.zero_grad()
# Forward pass: render the image
# Need to redo the concatenation
shape_quad.vertices = torch.cat((quad_vertices_2d,
torch.ones(quad_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
shape_tri.vertices = torch.cat((tri_vertices_2d,
torch.zeros(tri_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 1,
max_bounces = 0,
channels = [redner.channels.diffuse_reflectance])
img = render(t+1, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/iter_{}.png'.format(t))
loss = (img - target).pow(2).sum()
print('loss:', loss.item())
loss.backward()
print('quad_vertices_2d.grad:', quad_vertices_2d.grad)
print('tri_vertices_2d.grad:', tri_vertices_2d.grad)
optimizer.step()
print('quad_vertices_2d:', quad_vertices_2d)
print('tri_vertices_2d:', tri_vertices_2d)
shape_quad.vertices = torch.cat((quad_vertices_2d,
torch.ones(quad_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
shape_tri.vertices = torch.cat((tri_vertices_2d,
torch.zeros(tri_vertices_2d.shape[0], 1, device = pyredner.get_device())), dim=1).contiguous()
args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 0,
channels = [redner.channels.diffuse_reflectance])
img = render(t+1, *args)
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/final.exr')
pyredner.imwrite(img.cpu(), 'results/two_d_mesh/final.png')
pyredner.imwrite(torch.abs(target - img).cpu(), 'results/two_d_mesh/final_diff.png')
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/two_d_mesh/iter_%d.png", "-vb", "20M",
"results/two_d_mesh/out.mp4"])
|
{"hexsha": "c3e858167bcec2029fe643c5f992f303ddbf6e25", "size": 6468, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/two_d_mesh.py", "max_stars_repo_name": "aferrall/redner", "max_stars_repo_head_hexsha": "be52e4105140f575f153d640ba889eb6e6015616", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1146, "max_stars_repo_stars_event_min_datetime": "2018-11-11T01:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:11:03.000Z", "max_issues_repo_path": "examples/two_d_mesh.py", "max_issues_repo_name": "aferrall/redner", "max_issues_repo_head_hexsha": "be52e4105140f575f153d640ba889eb6e6015616", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 177, "max_issues_repo_issues_event_min_datetime": "2018-11-13T22:48:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:19:29.000Z", "max_forks_repo_path": "examples/two_d_mesh.py", "max_forks_repo_name": "aferrall/redner", "max_forks_repo_head_hexsha": "be52e4105140f575f153d640ba889eb6e6015616", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 127, "max_forks_repo_forks_event_min_datetime": "2018-11-11T02:32:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T07:24:03.000Z", "avg_line_length": 41.1974522293, "max_line_length": 104, "alphanum_fraction": 0.6723871367, "include": true, "reason": "import numpy", "num_tokens": 1897}
|
C FILE: LAYERS.F
SUBROUTINE LAYERS_LOCATE(xx,Nlayers,m,x,k)
INTEGER Nlayers
REAL*4 xx(Nlayers)
REAL*4 x
INTEGER m
INTEGER kl, km, ku,k
Cf2py intent(in) xx,Nlayers,m
Cf2py intent(in) x
Cf2py intent(out) k
C bisection, following Press et al., Numerical Recipes in Fortran,
C mostly, because it can be vectorized
kl=1
ku=Nlayers+1
C This is the bisection loop
DO l = 1,m
IF (ku-kl.GT.1) THEN
km=(ku+kl)/2
CML IF ((xx(Nlayers).GE.xx(1)).EQV.(x.GE.xx(km))) THEN
C both comparisons true, or both false (expands the .EQV. above)
IF ( ((xx(Nlayers).GE.xx(1)).AND.(x.GE.xx(km))).OR.
& ((xx(Nlayers).LT.xx(1)).AND.(x.LT.xx(km))) ) THEN
kl=km
ELSE
ku=km
END IF
END IF
END DO
IF ( x.LT.xx(2) ) THEN
k=1
ELSE IF ( x.GE.xx(Nlayers) ) THEN
k=Nlayers
ELSE
k=kl
END IF
END SUBROUTINE
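C Worked example (added for illustration, not in the original file):
C for ascending xx = (0.0, 1.0, 2.0, 3.0), Nlayers = 4, m = 3
C bisection steps and x = 1.5, the loop narrows kl to 2 and the
C subroutine returns k = 2, i.e. x falls between xx(2) and xx(3).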
SUBROUTINE LAYERS_1(Vel,tracer,layers_bounds,MapFact,MapIndex,
& CellIndex,dZZf,
& NZ,Nlayers,NZZ,VH)
C
C CALCULATE THE NEW GRID
C
INTEGER NZ,Nlayers,NZZ
INTEGER mSteps, kgv,kloc
REAL*4 layers_bounds(Nlayers)
REAL*4 Vel(NZ)
REAL*4 tracer(NZ)
REAL*4 VH(Nlayers)
REAL*4 MapFact(NZZ)
INTEGER MapIndex(NZZ)
INTEGER CellIndex(NZZ)
REAL*4 Tfact
REAL*4 dzfac
REAL*4 dZZf(NZZ)
Cf2py intent(in) Vel
Cf2py intent(in) tracer,layers_bounds
Cf2py intent(in) MapFact,MapIndex
Cf2py intent(in) CellIndex,dZZf
Cf2py intent(in) NZ,Nlayers
Cf2py intent(in) NZZ
Cf2py intent(out) VH
Cf2py external :: layers_locate
Cf2py depend(Nlayers) VH
C compute the number of bisection steps needed, approximately
C log2(Nlayers), plus 1 for safety
mSteps = int(log10(dble(Nlayers))/log10(2.))+1
C The temperature index (layer_G) goes from cold to warm.
C The water column goes from warm (k=1) to cold (k=Nr).
C So initialize the search with the warmest value.
kgv = Nlayers
DO kg=1,Nlayers
VH(kg) = 0.
ENDDO
DO kk=1,NZZ
k = MapIndex(kk)
kp1=k+1
Tfact = MapFact(kk) * tracer(k) +
& (1. -MapFact(kk)) * tracer(kp1)
CALL LAYERS_LOCATE(
I layers_bounds,Nlayers,msteps,Tfact,kgv)
kloc = kgv
C NEED TO ADD TOPOGRAPHY HERE
kci = CellIndex(kk)
dzfac = dZZf(kk)
VH(kloc) =
& VH(kloc) +
& dzfac * Vel(kci)
ENDDO
END SUBROUTINE
C END FILE LAYERS.F
|
{"hexsha": "18572c45df21dfa50107dc367f34997e137c8bba", "size": 2627, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "layers.f", "max_stars_repo_name": "roxyboy/xlayers", "max_stars_repo_head_hexsha": "2e2d97d6f792dd71417919849a66c66f998407e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-10-16T17:27:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-14T18:47:52.000Z", "max_issues_repo_path": "layers.f", "max_issues_repo_name": "roxyboy/xlayers", "max_issues_repo_head_hexsha": "2e2d97d6f792dd71417919849a66c66f998407e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-10-27T14:18:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-30T14:39:57.000Z", "max_forks_repo_path": "xlayers/layers.f", "max_forks_repo_name": "cspencerjones/xlayers", "max_forks_repo_head_hexsha": "dc61e8b9189c2933f38547fd2cf77210bfd7d35c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-10-26T14:02:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-13T04:45:52.000Z", "avg_line_length": 24.7830188679, "max_line_length": 70, "alphanum_fraction": 0.5629996193, "num_tokens": 868}
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
import itertools, operator, random, math
from scipy.sparse.linalg import spsolve_triangular
from sklearn import linear_model
import pandas as pd
def random_sampling(data, proportion):
sampled_data = np.empty(data.shape)
sampled_data[:] = np.nan
n = data.shape[1]
for i in range(data.shape[0]):
sample_idx = random.sample(range(n), int(n*proportion))
sampled_data[i][sample_idx] = data[i][sample_idx]
return sampled_data
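# Example (hypothetical shapes, added for illustration): for a 2x10 data
# array and proportion 0.5, each row keeps 5 randomly chosen entries and
# the other 5 become NaN, mimicking a partially observed rating matrix.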
def funkSVD(rating_mat, latent_features, learning_rate, iters):
n_s, n_t = rating_mat.shape[0], rating_mat.shape[1]
s_matrix, t_matrix = np.random.rand(n_s, latent_features), np.random.rand(latent_features, n_t)
# s_matrix, t_matrix = 0.5*np.ones((n_s, latent_features)), 0.5*np.ones((latent_features, n_t))
sse_initial = 0
for p in range(iters):
old_see = sse_initial
sse_initial = 0
for i in range(n_s):
for j in range(n_t):
if not math.isnan(rating_mat[i][j]):
diff = rating_mat[i][j] - s_matrix[i,:].dot(t_matrix[:,j])
sse_initial += diff**2
for k in range(latent_features):
s_matrix[i][k] += learning_rate*(2*diff*t_matrix[k][j])
t_matrix[k][j] += learning_rate*(2*diff*s_matrix[i][k])
est_mat = s_matrix.dot(t_matrix)
return est_mat
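# Usage sketch (hypothetical parameters, added for illustration):
#   est = funkSVD(random_sampling(data, 0.8), latent_features=4,
#                 learning_rate=1e-3, iters=50)
# NaN entries are skipped by the SGD updates and are filled in by the
# low-rank reconstruction s_matrix.dot(t_matrix).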
def ft_data(pop, tspan, dt):
"""
est_mat from funkSVD
"""
n = len(tspan)
y_ft = []
for i in range(pop.shape[0]):
fhat = np.fft.fft(pop[i], n)
PSD = np.real(fhat*np.conj(fhat)/n) # real power spectrum so the threshold below is well-defined
freq = (1/(dt*n))*np.arange(n)
L = np.arange(1, np.floor(n/2), dtype= 'int')
indices = PSD > 5
PSDclean = PSD * indices
fhat = indices*fhat
ffilt = np.fft.ifft(fhat)
y_ft.append(ffilt)
return np.array(y_ft)
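# Note (added for illustration): frequencies whose power spectral
# density is at most 5 are zeroed before the inverse FFT; np.fft.ifft
# returns a complex array, so callers typically take .real of the rows.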
def funkSVD_ft(ft_matrix, rating_mat, latent_features, learning_rate, iters):
u,s,v = np.linalg.svd(ft_matrix, full_matrices=False)
n_s, n_t = rating_mat.shape[0], rating_mat.shape[1]
s_matrix, t_matrix = u, v
# s_matrix, t_matrix = 0.5*np.ones((n_s, latent_features)), 0.5*np.ones((latent_features, n_t))
sse_initial = 0
for p in range(iters):
old_see = sse_initial
sse_initial = 0
for i in range(n_s):
for j in range(n_t):
if not math.isnan(rating_mat[i][j]):
diff = rating_mat[i][j] - s_matrix[i,:].dot(t_matrix[:,j])
sse_initial += diff**2
for k in range(latent_features):
s_matrix[i][k] += learning_rate*(2*diff*t_matrix[k][j])
t_matrix[k][j] += learning_rate*(2*diff*s_matrix[i][k])
est_mat = s_matrix.dot(t_matrix)
return est_mat
def power_(d,order):
# d is the number of variables; order is the maximum total degree of the monomials
powers = []
for p in range(1,order+1):
size = d + p - 1
for indices in itertools.combinations(range(size), d-1): ##combinations
starts = [0] + [index+1 for index in indices]
stops = indices + (size,)
powers.append(tuple(map(operator.sub, stops, starts)))
return powers
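# Worked example (added for illustration): power_(2, 2) returns
# [(0, 1), (1, 0), (0, 2), (1, 1), (2, 0)], the exponent tuples of all
# monomials in two variables up to total degree 2.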
def lib_terms(data,order,description):
#description is a list of variable names, like [R, M, S]
#description of lib
descr = []
#data is the input data, like R,M,S; order is the total order of polynomials
d,t = data.shape # d is the number of variables; t is the number of time points
theta = np.ones((t,1), dtype=np.float64) # the first column of lib is '1'
P = power_(d,order)
descr = ["1"]
for i in range(len(P)):
new_col = np.zeros((t,1),dtype=np.float64)
for j in range(t):
new_col[j] = np.prod(np.power(list(data[:,j]),list(P[i])))
theta = np.hstack([theta, new_col.reshape(t,1)])
descr.append("{0} {1}".format(str(P[i]), str(description)))
# print((str(P[i]), str(description)))
return theta, descr
def sparsifyDynamics(Theta, dx, Lambda):
#theta.shape = 248*10 (time points*functions); dx.shape = 248*3 (time points*variables)
#need to ensure sizes and dimensions match
# dx = dx.T
m,n = dx.shape #(248*3)
Xi = np.dot(np.linalg.pinv(Theta), dx) #Xi.shape = 10*3
# Lambda is the sparsification knob
for k in range(20): # fixed number of thresholding passes
small_idx = (abs(Xi) < Lambda)
big_idx = (abs(Xi) >= Lambda)
Xi[small_idx] = 0
for i in range(n):
big_curr, = np.where(big_idx[:,i])
Xi[big_curr, i] = np.dot(np.linalg.pinv(Theta[:,big_curr]), dx[:,i])
return Xi
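# Note (added for illustration): this is the sequential thresholded
# least-squares loop used in SINDy: coefficients smaller than Lambda
# are zeroed and the surviving ones are refit on every pass.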
def sparseGalerkin(t, pop, Xi, polyorder):
theta, descr = lib_terms(np.array([pop]).T,polyorder,[])
dpop = theta.dot(Xi)
return dpop[0]
def time_different(dt, pop):
"""
dpop = (6*6000) (species * time)
centered first order derviate
"""
x = np.full_like(pop, fill_value = np.nan)
x[:, 1:-1] = (pop[:, 2:] - pop[:, :-2]) / (2*dt)
x[:,0] = (-11/6 *pop[:,0] + 3* pop[:,1] - 3/2*pop[:,2] + pop[:,3]/3) /dt
x[:,-1] = (11/6* pop[:,-1] -3* pop[:,-2] + 3/2* pop[:,-3] -pop[:,-4]/3)/dt
return x
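# Worked example (added for illustration): for pop = [[0., 1., 2., 3.]]
# and dt = 1, the centered interior stencil and the one-sided endpoint
# stencils all recover the exact slope, giving x = [[1., 1., 1., 1.]].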
def visual_param(Xi, descr):
small_idx = abs(Xi) < 1e-4
Xi[small_idx] = 0
new_set = [x.replace('(', '').replace(']', '') for x in descr]
name_s = descr
label = []
for str_ in new_set[1:]:
idx_ = [int(x) for x in str_.split(') [')[0].split(',')]
lab = ""
for idx, i in enumerate(idx_):
j = i
while j > 0:
lab += name_s[idx]
j -= 1
label.append(lab)
term_label = ['1'] + label
df_term = pd.DataFrame(Xi.T, index=term_label, columns=name_s)
return df_term
def bulid_prior(label, theta, descr, prior_dic):
df_prior = visual_param(np.zeros((len(label), theta.shape[1])), descr)
drop_index = []
for term in label:
idx_prev = df_prior.index
x_new = set()
for i, s in enumerate(prior_dic[term]):
lst_idx = [p.find(s) for p in idx_prev]
x, = np.where(np.array(lst_idx) == -1)
if i == 0:
x_new = set(x)
else:
x_new = x_new.intersection(x)
drop_index.append(list(x_new))
df_prior[term].iloc[list(x_new)] = 1
return df_prior, drop_index
|
{"hexsha": "085cd47852ba0d305de67b4cbf38da2fe585ae38", "size": 6474, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "onionpork/sparsemyxo", "max_stars_repo_head_hexsha": "ee08764c05733419a92246f5fcf47a76dc409228", "max_stars_repo_licenses": ["BSD-Source-Code"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "onionpork/sparsemyxo", "max_issues_repo_head_hexsha": "ee08764c05733419a92246f5fcf47a76dc409228", "max_issues_repo_licenses": ["BSD-Source-Code"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "onionpork/sparsemyxo", "max_forks_repo_head_hexsha": "ee08764c05733419a92246f5fcf47a76dc409228", "max_forks_repo_licenses": ["BSD-Source-Code"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.696969697, "max_line_length": 99, "alphanum_fraction": 0.5698177325, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1862}
|
struct S a::T end
struct T end
|
{"hexsha": "5628100340d000d6bcbce4169b5f7566d45ebaae", "size": 32, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "tests/typing/bad/testfile-unbound-3.jl", "max_stars_repo_name": "gabriel-doriath-dohler/pjulia", "max_stars_repo_head_hexsha": "14325f282b711c979091ec259aefb0f1e89ec1be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-29T17:27:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-29T17:27:44.000Z", "max_issues_repo_path": "tests/typing/bad/testfile-unbound-3.jl", "max_issues_repo_name": "gabriel-doriath-dohler/pjulia", "max_issues_repo_head_hexsha": "14325f282b711c979091ec259aefb0f1e89ec1be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-17T20:57:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-20T18:23:01.000Z", "max_forks_repo_path": "tests/typing/bad/testfile-unbound-3.jl", "max_forks_repo_name": "gabriel-doriath-dohler/pjulia", "max_forks_repo_head_hexsha": "14325f282b711c979091ec259aefb0f1e89ec1be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 8.0, "max_line_length": 17, "alphanum_fraction": 0.6875, "num_tokens": 12}
|
[STATEMENT]
lemma nnvs_finite: "n_nearest_verts w u n U \<Longrightarrow> finite U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n_nearest_verts w u n U \<Longrightarrow> finite U
[PROOF STEP]
by (induction rule: n_nearest_verts.induct) auto
|
{"llama_tokens": 97, "file": "Query_Optimization_Graph_Definitions", "length": 1}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from nltk.util import ngrams
import numpy as np
def format_line( line, padding = 1 ):
line = line.strip()
line = "<s> " * padding + line + " </s>" * padding
line = line.split( " " )
return line
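# Example (added for illustration):
#   format_line("hello world", padding=1) -> ['<s>', 'hello', 'world', '</s>']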
def init_vocab():
vocab = {
"unk": 0,
"<s>": 1,
"</s>": 2,
}
return vocab
def extract_vocabulary_and_sentences( in_corpus ):
corpus = []
vocab = init_vocab()
count = len( vocab )
with open( in_corpus, mode = "r" ) as c:
for line in c:
line = format_line( line, 1 )
numberized_tokens = []
for token in line:
if token not in vocab:
vocab[ token ] = count
count += 1
numberized_tokens.append( str( vocab[ token ] ) )
corpus.append( " ".join( numberized_tokens ) )
vocab = [ "{0} {1}".format( item, vocab[ item ] ) for item in vocab ]
return vocab, corpus
def extract_vocabulary_and_ngrams( in_corpus, n_order ):
corpus_contexts = []
corpus_targets = []
vocab = init_vocab()
count = len( vocab )
sentence_idx = []
count_sentence = -1
with open( in_corpus, mode = "r" ) as corpus:
for line in corpus:
count_sentence += 1
tokens = format_line( line, ( n_order - 1 ) )
numberized_tokens = []
for token in tokens:
if token not in vocab:
vocab[ token ] = count
count += 1
numberized_tokens.append( vocab[ token ] )
line_ngrams = ngrams( numberized_tokens, n_order * 2 - 1 )
for ngram in line_ngrams:
left_context = " ".join( str( item ) for item in ngram[ 0 : n_order - 1 ] )
right_context = " ".join( str( item ) for item in ngram[ n_order : len( ngram ) ] )
target = str( ngram[ n_order - 1 ] )
corpus_contexts.append( left_context + " " + right_context )
corpus_targets.append( target )
sentence_idx.append( count_sentence )
vocab = [ "{0} {1}".format( item, vocab[ item ] ) for item in vocab ]
return vocab, corpus_contexts, corpus_targets, sentence_idx
def multiply_sentences( sentences, idx ):
multi_sentences = []
for i in idx:
multi_sentences.append( sentences[ i ] )
return multi_sentences
def write_file( towrite, out_file ):
with open( out_file, mode = "w" ) as out:
out.write( "\n".join( towrite ) )
if __name__ == '__main__':
if len( sys.argv ) != 6:
print( "\nUsage: ", sys.argv[ 0 ], "<input src corpus> <input trg corpus> <trg ngram order> <src output prefix> <trg output prefix>\n" )
exit()
in_src_corpus, in_trg_corpus, trg_n_order, out_src_prefix, out_trg_prefix = sys.argv[ 1: ]
out_src_sentences = "{0}.sentences".format( out_src_prefix )
out_src_vocab = "{0}.vocab".format( out_src_prefix )
src_vocab, src_sentences = extract_vocabulary_and_sentences( in_src_corpus )
trg_n_order = np.int( trg_n_order )
out_trg_context = "{0}.context".format( out_trg_prefix )
out_trg_target = "{0}.target".format( out_trg_prefix )
out_trg_vocab = "{0}.vocab".format( out_trg_prefix )
trg_vocab, trg_contexts, trg_targets, trg_sentence_idx = extract_vocabulary_and_ngrams( in_trg_corpus, trg_n_order )
src_sentences = multiply_sentences( src_sentences, trg_sentence_idx )
write_file( src_sentences, out_src_sentences )
write_file( src_vocab, out_src_vocab )
write_file( trg_contexts, out_trg_context )
write_file( trg_targets, out_trg_target )
write_file( trg_vocab, out_trg_vocab )
|
{"hexsha": "7ea90e207883f1e6945a38591abf70499c340edd", "size": 3299, "ext": "py", "lang": "Python", "max_stars_repo_path": "misc/prepare_data_bi.py", "max_stars_repo_name": "raphaelrubino/nid", "max_stars_repo_head_hexsha": "343a225fe0a078fa4b4d43b08ed53e9731b0471b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "misc/prepare_data_bi.py", "max_issues_repo_name": "raphaelrubino/nid", "max_issues_repo_head_hexsha": "343a225fe0a078fa4b4d43b08ed53e9731b0471b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "misc/prepare_data_bi.py", "max_forks_repo_name": "raphaelrubino/nid", "max_forks_repo_head_hexsha": "343a225fe0a078fa4b4d43b08ed53e9731b0471b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3431372549, "max_line_length": 138, "alphanum_fraction": 0.6829342225, "include": true, "reason": "import numpy", "num_tokens": 955}
|
from typing import Iterable
from scipy.sparse import csr_matrix
from maru.feature.vocabulary import FeatureVocabulary
from maru.types import FeatureVector
class SparseFeatureVectorizer:
def __init__(self, vocabulary: FeatureVocabulary):
self._vocabulary = vocabulary
def transform(self, features: Iterable[FeatureVector]) -> csr_matrix:
values = []
columns = []
rows = [0]
vocabulary = self._vocabulary
for vector in features:
for name, value in vector:
if name in vocabulary:
columns.append(vocabulary[name])
values.append(value)
rows.append(len(columns))
height = len(rows) - 1
width = len(vocabulary)
return csr_matrix((values, columns, rows), shape=(height, width))
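# Usage sketch (hypothetical vocabulary, added for illustration): with a
# vocabulary mapping {'pos=NOUN': 0, 'case=nom': 1} and a single feature
# vector [('pos=NOUN', 1.0), ('case=nom', 1.0)], transform yields a 1x2
# CSR matrix; values/columns/rows above are exactly the (data, indices,
# indptr) triple of scipy's csr_matrix constructor.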
|
{"hexsha": "c877aaa7b50322aef182a9dc3daeffa03a7a7275", "size": 839, "ext": "py", "lang": "Python", "max_stars_repo_path": "maru/vectorizer/sparse/feature.py", "max_stars_repo_name": "chomechome/maru", "max_stars_repo_head_hexsha": "dae41e250a64a8b6f6ab9647fd60221d5ede8ab6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2018-09-25T05:17:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:27:15.000Z", "max_issues_repo_path": "maru/vectorizer/sparse/feature.py", "max_issues_repo_name": "ojomio/maru", "max_issues_repo_head_hexsha": "7a44be7f974c0962f3023f5d064a391d2b4f20b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-11-26T08:48:54.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-10T10:00:39.000Z", "max_forks_repo_path": "maru/vectorizer/sparse/feature.py", "max_forks_repo_name": "ojomio/maru", "max_forks_repo_head_hexsha": "7a44be7f974c0962f3023f5d064a391d2b4f20b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-01-05T17:36:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-29T08:55:24.000Z", "avg_line_length": 27.064516129, "max_line_length": 73, "alphanum_fraction": 0.63170441, "include": true, "reason": "from scipy", "num_tokens": 167}
|
************************************************************************
* UPDATE_EST - Updates the estimated yield of magic beans given
* some additional amount of rainfall
************************************************************************
*
* VARIABLES
*
* INPUT RAIN = Additional rainfall
*
* INOUT YIELD_EST = Crop yield to update
*
************************************************************************
************************************************************************
* CROP_YIELD - Estimate the yield of magic beans given a simple
* model for rainfall
************************************************************************
*
* VARIABLES
*
* INPUT MAX_RAIN = The maximum rain for the month
* INPUT CONSISTENCY = The consistency of the rainfall
* (higher = more consistent)
* INPUT ABSORBTION = Estimates the % of rainfall absorbed into the
* soil (the remainder is lost to evaporation and runoff)
*
* OUTPUT YIELD_EST = The estimated yield of magic beans
*
* DAY = The current day of the month
* RAIN = The rainfall estimate for the current day
*
************************************************************************
PROGRAM CROP_YIELD
IMPLICIT NONE
INTEGER DAY
DOUBLE PRECISION RAIN, YIELD_EST, TOTAL_RAIN, NEWS
DOUBLE PRECISION MAX_RAIN, CONSISTENCY, ABSORBTION
MAX_RAIN = 4.0
CONSISTENCY = 64.0
ABSORBTION = 0.6
YIELD_EST = 0
TOTAL_RAIN = 0
DO 20 DAY=1,31
PRINT *, "(", DAY, CONSISTENCY, MAX_RAIN, ABSORBTION, ")"
* Compute rainfall for the current day
RAIN = (-(DAY - 16) ** 2 / CONSISTENCY + MAX_RAIN) * ABSORBTION
PRINT *, RAIN
* Update rainfall estimate
YIELD_EST = UPDATE_EST(RAIN, TOTAL_RAIN, YIELD_EST)
NEWS = TEST_FUNC(TOTAL_RAIN, YIELD_EST)
PRINT *, "Day ", DAY, " Estimate: ", YIELD_EST
20 ENDDO
PRINT *, "Crop Yield(%): ", YIELD_EST
PRINT *, "News: ", NEWS
CONTAINS
DOUBLE PRECISION FUNCTION UPDATE_EST(RAIN, TOTAL_RAIN, YIELD_EST)
IMPLICIT NONE
DOUBLE PRECISION RAIN, YIELD_EST, TOTAL_RAIN
TOTAL_RAIN = TOTAL_RAIN + RAIN
* Yield increases up to a point
IF(TOTAL_RAIN .le. 40) THEN
YIELD_EST = -(TOTAL_RAIN - 40) ** 2 / 16 + 100
* Then sharply declines
ELSE
YIELD_EST = -TOTAL_RAIN + 140
ENDIF
UPDATE_EST = YIELD_EST
END FUNCTION UPDATE_EST
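C Worked example (added for illustration): with TOTAL_RAIN at 36
C after the update, the quadratic branch gives
C YIELD_EST = -(36 - 40)**2 / 16 + 100 = 99; once TOTAL_RAIN
C exceeds 40, the linear branch -TOTAL_RAIN + 140 takes over.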
DOUBLE PRECISION FUNCTION TEST_FUNC(TOTAL_RAIN, YIELD_EST)
IMPLICIT NONE
DOUBLE PRECISION TOTAL_RAIN, YIELD_EST, NEW_VAR
NEW_VAR = 5.0
IF (NEW_VAR .le. 4.0) THEN
TEST_FUNC = TOTAL_RAIN
ELSE
TEST_FUNC = YIELD_EST
ENDIF
END FUNCTION TEST_FUNC
END PROGRAM CROP_YIELD
|
{"hexsha": "f7f39f7709da826067a7576fe3a9cdfebbddfad3", "size": 2960, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "delphi/translators/for2py/tests/test_data/legacy_tests/crop_yield_function.f", "max_stars_repo_name": "cthoyt/delphi", "max_stars_repo_head_hexsha": "3df2de639905453f5d28d7a7b3b9f7e5a7a1fb0d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "delphi/translators/for2py/tests/test_data/legacy_tests/crop_yield_function.f", "max_issues_repo_name": "cthoyt/delphi", "max_issues_repo_head_hexsha": "3df2de639905453f5d28d7a7b3b9f7e5a7a1fb0d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "delphi/translators/for2py/tests/test_data/legacy_tests/crop_yield_function.f", "max_forks_repo_name": "cthoyt/delphi", "max_forks_repo_head_hexsha": "3df2de639905453f5d28d7a7b3b9f7e5a7a1fb0d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1578947368, "max_line_length": 72, "alphanum_fraction": 0.5087837838, "num_tokens": 701}
|
from pathlib import Path
import unittest
import numpy as np
from onnxruntime_extensions.eager_op import EagerOp, BlingFireSentenceBreaker
def _get_test_data_file(*sub_dirs):
test_dir = Path(__file__).parent
return str(test_dir.joinpath(*sub_dirs))
def _run_blingfire_sentencebreaker(input, output, model_path):
t2stc = EagerOp.from_customop(BlingFireSentenceBreaker, model=model_path)
result = t2stc(input)
np.testing.assert_array_equal(result, output)
class TestBlingFireSentenceBreaker(unittest.TestCase):
def test_text_to_case1(self):
inputs = np.array([
"This is the Bling-Fire tokenizer. Autophobia, also called monophobia, isolophobia, or eremophobia, is the specific phobia of isolation. 2007年9月日历表_2007年9月农历阳历一览表-万年历. I saw a girl with a telescope. Я увидел девушку с телескопом."])
outputs = np.array(["This is the Bling-Fire tokenizer.",
"Autophobia, also called monophobia, isolophobia, or eremophobia, is the specific phobia of isolation. 2007年9月日历表_2007年9月农历阳历一览表-万年历.",
"I saw a girl with a telescope.",
"Я увидел девушку с телескопом."])
_run_blingfire_sentencebreaker(input=inputs, output=outputs, model_path=_get_test_data_file('data', 'default_sentence_break_model.bin'))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "fe97bddfa8da018690e7ccc26e18649c8a64b8f9", "size": 1406, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_blingfire_sentencebreaker.py", "max_stars_repo_name": "xadupre/onnxruntime-extensions", "max_stars_repo_head_hexsha": "3e82549bcb53bc93d04c25d64f16a52d495725c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-06T14:07:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T14:07:48.000Z", "max_issues_repo_path": "test/test_blingfire_sentencebreaker.py", "max_issues_repo_name": "QPC-database/onnxruntime-extensions", "max_issues_repo_head_hexsha": "7fd96c8e9700425335b479ca042b16fe92f8b8e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_blingfire_sentencebreaker.py", "max_forks_repo_name": "QPC-database/onnxruntime-extensions", "max_forks_repo_head_hexsha": "7fd96c8e9700425335b479ca042b16fe92f8b8e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8666666667, "max_line_length": 262, "alphanum_fraction": 0.7155049787, "include": true, "reason": "import numpy", "num_tokens": 385}
|
#!/usr/bin/env julia
open("test.txt","w") do fout
write(fout,"Hello World!\n")
end
|
{"hexsha": "67c15b58dc123bf15d069bb522aa951bb1640b90", "size": 85, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/io/simple.jl", "max_stars_repo_name": "jdurbin/sandbox", "max_stars_repo_head_hexsha": "ee982f7386ae02c5937dbaee867710b5cd2cc71b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia/io/simple.jl", "max_issues_repo_name": "jdurbin/sandbox", "max_issues_repo_head_hexsha": "ee982f7386ae02c5937dbaee867710b5cd2cc71b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/io/simple.jl", "max_forks_repo_name": "jdurbin/sandbox", "max_forks_repo_head_hexsha": "ee982f7386ae02c5937dbaee867710b5cd2cc71b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.0, "max_line_length": 29, "alphanum_fraction": 0.6470588235, "num_tokens": 26}
|
from basic_utils.options import *
import numpy as np
from models.net_builder import MLPs_pol, MLPs_v
from models.optimizers import *
from models.policies import StochPolicy
from models.baselines import *
from basic_utils.replay_memory import *
from models.data_processor import *
# ================================================================
# Abstract Class
# ================================================================
class BasicAgent:
"""
This is the abstract class of the agent.
"""
def act(self, ob_no):
"""
Get the action given the observation.
Args:
ob_no: the observation
Return:
the corresponding action
"""
raise NotImplementedError
def update(self, paths):
"""
Update the weights of the network.
Args:
paths: a dict containing the information for updating
Return:
information of the updating process, extra information
"""
raise NotImplementedError
def get_params(self):
"""
Get the parameters of the agent.
Return:
the state dict of the agent
"""
raise NotImplementedError
def set_params(self, state_dicts):
"""
Set the parameters to the agent.
Args:
state_dicts: the parameters to be set
"""
raise NotImplementedError
def save_model(self, name):
"""
Save the model.
"""
raise NotImplementedError
def load_model(self, name):
"""
Load the model.
"""
raise NotImplementedError
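# Minimal sketch of a concrete agent (hypothetical, added for
# illustration): a subclass only needs to fill in the hooks above, e.g.
#   class ConstantAgent(BasicAgent):
#       def act(self, ob_no):
#           return 0  # always pick action 0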
# ================================================================
# Policy Based Agent
# ================================================================
class Policy_Based_Agent(BasicAgent):
def __init__(self, policy, baseline):
self.policy = policy
self.baseline = baseline
def act(self, observation):
return self.policy.act(observation)
def process_act(self, action):
return self.policy.probtype.process_act(action)
def update(self, processed_path):
vf_name, vf_stats, info = self.baseline.fit(processed_path)
pol_stats = self.policy.update(processed_path)
return [(vf_name, vf_stats), ("pol", pol_stats)], info
def save_model(self, name):
self.policy.save_model(name)
self.baseline.save_model(name)
def load_model(self, name):
self.policy.load_model(name)
self.baseline.load_model(name)
def get_params(self):
return self.policy.net.state_dict(), self.baseline.net.state_dict()
def set_params(self, state_dicts):
self.policy.net.load_state_dict(state_dicts[0])
self.baseline.net.load_state_dict(state_dicts[1])
# ================================================================
# Evolution Strategy Based Agent
# ================================================================
class Evolution_Based_Agent(BasicAgent):
def __init__(self, policy, n_kid, sigma):
self.policy = policy
self.n_kid = n_kid
self.sigma = sigma
def act(self, ob_no):
return self.policy.act(ob_no)
def process_act(self, a):
return self.policy.probtype.process_act(a)
def update(self, paths):
return [("pol", self.policy.update(paths, self.noise_seed))], {}
def save_model(self, name):
self.policy.save_model(name)
def load_model(self, name):
self.policy.load_model(name)
def get_params(self):
self.noise_seed = np.random.randint(0, 2 ** 32 - 1, size=self.n_kid, dtype=np.uint32).repeat(2)
params = get_flat_params_from(self.policy.net)
for index in range(2 * self.n_kid):
np.random.seed(self.noise_seed[index])
change = turn_into_cuda(
torch.from_numpy(sign(index) * self.sigma * np.random.randn(params.numel()))).float()
new_params = params + change
yield new_params
def set_params(self, flat_params):
set_flat_params_to(self.policy.net, flat_params)
# ================================================================
# Value Based Agent
# ================================================================
class Value_Based_Agent(BasicAgent):
def __init__(self, baseline, gamma, double=False):
self.baseline = baseline
self.gamma = gamma
self.double = double
def act(self, ob_no):
return self.baseline.act(ob_no)
def update(self, processed_path):
vf_name, vf_stats, info = self.baseline.fit(processed_path)
return [(vf_name, vf_stats)], {'td_err': info["td_err"]}
def get_params(self):
return [net.state_dict() for net in self.baseline.nets]
def set_params(self, state_dict):
for i in range(len(self.baseline.nets)):
self.baseline.nets[i].load_state_dict(state_dict[i])
def save_model(self, name):
self.baseline.save_model(name)
def load_model(self, name):
self.baseline.load_model(name)
# ================================================================
# Trust Region Policy Optimization
# ================================================================
class TRPO_Agent(Policy_Based_Agent):
name = 'TRPO_Agent'
def __init__(self,
pol_net,
v_net,
probtype,
lr_optimizer=1e-3,
epochs_v=10,
cg_iters=10,
max_kl=0.003,
batch_size=256,
cg_damping=1e-3,
get_info=True):
updater = TRPO_Updater(net=pol_net,
probtype=probtype,
cg_damping=cg_damping,
cg_iters=cg_iters,
max_kl=max_kl,
get_info=get_info)
optimizer = Adam_Optimizer(net=v_net,
batch_size=batch_size,
epochs=epochs_v,
lr=lr_optimizer)
policy = StochPolicy(net=pol_net, probtype=probtype, updater=updater)
baseline = ValueFunction(net=v_net, optimizer=optimizer)
Policy_Based_Agent.__init__(self, baseline=baseline, policy=policy)
# ================================================================
# Advantage Actor-Critic
# ================================================================
class A2C_Agent(Policy_Based_Agent):
name = 'A2C_Agent'
def __init__(self,
pol_net,
v_net,
probtype,
epochs_v=10,
epochs_p=10,
kl_target=0.003,
lr_updater=9e-4,
lr_optimizer=1e-3,
batch_size=256,
get_info=True):
updater = Adam_Updater(net=pol_net,
epochs=epochs_p,
kl_target=kl_target,
lr=lr_updater,
probtype=probtype,
get_info=get_info)
optimizer = Adam_Optimizer(net=v_net,
batch_size=batch_size,
epochs=epochs_v,
lr=lr_optimizer)
policy = StochPolicy(net=pol_net, probtype=probtype, updater=updater)
baseline = ValueFunction(net=v_net, optimizer=optimizer)
Policy_Based_Agent.__init__(self, baseline=baseline, policy=policy)
# ================================================================
# Proximal Policy Optimization
# ================================================================
class PPO_adapted_Agent(Policy_Based_Agent):
name = 'PPO_adapted_Agent'
def __init__(self,
pol_net,
v_net,
probtype,
epochs_v=10,
epochs_p=10,
kl_target=0.003,
lr_updater=9e-4,
lr_optimizer=1e-3,
batch_size=256,
adj_thres=(0.5, 2.0),
beta=1.0,
beta_range=(1 / 35.0, 35.0),
kl_cutoff_coeff=50.0,
get_info=True):
updater = PPO_adapted_Updater(adj_thres=adj_thres,
beta=beta,
beta_range=beta_range,
epochs=epochs_p,
kl_cutoff_coeff=kl_cutoff_coeff,
kl_target=kl_target,
lr=lr_updater,
net=pol_net,
probtype=probtype,
get_info=get_info)
optimizer = Adam_Optimizer(net=v_net,
batch_size=batch_size,
epochs=epochs_v,
lr=lr_optimizer)
policy = StochPolicy(net=pol_net, probtype=probtype, updater=updater)
baseline = ValueFunction(net=v_net, optimizer=optimizer)
Policy_Based_Agent.__init__(self, baseline=baseline, policy=policy)
class PPO_clip_Agent(Policy_Based_Agent):
name = 'PPO_clip_Agent'
def __init__(self,
pol_net,
v_net,
probtype,
epochs_v=10,
epochs_p=10,
kl_target=0.003,
lr_updater=9e-4,
lr_optimizer=1e-3,
batch_size=256,
adj_thres=(0.5, 2.0),
clip_range=(0.05, 0.3),
epsilon=0.2,
get_info=True):
updater = PPO_clip_Updater(adj_thres=adj_thres,
clip_range=clip_range,
epsilon=epsilon,
epochs=epochs_p,
kl_target=kl_target,
lr=lr_updater,
net=pol_net,
probtype=probtype,
get_info=get_info)
optimizer = Adam_Optimizer(net=v_net,
batch_size=batch_size,
epochs=epochs_v,
lr=lr_optimizer)
policy = StochPolicy(net=pol_net, probtype=probtype, updater=updater)
baseline = ValueFunction(net=v_net, optimizer=optimizer)
Policy_Based_Agent.__init__(self, baseline=baseline, policy=policy)
# ================================================================
# Deep Q Learning
# ================================================================
class DQN_Agent(Value_Based_Agent):
name = 'DQN_Agent'
def __init__(self,
net,
target_net,
gamma=0.99,
lr=1e-3,
update_target_every=500,
get_info=True):
optimizer = Adam_Q_Optimizer(net=net,
lr=lr,
get_data=get_info)
baseline = QValueFunction(net=net,
target_net=target_net,
optimizer=optimizer,
update_target_every=update_target_every)
Value_Based_Agent.__init__(self, baseline=baseline, gamma=gamma, double=False)
# ================================================================
# Bayesian Deep Q Learning
# ================================================================
class Bayesian_DQN_Agent(Value_Based_Agent):
name = 'Bayesian_DQN_Agent'
def __init__(self,
net,
mean_net,
std_net,
target_net,
target_mean_net,
target_std_net,
alpha=1,
beta=1e-4,
gamma=0.99,
lr=1e-3,
scale=1e-3,
update_target_every=500,
get_info=True):
optimizer = Bayesian_Q_Optimizer(net=net,
mean_net=mean_net,
std_net=std_net,
lr=lr,
alpha=alpha,
beta=beta,
scale=scale,
get_data=get_info)
baseline = QValueFunction_Bayesian(net=net,
mean_net=mean_net,
std_net=std_net,
target_net=target_net,
target_mean_net=target_mean_net,
target_std_net=target_std_net,
optimizer=optimizer,
scale=scale,
tau=0.01,
update_target_every=update_target_every)
Value_Based_Agent.__init__(self, baseline=baseline, gamma=gamma, double=False)
# ================================================================
# Double Deep Q Learning
# ================================================================
class Double_DQN_Agent(Value_Based_Agent):
name = 'Double_DQN_Agent'
def __init__(self,
net,
target_net,
gamma=0.99,
lr=1e-3,
update_target_every=500,
get_info=True):
optimizer = Adam_Q_Optimizer(net=net,
lr=lr,
get_data=get_info)
baseline = QValueFunction(net=net,
target_net=target_net,
optimizer=optimizer,
update_target_every=update_target_every)
Value_Based_Agent.__init__(self, baseline=baseline, gamma=gamma, double=True)
# ================================================================
# Evolution Strategies
# ================================================================
class Evolution_Agent(Evolution_Based_Agent):
name = 'Evolution_Agent'
def __init__(self,
pol_net,
probtype,
lr_updater=0.01,
n_kid=10,
sigma=0.05):
updater = Evolution_Updater(lr=lr_updater,
n_kid=n_kid,
net=pol_net,
sigma=sigma)
policy = StochPolicy(net=pol_net,
probtype=probtype,
updater=updater)
Evolution_Based_Agent.__init__(self, policy=policy, n_kid=n_kid, sigma=sigma)
# ================================================================
# Deep Deterministic Policy Gradient
# ================================================================
class DDPG_Agent(Policy_Based_Agent):
name = 'DDPG_Agent'
def __init__(self,
policy_net,
policy_target_net,
q_net,
q_target_net,
probtype,
lr_updater,
lr_optimizer,
tau=0.01,
update_target_every=None,
get_info=True):
updater = DDPG_Updater(net=policy_net,
lr=lr_updater,
q_net=q_net,
get_data=get_info)
policy = StochPolicy(net=policy_net,
target_net=policy_target_net,
tau=tau,
update_target_every=update_target_every,
probtype=probtype,
updater=updater)
# optimizer = Adam_Q_Optimizer(net=q_net,
# lr=lr_optimizer,
# get_data=get_info)
optimizer = DDPG_Optimizer_v2(net=q_net,
lr=lr_optimizer,
batch_size=256,
epochs=10,
get_data=get_info)
baseline = QValueFunction_deterministic(net=q_net,
target_net=q_target_net,
optimizer=optimizer,
tau=tau,
update_target_every=update_target_every)
Policy_Based_Agent.__init__(self, policy=policy, baseline=baseline)
|
{"hexsha": "e6d5bbd4375c58420de02a99e5d5816b2c2c46cd", "size": 17183, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/agents.py", "max_stars_repo_name": "hzxsnczpku/banrinochoujou", "max_stars_repo_head_hexsha": "9e04bc5c561ab674fd10e4991aa4b5ae86364f6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-10-22T07:27:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-23T08:01:28.000Z", "max_issues_repo_path": "models/agents.py", "max_issues_repo_name": "hzxsnczpku/banrinochoujou", "max_issues_repo_head_hexsha": "9e04bc5c561ab674fd10e4991aa4b5ae86364f6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/agents.py", "max_forks_repo_name": "hzxsnczpku/banrinochoujou", "max_forks_repo_head_hexsha": "9e04bc5c561ab674fd10e4991aa4b5ae86364f6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-10-31T10:44:04.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-31T10:44:04.000Z", "avg_line_length": 35.4288659794, "max_line_length": 103, "alphanum_fraction": 0.448932084, "include": true, "reason": "import numpy", "num_tokens": 3081}
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
SEGMENT_PRECISION = 1e-6
class Segment(namedtuple('Segment', ['start', 'end'])):
"""
Temporal interval defined by its `start` and `end` times.
Multiple segment operators are available -- including intersection (&),
inclusion (in), emptiness test, start/end time shifting (+, -, >>, <<).
They are illustrated in **Examples** section.
Comparison of two segments is also available (==, !=, <, <=, >, >=).
Two segments are equal iff they have identical start and end times.
Segment S is smaller than segment T iff S.start < T.start or if they have
the same start time and S.end < T.end.
Parameters
----------
start, end : float
`start` and `end` times, in seconds.
Returns
-------
segment : Segment
New segment with `start` and `end` times.
Examples
--------
Create a new temporal interval between 00:13.000 and 00:37.000.
>>> segment = Segment(start=13., end=37)
>>> print segment
[13.000 --> 37.000]
Inclusion, intersection, union & gap
>>> s1 = Segment(1, 2)
>>> s2 = Segment(0, 3)
>>> if s1 in s2:
... print "Segment %s is included in segment %s." % (s1, s2)
Segment [1.000 --> 2.000] is included in segment [0.000 --> 3.000].
>>> s3 = Segment(2, 5)
>>> print s1 & s3
∅
>>> print s2 & s3
[2.000 --> 3.000]
>>> print s2 | s3
[0.000 --> 5.000]
>>> print s1 ^ Segment(5, 7)
[2.000 --> 5.000]
Test whether segment is empty or not.
>>> if not Segment(10, 10):
... print "Segment is empty."
Segment is empty.
Comparison
>>> s1 = Segment(1, 3)
>>> s2 = Segment(1, 3)
>>> s3 = Segment(2, 6)
>>> s4 = Segment(1, 2)
>>> for s in sorted([s1, s2, s3, s4]):
... print s
[1.000 --> 2.000]
[1.000 --> 3.000]
[1.000 --> 3.000]
[2.000 --> 6.000]
"""
def __new__(cls, start=0., end=0.):
# add default values
return super(Segment, cls).__new__(cls, start, end)
def __nonzero__(self):
"""Use the expression 'if segment'
Returns
-------
valid : bool
False if segment is empty, True otherwise.
"""
return bool((self.end - self.start) > SEGMENT_PRECISION)
__bool__ = __nonzero__  # Python 3 compatibility for 'if segment'
def _get_duration(self):
return self.end - self.start if self else 0.
duration = property(fget=_get_duration)
"""Get segment duration, in seconds."""
def _get_middle(self):
return .5 * (self.start + self.end)
middle = property(fget=_get_middle)
"""Get segment middle time, in seconds."""
def copy(self):
"""Duplicate segment."""
return Segment(start=self.start, end=self.end)
# ------------------------------------------------------- #
# Inclusion (in), intersection (&), union (|) and gap (^) #
# ------------------------------------------------------- #
def __contains__(self, other):
"""Use the expression 'other in segment'
Returns
-------
contains : bool
True if other segment is fully included, False otherwise
"""
return (self.start <= other.start) and (self.end >= other.end)
def __and__(self, other):
"""Use the expression 'segment & other'
Returns
-------
segment : Segment
Intersection of the two segments
"""
start = max(self.start, other.start)
end = min(self.end, other.end)
return Segment(start=start, end=end)
def intersects(self, other):
"""Check whether two segments intersect each other
Parameters
----------
other : Segment
Other segment
Returns
-------
intersects : bool
True if segments intersect, False otherwise
"""
if not self or not other:
return False
return (self.start == other.start) or \
(self.start < other.start and
other.start < self.end - SEGMENT_PRECISION) or \
(self.start > other.start and
self.start < other.end - SEGMENT_PRECISION)
def overlaps(self, t):
"""Check whether timestamp `t` (in seconds) falls within the segment."""
return self.start <= t and self.end >= t
def __or__(self, other):
"""Use the expression 'segment | other'
Returns
-------
segment : Segment
Shortest segment that contains both segments
"""
# if segment is empty, union is the other one
if not self:
return other
# if other one is empty, union is self
if not other:
return self
# otherwise, do what's meant to be...
start = min(self.start, other.start)
end = max(self.end, other.end)
return Segment(start=start, end=end)
def __xor__(self, other):
"""Use the expression 'segment ^ other'
Returns
-------
segment : Segment
Gap between the two segments
"""
# if segment is empty, xor is not defined
if (not self) or (not other):
raise ValueError('gap is not defined when either segment is empty.')
start = min(self.end, other.end)
end = max(self.start, other.start)
return Segment(start=start, end=end)
def __str__(self):
"""Use the expression str(segment)"""
if self:
return '[%.3f --> %.3f]' % (self.start, self.end)
else:
return '∅'
def _pretty(self, seconds):
from datetime import timedelta
td = timedelta(seconds=seconds)
days = td.days
seconds = td.seconds
microseconds = td.microseconds
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if abs(days) > 0:
return '%d:%02d:%02d:%02d.%03d' % (days, hours, minutes,
seconds, microseconds / 1000)
else:
return '%02d:%02d:%02d.%03d' % (hours, minutes, seconds,
microseconds / 1000)
def pretty(self):
"""Human-readable representation of segments"""
return '[%s --> %s]' % (self._pretty(self.start),
self._pretty(self.end))
def __repr__(self):
return '<Segment(%g, %g)>' % (self.start, self.end)
def for_json(self):
return {'start': self.start, 'end': self.end}
@classmethod
def from_json(cls, data):
return cls(start=data['start'], end=data['end'])
def _repr_png_(self):
from pyannote.core.notebook import repr_segment
return repr_segment(self)
class SlidingWindow(object):
"""Sliding window
Parameters
----------
duration : float > 0, optional
Window duration, in seconds. Default is 30 ms.
step : float > 0, optional
Step between two consecutive position, in seconds. Default is 10 ms.
start : float, optional
First start position of window, in seconds. Default is 0.
end : float > `start`, optional
Default is infinity (ie. window keeps sliding forever)
Examples
--------
>>> sw = SlidingWindow(duration, step, start)
>>> frame_range = (a, b)
>>> frame_range == sw.toFrameRange(sw.toSegment(*frame_range))
... True
>>> segment = Segment(A, B)
>>> new_segment = sw.toSegment(*sw.toFrameRange(segment))
>>> abs(segment) - abs(segment & new_segment) < .5 * sw.step
"""
def __init__(self, duration=0.030, step=0.010, start=0.000, end=None):
super(SlidingWindow, self).__init__()
# duration must be a float > 0
if duration <= 0:
raise ValueError("'duration' must be a float > 0.")
self.__duration = duration
# step must be a float > 0
if step <= 0:
raise ValueError("'step' must be a float > 0.")
self.__step = step
# start must be a float.
self.__start = start
# if end is not provided, set it to infinity
if end is None:
self.__end = np.inf
else:
# end must be greater than start
if end <= start:
raise ValueError("'end' must be greater than 'start'.")
self.__end = end
def __get_start(self):
return self.__start
start = property(fget=__get_start)
"""Sliding window start time in seconds."""
def __get_end(self):
return self.__end
end = property(fget=__get_end)
"""Sliding window end time in seconds."""
def __get_step(self):
return self.__step
step = property(fget=__get_step)
"""Sliding window step in seconds."""
def __get_duration(self):
return self.__duration
duration = property(fget=__get_duration)
"""Sliding window duration in seconds."""
def __closest_frame(self, t):
"""Closest frame to timestamp.
Parameters
----------
t : float
Timestamp, in seconds.
Returns
-------
index : int
Index of frame whose middle is the closest to `t`
"""
return int(np.rint(
(t - self.__start - .5 * self.__duration) / self.__step
))
def segmentToRange(self, segment):
"""Convert segment to 0-indexed frame range
Parameters
----------
segment : Segment
Returns
-------
i0 : int
Index of first frame
n : int
Number of frames
Examples
--------
>>> window = SlidingWindow()
>>> print window.segmentToRange(Segment(10, 15))
i0, n
"""
# find closest frame to segment start
i0 = self.__closest_frame(segment.start)
# find closest frame to segment end
j0 = self.__closest_frame(segment.end)
# return frame range as (start_frame, number_of_frame) tuple
i0 = max(0, i0)
n = j0 - i0
return i0, n
def rangeToSegment(self, i0, n):
"""Convert 0-indexed frame range to segment
Each frame represents a unique segment of duration 'step', centered on
the middle of the frame.
The very first frame (i0 = 0) is the exception. It is extended to the
sliding window start time.
Parameters
----------
i0 : int
Index of first frame
n : int
Number of frames
Returns
-------
segment : Segment
Examples
--------
>>> window = SlidingWindow()
>>> print window.rangeToSegment(3, 2)
[ --> ]
"""
# frame start time
# start = self.start + i0 * self.step
# frame middle time
# start += .5 * self.duration
# subframe start time
# start -= .5 * self.step
start = self.__start + (i0 - .5) * self.__step + .5 * self.__duration
duration = n * self.__step
end = start + duration
# extend segment to the beginning of the timeline
if i0 == 0:
start = self.start
return Segment(start, end)
def samplesToDuration(self, nSamples):
"""Returns duration of samples"""
return self.rangeToSegment(0, nSamples).duration
def durationToSamples(self, duration):
"""Returns samples in duration"""
return self.segmentToRange(Segment(0, duration))[1]
def __getitem__(self, i):
"""
Parameters
----------
i : int
Index of sliding window position
Returns
-------
segment : :class:`Segment`
Sliding window at ith position
"""
# window start time at ith position
start = self.__start + i * self.__step
# if the window would start at or beyond the end,
# there is no position left: return None
if start >= self.__end:
return None
return Segment(start=start, end=start + self.__duration)
def __iter__(self):
"""Sliding window iterator
Use expression 'for segment in sliding_window'
Examples
--------
>>> window = SlidingWindow(end=0.1)
>>> for segment in window:
... print segment
[0.000 --> 0.030]
[0.010 --> 0.040]
[0.020 --> 0.050]
[0.030 --> 0.060]
[0.040 --> 0.070]
[0.050 --> 0.080]
[0.060 --> 0.090]
[0.070 --> 0.100]
[0.080 --> 0.100]
[0.090 --> 0.100]
"""
# get window first position
i = 0
window = self[i]
# yield window while it's valid
while window:
yield window
# get window next position
i += 1
window = self[i]
def __len__(self):
"""Number of positions
Equivalent to len([segment for segment in window])
Returns
-------
length : int
Number of positions taken by the sliding window
(from start times to end times)
"""
if np.isinf(self.__end):
raise ValueError('infinite sliding window.')
# start looking for last position
# based on frame closest to the end
i = self.__closest_frame(self.__end)
while self[i]:
i += 1
length = i
return length
def copy(self):
"""Duplicate sliding window"""
duration = self.duration
step = self.step
start = self.start
end = self.end
sliding_window = SlidingWindow(
duration=duration, step=step, start=start, end=end
)
return sliding_window
if __name__ == "__main__":
import doctest
doctest.testmod()
|
{"hexsha": "b901896228e9521c827a73bf23d5a6b84a2619e8", "size": 15251, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyannote/core/segment.py", "max_stars_repo_name": "gw17/pyannote-core", "max_stars_repo_head_hexsha": "ccbb3c07d1c1c577391d05a296858287cd9e0dfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyannote/core/segment.py", "max_issues_repo_name": "gw17/pyannote-core", "max_issues_repo_head_hexsha": "ccbb3c07d1c1c577391d05a296858287cd9e0dfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyannote/core/segment.py", "max_forks_repo_name": "gw17/pyannote-core", "max_forks_repo_head_hexsha": "ccbb3c07d1c1c577391d05a296858287cd9e0dfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0865561694, "max_line_length": 79, "alphanum_fraction": 0.5481607763, "include": true, "reason": "import numpy", "num_tokens": 3578}
|
\section{Introduction and Related Work} \label{SIP:sec:intro}
Imagine an event organizer trying to convene an event -- for example,
a fundraiser. We assume that the time and venue for the event are fixed,
and that the only remaining decision for the organizer to make is whom
to invite among a set of agents.
An \emph{invitation} is simply defined to be a subset of agents.
The goal of the organizer is to maximize attendance (for example,
in order to maximize donations), but the potential invitees have
their own preferences over how many attendees there should be at the event
and possibly also who the potential attendees should be.
For example, a given donor may not want to attend if too few attendees
show up, but she may not want the event to be overly crowded.
Another donor may want to attend the event only
if her friends attend and her business competitor does not.
To model this setting, we turn our attention to the restricted case of the Group Activity Selection Problem (\GASP) with just one activity, but we generalize preferences of agents.
Specifically, in the Stable Invitations Problem (\SIP), each agent
can specify a set of friends and a set of enemies (in addition to her preference over sizes).
An agent is willing to attend only if all of her friends attend, none of her enemies attends, and
the number of attendees is acceptable to her.
Note that the Stable Invitations Problem is thus a generalization of the Group Activity Selection Problem restricted to a single activity.
Not surprisingly, the complexity of \SIPs depends heavily on the cardinalities of the friend sets and enemy sets, as the friends-and-enemies relation introduces combinatorial complexity into the problem of finding a good solution.
In this chapter we provide a complete analysis of complexity results on \SIPs; we consider individual rationality (IR) and Nash stability as we did for \GASP, and we also consider both asymmetric and symmetric friends-and-enemies relations.
\paragraph{Related Work.} %TODO: To relate this to previous chapter.
The rest of this chapter is organized as follows.
%TODO
\section{Definitions and Known Results} \label{SIP:sec:SIP:prelim}
To make this work self-contained, we begin by introducing the formal definitions proposed by
Lee and Shoham~\cite{LEE15AAAI}, yet we make slight modifications to notation for readability and consistency in this chapter.
\begin{definition}
An instance of the Stable Invitations Problem (\SIP) is given by a set of agents $N = \{a_1, a_2, \dots, a_n\}$, and an {\em approval set} $S_i \subseteq [1,n]$, a {\em friend set} $F_i \subseteq N$, and an {\em enemy set} $E_i \subseteq N$ for each agent $a_i\in N$.
It is interpreted that agent $a_i$ is willing to attend if all friends in $F_i$ attend, no one in $E_i$ attends, and the number of attendees (including $a_i$) is acceptable (i.e., the number is contained in $S_i$).
\end{definition}
\begin{definition}
An invitation $I$ in \SIPs is a subset of agents.
We say that an invitation $I$ is {\em individually rational} (IR) if for every agent $a_i\in I$, $|I| \in S_i$, $F_i \subseteq I$, and $E_i \cap I = \emptyset$.
We say that an invitation $I$ is {\em (Nash) stable} if it is individually rational, and if for every agent $a_j \not\in I$, $|I_j'| \not\in S_j$, $F_j \not\subseteq I_j'$, or $E_j \cap I_j' \neq \emptyset$ where $I_j' = I \cup \{a_j\}$.
\end{definition}
Individual rationality (IR) requires that every invited agent is willing to attend.
Stability further requires that those who are not invited are not willing to participate (without permission of others)
because not all of her friends are attending, some of her enemies are attending, or the number of attendees would be unacceptable.
We consider the following two problems of finding invitations of size $k$:
\begin{itemize}
\item $k$-IR-Invitation: $\exists$ IR invitation of size $k$?
\item $k$-Stable-Invitation: $\exists$ stable invitation of size $k$?
\end{itemize}
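To illustrate that stability is strictly stronger than individual rationality, consider the following small example (ours, added for illustration). Let $N = \{a_1, a_2\}$ with $S_1 = \{1\}$, $S_2 = \{2\}$, $F_2 = \{a_1\}$, and all other friend and enemy sets empty. The invitation $I = \{a_1\}$ is IR but not stable: $a_2$ is willing to join, since $|I \cup \{a_2\}| = 2 \in S_2$ and $F_2 \subseteq I \cup \{a_2\}$. In fact, this instance admits no stable invitation at all: $\emptyset$ is not stable because $a_1$ would join, and neither $\{a_2\}$ nor $\{a_1, a_2\}$ is IR.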
We first consider restrictions on inputs by limiting the size of largest friend-sets and enemy-sets, respectively.
For integer constants $\alpha$ and $\beta$, if an instance of \SIPs satisfies $|F_i| \leq \alpha$ and $|E_i| \leq \beta$ for all $a_i\in N$, we call it an $(\alpha,\beta)$-instance of \SIP.
Lee and Shoham~\cite{LEE15AAAI} showed that \SIPs can be solved in polytime only if $\alpha$ and $\beta$ are small enough, but the problems are NP-hard in general. We will consider the same restrictions in this work, and provide our complete analysis of parameterized complexity of \SIP.
In addition to these restrictions, we consider the special case where agents have symmetric social relationships.
\begin{definition} \label{SIP:def:symmetric_social}
Given an instance of \SIP, we say that agents have {\em symmetric social relationships} if $a_j\in F_i$ if and only if $a_i\in F_j$, and $a_l \in E_i$ if and only if $a_i \in E_l$, for all agents $a_i, a_j, a_l \in N$.
\end{definition}
Theorem~\ref{SIP:thm:nphard} summarizes the most relevant results of Darmann et al.~\cite{GASP12WINE} and Lee and Shoham~\cite{LEE15AAAI} on complexity of \SIPs.\footnote{
Darmann et al.~\cite{GASP12WINE} showed easiness when $\alpha=\beta=0$, while Lee and Shoham~\cite{LEE15AAAI} proved easiness and hardness in all other cases.}
\begin{theorem} \label{SIP:thm:nphard} [\cite{LEE15AAAI,GASP12WINE}]
$k$-IR-Invitation and $k$-Stable-Invitation can be solved in polynomial time if $(\max_{a_i \in N} |F_i|) + (\max_{a_i \in N} |E_i|) \leq 1$ (i.e., $\alpha + \beta \leq 1$). In other cases, both problems are NP-hard.
\end{theorem}
Note that $k$-IR-Invitation and $k$-Stable-Invitation are of the same classical complexity, even though stability is a stronger solution concept. Under parameterization, however, these two problems are contained in different complexity classes in the W-hierarchy (see Table~\ref{SIP:tbl:summary}).
In what follows, we show that the parameterized complexity of these problems varies with different solution concepts and under different restrictions on inputs to \SIP.
\section{Parameterized Complexity} \label{SIP:sec:results}
\begin{table*}[t!]
\small
\centering
\begin{tabular}{|l|*{4}{c|}|*{4}{c|}}\hline
\multirow{2}*{} & \multicolumn{4}{c||}{$k$-IR-Invitations} & \multicolumn{4}{c|}{$k$-Stable-Invitations} \\ \cline{2-9}
& $\beta = 0$ & $\beta = 1$ & $2 \leq \beta \leq f(k)$ & unbounded $\beta$ & $\beta = 0$ & $\beta = 1$ & $2 \leq \beta \leq f(k)$ & unbounded $\beta$ \\ \hline
$\alpha = 0$ & P & P & FPT & W[1]-C & P & P & FPT & W[2]-C \\ \hline
$\alpha = 1$ & P & FPT & FPT & W[1]-C & P & W[1]-C & W[1]-C & W[2]-C\\ \hline
$\alpha \geq 2$ & W[1]-C & W[1]-C & W[1]-C & W[1]-C & W[1]-C & W[1]-C & W[1]-C & W[2]-C \\ \hline
\end{tabular}
\caption{\small Complexity of $k$-IR-Invitation and $k$-Stable-Invitation. $f(k)$ can be an arbitrary function that depends only on $k$.
All entries other than ``P'' imply NP-completeness.
``W[1]-C'' and ``W[2]-C'' mean W[1]-completeness and W[2]-completeness, respectively.
Note that P and NP-completeness results were known prior to this work as summarized in Theorem~\ref{SIP:thm:nphard}, but all other results are original. }
\label{SIP:tbl:summary}
\end{table*}
In this section, we study parameterized complexity of $k$-IR-Invitation and $k$-Stable-Invitation.
Our main contributions are summarized in Table~\ref{SIP:tbl:summary}. For instance, finding an IR invitation of size $k$ is in FPT when $\alpha = 1$ and $\beta$ is a positive constant (bounded above by some function of $k$), but finding a stable invitation in the same cases is W[1]-complete.
\subsection{$k$-IR-Invitation}
Recall that $k$-IR-Invitation is the problem of finding an IR invitation of size $k$.
When $\alpha + \beta > 1$, the problem is known to be NP-hard (Theorem~\ref{SIP:thm:nphard}).
We first present easiness results: $k$-IR-Invitation is in W[1] in general, and it is in FPT if $\alpha \leq 1$ and $\beta$ is bounded by some function $f(k)$ of $k$.
We then present hardness results by showing that $k$-IR-Invitation is W[1]-hard when $\alpha \geq 2$ and/or $\beta$ is unbounded.
\begin{theorem} \label{SIP:thm:IR_invitation_W1}
$k$-IR-Invitation is in W[1].
\end{theorem}
\begin{proof}[Proof sketch]
We reduce $k$-IR-Invitation to the weighted circuit SAT (WCSAT) of constant depth and of weft at most 1. Details of the proof can be found in Appendix.
\end{proof}
\begin{theorem} \label{SIP:thm:IR_invitation_FPT}
$k$-IR-Invitation is in FPT if $\alpha \leq 1$ and $\beta \leq f(k)$ where $f(k)$ can be an arbitrary function of $k$.
\end{theorem}
\begin{proof}
Without loss of generality, assume that $k\in S_i$ for all $a_i \in N$.
Otherwise, we can remove $a_i$ from the input instance as no IR invitation of size $k$ can contain $a_i$. If $a_i$ is removed, and there is some $a_j$ with $a_i \in F_j$, we remove $a_j$ as well for the same reason. We repeat this removal process until no such agent remains (this can be done in linear time).
Let $\mathcal{A}$ be some polytime algorithm that solves $k$-IR-Invitation if $\alpha \leq 1$ and $\beta = 0$ (it exists due to Theorem~\ref{SIP:thm:nphard}). We will use $\mathcal{A}$ as a sub-routine in our FPT algorithm. Consider any coloring $c$ which colors agents using two colors $\{0,1\}$; let $c(i) \in \{0,1\}$ be the color of agent $a_i$.
We say that coloring $c$ and IR invitation $I$ of size $k$ are {\em compatible} if the following holds: for every agent $a_i\in I$, $c(i) = 1$, and for every agent $a_j \in \bigcup_{a_i\in I} E_i$, $c(j) = 0$.
Note that coloring $c$ may be compatible with any number of IR invitations of size $k$ (possibly none), and any IR invitation of size $k$ may be compatible with many colorings (but it is compatible with at least one coloring).
Given some arbitrary coloring $c$, we can find an IR invitation of size $k$ that is compatible with $c$ or determine that no compatible IR invitation exists in FPT time as follows.
First, we re-color to color $0$ every agent $a_i$ with $c(i)=1$ such that $\exists a_j\in F_i$ with $c(j)=0$ or $\exists a_l \in E_i$ with $c(l) = 1$ (the order in which we re-color agents does not matter).
Notice that this process does not re-color any agent $a_i\in I$ if $I$ is compatible with $c$. After the re-coloring step, let $N_1 = \{a_i\in N: c(i) = 1\}$, and we run the algorithm $\mathcal{A}$ on $N_1$ as input. Suppose that $\mathcal{A}$ finds an IR invitation $I$ of size $k$ given $N_1$. Then $I$ is individually rational: its friend constraints are satisfied (due to the correctness of $\mathcal{A}$), and its enemy constraints are satisfied because no agent with color $0$ is included in $N_1$ (enforced by the coloring). Now suppose that $\mathcal{A}$ reports that no IR invitation of size $k$ exists among the agents in $N_1$. Then there is no IR invitation of size $k$ that is compatible with $c$: if such an invitation $I' \subseteq N_1$ existed, then $I'$ would satisfy the friend constraints (because it is IR), and therefore $\mathcal{A}$ would find it, a contradiction. Hence if our algorithm begins with a coloring that is compatible with some IR invitation(s), it will find one.
If we color agents uniformly and independently at random, then the probability of success of our algorithm is at least $1/2^{k(1+\beta)}$: with respect to some fixed IR invitation $I^*$, we must color the $k$ agents in $I^*$ as $1$ and the at most $k\beta$ agents in the union of their enemy sets as $0$ to start with a compatible coloring. If we run this algorithm $2^{k(1+\beta)}\ln n$ times, the probability of success is at least $1 - 1/n$. Our FPT algorithm's runtime depends on the runtime of $\mathcal{A}$. The algorithm can be de-randomized using a family of $k$-perfect hash functions, as shown in the work by Alon et al.~\cite{ColorCoding}.
\end{proof}
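To make the amplification step explicit (a standard calculation, spelled out here for completeness): if a single run succeeds with probability $p \geq 1/2^{k(1+\beta)}$, then $t = 2^{k(1+\beta)}\ln n$ independent runs fail with probability at most
\[
(1-p)^t \leq e^{-pt} \leq e^{-\ln n} = \frac{1}{n}.
\]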
\begin{theorem} \label{SIP:thm:IR_invitation_large_beta}
$k$-IR-Invitation is W[1]-complete if $\beta$ is not bounded above by any function $f(k)$.
\end{theorem}
\begin{proof}
We reduce from the $k$-Independent-Set problem which is known to be W[1]-complete.
Given an arbitrary graph $G = (V, E)$ and a parameter $k$, we create agents $N = V = \{v_1, v_2, \dots, v_n\}$.
For each $v_i$, define $S_{v_i} = \{k\}$, $F_{v_i} = \emptyset$, and $E_{v_i} = \{v_j : (v_i, v_j)\in E\}$ (hence $\beta$ is equal to the max-degree of nodes in $G$).
If $I \subseteq V$ is an independent set of size $k$, then $I$ is an IR invitation in the instance we created: for all $v_i \in I$, we have $|I| = k \in S_{v_i}$, $F_{v_i} = \emptyset \subseteq I$, and $E_{v_i} \cap I = \emptyset$ because $I$ is an independent set in the original graph.
Conversely, suppose $I$ is an IR invitation of size $k$ in the instance we created. Then $I$ is an independent set, because no two agents in $I$ are enemies of each other, and thus their corresponding nodes in the graph are not neighbors of each other. This reduction proves W[1]-hardness, and W[1]-completeness follows from Theorem~\ref{SIP:thm:IR_invitation_W1}.
\end{proof}
\begin{theorem} \label{SIP:thm:IR_invitation_alpha2}
$k$-IR-Invitation is W[1]-complete if $\alpha \geq 2$.
\end{theorem}
\begin{proof}[Proof Sketch]
We reduce from the $k$-Clique problem.
Given an arbitrary graph $G = (V, E)$ and a parameter $k$, we create a set of agents $N$ as follows.
For each node $v_i\in V$, we create $k^2$ node-agents that are labeled as $w_{i,x}$ where $x \in [k^2]$.
For each node-agent $w_{i,x}$ we define $F_{w_{i,x}} = \{w_{i,x+1}\}$ (where $w_{i,k^2+1}$ is understood as $w_{i,1}$) and $E_{w_{i,x}} = \emptyset$.
Note that an IR invitation must include all or none of the $w_{i,x}$'s for each $i$ because of their friend sets.
Next, for each edge $(v_i, v_j) \in E$, we create an edge-agent $e_{i,j}$ with $F_{e_{i,j}} = \{w_{i,1}, w_{j,1}\}$ and $E_{e_{i,j}} = \emptyset$.
Note that if an IR invitation includes $e_{i,j}$, then it must also include all $2k^2$ node-agents of the form $w_{i,x}$ and $w_{j,x}$ with $x\in[k^2]$ (due to friend sets).
Finally, define $k' = k^3 + \binom{k}{2}$ to be the parameter for the $k$-IR-Invitations we created, and define approval sets of all agents to contain $k'$.
Clearly the instance we created satisfies $\alpha = 2$ and $\beta = 0$.
The number of agents we created is $k^2|V| + |E|$, polynomial in the size of the original instance.
It remains to show that a clique of size $k$ exists if and only if an IR invitation of size $k' = k^3 + \binom{k}{2}$ exists; due to space, details of the proof can be found in Appendix.
Note that W[1]-completeness follows from Theorem~\ref{SIP:thm:IR_invitation_W1}.
\end{proof}
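As a sanity check on the choice of $k'$: an IR invitation corresponding to a clique on $k$ nodes consists of the $k^2$ node-agents for each of the $k$ clique nodes plus one edge-agent per clique edge, i.e.,
\[
k \cdot k^2 + \binom{k}{2} = k^3 + \binom{k}{2} = k'.
\]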
\subsection{$k$-Stable-Invitation}
$k$-IR-Invitation and $k$-Stable-Invitation have the same classical complexity for all values of $\alpha$ and $\beta$, but parameterization indicates that $k$-Stable-Invitation is a more difficult problem than $k$-IR-Invitation.
This is not surprising because a stable invitation requires that everyone (whether invited or not) be satisfied with the invitation.
% thm:stable_W2
\begin{theorem} \label{SIP:thm:stable_W2}
$k$-Stable-Invitation is in W[2]. When $\beta$ is bounded above by some function $f(k)$, it is in W[1].
\end{theorem}
\begin{proof}[Proof sketch]
We reduce $k$-Stable-Invitation to the weighted circuit SAT (WCSAT) of constant depth and of weft at most 2; if $\beta$ is bounded, then weft can be reduced to $1$. Details of the proof can be found in Appendix.
\end{proof}
\begin{theorem} \label{SIP:thm:stable_FPT}
$k$-Stable-Invitation is in FPT when $\alpha = 0$ and $\beta \leq f(k)$ where $f(k)$ can be an arbitrary function of $k$.
\end{theorem}
\begin{proof}[Proof Sketch]
The main idea is similar to that of our proof of Theorem~\ref{SIP:thm:IR_invitation_FPT}. However, finding a stable invitation is considerably more difficult, because we must take uninvited agents into account.
We first color all agents using two colors $\{0,1\}$ uniformly and independently at random; let $c$ be this coloring such that $c(i)$ is the color of agent $a_i$.
If there is some $a_i$ with $c(i)=1$ such that $k\not\in S_i$, or such that $\exists a_j\in E_i$ with $c(j)=1$, then we re-color $a_i$ to $c(i) = 0$. We repeat this until no such agent remains.
Now we will find $k$ agents of color $1$ that form a stable invitation. Agents of color $0$ will surely be uninvited, but we must ensure stability -- we must invite at least one enemy of every agent of color $0$ who approves size $k+1$ (such an agent would otherwise be willing to join). This can be done in a brute-force manner in FPT time: the depth of the search tree is at most $k$ (as we can invite up to $k$ agents) and the branching factor is $f(k)$ (because each agent has at most $f(k)$ enemies), and thus the search space is bounded above by $O((f(k))^k)$. After choosing (at most) $k$ agents to be included in our solution, the rest of the algorithm is similar to what we did in the proof of Theorem~\ref{SIP:thm:IR_invitation_FPT}; details can be found in Appendix.
\end{proof}
\begin{theorem} \label{SIP:thm:stable_W1hard_alpha1_beta1}
$k$-Stable-Invitation is W[1]-complete if $\alpha,\beta \geq 1$ and $\beta$ is bounded above by some function $f(k)$.
\end{theorem}
\begin{proof}[Proof sketch]
We reduce from the $k$-Clique problem to show W[1]-hardness.
Let $G = (V, E)$ be an arbitrary graph for the $k$-Clique problem with parameter $k$.
Let us define $k' = 2(k^3 + \binom{k}{2})$ which is the parameter for $k$-Stable-Invitation.
For each node $v_i \in V$, we first create a group of $2k^2$ node-agents (call them $G_i$) such that $G_i=\{w_{i,x}: x \in [2k^2]\}$, and define $F_{w_{i,x}} = \{w_{i,x+1}\}$ (where $w_{i,2k^2+1}$ is understood as $w_{i,1}$) and $S_{w_{i,x}} = \{k'\}$.
For each edge $(v_i, v_j) \in E$, we create four edge-agents $e_{i,j}, e'_{i,j}, f_{i,j}$, and $f'_{i,j}$.
Define $F_{e_{i,j}} = \{w_{i,1}\}$, $F_{e'_{i,j}} = \{w_{j,1}\}$, and $S_{e_{i,j}} = S_{e'_{i,j}} = \{k'\}$.
Define $F_{f_{i,j}} = \{e_{i,j}\}$, $E_{f_{i,j}} = \{e'_{i,j}\}$, and $S_{f_{i,j}} = \{k'+1\}$.
Define $F_{f'_{i,j}} = \{e'_{i,j}\}$, $E_{f'_{i,j}} = \{e_{i,j}\}$, and $S_{f'_{i,j}} = \{k'+1\}$.
We have created $2k^2n$ node-agents and $4|E|$ edge-agents, whose size is polynomial in $n,k$, and each agent we created has at most one friend and at most one enemy (thereby satisfying $\alpha=\beta=1$).
It remains to show that a clique of size $k$ exists if and only if a stable invitation of size $k'$ exists;
due to space, details of the proof can be found in Appendix.
Note that W[1]-completeness follows from Theorem~\ref{SIP:thm:stable_W2}.
\end{proof}
\begin{theorem} \label{SIP:thm:stable_W1hard_alpha2_beta0}
$k$-Stable-Invitation is W[1]-complete if $\alpha \geq 2$ and $\beta$ is bounded above by some function $f(k)$.
\end{theorem}
\begin{proof}[Proof sketch]
We reduce from the $k$-Independent-Set problem to show W[1]-hardness.
Let $G = (V, E)$ be an arbitrary instance of the $k$-Independent-Set problem with parameter $k$.
For each node $v\in V$ we create a node-agent $v$ with approval set $S_v = \{k\}$ and friend set $F_v = \emptyset$.
For each edge $(v, w) \in E$ we create an edge-agent $e_{v,w}$ with friend set $F_{e_{v,w}} = \{v, w\}$ and approval set $S_{e_{v,w}} = \{k+1\}$.
It remains to show that a stable invitation of size $k$ exists if and only if an independent set of size $k$ exists;
due to space, details of the proof can be found in Appendix.
Note that W[1]-completeness follows from Theorem~\ref{SIP:thm:stable_W2}.
\end{proof}
\begin{theorem} \label{SIP:thm:stable_W2hard_beta}
$k$-Stable-Invitation is W[2]-complete if $\beta$ is not bounded above by any function of $k$.
\end{theorem}
\begin{proof}[Proof sketch]
We reduce from the $k$-Dominating-Set problem which is known to be W[2]-hard.
Given an arbitrary graph $G = (V, E)$ and a parameter $k$, we create $2n$ node-agents by creating $x_i$ and $y_i$ for each $v_i\in V$. We define their approval sets and enemy sets as follows: $S_{x_i} = \{k\}$ and $E_{x_i} = \emptyset$ for all $x_i$ while $S_{y_i} = \{k+1\}$ and $E_{y_i} = \{x_i\} \cup \{x_j : (v_i, v_j)\in E\}$ for all $y_i$. Note that a stable invitation cannot contain any of $y_i$'s because of their approval sets.
It remains to show that a dominating set of size $k$ exists if and only if a stable invitation of size $k$ exists; due to space, details of the proof can be found in Appendix.
W[2]-completeness follows from Theorem~\ref{SIP:thm:stable_W2}.
\end{proof}
\section{Symmetric Social Relationship} \label{SIP:sec:symm}
\begin{table*}[t!]
\small
\centering
\begin{tabular}{|l|*{5}{c|}|*{5}{c|}}\hline
\multirow{2}*{} & \multicolumn{5}{c||}{$k$-IR-Invitations (symmetric social relationships)} & \multicolumn{5}{c|}{$k$-Stable-Invitations (symmetric social relationships)} \\ \cline{2-11}
& $\beta = 0$ & $\beta = 1$& $\beta=2$ & $3 \leq \beta \leq f(k)$ & unbounded $\beta$ & $\beta = 0$ & $\beta = 1$ & $\beta=2$ & $3 \leq \beta \leq f(k)$ & unbounded $\beta$ \\ \hline
$\alpha = 0$ & P & P & P & FPT & W[1]-C & P & P & P & FPT & W[2]-C \\ \hline
$\alpha = 1$ & P & P & FPT & FPT & W[1]-C & P & P & FPT & FPT & W[2]-C \\ \hline
$\alpha \geq 2$ & P & FPT & FPT & FPT & W[1]-C & P & FPT & FPT & FPT & W[2]-C \\ \hline
\end{tabular}
\caption{\small Complexity of \SIPs with symmetric social relationships. $f(k)$ can be an arbitrary function that depends only on $k$.
All entries other than ``P'' imply NP-completeness.
``W[1]-C'' and ``W[2]-C'' mean W[1]-completeness and W[2]-completeness, respectively.
All results are original (including classical complexity results). }
\label{SIP:tbl:summary_symmetric}
\end{table*}
Recall the definition of ``symmetric social relationships'' from Definition~\ref{SIP:def:symmetric_social}.
Under symmetric social relationships, both $k$-IR-Invitation and $k$-Stable-Invitation admit efficient FPT algorithms when $\beta$ is bounded, as shown in Table~\ref{SIP:tbl:summary_symmetric}, although their complexity does not change when $\beta$ is unbounded (W[1]-complete and W[2]-complete, respectively).
When we compare results in Table~\ref{SIP:tbl:summary} and Table~\ref{SIP:tbl:summary_symmetric}, two interesting observations can be made.
First, $k$-IR-Invitations and $k$-Stable-Invitations have the same classical complexity even under symmetric social relationships. Second, both problems now admit efficient FPT algorithms for broader domains of inputs -- as long as $\beta$ is bounded.
Note that our classical complexity results are original (i.e., not implied by Theorem~\ref{SIP:thm:nphard}) because Lee and Shoham~\cite{LEE15AAAI} did not consider the special case of symmetric social relationships.
\subsection{Symmetric $k$-IR-Invitations}
We first present classical complexity results for $k$-IR-Invitations under symmetric social relationships, followed by parameterized complexity results.
\begin{theorem} \label{SIP:thm:symmetric_IR_p_npc}
When agents have symmetric social relationships,
$k$-IR-Invitations can be solved in polynomial time if (i) $\beta = 0$, (ii) $\beta = 1$ and $\alpha \leq 1$, or (iii) $\beta = 2$ and $\alpha = 0$. Otherwise, the problem is NP-hard.
\end{theorem}
\begin{proof}[Proof sketch]
Let us consider case (ii) in the statement.
As before, without loss of generality assume that all agents accept the size $k$ (i.e., $k\in S_i$ for all $a_i\in N$).
We first construct an {\em enemy graph} in which nodes represent agents, and we create an edge between two nodes if their corresponding agents are enemies of each other. For every pair of friends, we merge their nodes in this graph into a meta-node of weight $2$ (if they are also enemies of each other, then we simply remove them from the graph); let us call the resulting graph a {\em friend graph}.
Now finding an IR invitation of size $k$ is equivalent to finding an independent set of total weight $k$ in the friend graph. Although finding an independent set (of given size) is NP-hard, all nodes in the friend graph have at most two edges, and thus each connected component in the friend graph is either a path or a cycle. A dynamic programming algorithm can solve this problem in polytime, which can be found in Appendix.
Polytime algorithms for cases (i) and (iii) can also be found in Appendix.
Let us now prove that the problem is NP-hard if none of the three conditions in the statement holds.
It is known that the Independent Set problem is NP-hard even if every node has degree at most 3~\cite{Garey_Max_Is_Cubic}.
Given an instance of this problem, we can create an instance of $k$-IR-Invitations as follows.
For each node, we create an agent $a_i$ with $S_i =\{k\}$ (agent only approves size $k$).
If there is an edge between two nodes, we make their corresponding agents enemies of each other.
The resulting instance is a valid instance (with symmetric social relationships) of $k$-IR-Invitations with $\alpha = 0$ and $\beta = 3$ (because each node in the original instance has at most three neighbors).
This shows NP-hardness of $k$-IR-Invitations with symmetric social relationships when $\alpha = 0$ and $\beta \geq 3$.
Other cases -- namely, $(\alpha \geq 2 \land \beta = 1)$ and $(\alpha \geq 1 \land \beta = 2)$ -- require modifications to our reduction, and details can be found in Appendix.
\end{proof}
\begin{theorem} \label{SIP:thm:symmetric_IR_FPT}
When agents have symmetric social relationships,
$k$-IR-Invitations is in FPT if $\beta \leq f(k)$ where $f(k)$ can be an arbitrary function of $k$.
\end{theorem}
\begin{proof}
As before, without loss of generality, assume $k\in S_i$ for all $a_i\in N$.
We first create a {\em friend graph} in which nodes represent agents, and we create an edge between two nodes if their corresponding agents are friends. Clearly, subsets of nodes in this graph and invitations have one-to-one correspondence.
In the friend graph, it is clear that all or none of the agents in each component must be chosen to form an IR invitation.
Thus, if any connected component contains two nodes whose corresponding agents are enemies of each other, then we can safely remove the component from the graph (as it cannot be included in any IR invitation).
Likewise, if any component contains more than $k$ agents, we can remove the component as well.
We then create an {\em enemy graph} in which nodes represent connected components in the friend graph. Each node in the enemy graph has a weight that is equal to the size of the component it represents, and we create an edge between two nodes if their corresponding components contain a pair of enemies (one agent in each component). Because each agent has at most $\beta$ enemies, each node in the enemy graph has at most $k\cdot \beta$ edges.
Notice that an independent set in the enemy graph represents an IR invitation in the original instance.
Similarly to the FPT algorithm given in proof of Theorem~\ref{SIP:thm:IR_invitation_FPT}, we use Color Coding to color each node in the enemy graph as $\{0,1\}$ with equal probability.
If there is any edge in the enemy graph both of whose end-points (components) have color $1$, then we re-color both of them as $0$. We repeat this process until no such pair exists (this can be done in linear time by scanning through the edges).
After this step, it is clear that all nodes of color $1$ form an independent set; we can then determine, using a knapsack-like algorithm, whether a subset of these nodes of total weight $k$ exists.
Provided that an IR invitation of size $k$ exists, this algorithm's probability of success is at least $(1/2^k) \cdot (1/2^{k\beta}) \geq 1/2^{k(1+f(k))}$. For any fixed IR invitation $I^*$ of size $k$,
we color all agents in $I^*$ as color $1$ with probability $1/2^k$, and with probability at least $1/2^{k \beta}$ we color the union of the enemies of all agents in $I^*$ as color $0$. Regardless of the coloring of all other agents, this ensures that all agents in $I^*$ remain colored $1$ in the enemy graph, and thus our algorithm can find $I^*$ (or some other solution).
The overall runtime of our algorithm is $O(f(k) n)$ because all sub-routines can be implemented in time linear in the size of each graph, and each graph contains at most $O(n)$ nodes and $O(f(k)n)$ edges.
We can repeat this randomized algorithm $2^{k(1+f(k))}\ln n$ times to increase the probability of success to $1-1/n$ (with overall runtime $2^{k(1+f(k))}(f(k) n \ln n)$).
This algorithm can also be de-randomized using a family of $k$-perfect hash functions~\cite{ColorCoding}.
\end{proof}
Lastly we show that $k$-IR-Invitations remains W[1]-complete, even under symmetric social relationships, when $\beta$ is not bounded. The proof of W[1]-hardness is similar to that of Theorem~\ref{SIP:thm:IR_invitation_large_beta}, and completeness follows from Theorem~\ref{SIP:thm:IR_invitation_W1}. We omit the proof of Theorem~\ref{SIP:thm:symmetric_IR_W1C}.
\begin{theorem} \label{SIP:thm:symmetric_IR_W1C}
When agents have symmetric social relationships,
$k$-IR-Invitations is W[1]-complete if $\beta$ is not bounded above by any function of $k$.
\end{theorem}
\subsection{Symmetric $k$-Stable-Invitations}
Interestingly, the complexity of $k$-Stable-Invitations and that of $k$-IR-Invitations are identical, except when $\beta$ is unbounded, if we assume symmetric social relationships.
This implies that the combinatorial complexity due to social relationships plays an important role in \SIP, and that restrictions on social relationships (such as symmetry) can substantially reduce the complexity.
Yet we emphasize that both polytime and FPT algorithms for $k$-Stable-Invitations are much more complicated than those for $k$-IR-Invitations, and much of its complexity is due to the additional requirement that uninvited agents must not be willing to attend.
We first present classical complexity results for $k$-Stable-Invitations under symmetric social relationships, followed by parameterized complexity results.
\begin{theorem} \label{SIP:thm:symmetric_stable_p_npc}
When agents have symmetric social relationships,
$k$-Stable-Invitations can be solved in polynomial time if (i) $\beta = 0$, (ii) $\beta = 1$ and $\alpha \leq 1$, or (iii) $\beta = 2$ and $\alpha = 0$. Otherwise, the problem is NP-hard.
\end{theorem}
\begin{proof}[Proof sketch]
Let us first consider the case (i) when $\beta = 0$.
We construct a friend graph as before, and find connected components in this graph.
Any stable invitation must contain all or none of the nodes in each connected component.
For each connected component, we check two things: Whether a stable invitation can contain all of nodes in the component and whether it can contain none of nodes in it.
To check the first, we simply check if the component is of size $k$ or less and if everyone in the component approves size $k$.
To check the second, we check whether the component contains two or more nodes (then we can leave them all out, as each excluded agent would be missing a friend) or whether it is a singleton component whose only agent does not approve size $k+1$ (then we can leave the agent out). All of these checks can be done in linear time.
Now we can use a dynamic programming algorithm to determine whether there is a subset of the connected components containing $k$ nodes in total such that every component (whether selected or not) satisfies the stability conditions (which are easily checked using the two properties computed above).
Algorithms for cases (ii) and (iii) can be found in Appendix.
Our reductions for $k$-IR-Invitations in proof of Theorem~\ref{SIP:thm:symmetric_IR_p_npc} show NP-hardness for $k$-Stable-Invitations as well, because agents only approve invitations of size $k$ in our reduction; it ensures that any uninvited agent would be unwilling to attend due to the size of an invitation.
\end{proof}
\begin{theorem} \label{SIP:thm:symmetric_stable_FPT}
When agents have symmetric social relationships, $k$-Stable-Invitations is in FPT if $\beta \leq f(k)$ where $f(k)$ can be an arbitrary function of $k$.
\end{theorem}
\begin{proof}[Proof sketch]
The main idea is similar to that of our proof of Theorem~\ref{SIP:thm:symmetric_IR_FPT}, but we need an additional idea to deal with the stability conditions. As before, we proceed by creating a {\em friend graph}, removing certain connected components, creating an {\em enemy graph}, coloring components using two colors $\{0,1\}$, and re-coloring any adjacent nodes of color $1$ to color $0$ (details are in Appendix). After the re-coloring step, any subset of nodes of color $1$ forms an independent set in the enemy graph (and yields an IR invitation).
Hence we only need to worry about the stability conditions while choosing a subset of nodes of color $1$ to form a stable invitation. Doing so excludes all nodes of color $0$, but only the singleton nodes (i.e., agents with no friends) may violate the stability conditions. To avoid this, we choose, in a brute-force manner, at least one enemy (of color $1$) of each singleton node of color $0$ that approves size $k+1$; the search space is bounded because we can only choose up to $k$ agents and each agent has at most $f(k)$ enemies (i.e., the search space is $O((f(k))^k)$). This pre-selection process is the crucial step in our algorithm (and is where we need the condition that $\beta$ is bounded). The rest of the algorithm is straightforward, and can be found in Appendix.
\end{proof}
Lastly we show that $k$-Stable-Invitations remains W[2]-complete, even under symmetric social relationships, when $\beta$ is not bounded. The proof of W[2]-hardness is similar to that of Theorem~\ref{SIP:thm:stable_W2hard_beta}, and completeness follows from Theorem~\ref{SIP:thm:stable_W2}. We omit the proof of Theorem~\ref{SIP:thm:symmetric_stable_W2C}.
\begin{theorem} \label{SIP:thm:symmetric_stable_W2C}
When agents have symmetric social relationships,
$k$-Stable-Invitations is W[2]-complete if $\beta$ is not bounded above by any function of $k$.
\end{theorem}
\section{Discussion and Future Work} \label{SIP:sec:discussion}
In this work we investigated the parameterized complexity of the Stable Invitations Problem (\SIP) for two different solution concepts -- individual rationality and (Nash) stability, when the size of a solution is parameterized.
We considered restrictions on inputs by limiting the number of friends and enemies each agent can have, and also studied the special case in which all agents have symmetric social relationships.
Despite the fact that the majority of the problems we consider in this work are NP-hard, we showed that many special cases of the problem admit efficient FPT algorithms.
Our results indicate that the computational complexity of \SIPs varies as the input is restricted or the solution concept changes -- distinctions that are invisible under classical complexity analysis.
Our work leaves a few interesting open problems for future work.
Lee and Shoham~\cite{LEE15AAAI} considered another solution concept in which agents who are not invited are not envious of those who are invited (motivated by `envy-freeness'). It would be interesting to analyze the parameterized complexity of finding an envy-free invitation of size $k$, and compare the results with what we have in this work. In addition, analyzing the parameterized complexity of the Group Activity Selection Problem~\cite{GASP12WINE} is another interesting direction for future work.
\subsubsection*{Appendix}
Missing details in ``proof sketches'' can be found in Appendix (submitted as supplemental material).
|
{"hexsha": "7b4b7abb4b3a8c4b1ce5630aed1cdd3e359f22d3", "size": 35580, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "prior to 2016-09-18/50SIP.tex", "max_stars_repo_name": "ltdtl/thesis", "max_stars_repo_head_hexsha": "b1585aa3e57e06b4368fb51540bbbf1c64c491df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-09-18T17:20:45.000Z", "max_stars_repo_stars_event_max_datetime": "2016-09-18T17:20:45.000Z", "max_issues_repo_path": "prior to 2016-09-18/50SIP.tex", "max_issues_repo_name": "ltdtl/thesis", "max_issues_repo_head_hexsha": "b1585aa3e57e06b4368fb51540bbbf1c64c491df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-01-29T07:21:01.000Z", "max_issues_repo_issues_event_max_datetime": "2017-01-29T07:21:01.000Z", "max_forks_repo_path": "prior to 2016-09-18/50SIP.tex", "max_forks_repo_name": "ltdtl/thesis", "max_forks_repo_head_hexsha": "b1585aa3e57e06b4368fb51540bbbf1c64c491df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 91.2307692308, "max_line_length": 995, "alphanum_fraction": 0.7295390669, "num_tokens": 10359}
|
import pytest
from pathlib import Path
import numpy as np
import scanpy as sc
import scipy
import scipy.sparse
from scTenifoldXct.core import scTenifoldXct
@pytest.fixture(scope="session")
def ada_skin():
data_path = Path(__file__).parent.parent / "./data/LS.h5ad"
ada = sc.read_h5ad(data_path)
data = scipy.sparse.csr_matrix.toarray(ada.X)
counts = np.asarray(np.expm1(data), dtype=int)
ada.layers['raw'] = counts
ada.layers['log1p'] = data
HVG_i = np.argsort(np.asarray(ada.var['vst.variance.standardized']))[-3000:]
return ada[:, HVG_i]
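# Hedged illustration of the HVG selection above (not used by the fixtures):
# np.argsort is ascending, so the last k indices pick the k largest
# standardized variances.
_var_demo = np.argsort(np.array([0.2, 1.5, 0.7, 3.1]))[-2:]
assert set(_var_demo) == {1, 3}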
@pytest.fixture(scope="session")
def xct_skin(ada_skin):
return scTenifoldXct(data=ada_skin,
cell_names=['Inflam. FIB', 'Inflam. DC'],
obs_label="ident",
species="human",
rebuild_GRN=True,
GRN_file_dir='./skin_net',
verbose = True)
# small dataset
@pytest.fixture(scope="session")
def xct_paul15():
ada = sc.datasets.paul15()[:, :100] # raw counts
ada.layers['raw'] = np.asarray(ada.X, dtype=int)
sc.pp.log1p(ada)
ada.layers['log1p'] = ada.X.copy()
return scTenifoldXct(data=ada, cell_names=['14Mo', '15Mo'],
obs_label="paul15_clusters",
rebuild_GRN=True, GRN_file_dir='./Net_for_Test',
query_DB=None, verbose=True)
|
{"hexsha": "19f3504b28f29a1c465b904d667d325f0b630d1c", "size": 1446, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/conftest.py", "max_stars_repo_name": "cailab-tamu/scTenifoldXct", "max_stars_repo_head_hexsha": "d25ded8dfb7f2951217a30ab71eccd6b060178f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/conftest.py", "max_issues_repo_name": "cailab-tamu/scTenifoldXct", "max_issues_repo_head_hexsha": "d25ded8dfb7f2951217a30ab71eccd6b060178f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/conftest.py", "max_forks_repo_name": "cailab-tamu/scTenifoldXct", "max_forks_repo_head_hexsha": "d25ded8dfb7f2951217a30ab71eccd6b060178f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4347826087, "max_line_length": 80, "alphanum_fraction": 0.5961272476, "include": true, "reason": "import numpy,import scipy", "num_tokens": 361}
|
import math
from functools import reduce
from matplotlib import cm
from matplotlib.colors import Normalize
import numpy as np
L = 1.000001
OVERFLOW = 10**20
quadabs = lambda z: z.real * z.real + z.imag * z.imag
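# quadabs returns |z|^2 without a sqrt; the escape tests below therefore
# compare against squared radii (e.g. |z|^2 <= 4 instead of |z| <= 2).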
def differentiate(p):
n = len(p) - 1
return [(n - i) * an for (i, an) in enumerate(p[:-1])]
def horner(p, z):
return reduce(lambda x, y: z * x + y, p)
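# Hedged sanity check (illustrative only): coefficients run from the highest
# degree down, so [1, 0, -2] encodes z^2 - 2 and its derivative is 2z.
assert horner([1, 0, -2], 3) == 7
assert differentiate([1, 0, -2]) == [2, 0]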
def juliaRadius(poly, l=L):
n = len(poly) - 1
an = abs(poly[0])
C = sum(map(abs, poly)) - an
# for |z| >= max(1, 2C/|a_n|, (2l/|a_n|)^(1/(n-1))) one has |p(z)| >= l|z|,
# so such orbits escape to infinity
return max(1, 2 * C / an, pow(2 * l / an, 1 / (n-1)))
def escapetimeMandelbrot(c, K):
k, ck = 1, c
while k < K and quadabs(ck) <= 4:
ck *= ck
ck += c
k += 1
return k if k < K else 0
def escapetimeJulia(z, p, K, R2):
k, zk = 1, z
while k < K and quadabs(zk) <= R2:
zk = horner(p, zk)
k += 1
return k if k < K else 0
def demMandelbrot(c, K, overflow=OVERFLOW):
ck, dk = c, 1
for _ in range(K):
if max(
abs(ck.real), abs(ck.imag),
abs(dk.real), abs(dk.imag)
) > overflow: break
dk *= 2 * ck
dk += 1
ck *= ck
ck += c
absck = abs(ck)
if absck <= 2: return 0
absdk = abs(dk)
if absdk == 0: return np.nan # rarely happens
estimate = math.log2(absck) * absck / absdk
return -math.log2(estimate)
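# Hedged note: the quantity log2(|z_k|) * |z_k| / |dz_k| computed above is,
# up to a constant factor, the classical exterior distance estimate
# d ~ |z| ln|z| / |z'|; taking -log2 turns small distances (points near the
# set) into large colour values.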
def demJulia(z, p, dp, K, R, overflow=OVERFLOW):
zk, dk = z, 1
for _ in range(K):
if max(
abs(zk.real) + abs(zk.imag),
abs(dk.real) + abs(dk.imag)
) > overflow: break
dk = horner(dp, zk) * dk
zk = horner(p, zk)
abszk = abs(zk)
if abszk < R: return 0
absdk = abs(dk)
if absdk == 0: return np.nan # rarely happens
estimate = math.log2(abszk) * abszk / absdk
return -math.log2(estimate)
# generates px^2 lattice points on the complex plane, covering the square
# of half-side `radius` centred at `center`
def _complexlattice(center, radius, px):
ReS = center.real - radius
ImS = center.imag - radius
dim = dre = 2 * radius / px
dz = complex(dre, 0)
zk = complex(ReS, ImS)
for _ in range(px):
for _ in range(px):
zk += dz
yield zk
zk = complex(ReS, zk.imag + dim)
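# Hedged illustration: the generator yields exactly px*px points, row by row.
assert len(list(_complexlattice(0j, 1.0, 4))) == 16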
def _algoValues(algo, center, radius, px, pb):
points = _complexlattice(center, radius, px)
pixels = np.empty((px, px), dtype=float)
for j in range(px):
for i in range(px):
pixels[j,i] = algo(next(points))
pb.update(px)
return pixels
def _applyColors(values, colormap, cmp, cpc, ic):
m, M = values.min(), values.max()
values[np.isnan(values)] = M # nan markers (none or few points); nan never equals nan, so use isnan
values[values == 0] = m if ic.name == 'continuous' else M # ic is an InteriorColor enum, so compare its name
normed = Normalize(m, M)(values)
if cpc is not None:
p, pf, pc = cpc
q = np.percentile(normed, p)
filt = normed <= q
normed[filt] = pow(normed[filt], pf)
filt = ~filt
normed[filt] = pow(normed[filt], pc)
elif cmp != 1:
normed = pow(normed, cmp)
colormat = colormap(normed)
return (255 * colormat[:,:,:3]).astype(int)
if __name__ == '__main__':
from sys import argv
from pathlib import Path
from enum import Enum
from typing import Tuple
from typer import Typer, Option
from tqdm import tqdm
from cv2 import imwrite # pip install opencv-python
img_dir = Path('img/')
data_dir = Path('data/')
img_dir.mkdir(exist_ok=True)
data_dir.mkdir(exist_ok=True)
app = Typer()
class Algorithm(Enum):
escapetime = 'escapetime'
DEM = 'DEM'
class InteriorColor(Enum):
continuous = 'continuous'
inverted = 'inverted'
def drawFractal(
algo, radius, center, px,
cmap, cmp, cpc, ic, cache,
filepath, cachepath, **kwargs
):
# check if data for image is cached
if cachepath.exists():
print('using cached data...')
values = np.load(cachepath)
else:
with tqdm(total=px*px, desc='data') as pb:
values = _algoValues(algo, center, radius, px, pb)
if cache:
cachepath.touch()
np.save(cachepath, values)
colormat = _applyColors(values, cmap, cmp, cpc, ic)
imwrite(str(filepath), colormat[...,::-1])
def initializeArgs(
cmap, fn, ext, alg,
center, radius, px, it, **kwargs
):
if center != "0" and radius is None:
raise Exception(
'if center is non-trivial, '
'then radius must be provided'
)
filename = Path((fn or ' '.join(argv)) + ext)
if argv[1] == 'mandelbrot': arg = 'mandelbrot'
if argv[1] == 'julia': arg = f'julia [{argv[2]}]'
cachename = f'{arg} {alg.name} {center} {radius} {px} {it}.npy'
return {
'center': complex(center),
'cmap': cm.get_cmap(cmap),
'filepath': img_dir / filename,
'cachepath': data_dir / cachename
}
@app.command('julia')
def julia(
polynomial: str,
center: str = Option("0", '-c'),
radius: float = Option(0, '-r'),
px: int = Option(1000, '-px'),
fn: str = Option('', '-fn'),
ext: str = Option('.png', '-ext'),
it: int = Option(250, '-it'),
alg: Algorithm = Option('DEM', '-alg'),
cmap: str = Option('inferno_r', '-cm'),
cmp: float = Option(1, '-cmp'),
cpc: Tuple[int,float,float] = Option(None, '-cpc'),
ic: InteriorColor = Option('continuous', '-ic'),
cache: bool = False
):
p = list(map(complex, polynomial.split()))
R = juliaRadius(p)
if not radius:
radius = R
print(f'using radius {R}')
if alg.name == 'DEM':
dp = differentiate(p)
algo = lambda z: demJulia(z, p, dp, it, R)
if alg.name == 'escapetime':
R2 = R * R
algo = lambda z: escapetimeJulia(z, p, it, R2)
args = initializeArgs(**locals())
drawFractal(**{**locals(), **args})
@app.command('mandelbrot')
def mandelbrot(
center: str = Option("-0.8", '-c'),
radius: float = Option(1.4, '-r'),
px: int = Option(1000, '-px'),
fn: str = Option('', '-fn'),
ext: str = Option('.png', '-ext'),
it: int = Option(250, '-it'),
alg: Algorithm = Option('DEM', '-alg'),
cmap: str = Option('gist_stern_r', '-cm'),
cmp: float = Option(1, '-cmp'),
cpc: Tuple[int,float,float] = Option(None, '-cpc'),
ic: InteriorColor = Option('continuous', '-ic'),
cache: bool = False
):
if alg.name == 'DEM':
algo = lambda c: demMandelbrot(c, it)
if alg.name == 'escapetime':
algo = lambda c: escapetimeMandelbrot(c, it)
args = initializeArgs(**locals())
drawFractal(**{**locals(), **args})
app()
|
{"hexsha": "78ad27ccaf87d71f8c7ca785901598bd1d976e41", "size": 7043, "ext": "py", "lang": "Python", "max_stars_repo_path": "fractal.py", "max_stars_repo_name": "kuco23/fractal", "max_stars_repo_head_hexsha": "bed6461bd19ebbd1c6cd07e679d83037df9adcf7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-14T21:24:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T21:24:04.000Z", "max_issues_repo_path": "fractal.py", "max_issues_repo_name": "kuco23/fractal", "max_issues_repo_head_hexsha": "bed6461bd19ebbd1c6cd07e679d83037df9adcf7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fractal.py", "max_forks_repo_name": "kuco23/fractal", "max_forks_repo_head_hexsha": "bed6461bd19ebbd1c6cd07e679d83037df9adcf7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.22406639, "max_line_length": 72, "alphanum_fraction": 0.5244924038, "include": true, "reason": "import numpy", "num_tokens": 2061}
|
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import learning_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix
import sklearn.model_selection
import sklearn.datasets
from sklearn import metrics
from sklearn import preprocessing
from collections import Counter
import numpy as np
import pylab as pl
import autosklearn.classification
import matplotlib.pyplot as plt
import pandas as pd
import os, sys
import PIL
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
if axes is None:
_, axes = plt.subplots(1, 1)
axes.set_title(title)
if ylim is not None:
axes.set_ylim(*ylim)
axes.set_xlabel("Ejemplos de Entrenamiento")
axes.set_ylabel("Puntaje")
train_sizes, train_scores, test_scores, fit_times, _ = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True, shuffle=True)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
# Plot learning curve
axes.grid()
axes.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Puntaje de entrenamiento")
axes.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Puntaje de validación cruzada")
axes.legend(loc="best")
# Function to visualize a dataset in 2D
def plot_data(X, y): # function to plot the data (X, y)
y_unique = np.unique(y)
colors = pl.cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X[y == this_y]
pl.scatter(this_X[:, 0], this_X[:, 1], c=np.array([color]),
alpha=0.5, edgecolor='k',
label="Class %s" % this_y)
pl.legend(loc="best")
pl.title("Data")
def gen_pred_fun(clf):
def pred_fun(x1, x2):
x = np.array([[x1, x2]])
return clf.predict(x)[0]
return pred_fun
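# Hedged mini-demo (illustrative only): gen_pred_fun wraps any estimator with
# a scikit-learn style .predict() so it can be evaluated point-wise; here a
# constant DummyClassifier stands in for a real fitted model.
from sklearn.dummy import DummyClassifier
_dummy = DummyClassifier(strategy="constant", constant=0).fit([[0.0, 0.0]], [0])
assert gen_pred_fun(_dummy)(1.0, 2.0) == 0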
def plot_decision_region(X, pred_fun, num):
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
min_x = min_x - (max_x - min_x) * 0.05
max_x = max_x + (max_x - min_x) * 0.05
min_y = min_y - (max_y - min_y) * 0.05
max_y = max_y + (max_y - min_y) * 0.05
x_vals = np.linspace(min_x, max_x, num)
y_vals = np.linspace(min_y, max_y, num)
XX, YY = np.meshgrid(x_vals, y_vals)
grid_r, grid_c = XX.shape
ZZ = np.zeros((grid_r, grid_c))
for i in range(grid_r):
for j in range(grid_c):
ZZ[i, j] = pred_fun(XX[i, j], YY[i, j])
pl.contourf(XX, YY, ZZ, 100, cmap=pl.cm.coolwarm, vmin=-1, vmax=2)
pl.colorbar()
pl.xlabel("x")
pl.ylabel("y")
datosDeEjemplo = pd.read_csv("../datos/Iris2.csv")
etiquetas_datosDeEjemplo = datosDeEjemplo["label"]
caracteristicas_datosDeEjemplo = datosDeEjemplo.drop("label", axis=1)
with pd.option_context('display.max_rows', 10, 'display.max_columns', 2):
print("#####################################################")
print("Las Caracteristicas de la variable son las siguientes: ")
print(caracteristicas_datosDeEjemplo)
print("\nSu Análisis Descriptivo es el siguiente: ")
print(caracteristicas_datosDeEjemplo.describe(include="all"))
print("#####################################################\n\n")
print("#####################################################")
print("\nLas Etiquetas del DataFrame son las siguientes: ")
print(etiquetas_datosDeEjemplo)
print("\nSu Análisis Descriptivo es el siguiente: ")
print(etiquetas_datosDeEjemplo.describe(include="all"))
print("#####################################################\n\n")
datosAPredecir = pd.read_csv("../datos/testDatos.csv", header=None)
miModelo = DecisionTreeClassifier()
caracteristicas_datosEntrenamiento, caracteristicas_datosPrueba, etiquetas_datosEntrenamiento, etiquetas_datosPrueba = train_test_split(caracteristicas_datosDeEjemplo, etiquetas_datosDeEjemplo, test_size=0.3)
miModelo.fit(caracteristicas_datosEntrenamiento, etiquetas_datosEntrenamiento)
print("\n\n#####################################################")
print("Resultados de la Validación Cruzada para el Modelo miModelo :\n")
datosPredicciones = datosAPredecir
caracteristicas_datosPredicciones = datosAPredecir
etiquetas_datosPredicciones = miModelo.predict(datosAPredecir)
desempeño_miModelo= miModelo.score(caracteristicas_datosPrueba, etiquetas_datosPrueba)
print("#####################################################")
print("Desempeño del modelo miModelo :", desempeño_miModelo)
print("#####################################################\n\n")
predictions_miModelo= miModelo.predict(caracteristicas_datosDeEjemplo)
cnf_matrix_miModelo= confusion_matrix(etiquetas_datosDeEjemplo, predictions_miModelo)
print('#####################################################\n')
print('Matriz de confusion del modelo, miModelo:\n')
print(cnf_matrix_miModelo)
print('#####################################################\n\n')
print('#####################################################\n')
print('Resultados del modelo miModelo:\n')
print('Precision: {}'.format(metrics.precision_score(etiquetas_datosDeEjemplo, predictions_miModelo, average='micro')))
print('Recall: {}'.format(metrics.recall_score(etiquetas_datosDeEjemplo, predictions_miModelo, average='micro')))
print('Puntaje F_1: {}'.format(metrics.f1_score(etiquetas_datosDeEjemplo, predictions_miModelo, average='micro')))
print('#####################################################\n\n')
MiModelo2 = KNeighborsClassifier(n_neighbors=5)
caracteristicas_datos2Entrenamiento, caracteristicas_datos2Prueba, etiquetas_datos2Entrenamiento, etiquetas_datos2Prueba = train_test_split(caracteristicas_datosPredicciones, etiquetas_datosPredicciones, test_size=0.3)
MiModelo2.fit(caracteristicas_datos2Entrenamiento, etiquetas_datos2Entrenamiento)
print("\n\n#####################################################")
print("Resultados de la Validación Cruzada para el Modelo MiModelo2 :\n")
datos2Predicciones = datosAPredecir
caracteristicas_datos2Predicciones = datosAPredecir
etiquetas_datos2Predicciones = MiModelo2.predict(datosAPredecir)
desempeño_MiModelo2= MiModelo2.score(caracteristicas_datosPrueba, etiquetas_datosPrueba)
print("#####################################################")
print("Desempeño del modelo MiModelo2 :", desempeño_MiModelo2)
print("#####################################################\n\n")
predictions_MiModelo2= MiModelo2.predict(caracteristicas_datosDeEjemplo)
cnf_matrix_MiModelo2= confusion_matrix(etiquetas_datosDeEjemplo, predictions_MiModelo2)
print('#####################################################\n')
print('Matriz de confusion del modelo, MiModelo2:\n')
print(cnf_matrix_MiModelo2)
print('#####################################################\n\n')
print('#####################################################\n')
print('Resultados del modelo MiModelo2:\n')
print('Precision: {}'.format(metrics.precision_score(etiquetas_datosDeEjemplo, predictions_MiModelo2, average='micro')))
print('Recall: {}'.format(metrics.recall_score(etiquetas_datosDeEjemplo, predictions_MiModelo2, average='micro')))
print('Puntaje F_1: {}'.format(metrics.f1_score(etiquetas_datosDeEjemplo, predictions_MiModelo2, average='micro')))
print('#####################################################\n\n')
plot_decision_region(caracteristicas_datosDeEjemplo.values, gen_pred_fun(miModelo), 100)
plot_data(caracteristicas_datosDeEjemplo.values, etiquetas_datosDeEjemplo.values)
pl.title("Region de Decision de miModelo")
pl.savefig('../datos/a1.jpg', bbox_inches='tight')
pl.show()
plot_learning_curve(miModelo, "Curva de aprendizaje de miModelo", caracteristicas_datosDeEjemplo, etiquetas_datosDeEjemplo, axes=None, ylim=None, cv=None)
plt.savefig('../datos/a2.jpg', bbox_inches='tight')
plt.show()
plot_decision_region(caracteristicas_datosDeEjemplo.values, gen_pred_fun(MiModelo2), 100)
plot_data(caracteristicas_datosDeEjemplo.values, etiquetas_datosDeEjemplo.values)
pl.title("Region de Decision de MiModelo2")
pl.savefig('a3.jpg', bbox_inches='tight')
pl.show()
plot_learning_curve(MiModelo2, "Curva de aprendizaje de MiModelo2", caracteristicas_datosDeEjemplo, etiquetas_datosDeEjemplo, axes=None, ylim=None, cv=None)
plt.savefig('a4.jpg', bbox_inches='tight')
plt.show()
df_datosPredicciones = caracteristicas_datosPredicciones
df_datosPredicciones['Etiquetas'] = etiquetas_datosPredicciones
df_datosPredicciones.to_csv('datosPredicciones.csv', index=False)
df_datos2Predicciones = caracteristicas_datos2Predicciones
df_datos2Predicciones['Etiquetas'] = etiquetas_datos2Predicciones
df_datos2Predicciones.to_csv('datos2Predicciones.csv', index=False)
|
{"hexsha": "4175a16b7fe24d22a2fbedbfcb127dcec2b13d45", "size": 9555, "ext": "py", "lang": "Python", "max_stars_repo_path": "output/UNaIAModel.py", "max_stars_repo_name": "baguevaram/UNa-IA", "max_stars_repo_head_hexsha": "e946a74dae97c44fe7d3eba1adf8e849a3e0a4bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "output/UNaIAModel.py", "max_issues_repo_name": "baguevaram/UNa-IA", "max_issues_repo_head_hexsha": "e946a74dae97c44fe7d3eba1adf8e849a3e0a4bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "output/UNaIAModel.py", "max_forks_repo_name": "baguevaram/UNa-IA", "max_forks_repo_head_hexsha": "e946a74dae97c44fe7d3eba1adf8e849a3e0a4bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.0261780105, "max_line_length": 218, "alphanum_fraction": 0.6787022501, "include": true, "reason": "import numpy", "num_tokens": 2428}
|
#!/usr/bin/python
#Student name: Iva Bubalo
#Email: i.bubalo1@nuigalway.ie
#Student ID: 20235871
#GitHub: https://github.com/ivabu/ARC/upload/master/src
import os, sys
import json
import numpy as np
import re
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
################################
#####Task 1: 6f8cd79b.json
#Required Transformation: Find the first and last list in data, update the color of each element.
#Then find the first and last element in each list, and update the color of those elements.
def solve_6f8cd79b(data):
color = 8
for i in range(0,len(data)): #update color for the first and last row
if i == 0 or i == len(data)-1:
for j in range(0,len(data[i])):
data[i][j] = color
for line in data: #update color for the first and last element in each list
line[0] = color
line[-1] = color
return data
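# Hedged illustration (not one of the assignment's solvers): on a 3x3 zero
# grid the transformation paints an 8-coloured border around a black centre.
_demo_grid = solve_6f8cd79b(np.zeros((3, 3), dtype=int))
assert (_demo_grid == np.array([[8, 8, 8], [8, 0, 8], [8, 8, 8]])).all()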
#####Task 2: 3bd67248.json
#Required Transformation:
#Update the colour of the elements in the last row.
#Then iterate over the rows in reverse, subtracting the iterator from the row length to colour the diagonal.
def solve_3bd67248(data):
bottom_colour = 4
diagonal_colour = 2
# Bottom line colouring
for i in range(0,len(data)):
if i == len(data)-1:
for j in range(1,len(data[i])):
data[i][j] = bottom_colour
# Diagonal line colouring
i = len(data) - 1
while i >= 0: #reversed iteration
# Colour the anti-diagonal square of this row if it is still black
j = len(data[i]) - 1 #last column index of the current row
col = j - i #current column: last index minus the row iterator
if data[i][col] == 0: # black colour check
data[i][col] = diagonal_colour
i = i - 1
return data
#####Task 3: a2fd1cf0.json
#Required Transformation: Assign colours and find their coordinates. From the red square we always draw horizontally,
#and from the green square we always draw vertically until we meet the y coordinate of the red square.
def solve_a2fd1cf0(data):
red = 2
green = 3
colour = 8
red_coords = find_color_index(data, red)
green_coords = find_color_index(data, green)
if(red_coords[1] > green_coords[1]):
# draw line to the left
for i in range(green_coords[1], red_coords[1]):
data[red_coords[0]][i] = colour
else:
for i in range(red_coords[1], green_coords[1]):
data[red_coords[0]][i+1] = colour
# draw in right direction
if(red_coords[0] > green_coords[0]):
for i in range(green_coords[0] + 1, red_coords[0]):
data[i][green_coords[1]] = colour
# draw upwards
else:
for i in range(red_coords[0], green_coords[0]):
data[i][green_coords[1]] = colour
# draw downwards
return data
def find_color_index(data, colour):
for i in range(0, len(data)):
for j in range(0, len(data[i])):
if data[i][j] == colour:
return i, j
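# Hedged illustration: find_color_index scans row-major and returns the
# (row, col) of the first matching cell, or None when the colour is absent.
assert find_color_index([[0, 0], [0, 3]], 3) == (1, 1)
assert find_color_index([[0, 0]], 5) is None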
############################
def main():
# Find all the functions defined in this file whose names are
# like solve_abcd1234(), and run them.
# regex to match solve_* functions and extract task IDs
p = r"solve_([a-f0-9]{8})"
tasks_solvers = []
# globals() gives a dict containing all global names (variables
# and functions), as name: value pairs.
for name in globals():
m = re.match(p, name)
if m:
# if the name fits the pattern eg solve_abcd1234
ID = m.group(1) # just the task ID
solve_fn = globals()[name] # the fn itself
tasks_solvers.append((ID, solve_fn))
for ID, solve_fn in tasks_solvers:
# for each task, read the data and call test()
directory = os.path.join("..", "data", "training")
json_filename = os.path.join(directory, ID + ".json")
data = read_ARC_JSON(json_filename)
test(ID, solve_fn, data)
def read_ARC_JSON(filepath):
"""Given a filepath, read in the ARC task data which is in JSON
format. Extract the train/test input/output pairs of
grids. Convert each grid to np.array and return train_input,
train_output, test_input, test_output."""
# Open the JSON file and load it
data = json.load(open(filepath))
# Extract the train/test input/output grids. Each grid will be a
# list of lists of ints. We convert to Numpy.
train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]
train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]
test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]
test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]
return (train_input, train_output, test_input, test_output)
def test(taskID, solve, data):
"""Given a task ID, call the given solve() function on every
example in the task data."""
print(taskID)
train_input, train_output, test_input, test_output = data
print("Training grids")
for x, y in zip(train_input, train_output):
yhat = solve(x)
show_result(x, y, yhat)
print("Test grids")
for x, y in zip(test_input, test_output):
yhat = solve(x)
show_result(x, y, yhat)
def show_result(x, y, yhat):
print("Input")
print(x)
print("Correct output")
print(y)
print("Our output")
print(yhat)
print("Correct?")
# if yhat has the right shape, then (y == yhat) is a bool array
# and we test whether it is True everywhere. if yhat has the wrong
# shape, then y == yhat is just a single bool.
print(np.all(y == yhat))
if __name__ == "__main__": main()
|
{"hexsha": "519532d95c5c22f19895e00bf8d6a734f004c781", "size": 5986, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/manual_solve.py", "max_stars_repo_name": "ivabu/ARC", "max_stars_repo_head_hexsha": "842c351b0e2aea6033e075f9584aa56cbd8e277f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/manual_solve.py", "max_issues_repo_name": "ivabu/ARC", "max_issues_repo_head_hexsha": "842c351b0e2aea6033e075f9584aa56cbd8e277f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/manual_solve.py", "max_forks_repo_name": "ivabu/ARC", "max_forks_repo_head_hexsha": "842c351b0e2aea6033e075f9584aa56cbd8e277f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5326086957, "max_line_length": 113, "alphanum_fraction": 0.6266288005, "include": true, "reason": "import numpy", "num_tokens": 1569}
|
# Interpretable CNN for Big Five personality traits using audio data #
# Get the 20 highest predictions for each trait #
import scipy.io
import numpy as np
import pandas as pd
import tensorflow as tf
import heapq
# Load files.
model_preds = np.load('.../path/to/load/model_pred.npy')
model_conv_features = np.load('.../path/to/load/model_conv_features.npy')
model_inputs = np.load('.../path/to/load/model_inputs.npy')
model_preds = model_preds[:,0,:]
model_conv_features = model_conv_features[:,0,:,:]
model_inputs = model_inputs[:,0:,:]
# Extraversion predictions.
extra_pred = model_preds[:,0]
# Agreeableness predictions.
agree_pred = model_preds[:,1]
# Conscientiousness predictions.
consc_pred = model_preds[:,2]
# Neuroticism predictions.
neuro_pred = model_preds[:,3]
# Openness predictions.
open_pred = model_preds[:,4]
# Take the 20 highest predictions for extraversion.
idx_extra_max = heapq.nlargest(20,range(len(extra_pred)),extra_pred.take)
# Take the 20 highest predictions for agreeableness.
idx_agree_max = heapq.nlargest(20,range(len(agree_pred)),agree_pred.take)
# Take the 20 highest predictions for conscientiousness.
idx_consc_max = heapq.nlargest(20,range(len(consc_pred)),consc_pred.take)
# Take the 20 highest predictions for neuroticism.
idx_neuro_max = heapq.nlargest(20,range(len(neuro_pred)),neuro_pred.take)
# Take the 20 highest predictions for openness.
idx_open_max = heapq.nlargest(20,range(len(open_pred)),open_pred.take)
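# Hedged illustration: nlargest over the index range, keyed by arr.take,
# returns the indices of the largest entries, biggest first.
_demo = np.array([0.1, 0.9, 0.5])
assert heapq.nlargest(2, range(len(_demo)), _demo.take) == [1, 2]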
input_video_extra_max = []
conv_output_extra_max = []
input_video_agree_max = []
conv_output_agree_max = []
input_video_consc_max = []
conv_output_consc_max = []
input_video_neuro_max = []
conv_output_neuro_max = []
input_video_open_max = []
conv_output_open_max = []
for i in range (20):
# Extraversion.
# Max.
video_index_max = idx_extra_max[i]
# take corresponding video fft and conv_output.
input_video = model_inputs[video_index_max][:][:]
conv_output = model_conv_features[video_index_max][:][:]
input_video_extra_max.append(input_video)
conv_output_extra_max.append(conv_output )
# Agreeableness.
# Max.
video_index_max = idx_agree_max[i]
# take corresponding video fft and conv_output.
input_video = model_inputs[video_index_max][:][:]
conv_output = model_conv_features[video_index_max][:][:]
input_video_agree_max.append(input_video)
conv_output_agree_max.append(conv_output )
# Conscientiousness.
# Max.
video_index_max = idx_consc_max[i]
# take corresponding video fft and conv_output.
input_video = model_inputs[video_index_max][:][:]
conv_output = model_conv_features[video_index_max][:][:]
input_video_consc_max.append(input_video)
conv_output_consc_max.append(conv_output )
# Neuroticism.
# Max.
video_index_max = idx_neuro_max[i]
# take corresponding video fft and conv_output.
input_video = model_inputs[video_index_max][:][:]
conv_output = model_conv_features[video_index_max][:][:]
input_video_neuro_max.append(input_video)
conv_output_neuro_max.append(conv_output )
# Openness.
# Max.
video_index_max = idx_open_max[i]
# take corresponding video fft and conv_output.
input_video = model_inputs[video_index_max][:][:]
conv_output = model_conv_features[video_index_max][:][:]
input_video_open_max.append(input_video)
conv_output_open_max.append(conv_output)
np.save('.../path/to/save/input_feature_extra_max_fft',input_video_extra_max)
np.save('.../path/to/save/conv_output_extra_max_fft',conv_output_extra_max)
np.save('.../path/to/save/input_feature_agree_max_fft',input_video_agree_max)
np.save('.../path/to/save/conv_output_agree_max_fft',conv_output_agree_max)
np.save('.../path/to/save/input_feature_consc_max_fft',input_video_consc_max)
np.save('.../path/to/save/conv_output_consc_max_fft',conv_output_consc_max)
np.save('.../path/to/save/input_feature_neuro_max_fft',input_video_neuro_max)
np.save('.../path/to/save/conv_output_neuro_max_fft', conv_output_neuro_max)
np.save('.../path/to/save/input_feature_open_max_fft',input_video_open_max)
np.save('.../path/to/save/conv_output_open_max_fft', conv_output_open_max)
print('completed')
|
{"hexsha": "ea48c9a4c6a9fcc3db8dae878e024840e6a5400c", "size": 4195, "ext": "py", "lang": "Python", "max_stars_repo_path": "cam_generation_rawwav/get_20_max_pred.py", "max_stars_repo_name": "HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data", "max_stars_repo_head_hexsha": "7149e78736611f07a1c7c4adbdf24ae03011e549", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-09-26T23:52:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-04T00:08:23.000Z", "max_issues_repo_path": "cam_generation_rawwav/get_20_max_pred.py", "max_issues_repo_name": "HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data", "max_issues_repo_head_hexsha": "7149e78736611f07a1c7c4adbdf24ae03011e549", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cam_generation_rawwav/get_20_max_pred.py", "max_forks_repo_name": "HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data", "max_forks_repo_head_hexsha": "7149e78736611f07a1c7c4adbdf24ae03011e549", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-06T13:02:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-06T09:03:24.000Z", "avg_line_length": 36.798245614, "max_line_length": 77, "alphanum_fraction": 0.7606674613, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1054}
|
########################################################
############Ray Tracing scripts#########################
########################################################
from __future__ import division,with_statement
import sys,os
import time
import gc
from operator import add
from functools import reduce
from lenstools.simulations.logs import logdriver,logstderr,peakMemory,peakMemoryAll
from lenstools.utils.mpi import MPIWhirlPool
from lenstools import ConvergenceMap,OmegaMap,ShearMap
from lenstools.catalog import Catalog,ShearCatalog
from lenstools.simulations.raytracing import RayTracer,DensityPlane
from lenstools.pipeline.simulation import SimulationBatch
from lenstools.pipeline.settings import MapSettings,TelescopicMapSettings,CatalogSettings
from lenstools.scripts import integration_types
import numpy as np
import astropy.units as u
#############################################################
#########Spilt realizations in subdirectories################
#############################################################
def _subdirectories(num_realizations,realizations_in_subdir):
assert not(num_realizations%realizations_in_subdir),"The number of realizations per subdirectory must divide the total number of realizations!"
s = list()
if num_realizations==realizations_in_subdir:
return s
for c in range(num_realizations//realizations_in_subdir):
s.append("{0}-{1}".format(c*realizations_in_subdir+1,(c+1)*realizations_in_subdir))
return s
#####################################################################################
#######Callback to call during raytracing to save the convergence at every step######
#####################################################################################
def convergence_callback(jacobian,tracer,k,realization,angle,map_batch,settings):
convMap = ConvergenceMap(data=1.0-0.5*(jacobian[0]+jacobian[3]),angle=angle)
savename = os.path.join(map_batch.storage_subdir,"WLconv_z{0:.2f}_{1:04d}r.{2}".format(tracer.redshift[k],realization+1,settings.format))
logdriver.debug("Saving convergence map to {0}".format(savename))
convMap.save(savename)
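# Hedged note: this callback is handed to RayTracer.shoot(..., callback=...)
# when settings.tomographic_convergence is set (see singleRedshift below), so
# a convergence snapshot is written each time a lens plane is crossed.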
################################################
#######Single redshift ray tracing##############
################################################
def singleRedshift(pool,batch,settings,batch_id):
#Safety check
assert isinstance(pool,MPIWhirlPool) or (pool is None)
assert isinstance(batch,SimulationBatch)
parts = batch_id.split("|")
if len(parts)==2:
assert isinstance(settings,MapSettings)
#Separate the id into cosmo_id and geometry_id
cosmo_id,geometry_id = parts
#Get a handle on the model
model = batch.getModel(cosmo_id)
#Get the corresponding simulation collection and map batch handlers
collection = [model.getCollection(geometry_id)]
map_batch = collection[0].getMapSet(settings.directory_name)
cut_redshifts = np.array([0.0])
elif len(parts)==1:
assert isinstance(settings,TelescopicMapSettings)
#Get a handle on the model
model = batch.getModel(parts[0])
#Get the corresponding simulation collection and map batch handlers
map_batch = model.getTelescopicMapSet(settings.directory_name)
collection = map_batch.mapcollections
cut_redshifts = map_batch.redshifts
else:
if (pool is None) or (pool.is_master()):
logdriver.error("Format error in {0}: too many '|'".format(batch_id))
sys.exit(1)
#Override the settings with the previously pickled ones, if prompted by user
if settings.override_with_local:
local_settings_file = os.path.join(map_batch.home_subdir,"settings.p")
settings = MapSettings.read(local_settings_file)
assert isinstance(settings,MapSettings)
if (pool is None) or (pool.is_master()):
logdriver.warning("Overriding settings with the previously pickled ones at {0}".format(local_settings_file))
##################################################################
##################Settings read###################################
##################################################################
#Read map angle,redshift and resolution from the settings
map_angle = settings.map_angle
source_redshift = settings.source_redshift
resolution = settings.map_resolution
if len(parts)==2:
#########################
#Use a single collection#
#########################
#Read the plane set we should use
plane_set = (settings.plane_set,)
#Randomization
nbody_realizations = (settings.mix_nbody_realizations,)
cut_points = (settings.mix_cut_points,)
normals = (settings.mix_normals,)
map_realizations = settings.lens_map_realizations
elif len(parts)==1:
#######################
#####Telescopic########
#######################
#Check that we have enough info
for attr_name in ["plane_set","mix_nbody_realizations","mix_cut_points","mix_normals"]:
if len(getattr(settings,attr_name))!=len(collection):
if (pool is None) or (pool.is_master()):
logdriver.error("You need to specify a setting {0} for each collection!".format(attr_name))
sys.exit(1)
#Read the plane set we should use
plane_set = settings.plane_set
#Randomization
nbody_realizations = settings.mix_nbody_realizations
cut_points = settings.mix_cut_points
normals = settings.mix_normals
map_realizations = settings.lens_map_realizations
#Decide which map realizations this MPI task will take care of (if pool is None, all of them)
try:
realization_offset = settings.first_realization - 1
except AttributeError:
realization_offset = 0
if pool is None:
first_map_realization = 0 + realization_offset
last_map_realization = map_realizations + realization_offset
realizations_per_task = map_realizations
logdriver.debug("Generating lensing map realizations from {0} to {1}".format(first_map_realization+1,last_map_realization))
else:
assert map_realizations%(pool.size+1)==0,"Perfect load-balancing enforced, map_realizations must be a multiple of the number of MPI tasks!"
realizations_per_task = map_realizations//(pool.size+1)
first_map_realization = realizations_per_task*pool.rank + realization_offset
last_map_realization = realizations_per_task*(pool.rank+1) + realization_offset
logdriver.debug("Task {0} will generate lensing map realizations from {1} to {2}".format(pool.rank,first_map_realization+1,last_map_realization))
#Planes will be read from this path
plane_path = os.path.join("{0}","ic{1}","{2}")
if (pool is None) or (pool.is_master()):
for c,coll in enumerate(collection):
logdriver.info("Reading planes from {0}".format(plane_path.format(coll.storage_subdir,"-".join([str(n) for n in nbody_realizations[c]]),plane_set[c])))
#Plane info file is the same for all collections
if (not hasattr(settings,"plane_info_file")) or (settings.plane_info_file is None):
info_filename = batch.syshandler.map(os.path.join(plane_path.format(collection[0].storage_subdir,nbody_realizations[0][0],plane_set[0]),"info.txt"))
else:
info_filename = settings.plane_info_file
if (pool is None) or (pool.is_master()):
logdriver.info("Reading lens plane summary information from {0}".format(info_filename))
#Read how many snapshots are available
with open(info_filename,"r") as infofile:
num_snapshots = len(infofile.readlines())
#Save path for the maps
save_path = map_batch.storage_subdir
if (pool is None) or (pool.is_master()):
logdriver.info("Lensing maps will be saved to {0}".format(save_path))
begin = time.time()
#Log initial memory load
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
if (pool is None) or (pool.is_master()):
logstderr.info("Initial memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#We need one of these for cycles for each map random realization
for rloc,r in enumerate(range(first_map_realization,last_map_realization)):
#Set random seed to generate the realizations
np.random.seed(settings.seed + r)
#Instantiate the RayTracer
tracer = RayTracer()
#Force garbage collection
gc.collect()
#Start timestep
start = time.time()
last_timestamp = start
#############################################################
###############Add the lenses to the system##################
#############################################################
#Open the info file to read the lens specifications (assume the info file is the same for all nbody realizations)
infofile = open(info_filename,"r")
#Read the info file line by line, and decide if we should add the particular lens corresponding to that line or not
for s in range(num_snapshots):
#Read the line
line = infofile.readline().strip("\n")
#Stop if there is nothing more to read
if line=="":
break
#Split the line in snapshot,distance,redshift
line = line.split(",")
snapshot_number = int(line[0].split("=")[1])
distance,unit = line[1].split("=")[1].split(" ")
if unit=="Mpc/h":
distance = float(distance)*model.Mpc_over_h
else:
distance = float(distance)*getattr(u,unit)
lens_redshift = float(line[2].split("=")[1])
#Select the right collection
for n,z in enumerate(cut_redshifts):
if lens_redshift>=z:
c = n
#Randomization of planes
nbody = np.random.randint(low=0,high=len(nbody_realizations[c]))
cut = np.random.randint(low=0,high=len(cut_points[c]))
normal = np.random.randint(low=0,high=len(normals[c]))
#Log to user
logdriver.debug("Realization,snapshot=({0},{1}) --> NbodyIC,cut_point,normal=({2},{3},{4})".format(r,s,nbody_realizations[c][nbody],cut_points[c][cut],normals[c][normal]))
#Add the lens to the system
logdriver.info("Adding lens at redshift {0}".format(lens_redshift))
plane_name = batch.syshandler.map(os.path.join(plane_path.format(collection[c].storage_subdir,nbody_realizations[c][nbody],plane_set[c]),settings.plane_name_format.format(snapshot_number,cut_points[c][cut],normals[c][normal],settings.plane_format)))
tracer.addLens((plane_name,distance,lens_redshift))
#Close the infofile
infofile.close()
now = time.time()
logdriver.info("Plane specification reading completed in {0:.3f}s".format(now-start))
last_timestamp = now
#Rearrange the lenses according to redshift and roll them randomly along the axes
tracer.reorderLenses()
now = time.time()
logdriver.info("Reordering completed in {0:.3f}s".format(now-last_timestamp))
last_timestamp = now
#Start a bucket of light rays from a regular grid of initial positions
b = np.linspace(0.0,map_angle.value,resolution)
xx,yy = np.meshgrid(b,b)
pos = np.array([xx,yy]) * map_angle.unit
if settings.tomographic_convergence:
#Trace the ray deflections and save the convergence at every step
tracer.shoot(pos,z=source_redshift,kind="jacobians",callback=convergence_callback,realization=r,angle=map_angle,map_batch=map_batch,settings=settings)
else:
#Trace the ray deflections
jacobian = tracer.shoot(pos,z=source_redshift,kind="jacobians")
now = time.time()
logdriver.info("Jacobian ray tracing for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
last_timestamp = now
#Compute shear,convergence and omega from the jacobians
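# Hedged note on conventions (as used below): with the distortion matrix
# A = [[1-kappa-gamma1, -gamma2+omega], [-gamma2-omega, 1-kappa+gamma1]] and
# the jacobian ordered (A_xx, A_xy, A_yx, A_yy), one recovers
#   kappa  = 1 - (A_xx + A_yy)/2
#   gamma1 = (A_yy - A_xx)/2
#   gamma2 = -(A_xy + A_yx)/2
#   omega  = (A_xy - A_yx)/2
# which matches the expressions used for the maps below.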
if settings.convergence or settings.reduced_shear or settings.reduced_shear_convergence:
convMap = ConvergenceMap(data=1.0-0.5*(jacobian[0]+jacobian[3]),angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift)
if settings.convergence:
savename = batch.syshandler.map(os.path.join(save_path,"WLconv_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving convergence map to {0}".format(savename))
convMap.save(savename)
logdriver.debug("Saved convergence map to {0}".format(savename))
##############################################################################################################################
if settings.shear or settings.convergence_ks or settings.reduced_shear or settings.reduced_shear_convergence:
shearMap = ShearMap(data=np.array([0.5*(jacobian[3]-jacobian[0]),-0.5*(jacobian[1]+jacobian[2])]),angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift)
if settings.shear:
savename = batch.syshandler.map(os.path.join(save_path,"WLshear_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving shear map to {0}".format(savename))
shearMap.save(savename)
if settings.convergence_ks:
convMap = shearMap.convergence()
savename = batch.syshandler.map(os.path.join(save_path,"WLconv-ks_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving convergence (KS) map to {0}".format(savename))
convMap.save(savename)
if settings.reduced_shear or settings.reduced_shear_convergence:
for ng in (0,1):
shearMap.data[ng] /= (1. - convMap.data)
if settings.reduced_shear:
savename = batch.syshandler.map(os.path.join(save_path,"WLredshear_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving reduced shear map to {0}".format(savename))
shearMap.save(savename)
if settings.reduced_shear_convergence:
convMap = shearMap.convergence()
savename = batch.syshandler.map(os.path.join(save_path,"WLredconv_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving reduced shear corrected convergence map to {0}".format(savename))
convMap.save(savename)
##############################################################################################################################
if settings.omega:
omegaMap = OmegaMap(data=-0.5*(jacobian[2]-jacobian[1]),angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift)
savename = batch.syshandler.map(os.path.join(save_path,"WLomega_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving omega map to {0}".format(savename))
omegaMap.save(savename)
now = time.time()
#Log peak memory usage to stdout
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
logdriver.info("Weak lensing calculations for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
logdriver.info("Peak memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#Log progress and peak memory usage to stderr
if (pool is None) or (pool.is_master()):
logstderr.info("Progress: {0:.2f}%, peak memory usage: {1:.3f} (task), {2[0]:.3f} (all {2[1]} tasks)".format(100*(rloc+1.)/realizations_per_task,peak_memory_task,peak_memory_all))
#Safety sync barrier
if pool is not None:
pool.comm.Barrier()
if (pool is None) or (pool.is_master()):
now = time.time()
logdriver.info("Total runtime {0:.3f}s".format(now-begin))
############################################################################################################################################################################
#########################################################
#######Save intermediate results of LOS integration######
#########################################################
def save_intermediate(add_on,tracer,k,ctype,map_batch=None,map_angle=None,realization=None):
savename = os.path.join(map_batch.storage,"{0}-lens{1}-{2:04d}r.fits".format(ctype,k,realization))
logdriver.info("Saving z={0:.3f} add-on to convergence to {1}".format(tracer.redshift[k],savename))
side = map_angle.to(u.rad).value*tracer.distance[k]
ConvergenceMap(add_on,angle=side).save(savename)
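# Hedged note: unlike convergence_callback above, this saves the additive
# per-lens contribution produced during the post-Born integration; the map
# angle is converted to a comoving transverse size at the lens distance.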
########################################################################################################################
def losIntegrate(pool,batch,settings,batch_id):
#Safety check
assert isinstance(pool,MPIWhirlPool) or (pool is None)
assert isinstance(batch,SimulationBatch)
parts = batch_id.split("|")
if len(parts)==2:
assert isinstance(settings,MapSettings)
#Separate the id into cosmo_id and geometry_id
cosmo_id,geometry_id = parts
#Get a handle on the model
model = batch.getModel(cosmo_id)
#Get the corresponding simulation collection and map batch handlers
collection = [model.getCollection(geometry_id)]
map_batch = collection[0].getMapSet(settings.directory_name)
cut_redshifts = np.array([0.0])
elif len(parts)==1:
assert isinstance(settings,TelescopicMapSettings)
#Get a handle on the model
model = batch.getModel(parts[0])
#Get the corresponding simulation collection and map batch handlers
map_batch = model.getTelescopicMapSet(settings.directory_name)
collection = map_batch.mapcollections
cut_redshifts = map_batch.redshifts
else:
if (pool is None) or (pool.is_master()):
logdriver.error("Format error in {0}: too many '|'".format(batch_id))
sys.exit(1)
#Override the settings with the previously pickled ones, if prompted by user
if settings.override_with_local:
local_settings_file = os.path.join(map_batch.home_subdir,"settings.p")
settings = MapSettings.read(local_settings_file)
assert isinstance(settings,MapSettings)
if (pool is None) or (pool.is_master()):
logdriver.warning("Overriding settings with the previously pickled ones at {0}".format(local_settings_file))
##################################################################
##################Settings read###################################
##################################################################
#Read map angle,redshift and resolution from the settings
map_angle = settings.map_angle
source_redshift = settings.source_redshift
resolution = settings.map_resolution
if len(parts)==2:
#########################
#Use a single collection#
#########################
#Read the plane set we should use
plane_set = (settings.plane_set,)
#Randomization
nbody_realizations = (settings.mix_nbody_realizations,)
cut_points = (settings.mix_cut_points,)
normals = (settings.mix_normals,)
map_realizations = settings.lens_map_realizations
elif len(parts)==1:
#######################
#####Telescopic########
#######################
#Check that we have enough info
for attr_name in ["plane_set","mix_nbody_realizations","mix_cut_points","mix_normals"]:
if len(getattr(settings,attr_name))!=len(collection):
if (pool is None) or (pool.is_master()):
logdriver.error("You need to specify a setting {0} for each collection!".format(attr_name))
sys.exit(1)
#Read the plane set we should use
plane_set = settings.plane_set
#Randomization
nbody_realizations = settings.mix_nbody_realizations
cut_points = settings.mix_cut_points
normals = settings.mix_normals
map_realizations = settings.lens_map_realizations
#Integration type
if settings.integration_type not in integration_types:
if (pool is None) or (pool.is_master()):
logdriver.error("Integration type {0} not supported, please choose one in {1}".format(settings.integration_type,integration_types))
sys.exit(1)
if (pool is None) or (pool.is_master()):
logdriver.info("Line of sight integration type: {0}".format(settings.integration_type))
#Decide which map realizations this MPI task will take care of (if pool is None, all of them)
try:
realization_offset = settings.first_realization - 1
except AttributeError:
realization_offset = 0
if pool is None:
first_map_realization = 0 + realization_offset
last_map_realization = map_realizations + realization_offset
realizations_per_task = map_realizations
logdriver.debug("Generating lensing map realizations from {0} to {1}".format(first_map_realization+1,last_map_realization))
else:
assert map_realizations%(pool.size+1)==0,"Perfect load-balancing enforced, map_realizations must be a multiple of the number of MPI tasks!"
realizations_per_task = map_realizations//(pool.size+1)
first_map_realization = realizations_per_task*pool.rank + realization_offset
last_map_realization = realizations_per_task*(pool.rank+1) + realization_offset
logdriver.debug("Task {0} will generate lensing map realizations from {1} to {2}".format(pool.rank,first_map_realization+1,last_map_realization))
#Planes will be read from this path
plane_path = os.path.join("{0}","ic{1}","{2}")
if (pool is None) or (pool.is_master()):
for c,coll in enumerate(collection):
logdriver.info("Reading planes from {0}".format(plane_path.format(coll.storage_subdir,"-".join([str(n) for n in nbody_realizations[c]]),plane_set[c])))
#Plane info file is the same for all collections
if (not hasattr(settings,"plane_info_file")) or (settings.plane_info_file is None):
info_filename = batch.syshandler.map(os.path.join(plane_path.format(collection[0].storage_subdir,nbody_realizations[0][0],plane_set[0]),"info.txt"))
else:
info_filename = settings.plane_info_file
if (pool is None) or (pool.is_master()):
logdriver.info("Reading lens plane summary information from {0}".format(info_filename))
#Read how many snapshots are available
with open(info_filename,"r") as infofile:
num_snapshots = len(infofile.readlines())
#Save path for the maps
save_path = map_batch.storage_subdir
if (pool is None) or (pool.is_master()):
logdriver.info("Lensing maps will be saved to {0}".format(save_path))
begin = time.time()
#Log initial memory load
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
if (pool is None) or (pool.is_master()):
logstderr.info("Initial memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#We need one of these for cycles for each map random realization
for rloc,r in enumerate(range(first_map_realization,last_map_realization)):
#Set random seed to generate the realizations
np.random.seed(settings.seed + r)
#Instantiate the RayTracer
if settings.lens_type=="PotentialPlane":
tracer = RayTracer()
elif settings.lens_type=="DensityPlane":
tracer = RayTracer(lens_type=DensityPlane)
else:
raise ValueError("Lens type {0} not recognized!".format(settings.lens_type))
#Force garbage collection
gc.collect()
#Start timestep
start = time.time()
last_timestamp = start
#############################################################
###############Add the lenses to the system##################
#############################################################
#Open the info file to read the lens specifications (assume the info file is the same for all nbody realizations)
infofile = open(info_filename,"r")
#Read the info file line by line, and decide if we should add the particular lens corresponding to that line or not
for s in range(num_snapshots):
#Read the line
line = infofile.readline().strip("\n")
#Stop if there is nothing more to read
if line=="":
break
#Split the line in snapshot,distance,redshift
line = line.split(",")
snapshot_number = int(line[0].split("=")[1])
distance,unit = line[1].split("=")[1].split(" ")
if unit=="Mpc/h":
distance = float(distance)*model.Mpc_over_h
else:
distance = float(distance)*getattr(u,unit)
lens_redshift = float(line[2].split("=")[1])
#Select the right collection
for n,z in enumerate(cut_redshifts):
if lens_redshift>=z:
c = n
#Randomization of planes
nbody = np.random.randint(low=0,high=len(nbody_realizations[c]))
cut = np.random.randint(low=0,high=len(cut_points[c]))
normal = np.random.randint(low=0,high=len(normals[c]))
#Log to user
logdriver.debug("Realization,snapshot=({0},{1}) --> NbodyIC,cut_point,normal=({2},{3},{4})".format(r,s,nbody_realizations[c][nbody],cut_points[c][cut],normals[c][normal]))
#Add the lens to the system
logdriver.info("Adding lens at redshift {0}".format(lens_redshift))
plane_name = batch.syshandler.map(os.path.join(plane_path.format(collection[c].storage_subdir,nbody_realizations[c][nbody],plane_set[c]),settings.plane_name_format.format(snapshot_number,cut_points[c][cut],normals[c][normal],settings.plane_format)))
tracer.addLens((plane_name,distance,lens_redshift))
#Close the infofile
infofile.close()
now = time.time()
logdriver.info("Plane specification reading completed in {0:.3f}s".format(now-start))
last_timestamp = now
#Rearrange the lenses according to redshift and roll them randomly along the axes
tracer.reorderLenses()
now = time.time()
logdriver.info("Reordering completed in {0:.3f}s".format(now-last_timestamp))
last_timestamp = now
#Start a bucket of light rays from a regular grid of initial positions
b = np.linspace(0.0,map_angle.value,resolution)
xx,yy = np.meshgrid(b,b)
pos = np.array([xx,yy]) * map_angle.unit
#Save intermediate results
if settings.tomographic_convergence:
callback = save_intermediate
else:
callback = None
#Perform the line of sight integration (choose integration type)
if settings.integration_type=="born":
image = tracer.convergenceBorn(pos,z=source_redshift,save_intermediate=False)
img_type = ConvergenceMap
elif settings.integration_type=="born-rt":
image = tracer.convergenceBorn(pos,z=source_redshift,real_trajectory=True,save_intermediate=False)
img_type = ConvergenceMap
elif settings.integration_type=="postBorn2":
image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
img_type = ConvergenceMap
elif settings.integration_type=="postBorn2-ll":
image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=False,include_gp=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
img_type = ConvergenceMap
elif settings.integration_type=="postBorn2-gp":
image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=False,include_ll=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
img_type = ConvergenceMap
elif settings.integration_type=="postBorn1+2":
image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=True,callback=callback,transpose_up_to=settings.transpose_up_to,map_batch=map_batch,map_angle=map_angle,realization=r+1)
img_type = ConvergenceMap
elif settings.integration_type=="postBorn1+2-gp":
image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=True,include_ll=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
img_type = ConvergenceMap
elif settings.integration_type=="postBorn1+2-ll":
image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=True,include_gp=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
img_type = ConvergenceMap
elif settings.integration_type=="omega2":
image = tracer.omegaPostBorn2(pos,z=source_redshift,save_intermediate=False)
img_type = OmegaMap
else:
raise NotImplementedError
now = time.time()
logdriver.info("Line of sight integration for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
last_timestamp = now
#Save the image
savename = batch.syshandler.map(os.path.join(save_path,"{0}_z{1:.2f}_{2:04d}r".format(settings.integration_type,source_redshift,r+1)))
if settings.transpose_up_to>=0:
savename += "_t{0}".format(settings.transpose_up_to)
savename += ".{0}".format(settings.format)
logdriver.info("Saving {0} map to {1}".format(settings.integration_type,savename))
img_type(data=image,angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift).save(savename)
logdriver.debug("Saved {0} map to {1}".format(settings.integration_type,savename))
now = time.time()
#Log peak memory usage to stdout
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
logdriver.info("Weak lensing calculations for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
logdriver.info("Peak memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#Log progress and peak memory usage to stderr
if (pool is None) or (pool.is_master()):
logstderr.info("Progress: {0:.2f}%, peak memory usage: {1:.3f} (task), {2[0]:.3f} (all {2[1]} tasks)".format(100*(rloc+1.)/realizations_per_task,peak_memory_task,peak_memory_all))
#Safety sync barrier
if pool is not None:
pool.comm.Barrier()
if (pool is None) or (pool.is_master()):
now = time.time()
logdriver.info("Total runtime {0:.3f}s".format(now-begin))
############################################################################################################################################################################
###############################################
#######Galaxy catalog ray tracing##############
###############################################
def simulatedCatalog(pool,batch,settings,batch_id):
#Safety check
assert isinstance(pool,MPIWhirlPool) or (pool is None)
assert isinstance(batch,SimulationBatch)
assert isinstance(settings,CatalogSettings)
#Separate the id into cosmo_id and geometry_id
cosmo_id,geometry_id = batch_id.split("|")
#Get a handle on the model
model = batch.getModel(cosmo_id)
#Scale the box size to the correct units
nside,box_size = geometry_id.split("b")
box_size = float(box_size)*model.Mpc_over_h
#Get the corresponding simulation collection and catalog handler
collection = model.getCollection(box_size,nside)
catalog = collection.getCatalog(settings.directory_name)
#Override the settings with the previously pickled ones, if prompted by user
if settings.override_with_local:
local_settings_file = os.path.join(catalog.home_subdir,"settings.p")
settings = CatalogSettings.read(local_settings_file)
assert isinstance(settings,CatalogSettings)
if (pool is None) or (pool.is_master()):
logdriver.warning("Overriding settings with the previously pickled ones at {0}".format(local_settings_file))
##################################################################
##################Settings read###################################
##################################################################
#Read the catalog save path from the settings
catalog_save_path = catalog.storage_subdir
if (pool is None) or (pool.is_master()):
logdriver.info("Lensing catalogs will be saved to {0}".format(catalog_save_path))
#Read the total number of galaxies to raytrace from the settings
total_num_galaxies = settings.total_num_galaxies
#Pre-allocate numpy arrays
initial_positions = np.zeros((2,total_num_galaxies)) * settings.catalog_angle_unit
galaxy_redshift = np.zeros(total_num_galaxies)
#Keep track of the number of galaxies for each catalog
galaxies_in_catalog = list()
#Fill in initial positions and redshifts
for galaxy_position_file in settings.input_files:
try:
galaxies_before = reduce(add,galaxies_in_catalog)
except TypeError:
galaxies_before = 0
#Read the galaxy positions and redshifts from the position catalog
if (pool is None) or (pool.is_master()):
logdriver.info("Reading galaxy positions and redshifts from {0}".format(galaxy_position_file))
position_catalog = Catalog.read(galaxy_position_file)
if (pool is None) or (pool.is_master()):
logdriver.info("Galaxy catalog {0} contains {1} galaxies".format(galaxy_position_file,len(position_catalog)))
#This is just to avoid confusion
assert position_catalog.meta["AUNIT"]==settings.catalog_angle_unit.to_string(),"Catalog angle units, {0}, do not match with the ones provided in the settings, {1}".format(position_catalog.meta["AUNIT"],settings.catalog_angle_unit.to_string())
#Keep track of the number of galaxies in the catalog
galaxies_in_catalog.append(len(position_catalog))
if (pool is None) or (pool.is_master()):
#Save a copy of the position catalog to the simulated catalogs directory
position_catalog.write(os.path.join(catalog_save_path,os.path.basename(galaxy_position_file)),overwrite=True)
#Fill in initial positions and redshifts
initial_positions[0,galaxies_before:galaxies_before+len(position_catalog)] = position_catalog["x"] * getattr(u,position_catalog.meta["AUNIT"])
initial_positions[1,galaxies_before:galaxies_before+len(position_catalog)] = position_catalog["y"] * getattr(u,position_catalog.meta["AUNIT"])
galaxy_redshift[galaxies_before:galaxies_before+len(position_catalog)] = position_catalog["z"]
#Make sure that the total number of galaxies matches, and units are correct
assert reduce(add,galaxies_in_catalog)==total_num_galaxies,"The total number of galaxies in the catalogs, {0}, does not match the number provided in the settings, {1}".format(reduce(add,galaxies_in_catalog),total_num_galaxies)
##########################################################################################################################################################
####################################Initial positions and redshifts of galaxies loaded####################################################################
##########################################################################################################################################################
#Read the randomization information from the settings
nbody_realizations = settings.mix_nbody_realizations
cut_points = settings.mix_cut_points
normals = settings.mix_normals
catalog_realizations = settings.lens_catalog_realizations
if hasattr(settings,"realizations_per_subdirectory"):
realizations_in_subdir = settings.realizations_per_subdirectory
else:
realizations_in_subdir = catalog_realizations
#Create subdirectories as necessary
catalog_subdirectory = _subdirectories(catalog_realizations,realizations_in_subdir)
if (pool is None) or (pool.is_master()):
for d in catalog_subdirectory:
dir_to_make = os.path.join(catalog_save_path,d)
if not(os.path.exists(dir_to_make)):
logdriver.info("Creating catalog subdirectory {0}".format(dir_to_make))
os.mkdir(dir_to_make)
#Safety barrier sync
if pool is not None:
pool.comm.Barrier()
#Decide which map realizations this MPI task will take care of (if pool is None, all of them)
try:
realization_offset = settings.first_realization - 1
except AttributeError:
realization_offset = 0
if pool is None:
first_realization = 0 + realization_offset
last_realization = catalog_realizations + realization_offset
realizations_per_task = catalog_realizations
logdriver.debug("Generating lensing catalog realizations from {0} to {1}".format(first_realization+1,last_realization))
else:
assert catalog_realizations%(pool.size+1)==0,"Perfect load-balancing enforced, catalog_realizations must be a multiple of the number of MPI tasks!"
realizations_per_task = catalog_realizations//(pool.size+1)
first_realization = realizations_per_task*pool.rank + realization_offset
last_realization = realizations_per_task*(pool.rank+1) + realization_offset
logdriver.debug("Task {0} will generate lensing catalog realizations from {1} to {2}".format(pool.rank,first_realization+1,last_realization))
#Planes will be read from this path
plane_path = os.path.join(collection.storage_subdir,"ic{0}",settings.plane_set)
if (pool is None) or (pool.is_master()):
logdriver.info("Reading planes from {0}".format(plane_path.format("-".join([str(n) for n in nbody_realizations]))))
#Read how many snapshots are available
with open(batch.syshandler.map(os.path.join(plane_path.format(nbody_realizations[0]),"info.txt")),"r") as infofile:
num_snapshots = len(infofile.readlines())
begin = time.time()
#Log initial memory load
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
if (pool is None) or (pool.is_master()):
logstderr.info("Initial memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#We need one of these `for` cycles for each random catalog realization
for rloc,r in enumerate(range(first_realization,last_realization)):
#Set random seed to generate the realizations
np.random.seed(settings.seed + r)
#Instantiate the RayTracer
tracer = RayTracer()
#Force garbage collection
gc.collect()
#Start timestep
start = time.time()
last_timestamp = start
#############################################################
###############Add the lenses to the system##################
#############################################################
#Open the info file to read the lens specifications (assume the info file is the same for all nbody realizations)
infofile = open(os.path.join(plane_path.format(nbody_realizations[0]),"info.txt"),"r")
#Read the info file line by line, and decide if we should add the particular lens corresponding to that line or not
for s in range(num_snapshots):
#Read the line
line = infofile.readline().strip("\n")
#Stop if there is nothing more to read
if line=="":
break
#Split the line in snapshot,distance,redshift
line = line.split(",")
snapshot_number = int(line[0].split("=")[1])
distance,unit = line[1].split("=")[1].split(" ")
if unit=="Mpc/h":
distance = float(distance)*model.Mpc_over_h
else:
distance = float(distance)*getattr(u,unit)
lens_redshift = float(line[2].split("=")[1])
#Randomization of planes
nbody = np.random.randint(low=0,high=len(nbody_realizations))
cut = np.random.randint(low=0,high=len(cut_points))
normal = np.random.randint(low=0,high=len(normals))
#Log to user
logdriver.debug("Realization,snapshot=({0},{1}) --> NbodyIC,cut_point,normal=({2},{3},{4})".format(r,s,nbody_realizations[nbody],cut_points[cut],normals[normal]))
#Add the lens to the system
logdriver.info("Adding lens at redshift {0}".format(lens_redshift))
plane_name = batch.syshandler.map(os.path.join(plane_path.format(nbody_realizations[nbody]),settings.plane_name_format.format(snapshot_number,cut_points[cut],normals[normal],settings.plane_format)))
tracer.addLens((plane_name,distance,lens_redshift))
#Close the infofile
infofile.close()
now = time.time()
logdriver.info("Plane specification reading completed in {0:.3f}s".format(now-start))
last_timestamp = now
#Rearrange the lenses according to redshift and roll them randomly along the axes
tracer.reorderLenses()
now = time.time()
logdriver.info("Reordering completed in {0:.3f}s".format(now-last_timestamp))
last_timestamp = now
#Trace the ray deflections through the lenses
jacobian = tracer.shoot(initial_positions,z=galaxy_redshift,kind="jacobians")
now = time.time()
logdriver.info("Jacobian ray tracing for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
last_timestamp = now
#Build the shear catalog and save it to disk
if settings.reduced_shear:
shear_catalog = ShearCatalog([(jacobian[3]-jacobian[0])/(jacobian[3]+jacobian[0]),-(jacobian[1]+jacobian[2])/(jacobian[3]+jacobian[0])],names=("shear1","shear2"))
else:
shear_catalog = ShearCatalog([0.5*(jacobian[3]-jacobian[0]),-0.5*(jacobian[1]+jacobian[2])],names=("shear1","shear2"))
for n,galaxy_position_file in enumerate(settings.input_files):
try:
galaxies_before = reduce(add,galaxies_in_catalog[:n])
except TypeError:
galaxies_before = 0
#Build savename
if settings.reduced_shear:
shear_root = "WLredshear_"
else:
shear_root = "WLshear_"
if len(catalog_subdirectory):
shear_catalog_savename = batch.syshandler.map(os.path.join(catalog_save_path,catalog_subdirectory[r//realizations_in_subdir],shear_root+os.path.basename(galaxy_position_file.split(".")[0])+"_{0:04d}r.{1}".format(r+1,settings.format)))
else:
shear_catalog_savename = batch.syshandler.map(os.path.join(catalog_save_path,shear_root+os.path.basename(galaxy_position_file.split(".")[0])+"_{0:04d}r.{1}".format(r+1,settings.format)))
if settings.reduced_shear:
logdriver.info("Saving simulated reduced shear catalog to {0}".format(shear_catalog_savename))
else:
logdriver.info("Saving simulated shear catalog to {0}".format(shear_catalog_savename))
shear_catalog[galaxies_before:galaxies_before+galaxies_in_catalog[n]].write(shear_catalog_savename,overwrite=True)
now = time.time()
#Log peak memory usage to stdout
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
logdriver.info("Weak lensing calculations for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
logdriver.info("Peak memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#Log progress and peak memory usage to stderr
if (pool is None) or (pool.is_master()):
logstderr.info("Progress: {0:.2f}%, peak memory usage: {1:.3f} (task), {2[0]:.3f} (all {2[1]} tasks)".format(100*(rloc+1.)/realizations_per_task,peak_memory_task,peak_memory_all))
#Safety sync barrier
if pool is not None:
pool.comm.Barrier()
if (pool is None) or (pool.is_master()):
now = time.time()
logdriver.info("Total runtime {0:.3f}s".format(now-begin))
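#---------------------------------------------------------------------------
#Illustrative sketch (added for clarity, not part of the original drivers):
#the perfect load-balancing split used above, extracted as a standalone
#helper. The argument names and toy defaults are hypothetical; in the code
#above the number of tasks is pool.size+1.
def _demo_realization_partition(n_realizations=8, n_tasks=4, first_realization=1):
    assert n_realizations % n_tasks == 0, "perfect load-balancing enforced"
    per_task = n_realizations // n_tasks
    offset = first_realization - 1
    #(first, last) half-open, 0-based realization range for each task rank
    return [(per_task*rank + offset, per_task*(rank+1) + offset)
            for rank in range(n_tasks)]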
|
{"hexsha": "882c16bc46f1c7af1e7db7776ab175fe2a8ce743", "size": 41473, "ext": "py", "lang": "Python", "max_stars_repo_path": "lenstools/scripts/raytracing.py", "max_stars_repo_name": "asabyr/LensTools", "max_stars_repo_head_hexsha": "e155d6d39361e550906cec00dbbc57686a4bca5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-27T02:03:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T02:03:11.000Z", "max_issues_repo_path": "lenstools/scripts/raytracing.py", "max_issues_repo_name": "asabyr/LensTools", "max_issues_repo_head_hexsha": "e155d6d39361e550906cec00dbbc57686a4bca5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lenstools/scripts/raytracing.py", "max_forks_repo_name": "asabyr/LensTools", "max_forks_repo_head_hexsha": "e155d6d39361e550906cec00dbbc57686a4bca5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9812252964, "max_line_length": 252, "alphanum_fraction": 0.6967183469, "include": true, "reason": "import numpy,import astropy", "num_tokens": 10257}
|
"""Functions for infection rates"""
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
import numpy as np
from covid.rdata import load_mobility_matrix, load_population, load_age_mixing
from covid.pydata import load_commute_volume
from covid.impl.chainbinom_simulate import chain_binomial_simulate
tode = tfp.math.ode
tla = tf.linalg
DTYPE = np.float64
def power_iteration(A, tol=1e-3):
b_k = tf.random.normal([A.shape[1], 1], dtype=A.dtype)
epsilon = tf.constant(1., dtype=A.dtype)
i = 0
while tf.greater(epsilon, tol):
b_k1 = tf.matmul(A, b_k)
b_k1_norm = tf.linalg.norm(b_k1)
b_k_new = b_k1 / b_k1_norm
epsilon = tf.reduce_sum(tf.pow(b_k_new-b_k, 2))
b_k = b_k_new
i += 1
return b_k, i
def rayleigh_quotient(A, b):
b = tf.reshape(b, [b.shape[0], 1])
numerator = tf.matmul(tf.transpose(b), tf.matmul(A, b))
denominator = tf.matmul(tf.transpose(b), b)
return numerator / denominator
def dense_to_block_diagonal(A, n_blocks):
A_dense = tf.linalg.LinearOperatorFullMatrix(A)
eye = tf.linalg.LinearOperatorIdentity(n_blocks, dtype=A.dtype)
A_block = tf.linalg.LinearOperatorKronecker([eye, A_dense])
return A_block
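# Illustrative check (added sketch, not part of the original module): the
# operator built above is block diagonal, i.e. equal to kron(I_n, A), but it
# is never materialized as a dense matrix inside the model itself.
def _demo_dense_to_block_diagonal():
    A = np.arange(4., dtype=DTYPE).reshape(2, 2)
    op = dense_to_block_diagonal(tf.constant(A), 3)
    np.testing.assert_allclose(op.to_dense().numpy(), np.kron(np.eye(3), A))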
def load_data(paths, settings, dtype=DTYPE):
M_tt, age_groups = load_age_mixing(paths['age_mixing_matrix_term'])
M_hh, _ = load_age_mixing(paths['age_mixing_matrix_hol'])
C, la_names = load_mobility_matrix(paths['mobility_matrix'])
np.fill_diagonal(C, 0.)
w_period = [settings['inference_period'][0], settings['prediction_period'][1]]
W = load_commute_volume(paths['commute_volume'], w_period)['percent']
pop = load_population(paths['population_size'])
M_tt = M_tt.astype(DTYPE)
M_hh = M_hh.astype(DTYPE)
C = C.astype(DTYPE)
W = W.astype(DTYPE)
pop['n'] = pop['n'].astype(DTYPE)
return {'M_tt': M_tt, 'M_hh': M_hh,
'C': C, 'la_names': la_names,
'age_groups': age_groups,
'W': W, 'pop': pop}
class CovidUK:
def __init__(self,
M_tt: np.float64,
M_hh: np.float64,
W: np.float64,
C: np.float64,
N: np.float64,
date_range: list,
holidays: list,
lockdown: list,
time_step: np.int64):
"""Represents a CovidUK ODE model
:param M_tt: a MxM matrix of age group mixing in term time
:param M_hh: a MxM matrix of age group mixing in holiday time
:param W: Commuting volume
:param C: a n_ladsxn_lads matrix of inter-LAD commuting
:param N: a vector of population sizes in each LAD
:param date_range: a time range [start, end)
:param holidays: a list of length-2 tuples containing dates of holidays
:param lockdown: a length-2 tuple of start and end of lockdown measures
:param time_step: a time step to use in the discrete time simulation
"""
dtype = dtype_util.common_dtype([M_tt, M_hh, W, C, N], dtype_hint=np.float64)
self.n_ages = M_tt.shape[0]
self.n_lads = C.shape[0]
self.M_tt = tf.convert_to_tensor(M_tt, dtype=tf.float64)
self.M_hh = tf.convert_to_tensor(M_hh, dtype=tf.float64)
# Create one linear operator comprising both the term and holiday
# matrices. This is nice because
# - the dense "M" parts will take up memory of shape [2, M, M]
# - the identity matrix will only take up memory of shape [M]
# - matmuls/matvecs will be quite efficient because of the
# LinearOperatorKronecker structure and diagonal structure of the
# identity piece thereof.
# It should be sufficiently efficient that we can just get rid of the
# control flow switching between the two operators, and instead just do
# both matmuls in one big (vectorized!) pass, and pull out what we want
# after the fact with tf.gather.
self.M = dense_to_block_diagonal(
np.stack([M_tt, M_hh], axis=0), self.n_lads)
self.Kbar = tf.reduce_mean(M_tt)
self.C = tf.linalg.LinearOperatorFullMatrix(C + tf.transpose(C))
shp = tf.linalg.LinearOperatorFullMatrix(tf.ones_like(M_tt, dtype=dtype))
self.C = tf.linalg.LinearOperatorKronecker([self.C, shp])
self.W = tf.constant(W, dtype=dtype)
self.N = tf.constant(N, dtype=dtype)
N_matrix = tf.reshape(self.N, [self.n_lads, self.n_ages])
N_sum = tf.reduce_sum(N_matrix, axis=1)
N_sum = N_sum[:, None] * tf.ones([1, self.n_ages], dtype=dtype)
self.N_sum = tf.reshape(N_sum, [-1])
self.time_step = time_step
self.times = np.arange(date_range[0], date_range[1], np.timedelta64(int(time_step), 'D'))
self.m_select = np.int64((self.times >= holidays[0]) &
(self.times < holidays[1]))
self.lockdown_select = np.int64((self.times >= lockdown[0]) &
(self.times < lockdown[1]))
self.max_t = self.m_select.shape[0] - 1
def create_initial_state(self, init_matrix=None):
if init_matrix is None:
I = np.zeros(self.N.shape, dtype=DTYPE)
I[149*17+10] = 30. # Middle-aged in Surrey
else:
np.testing.assert_array_equal(init_matrix.shape, [self.n_lads, self.n_ages],
err_msg=f"init_matrix does not have shape [<num lads>,<num ages>] \
({self.n_lads},{self.n_ages})")
I = tf.reshape(init_matrix, [-1])
S = self.N - I
E = tf.zeros(self.N.shape, dtype=DTYPE)
R = tf.zeros(self.N.shape, dtype=DTYPE)
return tf.stack([S, E, I, R], axis=-1)
class CovidUKODE(CovidUK):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.solver = tode.DormandPrince()
def make_h(self, param):
def h_fn(t, state):
S, E, I, R = tf.unstack(state, axis=-1)
# Integrator may produce time values outside the range desired, so
# we clip, implicitly assuming the outside dates have the same
# holiday status as their nearest neighbors in the desired range.
t_idx = tf.clip_by_value(tf.cast(t, tf.int64), 0, self.max_t)
m_switch = tf.gather(self.m_select, t_idx)
commute_volume = tf.pow(tf.gather(self.W, t_idx), param['omega'])
lockdown = tf.gather(self.lockdown_select, t_idx)
beta = tf.where(lockdown == 0, param['beta1'], param['beta1']*param['beta3'])
infec_rate = beta * (
tf.gather(self.M.matvec(I), m_switch) +
param['beta2'] * self.Kbar * commute_volume * self.C.matvec(I / self.N_sum))
infec_rate = S * infec_rate / self.N
dS = -infec_rate
dE = infec_rate - param['nu'] * E
dI = param['nu'] * E - param['gamma'] * I
dR = param['gamma'] * I
df = tf.stack([dS, dE, dI, dR], axis=-1)
return df
return h_fn
def simulate(self, param, state_init, solver_state=None):
h = self.make_h(param)
t = np.arange(self.times.shape[0])
results = self.solver.solve(ode_fn=h, initial_time=t[0], initial_state=state_init,
solution_times=t, previous_solver_internal_state=solver_state)
return results.times, results.states, results.solver_internal_state
def ngm(self, param):
infec_rate = param['beta1'] * (
self.M.to_dense()[0, ...] +
(param['beta2'] * self.Kbar * self.C.to_dense() /
self.N_sum[np.newaxis, :]))
ngm = infec_rate / param['gamma']
return ngm
def eval_R0(self, param, tol=1e-8):
ngm = self.ngm(param)
# Dominant eigen value by power iteration
dom_eigen_vec, i = power_iteration(ngm, tol=tf.cast(tol, tf.float64))
R0 = rayleigh_quotient(ngm, dom_eigen_vec)
return tf.squeeze(R0), i
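# Illustrative check (added sketch, not part of the original module): the
# power_iteration / rayleigh_quotient pair recovers the dominant eigenvalue
# of a small symmetric matrix -- the same recipe eval_R0 applies to the NGM.
def _demo_dominant_eigenvalue():
    A = tf.constant([[2., 1.], [1., 3.]], dtype=tf.float64)
    b, n_iter = power_iteration(A, tol=1e-10)
    # Rayleigh quotient, approximately (5 + sqrt(5))/2 = 3.618
    return tf.squeeze(rayleigh_quotient(A, b)), n_iter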
def covid19uk_logp(y, sim, phi, r):
# Sum daily increments in removed
r_incr = sim[1:, :, 3] - sim[:-1, :, 3]
r_incr = tf.reduce_sum(r_incr, axis=-1)
# Poisson(\lambda) = \lim_{r \to \infty} NB(r, \lambda / (r + \lambda))
#y_ = tfp.distributions.Poisson(rate=phi*r_incr)
lambda_ = r_incr * phi
y_ = tfp.distributions.NegativeBinomial(r, probs=lambda_/(r+lambda_))
return y_.log_prob(y)
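# Illustrative numeric check (added sketch): as r grows, the negative
# binomial above approaches Poisson(lambda), which is what motivates it as
# an overdispersed observation model. The values of lam and y are made up.
def _demo_nb_poisson_limit():
    lam = tf.constant(10., dtype=tf.float64)
    y = tf.constant(8., dtype=tf.float64)
    target = tfp.distributions.Poisson(rate=lam).log_prob(y)
    # log-probability gap shrinks towards 0 as r increases
    return [tfp.distributions.NegativeBinomial(
        tf.constant(r, tf.float64), probs=lam/(r+lam)).log_prob(y) - target
        for r in (1e1, 1e3, 1e5)]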
class CovidUKStochastic(CovidUK):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def make_h(self, param):
"""Constructs a function that takes `state` and outputs a
transition rate matrix (with 0 diagonal).
"""
def h(t, state):
"""Computes a transition rate matrix
:param state: a tensor of shape [nc, ns] for nc population strata and ns states. States
are S, E, I, R. We arrange the state like this because the state vectors are then arranged
contiguously in memory for fast calculation below.
:return a tensor of shape [nc, ns, ns] containing a transition matrix for each stratum i=0,...,(nc-1)
"""
t_idx = tf.clip_by_value(tf.cast(t, tf.int64), 0, self.max_t)
m_switch = tf.gather(self.m_select, t_idx)
commute_volume = tf.pow(tf.gather(self.W, t_idx), param['omega'])
infec_rate = param['beta1'] * (
tf.gather(self.M.matvec(state[:, 2]), m_switch) +
param['beta2'] * self.Kbar * commute_volume * self.C.matvec(state[:, 2] / self.N_sum))
infec_rate = infec_rate / self.N
ei = tf.broadcast_to([param['nu']], shape=[state.shape[0]])
ir = tf.broadcast_to([param['gamma']], shape=[state.shape[0]])
# Scatter rates into a [ns, ns, nc] tensor
n = state.shape[0]
b = tf.stack([tf.range(n),
tf.zeros(n, dtype=tf.int32),
tf.ones(n, dtype=tf.int32)], axis=-1)
indices = tf.stack([b, b + [0, 1, 1], b + [0, 2, 2]], axis=-2)
# Un-normalised rate matrix (diag is 0 here)
rate_matrix = tf.scatter_nd(indices=indices,
updates=tf.stack([infec_rate, ei, ir], axis=-1),
shape=[state.shape[0],
state.shape[1],
state.shape[1]])
return rate_matrix
return h
@tf.function(autograph=False, experimental_compile=True)
def simulate(self, param, state_init):
"""Runs a simulation from the epidemic model
:param param: a dictionary of model parameters
:param state_init: the initial state
:returns: a tuple of times and simulated states.
"""
param = {k: tf.constant(v, dtype=tf.float64) for k, v in param.items()}
hazard = self.make_h(param)
t, sim = chain_binomial_simulate(hazard, state_init, np.float64(0.),
np.float64(self.times.shape[0]), self.time_step)
return t, sim
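# Illustrative sketch (added; toy rates): the scatter_nd pattern used in
# CovidUKStochastic.make_h, placing per-stratum rates at the (S->E), (E->I)
# and (I->R) entries of an [n, 4, 4] tensor whose diagonal stays zero.
def _demo_rate_matrix_scatter():
    n = 2
    infec = tf.constant([0.1, 0.2], dtype=tf.float64)
    ei = tf.fill([n], tf.constant(0.5, tf.float64))
    ir = tf.fill([n], tf.constant(0.25, tf.float64))
    b = tf.stack([tf.range(n),
                  tf.zeros(n, dtype=tf.int32),
                  tf.ones(n, dtype=tf.int32)], axis=-1)
    indices = tf.stack([b, b + [0, 1, 1], b + [0, 2, 2]], axis=-2)
    return tf.scatter_nd(indices=indices,
                         updates=tf.stack([infec, ei, ir], axis=-1),
                         shape=[n, 4, 4])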
|
{"hexsha": "2dce1ef10cea366723d3db3aa366cc9504aca511", "size": 11426, "ext": "py", "lang": "Python", "max_stars_repo_path": "covid/model.py", "max_stars_repo_name": "FrankD/covid19uk", "max_stars_repo_head_hexsha": "dcc463bd5a2432c58935891728cbb4a5d9fb94ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "covid/model.py", "max_issues_repo_name": "FrankD/covid19uk", "max_issues_repo_head_hexsha": "dcc463bd5a2432c58935891728cbb4a5d9fb94ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "covid/model.py", "max_forks_repo_name": "FrankD/covid19uk", "max_forks_repo_head_hexsha": "dcc463bd5a2432c58935891728cbb4a5d9fb94ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3985507246, "max_line_length": 109, "alphanum_fraction": 0.5904078418, "include": true, "reason": "import numpy", "num_tokens": 2922}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import hashlib
from warnings import warn
import six
import numpy as np
import pandas as pd
import matplotlib.collections as mcoll
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.offsetbox import (TextArea, HPacker, VPacker)
from matplotlib.offsetbox import AuxTransformBox
from matplotlib.colors import ListedColormap
from mizani.bounds import rescale
from ..aes import rename_aesthetics
from ..scales.scale import scale_continuous
from ..utils import ColoredDrawingArea
from .guide import guide
class guide_colorbar(guide):
"""
Guide colorbar
Parameters
----------
barwidth : float
Width (in pixels) of the colorbar.
barheight : float
Height (in pixels) of the colorbar.
nbin : int
Number of bins for drawing a colorbar. A larger value yields
a smoother colorbar. Default is 20.
raster : bool
Whether to render the colorbar as a raster object.
ticks : bool
Whether tick marks on colorbar should be visible.
draw_ulim : bool
Whether to show the upper limit tick marks.
draw_llim : bool
Whether to show the lower limit tick marks.
direction : str in ``['horizontal', 'vertical']``
Direction of the guide.
kwargs : dict
Parameters passed on to :class:`.guide`
"""
# bar
barwidth = 23
barheight = 23*5
nbin = 20 # maximum number of bins
raster = True
# ticks
ticks = True
draw_ulim = True
draw_llim = True
# parameter
available_aes = {'colour', 'color', 'fill'}
def train(self, scale):
# Do nothing if scales are inappropriate
if not set(scale.aesthetics) & {'color', 'colour', 'fill'}:
warn("colorbar guide needs color or fill scales.")
return None
if not issubclass(scale.__class__, scale_continuous):
warn("colorbar guide needs continuous scales")
return None
# value = breaks (numeric) is used for determining the
# position of ticks
limits = scale.limits
breaks = scale.get_breaks(strict=True)
breaks = np.asarray(breaks)
breaks = breaks[~np.isnan(breaks)]
if not len(breaks):
return None
self.key = pd.DataFrame({
scale.aesthetics[0]: scale.map(breaks),
'label': scale.get_labels(breaks),
'value': breaks})
bar = np.linspace(limits[0], limits[1], self.nbin)
self.bar = pd.DataFrame({
'color': scale.map(bar),
'value': bar})
labels = ' '.join(six.text_type(x) for x in self.key['label'])
info = '\n'.join([self.title, labels,
' '.join(self.bar['color'].tolist()),
self.__class__.__name__])
self.hash = hashlib.md5(info.encode('utf-8')).hexdigest()
return self
def merge(self, other):
"""
Simply discards the other guide
"""
return self
def create_geoms(self, plot):
"""
This guide is not geom based
Return self if colorbar will be drawn and None if not.
"""
for l in plot.layers:
exclude = set()
if isinstance(l.show_legend, dict):
l.show_legend = rename_aesthetics(l.show_legend)
exclude = {ae for ae, val in l.show_legend.items()
if not val}
elif l.show_legend not in (None, True):
continue
matched = self.legend_aesthetics(l, plot)
# layer uses guide
if set(matched) - exclude:
break
# no break, no layer uses this guide
else:
return None
return self
def draw(self):
"""
Draw guide
Returns
-------
out : matplotlib.offsetbox.Offsetbox
A drawing of this legend
"""
obverse = slice(0, None)
reverse = slice(None, None, -1)
width = self.barwidth
height = self.barheight
nbars = len(self.bar)
length = height
direction = self.direction
colors = self.bar['color'].tolist()
labels = self.key['label'].tolist()
themeable = self.theme.figure._themeable
# When there is more than one guide, we keep
# record of all of them using lists
if 'legend_title' not in themeable:
themeable['legend_title'] = []
if 'legend_text_colorbar' not in themeable:
themeable['legend_text_colorbar'] = []
# .5 puts the ticks in the middle of the bars when
# raster=False. So when raster=True the ticks are
# in between interpolation points and the matching is
# close though not exactly right.
_from = self.bar['value'].min(), self.bar['value'].max()
tick_locations = rescale(self.key['value'],
(.5, nbars-.5),
_from) * length/nbars
if direction == 'horizontal':
width, height = height, width
length = width
if self.reverse:
colors = colors[::-1]
labels = labels[::-1]
tick_locations = length - tick_locations[::-1]
# title #
title_box = TextArea(self.title,
textprops=dict(color='black'))
themeable['legend_title'].append(title_box)
# colorbar and ticks #
da = ColoredDrawingArea(width, height, 0, 0)
if self.raster:
add_interpolated_colorbar(da, colors, direction)
else:
add_segmented_colorbar(da, colors, direction)
if self.ticks:
_locations = tick_locations
if not self.draw_ulim:
_locations = _locations[:-1]
if not self.draw_llim:
_locations = _locations[1:]
add_ticks(da, _locations, direction)
# labels #
if self.label:
labels_da, legend_text = create_labels(da, labels,
tick_locations,
direction)
themeable['legend_text_colorbar'].extend(legend_text)
else:
labels_da = ColoredDrawingArea(0, 0)
# colorbar + labels #
if direction == 'vertical':
packer, align = HPacker, 'center'
else:
packer, align = VPacker, 'center'
if self.label_position in ('right', 'bottom'):
slc = obverse
else:
slc = reverse
main_box = packer(children=[da, labels_da][slc],
sep=self._label_margin,
align=align,
pad=0)
# title + colorbar(with labels) #
lookup = {
'right': (HPacker, reverse),
'left': (HPacker, obverse),
'bottom': (VPacker, reverse),
'top': (VPacker, obverse)}
packer, slc = lookup[self.title_position]
children = [title_box, main_box][slc]
box = packer(children=children,
sep=self._title_margin,
align=self._title_align,
pad=0)
return box
def add_interpolated_colorbar(da, colors, direction):
"""
Add 'rastered' colorbar to DrawingArea
"""
# Special case that arises due to not so useful
# aesthetic mapping.
if len(colors) == 1:
colors = [colors[0], colors[0]]
# Number of horizontal edges (breaks) in the grid.
# There is no need to create more breaks than colors; when
# no. of colors == no. of breaks == no. of cmap colors,
# the shading interpolates exactly.
nbreak = len(colors)
if direction == 'vertical':
mesh_width = 1
mesh_height = nbreak-1
linewidth = da.height/mesh_height
# Construct rectangular meshgrid
# The values(Z) at each vertex are just the
# normalized (onto [0, 1]) vertical distance
x = np.array([0, da.width])
y = np.arange(0, nbreak) * linewidth
X, Y = np.meshgrid(x, y)
Z = Y/y.max()
else:
mesh_width = nbreak-1
mesh_height = 1
linewidth = da.width/mesh_width
x = np.arange(0, nbreak) * linewidth
y = np.array([0, da.height])
X, Y = np.meshgrid(x, y)
Z = X/x.max()
# As a 2D coordinates array
coordinates = np.zeros(
((mesh_width+1)*(mesh_height+1), 2),
dtype=float)
coordinates[:, 0] = X.ravel()
coordinates[:, 1] = Y.ravel()
cmap = ListedColormap(colors)
coll = mcoll.QuadMesh(mesh_width, mesh_height,
coordinates,
antialiased=False,
shading='gouraud',
linewidth=0,
cmap=cmap,
array=Z.ravel())
da.add_artist(coll)
def add_segmented_colorbar(da, colors, direction):
"""
Add 'non-rastered' colorbar to DrawingArea
"""
nbreak = len(colors)
if direction == 'vertical':
linewidth = da.height/nbreak
verts = [None] * nbreak
x1, x2 = 0, da.width
for i, color in enumerate(colors):
y1 = i * linewidth
y2 = y1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
else:
linewidth = da.width/nbreak
verts = [None] * nbreak
y1, y2 = 0, da.height
for i, color in enumerate(colors):
x1 = i * linewidth
x2 = x1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
coll = mcoll.PolyCollection(verts,
facecolors=colors,
linewidth=0,
antialiased=False)
da.add_artist(coll)
def add_ticks(da, locations, direction):
segments = [None] * (len(locations)*2)
if direction == 'vertical':
x1, x2, x3, x4 = np.array([0.0, 1/5, 4/5, 1.0]) * da.width
for i, y in enumerate(locations):
segments[i*2:i*2+2] = [((x1, y), (x2, y)),
((x3, y), (x4, y))]
else:
y1, y2, y3, y4 = np.array([0.0, 1/5, 4/5, 1.0]) * da.height
for i, x in enumerate(locations):
segments[i*2:i*2+2] = [((x, y1), (x, y2)),
((x, y3), (x, y4))]
coll = mcoll.LineCollection(segments,
color='#CCCCCC',
linewidth=1,
antialiased=False)
da.add_artist(coll)
def create_labels(da, labels, locations, direction):
"""
Return an OffsetBox with label texts
"""
# The box dimensions are determined by the size of
# the text objects. We put two dummy children at
# either end to guarantee that when center packed
# the labels in the labels_box match up with the ticks.
fontsize = 9
aux_transform = mtransforms.IdentityTransform()
labels_box = MyAuxTransformBox(aux_transform)
xs, ys = [0]*len(labels), locations
ha, va = 'left', 'center'
x1, y1 = 0, 0
x2, y2 = 0, da.height
if direction == 'horizontal':
xs, ys = ys, xs
ha, va = 'center', 'top'
x2, y2 = da.width, 0
txt1 = mtext.Text(x1, y1, '',
horizontalalignment=ha,
verticalalignment=va)
txt2 = mtext.Text(x2, y2, '',
horizontalalignment=ha,
verticalalignment=va)
labels_box.add_artist(txt1)
labels_box.add_artist(txt2)
legend_text = []
for i, (x, y, text) in enumerate(zip(xs, ys, labels)):
txt = mtext.Text(x, y, text,
size=fontsize,
horizontalalignment=ha,
verticalalignment=va)
labels_box.add_artist(txt)
legend_text.append(txt)
return labels_box, legend_text
guide_colourbar = guide_colorbar
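# Illustrative usage (added sketch, not part of the original module). The
# dataframe and aesthetic names below are hypothetical; any plot with a
# continuous color/fill scale can request this guide in the same way.
def _demo_guide_colorbar():
    from plotnine import ggplot, aes, geom_point, guides
    df = pd.DataFrame({'x': range(10), 'y': range(10),
                       'z': np.linspace(0., 1., 10)})
    return (ggplot(df, aes('x', 'y', color='z'))
            + geom_point()
            + guides(color=guide_colorbar(nbin=50, raster=True)))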
# Fix AuxTransformBox, Adds a dpi_transform
# See https://github.com/matplotlib/matplotlib/pull/7344
class MyAuxTransformBox(AuxTransformBox):
def __init__(self, aux_transform):
AuxTransformBox.__init__(self, aux_transform)
self.dpi_transform = mtransforms.Affine2D()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.dpi_transform + \
self.offset_transform
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer)
self.stale = False
|
{"hexsha": "7854d7400d1405d2aead2be4eb4bdfb46ca5b438", "size": 13278, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/lib/python2.7/site-packages/plotnine/guides/guide_colorbar.py", "max_stars_repo_name": "nuriale207/preprocesspack", "max_stars_repo_head_hexsha": "cc06a9cb79c5e3b392371fcd8d1ccf7185e71821", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "venv/lib/python2.7/site-packages/plotnine/guides/guide_colorbar.py", "max_issues_repo_name": "nuriale207/preprocesspack", "max_issues_repo_head_hexsha": "cc06a9cb79c5e3b392371fcd8d1ccf7185e71821", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "venv/lib/python2.7/site-packages/plotnine/guides/guide_colorbar.py", "max_forks_repo_name": "nuriale207/preprocesspack", "max_forks_repo_head_hexsha": "cc06a9cb79c5e3b392371fcd8d1ccf7185e71821", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9951807229, "max_line_length": 70, "alphanum_fraction": 0.5488025305, "include": true, "reason": "import numpy", "num_tokens": 3118}
|
"""Rudimentary reimplementation of gradient based smoothing from LAYNII
Reference
---------
https://github.com/layerfMRI/LAYNII/blob/master/LN_GRADSMOOTH.cpp
"""
import os
import numpy as np
import nibabel as nb
from scipy.ndimage import gaussian_filter
from numpy.linalg import eigh
# =============================================================================
# User defined parameters
NII1 = "/home/faruk/Git/PyLAYNII/sample_data/activity_map_example.nii.gz"
NII2 = "/home/faruk/Git/PyLAYNII/sample_data/activity_map_example.nii.gz"
# =============================================================================
# Load data
nii1 = nb.load(NII1)
data1 = nii1.get_fdata()
nii2 = nb.load(NII2)
data2 = nii2.get_fdata()
# Derive parameters
dims = data1.shape
nr_vox = np.prod(dims)
basename = NII2.split(os.extsep, 1)[0]
# =============================================================================
grad = np.transpose(np.asarray(np.gradient(data1)), (1, 2, 3, 0))
grad = np.reshape(grad, (nr_vox, 3))
struct = np.multiply(grad[:, None, :], grad[:, :, None])
eigvals, eigvecs = eigh(struct)
# eigvecs has shape (nr_vox, 3, 3): one eigenvector basis per voxel
# Masked Gaussian filter
# data_new = np.zeros(dims)
# for l in layers:
# idx_nonzero = data_layers == l
# mask = (idx_nonzero).astype("float")
# data_smooth = gaussian_filter(data * mask, sigma=SIGMA)
# mask_smooth = gaussian_filter(mask, sigma=SIGMA)
# data_new[idx_nonzero] += data_smooth[idx_nonzero] / mask_smooth[idx_nonzero]
#
# # Save
# out = nb.Nifti1Image(data_new, affine=nii.affine)
# nb.save(out, "{}_layer_smooth_sigma{}.nii.gz".format(basename, int(SIGMA)))
#
# print("Finished.")
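# =============================================================================
# Added sketch (not in the original WIP script): a self-contained version of
# the masked Gaussian smoothing hinted at in the commented block above. The
# argument names are hypothetical; `gaussian_filter` is imported at the top.
def masked_gaussian_smooth(data, mask, sigma):
    """Smooth `data` only inside `mask`, renormalizing by the smoothed mask
    so that voxels near the mask edge are not diluted by outside zeros."""
    mask = mask.astype("float")
    num = gaussian_filter(data * mask, sigma=sigma)
    den = gaussian_filter(mask, sigma=sigma)
    out = np.zeros_like(data, dtype=float)
    inside = den > 0
    out[inside] = num[inside] / den[inside]
    return out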
|
{"hexsha": "d4f2321e62e69ae3b3ad43af86ac3f1a6520ff5f", "size": 1636, "ext": "py", "lang": "Python", "max_stars_repo_path": "old/wip/LN_GRADSMOOTH.py", "max_stars_repo_name": "ofgulban/PyLAYNII", "max_stars_repo_head_hexsha": "38e607edbd6601b5893e519af3f791059fbe190d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-06-30T10:57:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T13:20:37.000Z", "max_issues_repo_path": "old/wip/LN_GRADSMOOTH.py", "max_issues_repo_name": "ofgulban/PyLAYNII", "max_issues_repo_head_hexsha": "38e607edbd6601b5893e519af3f791059fbe190d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-09T06:04:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-20T13:12:00.000Z", "max_forks_repo_path": "old/wip/LN_GRADSMOOTH.py", "max_forks_repo_name": "ofgulban/PyLAYNII", "max_forks_repo_head_hexsha": "38e607edbd6601b5893e519af3f791059fbe190d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2142857143, "max_line_length": 82, "alphanum_fraction": 0.6149144254, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 427}
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from matplotlib import rc
from matplotlib import rcParams
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=True)
rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
col1 = 'b'
col2 = 'k'
col3 = 'r'
####### Parameters #######
# number of samples for the gain and the minimum MSE
N1 = 101
# number of samples for the filtering
N2 = 51
# a of the state model
a = 0.9
# variance of the excitation noise
var_u = 1
# mean and variance of s[-1]
mu_s = 0
var_s = 1
# the three cases of the observation noise variance at n=1
var_w = [0.9, 1, 1.1]
### End of parameters ###
# seed for the plots
np.random.seed(4)
ns1 = np.arange(N1)
n_exp = len(var_w)
## Computation of the Kalman gain and the minimum MSE
# initialization of the variables that store the results
Ks = np.zeros((N1, n_exp))
Ms = np.zeros((N1, n_exp))
# M[-1|-1] = var_s
M = var_s
for k in np.arange(n_exp):
for n in ns1:
M_pred = (a ** 2) * M + var_u
K = M_pred / (var_w[k] ** n + M_pred)
M = (1 - K) * M_pred
# store the results
Ks[n, k] = K
Ms[n, k] = M
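# Added note (worked equations, sketch): the dashed asymptotes in the figure
# follow from the recursion above. With var_w = 1 (constant noise), K and M
# settle at the scalar Riccati fixed point
#     M = (1 - K)*(a**2*M + var_u),  K = (a**2*M + var_u)/(1 + a**2*M + var_u).
# With var_w = (0.9)**n the noise vanishes, so K -> 1 and M -> 0; with
# var_w = (1.1)**n the observations become useless, so K -> 0 and
# M -> var_u/(1 - a**2), the prior variance of s[n].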
## Filtering
ns2 = np.arange(N2)
# generation of the Markov processes
# excitation noise - the same one is used in all cases
u = np.sqrt(var_u) * np.random.randn(N2)
# s[-1]
s_i = np.random.normal(mu_s, np.sqrt(var_s), 1)
# generation of the process
s, z_f = signal.lfilter([1], [1, -a], u, zi=a * s_i)
# observation noise
w = np.random.randn(N2)
# generation of the observations
xs = np.zeros((N2, n_exp))
for k in np.arange(n_exp):
xs[:, k] = s + np.sqrt(np.power(var_w[k], ns2)) * w
# filtering
s_ests = np.zeros((N2, n_exp))
for k in np.arange(n_exp):
# initialization: s[-1|-1]
s_est = mu_s
for n in ns2:
s_pred = a * s_est
s_est = s_pred + Ks[n, k] * (xs[n, k] - s_pred)
# store the result
s_ests[n, k] = s_est
fs = 12
fig = plt.figure(0, figsize=(9, 4), frameon=False)
ax = plt.subplot2grid((8, 2), (0, 0), rowspan=8, colspan=1)
plt.plot([0, N1-1], [1, 1], 'k--', dashes=(4, 4))
plt.plot(ns1, Ks[:, 0], color = col1, lw=2)
plt.plot(ns1, Ks[:, 1], color = col2, lw=2)
plt.plot(ns1, Ks[:, 2], color = col3, lw=2)
plt.xlim(0, N1-1)
plt.ylim(0, 1.05)
plt.xlabel('$n$', fontsize=fs)
plt.ylabel('$K[n]$', fontsize=fs)
var_s_steady = var_u / (1 - a ** 2)
ax = plt.subplot2grid((8, 2), (0, 1), rowspan=8, colspan=1)
plt.plot([0, N1-1], [var_s_steady, var_s_steady], 'k--', dashes=(4, 4))
plt.plot(ns1, Ms[:, 0], color = col1, lw=2, label='$\sigma_n^2=({})^n$'.format(var_w[0]))
plt.plot(ns1, Ms[:, 1], color = col2, lw=2, label='$\sigma_n^2={}$'.format(var_w[1]))
plt.plot(ns1, Ms[:, 2], color = col3, lw=2, label='$\sigma_n^2=({})^n$'.format(var_w[2]))
plt.xlim(0, N1-1)
plt.ylim(0, 5.5)
plt.xlabel('$n$', fontsize=fs)
plt.ylabel('$M[n|n]$', fontsize=fs)
plt.annotate('$\mathrm{var}(s[n])=\dfrac{\sigma_u^2}{1-a^2}$', xytext=(3, var_s_steady-0.4), xycoords='data',
xy=(28, var_s_steady), textcoords='data', color='k', fontsize=fs, va="top", ha="left",
arrowprops=dict(arrowstyle="-|>, head_width=0.15, head_length=0.3", color='k', relpos=(0.5, 1),
patchA=None, patchB=None, shrinkA=0, shrinkB=1))
leg = plt.legend(loc='center right', frameon=False, fontsize=fs)
plt.savefig('problem_13_11_gain_MSE.pdf', bbox_inches='tight')
fig = plt.figure(2, figsize=(9, 6), frameon=False)
ax = plt.subplot2grid((9, 1), (0, 0), rowspan=3, colspan=1)
k = 0
plt.plot(ns2, s, 'r')
plt.plot(ns2, xs[:, k], color = 'b')
plt.plot(ns2, s_ests[:, k], color = 'k')
plt.xlim(0, N2-1)
ax.set_xticklabels([])
plt.text(0.02, 0.8, '$\sigma_n^2=({})^n$'.format(var_w[k]), fontsize=fs, ha='left', va='baseline',
transform = ax.transAxes)
ax = plt.subplot2grid((9, 1), (3, 0), rowspan=3, colspan=1)
k = 1
plt.plot(ns2, s, 'r')
plt.plot(ns2, xs[:, k], color = 'b')
plt.plot(ns2, s_ests[:, k], color = 'k')
plt.xlim(0, N2-1)
ax.set_xticklabels([])
plt.text(0.02, 0.8, '$\sigma_n^2={}$'.format(var_w[k]), fontsize=fs, ha='left', va='baseline',
transform = ax.transAxes)
ax = plt.subplot2grid((9, 1), (6, 0), rowspan=3, colspan=1)
k = 2
plt.plot(ns2, s, 'r', label='$s[n]$')
plt.plot(ns2, xs[:, k], color = 'b', label='$x[n]$')
plt.plot(ns2, s_ests[:, k], color = 'k', label='$\hat{s}[n|n]$')
plt.xlim(0, N2-1)
plt.xlabel('$n$', fontsize=fs)
plt.text(0.02, 0.8, '$\sigma_n^2=({})^n$'.format(var_w[k]), fontsize=fs, ha='left', va='baseline',
transform = ax.transAxes)
leg = plt.legend(loc=3, frameon=False, fontsize=fs, ncol=3)
plt.savefig('problem_13_11_filtering.pdf', bbox_inches='tight')
plt.show()
|
{"hexsha": "6eb7ff87c830d0995672cfa7702571e680b582b9", "size": 4717, "ext": "py", "lang": "Python", "max_stars_repo_path": "figuras/PycharmKayStatisticalReport/problem_13_11.py", "max_stars_repo_name": "bor9/estudiando_el_kay", "max_stars_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figuras/PycharmKayStatisticalReport/problem_13_11.py", "max_issues_repo_name": "bor9/estudiando_el_kay", "max_issues_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figuras/PycharmKayStatisticalReport/problem_13_11.py", "max_forks_repo_name": "bor9/estudiando_el_kay", "max_forks_repo_head_hexsha": "6e07908b8b0b5a5166dadce30001e6100e8304c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-02T05:27:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T05:27:27.000Z", "avg_line_length": 30.8300653595, "max_line_length": 109, "alphanum_fraction": 0.6133135467, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1752}
|
import os
import calendar
import time
import cv2
import numpy as np
import pandas as pd
from keras.preprocessing.image import img_to_array
from keras.models import load_model
from keras.models import model_from_json
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
def rectify(h):
# Order the four corners of a quadrilateral as
# top-left, top-right, bottom-right, bottom-left.
h = h.reshape((4,2))
hnew = np.zeros((4,2),dtype = np.float32)
# top-left has the smallest x+y sum, bottom-right the largest
add = h.sum(1)
hnew[0] = h[np.argmin(add)]
hnew[2] = h[np.argmax(add)]
# top-right has the smallest y-x difference, bottom-left the largest
diff = np.diff(h,axis = 1)
hnew[1] = h[np.argmin(diff)]
hnew[3] = h[np.argmax(diff)]
return hnew
def outerRectangle(image):
height, width, channels = image.shape
if width > height:
image = cv2.transpose(image)
image = cv2.flip(image,1)
# resize image so it can be processed
image = cv2.resize(image, (1600, 1200))
# creating copy of original image
orig = image.copy()
# convert to grayscale and blur to smooth
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#blurred = cv2.medianBlur(gray, 5)
edged = cv2.Canny(blurred, 0,50)
orig_edged = edged.copy()
# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
(_,contours, _) = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
# get approximate contour
for c in contours:
p = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * p, True)
if len(approx) == 4:
target = approx
break
# mapping target points to 800x800 quadrilateral
approx = rectify(target)
(tl, tr, br, bl) = approx
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordiates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
pts2 = np.float32([[0,0],[800,0],[800,800],[0,800]])
M = cv2.getPerspectiveTransform(approx,pts2)
dst = cv2.warpPerspective(orig,M,(800, 800))
mask = np.ones(orig.shape, np.uint8)
mask = cv2.bitwise_not(mask)
x_offset=y_offset=50
mask[y_offset:y_offset+dst.shape[0], x_offset:x_offset+dst.shape[1]] = dst
return mask
def correctprespective(image):
#result2 = cv2.add(orig,result)
# cv2.imshow('image', image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()#
# mapping target points to 800x800 quadrilateral
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#blurred = cv2.medianBlur(gray, 5)
# apply Canny Edge Detection
edged = cv2.Canny(blurred, 0,50)
# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
(_,contours, _) = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
# get approximate contour
pt = []
largestctr=""
for c in contours:
p = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * p, True)
if len(approx) == 4:
target = approx
#largestctr = ctr
break
orig = image.copy()
approx = rectify(target)
#cv2.drawContours(orig,[target],-1,(0,255,0),1)
x, y, w, h = cv2.boundingRect(approx)
dst = orig[y:y+h,x:x+w]
return dst
def innerRectangles(dst):
names = []
answers= []
questions = []
gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
# apply Canny Edge Detection
edged = cv2.Canny(blurred, 0, 50)
dst2 = dst.copy()
(_,contours,_) = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
targetvec = list()
for c in contours:
p = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * p, True)
if len(approx) == 4 and cv2.contourArea(approx) >4000: #area threshold that needs tuning for different box sizes
#print("Area:",cv2.contourArea(approx))
targetvec.append(approx)
point_list = []
for c in targetvec:
x1, y1, width1, height1 = cv2.boundingRect (c)
point_list.append([x1,y1,width1,height1])
#filter necessary so that the big outer contour is not detected
point_array = [point for point in point_list if point[0] > 15]
duplicate_array = []
same_pt = []
point_array = sorted(point_array,key=lambda x: (x[1]))
# for i in point_array:
# print ("Point Array :",i)
for i in range(len(point_array)):
for j in range(i+1,len(point_array)):
#nearby contour points to remove
if point_array[i][1]+ 10 > point_array[j][1]:
point_array[j][1] = point_array[i][1]
point_array = sorted(point_array,key=lambda x: (x[1],x[0]))
for i in range(len(point_array)):
for j in range(i+1,len(point_array)):
if point_array[i][0]+ 10 > point_array[j][0] and point_array[i][1]+ 10 > point_array[j][1] and point_array[i][2]+ 10 > point_array[j][2] and point_array[i][3]+ 10 > point_array[j][3] :
duplicate_array.append(j)
# print("final size is : ", len(point_array))
# print("duplicate_array size is : ", len(duplicate_array))
#deleting from reverse based on index to avoid out of index issue
duplicate_array = sorted(list(set(duplicate_array)),reverse=True)
# print("Points detected:",len(point_array),"Duplicate Points to be removed:",len(list(set(duplicate_array))))
# #print(duplicate_array)
# for i in duplicate_array:
# print ("Deleted",i)
for i in duplicate_array:
del point_array[i]
for i in point_array:
x, y, width, height = i[0],i[1],i[2],i[3]
for i in range(0,len(point_array)):
x, y, width, height = point_array[i][0],point_array[i][1],point_array[i][2],point_array[i][3]
#if y < 720:
#cropping some padding which contains box lines
roi = dst[y-3:y+height+3, x-5:x+width+5]
# cv2.rectangle(dst,(x,y),(x+width,y+height),(0,255,0),1)
# print(roi.shape)
# print("height - width {}".format(abs(height-width)))
area = height * width
if height+30 >=width:
continue
#print("final area :: ", area)
os.path.join('.')
if i==0 or i==1:
names.append(roi)
elif i>1:
if (area>9000 and area<20000) or area>200000:
continue
elif (area >4500 and area<9000):
answers.append(roi)
elif (area >50000 and area <200000):
questions.append(roi)
print(len(answers))
print(len(questions))
for i in range(len(names)):
if not os.path.isdir('name'):
os.makedirs('name')
cv2.imwrite(os.path.join("name","name" + str(i+1)+".png"), names[i])
for i in range(len(answers)):
if not os.path.isdir('answers'):
os.makedirs('answers')
cv2.imwrite(os.path.join("answers","answers" + str(i+1)+".png"), answers[i])
question_array_names = []
for i in range(len(questions)):
if not os.path.isdir('questions'):
os.makedirs('questions')
file_name = str(int(calendar.timegm(time.gmtime()))) + "_question" + str(i+1)+".png"
cv2.imwrite(os.path.join("questions", file_name), questions[i])
question_array_names.append(file_name)
return len(point_array), answers, question_array_names
def getResponseFromImage(input_image):
success = False
image = cv2.imread("static/" + input_image)
image = outerRectangle(image)
dst = correctprespective(image)
#dst = correctprespective(image)
#qpts_data = pd.read_csv("question_data.csv")
regions_detected, answers, question_array_names = innerRectangles(dst)
print("Detected regions :",regions_detected)
responses = []
q_types = ["ocr","ocr", "ocr", "omr","omr"]
idx_char_omr = { 1 : "A", 2 : "B", 3 : "C", 4: "D"}
if len(answers) != len(q_types):
print("Not able to detect properly")
return False, answers, question_array_names
return True, answers, question_array_names
def getBlob(im):
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 100
# Filter by Area.
params.filterByArea = True
params.minArea = 100
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.3
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.5
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.1
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
detector = cv2.SimpleBlobDetector(params)
else :
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(im)
def getKey(item):
return item[1]
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
li = []
for i in range(len(keypoints)):
print(i,"x:",keypoints[i].pt)
li.append(keypoints[i].pt)
keypoint_sorted = sorted(li, key=getKey)
return keypoint_sorted
def getCircles(image):
output = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT, 1, 20,
param1=45,
param2=22,
minRadius=0,
maxRadius=55)
point_list = []
if circles is not None:
# convert the (x, y) coordinates and radius of the circles to integers
circles = np.round(circles[0, :]).astype("int")
# loop over the (x, y) coordinates and radius of the circles
for (x, y, r) in circles:
# draw the circle in the output image, then draw a rectangle
#print(x,y)
point_list.append([x,y])
# corresponding to the center of the circle
cv2.circle(output, (x, y), r, (0, 255, 0), 1)
#cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
#sort by x coord because we will get row_count in metadata
point_list = sorted(point_list,key=lambda x: x[0])
return point_list
def evaluateOmrQuestion(image,row_count=2,x_response = ["A","B","C","D"],y_response= ["Vertebrate","Invertebrate"]):
#if __name__ == "__main__":
#load image
#image = cv2.imread("roi5.png")
#get all circles
# print("shape of OMR: ", image.shape)
# point_list = getCircles(image)
# #setting the x-y range based on circles
# x_range = []
# y_range = sorted([point_list[i][1] for i in range(row_count)])
# print(y_range)
# for i in range(0,len(point_list),row_count):
# row_group = point_list[i:i+row_count]
# x_range.append(min([row[0] for row in row_group ]))
# print(x_range)
x_range = [60,110,160,210]
#Detecting blob points
blob_points = getBlob(image)
print ("blob_points ", blob_points)
# #final response list
responses = []
for point in blob_points:
print(point)
found = False
for i in range(len(x_range)):
if int(point[0]) < x_range[i]:
responses.append(i + 1)
#print(responses)
found = True
if found: break
# print("Final responses")
# print(responses)
return responses
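# Worked example of the threshold mapping above (numbers are illustrative):
# with x_range = [60, 110, 160, 210], a blob centred at x = 75 fails
# 75 < 60 but satisfies 75 < 110, so response 2 is appended, which
# idx_char_omr later maps to option "B".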
# success , answers, question_array_names = getResponseFromImage("test.jpg")
# q_types = ["ocr","ocr", "ocr", "omr","omr"]
# idx_char_omr = { 1 : "A", 2 : "B", 3 : "C", 4: "D"}
# responses=[]
# for i in range(len(answers)):
# q_img = "answers"+str(i+1)+".png"
# if q_types[i] == "omr":
# img = cv2.imread(os.path.join('./answers',q_img))
# detected_omr_ans = evaluateOmrQuestion(img);
# print("detected",detected_omr_ans)
# #responses.append(idx_char_omr[detected_omr_ans[0] ] )
# if q_types[i] =="ocr":
# img = cv2.imread(os.path.join('./answers',q_img))
# responses.append(ocr_prediction(img))
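# A minimal runnable sketch (the file path and single-question setup are
# assumptions, not part of the original pipeline): evaluate one cropped OMR
# answer image and map the detected column indices to option letters.
if __name__ == "__main__":
    idx_char_omr = {1: "A", 2: "B", 3: "C", 4: "D"}
    omr_img = cv2.imread(os.path.join("answers", "answers4.png"))
    if omr_img is not None:
        detected = evaluateOmrQuestion(omr_img)
        print([idx_char_omr[idx] for idx in detected])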
|
{"hexsha": "c3baed2b5873cc258fc35029fc74e9035ad5d3d8", "size": 14159, "ext": "py", "lang": "Python", "max_stars_repo_path": "scannable_paper.py", "max_stars_repo_name": "dhavalocked/alchemy-backend", "max_stars_repo_head_hexsha": "c908e0c8066c3b8a327d92da0ff34f13522f470d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scannable_paper.py", "max_issues_repo_name": "dhavalocked/alchemy-backend", "max_issues_repo_head_hexsha": "c908e0c8066c3b8a327d92da0ff34f13522f470d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scannable_paper.py", "max_forks_repo_name": "dhavalocked/alchemy-backend", "max_forks_repo_head_hexsha": "c908e0c8066c3b8a327d92da0ff34f13522f470d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2542735043, "max_line_length": 299, "alphanum_fraction": 0.5958047885, "include": true, "reason": "import numpy", "num_tokens": 3892}
|
from amuse.community import *
from amuse.units import units
from amuse.units import constants
from amuse.units.quantities import Quantity
from amuse.community.interface import common
from amuse.datamodel import Particles
from amuse.datamodel import ParticlesSubset
import numpy
class BSEInterface(CodeInterface, common.CommonCodeInterface , LiteratureReferencesMixIn):
"""
Binary evolution is performed by the **rapid** binary-star evolution (BSE)
algorithm. Circularization of eccentric orbits and synchronization of stellar
rotation with the orbital motion owing to tidal interaction is modelled in detail.
Angular momentum loss mechanisms, such as gravitational radiation and magnetic
braking, are also modelled. Wind accretion, where the secondary may accrete some
of the material lost from the primary in a wind, is allowed with the necessary
adjustments made to the orbital parameters in the event of any mass variations.
Mass transfer also occurs if either star fills its Roche lobe and may proceed on a
nuclear, thermal or dynamical time-scale. In the latter regime, the radius of the
primary increases in response to mass-loss at a faster rate than the Roche-lobe of
the star. Stars with deep surface convection zones and degenerate stars are
unstable to such dynamical time-scale mass loss unless the mass ratio of the system
is less than some critical value. The outcome is a common-envelope event if the
primary is a giant star. This results in merging or formation of a close binary, or
a direct merging if the primary is a white dwarf or low-mass main-sequence star. On
the other hand, mass transfer on a nuclear or thermal time-scale is assumed to be a
steady process. Prescriptions to determine the type and rate of mass transfer, the
response of the secondary to accretion and the outcome of any merger events are in
place in BSE and the details can be found in the BSE paper:
.. [#] Hurley J.R., Tout C.A., & Pols O.R., 2002, MNRAS, 329, 897:
.. [#] ... Evolution of binary stars and the effect of tides on binary populations
.. [#] Hurley J.R., Pols O.R., Tout C.A., 2000, MNRAS, 315, 543:
.. [#] ... Comprehensive analytic formulae for stellar evolution as a function of mass and metallicity
"""
def __init__(self, **options):
CodeInterface.__init__(self, name_of_the_worker="bse_worker", **options)
LiteratureReferencesMixIn.__init__(self)
@legacy_function
def initialize():
function = LegacyFunctionSpecification()
function.addParameter('z_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('neta_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('bwind_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('hewind_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('alpha1_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('CElambda_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('ceflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)
function.addParameter('tflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)
function.addParameter('ifflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)
function.addParameter('wdflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)
function.addParameter('bhflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)
function.addParameter('nsflag_in', dtype='i', direction=function.IN, unit = NO_UNIT)
function.addParameter('mxns_in', dtype='d', direction=function.IN, unit = units.MSun)
function.addParameter('idum_in', dtype='i', direction=function.IN, unit = NO_UNIT)
function.addParameter('pts1_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('pts2_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('pts3_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('sigma_in', dtype='d', direction=function.IN, unit = units.km / units.s)
function.addParameter('beta_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('xi_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('acc2_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('epsnov_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('eddfac_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('gamma_in', dtype='d', direction=function.IN, unit = NO_UNIT)
function.addParameter('status', dtype='i', direction=function.OUT, unit = NO_UNIT)
return function
@legacy_function
def evolve_binary():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('type1', dtype='i', direction=function.INOUT, unit = units.stellar_type)
function.addParameter('type2', dtype='i', direction=function.INOUT, unit = units.stellar_type)
function.addParameter('initial_mass1', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('initial_mass2', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('mass1', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('mass2', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('radius1', dtype='d', direction=function.INOUT, unit = units.RSun)
function.addParameter('radius2', dtype='d', direction=function.INOUT, unit = units.RSun)
function.addParameter('luminosity1', dtype='d', direction=function.INOUT, unit = units.LSun)
function.addParameter('luminosity2', dtype='d', direction=function.INOUT, unit = units.LSun)
function.addParameter('core_mass1', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('core_mass2', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('core_radius1', dtype='d', direction=function.INOUT, unit = units.RSun)
function.addParameter('core_radius2', dtype='d', direction=function.INOUT, unit = units.RSun)
function.addParameter('convective_envelope_mass1', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('convective_envelope_mass2', dtype='d', direction=function.INOUT, unit = units.MSun)
function.addParameter('convective_envelope_radius1', dtype='d', direction=function.INOUT, unit = units.RSun)
function.addParameter('convective_envelope_radius2', dtype='d', direction=function.INOUT, unit = units.RSun)
function.addParameter('spin1', dtype='d', direction=function.INOUT, unit = NO_UNIT)
function.addParameter('spin2', dtype='d', direction=function.INOUT, unit = NO_UNIT)
function.addParameter('epoch1', dtype='d', direction=function.INOUT, unit = units.Myr)
function.addParameter('epoch2', dtype='d', direction=function.INOUT, unit = units.Myr)
function.addParameter('MS_lifetime1', dtype='d', direction=function.INOUT, unit = units.Myr)
function.addParameter('MS_lifetime2', dtype='d', direction=function.INOUT, unit = units.Myr)
function.addParameter('age', dtype='d', direction=function.INOUT, unit = units.Myr)
function.addParameter('orbital_period', dtype='d', direction=function.INOUT, unit = units.day)
function.addParameter('eccentricity', dtype='d', direction=function.INOUT, unit = NO_UNIT)
function.addParameter('end_time', dtype='d', direction=function.INOUT, unit = units.Myr)
return function
@legacy_function
def get_time_step():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('type1', dtype='i', direction=function.IN, unit = units.stellar_type)
function.addParameter('type2', dtype='i', direction=function.IN, unit = units.stellar_type)
function.addParameter('initial_mass1', dtype='d', direction=function.IN, unit = units.MSun)
function.addParameter('initial_mass2', dtype='d', direction=function.IN, unit = units.MSun)
function.addParameter('mass1', dtype='d', direction=function.IN, unit = units.MSun)
function.addParameter('mass2', dtype='d', direction=function.IN, unit = units.MSun)
function.addParameter('MS_lifetime1', dtype='d', direction=function.IN, unit = units.Myr)
function.addParameter('MS_lifetime2', dtype='d', direction=function.IN, unit = units.Myr)
function.addParameter('epoch1', dtype='d', direction=function.IN, unit = units.Myr)
function.addParameter('epoch2', dtype='d', direction=function.IN, unit = units.Myr)
function.addParameter('age', dtype='d', direction=function.IN, unit = units.Myr)
function.addParameter('time_step', dtype='d', direction=function.OUT, unit = units.Myr)
return function
def get_time_step_for_binary(self, binary):
current_values = {}
current_values['type1'] = binary.type1.value_in(units.stellar_type)
current_values['type2'] = binary.type2.value_in(units.stellar_type)
current_values['initial_mass1'] = binary.initial_mass1.value_in(units.MSun)
current_values['initial_mass2'] = binary.initial_mass2.value_in(units.MSun)
current_values['mass1'] = binary.mass1.value_in(units.MSun)
current_values['mass2'] = binary.mass2.value_in(units.MSun)
current_values['MS_lifetime1'] = binary.MS_lifetime1.value_in(units.Myr)
current_values['MS_lifetime2'] = binary.MS_lifetime2.value_in(units.Myr)
current_values['epoch1'] = binary.epoch1.value_in(units.Myr)
current_values['epoch2'] = binary.epoch2.value_in(units.Myr)
current_values['age'] = binary.age.value_in(units.Myr)
result = self.get_time_step(**current_values)
return result | units.Myr
def evolve_particle(self, particle, time_end):
t = particle.current_time
if particle.stellar_type == 15:
return
while t < time_end:
t0 = t
t = t0 + self.get_time_step_for_binary(particle)
if t > time_end:
t = time_end
self.evolve_star(particle, t)
t1 = particle.current_time
dt = t1 - t0
t0 = t1
if dt.value_in(units.Myr) == 0.0:
#print t, t0, t1, dt, "BREAK BREAK BREAK!"
return
if particle.stellar_type == 15:
return
def initialize_code(self):
return 0
def commit_parameters(self):
return 0
def recommit_parameters(self):
return 0
def cleanup_code(self):
return 0
def commit_particles(self):
return 0
class BSEStars(Particles):
def __init__(self, code_interface, storage = None):
Particles.__init__(self, storage = storage)
self._private.code_interface = code_interface
self.add_calculated_attribute("temperature", self.calculate_effective_temperature, ["luminosity", "radius"])
def calculate_effective_temperature(self, luminosity, radius):
return ((luminosity/(constants.four_pi_stefan_boltzmann*radius**2))**.25).in_(units.K)
def add_particles_to_store(self, keys, attributes = [], values = []):
if len(keys) == 0:
return
all_attributes = []
all_attributes.extend(attributes)
all_values = []
all_values.extend(values)
mapping_from_attribute_to_default_value = {
"stellar_type" : 1 | units.stellar_type,
"radius": 0 | units.RSun,
"luminosity": 0 | units.LSun,
"core_mass": 0 | units.MSun,
"core_radius": 0 | units.RSun,
"convective_envelope_mass": 0 | units.MSun,
"convective_envelope_radius": 0 | units.RSun,
"epoch": 0 | units.Myr,
"spin": 0 | units.none,
"main_sequence_lifetime": 0 | units.Myr,
"age": 0 | units.Myr,
"stellar_type": 0 | units.stellar_type #units.stellar_type("Main Sequence star"),
}
given_attributes = set(attributes)
if not "initial_mass" in given_attributes:
index_of_mass_attibute = attributes.index("mass")
all_attributes.append("initial_mass")
all_values.append(values[index_of_mass_attibute] * 1.0)
for attribute, default_value in mapping_from_attribute_to_default_value.items():
if not attribute in given_attributes:
all_attributes.append(attribute)
all_values.append(default_value.as_vector_with_length(len(keys)))
super(BSEStars, self).add_particles_to_store(keys, all_attributes, all_values)
def get_defined_attribute_names(self):
return ["mass", "radius"]
class BSEBinaries(Particles):
def __init__(self, code_interface, storage = None):
Particles.__init__(self, storage = storage)
self._private.code_interface = code_interface
def add_particles_to_store(self, keys, attributes = [], values = []):
if len(keys) == 0:
return
given_attributes = set(attributes)
if not "child1" in given_attributes:
raise Exception("a binary must always have a child1 attribute")
if not "child2" in given_attributes:
raise Exception("a binary must always have a child2 attribute")
all_attributes = []
all_values = []
for attribute, value in zip(attributes, values):
all_attributes.append(attribute)
if attribute == 'child1' or attribute == 'child2':
value = value.copy_with_link_transfer(None, self._private.code_interface.particles)
all_values.append(value)
else:
all_values.append(value)
mapping_from_attribute_to_default_value = {
"eccentricity": 0.0 | units.none,
"age": 0 | units.Myr
}
for attribute, default_value in mapping_from_attribute_to_default_value.items():
if not attribute in given_attributes:
all_attributes.append(attribute)
all_values.append(default_value.as_vector_with_length(len(keys)))
super(BSEBinaries, self).add_particles_to_store(keys, all_attributes, all_values)
added_particles = ParticlesSubset(self, keys)
self._private.code_interface._evolve_binaries(added_particles, 1e-08 | units.yr)
def get_defined_attribute_names(self):
return ["eccentricity", "orbital_period", "age", "child1", "child2"]
class BSE(common.CommonCode):
def __init__(self, **options):
InCodeComponentImplementation.__init__(self, BSEInterface(**options), **options)
self.model_time = 0.0 | units.yr
def define_parameters(self, handler):
handler.add_caching_parameter(
"initialize",
"z_in",
"metallicity",
"Metallicity of all stars",
0.02
)
handler.add_caching_parameter(
"initialize",
"neta_in",
"reimers_mass_loss_coefficient",
"Reimers mass-loss coefficient (neta*4x10^-13; 0.5 normally)",
0.5
)
handler.add_caching_parameter(
"initialize",
"bwind_in",
"binary_enhanced_mass_loss_parameter",
"The binary enhanced mass loss parameter (inactive for single).",
0.0
)
handler.add_caching_parameter(
"initialize",
"hewind_in",
"helium_star_mass_loss_factor",
"Helium star mass loss factor",
1.0
)
handler.add_caching_parameter(
"initialize",
"alpha1_in",
"common_envelope_efficiency",
"The common-envelope efficiency parameter",
1.0
)
handler.add_caching_parameter(
"initialize",
"CElambda_in",
"common_envelope_binding_energy_factor",
"The binding energy factor for common envelope evolution",
0.5
)
handler.add_caching_parameter(
"initialize",
"ceflag_in",
"common_envelope_model_flag",
"ceflag > 0 activates spin-energy correction in common-envelope. ceflag = 3 activates de Kool common-envelope model (0).",
0
)
handler.add_caching_parameter(
"initialize",
"tflag_in",
"tidal_circularisation_flag",
"tflag > 0 activates tidal circularisation (1).",
1
)
handler.add_caching_parameter(
"initialize",
"ifflag_in",
"white_dwarf_IFMR_flag",
"ifflag > 0 uses white dwarf IFMR (initial-final mass relation) of HPE, 1995, MNRAS, 272, 800 (0).",
0
)
handler.add_caching_parameter(
"initialize",
"wdflag_in",
"white_dwarf_cooling_flag",
"wdflag > 0 uses modified-Mestel cooling for WDs (0).",
1
)
handler.add_caching_parameter(
"initialize",
"bhflag_in",
"black_hole_kick_flag",
"bhflag > 0 allows velocity kick at BH formation (0).",
0
)
handler.add_caching_parameter(
"initialize",
"nsflag_in",
"neutron_star_mass_flag",
"nsflag > 0 takes NS/BH mass from Belczynski et al. 2002, ApJ, 572, 407 (1).",
1
)
handler.add_caching_parameter(
"initialize",
"mxns_in",
"maximum_neutron_star_mass",
"The maximum neutron star mass (1.8, nsflag=0; 3.0, nsflag=1).",
3.0 | units.MSun
)
handler.add_caching_parameter(
"initialize",
"idum_in",
"SN_kick_random_seed",
"The random number seed used in the kick routine.",
29769
)
handler.add_caching_parameter(
"initialize",
"pts1_in",
"fractional_time_step_1",
"The timesteps chosen in each evolution phase as decimal fractions of the time taken in that phase: MS (0.05)",
0.05
)
handler.add_caching_parameter(
"initialize",
"pts2_in",
"fractional_time_step_2",
"The timesteps chosen in each evolution phase as decimal fractions of the time taken in that phase: GB, CHeB, AGB, HeGB (0.01)",
0.01
)
handler.add_caching_parameter(
"initialize",
"pts3_in",
"fractional_time_step_3",
"The timesteps chosen in each evolution phase as decimal fractions of the time taken in that phase: HG, HeMS (0.02)",
0.02
)
handler.add_caching_parameter(
"initialize",
"sigma_in",
"SN_kick_speed_dispersion",
"The dispersion in the Maxwellian for the SN kick speed (190 km/s).",
190.0 | units.km / units.s
)
handler.add_caching_parameter(
"initialize",
"beta_in",
"wind_velocity_factor",
"The wind velocity factor: proportional to vwind**2 (1/8).",
0.125
)
handler.add_caching_parameter(
"initialize",
"xi_in",
"wind_accretion_efficiency",
"The wind accretion efficiency factor (1.0).",
1.0
)
handler.add_caching_parameter(
"initialize",
"acc2_in",
"wind_accretion_factor",
"The Bondi-Hoyle wind accretion factor (3/2).",
1.5
)
handler.add_caching_parameter(
"initialize",
"epsnov_in",
"nova_retained_accreted_matter_fraction",
"The fraction of accreted matter retained in nova eruption (0.001).",
0.001
)
handler.add_caching_parameter(
"initialize",
"eddfac_in",
"Eddington_mass_transfer_limit_factor",
"The Eddington limit factor for mass transfer (1.0).",
1.0
)
handler.add_caching_parameter(
"initialize",
"gamma_in",
"Roche_angular_momentum_factor",
"The angular momentum factor for mass lost during Roche (-1.0). ",
-1.0
)
def define_state(self, handler):
common.CommonCode.define_state(self, handler)
handler.add_transition('INITIALIZED','RUN','commit_parameters')
handler.add_method('RUN', 'evolve_binary')
handler.add_method('RUN','before_get_parameter')
handler.add_method('RUN','before_set_parameter')
def define_particle_sets(self, handler):
handler.define_inmemory_set('particles', BSEStars)
handler.define_inmemory_set('binaries', BSEBinaries)
handler.add_attribute(
'binaries',
'time_step',
'_get_time_step',
('child1', 'child2', 'age')
#('child1', 'type2',
# 'initial_mass1', 'initial_mass2',
# 'mass1', 'mass2',
# 'MS_lifetime1', 'MS_lifetime2',
# 'epoch1', 'epoch2',
#'age')
)
def _get_time_step(self, child1, child2, age):
child1 = child1.as_set()
child2 = child2.as_set()
return self.get_time_step(
child1.stellar_type, child2.stellar_type,
child1.initial_mass, child2.initial_mass,
child1.mass, child2.mass,
child1.age, child2.age,
child1.epoch, child2.epoch,
age
)
def orbital_period_to_semi_major_axis(self, orbital_period, mass1, mass2):
mu = (mass1 + mass2) * constants.G
return (((orbital_period / (2.0 * numpy.pi))**2)*mu)**(1.0/3.0)
def semi_major_axis_to_orbital_period(self, semi_major_axis, mass1, mass2):
mu = (mass1 + mass2) * constants.G
return 2.0 * numpy.pi * ((semi_major_axis**3/mu)**0.5)
def _evolve_binaries(self, particles, end_time):
binary_attributes = (
"age",
"semi_major_axis",
"eccentricity"
)
single_attributes = (
"stellar_type",
"initial_mass",
"mass",
"radius",
"luminosity",
"core_mass",
"core_radius",
"convective_envelope_mass",
"convective_envelope_radius",
"spin",
"epoch",
"age",
)
children1 = particles.child1.as_set()
children2 = particles.child2.as_set()
children1_arguments = children1.get_values_in_store(children1.get_all_indices_in_store(), single_attributes)
children2_arguments = children2.get_values_in_store(children2.get_all_indices_in_store(), single_attributes)
binaries_arguments = particles.get_values_in_store(particles.get_all_indices_in_store(), binary_attributes)
binaries_arguments[1] = self.semi_major_axis_to_orbital_period(binaries_arguments[1] , children1_arguments[2], children2_arguments[2])
arguments = []
for argument1, argument2 in zip(children1_arguments, children2_arguments):
arguments.append(argument1)
arguments.append(argument2)
arguments.extend(binaries_arguments)
arguments.append(end_time.as_vector_with_length(len(particles)))
result = self.evolve_binary(*arguments)
result[-3] = self.orbital_period_to_semi_major_axis(result[-3] , result[4], result[5])
children1_results = []
children2_results = []
index = 0
for dummy in range(len(children1_arguments)):
children1_results.append(result[index])
index += 1
children2_results.append(result[index])
index += 1
children1.set_values_in_store(children1.get_all_indices_in_store(), single_attributes, children1_results)
children2.set_values_in_store(children2.get_all_indices_in_store(), single_attributes, children2_results)
particles.set_values_in_store(particles.get_all_indices_in_store(), binary_attributes, result[index:])
def evolve_model(self, end_time = None, keep_synchronous = True):
if not keep_synchronous:
self._evolve_binaries(self.binaries, self.binaries.time_step + self.binaries.age)
return
if end_time is None:
end_time = self.model_time + min(self.binaries.time_step)
self._evolve_binaries(self.binaries, end_time - self.model_time + self.binaries.age)
self.model_time = end_time
def commit_particles(self):
pass
def update_time_steps(self):
pass
def commit_parameters(self):
self.parameters.send_cached_parameters_to_code()
self.overridden().commit_parameters()
def initialize_module_with_current_parameters(self):
self.commit_parameters()
def initialize_module_with_default_parameters(self):
"""
* neta is the Reimers mass-loss coefficient (neta*4x10^-13; 0.5 normally).
* bwind is the binary enhanced mass loss parameter (inactive for single).
* hewind is a helium star mass loss factor (1.0 normally).
* sigma is the dispersion in the Maxwellian for the SN kick speed (190 km/s).
*
* ifflag > 0 uses WD IFMR of HPE, 1995, MNRAS, 272, 800 (0).
* wdflag > 0 uses modified-Mestel cooling for WDs (0).
* bhflag > 0 allows velocity kick at BH formation (0).
* nsflag > 0 takes NS/BH mass from Belczynski et al. 2002, ApJ, 572, 407 (1).
* mxns is the maximum NS mass (1.8, nsflag=0; 3.0, nsflag=1).
* idum is the random number seed used in the kick routine.
*
* Next come the parameters that determine the timesteps chosen in each
* evolution phase:
* pts1 - MS (0.05)
* pts2 - GB, CHeB, AGB, HeGB (0.01)
* pts3 - HG, HeMS (0.02)
* as decimal fractions of the time taken in that phase.
"""
self.parameters.set_defaults()
self.commit_parameters()
Bse = BSE
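# Minimal sketch (illustrative, not part of the interface): exercise the
# Kepler conversion defined on BSE using plain AMUSE quantities. No BSE
# worker is started, and the one-solar-mass / one-Jupiter-mass values are
# assumptions chosen so the expected answer is easy to check.
if __name__ == "__main__":
    mass1 = 1.0 | units.MSun
    mass2 = 1.0 | units.MJupiter
    period = 1.0 | units.yr
    mu = (mass1 + mass2) * constants.G
    semi_major_axis = (((period / (2.0 * numpy.pi)) ** 2) * mu) ** (1.0 / 3.0)
    # a Sun-Jupiter pair with a one-year period sits near 1 AU
    print(semi_major_axis.in_(units.AU))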
|
{"hexsha": "b4492cd3446360dc308d9146d05ab0da2649fa95", "size": 27751, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/amuse/community/bse/interface.py", "max_stars_repo_name": "GFTwrt/amuse", "max_stars_repo_head_hexsha": "ff9e1ff6904e191f6b5a2e6f84c078062f553293", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/amuse/community/bse/interface.py", "max_issues_repo_name": "GFTwrt/amuse", "max_issues_repo_head_hexsha": "ff9e1ff6904e191f6b5a2e6f84c078062f553293", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/amuse/community/bse/interface.py", "max_forks_repo_name": "GFTwrt/amuse", "max_forks_repo_head_hexsha": "ff9e1ff6904e191f6b5a2e6f84c078062f553293", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.8918083462, "max_line_length": 142, "alphanum_fraction": 0.6149327952, "include": true, "reason": "import numpy", "num_tokens": 6241}
|
import numpy as np
def query_integral_image( integral_image, size_x, size_y):
x = integral_image.shape[0]
y = integral_image.shape[1]
area, i, j=0,0,0
hits = 0
# count how many possible locations
for i in range(x - size_x):
    for j in range(y - size_y):
area = integral_image[i, j] + integral_image[i + size_x, j + size_y]
area -= integral_image[i + size_x, j] + integral_image[i, j + size_y]
if not area:
hits += 1
if not hits:
# no room left
return None
# pick a location at random
goal = np.random.randint(hits)
hits = 0
for i in range(x - size_x):
    for j in range(y - size_y):
area = integral_image[i, j] + integral_image[i + size_x, j + size_y]
area -= integral_image[i + size_x, j] + integral_image[i, j + size_y]
if not area:
hits += 1
if hits == goal:
return i, j
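# Minimal usage sketch (added for illustration; the mask geometry is made
# up): build the summed-area table of an occupancy mask with two cumulative
# sums, then ask for a random empty 5x5 slot.
if __name__ == "__main__":
    occupancy = np.zeros((60, 80))
    occupancy[20:40, 30:50] = 1  # an occupied block that must be avoided
    integral = occupancy.cumsum(axis=0).cumsum(axis=1)
    print(query_integral_image(integral, 5, 5))  # (i, j) of a free slot, or None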
|
{"hexsha": "ac22ed645e8ce2e7838b232446884badcd2191a2", "size": 1050, "ext": "py", "lang": "Python", "max_stars_repo_path": "vizs/astrotrend.tmp/query_integral_image.py", "max_stars_repo_name": "fedhere/fedhere.github.io", "max_stars_repo_head_hexsha": "8c38da0305b3a9294ee4da86b7da69b50e5205d8", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vizs/astrotrend.tmp/query_integral_image.py", "max_issues_repo_name": "fedhere/fedhere.github.io", "max_issues_repo_head_hexsha": "8c38da0305b3a9294ee4da86b7da69b50e5205d8", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vizs/astrotrend.tmp/query_integral_image.py", "max_forks_repo_name": "fedhere/fedhere.github.io", "max_forks_repo_head_hexsha": "8c38da0305b3a9294ee4da86b7da69b50e5205d8", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8125, "max_line_length": 84, "alphanum_fraction": 0.5152380952, "include": true, "reason": "import numpy", "num_tokens": 265}
|
import heterocl as hcl
import os, sys
import numpy as np
def test_stream():
A = hcl.placeholder((32, 32), "A")
def kernel_two(A):
B = hcl.compute(A.shape, lambda i, j : A[i, j] + 1, "B")
C = hcl.compute(A.shape, lambda i, j : B[i, j] + 1, "C")
return C
target = hcl.Platform.xilinx_zc706
target.config(compiler="vivado_hls", mode="csyn", project="stream.prj")
# The kernel is executed only when the schedule is created
s = hcl.create_schedule([A], kernel_two)
s_B, s_C = kernel_two.B, kernel_two.C
s.to(s_B, s[s_C])
mod = hcl.build(s, target=target)
print(mod.src)
# mod()
report = mod.report()
report.display()
if __name__ == "__main__":
test_stream()
|
{"hexsha": "8a0199ed9e0bfcb9f89b9937d0b2bb20612bf18b", "size": 740, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/mlir/hcl-mlir/test_stream.py", "max_stars_repo_name": "chhzh123/heterocl", "max_stars_repo_head_hexsha": "856e9b8ad877d11280a7e457e91ca89803c05570", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/mlir/hcl-mlir/test_stream.py", "max_issues_repo_name": "chhzh123/heterocl", "max_issues_repo_head_hexsha": "856e9b8ad877d11280a7e457e91ca89803c05570", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/mlir/hcl-mlir/test_stream.py", "max_forks_repo_name": "chhzh123/heterocl", "max_forks_repo_head_hexsha": "856e9b8ad877d11280a7e457e91ca89803c05570", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4285714286, "max_line_length": 75, "alphanum_fraction": 0.6148648649, "include": true, "reason": "import numpy", "num_tokens": 226}
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as Spar
import scipy.sparse.linalg as SparLinalg
np.set_printoptions(linewidth = 500)
def NN_Arr(coor):
N = coor.shape[0]
NN = -1*np.ones((N,4), dtype = 'int')
xmax = max(coor[:,0])
ymax = max(coor[:,1])
Lx = xmax + 1
Ly = ymax + 1
for i in range(N):
xi = coor[i, 0]
yi = coor[i, 1]
if (i - 1) >= 0 and (xi - 1) >= 0 and abs(xi - coor[i-1, 0]) == 1 and abs(yi - coor[i-1, 1]) == 0:
    NN[i, 0] = i - 1
if (i + 1) < N and (xi + 1) <= xmax and abs(xi - coor[i+1, 0]) == 1 and abs(yi - coor[i+1, 1]) == 0:
    NN[i, 2] = i + 1
for j in range(0, int(Lx) + 1):
    if (i + j) < N and (yi + 1) <= ymax and abs(yi - coor[int(i + j), 1]) == 1 and abs(xi - coor[int(i + j), 0]) == 0:
        NN[i, 1] = i + j
    if (i - j) >= 0 and (yi - 1) >= 0 and abs(yi - coor[int(i - j), 1]) == 1 and abs(xi - coor[int(i - j), 0]) == 0:
        NN[i, 3] = i - j
return NN
def kSq_gen(coor,ax,ay,NN):
row = []; col = []; data = []
N = coor.shape[0]
for i in range(N):
# A[i,i] = 2./ax**2 + 2./ay**2
row.append(i); col.append(i); data.append(2./ax**2 + 2./ay**2)
if NN[i,0] != -1: # checking if we have a left nearest neighbor
row.append(NN[i,0]); col.append(i); data.append(-1./ax**2)
if NN[i,2] != -1: # checking if we have a right nearest neighbor
row.append(NN[i,2]); col.append(i); data.append(-1./ax**2)
if NN[i,1] != -1: # Check top nearest neighbor
row.append(NN[i,1]); col.append(i); data.append(-1./ay**2)
if NN[i,3] != -1: # Check bottom nearest neighbor
row.append(NN[i,3]); col.append(i); data.append(-1./ay**2)
kSq_csc = Spar.csc_matrix((data,(row,col)),shape = (N,N),dtype = 'complex')
return kSq_csc
a = 1.
ax = 1.; ay = 1.1
Nx = 100
Ny = 100
coor_arr = np.zeros((Nx*Ny,2))
counter = 0
for j in range(Ny):
for i in range(Nx):
coor_arr[counter,0] = i * a
coor_arr[counter,1] = j * a
counter += 1
NN = NN_Arr(coor_arr)
kSq = kSq_gen(coor_arr,ax,ay,NN)
plt.scatter(coor_arr[:,0],coor_arr[:,1])
plt.scatter(coor_arr[NN[35],0],coor_arr[NN[35],1],c = 'r')
plt.show()
H = 1. * kSq # should be multiplied by (hbar^2 / (2*m))
num = 5 # This is the number of eigenvalues and eigenvectors you want
sigma = 100 * 0.001766 # This is the eigenvalue we search around
which = 'LM'
print "H shape: ", H.shape
eigs,vecs = SparLinalg.eigsh(H,k=num,sigma = sigma, which = which)
idx_sort = np.argsort(eigs)
eigs = eigs[idx_sort]
vecs = vecs[:,idx_sort]
print(eigs[0])
### Plotting
for i in range(vecs.shape[1]):
plt.scatter(coor_arr[:,0],coor_arr[:,1],c = np.square(np.absolute(vecs[:,i])),cmap = 'hot')
plt.title('E/Eo = %.5f' % (eigs[i] / eigs[0] * 2.))
plt.show()
|
{"hexsha": "64b7d5a955c83e34a17060db0999482cbe8b1308", "size": 2954, "ext": "py", "lang": "Python", "max_stars_repo_path": "junk/practice/sparse_toy.py", "max_stars_repo_name": "tbcole/majoranaJJ", "max_stars_repo_head_hexsha": "dcf31f7786fa0a4874a940b7d8dcdd55f3921a46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "junk/practice/sparse_toy.py", "max_issues_repo_name": "tbcole/majoranaJJ", "max_issues_repo_head_hexsha": "dcf31f7786fa0a4874a940b7d8dcdd55f3921a46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-03-24T23:46:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-19T20:29:08.000Z", "max_forks_repo_path": "junk/practice/sparse_toy.py", "max_forks_repo_name": "tbcole/majoranaJJ", "max_forks_repo_head_hexsha": "dcf31f7786fa0a4874a940b7d8dcdd55f3921a46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-30T08:48:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T12:15:15.000Z", "avg_line_length": 22.5496183206, "max_line_length": 129, "alphanum_fraction": 0.5345294516, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1074}
|
import json
import os
import time
import numpy as np
import pyUSRP as u
from flask_socketio import emit
from flask_login import current_user
from app import socketio, check_connection, measure_manager
@socketio.on('vna_param')
def handle_vna_param(msg, methods=['GET', 'POST']):
f_min = 1e6*float(msg['start_F'])
f_max = 1e6*float(msg['end_F'])
f_lo = 1e6*float(msg['central_F'])
N_points = int(msg['N_points'])
scan_time = msg['scan_time']
tx_gain = int(msg['tx_gain'])
try:
    iterations = int(msg['pass'])
except (ValueError, TypeError):
    iterations = None
try:
    rate = int(msg['rate'])
except (ValueError, TypeError):
    rate = None
try:
    decim = int(msg['decim'])
except (ValueError, TypeError):
    decim = None
try:
    tone_comp = int(msg['amp'])
except ValueError:
    tone_comp = 1
except TypeError:
    tone_comp = None
if check_connection():
socketio.emit('vna_response',json.dumps({'connected':int(1)}))
args = {
'start_f' : f_min,
'last_f' : f_max,
'measure_t' : scan_time,
'n_points' : N_points,
'tx_gain' : tx_gain,
'Rate':rate,
'decimation':True,
'RF':f_lo,
'Front_end':None,
'Device':None,
'output_filename':None,
'Multitone_compensation':tone_comp,
'Iterations':iterations,
'verbose':False
}
measure_manager.enqueue_measure(
target = u.Single_VNA,
args = args,
kind = "vna",
name = "VNA_"+str(time.time()),
author = str(current_user)[6:-1]
)
else:
socketio.emit('vna_response',json.dumps({'connected':int(0)}))
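# Equivalent helper (illustrative only, not wired into the handler above):
# the repeated int-parsing with a fallback can be expressed once as
def _to_int_or(value, default=None):
    try:
        return int(value)
    except (ValueError, TypeError):
        return default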
|
{"hexsha": "89fa2d72c90c35947f6750f3192e2ee827260d39", "size": 2150, "ext": "py", "lang": "Python", "max_stars_repo_path": "handlers/VNA_handlers.py", "max_stars_repo_name": "LorenzoMinutolo/GPU_SDR_WEBGUI", "max_stars_repo_head_hexsha": "3b04b10c140acdf793fcca60e07c6efb348b27f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "handlers/VNA_handlers.py", "max_issues_repo_name": "LorenzoMinutolo/GPU_SDR_WEBGUI", "max_issues_repo_head_hexsha": "3b04b10c140acdf793fcca60e07c6efb348b27f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "handlers/VNA_handlers.py", "max_forks_repo_name": "LorenzoMinutolo/GPU_SDR_WEBGUI", "max_forks_repo_head_hexsha": "3b04b10c140acdf793fcca60e07c6efb348b27f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6666666667, "max_line_length": 74, "alphanum_fraction": 0.4986046512, "include": true, "reason": "import numpy", "num_tokens": 477}
|
function solution()
sum, curr, next = 0, 1, 2
while curr < 4000000
if curr % 2 == 0
sum += curr
end
temp = next
next += curr
curr = temp
end
sum
end
if length(ARGS) == 1 && ARGS[1] == "-a"
println(solution())
exit(0)
end
for line in eachline(STDIN)
iters = int(line)
start = time_ns()
for i in 1:iters
solution()
end
end_ = time_ns()
println(end_ - start)
end
|
{"hexsha": "0b60e8eeb3b8ca3ac72fdcab5de00790e70534f2", "size": 421, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "problems/002/002.jl", "max_stars_repo_name": "jsdelivrbot/euler_criterion.rs", "max_stars_repo_head_hexsha": "7eee1353560d2a784209ca8735632806388e14e1", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2015-04-05T03:47:25.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-21T15:18:49.000Z", "max_issues_repo_path": "problems/002/002.jl", "max_issues_repo_name": "jsdelivrbot/euler_criterion.rs", "max_issues_repo_head_hexsha": "7eee1353560d2a784209ca8735632806388e14e1", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-01-18T21:03:23.000Z", "max_issues_repo_issues_event_max_datetime": "2015-04-10T03:41:25.000Z", "max_forks_repo_path": "problems/002/002.jl", "max_forks_repo_name": "jsdelivrbot/euler_criterion.rs", "max_forks_repo_head_hexsha": "7eee1353560d2a784209ca8735632806388e14e1", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-01-18T20:09:17.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-06T22:55:52.000Z", "avg_line_length": 12.7575757576, "max_line_length": 39, "alphanum_fraction": 0.5653206651, "num_tokens": 150}
|
[STATEMENT]
lemma del_Leaf[simp]: "del x Leaf = None"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. del x \<langle>\<rangle> = None
[PROOF STEP]
using val_cong[OF del_tm.simps(1)]
[PROOF STATE]
proof (prove)
using this:
Time_Monad.val (del_tm ?x1 \<langle>\<rangle>) = Time_Monad.val (return None \<bind> tick)
goal (1 subgoal):
1. del x \<langle>\<rangle> = None
[PROOF STEP]
by(simp only: del_def val_simps)
|
{"llama_tokens": 172, "file": "Root_Balanced_Tree_Root_Balanced_Tree", "length": 2}
|
# -*- coding: utf-8 -*-
"""
Setting up data recovery for relative displacements
"""
import numpy as np
import scipy.linalg as la
from pyyeti.nastran import n2p
# FIXME: We need the str/repr formatting used in Numpy < 1.14.
try:
np.set_printoptions(legacy="1.13")
except TypeError:
pass
def relative_displacement_dtm(nas, node_pairs):
"""
Form relative displacements data recovery matrix
Parameters
----------
nas : dictionary
This is the nas2cam dictionary: ``nas = op2.rdnas2cam()``. For
a Craig-Bampton component, `nas` will also need to have `mug1`
and `tug1` entries as shown in the example usage below. The
matrix `mug1` is a data recovery matrix (to recover the
displacements) and `tug1` is the DOF map to `mug1`: ``[node,
dof]``. These get created during an "EXTSEOUT" Nastran run.
The naming convention is::
tug1 --> nas['tug1'][se]
mug1 --> nas['extse'][se]['mug1']
node_pairs : 2d array_like
Four column matrix where each row contains the superelement ID
and node ID for two non-coincident nodes: ``[SE1, Node1, SE2,
Node2]``. The relative displacement between each node pair is
computed along a vector from Node1 to Node2 with positive
meaning increased distance.
Returns
-------
reldtm : 2d ndarray
This is the displacement-dependent relative displacement
recovery matrix. There is one row per node pair and the order
given in `node_pairs` is preserved.
dist : 1d ndarray
Vector of distances between node pairs.
labels : list
List of labels briefly describing each row in `reldtm`. For
example, if one row in `node_pairs` is: ``[101, 3, 102, 30]``,
the label for that row would be: 'SE102,30 - SE101,3'.
Notes
-----
The algorithm works as follows:
1. Forms two data recovery matrices for the X, Y and Z DOF of
each node listed in `node_pairs`. One (`DTMG`) recovers from
residual g-set DOF and the other (`DTMQ`) recovers from the
residual q-set.
2. Multiplies `DTMG` by the g-set residual rigid-body modes. The
resulting 6-column matrix is passed to
:func:`pyyeti.nastran.n2p.find_xyz_triples` which calculates
the location of each node and applies coordinated transforms
to `DTMQ` such that it recovers in the basic coordinate
system for all nodes.
3. For each pair of nodes in `node_pairs`:
a. Form a new rectangular coordinate system based at Node1
with the z-axis pointing to Node2.
b. Transform the Node1 and Node2 rows of `DTMQ` to output in
the new coordinate system.
c. Form a single row of the final `reldtm` by subtracting the
Node1 Z recovery from the Node2 Z recovery: ``dtm2[2] -
dtm1[2]``. This means that a positive relative
displacement corresponds to an increased distance between
the two nodes.
As a final check, the magnitude of the rigid-body part of
`reldtm` is examined. If the largest value is greater than 1e-6 a
warning message is printed.
Example usage::
# load nastran data:
nas = op2.rdnas2cam('nas2cam')
SC = 101
n2p.addulvs(nas, SC)
# read in more data for SC since it is a Craig-Bampton model:
if 'tug1' not in nas:
nas['tug1'] = {}
nas['tug1'][SC] = nastran.rddtipch('outboard.pch')
if 'extse' not in nas:
nas['extse'] = {}
nas['extse'][SC] = nastran.op4.read('outboard.op4')
node_pairs = [
[SC, 3, SC, 10],
[ 0, 11, SC, 18],
]
reldtm, dist, lbls = relative_displacement_dtm(
nas, node_pairs)
# add the above items to the data recovery:
drdefs = cla.DR_Def({'se': 0})
@cla.DR_Def.addcat
def _():
name = 'reldisp'
desc = 'Relative Displacements'
units = 'in'
labels = lbls
drms = {name: reldtm}
drfunc = f"Vars[se]['{name}'] @ sol.d"
histpv = 'all'
drdefs.add(**locals())
# prepare spacecraft data recovery matrices
DR = cla.DR_Event()
DR.add(nas, drdefs)
# initialize results (ext, mnc, mxc for all drms)
results = DR.prepare_results(mission, event)
# solve equations of motion:
ts = ode.SolveUnc(*mbk, h)
sol = ts.tsolve(genforce, static_ic=1)
sol.t = t
sol = DR.apply_uf(sol, *mbk, nas['nrb'], rfmodes)
results.time_data_recovery(sol, nas['nrb'],
caseid, DR, LC, j)
# write report of results:
results.rpttab()
Raises
------
ValueError
When Node1 and Node2 of any node pair are coincident.
"""
# to get locations in basic, need rb modes relative to origin:
rbg = n2p.rbgeom_uset(nas["uset"][0])
# call formdrm only once per superelement:
node_pairs = np.atleast_2d(node_pairs)
nrel = node_pairs.shape[0]
dtmq = np.empty((nrel * 6, nas["lambda"][0].shape[0]))
dtmg = np.empty((nrel * 6, 6))
senodes = np.vstack((node_pairs[:, :2], node_pairs[:, 2:]))
senodesdof = np.array([[se, n, i] for se, n in senodes for i in range(1, 4)])
for se in set(senodes[:, 0]):
pvd = senodesdof[:, 0] == se
dof = senodesdof[pvd, 1:]
try:
tq = n2p.formdrm(nas, se, dof)[0]
tx = n2p.formdrm(nas, se, dof, gset=True)[0]
except ValueError:
u1x = n2p.formulvs(nas, se, gset=True)
pv = n2p.mkdofpv(nas["tug1"][se], "p", dof)[0]
tq = nas["extse"][se]["mug1"][pv] @ nas["ulvs"][se]
tx = nas["extse"][se]["mug1"][pv] @ u1x
dtmq[pvd] = tq
dtmg[pvd] = tx @ rbg
mats = {"dtmq": dtmq}
xyz = n2p.find_xyz_triples(dtmg, mats=mats, inplace=True)
reldtm = np.empty((nrel, dtmq.shape[1]))
dist = np.empty(nrel)
ext_coords = xyz.coords.max(axis=0)
for i in range(nrel):
n1 = slice(i * 3, i * 3 + 3)
n2 = slice((nrel + i) * 3, (nrel + i) * 3 + 3)
# make transform from basic to C:
a = xyz.coords[i * 3]
b = xyz.coords[(i + nrel) * 3]
# check for coincident nodes:
if la.norm((b - a) / ext_coords) < 1e-5:
raise ValueError(
f"coincident nodes detected at index {i} of "
f"`node_pairs`: {node_pairs[i]}"
)
c = a + (b - a)[[1, 2, 0]]
C = np.array([[1, 1, 0], a, b, c])
To_local = n2p.mkusetcoordinfo(C, None, {})[2:].T
dtm1 = To_local @ dtmq[n1]
dtm2 = To_local @ dtmq[n2]
# positive for moving apart (b - a > 0):
reldtm[i] = dtm2[2] - dtm1[2]
dist[i] = la.norm(b - a)
labels = [f"SE{se2},{n2} - SE{se1},{n1}" for se1, n1, se2, n2 in node_pairs]
return reldtm, dist, labels
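# Illustrative check (not part of pyyeti; the node locations and motions are
# made up): the relative displacement along the line from node "a" to node
# "b" is the projection of the motion difference on the unit vector from a
# to b, so nodes moving apart give a positive value, matching the sign
# convention of `reldtm` above.
if __name__ == "__main__":
    a = np.array([0.0, 0.0, 0.0])
    b = np.array([10.0, 0.0, 0.0])
    u1 = np.array([0.1, 0.0, 0.0])  # node a moves toward b
    u2 = np.array([0.5, 0.0, 0.0])  # node b moves away faster
    unit = (b - a) / la.norm(b - a)
    print(unit @ (u2 - u1))  # 0.4 -> the pair separates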
|
{"hexsha": "9e332bd26597a571e99c292edb28d79f1b063f48", "size": 7083, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyyeti/cla/rel_disp_dtm.py", "max_stars_repo_name": "twmacro/pyye", "max_stars_repo_head_hexsha": "c4febd44be836bd87368da13c1fb0cf82838b687", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2016-03-02T18:29:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T08:41:56.000Z", "max_issues_repo_path": "pyyeti/cla/rel_disp_dtm.py", "max_issues_repo_name": "twmacro/pyye", "max_issues_repo_head_hexsha": "c4febd44be836bd87368da13c1fb0cf82838b687", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-15T02:11:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T12:49:57.000Z", "max_forks_repo_path": "pyyeti/cla/rel_disp_dtm.py", "max_forks_repo_name": "twmacro/pyye", "max_forks_repo_head_hexsha": "c4febd44be836bd87368da13c1fb0cf82838b687", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-06-11T17:09:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T19:15:07.000Z", "avg_line_length": 33.5687203791, "max_line_length": 81, "alphanum_fraction": 0.5732034449, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2058}
|
#!/usr/bin/env python
"""
This script transforms duckietown images into costmaps.
A costmap is a greyscale image that specifies which areas of the map are driveable.
The white sections are able to driven on by the duckiebot, and the black sections are not.
Practically, the costmap will be used in A* to weight certain edges.
Edges that correspond to non driveable areas will have a large weight.
Edges that are on the road will have low weight.
test your implementation with the following command:
python duckie_astar_hw7/render_costmap.py --outfile "costmap.png"
Your costmap will be part of your checkoff, so be sure to save it.
"""
import sys
sys.path.append("/Users/williamloo/selfdriving-fa19/gym-duckietown")
import argparse
import cv2
import numpy as np
import gym
import gym_duckietown
from gym_duckietown.envs import DuckietownEnv
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default=None)
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--distortion', default=False, action='store_true')
parser.add_argument('--draw-curve', action='store_true', help='draw the lane following curve')
parser.add_argument('--draw-bbox', action='store_true', help='draw collision detection bounding boxes')
parser.add_argument('--domain-rand', action='store_true', help='enable domain randomization')
parser.add_argument('--frame-skip', default=1, type=int, help='number of frames to skip')
parser.add_argument('--seed', default=1, type=int, help='seed')
parser.add_argument('--outfile', default="costmap.png", type=str)
parser.add_argument('--radius', default=75, type=int)
args = parser.parse_args()
env = DuckietownEnv(
seed=args.seed,
map_name=args.map_name,
draw_curve=args.draw_curve,
draw_bbox=args.draw_bbox,
domain_rand=args.domain_rand,
frame_skip=args.frame_skip,
distortion=args.distortion,
do_color_relabeling=True)
"""YOUR CODE HERE"""
# reset the environment
env.reset()
# render a top_down_rgb_array image from the environment
top_down_environment = env.render(mode="top_down_rgb_array")
# convert the image to an opencv UMat
frame = cv2.UMat(top_down_environment)
# convert the color space from BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# apply a color filter to detect everything within the HSV spectrum of [0, 0, 0] to [20, 20, 20]
lower = np.array([0, 0, 0])
upper = np.array([20, 20, 20])
mask = cv2.inRange(hsv, lower, upper)
#x = cv2.bitwise_and(frame,frame, mask)
# assign the resulting thresholded image to "x"
cv2.imshow('x',mask)
cv2.waitKey(500)
x = mask
"""END YOUR CODE HERE"""
contours, hierarchy = cv2.findContours(x, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
"""YOUR CODE HERE"""
# using OpenCV, erode the costmap x with a square kernel of size args.radius
kernel = np.ones((args.radius, args.radius), np.uint8)
img = cv2.erode(x, kernel, iterations=2)
# apply a Gaussian blur to smooth the costmap, using cv2.BORDER_DEFAULT
x = cv2.GaussianBlur(img, (5, 5), 0, borderType=cv2.BORDER_DEFAULT)
"""END YOUR CODE HERE"""
cv2.imwrite(args.outfile, x)
lower_left = (999999, 999999)
upper_right = (0, 0)
for j in range(env.grid_height):
for i in range(env.grid_width):
tile = env._get_tile(i, j)
kind = tile['kind']
if (kind == "3way_left" or kind == "3way_right" or kind == "straight"
or kind == "curve_right" or kind == "curve_left"):
lower_left = (min(lower_left[0], i), min(lower_left[1], j))
upper_right = (max(upper_right[0], i), max(upper_right[1], j))
lower_left = (env.road_tile_size * lower_left[0], env.road_tile_size * lower_left[1])
upper_right = (env.road_tile_size * (upper_right[0] + 1), env.road_tile_size * (upper_right[1] + 1))
lower_left_image = (999999, 999999)
upper_right_image = (0, 0)
for x in contours:
pos_min = x.min(axis=0)[0]
pos_max = x.max(axis=0)[0]
lower_left_image = (min(lower_left_image[0], pos_min[0]), min(lower_left_image[1], pos_min[1]))
upper_right_image = (max(upper_right_image[0], pos_max[0]), max(upper_right_image[1], pos_max[1]))
# calculate the world to image coordinates scale factor and xy offset
scale_factor = (lower_left[0] - upper_right[0]) / (
lower_left_image[0] - upper_right_image[0])
offset = (lower_left_image[0] - lower_left[0] / scale_factor,
lower_left_image[1] - lower_left[1] / scale_factor)
np.save("transform.npy", np.array([scale_factor, offset[0], offset[1]]))
|
{"hexsha": "d755474123405a3596a64af5b33ea694a1349b95", "size": 4745, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym-duckietown/duckie_astar_hw7/render_costmap.py", "max_stars_repo_name": "williamlooo/self-driving-decal", "max_stars_repo_head_hexsha": "0d083619c44d13eecde09ad0a7660459d8861c0d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gym-duckietown/duckie_astar_hw7/render_costmap.py", "max_issues_repo_name": "williamlooo/self-driving-decal", "max_issues_repo_head_hexsha": "0d083619c44d13eecde09ad0a7660459d8861c0d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym-duckietown/duckie_astar_hw7/render_costmap.py", "max_forks_repo_name": "williamlooo/self-driving-decal", "max_forks_repo_head_hexsha": "0d083619c44d13eecde09ad0a7660459d8861c0d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6766917293, "max_line_length": 107, "alphanum_fraction": 0.6788198103, "include": true, "reason": "import numpy", "num_tokens": 1245}
|
import copy
import cv2
import numpy as np
import onnxruntime
class YolopONNX(object):
def __init__(
self,
model_path='yolop-640-640.onnx',
input_shape=(640, 640),
score_th=0.3,
nms_th=0.45,
providers=['CPUExecutionProvider'],
):
# input size
self.input_shape = input_shape
# thresholds
self.class_score_th = score_th
self.nms_th = nms_th
# load the model
self.onnx_session = onnxruntime.InferenceSession(
model_path,
providers=providers,
)
def inference(self, image):
# preprocessing
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
resize_rgb_image, r, dw, dh, new_unpad_w, new_unpad_h = self._resize_unscale(
rgb_image,
self.input_shape,
)
input_image = resize_rgb_image.copy().astype(np.float32)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_image = (input_image / 255 - mean) / std
input_image = input_image.transpose(2, 0, 1)
input_image = np.expand_dims(input_image, axis=0)
input_image = input_image.astype('float32')
# inference
det_out, drive_area_seg, lane_line_seg = self.onnx_session.run(
['det_out', 'drive_area_seg', 'lane_line_seg'],
input_feed={"images": input_image},
)
# NMS
bboxes = self._nms(
det_out,
conf_thres=self.class_score_th,
iou_thres=self.nms_th,
agnostic=False,
)[0]
# rescale bounding-box coordinates back to the original image
bboxes[:, 0] -= dw
bboxes[:, 1] -= dh
bboxes[:, 2] -= dw
bboxes[:, 3] -= dh
bboxes[:, :4] /= r
# crop each segmentation map to the unpadded region
drive_area_seg = drive_area_seg[:, :, dh:dh + new_unpad_h,
dw:dw + new_unpad_w]
lane_line_seg = lane_line_seg[:, :, dh:dh + new_unpad_h,
dw:dw + new_unpad_w]
drive_area_seg_mask = np.argmax(drive_area_seg, axis=1)[0]
lane_line_seg_mask = np.argmax(lane_line_seg, axis=1)[0]
return bboxes, drive_area_seg_mask, lane_line_seg_mask
def draw(
self,
image,
bboxes,
drive_area_seg_mask,
lane_line_seg_mask,
):
image_height, image_width, _ = image.shape
debug_image = copy.deepcopy(image)
# drivable-area segmentation overlay
bg_image = np.zeros(image.shape, dtype=np.uint8)
bg_image[:] = [0, 255, 0]
mask = np.stack((drive_area_seg_mask, ) * 3, axis=-1).astype('uint8')
mask = cv2.resize(
mask,
dsize=(image_width, image_height),
interpolation=cv2.INTER_LINEAR,
)
mask = np.where(mask > 0.5, 0, 1)
mask_image = np.where(mask, debug_image, bg_image)
debug_image = cv2.addWeighted(debug_image, 0.5, mask_image, 0.5, 1.0)
# lane-line segmentation overlay
bg_image = np.zeros(image.shape, dtype=np.uint8)
bg_image[:] = [255, 0, 0]
mask = np.stack((lane_line_seg_mask, ) * 3, axis=-1).astype('uint8')
mask = cv2.resize(
mask,
dsize=(image_width, image_height),
interpolation=cv2.INTER_LINEAR,
)
mask = np.where(mask > 0.5, 0, 1)
mask_image = np.where(mask, debug_image, bg_image)
debug_image = cv2.addWeighted(debug_image, 0.5, mask_image, 0.5, 1.0)
# vehicle bounding boxes
for bbox in bboxes:
x1, y1 = int(bbox[0]), int(bbox[1])
x2, y2 = int(bbox[2]), int(bbox[3])
# score = bbox[4]
# class_id = int(bbox[5])
cv2.rectangle(debug_image, (x1, y1), (x2, y2), (0, 0, 255), 2, 2)
return debug_image
def _xywh2xyxy(self, x):
y = np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def _box_iou(self, box1, box2):
def box_area(box):
# (x2 - x1) * (y2 - y1)
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = np.clip(np.minimum(box1[:, None, 2:], box2[:, 2:]) -
                np.maximum(box1[:, None, :2], box2[:, :2]), 0, None).prod(2)
# iou = inter / (area1 + area2 - inter)
return inter / (area1[:, None] + area2 - inter)
def _nms(
self,
prediction,
conf_thres=0.25,
iou_thres=0.45,
agnostic=False,
):
xc = prediction[..., 4] > conf_thres # candidates
# Settings
max_wh = 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
redundant = True # require redundant detections
merge = False # use merge-NMS
output = [np.zeros((0, 6))] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
x = x[xc[xi]] # confidence
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = self._xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
conf = copy.deepcopy(x[:, 5:])
j = np.zeros((x[:, 5:].shape[0], 1))
x = np.concatenate((box, conf, j), 1)
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
# boxes (offset by class), scores
boxes, scores = x[:, :4] + c, x[:, 4]
i = cv2.dnn.NMSBoxes(
bboxes=boxes.tolist(),
scores=scores.tolist(),
score_threshold=0.3,
nms_threshold=0.45,
top_k=5000,
)
if len(i) > 0:
i = i.flatten()
# limit detections
if i.shape[0] > max_det:
i = i[:max_det]
# Merge NMS (boxes merged using weighted mean)
if merge and (1 < i.shape[0] < 3E3):
    # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
    # iou matrix
    iou = self._box_iou(boxes[i], boxes) > iou_thres
    # box weights
    weights = iou * scores[None]
    # merged boxes
    x[i, :4] = np.dot(weights, x[:, :4]) / weights.sum(
        1, keepdims=True)
if redundant:
# require redundancy
i = i[iou.sum(1) > 1]
output[xi] = x[i]
return output
def _resize_unscale(self, img, new_shape=(640, 640), color=114):
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
resize_rgb_image = np.zeros((new_shape[0], new_shape[1], 3))
resize_rgb_image.fill(color)
# Scale ratio (new / old) new_shape(h,w)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
# Compute padding
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) # w,h
new_unpad_w = new_unpad[0]
new_unpad_h = new_unpad[1]
pad_w, pad_h = new_shape[1] - new_unpad_w, new_shape[
0] - new_unpad_h # wh padding
dw = pad_w // 2 # divide padding into 2 sides
dh = pad_h // 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA)
resize_rgb_image[dh:dh + new_unpad_h, dw:dw + new_unpad_w, :] = img
return resize_rgb_image, r, dw, dh, new_unpad_w, new_unpad_h # (dw,dh)
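    # Worked example (hypothetical input): a 720x1280 frame letterboxed to
    # (640, 640) gives r = min(640/720, 640/1280) = 0.5, new_unpad = (640, 360),
    # pad_h = 280, and the image is pasted with dh = 140 rows of padding above.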
if __name__ == "__main__":
yolop = YolopONNX(
model_path='weights/yolop-640-640.onnx',
input_shape=(640, 640),
providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
)
video_capture = cv2.VideoCapture("video/sample.mp4")
while True:
ret, frame = video_capture.read()
if not ret:
break
bboxes, da_seg_mask, ll_seg_mask = yolop.inference(frame)
result_image = yolop.draw(
frame,
bboxes,
da_seg_mask,
ll_seg_mask,
)
result_image = cv2.resize(result_image, dsize=None, fx=0.5, fy=0.5)
cv2.imshow("YOLOP", result_image)
key = cv2.waitKey(1)
if key == 27: # ESC
break
video_capture.release()
cv2.destroyAllWindows()
|
{"hexsha": "4572157da34475255cdd6a0b974396ce2bfa6151", "size": 8950, "ext": "py", "lang": "Python", "max_stars_repo_path": "yolop_onnx.py", "max_stars_repo_name": "Kazuhito00/YOLOP-ONNX-Video-Inference-Sample", "max_stars_repo_head_hexsha": "ecc10c2c80cf7ef50b247541ef89eedc8cfc080e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2022-01-16T06:34:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T10:00:43.000Z", "max_issues_repo_path": "yolop_onnx.py", "max_issues_repo_name": "Kazuhito00/YOLOP-ONNX-Video-Inference-Sample", "max_issues_repo_head_hexsha": "ecc10c2c80cf7ef50b247541ef89eedc8cfc080e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-20T10:25:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T00:37:10.000Z", "max_forks_repo_path": "yolop_onnx.py", "max_forks_repo_name": "Kazuhito00/YOLOP-ONNX-Video-Inference-Sample", "max_forks_repo_head_hexsha": "ecc10c2c80cf7ef50b247541ef89eedc8cfc080e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-16T13:36:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T13:36:17.000Z", "avg_line_length": 32.3104693141, "max_line_length": 85, "alphanum_fraction": 0.5174301676, "include": true, "reason": "import numpy", "num_tokens": 2616}
|
"""
Example file
"""
import os
import sys
sys.path = [os.path.abspath("..")] + sys.path
import matplotlib.pyplot as plt
import numpy as np
from mpl_chord_diagram import chord_diagram
# flux matrix
flux = np.array([
[11975, 5871, 8916, 2868],
[ 1951, 10048, 2060, 6171],
[ 8010, 16145, 81090, 8045],
[ 1013, 990, 940, 6907]
])
names = ['non-crystal', 'FCC', 'HCP', 'BCC']
# plot different examples
grads = (True, False, False, False) # gradient
gaps = (0.03, 0, 0.03, 0) # gap value
sorts = ("size", "distance", None, "distance") # sort type
cclrs = (None, None, "slategrey", None) # chord colors
nrota = (False, False, True, True) # name rotation
cmaps = (None, None, None, "summer") # colormap
fclrs = "grey" # fontcolors
drctd = (False, False, False, True) # directed
args = (grads, gaps, sorts, cclrs, nrota, cmaps, drctd)
for grd, gap, srt, cc, nr, cm, d in zip(*args):
chord_diagram(flux, names, gap=gap, use_gradient=grd, sort=srt, directed=d,
cmap=cm, chord_colors=cc, rotate_names=nr, fontcolor=fclrs)
str_grd = "_gradient" if grd else ""
plt.savefig(
"images/example{}_sort-{}{}.png".format(str_grd, srt,
"_directed" if d else ""),
dpi=600, transparent=True, bbox_inches='tight',
pad_inches=0.02)
plt.show()
# plot with partial circle
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
keep = list(range(len(flux) - 1))
total = flux.sum()
partial = flux[keep][:, keep].sum()
colors = ["#cc2233", "#2233cc", "orange", "gray"]
chord_diagram(flux, names, ax=ax1, colors=colors, start_at=60)
chord_diagram(flux[keep][:, keep], names[:-1], ax=ax2, colors=colors[:-1],
start_at=60, extent=360*partial/total)
plt.show()
# minimum chord width for flows with zero reciprocals
flux = np.array([
[11975, 5871, 8916, 0],
[ 1951, 0, 2060, 0],
[ 8010, 16145, 3504, 0],
[ 0, 5200, 300, 6907]
])
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
chord_diagram(flux, names, ax=ax1)
chord_diagram(flux, names, ax=ax2, min_chord_width=200)
plt.show()
|
{"hexsha": "3409b986029c23954b993d3ff4ad30d45a2d6f1a", "size": 2267, "ext": "py", "lang": "Python", "max_stars_repo_path": "example.py", "max_stars_repo_name": "Silmathoron/matplotlib-chord-diagram", "max_stars_repo_head_hexsha": "0e3d2fd15539bf2544b1bc368cd62e3c57df9dfe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example.py", "max_issues_repo_name": "Silmathoron/matplotlib-chord-diagram", "max_issues_repo_head_hexsha": "0e3d2fd15539bf2544b1bc368cd62e3c57df9dfe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example.py", "max_forks_repo_name": "Silmathoron/matplotlib-chord-diagram", "max_forks_repo_head_hexsha": "0e3d2fd15539bf2544b1bc368cd62e3c57df9dfe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0574712644, "max_line_length": 79, "alphanum_fraction": 0.5743273048, "include": true, "reason": "import numpy", "num_tokens": 755}
|
// SPDX-License-Identifier: MIT
// The MIT License (MIT)
//
// Copyright (c) 2014-2018, Institute for Software & Systems Engineering
// Copyright (c) 2018-2019, Johannes Leupolz
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef PEMC_LMC_LMC_H_
#define PEMC_LMC_LMC_H_
#include <atomic>
#include <functional>
#include <gsl/span>
#include <string>
#include <vector>
#include "pemc/basic/dll_defines.h"
#include "pemc/basic/label.h"
#include "pemc/basic/model_capacity.h"
#include "pemc/basic/probability.h"
#include "pemc/basic/tsc_index.h"
#include "pemc/formula/formula.h"
namespace pemc {
struct LmcStateEntry {
TransitionIndex from;
int32_t elements;
};
// Transition = Target + Probability
struct LmcTransitionEntry {
Probability probability;
Label label;
StateIndex state;
LmcTransitionEntry() = default;
LmcTransitionEntry(Probability _probability, Label _label, StateIndex _state)
: probability(_probability), label(_label), state(_state) {}
};
class Lmc {
private:
TransitionIndex maxNumberOfTransitions = 0;
std::atomic<TransitionIndex> transitionCount{0};
std::vector<LmcTransitionEntry> transitions;
TransitionIndex initialTransitionFrom =
-1; // is uninitialized at first, but may be something else than 0
int32_t initialTransitionElements = 0;
StateIndex maxNumberOfStates = 0;
StateIndex stateCount = 0;
std::vector<LmcStateEntry> states;
std::vector<std::string> labelIdentifier;
TransitionIndex getPlaceForNewTransitionEntries(NoOfElements number);
public:
Lmc();
gsl::span<LmcStateEntry> getStates();
gsl::span<std::string> getLabelIdentifier();
void setLabelIdentifier(const std::vector<std::string>& _labelIdentifier);
std::function<bool(TransitionIndex)> createLabelBasedFormulaEvaluator(
Formula* formula);
gsl::span<LmcTransitionEntry> getTransitions();
gsl::span<LmcTransitionEntry> getInitialTransitions();
std::tuple<TransitionIndex, TransitionIndex> getInitialTransitionIndexes();
gsl::span<LmcTransitionEntry> getTransitionsOfState(StateIndex state);
std::tuple<TransitionIndex, TransitionIndex> getTransitionIndexesOfState(
StateIndex state);
TransitionIndex getPlaceForNewTransitionEntriesOfState(StateIndex stateIndex,
NoOfElements number);
TransitionIndex getPlaceForNewInitialTransitionEntries(NoOfElements number);
void setLmcTransitionEntry(TransitionIndex index,
const LmcTransitionEntry& entry);
void createStutteringState(StateIndex stutteringStateIndex);
void initialize(ModelCapacity& modelCapacity);
void finishCreation(StateIndex _stateCount);
void validate();
};
} // namespace pemc
#endif // PEMC_LMC_LMC_H_
|
{"hexsha": "f3af5af517751760124b35f66a03fd7f0e4800d8", "size": 3785, "ext": "h", "lang": "C", "max_stars_repo_path": "pemc/lmc/lmc.h", "max_stars_repo_name": "joleuger/pemc", "max_stars_repo_head_hexsha": "14deb5b97d4219ba3c92d3834ab71332997e9b13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pemc/lmc/lmc.h", "max_issues_repo_name": "joleuger/pemc", "max_issues_repo_head_hexsha": "14deb5b97d4219ba3c92d3834ab71332997e9b13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pemc/lmc/lmc.h", "max_forks_repo_name": "joleuger/pemc", "max_forks_repo_head_hexsha": "14deb5b97d4219ba3c92d3834ab71332997e9b13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0462962963, "max_line_length": 80, "alphanum_fraction": 0.7574636724, "num_tokens": 869}
|
# This script actually solves the CoeurdacierReyWinant model with a risk-adjusted linearization
# and times the methods, if desired
using BenchmarkTools, RiskAdjustedLinearizations, Test, JLD2
include("crw.jl")
# Settings
diagnostics = true
# Set up
m_crw = CoeurdacierReyWinant()
m = crw(m_crw)
z0 = copy(m.z)
y0 = copy(m.y)
Ψ0 = copy(m.Ψ)
sssout = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference/crw_sss.jld2"), "r")
# Small perturbation b/c initialized at the stochastic steady state from a saved file
m.z .= 1.1 * m.z
m.y .= 1.1 * m.y
m.Ψ .= 1.1 * m.Ψ
# Solve!
solve!(m, m.z, m.y, m.Ψ; algorithm = :homotopy, step = .5)
# Only homotopy seems to work for this model. The relaxation algorithm
# has trouble finding an answer with smaller error than 1e-3
# solve!(m, m.z, m.y, m.Ψ; algorithm = :relaxation, verbose = :high, ftol = 5e-5, damping = .9)
@test isapprox(sssout["z_rss"], m.z)
@test isapprox(sssout["y_rss"], m.y)
@test isapprox(sssout["Psi_rss"], m.Ψ)
if diagnostics
# See crw.jl for the definition of the functions
# crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, and crw_endo_states
shocks = JLD2.jldopen(joinpath(dirname(@__FILE__), "..", "..", "test", "reference", "crw_shocks.jld2"), "r")["shocks"]
@test abs(euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, shocks, summary_statistic = x -> norm(x, Inf))) < 3e-5
c_err, endo_states_err = dynamic_euler_equation_error(m, crw_cₜ, crw_logSDFxR, crw_𝔼_quadrature, crw_endo_states, 1, shocks)
@test c_err < 2e-5
@test endo_states_err < 1e-3
end
|
{"hexsha": "8af029de4c482516ff9c91c1c415f618917849c5", "size": 1577, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/crw/example_crw.jl", "max_stars_repo_name": "chenwilliam77/RiskAdjustedLinearizations", "max_stars_repo_head_hexsha": "24d95b555882bc5336fe9fb456e9364c6f8f0f3f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-08-05T15:42:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T22:08:25.000Z", "max_issues_repo_path": "examples/crw/example_crw.jl", "max_issues_repo_name": "chenwilliam77/RiskAdjustedLinearizations.jl", "max_issues_repo_head_hexsha": "24d95b555882bc5336fe9fb456e9364c6f8f0f3f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-09-17T14:15:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-08T10:46:48.000Z", "max_forks_repo_path": "examples/crw/example_crw.jl", "max_forks_repo_name": "chenwilliam77/RiskAdjustedLinearizations", "max_forks_repo_head_hexsha": "24d95b555882bc5336fe9fb456e9364c6f8f0f3f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-03T14:38:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-23T23:24:08.000Z", "avg_line_length": 36.6744186047, "max_line_length": 132, "alphanum_fraction": 0.7019657578, "num_tokens": 576}
|
import torch
import numpy as np
import matplotlib.pyplot as plt
import pickle
import advOpt
from poisson_example import Poisson_FIM
##############
##VECTOR FIELD
##############
fim = Poisson_FIM(omega=(2., 1.))
logit_design_grid, eta11_grid = np.meshgrid(np.linspace(-0.65, 0.65, 10),
np.linspace(-0.2, -0.14, 11))
logit_design_grad_grid = np.zeros_like(logit_design_grid)
eta11_grad_grid = np.zeros_like(eta11_grid)
for i in range(eta11_grid.shape[0]):
for j in range(eta11_grid.shape[1]):
logit_design = torch.tensor([logit_design_grid[i,j]], requires_grad=True)
eta11 = torch.tensor(eta11_grid[i,j], requires_grad=True)
eta = torch.zeros(2, dtype=torch.float32)
eta[1] += eta11
A = fim.makeA(eta)
design = torch.sigmoid(logit_design)
objective = fim.estimateK(design.unsqueeze(0), A.unsqueeze(0))
objective.backward()
logit_design_grad_grid[i,j] = float(logit_design.grad)
eta11_grad_grid[i,j] = float(eta11.grad)
vector_field_plot = plt.quiver(logit_design_grid, eta11_grid,
-logit_design_grad_grid, eta11_grad_grid/1000,
angles='xy')
vector_field_plot.axes.set_xlabel(r'$\lambda$')
vector_field_plot.axes.set_ylabel(r'$\eta_{11}$')
##############
##OPTIMISATION OUTPUT
##############
with open('outputs/poisson3.pkl', 'rb') as infile:
output3 = pickle.load(infile)
with open('outputs/poisson4.pkl', 'rb') as infile:
output4 = pickle.load(infile)
with open('outputs/poisson5.pkl', 'rb') as infile:
output5 = pickle.load(infile)
logit = lambda x: np.log(x/(1-x))
dot_indices = range(99, 5000, 100)
toplot = [(output3, 'b'), (output4, 'g'), (output5, 'r')]
for (o, col) in toplot:
plt.plot(logit(o['design'][dot_indices,0,0]),
np.log(o['A'][dot_indices,0,0,0]), 'o' + col)
plt.plot(logit(o['design'][:,0,0]), np.log(o['A'][:,0,0,0]), '-' + col)
plt.xlim([-0.7, 0.7])
plt.ylim([-0.203, -0.137])
plt.tight_layout()
plt.savefig('plots/poisson_vector_field.pdf')
plt.figure()
for (o, _) in toplot:
plt.plot(o['iterations'], logit(o['design'][:,0,0]))
plt.ylim([-0.7, 0.7])
plt.xlabel('Iterations')
plt.ylabel(r'$\lambda$')
plt.tight_layout()
plt.savefig('plots/poisson_traceplot_design.pdf')
plt.figure()
for (o, _) in toplot:
plt.plot(o['iterations'], np.log(o['A'][:,0,0,0]))
plt.ylim([-0.205, -0.135])
plt.xlabel('Iterations')
plt.ylabel(r'$\eta_{11}$')
plt.tight_layout()
plt.savefig('plots/poisson_traceplot_param.pdf')
|
{"hexsha": "0b673bd22a4c67da42effdbe94f3a8ab5423fe4e", "size": 2516, "ext": "py", "lang": "Python", "max_stars_repo_path": "poisson_plots.py", "max_stars_repo_name": "dennisprangle/AdversarialDesignCode", "max_stars_repo_head_hexsha": "ddf0c7ee667e20e236dfc804394835a9bee11d84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "poisson_plots.py", "max_issues_repo_name": "dennisprangle/AdversarialDesignCode", "max_issues_repo_head_hexsha": "ddf0c7ee667e20e236dfc804394835a9bee11d84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "poisson_plots.py", "max_forks_repo_name": "dennisprangle/AdversarialDesignCode", "max_forks_repo_head_hexsha": "ddf0c7ee667e20e236dfc804394835a9bee11d84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6829268293, "max_line_length": 77, "alphanum_fraction": 0.6486486486, "include": true, "reason": "import numpy", "num_tokens": 740}
|
[STATEMENT]
lemma sub_args: "s \<in> set (args t) \<Longrightarrow> sub s t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s \<in> set (args t) \<Longrightarrow> sub s t
[PROOF STEP]
by (induct t) (auto intro: sub.intros)
|
{"llama_tokens": 90, "file": "Lambda_Free_RPOs_Lambda_Free_Term", "length": 1}
|
import numpy as np
from keras.models import load_model
import sys
import json
import os
import cv2
imagePath = os.getcwd() + "/uploads/dog.jpg"
img = cv2.imread(imagePath)
img = cv2.resize(img, (50, 50))  # the model expects 50x50 inputs
img = img / 255.0                # scale pixel values to [0, 1]
img = img.reshape(1, 50, 50, 3)  # add the batch dimension
filePath = os.getcwd() + "/pyScript/model.h5"
print(filePath)
model = load_model(filePath)
predict = model.predict(img)
data = { "predict": predict.tolist() }
print(json.dumps(data))
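# e.g. this prints something like {"predict": [[0.98, 0.02]]}; the inner list's
# length depends on the loaded model's output layer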
sys.stdout.flush()
|
{"hexsha": "10c6feecfd95fd256a00471e2abb21a646db41fa", "size": 461, "ext": "py", "lang": "Python", "max_stars_repo_path": "node-server/pyScript/app.py", "max_stars_repo_name": "bhanuraghav/Agri-Learning", "max_stars_repo_head_hexsha": "db78894bc0a0ae24c972c7e8fed78ca6fe584f9a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "node-server/pyScript/app.py", "max_issues_repo_name": "bhanuraghav/Agri-Learning", "max_issues_repo_head_hexsha": "db78894bc0a0ae24c972c7e8fed78ca6fe584f9a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "node-server/pyScript/app.py", "max_forks_repo_name": "bhanuraghav/Agri-Learning", "max_forks_repo_head_hexsha": "db78894bc0a0ae24c972c7e8fed78ca6fe584f9a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2083333333, "max_line_length": 45, "alphanum_fraction": 0.7071583514, "include": true, "reason": "import numpy", "num_tokens": 131}
|
import copy
import glob
import os
import time
import types
from collections import deque
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import tensorflow as tf
import pickle
import algo
from arguments import get_args
from envs import make_vec_envs
from model import Policy
from storage import RolloutStorage
from gatedpixelcnn_bonus import PixelBonus
from skimage.transform import resize
#from visualize import visdom_plot
def update_tf_wrapper_args(args, tf_flags):
"""
take input command line args to DQN agent and update tensorflow wrapper default
settings
:param args:
:param FLAGS:
:return:
"""
# doesn't support boolean arguments
to_parse = args.wrapper_args
if to_parse:
for kwarg in to_parse:
keyname, val = kwarg.split('=')
if keyname in ['ckpt_path', 'data_path', 'samples_path', 'summary_path']:
# if directories don't exist, make them
if not os.path.exists(val):
os.makedirs(val)
tf_flags.update(keyname, val)
elif keyname in ['data', 'model']:
tf_flags.update(keyname, val)
elif keyname in ['mmc_beta']:
tf_flags.update(keyname, float(val))
else:
tf_flags.update(keyname, int(val))
return tf_flags
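# A minimal sketch of what the parser above does (hypothetical CLI values):
# args.wrapper_args == ['mmc_beta=0.95', 'layers=4'] results in
# tf_flags.update('mmc_beta', 0.95) and tf_flags.update('layers', 4).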
class DotDict(object):
def __init__(self, dict):
self.dict = dict
def __getattr__(self, name):
return self.dict[name]
def update(self, name, val):
self.dict[name] = val
# can delete this later
def get(self, name):
return self.dict[name]
FLAGS = DotDict({
'img_height': 42,
'img_width': 42,
'channel': 1,
'data': 'mnist',
'conditional': False,
'num_classes': None,
'filter_size': 3,
'init_fs': 7,
'f_map': 16,
'f_map_fc': 16,
'colors': 8,
'parallel_workers': 1,
'layers': 3,
'epochs': 25,
'batch_size': 16,
'model': '',
'data_path': 'data',
'ckpt_path': 'ckpts',
'samples_path': 'samples',
'summary_path': 'logs',
'restore': True,
'nr_resnet': 1,
'nr_filters': 32,
'nr_logistic_mix': 5,
'resnet_nonlinearity': 'concat_elu',
'lr_decay': 0.999995,
'lr': 0.00005,
'num_ds': 1,
    'ep_max_step': None,
})
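# FLAGS behaves like an attribute-accessible dict: FLAGS.img_height -> 42, and
# FLAGS.update('lr', 1e-4) mutates the underlying dict in place.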
imresize = resize
args = get_args()
print(args)
useNeural = bool(args.useNeural)
assert args.algo in ['a2c', 'ppo', 'acktr']
if args.recurrent_policy:
assert args.algo in ['a2c', 'ppo'], \
'Recurrent policy is not implemented for ACKTR'
num_updates = int(args.num_frames) // args.num_steps // args.num_processes
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
try:
os.makedirs(args.log_dir)
except OSError:
files = glob.glob(os.path.join(args.log_dir, '*.monitor.csv'))
for f in files:
os.remove(f)
eval_log_dir = args.log_dir + "_eval"
try:
os.makedirs(eval_log_dir)
except OSError:
files = glob.glob(os.path.join(eval_log_dir, '*.monitor.csv'))
for f in files:
os.remove(f)
def main():
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
"""
if args.vis:
from visdom import Visdom
viz = Visdom(port=args.port)
win = None
"""
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, args.add_timestep, device, False, args.ep_max_step)
actor_critic = Policy(envs.observation_space.shape, envs.action_space,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic.to(device)
if args.useNeural:
#FLAGS = update_tf_wrapper_args(args,)
tf_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config)
pixel_bonus = PixelBonus(FLAGS, sess)
        tf.global_variables_initializer().run(session=sess)  # initialize_all_variables is deprecated
if args.loadNeural is not None:
pixel_bonus.load_model(args.loadNeural)
#with tf.variable_scope('step'):
# self.step_op = tf.Variable(0, trainable=False, name='step')
# self.step_input = tf.placeholder('int32', None, name='step_input')
# self.step_assign_op = self.step_op.assign(self.step_input)
if args.algo == 'a2c':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, lr=args.lr,
eps=args.eps, alpha=args.alpha,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'ppo':
agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'acktr':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, acktr=True)
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
rollouts.to(device)
episode_rewards = deque(maxlen=100)
    steper = 0
    img_scale = 1
    psc_weight = float(args.pscWeight)
    psc_rollout = list()
start = time.time()
for j in range(num_updates):
step_counter = 0
        psc_tot = list()
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step],
rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
            # Observe reward and next obs
obs, reward, done, infos = envs.step(action)
psc_add = 0
if args.useNeural:
for i in obs[0]:
                    frame = imresize((i / img_scale).cpu().numpy(), (42, 42), order=1)
                    # pass the resized 42x42 frame (the size FLAGS configures) to the bonus
                    psc_add += pixel_bonus.bonus(frame, steper)
steper += 1
psc_add = psc_add / 12
else:
useNeural = 0
psc_tot.append(psc_add)
"""
for info in infos:
if 'episode' in info.keys():
print(reward)
episode_rewards.append(info['episode']['r'])
"""
# FIXME: works only for environments with sparse rewards
for idx, eps_done in enumerate(done):
if eps_done:
episode_rewards.append(reward[idx])
            psc_add = torch.tensor(psc_add, requires_grad=True, dtype=torch.float)
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, psc=psc_add)
with torch.no_grad():
next_value = actor_critic.get_value(rollouts.obs[-1],
rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
value_loss, action_loss, dist_entropy = agent.update(rollouts, psc_tot, psc_weight)
rollouts.after_update()
if j % args.save_interval == 0 and args.save_dir != "":
print('Saving model')
print()
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
save_model = [save_model, hasattr(envs.venv, 'ob_rms') and envs.venv.ob_rms or None]
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
total_num_steps = (j + 1) * args.num_processes * args.num_steps
if j % args.log_interval == 0 and len(episode_rewards) > 1:
end = time.time()
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.2f}/{:.2f}, min/max reward {:.2f}/{:.2f}, success rate {:.2f}\n".
format(
j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards),
np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards)
)
)
if args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0:
eval_envs = make_vec_envs(args.env_name, args.seed + args.num_processes, args.num_processes,
args.gamma, eval_log_dir, args.add_timestep, device, True)
if eval_envs.venv.__class__.__name__ == "VecNormalize":
eval_envs.venv.ob_rms = envs.venv.ob_rms
# An ugly hack to remove updates
def _obfilt(self, obs):
if self.ob_rms:
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
eval_envs.venv._obfilt = types.MethodType(_obfilt, envs.venv)
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(args.num_processes,
actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(args.num_processes, 1, device=device)
while len(eval_episode_rewards) < 10:
with torch.no_grad():
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs, eval_recurrent_hidden_states, eval_masks, deterministic=True)
                # Observe reward and next obs
obs, reward, done, infos = eval_envs.step(action)
eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards),
np.mean(eval_episode_rewards)
))
    if args.useNeural:  # the local 'useNeural' is only bound in the no-bonus branch, so use args directly
pixel_bonus.save_model(str(args.nameDemonstrator) + "neural", step)
"""
if args.vis and j % args.vis_interval == 0:
try:
# Sometimes monitor doesn't properly flush the outputs
win = visdom_plot(viz, win, args.log_dir, args.env_name,
args.algo, args.num_frames)
except IOError:
pass
"""
if __name__ == "__main__":
main()
|
{"hexsha": "2a5d2a10b45d7daef2f60788d75b1878f0eb777b", "size": 11767, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch-a2c-ppo-acktr/main_agent.py", "max_stars_repo_name": "leobix/gym-miniworld", "max_stars_repo_head_hexsha": "8b84c0370c665eb349512d779cc47b1f1b06a41a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pytorch-a2c-ppo-acktr/main_agent.py", "max_issues_repo_name": "leobix/gym-miniworld", "max_issues_repo_head_hexsha": "8b84c0370c665eb349512d779cc47b1f1b06a41a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch-a2c-ppo-acktr/main_agent.py", "max_forks_repo_name": "leobix/gym-miniworld", "max_forks_repo_head_hexsha": "8b84c0370c665eb349512d779cc47b1f1b06a41a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8132183908, "max_line_length": 173, "alphanum_fraction": 0.5755927594, "include": true, "reason": "import numpy", "num_tokens": 2676}
|
#!/usr/bin/env python3
from cv2_demo.cv2Node import cv2Node
import numpy as np
RGB_TOPIC = "/rgb"
CAMERA_INFO_TOPIC = "/camera_info"
def main():
cv2_node = cv2Node(
CAMERA_INFO_TOPIC,
RGB_TOPIC
)
cv2_node.run()
if __name__ == '__main__':
main()
|
{"hexsha": "0abea16164ac5b83fab6538cf7c4b2a08f60b6dc", "size": 279, "ext": "py", "lang": "Python", "max_stars_repo_path": "vision_demo/scripts/vision_main.py", "max_stars_repo_name": "bchandaka/Intro-to-ROS-Workshop", "max_stars_repo_head_hexsha": "5062558cce7b34ee3cdfca00f8cb5b3db92cde6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vision_demo/scripts/vision_main.py", "max_issues_repo_name": "bchandaka/Intro-to-ROS-Workshop", "max_issues_repo_head_hexsha": "5062558cce7b34ee3cdfca00f8cb5b3db92cde6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vision_demo/scripts/vision_main.py", "max_forks_repo_name": "bchandaka/Intro-to-ROS-Workshop", "max_forks_repo_head_hexsha": "5062558cce7b34ee3cdfca00f8cb5b3db92cde6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.4375, "max_line_length": 36, "alphanum_fraction": 0.6594982079, "include": true, "reason": "import numpy", "num_tokens": 79}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
import numpy as np
import pytest
from pydefect.input_maker.supercell import (
Supercell, Supercells, TetragonalSupercells, RhombohedralSupercells)
from pydefect.util.error_classes import SupercellError
from pymatgen import Element
def test_supercell(simple_cubic, simple_cubic_2x1x1):
matrix = [[2, 0, 0], [0, 1, 0], [0, 0, 1]]
supercell = Supercell(input_structure=simple_cubic, matrix=matrix)
assert supercell.structure == simple_cubic_2x1x1
average = (2 + 1 + 1) / 3
expected = (abs(2 - average) + abs(1 - average) * 2) / 3 / average
assert supercell.isotropy == expected
def test_supercell_species_order(complex_monoclinic):
matrix = [[2, 0, 0], [0, 1, 0], [0, 0, 1]]
supercell = Supercell(input_structure=complex_monoclinic, matrix=matrix)
actual = [e.specie for e in supercell.structure]
expected = [Element.H] * 2 + [Element.He] * 8
assert actual == expected
def test_supercell_average_angle(monoclinic):
matrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
supercell = Supercell(input_structure=monoclinic, matrix=matrix)
assert supercell.average_angle == (90 + 90 + 100) / 3
def test_supercells(simple_cubic):
supercells = Supercells(input_structure=simple_cubic,
min_num_atoms=64,
max_num_atoms=100)
expected = simple_cubic * [[4, 0, 0], [0, 4, 0], [0, 0, 4]]
assert isinstance(supercells.most_isotropic_supercell, Supercell)
assert supercells.most_isotropic_supercell.structure == expected
def test_supercells_raise_no_supercell_error(simple_cubic):
supercells = Supercells(input_structure=simple_cubic,
min_num_atoms=10,
max_num_atoms=10)
with pytest.raises(SupercellError):
print(supercells.most_isotropic_supercell)
def test_rhombohedral_supercells(rhombohedral):
supercells = RhombohedralSupercells(input_structure=rhombohedral,
min_num_atoms=2,
max_num_atoms=4)
actual = supercells.most_isotropic_supercell.lattice.angles[0]
assert actual == 74.85849218561553
def test_tetragonal_supercells(elongated_tetragonal):
supercells = TetragonalSupercells(input_structure=elongated_tetragonal,
min_num_atoms=2,
max_num_atoms=300)
actual = supercells.most_isotropic_supercell.lattice.lengths
expected = (4.242640687119285, 4.242640687119285, 4.242640687119286)
np.testing.assert_array_almost_equal(actual, expected)
actual = supercells.most_isotropic_supercell.matrix
expected = np.array([[3, 3, 0], [-3, 3, 0], [0, 0, 1]])
np.testing.assert_array_equal(actual, expected)
def test_matrix_from_x_y():
actual = TetragonalSupercells.matrix_from_x_y(2, 0)
expected = np.array([[2, 0], [0, 2]])
np.testing.assert_array_equal(actual, expected)
actual = TetragonalSupercells.matrix_from_x_y(2, 1)
expected = np.array([[2, 2], [-2, 2]])
np.testing.assert_array_equal(actual, expected)
actual = TetragonalSupercells.matrix_from_x_y(1, 2)
expected = np.array([[0, 2], [-2, 0]]) # rotate the axis
np.testing.assert_array_equal(actual, expected)
def test_next_x_y_combination():
assert TetragonalSupercells.next_x_y_combination(24) == (5, 0)
assert TetragonalSupercells.next_x_y_combination(25) in [(2, 3), (1, 5)]
|
{"hexsha": "c1809cce3775ce097cdf76b7b5e25601f54594c4", "size": 3566, "ext": "py", "lang": "Python", "max_stars_repo_path": "pydefect/tests/input_maker/test_supercell.py", "max_stars_repo_name": "KazMorita/pydefect", "max_stars_repo_head_hexsha": "681e4bfe92c53edfe8b50cb72768114b28daabc9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-05T05:48:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-05T05:48:29.000Z", "max_issues_repo_path": "pydefect/tests/input_maker/test_supercell.py", "max_issues_repo_name": "ZhouWJ1991/pydefect", "max_issues_repo_head_hexsha": "0875a971fec73128b8f6782a328271ebc34ee709", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pydefect/tests/input_maker/test_supercell.py", "max_forks_repo_name": "ZhouWJ1991/pydefect", "max_forks_repo_head_hexsha": "0875a971fec73128b8f6782a328271ebc34ee709", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-07T10:14:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T10:14:16.000Z", "avg_line_length": 37.9361702128, "max_line_length": 76, "alphanum_fraction": 0.6783510937, "include": true, "reason": "import numpy", "num_tokens": 1043}
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Script for generating a dataset for neural renderer training"""
import os
import sys
import argparse
import numpy as np
import confignet
def parse_args(argv):
parser = argparse.ArgumentParser(description="Script for generating avatar datasets")
parser.add_argument("--dataset_dir", help="Path to the directory containing the dataset images", required=True)
parser.add_argument("--dataset_name", help="Name for the output dataset file", required=True)
parser.add_argument("--output_dir", help="Path to the output directory, the .npz file name will correspond to the dataset directory name", required=True)
parser.add_argument("--img_size", type=int, help="Size of the image, same number will be used for height and width", default=256)
parser.add_argument("--pre_normalize", type=int, help="If set to 0 pre_normalization will not be performed", default=1)
parser.add_argument("--img_output_dir", help="If this directory is specified the aligned face images will be dumped to it", default=None)
parser.add_argument("--load_attributes", help="If specified, the script will look for a celeba attribute file in the dataset_dir", action="store_true", default=False)
parser.add_argument("--synthetic_data", help="If specified the dataset will require an accompanying .json metadata file for each image", action="store_true", default=False)
args = parser.parse_args(argv)
dataset_dir = args.dataset_dir
dataset_name = args.dataset_name
output_dir = args.output_dir
img_size = args.img_size
img_output_dir = args.img_output_dir
synthetic_data = args.synthetic_data
load_attributes = args.load_attributes
dataset = confignet.NeuralRendererDataset((img_size, img_size, 3), synthetic_data)
dataset_name = dataset_name + "_res_" + str(img_size)
output_path = os.path.join(output_dir, dataset_name + ".pck")
os.makedirs(output_dir, exist_ok=True)
if load_attributes:
attribute_file_path = os.path.join(dataset_dir, "list_attr_celeba.txt")
else:
attribute_file_path = None
dataset.generate_face_dataset(dataset_dir, output_path, attribute_label_file_path=attribute_file_path, pre_normalize=args.pre_normalize == 1)
if img_output_dir is not None:
print("Writing aligned images to %s"%(img_output_dir))
dataset.write_images(img_output_dir)
if load_attributes:
dataset.write_images_by_attribute(img_output_dir)
if __name__ == "__main__":
parse_args(sys.argv[1:])
|
{"hexsha": "9fc529d6d176940659a5e2fcc2bbeca77ebecae8", "size": 2633, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_dataset.py", "max_stars_repo_name": "microsoft/ConfigNet", "max_stars_repo_head_hexsha": "f16b9b52698b1fe588322fdc5d921746f68d0e9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2020-08-24T01:47:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T08:33:50.000Z", "max_issues_repo_path": "generate_dataset.py", "max_issues_repo_name": "microsoft/ConfigNet", "max_issues_repo_head_hexsha": "f16b9b52698b1fe588322fdc5d921746f68d0e9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-09-18T07:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:42:27.000Z", "max_forks_repo_path": "generate_dataset.py", "max_forks_repo_name": "microsoft/ConfigNet", "max_forks_repo_head_hexsha": "f16b9b52698b1fe588322fdc5d921746f68d0e9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2020-08-24T05:47:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T07:25:13.000Z", "avg_line_length": 52.66, "max_line_length": 177, "alphanum_fraction": 0.7383213065, "include": true, "reason": "import numpy", "num_tokens": 556}
|
"""
Functions to read atomic data.
"""
"""
get_atomic_stage(element::String, stage; source="NIST")
Returns the level structure for a given atomic stage for `element`
and ionisation stage `stage`. Uses data from the given `source`,
returns an `AtomicStage` struct. Currently, the only supported source
is `"NIST"`, using locally-saved data obtained from the
[NIST Atomic Spectra Database Levels Form](https://physics.nist.gov/PhysRefData/ASD/levels_form.html).
# Examples
```julia-repl
julia> MgII = get_atomic_stage("Mg", "II")
AtomicStage("Mg", "Mg_II", 2, II, 137, (...))
julia> MgII = get_atomic_stage("Mg", 2)
AtomicStage("Mg", "Mg_II", 2, II, 137, (...))
```
"""
function get_atomic_stage(element::String, stage; source="NIST")
if element ∉ element_symbols
error("Invalid element $element")
end
if source == "NIST"
return read_NIST(element, stage)
else
error("NotImplemented: atomic data source $source not available.")
end
end
"""
Reads NIST atomic level data saved locally. The data were extracted from the
[NIST Atomic Spectra Database Levels Form](https://physics.nist.gov/PhysRefData/ASD/levels_form.html).
"""
function read_NIST(element::String, stage)
stage = RomanNumeral(stage)
file = string(element, "_", repr(stage), ".txt")
filepath = joinpath(@__DIR__, "..", "data", "NIST", file)
if isfile(filepath)
data = readdlm(filepath)
# Index entries that have statistical weights
index = typeof.(data[:, 4]) .== Int
index .*= data[:, 3] .!= "---" # some cases with mismatched wnum as Int
index .*= data[:, 3] .!= "" # cases with no J or g
g = convert.(Int, data[index, 4])
χ = NIST_wavenumber_to_energy.(data[index, 5])
# Find first ionisation edge
if sum(data[:, 2] .== "Limit") == 0
χ_ion = 0.0u"J"
else
wavenum_ion = data[data[:, 2] .== "Limit", 4][1]
χ_ion = NIST_wavenumber_to_energy(wavenum_ion)
end
return AtomicStage(element, stage, g, χ, χ_ion)
else
error("NIST data for $element $(repr(stage)) not found.")
end
end
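# e.g. read_NIST("Mg", 2) loads data/NIST/Mg_II.txt and returns an AtomicStage
# with statistical weights g, level energies χ, and ionisation energy χ_ion.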
"""
Parses level energy field from NIST tables. Brackets (round or square)
indicate interpolated or theoretical values. Converts from wavenumber
to energy.
"""
function NIST_wavenumber_to_energy(wavenum)
to_remove = ["[", "]", "(", ")", "?", "a", "l", "x", "y", "z",
"u", "+", "†", "&dgger;"]
if typeof(wavenum) in [String, SubString{String}]
for suffix in to_remove
wavenum = replace(wavenum, suffix => "")
end
wn = parse(Float64, wavenum)u"cm^-1"
elseif typeof(wavenum) <: Real
wn = convert(Float64, wavenum)u"cm^-1"
else
error("Invalid type $(typeof(wavenum)) for wave number")
end
return (h * c_0 * wn) |> u"J"
end
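# For example, a (hypothetical) NIST entry "[12345.6]" is cleaned to
# 12345.6 cm^-1 and converted to E = h * c_0 * ν̃ ≈ 2.45e-19 J.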
|
{"hexsha": "f40fe38e2866bb656d29003a359086f58b1c5214", "size": 2879, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/read_utils.jl", "max_stars_repo_name": "tiagopereira/AtomicData.jl", "max_stars_repo_head_hexsha": "0984efc50c4507841c6bc266308bb16f430f6790", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/read_utils.jl", "max_issues_repo_name": "tiagopereira/AtomicData.jl", "max_issues_repo_head_hexsha": "0984efc50c4507841c6bc266308bb16f430f6790", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-07T09:36:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-14T17:02:50.000Z", "max_forks_repo_path": "src/read_utils.jl", "max_forks_repo_name": "tiagopereira/AtomicData.jl", "max_forks_repo_head_hexsha": "0984efc50c4507841c6bc266308bb16f430f6790", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8705882353, "max_line_length": 102, "alphanum_fraction": 0.6241750608, "num_tokens": 826}
|
# Phase 4: Data analysis
# map for 3 groups
tmp_data<-filter(Neudelezevanje_15,Neudelezevanje_15$QUANTILE=="Celotna populacija",Neudelezevanje_15$ACL00=="Športni dogodki")
tmp_data$ACL00= NULL
tmp_data$QUANTILE=NULL
k<-list()
for (i in 1:6){
k[[i]]<-kmeans(tmp_data$Value,i)
}
betweenss_totss<-list()
for (i in 1:6){
betweenss_totss[i]<-k[[i]]$betweenss/k[[i]]$totss
}
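# betweenss/totss is the share of variance explained by the clustering;
# the k where this ratio stops improving noticeably (the elbow) is chosen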
km3=kmeans(tmp_data$Value,3)
km2=kmeans(tmp_data$Value,2)
zemljevid_sport <- tm_shape(merge(svet, tmp_data, by.x = "NAME", by.y = "GEO"),bbox = bb(c(-11,33,36,68))) +
tm_fill(col = "Value", contrast = 1, palette = "YlOrRd", title = "Neudelezevanje šport", breaks = c(15,56 ,71.8,97),colorNA = "Grey", textNA = "Manjkajoči podatki") +
tm_layout(legend.outside = TRUE)
zemljevid_sport
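# the map breaks 56 and 71.8 above are the 3-cluster kmeans boundaries
# computed for the full population (see the clustering further down)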
Neudelezevanje_Sport <-filter(Neudelezevanje_15,Neudelezevanje_15$ACL00=="Športni dogodki")
NMn <-filter(Slika,ACL00=="Muzeji in galerije",QUANTILE =="Celotna populacija",REASON=="Ni zanimanja")
NMn <-merge.data.frame(NMn,GDP,by="GEO")
NMn<-NMn[order(NMn$BDP),]
###########################
NMF <-filter(Slika,ACL00=="Muzeji in galerije",QUANTILE =="Celotna populacija",REASON=="Finančni razlogi")
NMF <-merge.data.frame(NMF,GDP,by="GEO")
NMF <-NMF[order(NMF$BDP),]
# try clustering into groups across all quintiles
km=kmeans(Neudelezevanje_Sport$Value,2)
k<-list()
for (i in 1:6){
k[[i]]<-kmeans(Neudelezevanje_Sport$Value,i)
}
betweenss_totss<-list()
for (i in 1:6){
betweenss_totss[i]<-k[[i]]$betweenss/k[[i]]$totss
}
# 3 groups are optimal
Neudelezevanje_Sport$cluster <- kmeans(Neudelezevanje_Sport$Value,3)$cluster
# we get the boundaries 56 and 71.8; insert them into the Shiny figure
k<-list()
for (i in 1:6){
k[[i]]<-kmeans(Neudelezevanje_15$Value,i)
}
betweenss_totss<-list()
for (i in 1:6){
betweenss_totss[i]<-k[[i]]$betweenss/k[[i]]$totss
}
# so we have 2-3 groups
Neudelezevanje_15$cluster_2 <- kmeans(Neudelezevanje_15$Value,2)$cluster
# the breakpoint is 58
Neudelezevanje_15$cluster_2 <- NULL
Neudelezevanje_15$cluster_3 <- kmeans(Neudelezevanje_15$Value,3)$cluster
# the breakpoints are 46 and
Neudelezevanje_15$cluster_3 <-NULL
m1n<-lm(NMn$TVALUE ~ NMn$BDP)
m2n<-lm(TVALUE ~ poly(BDP,2),data=NMn)
# m1n explains most of the noise; it is also important and interesting that m2 predicts an increase in interest at sufficiently low GDP
razmerje_ned<-ggplot(data=NMn,aes(x=BDP,y=TVALUE))+geom_point()+
geom_line(aes(x=BDP,y=predict(m1n)),col="red")+ ylab("%") +
ggtitle("Nezanimanje")
razmerje_ned
NMF$BDP2 <- NMF$BDP^2
NMF$BDP3 <- NMF$BDP^3
m1<-lm(NMF$TVALUE ~ NMF$BDP)
m2<-lm(TVALUE ~ poly(BDP,2),data=NMF)
# m2 still noticeably improves the model quality, so we plot the quadratic fit here
m3<-lm(NMF$TVALUE ~ NMF$BDP + NMF$BDP2 +NMF$BDP3)
razmerje_fin<-ggplot(data=NMF,aes(x=BDP,y=TVALUE))+geom_point()+
geom_line(aes(x=BDP,y=predict(m2)),col="red")+ ylab("%") +
ggtitle("Finančna nedostopnost")
razmerje_fin
|
{"hexsha": "fbecf9e94744e307789c9b1fecd5f7a7ca40006b", "size": 2889, "ext": "r", "lang": "R", "max_stars_repo_path": "analiza/analiza.r", "max_stars_repo_name": "kuskus-star/APPR-2019-20", "max_stars_repo_head_hexsha": "5b644721b9418a181a0af4033f71779aeeb2c5af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analiza/analiza.r", "max_issues_repo_name": "kuskus-star/APPR-2019-20", "max_issues_repo_head_hexsha": "5b644721b9418a181a0af4033f71779aeeb2c5af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-12-21T12:23:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-08T18:49:30.000Z", "max_forks_repo_path": "analiza/analiza.r", "max_forks_repo_name": "kuskus-star/APPR-2019-20", "max_forks_repo_head_hexsha": "5b644721b9418a181a0af4033f71779aeeb2c5af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.064516129, "max_line_length": 168, "alphanum_fraction": 0.717549325, "num_tokens": 1238}
|
[STATEMENT]
lemma distinguishes_sym :
assumes "distinguishes M q1 q2 io"
shows "distinguishes M q2 q1 io"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distinguishes M q2 q1 io
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
distinguishes M q1 q2 io
goal (1 subgoal):
1. distinguishes M q2 q1 io
[PROOF STEP]
unfolding distinguishes_def
[PROOF STATE]
proof (prove)
using this:
io \<in> LS M q1 \<union> LS M q2 \<and> io \<notin> LS M q1 \<inter> LS M q2
goal (1 subgoal):
1. io \<in> LS M q2 \<union> LS M q1 \<and> io \<notin> LS M q2 \<inter> LS M q1
[PROOF STEP]
by blast
|
{"llama_tokens": 263, "file": "FSM_Tests_FSM", "length": 3}
|
-- Copyright 2022-2023 VMware, Inc.
-- SPDX-License-Identifier: BSD-2-Clause
import .linear
variables {a: Type} [ordered_add_comm_group a].
def positive (s: stream a) := 0 <= s.
def stream_monotone (s: stream a) := ∀ t, s t ≤ s (t+1).
def is_positive {b: Type} [ordered_add_comm_group b]
(f: stream a → stream b) := ∀ s, positive s → positive (f s).
-- TODO: could not get library monotone definition to work, possibly due to
-- partial_order.to_preorder?
-- set_option pp.notation false.
-- set_option pp.implicit true.
-- prove that [stream_monotone] can be rephrased in terms of order preservation
theorem stream_monotone_order (s: stream a) :
stream_monotone s ↔ (∀ t1 t2, t1 ≤ t2 → s t1 ≤ s t2) :=
begin
unfold stream_monotone, split; intro h; introv,
{ intros hle, have heq : t2 = t1 + (t2 - t1) := by omega,
rw heq at *,
generalize : (t2 - t1) = d,
clear_dependent t2,
induction d,
{ simp, },
{ transitivity s (t1 + d_n), assumption,
apply h, }
},
{ apply h, linarith, },
end
lemma integral_monotone (s: stream a) :
positive s → stream_monotone (I s) :=
begin
intros hp,
intros t,
repeat { rw integral_sum_vals },
repeat { simp [sum_vals] },
have h := hp (t + 1), simp at h,
assumption,
end
lemma derivative_pos (s: stream a) :
-- NOTE: paper is missing this, but it is also necessary (maybe they
-- intend `s[-1] =0` in the definition of monotone)
0 ≤ s 0 →
stream_monotone s → positive (D s) :=
begin
intros h0 hp, intros t; simp,
unfold D delay; simp,
split_ifs,
{ subst t, assumption },
{ have hle := hp (t - 1),
have heq : t - 1 + 1 = t := by omega, rw heq at hle,
assumption,
},
end
lemma derivative_pos_counter_example :
(∃ (x:a), x < 0) →
¬(∀ (s: stream a), stream_monotone s → positive (D s)) :=
begin
intros h, cases h with x hneg,
simp,
-- pushing the negation through, we're going to prove
-- ∃ (x : stream a), stream_monotone x ∧ ¬positive (D x)
use (λ _n, x),
split,
{ intros t, simp, },
{ unfold positive,
rw stream_le_ext, simp,
use 0, simp [D],
apply not_le_of_gt, assumption,
},
end
|
{"author": "tchajed", "repo": "database-stream-processing-theory", "sha": "c4c3b7ced9f964f3ea17db77958df78f2d761509", "save_path": "github-repos/lean/tchajed-database-stream-processing-theory", "path": "github-repos/lean/tchajed-database-stream-processing-theory/database-stream-processing-theory-c4c3b7ced9f964f3ea17db77958df78f2d761509/src/ordering.lean"}
|
from __future__ import print_function
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
import numpy as np
from models.deeplabv3p import Deeplabv3
import os
import multiprocessing
workers = multiprocessing.cpu_count()//2
import keras
import keras.backend as K
from keras.utils.data_utils import Sequence
import tensorflow as tf
from keras.optimizers import Adam, SGD, RMSprop
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, LambdaCallback
from keras.layers import *
from models.subpixel import *
from keras.models import Model, Sequential
from keras.callbacks import TensorBoard
from collections import Counter
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.client import device_lib
from keras.regularizers import l2
from keras.utils import to_categorical
from sklearn.utils import class_weight
import cv2
import glob
import random
from tqdm import tqdm
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_labels
import itertools
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
trained_classes = classes
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title,fontsize=11)
tick_marks = np.arange(len(classes))
plt.xticks(np.arange(len(trained_classes)), classes, rotation=90,fontsize=9)
plt.yticks(tick_marks, classes,fontsize=9)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, np.round(cm[i, j],2), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black", fontsize=7)
plt.tight_layout()
plt.ylabel('True label',fontsize=9)
plt.xlabel('Predicted label',fontsize=9)
return cm
# Fully connected CRF post processing function
def do_crf(im, mask, n_labels, enable_color=False, zero_unsure=True):
colors, labels = np.unique(mask, return_inverse=True)
image_size = mask.shape[:2]
# n_labels = len(set(labels.flat))
d = dcrf.DenseCRF2D(image_size[1], image_size[0], n_labels) # width, height, nlabels
U = unary_from_labels(labels, n_labels, gt_prob=.7, zero_unsure=zero_unsure)
d.setUnaryEnergy(U)
# This adds the color-independent term, features are the locations only.
d.addPairwiseGaussian(sxy=(3,3), compat=3)
if enable_color:
# This adds the color-dependent term, i.e. features are (x,y,r,g,b).
# im is an image-array, e.g. im.dtype == np.uint8 and im.shape == (640,480,3)
d.addPairwiseBilateral(sxy=80, srgb=13, rgbim=im.astype('uint8'), compat=10)
Q = d.inference(5) # 5 - num of iterations
MAP = np.argmax(Q, axis=0).reshape(image_size)
unique_map = np.unique(MAP)
for u in unique_map: # get original labels back
np.putmask(MAP, MAP == u, colors[u])
return MAP
# MAP = do_crf(frame, labels.astype('int32'), zero_unsure=False)
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def get_VOC2012_classes():
PASCAL_VOC_classes = {
0: 'background',
1: 'airplane',
2: 'bicycle',
3: 'bird',
4: 'boat',
5: 'bottle',
6: 'bus',
7: 'car',
8: 'cat',
9: 'chair',
10: 'cow',
11: 'table',
12: 'dog',
13: 'horse',
14: 'motorbike',
15: 'person',
16: 'potted_plant',
17: 'sheep',
18: 'sofa',
19 : 'train',
20 : 'tv',
21 : 'void'
}
return PASCAL_VOC_classes
def sparse_crossentropy_ignoring_last_label(y_true, y_pred):
nb_classes = K.int_shape(y_pred)[-1]
y_true = K.one_hot(tf.to_int32(y_true[:,:,0]), nb_classes+1)[:,:,:-1]
return K.categorical_crossentropy(y_true, y_pred)
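# The one-hot trick above: targets are encoded over nb_classes + 1 entries and
# the last (void) channel is dropped, so void pixels become an all-zero target
# and contribute zero loss.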
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
nb_classes = K.int_shape(y_pred)[-1]
y_pred = K.reshape(y_pred, (-1, nb_classes))
y_true = tf.to_int64(K.flatten(y_true))
legal_labels = ~K.equal(y_true, nb_classes)
return K.sum(tf.to_float(legal_labels & K.equal(y_true,
K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))
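# Accuracy counterpart: legal_labels masks out void pixels (label == nb_classes)
# before comparing argmax predictions to the ground truth.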
def Jaccard(y_true, y_pred):
nb_classes = K.int_shape(y_pred)[-1]
iou = []
pred_pixels = K.argmax(y_pred, axis=-1)
    for i in range(0, nb_classes): # all real classes; the void label (nb_classes) is excluded
true_labels = K.equal(y_true[:,:,0], i)
pred_labels = K.equal(pred_pixels, i)
inter = tf.to_int32(true_labels & pred_labels)
union = tf.to_int32(true_labels | pred_labels)
legal_batches = K.sum(tf.to_int32(true_labels), axis=1)>0
ious = K.sum(inter, axis=1)/K.sum(union, axis=1)
iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects
iou = tf.stack(iou)
legal_labels = ~tf.debugging.is_nan(iou)
iou = tf.gather(iou, indices=tf.where(legal_labels))
return K.mean(iou)
class SegModel:
epochs = 20
batch_size = 16
def __init__(self, dataset='VOCdevkit/VOC2012', image_size=(320,320)):
self.sz = image_size
self.mainpath = dataset
self.crop = False
def create_seg_model(self, net, n=21, backbone='mobilenetv2', load_weights=False, multi_gpu=False):
'''
Net is:
1. original deeplab v3+
2. original deeplab v3+ and subpixel upsampling layer
'''
model = Deeplabv3(weights=None, input_tensor=None, infer=False,
input_shape=self.sz + (3,), classes=21,
backbone=backbone, OS=16, alpha=1)
base_model = Model(model.input, model.layers[-5].output)
self.net = net
self.modelpath = 'weights/{}_{}.h5'.format(backbone, net)
if backbone=='xception':
scale = 4
else:
scale = 8
if net == 'original':
x = Conv2D(n, (1, 1), padding='same', name='conv_upsample')(base_model.output)
x = Lambda(lambda x: K.tf.image.resize_bilinear(x,size=(self.sz[0],self.sz[1])))(x)
x = Reshape((self.sz[0]*self.sz[1], -1)) (x)
x = Activation('softmax', name = 'pred_mask')(x)
model = Model(base_model.input, x, name='deeplabv3p')
elif net == 'subpixel':
x = Subpixel(n, 1, scale, padding='same')(base_model.output)
x = Reshape((self.sz[0]*self.sz[1], -1)) (x)
x = Activation('softmax', name = 'pred_mask')(x)
model = Model(base_model.input, x, name='deeplabv3p_subpixel')
# Do ICNR
for layer in model.layers:
if type(layer) == Subpixel:
c, b = layer.get_weights()
w = icnr_weights(scale=scale, shape=c.shape)
layer.set_weights([w, b])
if load_weights:
model.load_weights('weights/{}_{}.h5'.format(backbone, net))
if multi_gpu:
from keras.utils import multi_gpu_model
model = multi_gpu_model(model, gpus = len(get_available_gpus()))
self.model = model
return model
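    # A minimal usage sketch (hypothetical arguments; paths not checked here):
    # seg = SegModel(dataset='VOCdevkit/VOC2012', image_size=(320, 320))
    # model = seg.create_seg_model(net='subpixel', backbone='mobilenetv2')
    # model.compile(optimizer=Adam(1e-4),
    #               loss=sparse_crossentropy_ignoring_last_label,
    #               metrics=[sparse_accuracy_ignoring_last_label, Jaccard])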
def create_generators(self, crop_shape=False, mode='train', do_ahisteq=True, n_classes=21, horizontal_flip=True,
vertical_flip=False, blur=False, with_bg=True, brightness=0.1, rotation=5.0,
zoom=0.1, validation_split=.2, seed=7):
generator = SegmentationGenerator(folder = self.mainpath, mode = mode, n_classes = n_classes, do_ahisteq = do_ahisteq,
batch_size=self.batch_size, resize_shape=self.sz[::-1], crop_shape=crop_shape,
horizontal_flip=horizontal_flip, vertical_flip=vertical_flip, blur = blur,
brightness=brightness, rotation=rotation, zoom=zoom,
validation_split = validation_split, seed = seed)
return generator
def load_weights(self, model):
model.load_weights(self.modelpath)
def train_generator(self, model, train_generator, valid_generator, callbacks, mp = True):
steps = len(train_generator)
h = model.fit_generator(train_generator,
steps_per_epoch=steps,
epochs = self.epochs, verbose=1,
callbacks = callbacks,
validation_data=valid_generator,
validation_steps=len(valid_generator),
max_queue_size=10,
workers=workers, use_multiprocessing=mp)
return h
def train(self, model, X, y, val_data, tf_board = False, plot_train_process = True):
h = model.fit(X, y, validation_data = val_data, verbose=1,
batch_size = self.batch_size, epochs = self.epochs,
callbacks = self.build_callbacks(tf_board = tf_board, plot_process = plot_train_process))
return h
@classmethod
def set_num_epochs(cls, new_epochs):
cls.epochs = new_epochs
@classmethod
def set_batch_size(cls, new_batch_size):
cls.batch_size = new_batch_size
class SegmentationGenerator(Sequence):
def __init__(self, folder='/workspace/datasets/', mode='train', n_classes=21, batch_size=1, resize_shape=None,
validation_split = .1, seed = 7, crop_shape=(640, 320), horizontal_flip=True, blur = 0,
vertical_flip=0, brightness=0.1, rotation=5.0, zoom=0.1, do_ahisteq = True):
self.blur = blur
self.histeq = do_ahisteq
self.image_path_list = sorted(glob.glob(os.path.join(folder, 'JPEGImages', 'train', '*')))
self.label_path_list = sorted(glob.glob(os.path.join(folder, 'SegmentationClassAug', '*')))
np.random.seed(seed)
n_images_to_select = round(len(self.image_path_list) * validation_split)
x = np.random.permutation(len(self.image_path_list))[:n_images_to_select]
if mode == 'train':
x = np.setxor1d(x, np.arange(len(self.image_path_list)))
self.image_path_list = [self.image_path_list[j] for j in x]
self.label_path_list = [self.label_path_list[j] for j in x]
if mode == 'test':
self.image_path_list = sorted(glob.glob(os.path.join(folder, 'JPEGImages', 'test', '*')))[:100]
self.mode = mode
self.n_classes = n_classes
self.batch_size = batch_size
self.resize_shape = resize_shape
self.crop_shape = crop_shape
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.brightness = brightness
self.rotation = rotation
self.zoom = zoom
# Preallocate memory
if self.crop_shape:
self.X = np.zeros((batch_size, crop_shape[1], crop_shape[0], 3), dtype='float32')
self.SW = np.zeros((batch_size, crop_shape[1]*crop_shape[0]), dtype='float32')
self.Y = np.zeros((batch_size, crop_shape[1]*crop_shape[0], 1), dtype='float32')
self.F = np.zeros((batch_size, crop_shape[1]*crop_shape[0], 1), dtype='float32')
self.F_SW = np.zeros((batch_size, crop_shape[1]*crop_shape[0]), dtype='float32')
elif self.resize_shape:
self.X = np.zeros((batch_size, resize_shape[1], resize_shape[0], 3), dtype='float32')
self.SW = np.zeros((batch_size, resize_shape[1]*resize_shape[0]), dtype='float32')
self.Y = np.zeros((batch_size, resize_shape[1]*resize_shape[0], 1), dtype='float32')
self.F = np.zeros((batch_size, resize_shape[1]*resize_shape[0], 1), dtype='float32')
self.F_SW = np.zeros((batch_size, resize_shape[1]*resize_shape[0]), dtype='float32')
else:
raise Exception('No image dimensions specified!')
def __len__(self):
return len(self.image_path_list) // self.batch_size
def __getitem__(self, i):
for n, (image_path, label_path) in enumerate(zip(self.image_path_list[i*self.batch_size:(i+1)*self.batch_size],
self.label_path_list[i*self.batch_size:(i+1)*self.batch_size])):
image = cv2.imread(image_path, 1)
label = cv2.imread(label_path, 0)
labels = np.unique(label)
if self.blur and random.randint(0,1):
image = cv2.GaussianBlur(image, (self.blur, self.blur), 0)
if self.resize_shape and not self.crop_shape:
image = cv2.resize(image, self.resize_shape)
label = cv2.resize(label, self.resize_shape, interpolation = cv2.INTER_NEAREST)
if self.crop_shape:
image, label = _random_crop(image, label, self.crop_shape)
# Do augmentation
if self.horizontal_flip and random.randint(0,1):
image = cv2.flip(image, 1)
label = cv2.flip(label, 1)
if self.vertical_flip and random.randint(0,1):
image = cv2.flip(image, 0)
label = cv2.flip(label, 0)
if self.brightness:
factor = 1.0 + random.gauss(mu=0.0, sigma=self.brightness)
if random.randint(0,1):
factor = 1.0/factor
table = np.array([((i / 255.0) ** factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
image = cv2.LUT(image, table)
if self.rotation:
angle = random.gauss(mu=0.0, sigma=self.rotation)
else:
angle = 0.0
if self.zoom:
scale = random.gauss(mu=1.0, sigma=self.zoom)
else:
scale = 1.0
if self.rotation or self.zoom:
M = cv2.getRotationMatrix2D((image.shape[1]//2, image.shape[0]//2), angle, scale)
image = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
label = cv2.warpAffine(label, M, (label.shape[1], label.shape[0]))
if self.histeq: # and convert to RGB
img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
img_yuv[:,:,0] = clahe.apply(img_yuv[:,:,0])
image = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB) # to RGB
else:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # BGR to RGB
label = label.astype('int32')
for j in np.setxor1d(np.unique(label), labels):
label[label==j] = self.n_classes
y = label.flatten()
y[y>(self.n_classes-1)]=self.n_classes
self.Y[n] = np.expand_dims(y, -1)
self.F[n] = (self.Y[n]!=0).astype('float32') # get all pixels that aren't background
            valid_pixels = self.F[n][self.Y[n]!=self.n_classes] # get all pixels (bg and foreground) that aren't void
u_classes = np.unique(valid_pixels)
class_weights = class_weight.compute_class_weight('balanced', u_classes, valid_pixels)
class_weights = {class_id : w for class_id, w in zip(u_classes, class_weights)}
if len(class_weights)==1: # no bg\no fg
if 1 in u_classes:
class_weights[0] = 0.
else:
class_weights[1] = 0.
elif not len(class_weights):
class_weights[0] = 0.
class_weights[1] = 0.
sw_valid = np.ones(y.shape)
np.putmask(sw_valid, self.Y[n]==0, class_weights[0]) # background weights
            np.putmask(sw_valid, self.F[n], class_weights[1]) # foreground weights
np.putmask(sw_valid, self.Y[n]==self.n_classes, 0)
self.F_SW[n] = sw_valid
self.X[n] = image
# Create adaptive pixels weights
filt_y = y[y!=self.n_classes]
u_classes = np.unique(filt_y)
if len(u_classes):
class_weights = class_weight.compute_class_weight('balanced', u_classes, filt_y)
class_weights = {class_id : w for class_id, w in zip(u_classes, class_weights)}
class_weights[self.n_classes] = 0.
for yy in u_classes:
np.putmask(self.SW[n], y==yy, class_weights[yy])
sample_dict = {'pred_mask' : self.SW}
return self.X, self.Y, sample_dict
def on_epoch_end(self):
# Shuffle dataset for next epoch
c = list(zip(self.image_path_list, self.label_path_list))
random.shuffle(c)
self.image_path_list, self.label_path_list = zip(*c)
def _random_crop(image, label, crop_shape):
if (image.shape[0] != label.shape[0]) or (image.shape[1] != label.shape[1]):
raise Exception('Image and label must have the same dimensions!')
if (crop_shape[0] < image.shape[1]) and (crop_shape[1] < image.shape[0]):
x = random.randrange(image.shape[1]-crop_shape[0])
y = random.randrange(image.shape[0]-crop_shape[1])
return image[y:y+crop_shape[1], x:x+crop_shape[0], :], label[y:y+crop_shape[1], x:x+crop_shape[0]]
else:
image = cv2.resize(image, crop_shape)
label = cv2.resize(label, crop_shape, interpolation = cv2.INTER_NEAREST)
return image, label
|
{"hexsha": "aecb8ebbcfe2ed9cd86737bc533b014d71630487", "size": 17977, "ext": "py", "lang": "Python", "max_stars_repo_path": "code_cm17/models/utils.py", "max_stars_repo_name": "micimize/DigitalHistoPath", "max_stars_repo_head_hexsha": "f2a4dd03761e321c35b1b2e17de3aa4b3ba49511", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-03-23T09:42:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T23:54:18.000Z", "max_issues_repo_path": "code_cm17/models/utils.py", "max_issues_repo_name": "micimize/DigitalHistoPath", "max_issues_repo_head_hexsha": "f2a4dd03761e321c35b1b2e17de3aa4b3ba49511", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-03-31T08:50:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T20:45:29.000Z", "max_forks_repo_path": "code_cm17/models/utils.py", "max_forks_repo_name": "micimize/DigitalHistoPath", "max_forks_repo_head_hexsha": "f2a4dd03761e321c35b1b2e17de3aa4b3ba49511", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-04-02T04:55:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T21:23:16.000Z", "avg_line_length": 44.4975247525, "max_line_length": 135, "alphanum_fraction": 0.5960393837, "include": true, "reason": "import numpy", "num_tokens": 4448}
|
"""Console script for standard_linearity."""
import json
from pathlib import Path
import click
import statsmodels.api as sm
from patsy import dmatrices
from standard_linearity import export_data, import_data
DEFAULT_PLOT_CONFIG = {
"calibration_attrs": None,
"residual_attrs": None,
"errors_attrs": None,
"global_attrs": None,
"figure_attrs": None,
}
@click.command()
@click.option(
"--fig-format", type=str, default="svg", help="Format of the figure file. Refer to matplotlib.pyplot.savefig docs."
)
@click.option(
"--fitting",
"-f",
type=click.Choice(["OLS", "WLS"], case_sensitive=False),
default="OLS",
help="""Algorithm to use for fitting standard curve.
If WLS is used, (1/standard_concentrations)**2 are used as the weighting.""",
)
@click.option(
"--header", "-h", type=int, default=0, help="Parameter used when reading data from csv. Refer to pd.read_csv()."
)
@click.option(
"--nrows", "-n", type=int, default=None, help="Parameter used when reading data from csv. Refer to pd.read_csv()."
)
@click.option(
"--output-dir",
"-o",
type=click.Path(file_okay=False),
default=None,
help="Path to output directory. If not invoked, output directory is generated with a timestamp.",
)
@click.option(
"--plot-config-file",
"-p",
type=click.File(),
default=None,
help="JSON file detailing configuration for plots.",
)
@click.option("--response-colname", "-r", required=True, help="Name of column with response data.")
@click.option(
"--skip-rows", type=int, default=None, help="Parameter used when reading data from csv. Refer to pd.read_csv()."
)
@click.option(
"--standards-colname",
"-s",
required=True,
help="standards_colname: Name of column with standard concentrations in data.",
)
@click.argument(
"input",
required=True,
type=click.Path(exists=True),
)
def main(
fig_format,
fitting,
header,
nrows,
output_dir,
plot_config_file,
response_colname,
standards_colname,
skip_rows,
input,
):
"""Main command for the standard-linearity CLI. # noqa: D417,D415
Args:
input: path to csv file - refer to pd.read_csv docs.
"""
if output_dir:
try:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True)
except FileExistsError:
pass
data = import_data(
path_to_csv=input,
response_colname=response_colname,
standards_colname=standards_colname,
header=header,
nrows=nrows,
skip_rows=skip_rows,
)
response, predictors = dmatrices("standard_concentrations ~ response", data=data, return_type="dataframe")
if fitting == "OLS":
fitted_lm = sm.OLS(response, predictors).fit()
student_resid = fitted_lm.outlier_test()["student_resid"]
else:
lm = sm.WLS(response, predictors, weights=1 / data["standard_concentrations"] ** 2)
fitted_lm = lm.fit()
pseudo_fit = sm.OLS(lm.wendog, lm.wexog).fit()
student_resid = pseudo_fit.get_influence().resid_studentized
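        # Note (illustrative, not from the original code): the studentized
        # residuals for the WLS fit are taken from an equivalent OLS fit on
        # the whitened data (wendog, wexog), i.e. the weighted problem
        # re-expressed as ordinary least squares.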
if plot_config_file:
plot_config = json.load(plot_config_file)
else:
plot_config = DEFAULT_PLOT_CONFIG.copy()
export_data(
data=data,
student_resid=student_resid,
fitted_lm=fitted_lm,
dir_path=output_dir,
fig_format=fig_format,
calibration_attrs=plot_config["calibration_attrs"],
residual_attrs=plot_config["residual_attrs"],
errors_attrs=plot_config["errors_attrs"],
global_attrs=plot_config["global_attrs"],
figure_attrs=plot_config["figure_attrs"],
)
|
{"hexsha": "c0372067a1e820c7ff1a564bda61e8cf78dc158f", "size": 3683, "ext": "py", "lang": "Python", "max_stars_repo_path": "standard_linearity/cli.py", "max_stars_repo_name": "hainesm6/standard-linearity", "max_stars_repo_head_hexsha": "a222878857332bf46bb5e13c1ed579a1b82e4174", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "standard_linearity/cli.py", "max_issues_repo_name": "hainesm6/standard-linearity", "max_issues_repo_head_hexsha": "a222878857332bf46bb5e13c1ed579a1b82e4174", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "standard_linearity/cli.py", "max_forks_repo_name": "hainesm6/standard-linearity", "max_forks_repo_head_hexsha": "a222878857332bf46bb5e13c1ed579a1b82e4174", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.464, "max_line_length": 119, "alphanum_fraction": 0.6619603584, "include": true, "reason": "import statsmodels", "num_tokens": 874}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from . import utils
from . import sfh
from . import corner
from . import sed
from ..io.write_results import chain_to_struct, dict_to_struct
from .figuremaker import FigureMaker, colorcycle
__all__ = ["FigureMaker", "utils", "sfh", "corner", "sed",
"pretty", "chain_to_struct"]
# nice labels for things
pretty = {"logzsol": r"$\log (Z_{\star}/Z_{\odot})$",
"logmass": r"$\log {\rm M}_{\star, {\rm formed}}$",
"gas_logu": r"${\rm U}_{\rm neb}$",
"gas_logz": r"$\log (Z_{\neb}/Z_{\odot})$",
"dust2": r"$\tau_{\rm V}$",
"av": r"${\rm A}_{\rm V, diffuse}$",
"av_bc": r"${\rm A}_{\rm V, young}$",
"dust_index": r"$\Gamma_{\rm dust}$",
"igm_factor": r"${\rm f}_{\rm IGM}$",
"duste_umin": r"$U_{\rm min, dust}$",
"duste_qpah": r"$Q_{\rm PAH}$",
"duste_gamma": r"$\gamma_{\rm dust}$",
"log_fagn": r"$\log({\rm f}_{\rm AGN})$",
"agn_tau": r"$\tau_{\rm AGN}$",
"mwa": r"$\langle t_{\star} \rangle_M$ (Gyr)",
"ssfr": r"$\log ({\rm sSFR})$ $({\rm yr}^{-1})$",
"logsfr": r'$\log({\rm SFR})$ $({\rm M}_{\odot}/{\rm yr}$)',
"tau": r"$\tau$ (Gyr)",
"logtau": r"$\log(\tau)$ (Gyr)",
"tage": r"Age (Gyr)",
"ageprime": r"Age/$\tau$",
"sigma_smooth": r"$\sigma_v$ (km/s)"}
|
{"hexsha": "094a1634aec2b0f6e36ac3d71ed2d729e35f0cb4", "size": 1445, "ext": "py", "lang": "Python", "max_stars_repo_path": "prospect/plotting/__init__.py", "max_stars_repo_name": "wrensuess/prospector", "max_stars_repo_head_hexsha": "08173f84ddfc2b031c78822344fc821778d35bae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 94, "max_stars_repo_stars_event_min_datetime": "2016-10-12T19:29:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:25:39.000Z", "max_issues_repo_path": "prospect/plotting/__init__.py", "max_issues_repo_name": "wrensuess/prospector", "max_issues_repo_head_hexsha": "08173f84ddfc2b031c78822344fc821778d35bae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 168, "max_issues_repo_issues_event_min_datetime": "2016-04-15T20:01:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:03:07.000Z", "max_forks_repo_path": "prospect/plotting/__init__.py", "max_forks_repo_name": "wrensuess/prospector", "max_forks_repo_head_hexsha": "08173f84ddfc2b031c78822344fc821778d35bae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 53, "max_forks_repo_forks_event_min_datetime": "2016-07-14T07:19:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T03:10:28.000Z", "avg_line_length": 34.4047619048, "max_line_length": 70, "alphanum_fraction": 0.4885813149, "include": true, "reason": "import numpy", "num_tokens": 515}
|
(************************************************************************)
(* * The Coq Proof Assistant / The Coq Development Team *)
(* v * INRIA, CNRS and contributors - Copyright 1999-2018 *)
(* <O___,, * (see CREDITS file for the list of authors) *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(* * (see LICENSE file for the text of the license) *)
(************************************************************************)
(** This file provides a constructive form of definite description; it
allows building functions from the proof of their existence in any
context; this is weaker than Church's iota operator *)
Require Import ChoiceFacts.
Set Implicit Arguments.
Axiom constructive_definite_description :
forall (A : Type) (P : A->Prop),
(exists! x, P x) -> { x : A | P x }.
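(* Illustrative corollary (not part of the original file): the axiom lets us
   extract the witness of any unique-existence proof as an actual value. *)
Definition iota (A : Type) (P : A -> Prop)
  (H : exists! x, P x) : A :=
  proj1_sig (constructive_definite_description P H).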
|
{"author": "Priyanka-Mondal", "repo": "Coq", "sha": "220c3eccfa5643b1ca2398d4940e29917da786d9", "save_path": "github-repos/coq/Priyanka-Mondal-Coq", "path": "github-repos/coq/Priyanka-Mondal-Coq/Coq-220c3eccfa5643b1ca2398d4940e29917da786d9/lib/theories/Logic/Description.v"}
|
#%% Plot Best objective vs population
import sys,os
sys.path.insert(0,'../../../')
from glennopt.base import Parameter
from glennopt.helpers import mutation_parameters, de_mutation_type
from glennopt.helpers import get_best,get_pop_best
from glennopt.optimizers import NSGA3
# Generate the DOE
current_dir = os.getcwd()
ns = NSGA3(eval_command = "python evaluation.py", eval_folder="Evaluation",pop_size=20,optimization_folder=current_dir)
eval_parameters = []
eval_parameters.append(Parameter(name="x1",min_value=-10,max_value=10))
eval_parameters.append(Parameter(name="x2",min_value=-10,max_value=10))
eval_parameters.append(Parameter(name="x3",min_value=-10,max_value=10))
ns.add_eval_parameters(eval_params = eval_parameters)
objectives = []
objectives.append(Parameter(name='objective1'))
objectives.append(Parameter(name='objective2'))
ns.add_objectives(objectives=objectives)
# No performance Parameters
performance_parameters = []
performance_parameters.append(Parameter(name='p1'))
performance_parameters.append(Parameter(name='p2'))
performance_parameters.append(Parameter(name='p3'))
ns.add_performance_parameters(performance_params=performance_parameters)
# ns.start_doe(doe_size=40)
# ns.optimize_from_population(pop_start=-1,n_generations=10)
individuals = ns.read_calculation_folder()
ns.to_tecplot()
ns.plot_2D('objective1','objective2')
#%% Plot Best objective vs population
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
objectives, pop, best_fronts = get_best(individuals,pop_size=20)
objective_index = 0
_, ax = plt.subplots()
ax.scatter(pop, objectives[:,objective_index],color='blue',s=10)
ax.set_xlabel('Population')
ax.set_ylabel('Objective {0} Value'.format(objective_index))
plt.show()
#%% Plot Best individual at each population
best_individuals, best_fronts = get_pop_best(individuals)
nobjectives = len(best_individuals[0][0].objectives)
objective_data = list()
for pop,best_individual in best_individuals.items():
objective_data.append(best_individual[objective_index].objectives[objective_index])
_,ax = plt.subplots()
colors = cm.rainbow(np.linspace(0, 1, len(best_individuals.keys())))
ax.scatter(list(best_individuals.keys()), objective_data, color='blue',s=10)
ax.set_xlabel('Population')
ax.set_ylabel('Objective {0} Value'.format(objective_index))
ax.set_title('Best individual at each population')
plt.show()
#%% Plot the pareto front
best_individuals, best_fronts = get_pop_best(individuals)
objectives, pop, best_fronts = get_best(individuals,pop_size=30)
fig,ax = plt.subplots(figsize=(10,8))
colors = cm.rainbow(np.linspace(0, 1, len(best_fronts)))
indx = 0
legend_labels = []
# Scan the pandas file, grab objectives for each population
for ind_list in best_fronts:
obj1_data = []
obj2_data = []
c=colors[indx]
for ind in ind_list[0]:
obj1_data.append(ind.objectives[0])
obj2_data.append(ind.objectives[1])
# Plot the gathered data
ax.scatter(obj1_data, obj2_data, color=c, s=20,alpha=0.5)
legend_labels.append(pop[indx])
indx+=1
ax.set_xlabel('Objective 1')
ax.set_ylabel('Objective 2')
ax.set_title('Non-dominated sorting: Best Front for each population')
ax.legend(legend_labels)
fig.canvas.draw()
fig.canvas.flush_events()
plt.show()
# %%
|
{"hexsha": "21e79aa63aad9182836ac9423f76ed6d498a22a7", "size": 3302, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/kur/serial/optimization_plot.py", "max_stars_repo_name": "kant/GlennOPT", "max_stars_repo_head_hexsha": "ca816c3708a2db5b98f8f1a7885305a8e18e179e", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/kur/serial/optimization_plot.py", "max_issues_repo_name": "kant/GlennOPT", "max_issues_repo_head_hexsha": "ca816c3708a2db5b98f8f1a7885305a8e18e179e", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/kur/serial/optimization_plot.py", "max_forks_repo_name": "kant/GlennOPT", "max_forks_repo_head_hexsha": "ca816c3708a2db5b98f8f1a7885305a8e18e179e", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.02, "max_line_length": 119, "alphanum_fraction": 0.7722592368, "include": true, "reason": "import numpy", "num_tokens": 803}
|
#from .utils import try_gpu
import numpy as np
from itertools import combinations
import seaborn as sns
from scipy.spatial import distance as spdist
import tqdm
from torch_geometric.data import Data, Batch
import torch
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem import rdFMCS
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
from io import BytesIO
from PIL import Image
import base64
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import HoverTool, ColumnDataSource, CategoricalColorMapper
from bokeh.palettes import Spectral10
possible_atom_list = [
'S', 'Si', 'F', 'Fl', 'O', 'C', 'I', 'P', 'Cl',
'Br', 'N', 'Unknown'
]
possible_hybridization_list = [
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2
]
def atom_hot_encoding(atom, allowable_set):
"""Maps inputs not in the allowable set to the last element."""
if atom not in allowable_set:
atom = allowable_set[-1]
return list(map(lambda s: atom == s, allowable_set))
def safe_index(l, e):
"Gets the index of elem e in list l."
try:
return l.index(e)
# If not in list, map as unknown symbol's index
    except ValueError:
return len(l)
def get_feature_list(atom):
    "Get features for a given atom using RDKit."
    features = [safe_index(possible_atom_list, atom.GetSymbol())]
    return features
def bond_features(bond)->np.array:
"""One hot encodes a single bond from a molecule."""
bt = bond.GetBondType()
bond_feats = [
bt == Chem.rdchem.BondType.SINGLE,
bt == Chem.rdchem.BondType.DOUBLE,
bt == Chem.rdchem.BondType.TRIPLE,
bt == Chem.rdchem.BondType.AROMATIC
]
return np.array(bond_feats).astype(np.float32)
def atom_features(atom, use_chirality = False)->np.array:
"""
Feature extraction for a single atom of a molecule.
"""
atom_feats = atom_hot_encoding(atom.GetSymbol(), possible_atom_list)
atom_feats.append(atom.GetDegree())
atom_feats.append(atom.GetFormalCharge())
atom_feats.append(atom.GetNumRadicalElectrons())
atom_feats.append(atom.GetIsAromatic())
atom_feats.append(atom.GetImplicitValence())
atom_feats.append(
safe_index(possible_hybridization_list, atom.GetHybridization())
)
if use_chirality:
try:
atom_feats += atom_hot_encoding(atom.GetProp('_CIPCode'),['R', 'S'])
atom_feats += [atom.HasProp('_ChiralityPossible')]
        except KeyError:
atom_feats +=[False, False]
atom_feats +=[atom.HasProp('_ChiralityPossible')]
return np.array(atom_feats).astype(np.float32)
#TO-DO atom_features_pandas(mol):
def get_bond_pair(mol):#->tuple(list, np.array):
"""
Returns the indices and adjacency matrix for all bonds in a molecular graph.
"""
bonds = mol.GetBonds()
indices =[[], []]
n_atoms = mol.GetNumAtoms()
adj = np.zeros((n_atoms, n_atoms))
for bond in bonds:
begin_ix = bond.GetBeginAtomIdx()
end_ix = bond.GetEndAtomIdx()
indices[0]+= [begin_ix]
indices[1]+= [end_ix]
adj[begin_ix, end_ix]=1
adj[end_ix, begin_ix]=1
return indices, adj
def mol2graph_data(mol)->tuple:
"""
    Returns node features, edge indices, and edge (bond) features
    for a molecular graph.
"""
atoms = mol.GetAtoms()
bonds = mol.GetBonds()
node_feats = [atom_features(atom) for atom in atoms]
edge_ixs, adj = get_bond_pair(mol)
edge_feats = [bond_features(bond) for bond in bonds]
return np.stack(node_feats), np.stack(edge_ixs), np.stack(edge_feats)#, adj
def mol2tensors(mol, use_gpu = True):
"""
Generates a torch_geometric.data.Data object from
an RDkit molecule.
"""
#node_feats, edge_ixs, edge_feats, adj = mol2graph_data(mol)
#cuda = torch.cuda.is_available()
if use_gpu:
device = try_gpu()
else:
device = torch.device('cpu')
node_feats, edge_ixs, edge_feats = mol2graph_data(mol)
data = Data(
x = torch.tensor(node_feats, dtype = torch.float, device = device),
edge_index =torch.tensor(edge_ixs, dtype = torch.long, device = device),
edge_attr = torch.tensor(edge_feats, dtype = torch.float, device = device)
)
return data
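def _demo_mol2tensors(smiles="CCO"):
    """Hypothetical usage sketch (not part of the original file): convert a
    SMILES string into a torch_geometric Data object with the helpers above."""
    mol = Chem.MolFromSmiles(smiles)
    data = mol2tensors(mol, use_gpu=False)
    # data.x: (n_atoms, n_atom_features), data.edge_index: (2, n_bonds)
    return data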
def n_atom_features():
    atom = Chem.MolFromSmiles('C').GetAtomWithIdx(0)
    return len(atom_features(atom))
def n_bond_features():
    bond = Chem.MolFromSmiles('CC').GetBondWithIdx(0)
    return len(bond_features(bond))
def get_fp(mol, bits = 512)->np.array:
"Returns Morgan Fingerprint given an RDKit molecule."
fp = AllChem.GetMorganFingerprintAsBitVect(mol, 3, nBits=bits)
arr = np.zeros((0,), dtype=np.int8)
DataStructs.ConvertToNumpyArray(fp,arr)
return arr
def get_mcs(ref_mol, query_mol):
"""
Returns the indices of the maximum common substructure (MCS)
on a query molecule given a reference molecule.
Params
------
ref_mol(rdkit mol)
Reference molecule.
query_mol (rdkit mol)
Query molecule.
Returns
-------
ix_matches(tuple)
Tuple of tuples with indices corresponding to the nodes
where a match was found on the query molecule.
"""
res = rdFMCS.FindMCS([ref_mol, query_mol])
# Max common substructure
mcs = Chem.MolFromSmarts(res.smartsString)
ix_matches = query_mol.GetSubstructMatches(mcs)
return ix_matches
def get_mcs_multi(ref_mol, query_mols):
"""
Returns the Max Common Substructure (MCS) given
a reference molecule and a list of query molecules.
Params
------
ref_mol(rdkit mol)
Reference molecule.
query_mols (list)
List of rdkit molecules for query.
Returns
-------
list_matches (list)
List of indices where a match was found for each
query molecule.
"""
res = rdFMCS.FindMCS([ref_mol] + query_mols)
    mcs = Chem.MolFromSmarts(res.smartsString)
list_matches = []
for mol in query_mols:
try:
ix_match = mol.GetSubstructMatches(mcs)
list_matches.append(ix_match[0])
# Exception when no match was found.
        except IndexError:
list_matches.append(None)
return list_matches
def get_cam_weight(graph, model):
"""
Assumes an architecture of a single fully-connected layer
after the graph conv layers.
Params
------
graph(torch_geometric.Data.data.data)
Graph to get node activations from.
"""
# Get predicted class
batch_ = Batch.from_data_list([graph])
logits = model(batch_)
top_ix = logits.argmax()
# Get row of weight matrix corresponding
# to predicted class
weight_matrix_params = list(
model._modules.get('final_layer').parameters()
)
weight_matrix = weight_matrix_params[0]
w_top = weight_matrix[top_ix]
return w_top
def get_gradCAM_activations(graph, model):
"""
Returns the gradCAM activation scores per node in a graph.
Params
------
graph(torch_geometric.Data.data.data)
Graph to get node activations from.
model (nn.Module)
Graph conv net classifier model.
Returns
-------
grad_cam_avg (array-like)
Notes: Grad CAM (https://arxiv.org/pdf/1610.02391.pdf) computes
∂(y_top) / ∂G, where G is the output of the last conv layer before
average pooling, i.e. the last node embeddings.
In this sense GradCAM is the linear approximation of the neural network
downstream of G, and captures the importance of each node embedding for
a target class.
"""
model.eval()
# Get graph embedding
embedding = model.project(
Batch.from_data_list([graph]),
reg_hook = True,
pool =True
)
# Make predictions without softmax
logits = model.final_layer(embedding)
top_ix = logits.argmax()
#print('Predicted class: ', uniques[top_ix])
# Call backward pass to compute gradients
logits[0, top_ix].backward()
# Extract gradients from hook
gradients = model.get_activations_gradient()
# Compute node embeddings
with torch.no_grad():
node_embeddings = model.project(
Batch.from_data_list([graph]),
pool = False,
reg_hook=False
)
# Get grad CAM
grad_cam = gradients*node_embeddings
grad_cam_avg = grad_cam.mean(axis = 1).detach().numpy()
return grad_cam_avg
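def _demo_gradcam_usage(model, smiles="CCO"):
    """Hypothetical usage sketch (not part of the original file): color a
    molecule by its Grad-CAM node scores using the helpers defined above.
    Assumes `model` exposes the project()/final_layer interface used by
    get_gradCAM_activations."""
    mol = Chem.MolFromSmiles(smiles)
    graph = mol2tensors(mol, use_gpu=False)
    scores = get_gradCAM_activations(graph, model)
    plot_node_activations(mol, scores, "gradcam_demo.png", plot_cbar=True)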
def get_CAM_activations(graph, model, weight_vector):
"""
Params
------
graph(torch_geometric.Data.data.data)
Graph to get node activations from.
model (nn.Module)
Graph conv net classifier model.
weight_vector (torch.tensor)
It is the row of the weight matrix that
produced the highest logit `W[ix_top]`. This is precisely
class activation map method.
Returns
------
node_activations (np.array)
Array with Class Activation Map (CAM) scores per node.
"""
x, edge_index = graph.x, graph.edge_index
with torch.no_grad():
# Run through conv layers
for conv_layer in model.conv_encoder:
x = conv_layer(x, edge_index)
x = torch.tanh(x)
#Compute node activation map scores
act_map = weight_vector.view(1, -1) @ x.t()
node_activations = act_map.numpy().flatten()
return node_activations
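def _demo_cam_usage(model, smiles="CCO"):
    """Hypothetical usage sketch (not part of the original file): CAM scores
    from the weight row of the predicted class, as described above."""
    mol = Chem.MolFromSmiles(smiles)
    graph = mol2tensors(mol, use_gpu=False)
    w_top = get_cam_weight(graph, model)
    return get_CAM_activations(graph, model, w_top)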
def get_embedding_activations(graph, model, weight_matrix):
"""
Returns node activation of a graph using the embedding activation
method.
Params
------
graph(torch_geometric.Data.data.data)
Graph to get node activations from.
model (nn.Module)
Graph conv network model. It must support node embedding
generation using model.project(x, pool=False).
weight_matrix (torch.tensor)
Weight matrix, takes as input an average node embedding
(i.e. graph embedding ϕ`) and converts into a lower dimensional
graph embedding ϕ''.
Returns
-------
node_activations(np.array)
shape (nodes,)
"""
with torch.no_grad():
node_embeddings = model.project(graph, pool =False)
node_activations = node_embeddings@weight_matrix.T
return node_activations.numpy()
def plot_node_activations(
mol,
node_activations,
fig_fname,
plot_cbar = False,
vmin = None,
vmax = None
):
"""
Saves a molecule colored by node activations in a directory
specified by `fname`.
Params
------
mol (rdkit.Chem.rdchem.Mol)
RDKit molecule.
node_activations (array-like)
Per node activation map generated by e.g. `get_CAM_activations()`.
fig_fname(str)
path to save file + file name
plot_cbar(bool, default = False)
Whether or not to plot colorbar to get a sense of scale.
Notes: Uses viridis by default. Doesn't return an object.
"""
min_act, max_act = node_activations.min(), node_activations.max()
normalizer =mpl.colors.Normalize(vmin=min_act, vmax = max_act)
cmap = cm.viridis
mapper = cm.ScalarMappable(norm=normalizer, cmap=cmap)
d = rdMolDraw2D.MolDraw2DCairo(500, 500)
n_atoms = len(list(mol.GetAtoms()))
rdMolDraw2D.PrepareAndDrawMolecule(
d,
mol,
highlightAtoms=list(range(n_atoms)),
highlightAtomColors={
i: mapper.to_rgba(node_activations[i]) for i in range(n_atoms)
},
)
with open(fig_fname,'wb') as file:
file.write(d.GetDrawingText())
if plot_cbar:
plt.imshow(
np.linspace(min_act, max_act, 10).reshape(1, -1),
cmap = cmap
)
plt.gca().set_visible(False)
plt.colorbar(orientation = 'horizontal')
plt.savefig(
fig_fname.split('.png')[0] + '_cbar.png',
dpi = 230,
bbox_inches='tight'
)
return None
def get_drug_batch(labels_batch, name_to_mol, ix_to_name, cuda = False):
"Returns a list of torch.geometric Data object given a list of sample codes."
if cuda:
cuda = torch.cuda.is_available()
drug_graphs = []
for x in labels_batch:
graph = mol2tensors(
name_to_mol[ix_to_name[x.item()]], use_gpu = cuda
)
if cuda:
#print(c)
graph.x = graph.x.cuda()
graph.edge_index = graph.edge_index.cuda()
graph.edge_attr = graph.edge_attr.cuda()
drug_graphs.append(graph)
return drug_graphs
def mol_to_bokeh_encodable(mol):
"""
Returns a bytes string readable by bokeh using hovertooltips.
"""
# Get PIL image
im = Chem.Draw.MolToImage(mol, size = (130, 140))
# Initialize in-memory bytes buffer
buffer = BytesIO()
# Load image data onto buffer
im.save(buffer, format='png')
# Get bytes data
for_encoding=buffer.getvalue()
return 'data:image/png;base64,' + base64.b64encode(for_encoding).decode()
from rdkit.Chem.Scaffolds import MurckoScaffold
def deconstruct_mol(mol):
"Returns a scaffold and sidechains from a molecule."
core = MurckoScaffold.GetScaffoldForMol(mol)
tmp = Chem.ReplaceCore(mol, core, labelByIndex=True)
frags = Chem.GetMolFrags(tmp, asMols=True)
return core, frags
def try_gpu(i=0):
"""
Return gpu(i) if exists, otherwise return cpu().
Extracted from https://github.com/d2l-ai/d2l-en/blob/master/d2l/torch.py
"""
if torch.cuda.device_count() >= i + 1:
return torch.device(f'cuda:{i}')
return torch.device('cpu')
from sklearn.preprocessing import StandardScaler
def read_energy_dataset(fn):
    "Read an energy csv, tag it with its protein name, and z-score the energies."
    import pandas as pd
    scaler = StandardScaler()
    df = pd.read_csv(fn)
    prot = fn.split("_")[0]
    df["prot"] = prot
    df["energies_norm"] = scaler.fit_transform(df.energy.values.reshape(-1, 1)).flatten()
    return df
|
{"hexsha": "100615e6f0f0f6910a92f5a673230bb8d8e17dc6", "size": 13905, "ext": "py", "lang": "Python", "max_stars_repo_path": "magma/chemspace.py", "max_stars_repo_name": "manuflores/magma", "max_stars_repo_head_hexsha": "352e1c15bbbbb8e9b46337de5d91a142fd7c757d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "magma/chemspace.py", "max_issues_repo_name": "manuflores/magma", "max_issues_repo_head_hexsha": "352e1c15bbbbb8e9b46337de5d91a142fd7c757d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "magma/chemspace.py", "max_forks_repo_name": "manuflores/magma", "max_forks_repo_head_hexsha": "352e1c15bbbbb8e9b46337de5d91a142fd7c757d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-11T20:48:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-11T20:48:28.000Z", "avg_line_length": 24.224738676, "max_line_length": 88, "alphanum_fraction": 0.6665228335, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3468}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 14:56:07 2020
@author: jenzyy
"""
import imageio
import matplotlib.pyplot as plt
import numpy as np
'''
# set wd
import os
os.chdir('./Documents/GitHub/Randomized-SVD/jennifer/watermark')
'''
# read in image
im = imageio.imread("rose.jpg")
# watermark
W = imageio.imread("cat.jpg")
#W = np.random.rand(rows,rows)
Wp = imageio.imread("dog.jpg")
# scale
a = 0.1
def im_stack(im):
im_type = im.dtype
im = im.astype(np.float64)
rows,cols = im.shape[:2]
im_stacked = im.reshape(rows,-1)
return im_stacked, im_type
def im_stack_s(im, im_type):
rows = im.shape[0]
cols = im.shape[1]//3
im_m = im.reshape(rows, cols, -1)
im_m = im_m.astype(im_type)
plt.imshow(im_m)
plt.show()
im_stacked, im_type = im_stack(im)
W_stacked, W_type = im_stack(W)
Wp_stacked, Wp_type = im_stack(Wp)
def watermark_image(im, W, a):
rows,cols = im.shape[:2]
U,S,V = np.linalg.svd(im,full_matrices = False)
Wp = np.pad(W,[(0, rows - W.shape[0]), (0, rows - W.shape[1])])
Aw = np.diag(S)+a*Wp
Uw,Sw,Vw = np.linalg.svd(Aw,full_matrices = True)
marked = U @ np.diag(Sw) @ V
return marked, Uw, S, Vw
# show output
marked, Uw, S, Vw = watermark_image(im_stacked, W_stacked,0.1)
im_stack_s(marked,im_type)
# extract watermark
def watermark_extract(marked, Uw, S,Vw, a):
Um, Sm, Vm = np.linalg.svd(marked)
M = (Uw @ np.diag(Sm) @ Vw - np.diag(S))/a
#rows = len(S)
#Mp = np.pad(M,[(0, M.shape[0]- rows), (0, M.shape[1] - rows)])
return M
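def _demo_roundtrip(im, W, a=0.1):
    """Illustrative check (not part of the original script): embedding followed
    by extraction should recover the watermark up to numerical error, since
    marked = U diag(Sw) V and Aw = Uw diag(Sw) Vw by construction."""
    marked, Uw, S, Vw = watermark_image(im, W, a)
    M = watermark_extract(marked, Uw, S, Vw, a)
    M = M[:W.shape[0], :W.shape[1]]
    return np.max(np.abs(M - W))  # expected to be near zero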
# test output
M = watermark_extract(marked, Uw, S, Vw, a)
Mrow, Mcol = W_stacked.shape
M = M[:Mrow, :Mcol]
im_stack_s(M, W_type)
# extract wrong watermark
marked_p, Uw_p, S_p, Vw_p = watermark_image(im_stacked, Wp_stacked,0.1)
Mp = watermark_extract(marked, Uw_p, S, Vw_p, a)
Mprow, Mpcol = Wp_stacked.shape
Mp = Mp[:Mprow, :Mpcol]
im_stack_s(Mp, Wp_type)
|
{"hexsha": "d9af2b30117303241b13ac1db64bb6ccd43b6008", "size": 1936, "ext": "py", "lang": "Python", "max_stars_repo_path": "jennifer/watermark.py", "max_stars_repo_name": "S-I-SVD/Randomized-SVD", "max_stars_repo_head_hexsha": "82108238a53c70938af87417f98aadc7f74b2a87", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-09T13:34:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T13:34:44.000Z", "max_issues_repo_path": "jennifer/watermark.py", "max_issues_repo_name": "S-I-SVD/Randomized-SVD", "max_issues_repo_head_hexsha": "82108238a53c70938af87417f98aadc7f74b2a87", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jennifer/watermark.py", "max_forks_repo_name": "S-I-SVD/Randomized-SVD", "max_forks_repo_head_hexsha": "82108238a53c70938af87417f98aadc7f74b2a87", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2, "max_line_length": 71, "alphanum_fraction": 0.6492768595, "include": true, "reason": "import numpy", "num_tokens": 680}
|
import pandas as pd
import numpy as np
import tensorflow as tf
import dask
import scipy
import time
from functools import partial
from abc import ABCMeta, abstractmethod
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import FunctionalApproach
import plottingTools
class FunctionalModelFwd(FunctionalApproach.FunctionalModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestFunctionalModelFwd"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
def buildArchitecture(self):
nbCoordinates = 3
#Location to interpolate which are common to each day
#Factors values for each day
self.factorTensor = tf.placeholder(tf.float32,
shape=[None, self.nbFactors],
name = "factorTensor")
#Get the number of days as dynamic shape
numObs = tf.shape(self.factorTensor)[0]
#Repeat the locations for each day
self.inputTensor = tf.placeholder(tf.float32, shape=[None, nbCoordinates], name = "inputTensor")
reshapedInputTensor = tf.reshape(self.inputTensor,
tf.stack([numObs, -1, nbCoordinates]),
name = "reshapedInputTensor") #[None, None, 2]
# reshapedInputTensor = tf.tile(tf.expand_dims(self.inputTensor, 0),
# tf.stack([numObs, 1, 1]))
#Repeat the factors for each location
surfaceSize = tf.shape(reshapedInputTensor,
name = "surfaceSize")[1]
reshapedFactorTensor = tf.tile(tf.expand_dims(self.factorTensor, 1),
tf.stack([1, surfaceSize, 1]),
name = "reshapedFactorTensor")
if self.verbose :
print(self.inputTensor)
print(reshapedInputTensor)
#self.factorTensor = tf.Variable(np.ones(shape=(1, self.nbFactors)).astype(np.float32))
if self.verbose :
print(self.factorTensor)
print(reshapedFactorTensor)
        #Dynamic shape of features should be [None, surfaceSize, 3 + self.nbFactors], that is [nbDays, surfaceSize, 3 + self.nbFactors]
features = tf.reshape(tf.concat([reshapedInputTensor, reshapedFactorTensor],2),
[-1, nbCoordinates + self.nbFactors],
name = "features")
# filteredFeatures = tf.boolean_mask(features,
# tf.logical_not(tf.reduce_any(tf.is_nan(features), axis=1)),
# name = "filteredFeatures")
# partitions = tf.cast(tf.logical_not(tf.reduce_any(tf.is_nan(features), axis=1)), tf.int32)
# filteredFeatures = tf.dynamic_partition(features,
# partitions,
# num_partitions = 2,
# name = "filteredFeaturesVar")[1]
filteredFeatures = tf.where(tf.reduce_any(tf.is_nan(features), axis=1),
tf.zeros_like(features),
features)
#Factors values for each day
# self.factorTensor = tf.placeholder(tf.float32,
# shape=[None, self.nbFactors],
# name = "factorTensor")
# self.inputTensor = tf.placeholder(tf.float32, shape=[None, nbCoordinates], name = "inputTensor")
# filteredFeatures = tf.concat([self.factorTensor, self.inputTensor],1 , name = "features")
# if self.verbose :
# print(self.inputTensor)
# print(self.factorTensor)
# print(filteredFeatures)
# if self.verbose :
# print(features)
# print(filteredFeatures)
#Build neuronal architecture
he_init = tf.contrib.layers.variance_scaling_initializer(factor=1.0,
mode='FAN_AVG',
uniform=True)
l2_regularizer = tf.contrib.layers.l2_regularizer(self.hyperParameters['l2_reg'])
hidden1 = self.buildDenseLayer(20,
filteredFeatures,
activation = tf.nn.softplus,
kernelRegularizer = l2_regularizer,
kernelInitializer = he_init)
if self.verbose :
print(hidden1)
hidden2 = self.buildDenseLayer(20,
hidden1,
activation = tf.nn.softplus,
kernelRegularizer = l2_regularizer,
kernelInitializer = he_init)
if self.verbose :
print(hidden2)
hidden3 = self.buildDenseLayer(1,
hidden2,
activation = None,
kernelRegularizer = l2_regularizer,
kernelInitializer = he_init)
if self.verbose :
print(hidden3)
#Reshape output as (nbDays, surfaceSize)
self.outputTensor = hidden3#tf.reshape(hidden3, tf.stack([-1, surfaceSize]))
if self.verbose :
print(self.outputTensor)
return
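    # Shape sketch (illustrative, not from the original code): with numObs days,
    # S grid points per day and F = nbFactors,
    #   inputTensor  : (numObs*S, 3) -> reshaped to (numObs, S, 3)
    #   factorTensor : (numObs, F)   -> tiled     to (numObs, S, F)
    # so every grid point is concatenated with its day's factor vector,
    # giving features of shape (numObs*S, 3 + F).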
#Build a tensor that construct a surface from factors values
def buildReconstructionTensor(self, factorTensor):
nbCoordinates = 3
#Get the number of days as dynamic shape
numObs = tf.shape(factorTensor)[0]
#Repeat the locations for each day
reshapedInputTensor = tf.reshape(self.inputTensor,
tf.stack([numObs, -1, nbCoordinates]),
name = "reshapedInputTensorVar")
# reshapedInputTensor = tf.tile(tf.expand_dims(self.inputTensor, 0),
# tf.stack([numObs, 1, 1]))
#Repeat the factors for each location
surfaceSize = tf.shape(reshapedInputTensor)[1]
reshapedFactorTensor = tf.tile(tf.expand_dims(factorTensor, 1),
tf.stack([1, surfaceSize, 1]),
name = "reshapedFactorTensorVar")
if self.verbose :
print(self.inputTensor)
print(reshapedInputTensor)
#self.factorTensor = tf.Variable(np.ones(shape=(1, self.nbFactors)).astype(np.float32))
if self.verbose :
print(factorTensor)
print(reshapedFactorTensor)
#Dynamic Shape of features shoud be [None, surfaceSize, 3 + self.nbFactors] that is [nbDays, surfaceSize, 3 + self.nbFactors]
features = tf.reshape(tf.concat([reshapedInputTensor, reshapedFactorTensor],2),
[-1, nbCoordinates + self.nbFactors],
name = "featuresVar")
self.features = features
# filteredFeatures = tf.boolean_mask(features,
# tf.logical_not(tf.reduce_any(tf.is_nan(features), axis=1)),
# name = "filteredFeaturesVar")
# partitions = tf.cast(tf.logical_not(tf.reduce_any(tf.is_nan(features), axis=1)), tf.int32)
# filteredFeatures = tf.dynamic_partition(features,
# partitions,
# num_partitions = 2,
# name = "filteredFeaturesVar")[1]
filteredFeatures = tf.where(tf.reduce_any(tf.is_nan(features), axis=1),
tf.zeros_like(features),
features)
self.filteredFeatures = filteredFeatures
if self.verbose :
print(features)
print(filteredFeatures)
lastTensor = filteredFeatures
#Iterate on layers to build the decoder with the same weights as those used for training
for factory in self.layers :
lastTensor = factory(lastTensor)
reshapedOutputTensor = lastTensor#tf.reshape(lastTensor,[-1, surfaceSize])
self.trainingPred = reshapedOutputTensor
if self.verbose :
print(reshapedOutputTensor)
return reshapedOutputTensor
#Extract for each day the volatility value as output values the coordinates as input input values
def getLocationFromDatasetList(self, dataSet):
if dataSet[1].ndim > 1 :#historical data
nbObs = dataSet[1].shape[0]
nbPoints = dataSet[1].shape[1]
vol = dataSet[0].values if dataSet[0] is not None else dataSet[0]
coordinates = dataSet[1]
yCoor = np.ravel(coordinates.applymap(lambda x : x[1]))
xCoor = np.ravel(coordinates.applymap(lambda x : x[0]))
fwd = np.ravel(dataSet[2].values)
l_Feature = np.reshape(np.vstack([xCoor, yCoor, fwd]).T, (nbObs, nbPoints, 3))
else :#Data for a single day
nbObs = 1
nbPoints = dataSet[1].shape[0]
vol = np.expand_dims(dataSet[0].values, 0) if dataSet[0] is not None else dataSet[0]
coordinates = dataSet[1]
yCoor = np.ravel(coordinates.map(lambda x : x[1]))
xCoor = np.ravel(coordinates.map(lambda x : x[0]))
fwd = np.ravel(dataSet[2].values)
l_Feature = np.reshape(np.vstack([xCoor, yCoor, fwd]).T, (nbObs, nbPoints, 3))
return l_Feature, vol
def createFeedDictEncoder(self, dataSetList):
feedDict = {self.inputTensor : np.reshape(dataSetList[0],(-1,3)),
self.outputTensorRef : np.reshape(dataSetList[1],(-1,1))}
return feedDict
def createFeedDictDecoder(self, *args):
feedDict = {self.inputTensor : np.reshape(args[0][0], (-1,3)),
self.factorTensor : args[1]}
return feedDict
def plotInterpolatedSurface(self,
valueToInterpolate,
locationToInterpolate,
calibratedFactors,
exogenousVariable,
colorMapSystem=None,
plotType=None):
y = list(map( lambda x : x[1] if x is not None else x,
locationToInterpolate.values))
x = list(map( lambda x : x[0] if x is not None else x,
locationToInterpolate.values))
xMax = np.nanmax(x)
yMax = np.nanmax(y)
xMin = np.nanmin(x)
yMin = np.nanmin(y)
xNewValues = np.linspace(xMin,xMax,num=100)
yNewValues = np.linspace(yMin,yMax,num=100)
grid = np.meshgrid(xNewValues,yNewValues)
xInterpolated = np.reshape(grid[0],(100 * 100, 1))
yInterpolated = np.reshape(grid[1],(100 * 100, 1))
coordinates = np.concatenate([xInterpolated, yInterpolated], axis=1)
ind = pd.Series(list(zip(xInterpolated,yInterpolated))).rename(locationToInterpolate.name)
interpolatedSurface = self.evalSingleDayWithoutCalibrationOnCustomLocation(calibratedFactors,
[None, ind, exogenousVariable, None])
interpolatedSurfaceDf = pd.Series(interpolatedSurface, index = ind.index)
plottingTools.plotGrid(interpolatedSurfaceDf,
ind,
"Interpolated Data with Functional Approach",
colorMapSystem=colorMapSystem,
plotType=plotType)
plottingTools.standardInterp(valueToInterpolate,
locationToInterpolate,
colorMapSystem=colorMapSystem,
plotType=plotType)
return
|
{"hexsha": "85d1220a76fb6b308513b625603e720c3cbb88e9", "size": 13327, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/FunctionalFwdApproach.py", "max_stars_repo_name": "mChataign/smileCompletion", "max_stars_repo_head_hexsha": "1bde2dd9fada2194c79cb3599bc9e9139cde6ee5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-06T13:53:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T21:23:13.000Z", "max_issues_repo_path": "Code/FunctionalFwdApproach.py", "max_issues_repo_name": "mChataign/smileCompletion", "max_issues_repo_head_hexsha": "1bde2dd9fada2194c79cb3599bc9e9139cde6ee5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/FunctionalFwdApproach.py", "max_forks_repo_name": "mChataign/smileCompletion", "max_forks_repo_head_hexsha": "1bde2dd9fada2194c79cb3599bc9e9139cde6ee5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-06T20:53:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T08:59:31.000Z", "avg_line_length": 46.5979020979, "max_line_length": 134, "alphanum_fraction": 0.4983867337, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2499}
|
import cv2 as cv
import const
import numpy as np
from graph_based import Graph
from matplotlib import pyplot as plt
def merge(a, b):
shape = a.shape
height = shape[0]
width = shape[1]
res = np.ones(shape)
for y in range(height):
for x in range(width):
if a[y, x] == 0 and b[y, x] == 0:
res[y, x] = 0
return res
def fill_hole(im_in):
im_floodfill = im_in.copy()
# Mask used to flood filling.
# Notice the size needs to be 2 pixels than the image.
h, w = im_in.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv.floodFill(im_floodfill, mask, (0, 0), 255)
# Invert floodfilled image
im_floodfill_inv = cv.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = im_in | im_floodfill_inv
return im_out
def find_dummy(image, i, j, thr, val):
dummy = image.copy()
stack = [(i, j)]
while len(stack) != 0:
seed = stack.pop()
y, x = seed
while y < 256 and dummy[y, x] < thr:
dummy[y, x] = val
y += 1
y_right = y - 1
y, x = seed
y -= 1
while 0 <= y and dummy[y, x] < thr:
dummy[y, x] = val
y -= 1
y_left = y + 1
# join left and right seeds
if x > 0 and dummy[y_left, x - 1] < thr:
stack.append((y_left, x - 1))
if x < 255 and dummy[y_left, x + 1] < thr:
stack.append((y_left, x + 1))
if x > 0 and dummy[y_right, x - 1] < thr:
stack.append((y_right, x - 1))
if x < 255 and dummy[y_right, x + 1] < thr:
stack.append((y_right, x + 1))
# for y in range(y_left, y_right + 1):
#
# if x > 0 and y > 0 and dummy[y, x - 1] < thr < dummy[y - 1, x - 1]:
# stack.append((y, x - 1))
#
# if x < 255 and y > 0 and dummy[y, x + 1] < thr < dummy[y - 1, x + 1]:
# stack.append((y, x + 1))
#
# if x > 0 and y > 0 and dummy[y, x - 1] > thr > dummy[y - 1, x - 1]:
# stack.append((y - 1, x - 1))
#
# if x < 255 and y > 0 and dummy[y, x + 1] > thr > dummy[y - 1, x + 1]:
# stack.append((y - 1, x + 1))
# shape = dummy.shape
# height = shape[0]
# width = shape[1]
# for y in range(height):
# for x in range(width):
# if dummy[y, x] == val:
# dummy[y, x] = 0
# else:
# dummy[y, x] = 1
return dummy
def find_square(dummy):
shape = dummy.shape
height = shape[0]
width = shape[1]
hor = np.zeros((height + 1, height + 1), dtype=int)
ver = np.zeros((width + 1, width + 1), dtype=int)
param1 = [-1, -1, 0, 0]
for y in range(height - 1, -1, -1):
for x in range(width - 1, -1, -1):
if dummy[y, x] == 1:
continue
left = hor[y, x - 1] + 1
top = ver[y - 1, x] + 1
hor[y - 1, x - 1] = left
ver[y - 1, x - 1] = top
if left * top > param1[2] * param1[3]:
param1 = [y, x, left, top]
hor = np.zeros((height + 1, height + 1), dtype=int)
ver = np.zeros((width + 1, width + 1), dtype=int)
param2 = [-1, -1, 0, 0]
for y in range(height):
for x in range(width):
if dummy[y, x] == 1:
continue
right = hor[y, x + 1] + 1
bottom = ver[y + 1, x] + 1
hor[y + 1, x + 1] = right
ver[y + 1, x + 1] = bottom
if right * bottom > param2[2] * param2[3]:
param2 = [y, x, right, bottom]
return [param1[0], param2[0], param1[1], param2[1]]
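def _demo_find_square():
    """Tiny illustrative case (not part of the original file): the DP above
    accumulates horizontal (hor) and vertical (ver) runs of zeros from both
    corners and keeps the extremes maximizing the covered area."""
    toy = np.ones((4, 4), dtype=int)
    toy[1:3, 1:3] = 0        # a 2x2 block of zeros
    return find_square(toy)  # returns [1, 2, 1, 2]: rows 1..2, cols 1..2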
class Dummy(object):
debug = True
thr = 73
def __init__(self, image):
self.image = image
self.result = None
self.dummy = None
def run(self):
image = self.image
shape = image.shape
height = shape[0]
width = shape[1]
# plt.hist(image.ravel(), 256, [0, 256])
# plt.show()
        # ret, binary = cv.threshold(image, 50, 255, cv.THRESH_BINARY)  # fixed threshold of 50
        # print("binary threshold: %s" % ret)
        # ret_otsu, binary_otsu = cv.threshold(image, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
        # print("binary threshold (Otsu): %s" % ret_otsu)
        # ret_tri, binary_tri = cv.threshold(image, 0, 255, cv.THRESH_BINARY | cv.THRESH_TRIANGLE)
        # print("binary threshold (triangle): %s" % ret_tri)
result = np.zeros(shape)
if self.debug:
dummy = np.ones(shape)
for y in range(height):
for x in range(width):
if image[y, x] > 70:
# image[y, x] = 255
dummy[y, x] = 0
param = find_square(dummy)
# print(param)
for y in range(height):
for x in range(width):
if image[y, x] < 100 and not (param[0] < y < param[1] and param[2] < x < param[3]):
image[y, x] = self.thr
if image[y, x] < 100 and param[0] < y < param[1] and param[2] < x < param[3]:
result[y, x] = 1
else:
result[y, x] = 0
else:
dummy = find_dummy(image, 0, 0, 50, 255)
dummy = find_dummy(dummy, height - 1, width - 1, 50, 255)
for y in range(height):
for x in range(width):
if dummy[y, x] != 255 and image[y, x] < 100:
result[y, x] = 1
elif dummy[y, x] == 0:
image[y, x] = self.thr
if dummy[y, x] == 255:
dummy[y, x] = 0
else:
dummy[y, x] = 1
self.dummy = dummy
# result = cv.morphologyEx(result, cv.MORPH_OPEN, const.kernel())
# result = cv.morphologyEx(result, cv.MORPH_CLOSE, const.kernel())
# plt.figure()
# arr = image.flatten()
# _ = plt.hist(arr, bins=256, range=[0, 254], facecolor='blue', alpha=0.75)
# plt.show()
self.image = image
cv.imwrite('./assets/dummy.jpg', image)
self.result = result
def main():
input_file = const.input_file()
image = cv.imread(input_file, cv.IMREAD_GRAYSCALE)
dummy = Dummy(image)
dummy.run()
result = dummy.result
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(image, cmap='gray')
ax1.set_axis_off()
ax2 = fig.add_subplot(1, 2, 2)
ax2.imshow(result, cmap='gray')
# ax2.imshow(result, cmap='Paired', interpolation='nearest')
ax2.set_axis_off()
plt.show()
if __name__ == '__main__':
main()
|
{"hexsha": "6e40f96e6afee4c4f969bb710d957acb06d19033", "size": 6860, "ext": "py", "lang": "Python", "max_stars_repo_path": "dummy.py", "max_stars_repo_name": "mecobalamine/crispy-octo-palm-tree", "max_stars_repo_head_hexsha": "dfcca865ccb1235083c9a4ab997e8a9334a21235", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dummy.py", "max_issues_repo_name": "mecobalamine/crispy-octo-palm-tree", "max_issues_repo_head_hexsha": "dfcca865ccb1235083c9a4ab997e8a9334a21235", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dummy.py", "max_forks_repo_name": "mecobalamine/crispy-octo-palm-tree", "max_forks_repo_head_hexsha": "dfcca865ccb1235083c9a4ab997e8a9334a21235", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1914893617, "max_line_length": 103, "alphanum_fraction": 0.4733236152, "include": true, "reason": "import numpy", "num_tokens": 2108}
|
include("5.5.1 u (a+b arcsec(c x))^n.jl")
include("5.5.2 Miscellaneous inverse secant.jl")
|
{"hexsha": "7496e500d5e27b705e609eb6aa9851be9a9f18c8", "size": 91, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/rules/5 Inverse trig functions/5.5 Inverse secant/.jl", "max_stars_repo_name": "gronniger/RubiSymbolics.jl", "max_stars_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/rules/5 Inverse trig functions/5.5 Inverse secant/.jl", "max_issues_repo_name": "gronniger/RubiSymbolics.jl", "max_issues_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rules/5 Inverse trig functions/5.5 Inverse secant/.jl", "max_forks_repo_name": "gronniger/RubiSymbolics.jl", "max_forks_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3333333333, "max_line_length": 48, "alphanum_fraction": 0.6813186813, "num_tokens": 34}
|
using LazySets
function spikingNeuron_specification()
# initial set in mode 1
X0_m1 = Hyperrectangle(low=[-65.0, -0.2], high=[-60.0, 0.2])
X0 = [(1, X0_m1)]
# time horizon: 100 time units
time_horizon = 100.0
# specification
O = Dict{Symbol, Any}(:T => time_horizon)
return X0, O
end
|
{"hexsha": "cbf4113c97446501ccd4ef3060f75374f67f804a", "size": 320, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "models/SpikingNeuron/spikingNeuron_specifications.jl", "max_stars_repo_name": "JuliaReach/ReachabilityBenchmarks", "max_stars_repo_head_hexsha": "a7398cd9eb61f089caa5e9dbaf0914ba3da6bb02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-10-24T10:40:22.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-25T07:55:32.000Z", "max_issues_repo_path": "models/SpikingNeuron/spikingNeuron_specifications.jl", "max_issues_repo_name": "JuliaReach/ReachabilityBenchmarks", "max_issues_repo_head_hexsha": "a7398cd9eb61f089caa5e9dbaf0914ba3da6bb02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 109, "max_issues_repo_issues_event_min_datetime": "2017-10-25T12:00:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-13T12:16:42.000Z", "max_forks_repo_path": "models/SpikingNeuron/spikingNeuron_specifications.jl", "max_forks_repo_name": "JuliaReach/ReachabilityBenchmarks", "max_forks_repo_head_hexsha": "a7398cd9eb61f089caa5e9dbaf0914ba3da6bb02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-04T18:18:23.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-04T18:18:23.000Z", "avg_line_length": 20.0, "max_line_length": 64, "alphanum_fraction": 0.625, "num_tokens": 114}
|
c
c Copyright (c) 2018, Lawrence Livermore National Security, LLC.
c Produced at the Lawrence Livermore National Laboratory
c
c Written by Jeffrey Banks banksj3@rpi.edu (Rensselaer Polytechnic Institute,
c Amos Eaton 301, 110 8th St., Troy, NY 12180); Jeffrey Hittinger
c hittinger1@llnl.gov, William Arrighi arrighi2@llnl.gov, Richard Berger
c berger5@llnl.gov, Thomas Chapman chapman29@llnl.gov (LLNL, P.O Box 808,
c Livermore, CA 94551); Stephan Brunner stephan.brunner@epfl.ch (Ecole
c Polytechnique Federale de Lausanne, EPFL SB SPC-TH, PPB 312, Station 13,
c CH-1015 Lausanne, Switzerland).
c CODE-744849
c
c All rights reserved.
c
c This file is part of Loki. For details, see.
c
c Permission is hereby granted, free of charge, to any person obtaining a
c copy of this software and associated documentation files (the "Software"),
c to deal in the Software without restriction, including without limitation
c the rights to use, copy, modify, merge, publish, distribute, sublicense,
c and/or sell copies of the Software, and to permit persons to whom the
c Software is furnished to do so, subject to the following conditions:
c
c The above copyright notice and this permission notice shall be included in
c all copies or substantial portions of the Software.
c
c THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
c OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
c FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
c THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
c LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
c FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
c DEALINGS IN THE SOFTWARE.
c
c Fortran functions called by Maxwell.
c
subroutine zeroghost2d(
& u,
& n1a,n1b,n2a,n2b,
& nd1a,nd1b,nd2a,nd2b,
& dim )
c
implicit none
integer nd1a,nd1b,nd2a,nd2b
integer n1a,n1b,n2a,n2b
integer dim
real u(nd1a:nd1b,nd2a:nd2b,1:dim)
integer i1,i2,i3
c
c .. i1 left
do i3=1,dim
do i2=nd2a,nd2b
do i1=nd1a,n1a-1
u(i1,i2,i3) = 0.0
end do
end do
end do
c .. i1 right
do i3=1,dim
do i2=nd2a,nd2b
do i1=n1b+1,nd1b
u(i1,i2,i3) = 0.0
end do
end do
end do
c
c .. i2 left
do i3=1,dim
do i2=nd2a,n2a-1
do i1=nd1a,nd1b
u(i1,i2,i3) = 0.0
end do
end do
end do
c .. i2 right
do i3=1,dim
do i2=n2b+1,nd2b
do i1=nd1a,nd1b
u(i1,i2,i3) = 0.0
end do
end do
end do
return
end
c
c ++++++++++++++
c
subroutine xpby2d(
& x,
& nd1a, nd1b, nd2a, nd2b,
& y,
& n1a, n1b, n2a, n2b,
& b,
& dim)
c
c.. compute x = x + by
implicit none
c
c.. declaration of incoming variables
integer nd1a, nd1b, nd2a, nd2b
integer n1a, n1b, n2a, n2b
integer dim
real x(nd1a:nd1b, nd2a:nd2b, 1:dim)
real y(nd1a:nd1b, nd2a:nd2b, 1:dim)
real b
c
c.. declaration of local variables
integer i1, i2, i3
c
do i3 = 1, dim
do i2 = n2a, n2b
do i1 = n1a, n1b
x(i1, i2, i3) = x(i1, i2, i3) + b * y(i1, i2, i3)
end do
end do
end do
c
return
end
c
c ++++++++++++++
c
subroutine maxwellevalrhs(
& md1a, md1b, md2a, md2b,
& m1a, m1b, m2a, m2b,
& xlo, xhi, dx,
& c, avWeak, avStrong, solution_order,
& EMvars,
& Jx, Jy, Jz,
& dEMvars)
c
c.. compute rhs of Maxwell's equations
implicit none
c
c.. declaration of incoming variables
integer md1a, md1b, md2a, md2b
integer m1a, m1b, m2a, m2b
real xlo(1:4), xhi(1:4), dx(1:4)
real c, avWeak, avStrong
integer solution_order
real EMvars(md1a:md1b, md2a:md2b, 1:6)
real Jx(md1a:md1b, md2a:md2b)
real Jy(md1a:md1b, md2a:md2b)
real Jz(md1a:md1b, md2a:md2b)
real dEMvars(md1a:md1b, md2a:md2b, 1:6)
c
c.. declaration of local variables
integer i1, i2, comp
real csquared
real Exdy, Eydx, Ezdx, Ezdy
real Bxdy, Bydx, Bzdx, Bzdy
real uxxxx, uyyyy, uxxxxxx, uyyyyyy
c
csquared = c**2
c
if( solution_order .eq. 4 ) then
! 4th order
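            ! central first derivatives from the 5-point stencil
            ! (1, -8, 0, 8, -1)/(12*dx)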
do i2 = m2a, m2b
do i1 = m1a, m1b
Exdy = (
* EMvars(i1, i2-2, 1)
* -8.0*EMvars(i1, i2-1, 1)
* +8.0*EMvars(i1, i2+1, 1)
* -EMvars(i1, i2+2, 1))/(12.0*dx(2))
Eydx = (
* EMvars(i1-2, i2, 2)
* -8.0*EMvars(i1-1, i2, 2)
* +8.0*EMvars(i1+1, i2, 2)
* -EMvars(i1+2, i2, 2))/(12.0*dx(1))
Ezdx = (
* EMvars(i1-2, i2, 3)
* -8.0*EMvars(i1-1, i2, 3)
* +8.0*EMvars(i1+1, i2, 3)
* -EMvars(i1+2, i2, 3))/(12.0*dx(1))
Ezdy = (
* EMvars(i1, i2-2, 3)
* -8.0*EMvars(i1, i2-1, 3)
* +8.0*EMvars(i1, i2+1, 3)
* -EMvars(i1, i2+2, 3))/(12.0*dx(2))
Bxdy = (
* EMvars(i1, i2-2, 4)
* -8.0*EMvars(i1, i2-1, 4)
* +8.0*EMvars(i1, i2+1, 4)
* -EMvars(i1, i2+2, 4))/(12.0*dx(2))
Bydx = (
* EMvars(i1-2, i2, 5)
* -8.0*EMvars(i1-1, i2, 5)
* +8.0*EMvars(i1+1, i2, 5)
* -EMvars(i1+2, i2, 5))/(12.0*dx(1))
Bzdx = (
* EMvars(i1-2, i2, 6)
* -8.0*EMvars(i1-1, i2, 6)
* +8.0*EMvars(i1+1, i2, 6)
* -EMvars(i1+2, i2, 6))/(12.0*dx(1))
Bzdy = (
* EMvars(i1, i2-2, 6)
* -8.0*EMvars(i1, i2-1, 6)
* +8.0*EMvars(i1, i2+1, 6)
* -EMvars(i1, i2+2, 6))/(12.0*dx(2))
dEMvars(i1, i2, 1) = csquared*(Bzdy)-Jx(i1, i2)
dEMvars(i1, i2, 2) = -csquared*(Bzdx)-Jy(i1, i2)
dEMvars(i1, i2, 3) = csquared*(Bydx-Bxdy)-Jz(i1, i2)
dEMvars(i1, i2, 4) = -Ezdy
dEMvars(i1, i2, 5) = Ezdx
dEMvars(i1, i2, 6) = Exdy-Eydx
end do
end do
else
! 6th order
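            ! central first derivatives from the 7-point stencil
            ! (-1, 9, -45, 0, 45, -9, 1)/(60*dx)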
do i2 = m2a, m2b
do i1 = m1a, m1b
Exdy = (
* -1.0 *EMvars(i1, i2-3, 1)
* +9.0 *EMvars(i1, i2-2, 1)
* -45.0*EMvars(i1, i2-1, 1)
* +45.0*EMvars(i1, i2+1, 1)
* -9.0 *EMvars(i1, i2+2, 1)
* +1.0 *EMvars(i1, i2+3, 1))/(60.0*dx(2))
Eydx = (
* -1.0 *EMvars(i1-3, i2, 2)
* +9.0 *EMvars(i1-2, i2, 2)
* -45.0*EMvars(i1-1, i2, 2)
* +45.0*EMvars(i1+1, i2, 2)
* -9.0 *EMvars(i1+2, i2, 2)
* +1.0 *EMvars(i1+3, i2, 2))/(60.0*dx(1))
Ezdx = (
* -1.0 *EMvars(i1-3, i2, 3)
* +9.0 *EMvars(i1-2, i2, 3)
* -45.0*EMvars(i1-1, i2, 3)
* +45.0*EMvars(i1+1, i2, 3)
* -9.0 *EMvars(i1+2, i2, 3)
* +1.0 *EMvars(i1+3, i2, 3))/(60.0*dx(1))
Ezdy = (
* -1.0 *EMvars(i1, i2-3, 3)
* +9.0 *EMvars(i1, i2-2, 3)
* -45.0*EMvars(i1, i2-1, 3)
* +45.0*EMvars(i1, i2+1, 3)
* -9.0 *EMvars(i1, i2+2, 3)
* +1.0 *EMvars(i1, i2+3, 3))/(60.0*dx(2))
Bxdy = (
* -1.0 *EMvars(i1, i2-3, 4)
* +9.0 *EMvars(i1, i2-2, 4)
* -45.0*EMvars(i1, i2-1, 4)
* +45.0*EMvars(i1, i2+1, 4)
* -9.0 *EMvars(i1, i2+2, 4)
* +1.0 *EMvars(i1, i2+3, 4))/(60.0*dx(2))
Bydx = (
* -1.0 *EMvars(i1-3, i2, 5)
* +9.0 *EMvars(i1-2, i2, 5)
* -45.0*EMvars(i1-1, i2, 5)
* +45.0*EMvars(i1+1, i2, 5)
* -9.0 *EMvars(i1+2, i2, 5)
* +1.0 *EMvars(i1+3, i2, 5))/(60.0*dx(1))
Bzdx = (
* -1.0 *EMvars(i1-3, i2, 6)
* +9.0 *EMvars(i1-2, i2, 6)
* -45.0*EMvars(i1-1, i2, 6)
* +45.0*EMvars(i1+1, i2, 6)
* -9.0 *EMvars(i1+2, i2, 6)
* +1.0 *EMvars(i1+3, i2, 6))/(60.0*dx(1))
Bzdy = (
* -1.0 *EMvars(i1, i2-3, 6)
* +9.0 *EMvars(i1, i2-2, 6)
* -45.0*EMvars(i1, i2-1, 6)
* +45.0*EMvars(i1, i2+1, 6)
* -9.0 *EMvars(i1, i2+2, 6)
* +1.0 *EMvars(i1, i2+3, 6))/(60.0*dx(2))
dEMvars(i1, i2, 1) = csquared*(Bzdy)-Jx(i1, i2)
dEMvars(i1, i2, 2) = -csquared*(Bzdx)-Jy(i1, i2)
dEMvars(i1, i2, 3) = csquared*(Bydx-Bxdy)-Jz(i1, i2)
dEMvars(i1, i2, 4) = -Ezdy
dEMvars(i1, i2, 5) = Ezdx
dEMvars(i1, i2, 6) = Exdy-Eydx
end do
end do
end if
c
if ((avWeak .gt. 0.0) .or. (avStrong .gt. 0.0)) then
if( solution_order .eq. 4 ) then
! 4th order artificial dissipation
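          ! fourth-difference dissipation: stencil (1, -4, 6, -4, 1)/dx**4,
          ! scaled by avWeak*dx**4 + avStrong*dx**3 so the added term
          ! stays high order in dx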
do comp = 1, 6
do i2 = m2a, m2b
do i1 = m1a, m1b
uxxxx =
* (1.0*EMvars(i1-2, i2, comp)
* -4.0*EMvars(i1-1, i2, comp)
* +6.0*EMvars(i1, i2, comp)
* -4.0*EMvars(i1+1, i2, comp)
* +1.0*EMvars(i1+2, i2, comp))/(dx(1)**4)
uyyyy =
* (1.0*EMvars(i1, i2-2, comp)
* -4.0*EMvars(i1, i2-1, comp)
* +6.0*EMvars(i1, i2, comp)
* -4.0*EMvars(i1, i2+1, comp)
* +1.0*EMvars(i1, i2+2, comp))/(dx(2)**4)
dEMvars(i1, i2, comp) = dEMvars(i1, i2, comp)
* -(avWeak*dx(1)**4+avStrong*dx(1)**3)*uxxxx
* -(avWeak*dx(2)**4+avStrong*dx(2)**3)*uyyyy
end do
end do
end do
else
! 6th order artificial dissipation
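          ! sixth-difference dissipation: stencil
          ! (1, -6, 15, -20, 15, -6, 1)/dx**6, scaled by
          ! avWeak*dx**6 + avStrong*dx**5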
do comp = 1, 6
do i2 = m2a, m2b
do i1 = m1a, m1b
uxxxxxx =
* (1.0 *EMvars(i1-3, i2, comp)
* -6.0 *EMvars(i1-2, i2, comp)
* +15.0*EMvars(i1-1, i2, comp)
* -20.0*EMvars(i1, i2, comp)
* +15.0*EMvars(i1+1, i2, comp)
* -6.0 *EMvars(i1+2, i2, comp)
* +1.0 *EMvars(i1+3, i2, comp))/(dx(1)**6)
uyyyyyy =
* (1.0 *EMvars(i1, i2-3, comp)
* -6.0 *EMvars(i1, i2-2, comp)
* +15.0*EMvars(i1, i2-1, comp)
* -20.0*EMvars(i1, i2, comp)
* +15.0*EMvars(i1, i2+1, comp)
* -6.0 *EMvars(i1, i2+2, comp)
* +1.0 *EMvars(i1, i2+3, comp))/(dx(2)**6)
dEMvars(i1, i2, comp) = dEMvars(i1, i2, comp)
* +(avWeak*dx(1)**6+avStrong*dx(1)**5)*uxxxxxx
* +(avWeak*dx(2)**6+avStrong*dx(2)**5)*uyyyyyy
end do
end do
end do
end if
end if
c
return
end
c
c ++++++++++++++
c
subroutine maxwellevalvzrhs(
& md1a, md1b, md2a, md2b,
& m1a, m1b, m2a, m2b,
& charge_per_mass,
& EMvars,
& dvz)
c
c.. compute rhs of Maxwell's equations
implicit none
c
c.. declaration of incoming variables
integer md1a, md1b, md2a, md2b
integer m1a, m1b, m2a, m2b
real charge_per_mass
real EMvars(md1a:md1b, md2a:md2b, 1:6)
real dvz(md1a:md1b, md2a:md2b)
c
c.. declaration of local variables
integer i1, i2
c
do i2 = m2a, m2b
do i1 = m1a, m1b
dvz(i1, i2) = charge_per_mass*EMvars(i1, i2, 3)
end do
end do
c
return
end
c
c+++++++++++
c
subroutine computeEFieldFromPotentialMaxwell(
* nd1a, nd1b, nd2a, nd2b,
* n1a, n1b, n2a, n2b,
* solution_order,
* dx,
* E, phi)
c
c.. function to neutralize charge densities
implicit none
c
integer nd1a, nd1b, nd2a, nd2b
integer n1a, n1b, n2a, n2b
integer solution_order
real dx(*)
real E(nd1a:nd1b, nd2a:nd2b, 1:6)
real phi(nd1a:nd1b, nd2a:nd2b)
c
integer i1, i2
c
if( solution_order .eq.4 ) then
! 4th order
do i2 = n2a, n2b
do i1 = n1a, n1b
E(i1, i2, 1) = (phi(i1-2, i2)-8.0*phi(i1-1, i2)+
* 8.0*phi(i1+1, i2)-phi(i1+2, i2))/(12.0*dx(1))
E(i1, i2, 2) = (phi(i1, i2-2)-8.0*phi(i1, i2-1)+
* 8.0*phi(i1, i2+1)-phi(i1, i2+2))/(12.0*dx(2))
end do
end do
else
! 6th order
do i2 = n2a,n2b
do i1 = n1a,n1b
E(i1,i2,1) = (
* -1.0 *phi(i1-3,i2)
* +9.0 *phi(i1-2,i2)
* -45.0 *phi(i1-1,i2)
* +45.0 *phi(i1+1,i2)
* -9.0 *phi(i1+2,i2)
* +1.0 *phi(i1+3,i2))/(60.0*dx(1))
E(i1,i2,2) = (
* -1.0 *phi(i1,i2-3)
* +9.0 *phi(i1,i2-2)
* -45.0 *phi(i1,i2-1)
* +45.0 *phi(i1,i2+1)
* -9.0 *phi(i1,i2+2)
* +1.0 *phi(i1,i2+3))/(60.0*dx(2))
end do
end do
end if
c
return
end
|
{"hexsha": "7d1f4991a9306e642ddd68879188988323148ad4", "size": 13248, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MaxwellF.f", "max_stars_repo_name": "LLNL/LOKI", "max_stars_repo_head_hexsha": "5e3b875bc690e094fa0aaaec117fc523b11b7518", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-14T21:13:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-14T21:13:47.000Z", "max_issues_repo_path": "MaxwellF.f", "max_issues_repo_name": "LLNL/LOKI", "max_issues_repo_head_hexsha": "5e3b875bc690e094fa0aaaec117fc523b11b7518", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-26T15:56:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-02T16:23:56.000Z", "max_forks_repo_path": "MaxwellF.f", "max_forks_repo_name": "LLNL/LOKI", "max_forks_repo_head_hexsha": "5e3b875bc690e094fa0aaaec117fc523b11b7518", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5253456221, "max_line_length": 77, "alphanum_fraction": 0.4710144928, "num_tokens": 5565}
|
########################################################################
### Kernel abstract class (with Normalization option)
########################################################################
from imports import *
from scipy.sparse.linalg import norm
from abc import abstractmethod, ABC
class Kernel(ABC):
    def __init__(self, normalize=False):
        self.normalize = normalize
@abstractmethod
def compute_feature_vector(self, X):
pass
def compute_train(self, data_train):
feature_vector = self.compute_feature_vector(data_train)
K = np.dot(feature_vector, feature_vector.T)
if self.normalize:
K = self.normalize_train(K)
self.K = K
return K
def compute_test(self, data_train, data_test):
feature_vector_train = self.compute_feature_vector(data_train)
feature_vector_test = self.compute_feature_vector(data_test)
K = np.dot(feature_vector_test, feature_vector_train.T)
if self.normalize:
K = self.normalize_test(K, feature_vector_test)
return K
    def normalize_train(self, K_train):  # K_train unnormalized
        self.norms_train = np.sqrt(K_train.diagonal())  # norms of the train feature vectors
        # (a small epsilon, e.g. 1e-40, could be added here to avoid division by zero)
        matrix_norms = np.outer(self.norms_train, self.norms_train)
        K_train = np.divide(K_train, matrix_norms)
        self.K_train = K_train
        return K_train
    def normalize_test(self, K_test, feats_test):  # K_test unnormalized
        # scipy.sparse.linalg.norm expects a sparse matrix; for dense
        # features np.linalg.norm(feats_test, axis=1) would be needed instead
        #norms_test = np.sum(feats_test*feats_test,axis=1)
        norms_test = norm(feats_test, axis=1)
        # matrix of sqrt(K(x_test, x_test) * K(x_train, x_train))
        matrix_norms = np.outer(norms_test, self.norms_train)  # (+ 1e-40 to avoid division by zero)
        K_test = np.divide(K_test, matrix_norms)
        return K_test
def save_train(self, filename):
np.save(filename, self.K)
def load_train(self, filename):
self.K = np.load(filename)
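# ---------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal concrete
# subclass and usage example. `LinearKernel` is a hypothetical name; the
# identity feature map assumes samples are already numeric vectors. Note
# that normalize_test() uses scipy.sparse.linalg.norm, which expects a
# sparse feature matrix, so this dense toy example keeps normalize=False.
if __name__ == '__main__':
    import numpy as np

    class LinearKernel(Kernel):
        def compute_feature_vector(self, X):
            return np.asarray(X, dtype=float)

    X_train = np.random.rand(5, 3)
    X_test = np.random.rand(2, 3)
    kernel = LinearKernel(normalize=False)
    print(kernel.compute_train(X_train).shape)         # (5, 5) Gram matrix
    print(kernel.compute_test(X_train, X_test).shape)  # (2, 5) cross-kernel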
|
{"hexsha": "921a8be9847c5b9ffd02cb30929197978ae3430c", "size": 1965, "ext": "py", "lang": "Python", "max_stars_repo_path": "kernels/kernel.py", "max_stars_repo_name": "AmineKheldouni/Kernel-in-Machine-Learning", "max_stars_repo_head_hexsha": "6d299cb8b1aab9f653b51baad1222cac8a2d28c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-17T22:51:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-17T22:51:56.000Z", "max_issues_repo_path": "kernels/kernel.py", "max_issues_repo_name": "AmineKheldouni/Kernel-in-Machine-Learning", "max_issues_repo_head_hexsha": "6d299cb8b1aab9f653b51baad1222cac8a2d28c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kernels/kernel.py", "max_forks_repo_name": "AmineKheldouni/Kernel-in-Machine-Learning", "max_forks_repo_head_hexsha": "6d299cb8b1aab9f653b51baad1222cac8a2d28c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7272727273, "max_line_length": 118, "alphanum_fraction": 0.6249363868, "include": true, "reason": "from scipy", "num_tokens": 433}
|
import torch.nn as nn
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
class DCDHLoss(nn.Module):
def __init__(self, gamma, code_length, num_train):
super(DCDHLoss, self).__init__()
self.gamma = gamma
self.code_length = code_length
self.num_train = num_train
def forward(self, u, V, S, V_omega):
batch_size = u.size(0)
V = Variable(torch.from_numpy(V).type(torch.FloatTensor).cuda())
V_omega = Variable(torch.from_numpy(V_omega).type(torch.FloatTensor).cuda())
S = Variable(S.cuda())
square_loss = (u.mm(V_omega.t())-self.code_length * S) ** 2
quantization_loss = self.gamma * (V_omega - u) ** 2
loss = (square_loss.sum() + quantization_loss.sum()) / (self.num_train * batch_size)
return loss
class ProductLoss(nn.Module):
def __init__(self, gamma, code_length, num_train):
super(ProductLoss, self).__init__()
self.gamma = gamma
self.code_length = code_length
self.num_train = num_train
self.d = 0.01
self.c = 1
        print('Product: Classification: ' + str(self.c) + ', d: ' + str(self.d))
def forward(self, u, V, S, V_omega, classify, train_label, ui, ul):
batch_size = u.size(0)
V_omega = Variable(torch.from_numpy(V_omega).type(torch.FloatTensor).cuda())
S = Variable(S.cuda())
square_loss = (u.mm(V_omega.t())-self.code_length * S) ** 2
quantization_loss = self.gamma * (V_omega - u) ** 2
loss = (square_loss.sum() + quantization_loss.sum()) / (self.num_train * batch_size)
criterion = FocalLoss()
c_loss = criterion(classify, train_label)
d_loss = ((ui - ul) ** 2).sum()
loss = (loss + self.c * c_loss + self.d * d_loss)
return loss
class FocalLoss(nn.Module):
def __init__(self, num_classes=21):
super(FocalLoss, self).__init__()
self.num_classes = num_classes
self.alpha = 0.25
self.gamma = 2
def focal_loss(self, x, t):
'''Focal loss.
Args:
x: (tensor) sized [N,D].
y: (tensor) sized [N,].
Return:
(tensor) focal loss.
'''
alpha = 0.25
gamma = 2
t = t.type(torch.FloatTensor).cuda()
p = x.sigmoid()
pt = p*t + (1-p)*(1-t) # pt = p if t > 0 else 1-p
w = alpha*t + (1-alpha)*(1-t) # w = alpha if t > 0 else 1-alpha
w = w * (1-pt).pow(gamma)
return w * F.binary_cross_entropy_with_logits(x, t)
def focal_loss_alt(self, x, t):
'''Focal loss alternative.
Args:
x: (tensor) sized [N,D].
y: (tensor) sized [N,].
Return:
(tensor) focal loss.
'''
alpha = 0.25
xt = x*(2*t-1) # xt = x if t > 0 else -x
pt = (2*xt+1).sigmoid()
w = alpha*t + (1-alpha)*(1-t)
loss = -w*pt.log() / 2
return loss.sum()
def forward(self, cls_preds, cls_targets):
'''Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).
Args:
loc_preds: (tensor) predicted locations, sized [batch_size, #anchors, 4].
loc_targets: (tensor) encoded target locations, sized [batch_size, #anchors, 4].
cls_preds: (tensor) predicted class confidences, sized [batch_size, #anchors, #classes].
cls_targets: (tensor) encoded target labels, sized [batch_size, #anchors].
loss:
(tensor) loss = SmoothL1Loss(loc_preds, loc_targets) + FocalLoss(cls_preds, cls_targets).
'''
batch_size, num_boxes = cls_targets.size()
pos = cls_targets > 0 # [N,#anchors]
num_pos = pos.data.long().sum()
################################################################
# cls_loss = FocalLoss(loc_preds, loc_targets)
################################################################
# pos_neg = cls_targets > -1 # exclude ignored anchors
# mask = pos_neg.unsqueeze(2).expand_as(cls_preds)
# masked_cls_preds = cls_preds[mask].view(-1,self.num_classes)
cls_loss = self.focal_loss(cls_preds, cls_targets)
loss = cls_loss.mean()
# N = cls_preds.size(0)
# C = cls_preds.size(1)
# P = F.softmax(cls_preds)
#
# class_mask = cls_preds.data.new(N, C).fill_(0)
# class_mask = Variable(class_mask)
# ids = cls_targets.view(-1, 1)
# class_mask.scatter_(1, ids, 1.)
#
# self.alpha = self.alpha.cuda()
# alpha = self.alpha[ids.data.view(-1)]
#
# probs = (P * class_mask).sum(1).view(-1, 1)
#
# log_p = probs.log()
# # print('probs size= {}'.format(probs.size()))
# # print(probs)
#
# batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p
# # print('-----bacth_loss------')
# # print(batch_loss)
#
# loss = batch_loss.mean()
return loss
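# ---------------------------------------------------------------------
# Editor's sketch (not part of the original module): a smoke test of
# FocalLoss on random data. focal_loss() moves its targets to the GPU,
# so this only runs where CUDA is available. Shapes follow the
# docstrings above: logits sized [N, #classes], multi-hot 0/1 targets.
if __name__ == '__main__':
    if torch.cuda.is_available():
        preds = torch.randn(4, 21).cuda()
        targets = (torch.rand(4, 21) > 0.5).long().cuda()
        criterion = FocalLoss()
        print(criterion(preds, targets).item())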
|
{"hexsha": "726e3aa9b93bc15e248d6db8c30835744d3c3d46", "size": 5061, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/dcdh_loss.py", "max_stars_repo_name": "DarrenZZhang/DCDH", "max_stars_repo_head_hexsha": "db526d50b8f8ba7d51fce6155e3ea1e6dda66283", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-09-21T08:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-24T09:51:38.000Z", "max_issues_repo_path": "utils/dcdh_loss.py", "max_issues_repo_name": "DarrenZZhang/DCDH", "max_issues_repo_head_hexsha": "db526d50b8f8ba7d51fce6155e3ea1e6dda66283", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/dcdh_loss.py", "max_forks_repo_name": "DarrenZZhang/DCDH", "max_forks_repo_head_hexsha": "db526d50b8f8ba7d51fce6155e3ea1e6dda66283", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-21T08:39:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-02T02:08:36.000Z", "avg_line_length": 35.6408450704, "max_line_length": 99, "alphanum_fraction": 0.5500889152, "include": true, "reason": "import numpy", "num_tokens": 1332}
|
import networkx as nx
from history import History
from transaction import Transaction
from data_operation import DataOperation
from history_query_builder import HistoryQueryBuilder
def serializable_or_not(hist):
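    # Conflict-serializability test via the precedence graph: add an edge
    # (i, j) whenever transaction i has an operation that conflicts with a
    # later operation of transaction j (same data item, at least one write,
    # neither a commit nor an abort); the history is serializable iff this
    # directed graph is acyclic.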
conflicting_transactions = set()
operations = hist.schedule
for i in range(len(operations)):
operation1=operations[i]
for j in range(i+1,len(operations)):
operation2=operations[j]
id1 = operation1.transaction.id
id2 = operation2.transaction.id
if (id1 != id2 and
(operation1.is_write() or operation2.is_write()) and
operation1.data_item == operation2.data_item and
not operation1.is_commit() and not operation2.is_commit() and
not operation1.is_abort() and not operation2.is_abort()):
conflicting_transactions.add((id1,id2))
    G = nx.DiGraph(list(conflicting_transactions))
    try:
        nx.find_cycle(G)  # raises NetworkXNoCycle if the graph is acyclic
        cycle = True  # reaching this line means there is a cycle
    except nx.NetworkXNoCycle:
        cycle = False
    return not cycle  # serializable iff the precedence graph has no cycle
"""
def test_serializable():
serial_input = "w1[x] r3[z] r2[x] c2 c1"
not_serial_input = "w1[x] w2[x] w2[y] c2 w1[y] w3[x] w3[y] c3 c1"
serializable_hist = HistoryQueryBuilder(serial_input).process()
not_serializable_hist = HistoryQueryBuilder(not_serial_input).process()
serial_res = serializable_or_not(serializable_hist)
print(serial_res)
print ("\n\n")
not_serial_res = serializable_or_not(not_serializable_hist)
print(not_serial_res)
print("\n\n")
return serial_res and not not_serial_res
test = test_serializable()
if(test):
print("pass")
else:
print("fail")
"""
|
{"hexsha": "e96319894b1d25a266b59e534f6a7bfa632c6c27", "size": 2126, "ext": "py", "lang": "Python", "max_stars_repo_path": "serializable_or_not.py", "max_stars_repo_name": "jpg013/CS5570-project", "max_stars_repo_head_hexsha": "e1817e3c1c77ec35a5605db23cd1f9e3ede8307a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "serializable_or_not.py", "max_issues_repo_name": "jpg013/CS5570-project", "max_issues_repo_head_hexsha": "e1817e3c1c77ec35a5605db23cd1f9e3ede8307a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "serializable_or_not.py", "max_forks_repo_name": "jpg013/CS5570-project", "max_forks_repo_head_hexsha": "e1817e3c1c77ec35a5605db23cd1f9e3ede8307a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.746031746, "max_line_length": 135, "alphanum_fraction": 0.6420507996, "include": true, "reason": "import networkx", "num_tokens": 485}
|
subroutine sig_vals(s1,t1,p1,s2,t2,p2,sig1,sig2)
ccc
ccc
ccc
ccc DESCRIPTION : Computes the sigma values of two neighbouring
ccc bottles w.r.t. the mid pressure
ccc
ccc PRECISION : Double
ccc
ccc INPUT : s1,s2 bottle salinities
ccc t1,t2 bottle in situ temperatures
ccc p1,p2 bottle pressures
ccc
ccc OUTPUT : sig1,sig2 bottle potential density values
ccc
ccc UNITS : salinity psu (IPSS-78)
ccc temperature degrees C (IPTS-68)
ccc pressure db
ccc density kg m-3
ccc
ccc
ccc AUTHOR : David Jackett
ccc
ccc CREATED : June 1993
ccc
ccc REVISION : 1.1 30/6/93
ccc
ccc
ccc
implicit double precision (a-h,o-z)
pmid = (p1+p2)/2.0
sd = svan(s1,theta(s1,t1,p1,pmid),pmid,sig1)
sd = svan(s2,theta(s2,t2,p2,pmid),pmid,sig2)
return
end
|
{"hexsha": "d0f9b2f652e436fd27d5ca519ecd072ac6833fa3", "size": 1063, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "notebooks/FromBieito/pygamma-2172748e338d/src/sig-vals.f", "max_stars_repo_name": "ClaraCDouglas/net_community_production", "max_stars_repo_head_hexsha": "0b1370f00abc99cb4c08c9f84abe12faf781643d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-03T11:05:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T11:05:00.000Z", "max_issues_repo_path": "notebooks/FromBieito/pygamma-2172748e338d/src/sig-vals.f", "max_issues_repo_name": "ClaraCDouglas/net_community_production", "max_issues_repo_head_hexsha": "0b1370f00abc99cb4c08c9f84abe12faf781643d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/FromBieito/pygamma-2172748e338d/src/sig-vals.f", "max_forks_repo_name": "ClaraCDouglas/net_community_production", "max_forks_repo_head_hexsha": "0b1370f00abc99cb4c08c9f84abe12faf781643d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-17T21:50:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T21:50:48.000Z", "avg_line_length": 23.1086956522, "max_line_length": 71, "alphanum_fraction": 0.516462841, "num_tokens": 297}
|
module precision_mod
implicit none
integer, parameter :: dp = selected_real_kind(15,100)
integer, parameter :: sp = selected_real_kind(6,30)
! --------------------------------
! use wp = sp for single precision
! use wp = dp for double precision
! --------------------------------
integer, parameter :: wp = dp
integer, parameter :: i8 = selected_int_kind( 12 )
end module precision_mod
module gemm_mod
use precision_mod
contains
subroutine test_gemm_nn_strided_batched(m,n,k,batchCount,use_gpu0)
use precision_mod
implicit none
integer, intent(in) :: m,n,k,batchCount
logical, intent(in) :: use_gpu0
real(kind=wp), allocatable :: A(:,:,:)
real(kind=wp), allocatable :: B(:,:,:)
real(kind=wp), allocatable :: C(:,:,:)
real(kind=wp), allocatable :: Chost(:,:,:)
real(kind=wp) :: alpha,beta
integer :: lda,ldb,ldc
integer, parameter :: idebug = 1
  real(kind=wp), parameter :: tol = 1.0e-5_wp
real(kind=wp) :: max_err, cnorm_1,cnorm_2,cnorm_max
real(kind=wp) :: c_ij, ch_ij
integer :: i,i1,i2,i3
integer(kind=i8) :: strideA,strideB,strideC
integer :: ic,jc,ib,ibatch
logical :: use_gpu
use_gpu = use_gpu0
allocate( A(m,k,batchCount) )
allocate( B(k,n,batchCount) )
allocate( C(m,n,batchCount) )
allocate( Chost(m,n,batchCount) )
lda = size(A,1)
ldb = size(B,1)
ldc = size(C,1)
A = 1
B = 2
C = 3
Chost = C
alpha = -1
beta = 1
! -------------------
! perform on cpu host
! -------------------
!$omp parallel do
do i=1,batchCount
Chost(1:m,1:n,i) = beta * Chost(1:m,1:n,i) + &
& alpha*matmul(A(1:m,1:k,i),B(1:k,1:n,i))
enddo
strideA = size(A,1)*size(A,2)
strideB = size(B,1)*size(B,2)
strideC = size(C,1)*size(C,2)
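  ! each stride is the number of array elements between the (1,1)
  ! entries of consecutive matrices in the batched (strided) storage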
!$omp target data &
!$omp& if (use_gpu) &
!$omp& map(to:m,n,k,batchCount,lda,ldb,ldc) &
!$omp& map(to:alpha,beta,strideA,strideB,strideC) &
!$omp& map(to:A,B) map(C)
!$omp target teams distribute parallel do simd &
!$omp& if (use_gpu) &
!$omp& private(ic,jc,ib,c_ij)
do ibatch=1,batchCount
do jc=1,n
do ic=1,m
c_ij = 0
do ib=1,k
c_ij = c_ij + A(ic,ib,ibatch)*B(ib,jc,ibatch)
enddo
if (beta.eq.0) then
C(ic,jc,ibatch) = alpha * c_ij
else
C(ic,jc,ibatch) = beta*C(ic,jc,ibatch) + alpha*c_ij
endif
enddo
enddo
enddo
!$omp end target data
max_err = 0
cnorm_max = 0
cnorm_1 = 0
cnorm_2 = 0
!$omp parallel do &
!$omp& private(c_ij,ch_ij) &
!$omp& reduction(max:max_err,cnorm_max) &
!$omp& reduction(+:cnorm_1,cnorm_2)
do i3=1,size(C,3)
do i2=1,size(C,2)
do i1=1,size(C,1)
c_ij = C(i1,i2,i3)
ch_ij = Chost(i1,i2,i3)
max_err = max(max_err,abs(c_ij - ch_ij))
cnorm_max = max( cnorm_max, abs(ch_ij))
cnorm_1 = cnorm_1 + abs(ch_ij)
cnorm_2 = cnorm_2 + abs(ch_ij) * abs(ch_ij)
enddo
enddo
enddo
cnorm_2 = sqrt( cnorm_2 )
print 9010, max_err, cnorm_max,cnorm_1,cnorm_2
9010 format(' max_err= ',1pe14.4,' cnorm_max = ',1pe14.4, &
& ' cnorm_1 = ',1pe14.4, ' cnorm_2 = ', 1pe14.4)
if ((idebug >= 1) .and. (max_err > tol)) then
do i3=1,size(C,3)
do i2=1,size(C,2)
do i1=1,size(C,1)
c_ij = C(i1,i2,i3)
ch_ij = Chost(i1,i2,i3)
if (abs(c_ij - ch_ij) > tol) then
print 9100, i1,i2,i3,c_ij, i1,i2,i3,ch_ij
9100 format(' C(',i4,',',i4,',',i4,') = ',1pe14.4, &
& ' Ch(',i4,',',i4,',',i4,') = ',1pe14.4 )
stop 2
endif
enddo
enddo
enddo
endif
deallocate(A,B,C,Chost)
return
end subroutine test_gemm_nn_strided_batched
end module gemm_mod
program main_gemm_nn_strided_batched
!$ use omp_lib
use precision_mod
use gemm_mod
implicit none
integer :: m,n,k,batchCount
integer :: nthreads
logical :: use_gpu
nthreads = 1
!$omp parallel
!$omp master
!$ nthreads = omp_get_num_threads()
!$omp end master
!$omp end parallel
print 9010,nthreads
9010 format(' nthreads = ', i6)
m = 3
n = 3
k = 3
batchCount = 64
use_gpu = .false.
print*,'use_gpu ',use_gpu
print 9020, m,n,k,batchCount
9020 format(' m,n,k,batchCount ', 4(1x,i6) )
call test_gemm_nn_strided_batched(m,n,k,batchCount,use_gpu)
use_gpu = .true.
print*,'use_gpu ',use_gpu
print 9030, m,n,k,batchCount
9030 format(' m,n,k,batchCount ', 4(1x,i6) )
call test_gemm_nn_strided_batched(m,n,k,batchCount,use_gpu)
m = 10
n = 10
k = 10
batchCount = 2
use_gpu = .false.
print*,'use_gpu ',use_gpu
print 9040, m,n,k,batchCount
9040 format(' m,n,k,batchCount ', 4(1x,i6) )
call test_gemm_nn_strided_batched(m,n,k,batchCount,use_gpu)
use_gpu = .true.
print*,'use_gpu ',use_gpu
print 9050, m,n,k,batchCount
9050 format(' m,n,k,batchCount ', 4(1x,i6) )
call test_gemm_nn_strided_batched(m,n,k,batchCount,use_gpu)
end program main_gemm_nn_strided_batched
|
{"hexsha": "94e04cd212d97bbd1fbccc1fbfa2da4925c7155b", "size": 5885, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/smoke/flang-325322/flang-325322.f90", "max_stars_repo_name": "raramakr/aomp", "max_stars_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/smoke/flang-325322/flang-325322.f90", "max_issues_repo_name": "raramakr/aomp", "max_issues_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/smoke/flang-325322/flang-325322.f90", "max_forks_repo_name": "raramakr/aomp", "max_forks_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8909952607, "max_line_length": 78, "alphanum_fraction": 0.5048428207, "num_tokens": 1840}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/8 21:38
# @author : Mo
# @function: class of model predict of sequence-labeling
# adapt for Linux (make the project root importable)
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(path_root)
## cpu/gpu and tf.keras
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ["TF_KERAS"] = "1"
# macadam
from macadam.conf.path_config import path_root, path_ner_people_1998, path_ner_clue_2020
from macadam.base.utils import txt_read, txt_write, load_json, save_json
from macadam.base.utils import padding_sequences, metrics_report
from macadam.base.layers import custom_objects_macadam
from macadam.conf.constant_params import SL, CLS, SEP
from macadam.base.embedding import embedding_map
from macadam.base.utils import load_json
from macadam import keras, K, L, M, O
from collections import OrderedDict
from typing import List, Dict
from tqdm import tqdm
import numpy as np
import json
# keras.utils.get_custom_objects().update(custom_objects_macadam)
# custom_objects = keras.utils.get_custom_objects()
class ModelPredict():
def __init__(self, path_dir: str):
"""
        init, model-prediction class for the sequence-labeling task
"""
self.path_model_info = os.path.join(path_dir, "macadam.info")
self.path_model_h5 = os.path.join(path_dir, "macadam.h5")
self.path_dir = path_dir
os.environ["MACADAM_LEVEL"] = "PREDICT"
self.load_tokenizer()
self.load_model()
def load_model(self):
"""
load model of keras of h5 which include graph-node and custom_objects
"""
self.model = M.load_model(self.path_model_h5, compile=False)
def load_tokenizer(self):
"""
load model_info of model, hyper_parameters/label2index/index2label/token2idx
"""
        # read the fields from the stored info dict
self.model_info = load_json(self.path_model_info)
hyper_parameters = self.model_info.get("hyper_parameters", {})
self.embed_type = hyper_parameters.get("sharing", {}).get("embed_type", "bert").upper()
self.length_max = hyper_parameters.get("sharing", {}).get("length_max", 512)
self.batch_size = hyper_parameters.get("sharing", {}).get("batch_size", 32)
self.token2idx = self.model_info.get("vocab", {}).get("token2idx", {})
self.use_crf = hyper_parameters.get("graph", {}).get("use_crf", True)
self.l2i = self.model_info.get("label", {}).get("l2i", {})
self.i2l = self.model_info.get("label", {}).get("i2l", {})
        # initialize the embedding
Embedding = embedding_map.get(self.embed_type)
self.embedd = Embedding(hyper_parameters)
        # using a CRF requires trans (the state transition matrix) and Viterbi decoding
if self.use_crf:
self.trans = self.model_info.get(SL, {}).get("trans", [])
        # build the tokenizer from the vocab dict (the MIX-embedding case
        # currently uses the same call as the other embedding types)
        self.embedd.build_tokenizer_from_dict(self.token2idx)
def preprocess_x(self, line_json: Dict, limit_lengths: List=None,
use_seconds: bool = True, is_multi: bool = True) -> List[List]:
"""
data pre-process of encode, 数据预处理
Args:
line_json: Dict, input, eg. {"text": "macadam是什么", "texts2": ["macadam是一个python工具包]}
limit_lengths: List, max length of each enum in texts2, eg.[128]
use_seconds: bool, either use [SEP] separate texts2 or not, eg.True
is_multi: bool, either sign texts2 with [0-1; 0] or not, eg.True
Returns:
res: List[Dict]
"""
text = line_json.get("text")
texts2 = line_json.get("texts2", None)
idxs = self.embedd.sent2idx(text=text, second_text=texts2, limit_lengths=limit_lengths,
use_seconds=use_seconds, is_multi=is_multi)
# idxs = padding_sequences(sequences=[idxs] if type(idxs[0])==int else idxs,
# length_max=self.length_max, padding=0)
return idxs
def viterbi_decode(self, nodes: np.array, trans: np.array) -> np.array:
"""
        Viterbi decode for CRF: find the optimal label path
code from url: https://github.com/bojone/bert4keras
author : bojone
Args:
nodes: np.array, shape=[seq_len, num_labels], output of model predict
trans: np.array, shape=[num_labels, num_labels], state transition matrix
Returns:
res: np.array, label of sequence
"""
labels = np.arange(len(self.l2i)).reshape((1, -1))
scores = nodes[0].reshape((-1, 1))
        scores[1:] -= np.inf  # the first label must be 0
paths = labels
for l in range(1, len(nodes)):
M = scores + trans + nodes[l].reshape((1, -1))
idxs = M.argmax(0)
scores = M.max(0).reshape((-1, 1))
path_idxs = paths[:, idxs]
paths = np.concatenate([path_idxs, labels], 0)
return paths[:, scores[0].argmax()]
def predict(self, texts: List[Dict], use_sort: bool = True) -> List[Dict]:
"""
        model predict, batched model prediction
Args:
texts: input of List<dict>, eg. [{"text": "macadam是什么", "texts2": ["macadam是一个python工具包]}]
Returns:
res: List[Dict]
"""
        # embedding encode (e.g. bert encode)
xs = []
for text_i in texts:
text_i_x = self.preprocess_x(text_i)
xs.append(text_i_x)
        # convert to numpy arrays
xs_array = []
# "LATTICE-LSTM-BATCH"的情况单独出来, 即MIX-Embedding的情况
if self.embed_type in ["MIX"]:
x_1 = np.array([x[0][0] for x in xs])
x_2 = np.array([x[1][0] for x in xs])
xs_array = [x_1, x_2]
else:
for i in range(len(xs[0])):
idxs_array = np.array([inxi[i] for inxi in xs])
xs_array.append(idxs_array)
        # model predict
labels = self.model.predict(xs_array)
labels_argmax = [l.argmax(-1) for l in labels]
if self.use_crf:
trans = np.array(self.trans)
labels_argmax = [self.viterbi_decode(label, trans) for label in labels]
        # post-processing
labels_zh = []
for i in range(len(labels_argmax)):
            # true length of the text (+2 for [CLS]/[SEP], capped at length_max)
len_text_i = min(len(texts[i].get("text")) + 2, self.length_max)
la = labels_argmax[i]
label_zh = []
for lai in la:
label_zh.append(self.i2l[str(lai)])
            # randomly initialized embeddings have no [CLS]/[SEP]
# if self.embed_type in ["RANDOM", "WORD2VEC"]:
# labels_zh.append(label_zh[0:len_text_i - 2])
# else:
labels_zh.append(label_zh[1:len_text_i-1])
return labels_zh
def evaluate(self, texts: List[Dict]):
"""
        evaluate on a corpus: validate the dataset and print a report
Args:
texts: input of List<dict>, eg. [{"text": "macadam是什么", "texts2": ["macadam是一个python工具包]}]
Returns:
res: List[Dict]
"""
labels_true = []
labels_pred = []
        # predict in batches of batch_size
texts_batch = []
        # tqdm shows the progress
for i in tqdm(range(len(texts))):
line = texts[i]
texts_batch.append(line)
if len(texts_batch) == self.batch_size:
# true_y
labels_true_batch = [tsb.get("y", []) for tsb in texts_batch]
# pred_y
texts_batch_x = [tsb.get("x", {}) for tsb in texts_batch]
labels_predict_batch = self.predict(texts_batch_x)
                # handle y_true longer than length_max
for i in range(len(labels_predict_batch)):
labels_pred += labels_predict_batch[i]
labels_true += labels_true_batch[i][:len(labels_predict_batch[i])]
texts_batch = []
        # leftovers smaller than batch_size
if texts_batch:
# true_y
labels_true_batch = [tsb.get("y", []) for tsb in texts_batch]
# pred_y
texts_batch_x = [tsb.get("x", {}) for tsb in texts_batch]
labels_predict_batch = self.predict(texts_batch_x)
            # handle y_true longer than length_max
for i in range(len(labels_predict_batch)):
labels_pred += labels_predict_batch[i]
labels_true += labels_true_batch[i][:len(labels_predict_batch[i])]
        # compute evaluation metrics and the printable report
        metrics, report = metrics_report(y_true=labels_true, y_pred=labels_pred)
        return metrics, report
if __name__ == '__main__':
from macadam.conf.path_config import path_root
    # model directory and initialization
path_dir = os.path.join(path_root, "data", "model", "CRF_2020")
mp = ModelPredict(path_dir)
    # paths of the train/dev data
path_train = os.path.join(path_ner_people_1998, "train.json")
path_dev = os.path.join(path_ner_people_1998, "dev.json")
# path_train = os.path.join(path_ner_clue_2020, "ner_clue_2020.train")
# path_dev = os.path.join(path_ner_clue_2020, "ner_clue_2020.dev")
# sample
texts = [{"text": "你的一腔热情,别人只道是狼心狗肺"
"一切往事,皆为序章"
"never say never"
"那就这样了吧"
"再见,北京"
,
"texts2": []}]
res = mp.predict(texts)
print(res)
# evaluate
datas_dev = txt_read(path_dev)
print("evaluate开始!")
datas_dev = [json.loads(dd.strip()) for dd in datas_dev]
metrics, report = mp.evaluate(datas_dev)
print("evaluate结束!")
print(json.dumps(metrics, ensure_ascii=False, indent=4))
print(report)
# input
while True:
print("请输入 text1:")
text = input()
texts = {"text": text,
"texts2": []}
res = mp.predict([texts])
print(res)
mm = 0
|
{"hexsha": "ac436a6fb278c65f188130a0ac980840b4c77ce5", "size": 9899, "ext": "py", "lang": "Python", "max_stars_repo_path": "macadam/sl/s00_predict.py", "max_stars_repo_name": "ASJ161220001/Macadam", "max_stars_repo_head_hexsha": "5237381459db5909f392737e33618a16c1e0452a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-11T07:33:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-11T07:33:12.000Z", "max_issues_repo_path": "macadam/sl/s00_predict.py", "max_issues_repo_name": "payiz-asj/Macadam", "max_issues_repo_head_hexsha": "5237381459db5909f392737e33618a16c1e0452a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "macadam/sl/s00_predict.py", "max_forks_repo_name": "payiz-asj/Macadam", "max_forks_repo_head_hexsha": "5237381459db5909f392737e33618a16c1e0452a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1264822134, "max_line_length": 103, "alphanum_fraction": 0.5886453177, "include": true, "reason": "import numpy", "num_tokens": 2686}
|
[STATEMENT]
lemma component_antisym: "[| F \<le> G; G \<le> F |] ==> F = (G :: 'a program)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>F \<le> G; G \<le> F\<rbrakk> \<Longrightarrow> F = G
[PROOF STEP]
apply (simp (no_asm_use) add: component_eq_subset)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Init G \<subseteq> Init F \<and> Acts F \<subseteq> Acts G \<and> AllowedActs G \<subseteq> AllowedActs F; Init F \<subseteq> Init G \<and> Acts G \<subseteq> Acts F \<and> AllowedActs F \<subseteq> AllowedActs G\<rbrakk> \<Longrightarrow> F = G
[PROOF STEP]
apply (blast intro!: program_equalityI)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 278, "file": null, "length": 3}
|
[STATEMENT]
lemma intro_Let_refine[refine2]:
assumes "f x \<le> \<Down>R M'"
shows "Let x f \<le> \<Down>R M'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Let x f \<le> \<Down> R M'
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
f x \<le> \<Down> R M'
goal (1 subgoal):
1. Let x f \<le> \<Down> R M'
[PROOF STEP]
by auto
|
{"llama_tokens": 160, "file": "Refine_Monadic_Refine_Basic", "length": 2}
|
# Table of Contents
<p><div class="lev1 toc-item"><a data-toc-modified-id="Exponentiality-of-the-lifetimes-distributions-1" href="#Exponentiality-of-the-lifetimes-distributions"><span class="toc-item-num">1 </span>Exponentiality of the lifetimes distributions</a></div><div class="lev2 toc-item"><a data-toc-modified-id="load-data-11" href="#load-data"><span class="toc-item-num">1.1 </span>load data</a></div><div class="lev2 toc-item"><a data-toc-modified-id="Calculate-first-and-second-moments-of-lifetime-distributions-12" href="#Calculate-first-and-second-moments-of-lifetime-distributions"><span class="toc-item-num">1.2 </span>Calculate first and second moments of lifetime distributions</a></div><div class="lev1 toc-item"><a data-toc-modified-id="Visual-check-of-the-lifetime-distribution-2" href="#Visual-check-of-the-lifetime-distribution"><span class="toc-item-num">2 </span>Visual check of the lifetime distribution</a></div>
```python
%matplotlib inline
```
# Exponentiality of the lifetimes distributions
## load data
```python
! ls -lrt ../lifetime_ar/*/ | tail
```
-rw-r--r-- 1 lustelzl tb 66000 Jan 27 13:26 lifetime_dt10_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 48600 Jan 27 13:26 lifetime_dt50_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 72750 Jan 27 13:26 lifetime_dt1_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 71475 Jan 27 13:26 lifetime_dt2_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 69225 Jan 27 13:26 lifetime_dt5_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 57900 Jan 27 13:26 lifetime_dt25_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 35775 Jan 27 13:26 lifetime_dt100_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 4575 Jan 27 13:26 lifetime_dt1000_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 9750 Jan 27 13:26 lifetime_dt500_ala_c_md.txt
-rw-r--r-- 1 lustelzl tb 2100 Jan 27 13:26 lifetime_dt2500_ala_c_md.txt
```python
lag_time_l = [1, 2, 5, 10, 20, 25, 50, 100, 200, 500, 1000, 2500, 5000]
```
```python
remd_dt_hdw_d = {}
remd_dt_cdw_d = {}
for k in lag_time_l:
#print k
#wa_tau_a_ar = np.column_stack((v.weight.values, v.weight.values / sum(v.weight.values), v.wait_T.values))
w_tau_a_ar = np.genfromtxt("../lifetime_ar/helix/lifetime_dt{}_ala_h.txt".format(k))
remd_dt_hdw_d[k] = w_tau_a_ar
w_tau_a_ar = np.genfromtxt("../lifetime_ar/coil/lifetime_dt{}_ala_h.txt".format(k))
#np.savetxt("lifetime_ar/coil/lifetime_dt{}_ala_h.txt".format(k), wa_tau_a_ar)
remd_dt_cdw_d[k] = w_tau_a_ar
```
```python
md_dt_hdw_d = {}
md_dt_cdw_d = {}
for k in lag_time_l:
w_tau_a_ar = np.genfromtxt("../lifetime_ar/helix/lifetime_dt{}_ala_h_md.txt".format(k))
md_dt_hdw_d[k] = w_tau_a_ar
ar = np.genfromtxt("../lifetime_ar/coil/lifetime_dt{}_ala_c_md.txt".format(k))
md_dt_cdw_d[k] = ar
```
We should also load the lifetime distributions presented in Figure $.
## Calculate first and second moments of lifetime distributions
The lifetimes are scaled by $w_i$ so that they represent the waiting time for a single transition event.
\begin{equation}
\tau_i = \frac{\tilde{\tau}_i}{w_i}
\end{equation}
$\tau_i$ enters the lifetime distribution with weight $w_i$
\begin{equation}
\langle \tau \rangle = \frac{\sum_i w_i \tau_i}{\sum_i w_i} = \frac{\sum_i \tilde{\tau}_i}{\sum_i w_i}
\end{equation}
This is equivalent to the definition of $k_{mn} = N_{mn}/t_n $. Entering the lifetimes with a weight $w_i$ thus ensures that the mean lifetime is equal to the inverse rate coefficient.
\begin{equation}
\langle \tau^2 \rangle = \frac{\sum_i w_i \tau_i^2}{\sum_i w_i} = \frac{\sum_i \tilde{\tau}_i^2 / w_i}{\sum_i w_i}
\end{equation}
Calculating the variance.
\begin{equation}
\langle \tau^2 \rangle - \langle \tau \rangle^2 = \langle (\tau - \langle \tau \rangle )^2 \rangle = \frac{\sum_i w_i (\tau_i - \langle \tau \rangle)^2}{\sum_i w_i} = \frac{\sum_i w_i \left( \frac{\tilde{\tau}_i}{w_i} - \langle \tau \rangle \right)^2}{\sum_i w_i}
\end{equation}
```python
def check_exp_unw(tau_a, w_a, return_mom=False):
"""
    The variance is not weighted; this should give the wrong result (kept for comparison).
"""
#_av = np.average( tau_a / w_a, weights=w_a)
_av = np.sum(tau_a) / np.sum(w_a)
_var = np.sum( (tau_a - _av)**2 / (np.sum(w_a)) )
exp = _var/ _av**2
if return_mom:
return exp, (_av, _var)
else:
return exp
def check_exp_w(tau_a, w_a, return_mom=False):
"""
Calculate weighted mean and variance for the lifetimes
"""
#_av = np.average( tau_a / w_a, weights=w_a)
_av = np.sum(tau_a) / np.sum(w_a)
_var = np.sum( w_a*(tau_a/w_a - _av)**2) / (np.sum(w_a) )
exp = _var/ _av**2
if return_mom:
return exp, (_av, _var)
else:
return exp
```
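As a quick sanity check (an editor's addition, not part of the original analysis): with unit weights `check_exp_w` reduces to the plain sample moments, and for exponentially distributed lifetimes the variance-to-squared-mean ratio should be close to one.
```python
# editor's sketch: exponential lifetimes with w_i = 1 should give a ratio ~ 1
tau_demo = np.random.exponential(scale=100.0, size=50000)
w_demo = np.ones_like(tau_demo)
print(check_exp_w(tau_demo, w_demo))  # expected: close to 1.0
```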
```python
tf = 1.0/ 1000.0
```
```python
fig, ax = plt.subplots(1,2, figsize=(6,3))
for k, v in remd_dt_hdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0])
lg1_remd, = ax[0].plot(int(k)*tf, _exp, "s", c=cl[1])
for k, v in remd_dt_cdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0])
lg2_remd, = ax[1].plot(int(k)*tf, _exp, "s", c=cl[3])
for k, v in md_dt_hdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0])
ax[0].plot(int(k)*tf, _exp, "o", c=cl[0])
for k, v in md_dt_cdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0])
ax[1].plot(int(k)*tf, _exp, "o", c=cl[2])
ax[0].legend([lg1_remd], [r'REMD $\mathregular{\tau_h}$'],
loc=3, handletextpad=-0.1, borderaxespad=0.1,
frameon=True)
ax[1].legend([ lg2_remd], [r'REMD $\mathregular{\tau_c}$'],
loc=3,handletextpad=-0.1, borderaxespad=0.1,
frameon=True)
for a in ax.flat:
a.loglog()
#a.set_ylim(10**-1, 10**1)
a.plot([10**-3, 10**1], [1]*2, "-")
a.set_xlim(10**-3.1, 10**1)
a.set_ylabel(r"$\mathregular{< \tau^2> - <\tau>^2 / <\tau>^2}$",
fontsize=12)
a.set_xlabel("$\mathregular{\Delta t \, [ns]}$", fontsize=12)
a.tick_params(axis='both', which='major', labelsize=12)
fig.tight_layout()
```
```python
fig, ax = plt.subplots(1,2, figsize=(6,3))
for k, v in remd_dt_hdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0], return_mom=True)
lg1_remd, = ax[0].plot(int(k)*tf, _exp[1][1], "s", c=cl[1])
for k, v in remd_dt_cdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0], return_mom=True)
lg2_remd, = ax[1].plot(int(k)*tf, _exp[1][1], "s", c=cl[3])
for k, v in md_dt_hdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0], return_mom=True)
ax[0].plot(int(k)*tf, _exp[1][1], "o", c=cl[0])
for k, v in md_dt_cdw_d.items():
_exp = check_exp_w(v[:,2], v[:,0], return_mom=True)
ax[1].plot(int(k)*tf, _exp[1][1], "o", c=cl[2])
ax[0].legend([lg1_remd], [r'REMD $\mathregular{\tau_h}$'],
loc=3, handletextpad=-0.1, borderaxespad=0.1,
frameon=True)
ax[1].legend([ lg2_remd], [r'REMD $\mathregular{\tau_c}$'],
loc=3,handletextpad=-0.1, borderaxespad=0.1,
frameon=True)
for a in ax.flat:
a.loglog()
a.set_ylim(10**3, 10**9)
#a.plot([10**-3, 10**1], [1]*2, "-")
a.set_xlim(10**-3.1, 10**1)
a.set_ylabel(r"$\mathregular{\langle \tau^2 \rangle - \langle \tau^2 \rangle \, [ns^2]}$",
fontsize=12)
a.set_xlabel("$\mathregular{\Delta t \, [ns]}$", fontsize=12)
a.tick_params(axis='both', which='major', labelsize=12)
fig.tight_layout()
```
# Visual check of the lifetime distribution
* need to compare to MD
```python
from kinetics.ala_kinetics import *
from kinetics.waiting_time import *
```
```python
ala_t_factor = 1.0/ 1000.0
_blog = np.logspace(-3,1, 5000)
```
```python
mh_1 = remd_dt_hdw_d[1][:,2] > 0
mh_50 = remd_dt_hdw_d[50][:,2] > 0
```
```python
fit_plot_cdf??
```
```python
print np.max(remd_dt_hdw_d[1][:,2][mh_1] / remd_dt_hdw_d[1][:,0][mh_1])
print np.var(remd_dt_hdw_d[1][:,2][mh_1] / remd_dt_hdw_d[1][:,0][mh_1]), \
check_exp_w(remd_dt_hdw_d[1][:,2][mh_1], remd_dt_hdw_d[1][:,0][mh_1], return_mom=True)[1][1]
```
2977.0
91677.5419681 43746.9958621
```python
np.max(md_dt_hdw_d[1][:,2])
print np.var(md_dt_hdw_d[1][:,2])
```
31651.5224742
```python
fig, ax = plt.subplots(1,2, figsize=(15,3))
ax[0], b, l = fit_plot_cdf(ax[0], remd_dt_hdw_d[1][:,2][mh_1] / remd_dt_hdw_d[1][:,0][mh_1] * ala_t_factor ,
weights=remd_dt_hdw_d[1][:,0][mh_1],
bins=_blog)
ax[1], b, l = fit_plot_cdf(ax[1], md_dt_hdw_d[1][:,2] * ala_t_factor ,
bins=_blog)
for a in ax:
a.semilogx()
```
```python
mc_1 = remd_dt_cdw_d[1][:,2] > 0
```
```python
print np.max(remd_dt_cdw_d[1][:,2][mc_1] / remd_dt_cdw_d[1][:,0][mc_1])
print np.var(remd_dt_cdw_d[1][:,2][mc_1] / remd_dt_cdw_d[1][:,0][mc_1]), \
check_exp_w(remd_dt_cdw_d[1][:,2][mc_1], remd_dt_cdw_d[1][:,0][mc_1], return_mom=True)[1][1]
```
4527.0
237747.650485 97716.1784696
```python
np.max(md_dt_cdw_d[1][:,2])
print np.var(md_dt_cdw_d[1][:,2])
```
68072.3526156
```python
fig, ax = plt.subplots(1,2, figsize=(18,4))
ax[0], b, l = fit_plot_cdf(ax[0], remd_dt_cdw_d[1][:,2][mc_1] / remd_dt_cdw_d[1][:,0][mc_1] * ala_t_factor ,
weights=remd_dt_cdw_d[1][:,0][mc_1],
bins=_blog)
ax[1], b, l = fit_plot_cdf(ax[1], md_dt_cdw_d[1][:,2] * ala_t_factor ,
bins=_blog)
for a in ax:
a.semilogx()
```
```python
fig, ax = plt.subplots(figsize=(10,3))
_ = fit_plot_cdf(ax, remd_dt_hdw_d[50][:,2][mh_50] / remd_dt_hdw_d[50][:,0][mh_50] * ala_t_factor ,
weights=remd_dt_hdw_d[50][:,0][mh_50],
bins=_blog)
ax.semilogx()
```
```python
```
```python
```
|
{"hexsha": "1695fb3c825a8731ba1d122d97cebbc78b7e730b", "size": 100861, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "ala2-lag-time/from-syn-trj/tau_var/.ipynb_checkpoints/ala_expon-checkpoint.ipynb", "max_stars_repo_name": "lukas-stelzl/kinetics-remd", "max_stars_repo_head_hexsha": "b915729f7bc069091738d008d75c29f432a2f797", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-07-20T19:04:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-27T14:18:33.000Z", "max_issues_repo_path": "ala2-lag-time/from-syn-trj/tau_var/.ipynb_checkpoints/ala_expon-checkpoint.ipynb", "max_issues_repo_name": "lukas-stelzl/kinetics-remd", "max_issues_repo_head_hexsha": "b915729f7bc069091738d008d75c29f432a2f797", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-02-09T14:59:02.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-09T14:59:02.000Z", "max_forks_repo_path": "ala2-lag-time/from-syn-trj/tau_var/.ipynb_checkpoints/ala_expon-checkpoint.ipynb", "max_forks_repo_name": "lukas-stelzl/kinetics-remd", "max_forks_repo_head_hexsha": "b915729f7bc069091738d008d75c29f432a2f797", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-07-06T12:31:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-13T06:34:33.000Z", "avg_line_length": 158.8362204724, "max_line_length": 26342, "alphanum_fraction": 0.8771576724, "converted": true, "num_tokens": 3541}
|
#include "config/ServerConfig.hpp" // IWYU pragma: associated
#include "log/Logger.hpp"
#include "storage/path.hpp"
#include <boost/asio.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/websocket.hpp>
#include <cstdlib>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <streambuf>
#include <string>
namespace boostander {
namespace config {
namespace fs = std::filesystem; // from <filesystem>
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
ServerConfig::ServerConfig(const fs::path& workdir) : workdir_(workdir) { loadConf(); }
void ServerConfig::print() const {
LOG(INFO) << "address: " << address_.to_string() << '\n'
<< "port: " << wsPort_ << '\n'
<< "threads: " << threads_;
}
void ServerConfig::loadConf() {
address_ = net::ip::make_address("127.0.0.1");
wsPort_ = static_cast<unsigned short>(8080);
threads_ = 1;
}
} // namespace config
} // namespace boostander
|
{"hexsha": "0c007cc9ed0a84ea706d8a62a38d9e489661cfa3", "size": 1250, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/config/ServerConfig.cpp", "max_stars_repo_name": "derofim/boost_server_example", "max_stars_repo_head_hexsha": "b524adbf842faa75fd5f5d46486dff605b96bfde", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/config/ServerConfig.cpp", "max_issues_repo_name": "derofim/boost_server_example", "max_issues_repo_head_hexsha": "b524adbf842faa75fd5f5d46486dff605b96bfde", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/config/ServerConfig.cpp", "max_forks_repo_name": "derofim/boost_server_example", "max_forks_repo_head_hexsha": "b524adbf842faa75fd5f5d46486dff605b96bfde", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.25, "max_line_length": 87, "alphanum_fraction": 0.6608, "num_tokens": 322}
|
"""
''Author'': Atsushi Yamagishi
''File Name'': ellison_sim.py
''License'': MIT license
I thank Daisuke Oyama for his guidance and helpful advice.
This program simulates the stochastic evolution model of
G. Ellison (1993). The class of games it can handle is
symmetric games with n strategies.
"""
from __future__ import division
import random
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.animation as animation
import networkx as nx
"""
"project_3d_to_simplex" is taken from plot_simplex.py by "oyamad"
https://gist.github.com/oyamad/7a11edb8f8e8e24bcf0c
"""
# Returns the position of dots on the simplex
def project_3d_to_simplex(points_ndarray):
x = np.empty((2, len(points_ndarray)))
x[:] = \
(points_ndarray[:, 0] + 2*points_ndarray[:, 2])*sqrt(3)/3, \
points_ndarray[:, 0]
return x
class Player:
    def __init__(self, how_many_actions, init=None):
        self.num_of_actions = how_many_actions
        self.init_action(init)
    def init_action(self, init=None):
        if init is not None:
            # init is an int specifying the initial action
            self.action = init
        else:
            possible_actions = range(self.num_of_actions)
            self.action = random.choice(possible_actions)
            # the initial action is randomly chosen.
class ellison:
# This class "inherits" the class "Player" defined above
    def __init__(self, network=nx.cycle_graph(10), n=1,
                 payoffs=[[6, 0, 0], [5, 7, 5], [0, 5, 8]], init=None):
        """
        The default payoffs are those of a 3*3 coordination game.
        n = how far away the players you play the game with can be
        payoffs = the payoff matrix of the game
        network = the network you want to analyze; use a NetworkX graph,
        e.g. nx.cycle_graph(6)
        """
self.players = \
[Player(len(payoffs), init) for i in range(nx.number_of_nodes(network))]
# "players" is a list consisting of "player"
self.payoffs = payoffs
self.num_actions = len(payoffs) # the number of actions
self.N = nx.number_of_nodes(network) # The number of players
self.n = n
self.network = network
self.adj_matrix = nx.adjacency_matrix(network)
# actions players can take
self.actions = range(len(payoffs))
def show_action_profile(self):
# shows the current action profile
action_profile = [self.players[i].action for i in range(self.N)]
proportions = np.empty(self.num_actions)
for i in range(self.num_actions):
proportion_of_i = action_profile.count(i) / float(self.N)
proportions[i] = proportion_of_i
print (action_profile,proportions)
def update_rational(self): # function used when a player is "rational"
# pick a player which can change the action
d = random.choice(range(self.N))
        # computing the shortest_path_length for every pair of players;
        # dict() makes this work whether networkx returns a dict or an iterator
        s_path = dict(nx.shortest_path_length(self.network))
        s_path_from_d = s_path[d]
        # you cannot play a game with players farther away than n
        for i in list(s_path_from_d.keys()):
            if s_path_from_d[i] > self.n:
                del s_path_from_d[i]
        del s_path_from_d[d]  # can't play a game with yourself
        neighbors = list(s_path_from_d.keys())  # whom you play the game with
# neighbors' action profile
nei_actions = [self.players[i].action for i in neighbors]
# computing the ratio of players taking a certain action in the nei
proportions = np.empty(self.num_actions)
for i in range(len(self.payoffs)):
proportion_of_i = nei_actions.count(i) / float(len(nei_actions))
proportions[i] = proportion_of_i
# computing the matrix which contains expected payoffs of each action
expected_payoffs = np.dot(self.payoffs, proportions)
# determine the action so that
# it is the best response to the action profile of the community
self.players[d].action = np.argmax(expected_payoffs)
def update_irrational(self): # function used when a player is irrational
d = random.choice(range(self.N))
self.players[d].action = random.choice(self.actions)
# action is randomly chosen because he is irrational
def play(self, X=10000, epsilon=0):
"""
        X is the number of repetitions
        epsilon = the probability of a player acting "irrationally"
"""
self.show_action_profile() # show the initial action profile
for i in range(X):
if random.uniform(0, 1) > epsilon:
self.update_rational()
else:
self.update_irrational()
self.show_action_profile()
# show the action profile at the end of the game
def initialize_action_profile(self): # initialize players' actions
for i in self.players:
            i.init_action(None)
def visualize_the_network(self):
nx.draw(self.network)
plt.show()
def draw_histogram(self, x=1000, y=100, epsilon=0):
"""
x: How many times you repeat the "set" of games
y: How many games constitute one set of games
"""
result_box = []
    for i in range(x):
        for _ in range(y):
if random.uniform(0, 1) > epsilon:
self.update_rational()
else:
self.update_irrational()
action_profile = [self.players[i].action for i in range(self.N)]
proportions = np.empty(self.num_actions)
for i in range(self.num_actions):
proportion_of_i = action_profile.count(i) / float(self.N)
proportions[i] = proportion_of_i
# finding out the most popular strategy
result_box.append(proportions.argmax())
# Initializing the profile to go on to the next game
self.initialize_action_profile()
fig, ax = plt.subplots()
ax.hist(result_box)
plt.show()
def draw_scatter2(self,x=1000, epsilon=0.1): # only for 2*2 games
fig, ax = plt.subplots()
ax.set_xlim([0.0, 1.1])
ax.set_ylim([0.0, 1.1])
plt.xlabel("0")
plt.ylabel("1")
action_profile = [self.players[i].action for i in range(self.N)]
# the proportion of action1
profile = []
initial_state = action_profile.count(0) / float(self.N)
initial_dot = plt.scatter(initial_state, 1 - initial_state, c="red")
profile.append([initial_dot])
for i in range(x):
if random.uniform(0, 1) > epsilon:
self.update_rational()
else:
self.update_irrational()
current_profile = [self.players[s].action for s in range(self.N)]
state = current_profile.count(0) / float(self.N)
dot = plt.scatter(state, 1- state, c="red")
profile.append([dot])
ani = animation.ArtistAnimation(fig, profile, interval=1, repeat_delay=1000)
plt.show()
def draw_scatter3(self, x=1000, epsilon=0.1):
# Drawing the triangle (= simplex)
vertices= np.array([[sqrt(3)/3, 1], [0, 0], [2*sqrt(3)/3, 0]])
triangle = tri.Triangulation(vertices[:, 0], vertices[:, 1])
fig,ax = plt.subplots()
# ax.set_xlim(0, 2*sqrt(3)/3)
# ax.set_ylim(0, 1)
ax.triplot(triangle)
ax.set_axis_off()
# ax.set_xlim(0, 2*sqrt(3)/3)
# ax.set_ylim(0, 1)
ax.text(0, 0, '1')
ax.text(sqrt(3)/3, 1, '0')
ax.text(2*sqrt(3)/3, 0, '2')
action_profile = [self.players[i].action for i in range(self.N)]
state_lists = np.empty((x, self.num_actions))
for m in range(x):
for i in range(self.num_actions): # computing the state
current_profile = [self.players[s].action for s in range(self.N)]
proportion_of_i = current_profile.count(i) / float(self.N)
state_lists[m][i] = proportion_of_i
# proceed the game
if random.uniform(0, 1) > epsilon:
self.update_rational()
else:
self.update_irrational()
states_on_simplex = project_3d_to_simplex(state_lists)
# Plot the scatter.
ims = []
# Converting s.t. we can use Artist animation
for i in range(x):
im = plt.scatter(states_on_simplex[0][i], states_on_simplex[1][i], c="blue")
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=1, repeat_delay=1000)
plt.show()
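# A minimal usage sketch (illustrative only; the parameter values below are
# not taken from Ellison's paper):
#
#   model = ellison(network=nx.cycle_graph(20), n=2)
#   model.play(X=5000, epsilon=0.05)                 # 5000 updates, 5% noise
#   model.draw_histogram(x=100, y=50, epsilon=0.05)  # long-run distribution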
|
{"hexsha": "25a5ec8eca28b952af85228b853983aec9fd3f72", "size": 8730, "ext": "py", "lang": "Python", "max_stars_repo_path": "ellison_sim.py", "max_stars_repo_name": "haru110jp/StochEvolution", "max_stars_repo_head_hexsha": "4207aa97cca2d7249b0be115f32202dbfaa9679c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ellison_sim.py", "max_issues_repo_name": "haru110jp/StochEvolution", "max_issues_repo_head_hexsha": "4207aa97cca2d7249b0be115f32202dbfaa9679c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ellison_sim.py", "max_forks_repo_name": "haru110jp/StochEvolution", "max_forks_repo_head_hexsha": "4207aa97cca2d7249b0be115f32202dbfaa9679c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.375, "max_line_length": 88, "alphanum_fraction": 0.6093928981, "include": true, "reason": "import numpy,import networkx", "num_tokens": 2092}
|
from audioop import minmax
from math import prod
from mss.preprocessing.preprocesssing import MinMaxNormalizer
import numpy as np
import matplotlib.pyplot as plt
# from auto_encoder_vanilla import VariationalAutoEncoder
from mss.models.auto_encoder import AutoEncoder
from mss.models.atrain import load_fsdd
import librosa, librosa.display
from scipy.io import wavfile
from scipy.signal import wiener
import museval
import musdb
output_dir = "track_output"
estimates_dir = "track_output"
mus_train = musdb.DB(root="databases/database",subsets="train", split='train',download=False,is_wav=False)
def estimate_and_evaluate(track):
# generate your estimates
estimates = {
'other': track.audio,
'accompaniment': track.audio
}
# Evaluate using museval
scores = museval.eval_mus_track(
track, estimates, output_dir=output_dir
)
# print nicely formatted mean scores
print(scores)
# return estimates as usual
return estimates
estimate_and_evaluate(mus_train[0])
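# A hedged sketch (not in the original script): the same evaluation can be
# run over every training track, with museval printing one summary per track.
#
#   for track in mus_train:
#       estimate_and_evaluate(track)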
|
{"hexsha": "06f4bca3b4a5cd857d196d310440e620c5bad4d0", "size": 1025, "ext": "py", "lang": "Python", "max_stars_repo_path": "mss/postprocessing/postprocessing.py", "max_stars_repo_name": "DiegoLigtenberg/Workspace-MasterThesis-MSS", "max_stars_repo_head_hexsha": "e8183031b6223051049f48e0da2bc2824e60239e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mss/postprocessing/postprocessing.py", "max_issues_repo_name": "DiegoLigtenberg/Workspace-MasterThesis-MSS", "max_issues_repo_head_hexsha": "e8183031b6223051049f48e0da2bc2824e60239e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mss/postprocessing/postprocessing.py", "max_forks_repo_name": "DiegoLigtenberg/Workspace-MasterThesis-MSS", "max_forks_repo_head_hexsha": "e8183031b6223051049f48e0da2bc2824e60239e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2820512821, "max_line_length": 106, "alphanum_fraction": 0.7668292683, "include": true, "reason": "import numpy,from scipy", "num_tokens": 240}
|
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class NavigationVel2DEnv(gym.Env):
"""2D navigation problems, as described in [1]. The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/maml_examples/point_env_randgoal.py
At each time step, the 2D agent takes an action (its velocity, clipped in
[-0.1, 0.1]), and receives a penalty equal to its L2 distance to the goal
position (ie. the reward is `-distance`). The 2D navigation tasks are
generated by sampling goal positions from the uniform distribution
on [-0.5, 0.5]^2.
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
"""
def __init__(self, task={}):
super(NavigationVel2DEnv, self).__init__()
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32)
self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)
self._task = task
self._goal = task.get('goal', np.zeros(2, dtype=np.float32))
self._state = np.zeros(2, dtype=np.float32)
self.clip_position = True
self.update_vel = True
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_tasks(self, num_tasks):
goals = self.np_random.uniform(-5., 5., size=(num_tasks, 2))
tasks = [{'goal': goal} for goal in goals]
return tasks
def reset_task(self, task):
self._task = task
self._goal = task['goal']
def reset(self, env=True):
self._state = np.zeros(2, dtype=np.float32)
self._vel = np.zeros(2, dtype=np.float32)
return np.concatenate([self._state, self._vel])
def step(self, action):
# action = np.clip(action, -0.1, 0.1)
self._state = self._state + action
if self.clip_position:
self._state = np.clip(self._state, -10, 10)
next_obs = np.concatenate([self._state, self._vel])
x = self._state[0] - self._goal[0]
y = self._state[1] - self._goal[1]
reward = -np.sqrt(x ** 2 + y ** 2) - 0.01 * np.linalg.norm(action)
# Update velocity (i.e., adding previous action as input)
if self.update_vel:
self._vel = action
return next_obs, reward, False, self._task
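# A minimal usage sketch (illustrative only, not part of the original file):
#
#   env = NavigationVel2DEnv()
#   env.reset_task(env.sample_tasks(1)[0])   # draw one goal, make it current
#   obs = env.reset()
#   for _ in range(100):
#       obs, reward, done, info = env.step(env.action_space.sample())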
|
{"hexsha": "b602453eeeb7479291cccc392934a30be39b96ab", "size": 2509, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl/envs/navigation_vel.py", "max_stars_repo_name": "dkkim93/cavia", "max_stars_repo_head_hexsha": "d83f44b8443f4d202396b827e13eced2cfb40b11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rl/envs/navigation_vel.py", "max_issues_repo_name": "dkkim93/cavia", "max_issues_repo_head_hexsha": "d83f44b8443f4d202396b827e13eced2cfb40b11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl/envs/navigation_vel.py", "max_forks_repo_name": "dkkim93/cavia", "max_forks_repo_head_hexsha": "d83f44b8443f4d202396b827e13eced2cfb40b11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3623188406, "max_line_length": 119, "alphanum_fraction": 0.6353128737, "include": true, "reason": "import numpy", "num_tokens": 703}
|
function region = p00_region ( problem )
%*****************************************************************************80
%
%% P00_REGION returns the name of the integration region for any problem.
%
% Discussion:
%
% I thought I was going to use this idea a lot, but most of my test
% regions are boxes.
%
% BALL
% the interior of a 2D circle,
% the interior of a 3D sphere,
% the interior of an ND sphere.
%
% BOX
% a 1D finite line segment,
% a 2D finite rectangle,
% a 3D box,
% an ND box.
%
% SIMPLEX
% a 2D triangle,
% a 3D tetrahedron,
% an ND simplex.
% The "unit simplex" in ND is the set of nonnegative points X
% such that sum ( X(1:N) ) <= 1.
%
% SPACE
% a 1D infinite line,
%    a 2D infinite plane,
% a 3D space,
% an ND space.
%
% SPHERE
% the circumference of a 2D circle,
% the surface of a 3D sphere,
% the surface of an ND sphere.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 18 March 2007
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer PROBLEM, the number of the desired test problem.
%
% Output, string REGION, the name of the integration region.
%
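%  Example:
%
%    region = p00_region ( 5 );
%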
if ( problem == 1 )
region = p01_region ( );
elseif ( problem == 2 )
region = p02_region ( );
elseif ( problem == 3 )
region = p03_region ( );
elseif ( problem == 4 )
region = p04_region ( );
elseif ( problem == 5 )
region = p05_region ( );
elseif ( problem == 6 )
region = p06_region ( );
elseif ( problem == 7 )
region = p07_region ( );
elseif ( problem == 8 )
region = p08_region ( );
elseif ( problem == 9 )
region = p09_region ( );
elseif ( problem == 10 )
region = p10_region ( );
elseif ( problem == 11 )
region = p11_region ( );
elseif ( problem == 12 )
region = p12_region ( );
elseif ( problem == 13 )
region = p13_region ( );
elseif ( problem == 14 )
region = p14_region ( );
elseif ( problem == 15 )
region = p15_region ( );
elseif ( problem == 16 )
region = p16_region ( );
elseif ( problem == 17 )
region = p17_region ( );
elseif ( problem == 18 )
region = p18_region ( );
elseif ( problem == 19 )
region = p19_region ( );
elseif ( problem == 20 )
region = p20_region ( );
elseif ( problem == 21 )
region = p21_region ( );
elseif ( problem == 22 )
region = p22_region ( );
elseif ( problem == 23 )
region = p23_region ( );
elseif ( problem == 24 )
region = p24_region ( );
elseif ( problem == 25 )
region = p25_region ( );
elseif ( problem == 26 )
region = p26_region ( );
elseif ( problem == 27 )
region = p27_region ( );
elseif ( problem == 28 )
region = p28_region ( );
elseif ( problem == 29 )
region = p29_region ( );
elseif ( problem == 30 )
region = p30_region ( );
elseif ( problem == 31 )
region = p31_region ( );
elseif ( problem == 32 )
region = p32_region ( );
else
fprintf ( 1, '\n' );
fprintf ( 1, 'P00_REGION - Fatal error!\n' );
fprintf ( 1, ' Illegal problem number = %d\n', problem );
error ( 'P00_REGION - Fatal error!' );
end
return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/quadrature_test/p00_region.m"}
|
module QRupdate
using LinearAlgebra
export qraddcol, qraddrow, qrdelcol, csne
"""
Add a column to a QR factorization without using Q.
`R = qraddcol(A,R,v)` adds the m-vector `v` to the QR factorization of the
m-by-n matrix `A` without using `Q`. On entry, `R` is a dense n-by-n upper
triangular matrix such that `R'*R = A'*A`.
`R = qraddcol(A,R,v,β)` is similar to the above, except that the
routine updates the QR factorization of
```
[A; β I], and R'*R = (A'*A + β^2*I).
```
`A` should have fewer columns than rows. `A` and `v` may be sparse or
dense. On exit, `R` is the (n+1)-by-(n+1) factor corresponding to
```
    Anew = [ A      v    ],   Rnew = [ R   u     ],   Rnew'Rnew = Anew'Anew.
           [ beta*I 0    ]           [ 0   gamma ]
           [ 0      beta ]
```
The new column `v` is assumed to be nonzero.
If `A` has no columns yet, input `A = []`, `R = []`.
"""
function qraddcol(A::AbstractMatrix{T}, Rin::AbstractMatrix{T}, a::Vector{T}, β::T = zero(T)) where {T}
m, n = size(A)
anorm = norm(a)
anorm2 = anorm^2
β2 = β^2
if β != 0
anorm2 = anorm2 + β2
anorm = sqrt(anorm2)
end
if n == 0
return reshape([anorm], 1, 1)
end
R = UpperTriangular(Rin)
c = A'a # = [A' β*I 0]*[a; 0; β]
u = R'\c
unorm2 = norm(u)^2
d2 = anorm2 - unorm2
if d2 > anorm2 #DISABLE 0.01*anorm2 # Cheap case: γ is not too small
γ = sqrt(d2)
else
z = R\u # First approximate solution to min ||Az - a||
r = a - A*z
c = A'r
if β != 0
c = c - β2*z
end
du = R'\c
dz = R\du
z += dz # Refine z
# u = R*z # Original: Bjork's version.
u += du # Modification: Refine u
r = a - A*z
γ = norm(r) # Safe computation (we know gamma >= 0).
if !iszero(β)
γ = sqrt(γ^2 + β2*norm(z)^2 + β2)
end
end
# This seems to be faster than concatenation, ie:
# [ Rin u
# zeros(1,n) γ ]
Rout = zeros(T, n+1, n+1)
Rout[1:n,1:n] .= R
Rout[1:n,n+1] .= u
Rout[n+1,n+1] = γ
return Rout
end
"""
Add a row and update a Q-less QR factorization.
`qraddrow(R, a)` returns the triangular part of a QR factorization of `[A; a]`, where `A = QR` for some `Q`. The argument `a` should be a row
vector.
"""
function qraddrow(R::AbstractMatrix{T}, a::AbstractMatrix{T}) where {T}
n = size(R,1)
@inbounds @simd for k in 1:n
G, r = givens( R[k,k], a[k], 1, 2 )
B = G * [ reshape(R[k,k:n], 1, n-k+1)
reshape(a[:,k:n], 1, n-k+1) ]
R[k,k:n] = B[1,:]
a[ k:n] = B[2,:]
end
return R
end
"""
Delete the k-th column and update a Q-less QR factorization.
`R = qrdelcol(R,k)` deletes the k-th column of the upper-triangular
`R` and restores it to upper-triangular form. On input, `R` is an n x
n upper-triangular matrix. On output, `R` is an n-1 x n-1 upper
triangle.
18 Jun 2007: First version of QRdelcol.m.
Michael Friedlander (mpf@cs.ubc.ca) and
Michael Saunders (saunders@stanford.edu)
To allow for R being sparse,
we eliminate the k-th row of the usual
Hessenberg matrix rather than its subdiagonals,
as in Reid's Bartel-Golub LU update and also
the Forrest-Tomlin update.
(But Matlab will still be pretty inefficient.)
18 Jun 2007: R is now the exact size on entry and exit.
30 Dec 2015: Translate to Julia.
"""
function qrdelcol(R::AbstractMatrix{T}, k::Int) where {T}
m = size(R,1)
R = R[:,1:m .!= k] # Delete the k-th column
n = size(R,2) # Should have m=n+1
for j in k:n # Forward sweep to reduce k-th row to zeros
G, y = givens(R[j+1,j], R[k,j], 1, 2)
R[j+1,j] = y
if j<n && G.s != 0
@inbounds @simd for i in j+1:n
tmp = G.c*R[j+1,i] + G.s*R[k,i]
R[k,i] = G.c*R[k,i] - conj(G.s)*R[j+1,i]
R[j+1,i] = tmp
end
end
end
R = R[1:m .!= k, :] # Delete the k-th row
return R
end
"""
x, r = csne(R, A, b)
solves the least-squares problem
minimize ||r||_2, r := b - A*x
using the corrected semi-normal equation approach described by Bjork (1987). Assumes that `R` is upper triangular.
"""
function csne(Rin::AbstractMatrix{T}, A::AbstractMatrix{T}, b::Vector{T}) where {T}
R = UpperTriangular(Rin)
q = A'b
x = R' \ q
bnorm2 = sum(abs2, b)
xnorm2 = sum(abs2, x)
d2 = bnorm2 - xnorm2
x = R \ x
# Apply one step of iterative refinement.
r = b - A*x
q = A'r
dx = R' \ q
dx = R \ dx
x += dx
r = b - A*x
return (x, r)
end
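# A minimal usage sketch (illustrative; sizes are arbitrary):
#
#   using LinearAlgebra
#   A = randn(10, 4); R = Matrix(qr(A).R)
#   a = randn(10)
#   R1 = qraddcol(A, R, a)              # triangular factor of [A a]
#   x, r = csne(R1, [A a], randn(10))   # Q-less least-squares solve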
end # module
|
{"hexsha": "ad162181fddc28d95cd017a7039a56edbd751066", "size": 4902, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/QRupdate.jl", "max_stars_repo_name": "mpf/QRupdate.jl", "max_stars_repo_head_hexsha": "a506e38b096788bfd028c4e61409719ddc05c22b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-05-19T00:26:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T03:17:35.000Z", "max_issues_repo_path": "src/QRupdate.jl", "max_issues_repo_name": "mpf/QRupdate.jl", "max_issues_repo_head_hexsha": "a506e38b096788bfd028c4e61409719ddc05c22b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-11-13T19:40:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T04:55:02.000Z", "max_forks_repo_path": "src/QRupdate.jl", "max_forks_repo_name": "mpf/QRupdate.jl", "max_forks_repo_head_hexsha": "a506e38b096788bfd028c4e61409719ddc05c22b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-07-12T02:16:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-05T21:40:36.000Z", "avg_line_length": 27.3854748603, "max_line_length": 143, "alphanum_fraction": 0.5199918401, "num_tokens": 1641}
|
from __future__ import print_function, division
import os
import cv2
import csv
import torch
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms, utils
import bateauxCsvPos
from torchstat import stat
from torchvision.utils import make_grid
from lxml import etree
from tkinter import *
from PIL import Image, ImageTk
import time
Win = Tk()
affichage = Frame(Win, width = 512, height = 512)
imageCanvas = Canvas(affichage, width = 512, height = 512)
imageIndice = StringVar()
nombreTrain = StringVar()
imageIndice.set('1')
buttonFrame = Frame(Win)
ABSOLUTE = 'D:/Documents/Prepa/TIPE'
pathBateaux = ABSOLUTE + "/data/MASATI-v2/ship"
pathXml = ABSOLUTE + "/data/MASATI-v2/ship_labels"
pathMer = ABSOLUTE + "/data/MASATI-v2/water"
pathModels = ABSOLUTE + "/Models/"
listeBateaux = os.listdir(pathBateaux)
listeMer = os.listdir(pathMer)
NUMBER = 750
TOTAL = 800
bateauxCsvPos.generateCsv(NUMBER, TOTAL)
print(".csv généré")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ImageData(Dataset):
def __init__(self, csvtruc, transform = None):
self.taille = 0
self.transform = transform
self.images = []
self.resultats = []
with open(csvtruc, 'r') as fichier:
truc = csv.reader(fichier, delimiter = ',')
for ligne in truc:
if ligne != []:
self.taille += 1
image, xmin, xmax, ymin, ymax = ligne[0].split(',')
self.images.append(self.transform(cv2.imread(image)).float())
self.resultats.append(torch.Tensor([float(xmin) / 512, float(xmax) / 512, float(ymin) / 512, float(ymax) / 512]))
def __getitem__(self, index):
"""image = self.transform(cv2.imread(self.images[index])).float()"""
image = self.images[index]
resultat = self.resultats[index]
return image, resultat
def __len__(self):
return len(self.resultats)
set_images = ImageData("D:/Documents/Prepa/TIPE/bateauxPos.csv", transforms.Compose([transforms.ToTensor(),]))
imagesLoader = torch.utils.data.DataLoader(set_images, batch_size = 8, shuffle = True, pin_memory=True, num_workers=0)
print("Set de train chargé")
set_images_val = ImageData("D:/Documents/Prepa/TIPE/bateauxPosVal.csv", transforms.Compose([transforms.ToTensor(),]))
imagesLoader_val = torch.utils.data.DataLoader(set_images_val, batch_size = 8, shuffle = True, pin_memory=True, num_workers=0)
print("Validation set loaded")
def load():
global set_images
global imagesLoader
set_images = ImageData("D:/Documents/Prepa/TIPE/bateauxPos.csv", transforms.Compose([transforms.ToTensor()]))
imagesLoader = torch.utils.data.DataLoader(set_images, batch_size = 32, shuffle = True, pin_memory=True, num_workers=0)
    print('Images loaded.')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.epochs = 0
self.conv = nn.Sequential(
nn.Conv2d(3, 16, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(16),
nn.Conv2d(16, 64, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 128, 3, 1, 1),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
)
self.classifier = nn.Sequential(
nn.Linear(2048, 512),
nn.ReLU(),
nn.Linear(512, 10),
nn.ReLU(),
nn.Linear(10, 2),
#nn.Sigmoid()
)
def forward(self, x):
x = self.conv(x)
x = self.review(x)
x = self.classifier(x)
return x
def review(self, x):
return x.view(-1, 2048)
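        # The flattened size 2048 follows from the conv stack: a 512x512 input
        # passes through 7 MaxPool2d(2, 2) layers (512 / 2**7 = 4), leaving
        # 128 feature maps of size 4x4, i.e. 128 * 4 * 4 = 2048 values.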
"""
for i in enumerate(imagesLoader):
M = i
I = M[1][0][0].to(device)
J = net(I.unsqueeze(0))
print(J.shape)
"""
def train(number):
for epoch in range(number):
running_loss = 0.0
for i, data in enumerate(imagesLoader, 0):
input, expected = data[0].to(device, non_blocking=True), data[1].to(device, non_blocking=True)
optimizer.zero_grad()
outputs = net(input)
loss = criterion(outputs, expected)
loss.backward()
optimizer.step()
running_loss += loss.item()
print('Epoch : ' + str(epoch) + ' loss : ' + str(running_loss))
net.epochs += 1
afficherPreview()
imageCanvas.update()
def testPos():
global NUMBER
global TOTAL
    global set_images
prec_Tr = 0
prec_Val = 0
with open(ABSOLUTE + '/bateauxPos.csv', 'r') as fichier:
truc = csv.reader(fichier, delimiter = ',')
i = 0
for ligne in truc:
if ligne != []:
                image, res = set_images[i][0].unsqueeze(0).to(device), set_images[i][1]
                xmin = res[0]
                xmax = res[1]
                ymin = res[2]
                ymax = res[3]
result = net(image).detach().cpu()[0]
res = ((abs(xmin - result[0])) + (abs(xmax - result[1])) + (abs(ymin - result[2])) + (abs(ymax - result[3]))) / 4
prec_Tr += res / NUMBER
i += 1
with open(ABSOLUTE + '/bateauxPosVal.csv', 'r') as fichier:
truc = csv.reader(fichier, delimiter = ',')
i = 0
for ligne in truc:
if ligne != []:
_, xmin, xmax, ymin, ymax = ligne[0].split(',')
xmin = float(xmin) / 512
                xmax = float(xmax) / 512
                ymin = float(ymin) / 512
                ymax = float(ymax) / 512
image = set_images_val[i][0].unsqueeze(0).to(device)
result = net(image).detach().cpu()[0]
res = ((abs(xmin - result[0])) + (abs(xmax - result[1])) + (abs(ymin - result[2])) + (abs(ymax - result[3]))) / 4
prec_Val += res / (TOTAL - NUMBER)
i += 1
return prec_Tr, prec_Val
def saveModel(nom):
torch.save(net.state_dict(), pathModels + nom)
def loadModel(nom):
net.load_state_dict(torch.load(pathModels + nom))
def show(layer, number, imageN):
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
    # Net keeps its convolutions inside the nn.Sequential "net.conv", so the
    # attributes net.conv1 ... net.conv9 do not exist; hook the Conv2d modules
    # of the Sequential instead.
    conv_layers = [m for m in net.conv if isinstance(m, nn.Conv2d)]
    for (k, conv) in enumerate(conv_layers, 1):
        conv.register_forward_hook(get_activation('conv' + str(k)))
data = set_images[imageN][0].to(device, non_blocking=True)
output = net(data.unsqueeze(0))
act = activation['conv' + str(layer)].squeeze().cpu()
fir, axarr = plt.subplots(number)
for idx in range(number):
axarr[idx].imshow(act[idx])
plt.show()
def show2(number):
    conv_layers = [m for m in net.conv if isinstance(m, nn.Conv2d)]
    kernels = conv_layers[0].weight.detach().cpu()
fig, axarr = plt.subplots(number)
for idx in range(number):
axarr[idx].imshow(kernels[idx].squeeze())
plt.show()
def show3():
    conv_layers = [m for m in net.conv if isinstance(m, nn.Conv2d)]
    kernels = conv_layers[1].weight.detach().cpu().clone()
kernels = kernels - kernels.min()
kernels = kernels / kernels.max()
print(kernels.shape)
img = make_grid(kernels)
plt.imshow(img.permute(1, 2, 0))
plt.show()
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
net = Net()
net.load_state_dict(torch.load(pathModels + 'Tf1'))
for param in net.parameters():
param.requires_grad = False
net.classifier = nn.Sequential(
nn.Linear(2048, 512),
nn.ReLU(),
nn.Linear(512, 4),
nn.Sigmoid()
)
net.to(device, non_blocking=True)
criterion = nn.MSELoss()
optimizer = optim.SGD(list(net.parameters()), lr = 0.001, momentum = 0.9)
## Display (GUI)
def corrigerStr(chaine):
if len(chaine) >= 4:
return chaine
else:
return corrigerStr('0' + chaine)
def incr():
global imageIndice
res = (int(imageIndice.get()) + 1) % TOTAL
if res == 0:
res = 1
imageIndice.set(str(res))
afficherPreview()
def decr():
global imageIndice
res = (int(imageIndice.get()) - 1) % TOTAL
if res == 0:
res = 1
imageIndice.set(str(res))
afficherPreview()
def afficherPreview():
global imageIndice
indiceStr = corrigerStr(imageIndice.get())
imageI = Image.open(pathBateaux + '/s' + indiceStr + '.png')
initI = np.array(imageI)
init = initI.transpose((2, 0, 1))
init = torch.tensor(init).float()
init = init.to(device)
init = init.unsqueeze(0)
result = net(init).detach().cpu().numpy()
xmin, xmax, ymin, ymax = result[0]
imageCanvas.delete('all')
image1 = Image.fromarray(initI)
photo1 = ImageTk.PhotoImage(image1)
imageCanvas.create_image(0, 0, anchor = NW, image = photo1)
imageCanvas.image = photo1
imageCanvas.create_rectangle(int(xmin * 512), int(ymin * 512), int(xmax * 512), int(ymax * 512), outline = 'red', width = 3)
def gotrain():
nombre = int(nombreEntry.get())
train(nombre)
def setTrain():
imageIndice.set(str(1))
afficherPreview()
def setVal():
imageIndice.set(str(TOTAL - NUMBER))
afficherPreview()
incrButton = Button(buttonFrame, command = incr, text = "Next")
decrButton = Button(buttonFrame, command = decr, text = 'Previous')
nombreEntry = Entry(buttonFrame, textvariable = nombreTrain)
trainButton = Button(buttonFrame, command = gotrain, text = 'Train!')
goToTrainButton = Button(buttonFrame, command = setTrain, text = 'go to train')
goToValButton = Button(buttonFrame, command = setVal, text = 'go to val')
decrButton.pack(side = LEFT)
incrButton.pack(side = LEFT)
nombreEntry.pack(side = LEFT)
trainButton.pack(side = LEFT)
goToTrainButton.pack(side = LEFT)
goToValButton.pack(side = LEFT)
imageCanvas.pack(side = TOP)
affichage.pack(side = TOP)
buttonFrame.pack(side = TOP)
#loadModel('Pos2')
Win.mainloop()
|
{"hexsha": "ed7a1ff1f015903d3c25b9ef34de129c80fe36fc", "size": 10989, "ext": "py", "lang": "Python", "max_stars_repo_path": "bateauxPosTf.py", "max_stars_repo_name": "Sup3Legacy/TIPE", "max_stars_repo_head_hexsha": "7e01cef869183c4d609c45d5fcf0bb371a9579f5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bateauxPosTf.py", "max_issues_repo_name": "Sup3Legacy/TIPE", "max_issues_repo_head_hexsha": "7e01cef869183c4d609c45d5fcf0bb371a9579f5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bateauxPosTf.py", "max_forks_repo_name": "Sup3Legacy/TIPE", "max_forks_repo_head_hexsha": "7e01cef869183c4d609c45d5fcf0bb371a9579f5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-28T06:07:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-28T06:07:17.000Z", "avg_line_length": 29.6199460916, "max_line_length": 133, "alphanum_fraction": 0.615979616, "include": true, "reason": "import numpy", "num_tokens": 2974}
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "sparse.h"
#include <Eigen/SparseQR>
template<typename MatrixType,typename DenseMat>
int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 150)
{
eigen_assert(maxRows >= maxCols);
typedef typename MatrixType::Scalar Scalar;
int rows = internal::random<int>(1,maxRows);
int cols = internal::random<int>(1,maxCols);
double density = (std::max)(8./(rows*cols), 0.01);
A.resize(rows,cols);
dA.resize(rows,cols);
initSparse<Scalar>(density, dA, A,ForceNonZeroDiag);
A.makeCompressed();
int nop = internal::random<int>(0, internal::random<double>(0,1) > 0.5 ? cols/2 : 0);
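  // Make some columns multiples of others so that the generated matrix is
  // occasionally rank deficient; this exercises SparseQR's rank detection.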
for(int k=0; k<nop; ++k)
{
int j0 = internal::random<int>(0,cols-1);
int j1 = internal::random<int>(0,cols-1);
Scalar s = internal::random<Scalar>();
A.col(j0) = s * A.col(j1);
dA.col(j0) = s * dA.col(j1);
}
// if(rows<cols) {
// A.conservativeResize(cols,cols);
// dA.conservativeResize(cols,cols);
// dA.bottomRows(cols-rows).setZero();
// }
return rows;
}
template<typename Scalar> void test_sparseqr_scalar()
{
typedef SparseMatrix<Scalar,ColMajor> MatrixType;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMat;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
MatrixType A;
DenseMat dA;
DenseVector refX,x,b;
SparseQR<MatrixType, COLAMDOrdering<int> > solver;
generate_sparse_rectangular_problem(A,dA);
b = dA * DenseVector::Random(A.cols());
solver.compute(A);
if(internal::random<float>(0,1)>0.5f)
solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern do not change.
if (solver.info() != MySuccess)
{
std::cerr << "sparse QR factorization failed\n";
exit(0);
return;
}
x = solver.solve(b);
if (solver.info() != MySuccess)
{
std::cerr << "sparse QR factorization failed\n";
exit(0);
return;
}
VERIFY_IS_APPROX(A * x, b);
//Compare with a dense QR solver
ColPivHouseholderQR<DenseMat> dqr(dA);
refX = dqr.solve(b);
VERIFY_IS_EQUAL(dqr.rank(), solver.rank());
if(solver.rank()==A.cols()) // full rank
VERIFY_IS_APPROX(x, refX);
// else
// VERIFY((dA * refX - b).norm() * 2 > (A * x - b).norm() );
// Compute explicitly the matrix Q
MatrixType Q, QtQ, idM;
Q = solver.matrixQ();
  //Check ||Q * Q' - I ||
QtQ = Q * Q.adjoint();
idM.resize(Q.rows(), Q.rows()); idM.setIdentity();
VERIFY(idM.isApprox(QtQ));
// Q to dense
DenseMat dQ;
dQ = solver.matrixQ();
VERIFY_IS_APPROX(Q, dQ);
}
void test_sparseqr()
{
for(int i=0; i<g_repeat; ++i)
{
CALL_SUBTEST_1(test_sparseqr_scalar<double>());
CALL_SUBTEST_2(test_sparseqr_scalar<std::complex<double> >());
}
}
|
{"hexsha": "e1d01c4474bb161ad9e2b3e93b14a4a898e2aa5f", "size": 3098, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "A4-paper-sheet-detection-and-cropping/Header_files/eigen/test/sparseqr.cpp", "max_stars_repo_name": "satvik007/Scanner_OP", "max_stars_repo_head_hexsha": "c146f67e3851cd537d62989842abfee7d34de2c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "A4-paper-sheet-detection-and-cropping/Header_files/eigen/test/sparseqr.cpp", "max_issues_repo_name": "satvik007/Scanner_OP", "max_issues_repo_head_hexsha": "c146f67e3851cd537d62989842abfee7d34de2c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "A4-paper-sheet-detection-and-cropping/Header_files/eigen/test/sparseqr.cpp", "max_forks_repo_name": "satvik007/Scanner_OP", "max_forks_repo_head_hexsha": "c146f67e3851cd537d62989842abfee7d34de2c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-05-10T10:14:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-10T10:14:27.000Z", "avg_line_length": 28.953271028, "max_line_length": 112, "alphanum_fraction": 0.6591349258, "num_tokens": 948}
|
import numpy as np
from .sound_generator import SoundGenerator
class WaveTable(SoundGenerator):
def __init__(self, frame_rate, wave_table):
super().__init__(frame_rate)
self.wave_table = reshape_wave_table(wave_table)
def __iter__(self):
channels, frames = self.wave_table.shape
middle = int((frames + 1) / 2)
ramp = np.linspace(0, 1, middle, endpoint=True)
envelope = np.zeros((channels, frames), dtype=np.float32)
if frames % 2:
envelope[:,:middle] = ramp
envelope[:,middle:] = ramp[-2::-1]
else:
envelope[:,:middle] = ramp
envelope[:,middle:] = ramp[::-1]
table = envelope * self.wave_table
table += np.concatenate([ table[:,middle:], table[:,:middle] ], axis=1)
while True:
yield table
# yield self.wave_table
def reshape_wave_table(wave_table):
    # Ensure the table is 2-D with shape (channels, frames); a 1-D table is
    # treated as a single channel. (An ndarray's shape is always a tuple.)
    shape = wave_table.shape
    if len(shape) == 1:
        shape = (1,) + shape
    return wave_table.reshape(shape)
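# A minimal usage sketch (illustrative; SoundGenerator's constructor signature
# is assumed from this module):
#
#   cycle = np.sin(np.linspace(0, 2 * np.pi, 256, endpoint=False))
#   gen = WaveTable(44100, cycle.astype(np.float32))
#   frames = next(iter(gen))   # one cross-faded copy of the (1, 256) table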
|
{"hexsha": "3895c61b31e6cd2b24dd7a80a393f15ee27da438", "size": 1117, "ext": "py", "lang": "Python", "max_stars_repo_path": "ratchet/generator/wave_table.py", "max_stars_repo_name": "bracket/ratchet", "max_stars_repo_head_hexsha": "48c139ca040fe8318a39b61889a2ae6772bd5200", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-12-07T03:39:32.000Z", "max_stars_repo_stars_event_max_datetime": "2016-12-07T03:39:32.000Z", "max_issues_repo_path": "ratchet/generator/wave_table.py", "max_issues_repo_name": "bracket/ratchet", "max_issues_repo_head_hexsha": "48c139ca040fe8318a39b61889a2ae6772bd5200", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ratchet/generator/wave_table.py", "max_forks_repo_name": "bracket/ratchet", "max_forks_repo_head_hexsha": "48c139ca040fe8318a39b61889a2ae6772bd5200", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3863636364, "max_line_length": 79, "alphanum_fraction": 0.5917636526, "include": true, "reason": "import numpy", "num_tokens": 266}
|
module Core.FC
import Text.PrettyPrint.Prettyprinter
%default total
public export
FilePos : Type
FilePos = (Int, Int)
showPos : FilePos -> String
showPos (l, c) = show (l + 1) ++ ":" ++ show (c + 1)
public export
FileName : Type
FileName = String
||| A file context is a filename together with starting and ending positions
public export
data FC = MkFC FileName FilePos FilePos
| EmptyFC
export
Eq FC where
(==) (MkFC n s e) (MkFC n' s' e') = n == n' && s == s' && e == e'
(==) EmptyFC EmptyFC = True
(==) _ _ = False
export
file : FC -> FileName
file (MkFC fn _ _) = fn
file EmptyFC = ""
export
startPos : FC -> FilePos
startPos (MkFC _ s _) = s
startPos EmptyFC = (0, 0)
export
endPos : FC -> FilePos
endPos (MkFC _ _ e) = e
endPos EmptyFC = (0, 0)
-- Return whether a given file position is within the file context (assuming we're
-- in the right file)
export
within : FilePos -> FC -> Bool
within (x, y) (MkFC _ start end)
= (x, y) >= start && (x, y) <= end
within _ _ = False
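-- For example, `within (2, 3) (MkFC "Main.idr" (1, 0) (4, 0))` is True.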
-- Return whether a given line is on the same line as the file context (assuming
-- we're in the right file)
export
onLine : Int -> FC -> Bool
onLine x (MkFC _ start end)
= x >= fst start && x <= fst end
onLine _ _ = False
export
emptyFC : FC
emptyFC = EmptyFC
export
toplevelFC : FC
toplevelFC = MkFC "(toplevel)" (0, 0) (0, 0)
%name FC fc
export
Show FC where
show loc = file loc ++ ":" ++
showPos (startPos loc) ++ "--" ++
showPos (endPos loc)
export
Pretty FC where
pretty loc = pretty (file loc) <+> colon
<+> prettyPos (startPos loc) <+> pretty "--"
<+> prettyPos (endPos loc)
where
prettyPos : FilePos -> Doc ann
prettyPos (l, c) = pretty (l + 1) <+> colon <+> pretty (c + 1)
|
{"hexsha": "46d831e925afc6d25c9298c32fee5ee201121671", "size": 1781, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "idris2/src/Core/FC.idr", "max_stars_repo_name": "Qqwy/Idris2-Erlang", "max_stars_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idris2/src/Core/FC.idr", "max_issues_repo_name": "Qqwy/Idris2-Erlang", "max_issues_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idris2/src/Core/FC.idr", "max_forks_repo_name": "Qqwy/Idris2-Erlang", "max_forks_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2023809524, "max_line_length": 82, "alphanum_fraction": 0.6125772038, "num_tokens": 572}
|
function is_orthogonal(S, tol=1e-6)
n = LinearAlgebra.checksquare(S)
for i in 1:n
for j in 1:n
if i != j
s = LinearAlgebra.dot(S[:, i], S[:, j])
if abs(s) > tol
return false
end
end
end
end
return true
end
function _projcoef(A, i, v, w = A[i, :])
return LinearAlgebra.dot(v, w) / LinearAlgebra.dot(w, w)
end
function _proj(A, i, v)
w = A[i, :]
return w * _projcoef(A, i, v, w)
end
function orthogonalize(A)
for i in 2:size(A, 1)
row = A[i, :]
for j in 1:(i - 1)
row = row - _proj(A, j, row)
end
A[i, :] = row
end
return A
end
struct _OrthogonalMatrix end
struct _RowEchelonMatrix end
function __linsolve(A::Matrix{T}, b::Vector{T}, ::_OrthogonalMatrix) where {T}
return map(1:size(A, 1)) do i
return _projcoef(A, i, b)
end
end
function __linsolve(A::Matrix{T}, b::Vector{T}, ::_RowEchelonMatrix) where {T}
j = 0
return map(1:size(A, 1)) do i
while j < size(A, 2)
j += 1
if isone(A[i, j]) && all(k -> k == i || iszero(A[k, j]), 1:size(A, 1))
return b[j]
end
end
error("Not in row_echelon_form, cannot find for `$i`th entry.")
end
end
function _linsolve(A::Matrix{T}, b::Vector{T}, form) where {T}
x = __linsolve(A, b, form)
#@assert transpose(A) * x == b
return x
end
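# A small illustrative check (not part of the original file):
#
#   A = [1.0 0.0; 1.0 1.0]
#   B = orthogonalize(copy(A))   # Gram-Schmidt on the rows: B == [1 0; 0 1]
#   is_orthogonal(B')            # true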
|
{"hexsha": "1c3fe52d7172f397efd6825b6268888d4625273f", "size": 1496, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Certificate/Symmetry/utils.jl", "max_stars_repo_name": "jump-dev/SumOfSquares.jl", "max_stars_repo_head_hexsha": "c1350a7e7c910f542d4442c0869df502a555cfda", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2020-08-12T02:19:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T17:56:02.000Z", "max_issues_repo_path": "src/Certificate/Symmetry/utils.jl", "max_issues_repo_name": "jump-dev/SumOfSquares.jl", "max_issues_repo_head_hexsha": "c1350a7e7c910f542d4442c0869df502a555cfda", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2020-07-15T21:49:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T12:55:33.000Z", "max_forks_repo_path": "src/Certificate/Symmetry/utils.jl", "max_forks_repo_name": "jump-dev/SumOfSquares.jl", "max_forks_repo_head_hexsha": "c1350a7e7c910f542d4442c0869df502a555cfda", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-08-04T15:04:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-27T22:12:41.000Z", "avg_line_length": 23.746031746, "max_line_length": 82, "alphanum_fraction": 0.5180481283, "num_tokens": 488}
|
function [x, mx, sx] = standardise(x, dim, lim)
% STANDARDISE computes the zscore of a matrix along dimension dim
% has similar functionality to the Statistics Toolbox's ZSCORE function
%
% Use as
% x = standardise(x, dim)
%
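% Example (illustrative):
%   x = standardise(randn(100, 3), 1); % zero mean, unit std along dim 1
%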
% See also ZSCORE
% Copyright (C) 2009, Jan-Mathijs Schoffelen
%
% This file is part of FieldTrip, see http://www.fieldtriptoolbox.org
% for the documentation and details.
%
% FieldTrip is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% FieldTrip is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with FieldTrip. If not, see <http://www.gnu.org/licenses/>.
%
% $Id$
if nargin == 1,
dim = find(size(x)>1,1,'first');
end
if nargin == 3,
ft_error('third input argument is not used');
end
switch dim
case 1
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(ones(1,n),:,:,:,:,:,:,:);
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(ones(1,n),:,:,:,:,:,:,:);
case 2
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(:,ones(1,n),:,:,:,:,:,:);
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(:,ones(1,n),:,:,:,:,:,:);
case 3
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(:,:,ones(1,n),:,:,:,:,:);
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(:,:,ones(1,n),:,:,:,:,:);
case 4
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(:,:,:,ones(1,n),:,:,:,:);
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(:,:,:,ones(1,n),:,:,:,:);
case 5
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(:,:,:,:,ones(1,n),:,:,:);
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(:,:,:,:,ones(1,n),:,:,:);
case 6
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(:,:,:,:,:,ones(1,n),:,:);
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(:,:,:,:,:,ones(1,n),:,:);
case 7
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(:,:,:,:,:,:,ones(1,n),:);
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(:,:,:,:,:,:,ones(1,n),:);
case 8
n = size(x,dim);
mx = mean(x,dim);
x = x-mx(:,:,:,:,:,:,:,ones(1,n));
sx = sqrt(sum(abs(x).^2,dim)./n);
x = x./sx(:,:,:,:,:,:,:,ones(1,n));
otherwise
ft_error('dim too large, standardise currently supports dimensionality up to 8');
end
|
{"author": "spm", "repo": "spm12", "sha": "3085dac00ac804adb190a7e82c6ef11866c8af02", "save_path": "github-repos/MATLAB/spm-spm12", "path": "github-repos/MATLAB/spm-spm12/spm12-3085dac00ac804adb190a7e82c6ef11866c8af02/external/fieldtrip/connectivity/private/standardise.m"}
|
#!/usr/bin/env python3
import click
import os
import numpy as np
from collections import defaultdict
from itertools import chain
from scipy.spatial.distance import cosine
from scipy.stats import spearmanr
from tqdm import trange
from wikipedia2vec import Wikipedia2Vec
KORE_CATEGORIES = {
'it_companies': ['Apple Inc.', 'Google', 'Facebook', 'Microsoft', 'IBM'],
'celebrities': ['Angelina Jolie', 'Brad Pitt', 'Johnny Depp', 'Jennifer Aniston', 'Leonardo DiCaprio'],
'video_games': ['Grand Theft Auto IV', 'Quake (video game)', 'Deus Ex (series)', 'Guitar Hero (video game)', 'Max Payne'],
'tv_series': ['The Sopranos', 'The A-Team', 'Futurama', 'The Wire', 'Mad Men'],
'chuck_norris': ['Chuck Norris'],
}
@click.command()
@click.argument('data_dir', type=click.Path(exists=True))
@click.argument('model_file', type=click.Path(exists=True))
@click.option('-f', '--out-format', type=click.Choice(['csv', 'text']), default='text')
@click.option('--word-analogy/--no-word-analogy', default=True)
@click.option('--word-similarity/--no-word-similarity', default=True)
@click.option('--entity-similarity/--no-entity-similarity', default=True)
@click.option('--lowercase/--no-lowercase', default=True)
@click.option('--batch-size', default=1000)
@click.option('--analogy-vocab-size', default=None, type=int)
def main(data_dir, model_file, out_format, word_analogy, word_similarity, entity_similarity,
lowercase, batch_size, analogy_vocab_size):
model = Wikipedia2Vec.load(model_file)
results = []
if word_similarity:
base_dir = os.path.join(os.path.join(data_dir, 'word'), 'similarity')
for filename in os.listdir(base_dir):
if not filename.endswith('.txt'):
continue
oov_count = 0
with open(os.path.join(base_dir, filename)) as f:
gold = []
estimated = []
for line in f:
(w1, w2, val) = line.split()
val = float(val)
if lowercase:
(w1, w2) = (w1.lower(), w2.lower())
try:
v1 = model.get_word_vector(w1)
except KeyError:
oov_count += 1
continue
try:
v2 = model.get_word_vector(w2)
except KeyError:
oov_count += 1
continue
gold.append(val)
estimated.append(1.0 - cosine(v1, v2))
results.append((filename[:-4], spearmanr(gold, estimated)[0], oov_count))
if word_analogy:
if analogy_vocab_size is None:
target_words = [w.text for w in model.dictionary.words()]
else:
target_words = [w.text for w in sorted(model.dictionary.words(), key=lambda w: w.count,
reverse=True)[:analogy_vocab_size]]
word_emb = np.empty((len(target_words), model.syn0.shape[1]))
vocab = {}
for (n, word) in enumerate(target_words):
word_emb[n] = model.get_word_vector(word)
vocab[word] = n
word_emb = word_emb / np.linalg.norm(word_emb, 2, axis=1, keepdims=True)
base_dir = os.path.join(os.path.join(data_dir, 'word'), 'analogy')
for filename in os.listdir(base_dir):
with open(os.path.join(base_dir, filename)) as f:
(A_ind, B_ind, C_ind, D_ind) = ([], [], [], [])
oov_count = 0
for (n, line) in enumerate(f):
if not line.startswith(':'):
if lowercase:
indices = list(map(vocab.get, line.lower().split()))
else:
indices = list(map(vocab.get, line.split()))
if not all(i is not None for i in indices):
oov_count += 1
continue
(a_ind, b_ind, c_ind, d_ind) = indices
A_ind.append(a_ind)
B_ind.append(b_ind)
C_ind.append(c_ind)
D_ind.append(d_ind)
(A, B, C) = (word_emb[A_ind], word_emb[B_ind], word_emb[C_ind])
D = (B - A + C)
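                # 3CosAdd analogy: for each (a, b, c, ?), predict the missing
                # word as the one whose unit vector maximises the dot product
                # with b - a + c, excluding a, b and c themselves (see below).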
del A, B, C
predictions = []
for i in trange(0, D.shape[0], batch_size, desc=filename[:-4]):
D_batch = D[i:i+batch_size]
dot_ret = np.dot(word_emb, D_batch.T)
for (j, indices) in enumerate(zip(A_ind[i:i+batch_size], B_ind[i:i+batch_size],
C_ind[i:i+batch_size])):
dot_ret[indices, j] = float('-inf')
predictions.append(np.argmax(dot_ret, 0))
results.append((filename[:-4], np.mean(np.hstack(predictions) == D_ind), oov_count))
if entity_similarity:
category_mapping = {e: c for (c, l) in KORE_CATEGORIES.items() for e in l}
base_dir = os.path.join(os.path.join(data_dir, 'entity'), 'similarity')
for filename in os.listdir(base_dir):
with open(os.path.join(base_dir, filename)) as f:
if filename == 'KORE.txt':
data = defaultdict(list)
title = None
for line in f:
line = line.rstrip()
if line.startswith('\t'):
data[title].append(line[1:])
else:
title = line
kore_results = defaultdict(list)
oov_count = 0
for (title, title_list) in data.items():
try:
v1 = model.get_entity_vector(title)
except KeyError:
oov_count += len(title_list)
continue
estimated = []
for title2 in title_list:
try:
v2 = model.get_entity_vector(title2)
except KeyError:
oov_count += 1
continue
estimated.append(1.0 - cosine(v1, v2))
gold = list(reversed(range(len(estimated))))
kore_results[category_mapping[title]].append(spearmanr(gold, estimated)[0])
results.append((filename[:-4], np.mean(list(chain(*kore_results.values()))), oov_count))
else:
gold = []
estimated = []
oov_count = 0
for (n, line) in enumerate(f):
if n == 0:
continue
line = line.rstrip()
(_, _, title1, _, _, title2, score) = line.split('\t')
try:
v1 = model.get_entity_vector(title1.replace('_', ' '))
except KeyError:
oov_count += 1
continue
try:
v2 = model.get_entity_vector(title2.replace('_', ' '))
except KeyError:
oov_count += 1
continue
gold.append(float(score))
estimated.append(1.0 - cosine(v1, v2))
results.append((filename[:-4], spearmanr(gold, estimated)[0], oov_count))
if out_format == 'text':
for (name, score, oov_count) in results:
print('%s: ' % name)
print(' Spearman score: %.4f' % score)
print(' OOV instances: %d' % oov_count)
elif out_format == 'csv':
print('name,' + ','.join([o[0] for o in results]))
print('score,' + ','.join(['%.4f' % o[1] for o in results]))
print('oov,' + ','.join(['%d' % o[2] for o in results]))
if __name__ == '__main__':
main()
|
{"hexsha": "c0a539e0048e2e02268b47c9bd49e6e832b0e1ac", "size": 8324, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/intrinsic_eval.py", "max_stars_repo_name": "mattolson93/wikipedia2vec", "max_stars_repo_head_hexsha": "89d14e89d21bca05d4f81d2733a672aee3f0abce", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 744, "max_stars_repo_stars_event_min_datetime": "2018-05-27T16:42:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T13:37:11.000Z", "max_issues_repo_path": "scripts/intrinsic_eval.py", "max_issues_repo_name": "mattolson93/wikipedia2vec", "max_issues_repo_head_hexsha": "89d14e89d21bca05d4f81d2733a672aee3f0abce", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 59, "max_issues_repo_issues_event_min_datetime": "2018-08-03T21:10:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T11:23:18.000Z", "max_forks_repo_path": "scripts/intrinsic_eval.py", "max_forks_repo_name": "mattolson93/wikipedia2vec", "max_forks_repo_head_hexsha": "89d14e89d21bca05d4f81d2733a672aee3f0abce", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 86, "max_forks_repo_forks_event_min_datetime": "2018-08-02T16:46:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:25:30.000Z", "avg_line_length": 41.8291457286, "max_line_length": 126, "alphanum_fraction": 0.4794569918, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1734}
|
#!python
import yaml, argparse
import numpy as NP
from astropy.io import ascii, fits
from astropy.coordinates import SkyCoord
from astropy import units as U
from astroutils import geometry as GEOM
from astroutils import nonmathops as NMO
import astroutils
astroutils_path = astroutils.__path__[0]+'/'
if __name__ == '__main__':
## Parse input arguments
parser = argparse.ArgumentParser(description='Program to match input catalog positions with specified catalogs')
input_group = parser.add_argument_group('Input parameters', 'Input specifications')
    input_group.add_argument('-i', '--infile', dest='infile', default=astroutils_path+'examples/catalogops/catalog_match_parms.yaml', type=argparse.FileType('r'), required=False, help='File specifying input parameters')
args = vars(parser.parse_args())
with args['infile'] as parms_file:
parms = yaml.safe_load(parms_file)
dirinfo = parms['directory']
projectdir = dirinfo['projectdir']
outdir = projectdir
outfile = outdir + dirinfo['outfile'] + '.hdf5'
refcat = parms['refcat']
refcatfile = refcat['catfile']
reftable = ascii.read(refcatfile)
obj_colname = refcat['obj_colname']
ra_colname = refcat['RA_colname']
dec_colname = refcat['Dec_colname']
if refcat['RA_units'] == 'hms':
ra_units = U.hourangle
if (refcat['Dec_units'] == 'dms') or (refcat['Dec_units'] == 'deg'):
dec_units = U.deg
refRA = reftable[ra_colname]
refDec = reftable[dec_colname]
refObj = reftable[obj_colname]
refcoords = SkyCoord(refRA, refDec, unit=(ra_units, dec_units), equinox=refcat['epoch'], frame='icrs')
subsetinfo = parms['subset']
subparnames = subsetinfo['parmnames']
    select = NP.ones(len(reftable), dtype=bool)
if len(subparnames) > 0:
parmranges = subsetinfo['parmrange']
for i,prm in enumerate(subparnames):
subdat = reftable[prm]
            if NP.issubdtype(subdat.dtype, NP.floating) or NP.issubdtype(subdat.dtype, NP.integer):
select[NP.logical_or(subdat < parmranges[i][0], subdat > parmranges[i][1])] = False
else:
for prmstr in parmranges[i]:
if prmstr[0] == '!':
pstr = prmstr[1:]
select = NP.logical_and(select, NP.logical_not(NP.asarray([pstr in subdat[j] for j in range(len(subdat))])))
else:
pstr = prmstr
select = NP.logical_and(select, NP.asarray([pstr in subdat[j] for j in range(len(subdat))]))
select_ind = NP.where(select)[0]
select_reftable = reftable[select_ind]
select_refcoords = refcoords[select_ind]
radiocats = parms['radiocats']
matchinfo = {}
for radcatkey in radiocats:
if radiocats[radcatkey]['action']:
if radiocats[radcatkey]['searchrad'] is not None:
matchrad = radiocats[radcatkey]['searchrad']
else:
psfhwhm = 0.5 * radiocats[radcatkey]['psffwhm']
matchrad = NP.sqrt(radiocats[radcatkey]['poserr']**2 + psfhwhm**2)
min_fpeak = radiocats[radcatkey]['min_fpeak']
max_fpeak = radiocats[radcatkey]['max_fpeak']
min_fint = radiocats[radcatkey]['min_fint']
max_fint = radiocats[radcatkey]['max_fint']
fpeak = None
fint = None
if radcatkey.lower() == 'nvss':
hdulist = fits.open(radiocats[radcatkey]['catfile'])
ra_deg_radcat = hdulist[1].data['RA(2000)']
dec_deg_radcat = hdulist[1].data['DEC(2000)']
fpeak = 1e3 * hdulist[1].data['PEAK INT'] # mJy/beam
rmajax = hdulist[1].header['BM_MAJOR'] # restoring beam major axis in degrees
rminax = hdulist[1].header['BM_MINOR'] # restoring beam minor axis in degrees
fmajax = hdulist[1].data['MAJOR AX'] # fitted beam major axis in degrees
fminax = hdulist[1].data['MINOR AX'] # fitted beam minor axis in degrees
fint = fpeak * (fmajax * fminax) / (rmajax * rminax) # from NVSS catalog description document
elif radcatkey.lower() == 'first':
hdulist = fits.open(radiocats[radcatkey]['catfile'])
ra_deg_radcat = hdulist[1].data['RA']
dec_deg_radcat = hdulist[1].data['DEC']
fpeak = hdulist[1].data['FPEAK'] # mJy/beam
fint = hdulist[1].data['FINT'] # mJy
elif radcatkey.lower() == 'tgss':
hdulist = fits.open(radiocats[radcatkey]['catfile'])
ra_deg_radcat = hdulist[1].data['RA']
dec_deg_radcat = hdulist[1].data['DEC']
fpeak = hdulist[1].data['Peak_flux'] # mJy/beam
fint = hdulist[1].data['Total_flux'] # mJy
elif radcatkey.lower() == 'gleam':
hdulist = fits.open(radiocats[radcatkey]['catfile'])
ra_deg_radcat = hdulist[1].data['RAJ2000']
dec_deg_radcat = hdulist[1].data['DEJ2000']
fpeak = 1e3 * hdulist[1].data['peak_flux_{0:.0f}'.format(radiocats[radcatkey]['freq'])] # mJy/beam
fint = 1e3 * hdulist[1].data['int_flux_{0:.0f}'.format(radiocats[radcatkey]['freq'])] # mJy
elif radcatkey.lower() == 'mwa-eor':
hdulist = fits.open(radiocats[radcatkey]['catfile'])
ra_deg_radcat = hdulist[1].data['RAJ2000']
dec_deg_radcat = hdulist[1].data['DECJ2000']
fint = 1e3 * hdulist[1].data['S_{0:.0f}'.format(radiocats[radcatkey]['freq'])] # mJy
eps = 1e-10
if min_fpeak is None:
if fpeak is not None:
min_fpeak = NP.nanmin(NP.abs(fpeak)) - eps
if max_fpeak is None:
if fpeak is not None:
max_fpeak = NP.nanmax(NP.abs(fpeak)) + eps
if min_fint is None:
if fint is not None:
min_fint = NP.nanmin(NP.abs(fint)) - eps
if max_fint is None:
if fint is not None:
max_fint = NP.nanmax(NP.abs(fint)) + eps
if fpeak is not None:
ind_fpeak = NP.where(NP.logical_and(fpeak >= min_fpeak, fpeak <= max_fpeak))[0]
else:
ind_fpeak = NP.arange(ra_deg_radcat.size)
if fint is not None:
ind_fint = NP.where(NP.logical_and(fint >= min_fint, fint <= max_fint))[0]
else:
ind_fint = NP.arange(ra_deg_radcat.size)
ind_flux_cut = NP.intersect1d(ind_fpeak, ind_fint)
ra_deg_radcat = ra_deg_radcat[ind_flux_cut]
dec_deg_radcat = dec_deg_radcat[ind_flux_cut]
if fpeak is not None:
fpeak = fpeak[ind_flux_cut]
if fint is not None:
fint = fint[ind_flux_cut]
nnearest = radiocats[radcatkey]['nnearest']
maxmatches = radiocats[radcatkey]['maxmatches']
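            # GEOM.spherematch works in degrees; matchrad is in arcsec, hence the /3.6e3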
mref, mrad, d12 = GEOM.spherematch(select_refcoords.ra.deg, select_refcoords.dec.deg, ra_deg_radcat, dec_deg_radcat, matchrad=matchrad/3.6e3, nnearest=nnearest, maxmatches=maxmatches)
matchinfo[radcatkey] = {}
matchinfo[radcatkey]['radius'] = matchrad
matchinfo[radcatkey]['nnearest'] = nnearest
matchinfo[radcatkey]['maxmatches'] = maxmatches
if len(mref) > 0:
mref = NP.asarray(mref)
mrad = NP.asarray(mrad)
matchinfo[radcatkey]['freq'] = radiocats[radcatkey]['freq']
matchinfo[radcatkey]['iref'] = select_ind[mref]
matchinfo[radcatkey]['icat'] = ind_flux_cut[mrad]
matchinfo[radcatkey]['objname'] = refObj.data[select_ind[mref]].data
matchinfo[radcatkey]['refRA'] = select_refcoords.ra.deg[mref]
matchinfo[radcatkey]['refDec'] = select_refcoords.dec.deg[mref]
matchinfo[radcatkey]['catRA'] = ra_deg_radcat[mrad]
matchinfo[radcatkey]['catDec'] = dec_deg_radcat[mrad]
matchinfo[radcatkey]['dist'] = d12 * 3.6e3
if fpeak is not None:
matchinfo[radcatkey]['fpeak'] = fpeak[mrad]
if fint is not None:
matchinfo[radcatkey]['fint'] = fint[mrad]
if min_fpeak is not None:
matchinfo[radcatkey]['min_peak'] = min_fpeak
matchinfo[radcatkey]['max_peak'] = max_fpeak
if min_fint is not None:
matchinfo[radcatkey]['min_fint'] = min_fint
matchinfo[radcatkey]['max_fint'] = max_fint
NMO.save_dict_to_hdf5(matchinfo, outfile)
|
{"hexsha": "71180bf4f20f1129854187719146cc6f1e48b2e7", "size": 8863, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/match_catalogs.py", "max_stars_repo_name": "nithyanandan/AstroUtils", "max_stars_repo_head_hexsha": "97473f52d4247bb9c8507598899215d0662e8d6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-31T03:49:39.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-31T03:49:39.000Z", "max_issues_repo_path": "scripts/match_catalogs.py", "max_issues_repo_name": "nithyanandan/AstroUtils", "max_issues_repo_head_hexsha": "97473f52d4247bb9c8507598899215d0662e8d6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-11-18T01:45:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-30T12:26:50.000Z", "max_forks_repo_path": "scripts/match_catalogs.py", "max_forks_repo_name": "nithyanandan/AstroUtils", "max_forks_repo_head_hexsha": "97473f52d4247bb9c8507598899215d0662e8d6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-14T08:44:40.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-14T08:44:40.000Z", "avg_line_length": 46.6473684211, "max_line_length": 201, "alphanum_fraction": 0.5771183572, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2389}
|
# Copyright 2020 Jaime Tierney, Adam Luchies, and Brett Byram
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the license at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import time
import os
class Trainer():
def __init__(self, model, loss, optimizer, loader_train,
patience=None, scheduler=None,
loader_train_eval=None, loader_val=None, cuda=None,
logger=None, data_noise_gaussian=None, save_dir=None):
"""
"""
super().__init__()
self.model = model
self.loss = loss
self.optimizer = optimizer
self.scheduler=scheduler
self.patience = patience
self.loader_train = loader_train
self.loader_train_eval = loader_train_eval
self.loader_val = loader_val
self.cuda = cuda
self.logger = logger
self.data_noise_gaussian = data_noise_gaussian
self.save_dir = save_dir
def train_epoch(self):
""" Train model for one epoch
"""
self.model.train()
total_loss = 0
for batch_idx, data in enumerate(self.loader_train):
# add gaussian noise if enabled
if self.data_noise_gaussian:
X = data[0].numpy()
SNR = np.random.uniform(1, 10**2)
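                # normalize the noise to unit power, then scale it so X_power / noise_power equals the drawn SNR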
noise = np.random.randn(*X.shape)
noise_power = np.sum(np.sum(noise ** 2))
noise = noise / np.sqrt(noise_power)
X_power = np.sum(np.sum(X ** 2))
C = X_power / SNR
X_noise = X + noise * np.sqrt(C)
                data[0] = torch.from_numpy(X_noise.astype(np.float32))
inputs = data[0]
targets = data[1]
if self.cuda:
inputs = inputs.cuda()
targets = targets.cuda()
self.loss = self.loss.cuda()
self.optimizer.zero_grad()
outputs = self.model(inputs)
loss = self.loss(outputs, targets)
loss.backward()
self.optimizer.step()
# accumulate loss
total_loss += loss.data.item()
return total_loss / len(self.loader_train)
def compute_loss(self, dat_loader):
""" Compute model loss for provided data loader
"""
self.model.eval()
device = torch.device("cuda:0" if self.cuda else "cpu")
total_loss = 0
for batch_idx, data in enumerate(dat_loader):
# add gaussian noise
#if self.data_noise_gaussian:
# X = data[0].numpy()
# SNR = np.random.uniform(1, 10**2)
# noise = np.random.randn(*X.shape)
# noise_power = np.sum(np.sum(noise ** 2))
# noise = noise / np.sqrt(noise_power)
# X_power = np.sum(np.sum(X ** 2))
# C = X_power / SNR
# X_noise = X + noise * np.sqrt(C)
# data[0] = torch.from_numpy(np.float32( X_noise) )
inputs = data[0]
targets = data[1]
inputs = inputs.to(device)
targets = targets.to(device)
outputs = self.model(inputs)
loss = self.loss(outputs, targets)
# accumulate loss
total_loss += loss.data.item()
return total_loss / len(dat_loader)
def train(self):
""" Train the model
"""
# initial setup
epoch = 1
        loss_val_best = 100  # sentinel; assumes the first validation loss falls below this value
num_epochs_increased = 0
epoch_best = 1
logs = {}
# Perform training
while True:
# Run one iteration of SGD
t0 = time.time()
loss_train = self.train_epoch()
loss_train_eval = self.compute_loss(self.loader_train_eval)
loss_val = self.compute_loss(self.loader_val)
time_epoch = time.time() - t0
self.logger.add_entry( {'loss_train' : loss_train,
'loss_train_eval' : loss_train_eval,
'loss_val' : loss_val} )
            # run learning rate scheduler
if self.scheduler:
self.scheduler.step(loss_val)
# save logger info
if self.save_dir:
self.logger.append(os.path.join(self.save_dir, 'log.txt'))
# change in loss_val
d_loss_val = (loss_val-loss_val_best)/loss_val_best * 100
# display results
print('E: {:} / Train: {:.3e} / Valid: {:.3e} / Diff Valid: {:.2f}% / Diff Valid-Train: {:.1f}% / Time: {:.2f}'.format(epoch, loss_train_eval, loss_val, d_loss_val, (loss_val - loss_train_eval)/loss_train_eval*100, time_epoch))
# if validation loss improves
if d_loss_val < 0:
num_epochs_increased = 0
# record epoch and loss
epoch_best = epoch
loss_val_best = loss_val
# save model weights
if self.save_dir:
print('Validation loss improved. Saving model.')
torch.save(self.model.state_dict(), os.path.join(self.save_dir, 'model.dat'))
else:
num_epochs_increased = num_epochs_increased + 1
# stop training if we lose patience:
if num_epochs_increased > self.patience:
break
# advance epoch counter
epoch = epoch + 1
|
{"hexsha": "1cf3a9b7ef8149bb20568e70ab23d5e2e4232a7e", "size": 5928, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/trainer.py", "max_stars_repo_name": "VU-BEAM-Lab/DNNBeamforming", "max_stars_repo_head_hexsha": "e8ee8c1e57188a795816b119279ac2e60e5c5236", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-12T19:52:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T19:52:43.000Z", "max_issues_repo_path": "src/trainer.py", "max_issues_repo_name": "VU-BEAM-Lab/DNNBeamforming", "max_issues_repo_head_hexsha": "e8ee8c1e57188a795816b119279ac2e60e5c5236", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/trainer.py", "max_forks_repo_name": "VU-BEAM-Lab/DNNBeamforming", "max_forks_repo_head_hexsha": "e8ee8c1e57188a795816b119279ac2e60e5c5236", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.393442623, "max_line_length": 239, "alphanum_fraction": 0.5506072874, "include": true, "reason": "import numpy", "num_tokens": 1286}
|
% EEGTHRESH - reject trials with out-of-bounds channel values within a
% specified epoch time range.
%
% Usage:
% >> [Iin, Iout, newsignal, elec] = eegthresh( signal, frames, ...
% elecs, negthresh, posthresh, timerange, starttime,endtime);
%
% Required inputs:
% signal - 2-D data matrix [channels, frames*sweeps],
% or 3-D data matrix [channels, frames, sweeps]
% frames - number of points per epoch
% elecs - [int vector] electrode indices to reject on
% negthresh - minimum rejection threshold(s) in uV. This can be an array
% of values for individual electrodes. If fewer values than
% electrodes, the last value is used for the remaining
% electrodes.
% posthresh - maximum rejection threshold(s) in uV (same syntax as for
% negthresh)
% timerange - [mintime maxtime] time range limits of the signal
% starttime - Starting limit (in seconds or Hz) of range to perform
% rejection (same syntax as negthresh)
% endtime - Ending limit (in seconds or Hz) of range to perform
% rejection (same syntax as negthresh)
%
% Outputs:
% Iin - Indexes of epochs accepted
% Iout - Indexes of epochs rejected
% newsignal - input data after epoch rejection
% elec - electrode that triggered the rejections (array of 0s
% and 1s with the same number of columns as Iout
% and number of rows = number of electrodes).
%
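% Example (illustrative only; the data and thresholds below are hypothetical):
%   >> data = randn(32, 256*100);    % 32 channels, 100 epochs of 256 frames
%   >> [Iin, Iout, clean, elec] = eegthresh( data, 256, 1:32, ...
%                                   -100, 100, [-1 2], -1, 2);
%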
% Author: Arnaud Delorme, CNL / Salk Institute, 2001
%
% See also: POP_EEGTHRESH, EEGLAB
% Copyright (C) 2001 Arnaud Delorme, Salk Institute, arno@salk.edu
%
% This file is part of EEGLAB, see http://www.eeglab.org
% for the documentation and details.
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are met:
%
% 1. Redistributions of source code must retain the above copyright notice,
% this list of conditions and the following disclaimer.
%
% 2. Redistributions in binary form must reproduce the above copyright notice,
% this list of conditions and the following disclaimer in the documentation
% and/or other materials provided with the distribution.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
% ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
% LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
% CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
% SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
% INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
% CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
% ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
% THE POSSIBILITY OF SUCH DAMAGE.
function [Iin, Iout, newsignal, elec] = eegthresh( signal, pnts, electrodes, negthresh, posthresh, timerange, starttime, endtime)
if nargin < 8
help eegthresh;
return;
end
if starttime < timerange(1)
disp('eegthresh: starting point out of range, adjusted');
starttime = timerange(1);
end
if endtime > timerange(2)
disp('eegthresh: ending point out of range, adjusted');
endtime = timerange(2);
end
if isempty(electrodes)
electrodes = 1:size(signal,1);
end
% complete thresholds values if necessary
%----------------------------------------
if size(posthresh,2) < size(electrodes,2)
posthresh = [ posthresh posthresh(end)*ones(1,size(electrodes,2)-size(posthresh,2))];
end
if size(negthresh,2) < size(electrodes,2)
negthresh = [ negthresh negthresh(end)*ones(1,size(electrodes,2)-size(negthresh,2))];
end
% complete timeselect values if necessary
%----------------------------------------
if size(starttime,2) < size(electrodes,2)
starttime = [ starttime starttime(end)*ones(1,size(electrodes,2)-size(starttime,2))];
end
if size(endtime,2) < size(electrodes,2)
endtime = [ endtime endtime(end)*ones(1,size(electrodes,2)-size(endtime,2))];
end
% find the maximum for each trial
%--------------------------------
sweeps = size(signal(1,:),2)/pnts;
signal = reshape(signal(electrodes,:), size(electrodes(:),1), pnts, sweeps);
% reject the selected trials
%---------------------------
elec = zeros(size(electrodes(:),1), sweeps);
allelec = zeros(1, sweeps);
for indexe = 1:size(electrodes(:),1)
% transform the time range
% ------------------------
framelowlimit = max(1,floor((starttime(indexe)-timerange(1))/(timerange(2)-timerange(1))*(pnts-1))+1);
framehighlimit = floor((endtime(indexe) -timerange(1))/(timerange(2)-timerange(1))*(pnts-1))+1;
% remove outliers
% ---------------
sigtmp = squeeze(signal(indexe,framelowlimit:framehighlimit,:));
if size(signal,3) == 1, sigtmp = sigtmp'; end
sigmax = max(sigtmp, [], 1);
sigmin = min(sigtmp, [], 1);
elec(indexe,:) = ( sigmin < negthresh(indexe) ) | ( sigmax > posthresh(indexe) );
allelec = allelec | elec(indexe,:);
end
Iout = find( allelec == 1 );
Iin = find( allelec == 0 );
elec = elec(:,Iout);
% remove the rejected trials from the data
%------------------------------------------
newsignal = reshape(signal, size(signal,1), pnts, sweeps);
if ~isempty(Iin)
newsignal = newsignal(:,:,Iin);
if ndims(signal) == 2
newsignal = newsignal(:,:);
end
else
newsignal = [];
end
return;
|
{"author": "sccn", "repo": "eeglab", "sha": "36d3982a63cde83fb279ab465b7a026ec2807c0a", "save_path": "github-repos/MATLAB/sccn-eeglab", "path": "github-repos/MATLAB/sccn-eeglab/eeglab-36d3982a63cde83fb279ab465b7a026ec2807c0a/functions/sigprocfunc/eegthresh.m"}
|
#include <boost/hana/fwd/repeat.hpp>
|
{"hexsha": "02623d16682218b74dfeb1c85af254ce4a71db2d", "size": 37, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_hana_fwd_repeat.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_hana_fwd_repeat.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_hana_fwd_repeat.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 18.5, "max_line_length": 36, "alphanum_fraction": 0.7567567568, "num_tokens": 11}
|
'''
Doe (c) University of Manchester 2015
Doe is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: Pablo Carbonell
@description: SYNBIOCHEM design of experiments
'''
from os import path, mkdir, system, unlink
import shutil, glob
import sys, re
import argparse
from datetime import datetime
import pyRserve
import numpy as np
import doeopt
import brOrligos
import csv
import json
import random
sys.path.append('/mnt/SBC1/code/sbc-api')
import sbolutil as sbol
import sbcid
import iceutils
sys.path.append('/mnt/SBC1/code/sbc-viscad')
import viscad
ID_COUNTER = 1
def construct(f):
ct = []
for l in open(f):
m = l.rstrip().split('\t')
if m[0].startswith('#'):
continue
factor = m[0]
nlevels = int(m[1])
try:
positional = int(m[2])
except:
positional = 0
try:
pool = int(m[3])
except:
pool = 0
# Degeneration: higher levels go to 1
try:
deg = int(m[4])
except:
deg = 0
ct.append((factor, nlevels, positional, pool, deg))
return ct
def convert_construct(xct, AddBlankPromoter=False):
rid = {}
ct = []
for p in xct:
comp = xct[p]['component']
if comp == 'promoter':
sbcid = False
for l in xct[p]['levels']:
if l.startswith('SBC'):
sbcid = True
break
if sbcid:
ll = []
for l in xct[p]['levels']:
if l.startswith('SBC'):
lev = l
else:
lev = None
ll.append(lev)
xct[p]['levels'] = ll
for p in sorted(xct):
levels = xct[p]['levels']
comp = xct[p]['component']
        # deg is actually the number of possible values; in the case of promoters we can have multiple empty values
deg = 0
if comp == 'promoter':
deg = len(levels) - len(filter( lambda h: h is None, levels))
if len(levels) > deg:
deg += 1
elif comp == 'gene':
deg = len(levels) - len(filter( lambda h: h is None, levels))
if len(levels) == deg:
deg = 0
pos = xct[p]['positional']
if pos is None:
pos = 0
cid = str(xct[p]['component'])+str(p)
i = 1
if AddBlankPromoter and comp == 'promoter' and len(levels) > len(filter( lambda h: h is None, levels)):
rid[cid+'_'+str(i)] = None
i += 1
for x in range(0, len(levels)):
# if levels[x] is not None:
rid[cid+'_'+str(i)] = levels[x]
i += 1
ct.append((cid, len(levels), str(pos), 0, deg))
return ct, rid
def get_sequence(partnumber):
partid = int(re.sub('SBC', '', partnumber))
ice = iceutils.ICESession(doeopt.ICE_RETRIEVE)
return ice.get_sequence(partid)['sequence']
def get_part(partnumber):
partid = int(re.sub('SBC', '', partnumber))
ice = iceutils.ICESession(doeopt.ICE_RETRIEVE)
return ice.get_part(partid)
# transitional function to map old ids to new ICE ids
def map_oldid():
import openpyxl
midfile = '/mnt/SBC1/code/sbc-doe/mapping.xlsx'
wb = openpyxl.load_workbook(midfile)
xl = wb.get_sheet_by_name(wb.get_sheet_names()[0])
nl = xl.get_highest_row()
mid = {}
offset = 1
for r in range(1, nl):
newid = xl.cell(row=r, column= offset).value
oldid = xl.cell(row=r, column= offset+1).value
mid[oldid] = newid
return mid
def write_fasta(fname, seqid, sequence):
ow = open(fname, 'w')
ow.write('>'+seqid+'\n')
ow.write(sequence)
ow.close()
def read_excel(e, s=1):
import openpyxl
wb = openpyxl.load_workbook(e)
xl = wb.get_sheet_by_name(wb.get_sheet_names()[s-1])
# nl = xl.get_highest_row()
mid = None
seql = {}
fact = {}
partinfo = {}
offset = 1
fcol = 0
r = 0
while fcol is not None:
r += 1
fcol = None
try:
fcol = xl.cell(row=r, column= offset).value
factor = int(fcol)
positional = xl.cell(row=r, column= offset+1).value
component = str(xl.cell(row=r, column= offset+2).value)
part = str(xl.cell(row=r, column= offset+3).value)
except:
continue
if part is None:
if factor in fact:
i = len(fact[factor]['levels'])+1
else:
i = 1
part = 'P'+str(factor)+'_'+str(i)
seql[part] = None
if part is not None and part.startswith('SBCPA'):
if mid is None:
mid = map_oldid()
part = mid[part]
seql[part] = None #get_sequence(part)
# partinfo[part] = None #get_part(part)
if factor not in fact:
fact[factor] = {'positional': positional,
'component': component,
'levels': [],
'sequence': seql[part]
}
if part == 'blank':
part = None
fact[factor]['levels'].append(part)
return fact, seql, partinfo
def compact_factors(fact):
""" This is a temporary solution for positional factors.
At this point, we would accept only pure permutations for the positional factors.
It should be improved to give more flexibility.
"""
positional = set()
watch = set()
for pos in fact:
if fact[pos]['positional']:
positional.add(pos)
levels = fact[pos]['levels']
fingerprint = ' '.join(sorted(fact[pos]['levels']))
watch.add(fingerprint)
    if not positional:
        return fact
    if len(watch) > 1:
        raise Exception('Multiple positional factors')
    if len(positional) != len(levels):
        raise Exception('More factors than slots')
lpos = sorted(positional)
llev = sorted(levels)
for i in range(0, len(lpos)):
fact[lpos[i]]['levels'] = [llev[i]]
return fact
def read_json(f):
""" Read json file return experimental design info.
Initially the goal will be to replicate same result as with the Excel file.
TO DO: templates are combinations that we want to keep!! Modify code...
"""
mid = None
seql = {}
fact = {}
partinfo = {}
jdata = json.load(open(f))
collections = {}
for col in jdata['plan']['collections']:
collections[col['id']] = col
constraints = jdata['plan']['specifications']['constraints']
instances = {}
for i in range(0, len(constraints)):
for col in constraints[i]['collection']:
if col not in instances:
instances[col] = set()
instances[col].add(i+1)
for i in range(0, len(constraints)):
factor = i + 1
levels = []
positional = 0
for colId in constraints[i]['collection']:
if len(instances[colId]) > 1:
positional = 1
item = collections[colId]
# We assume no mix of types
component = item['type']
for partid in item['options']:
part = partid.split('/')[-1]
levels.append(part)
partinfo[part] = {}
if len(levels) > 0:
fact[factor] = {'positional': positional,
'component': component,
'levels': levels,
'sequence': None
}
for part in partinfo:
partinfo[part]['shortDescription'] = component
partinfo[part]['name'] = part
fact = compact_factors(fact)
seed = int(jdata['seed'])
return fact, partinfo, seed
def getfactors(ct, permut=False):
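    """ Keep only factors with more than one level; npos collects the
        positional (permutable) factors among them. """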
factors =[]
nlevels = []
npos = []
i = 0
for x in ct:
f = x[0]
l = x[1]
try:
p = int(x[2])
except:
p = 0
if l>1:
factors.append(f)
nlevels.append(l)
if p != 0:
npos.append(x[0])
i += 1
return factors, nlevels, npos
# Assume promoters "p" and count number of segments
def segments(libr, ct):
col = set()
prom = set()
for l in libr:
x = ''
pr = []
for i in range(0, len(l)):
ll = l[i]
cti = ct[i][1]
m = ll.split('_')
if ll.startswith('p'):
newsegment = False
# If promoter has only 1 level, we assume always on
if cti == 1:
newsegment = True
plevel = 2 # level 1 means off, level 2 on
# else, level 1 means promoter off
else:
# Promoter 1 is always on (add one to the level, to be improved in a more general way)
if m[0] == 'p1':
newsegment = True
plevel = int(m[1]) + 1
elif m[1] != '1':
newsegment = True
plevel = int(m[1])
if newsegment:
if x != '':
pr.append(x)
col.add(x)
# x = ''
x = str(plevel) # Put only the promoter level?
elif ll.startswith('g'):
x += ll
col.add(x)
pr.append(x)
prom.add('.'.join(sorted(pr)))
if x != '':
col.add(x)
sta= {}
for i in col:
m = i.split('_')
l = len(m)
if l not in sta:
sta[l] = 0
sta[l] += 1
return col
# return prom
def save_sbol(desid, libr, constructid, outfolder):
if not path.exists(outfolder):
mkdir(outfolder)
nid = []
x = sbol.sbol()
for i in range(0, len(libr)):
did = constructid[i]
x.construct3(did, libr[i])
# out = path.join(outfolder, did+'.sbol')
# x.serialize(out)
out = path.join(outfolder, desid+'.sbol')
x.serialize(out)
# x = sbol.sbol()
# x.collection(desid, constructid)
# out = path.join(outfolder, desid+'_c'+'.sbol')
# x.serialize(out)
# sbc id generator
def getsbcid(name, description, RegisterinICE= False, designid=None):
global ID_COUNTER
# Try to get design number if possible, otherwise keep the full label
try:
desn = "DE%02d" % (int(re.sub('^.*SBC', '', designid)),)
except:
desn = designid
if RegisterinICE:
responsible = doeopt.ICE_USER
email = doeopt.ICE_EMAIL
ice = iceutils.ICESession(doeopt.ICE_POST, info=doeopt.ICE_INFO) # icetest
plasmid = ice.template_newplasmid(name , description, responsible, responsible, responsible, email, email, email)
reg_plasmid = ice.create_part(plasmid)
sbcID = reg_plasmid['id']
group_number = 2
ice.add_write_permission(sbcID, group_number)
partid = ice.get_part(sbcID)['partId']
else:
try:
response = sbcid.reserve('DE', 1, doeopt.ICE_EMAIL, 'Construct in combinatorial library '+designid)
partid = "SBC_%s_PL%02d" % (desn, response['result'][0]['id'])
except:
# partid = "SBCDE%06d" % (random.randint(0,1000),)
partid = "SBC_%s_PL%02d" % (desn,ID_COUNTER)
ID_COUNTER = ID_COUNTER + 1
return partid
def save_design(design, ct, fname, lat, npos, rid=None, designid=None,
                constructid=None, partinfo=None, project=None, RegisterinIce=False, WriteCsv=None):
    ### Potentially the csv could be overwritten if multiple designs?
    # use None defaults to avoid mutable default arguments leaking state between calls
    if constructid is None:
        constructid = []
    if partinfo is None:
        partinfo = []
    ndes = {}
n = 0
# Read the design for each factor
for x in ct:
fact = x[0]
found = False
# Get the list of designed factors
# For historical reasons (R), two types are possible
if type(design['design']) == dict:
flist = design['design'].keys()
else:
flist = design['design'].keys
if fact in flist:
ndes[fact] = np.array(design['design'][fact])
n = len(ndes[fact])
else:
ndes[fact] = np.array([])
# Note 01/18: Avoid this below, it is confusing and prone to errors.
# It is better to use the information in the rid dictionary
# coming from the DoE specification table
# to know if we have an empty part (a promoter)
# for x in ct:
# fact = x[0]
# nlevels = x[1]
# dege = x[4]
# if dege > 0:
# for i in range(0, len(ndes[fact])):
# if ndes[fact][i] > dege:
# import pdb
# pdb.set_trace()
# ndes[fact][i] = 1
for x in ct:
fact = x[0]
if len(ndes[fact]) == 0:
ndes[fact] = np.repeat(1, n)
# Add positional factor
if 'pos' in flist:
ndes['pos'] = np.array(design['design']['pos'])
# Store designs
of = open(fname, 'w')
if WriteCsv is not None:
cw = csv.writer(open(WriteCsv, 'w'), dialect='excel-tab' )
libr = []
libscr = []
for x in range(0, n):
ll = []
screen = 1
for y in ct:
# import pdb
# pdb.set_trace()
fa = y[0]
le = y[1]
po = y[2]
pl = y[3]
de = ndes[fa][x]
# Screening pool?
if pl > 0:
if de > 1 or (le == 1 and de > 0):
screen *= pl
# Randomize permutations using a latin square
faid = "%s_%d" % (fa, ndes[fa][x],)
if fa in npos:
perm = ndes['pos'][x]
fa = npos[lat[perm-1][npos.index(fa)]-1]
faid = "%s_%d" % (fa, ndes[fa][x],)
if rid is not None:
faid = rid[faid]
if faid is None:
faid = ''
if faid is not None:
ll.append("%s" % (faid,))
# Get the id
if len(constructid) < x+1 :
# Generate a meaningful name
name = ''
for part in ll:
if part != '':
if part in partinfo:
if name != '':
name += '-'
if partinfo[part]['shortDescription'].lower() == 'promoter':
name += '('
name += partinfo[part]['name']
if partinfo[part]['shortDescription'].lower() == 'promoter':
name += ')'
if name == '':
name = designid +'_'+str(x+1)
description = 'Plasmid '
if project is not None:
description += project+'; '
description += 'Design: '+ designid+'; '
description += 'Construct: '+' '.join(ll)
constructid.append(getsbcid(name, description, RegisterinICE=RegisterinIce, designid=designid))
# Save the construct
if rid is None:
of.write("%s\t" % (constructid[x],))
for part in ll:
of.write("%s\t" % (part,))
else:
if WriteCsv is not None:
xx = []
xx.append(constructid[x])
for part in ll:
if len(part) > 0:
xx.append(part)
cw.writerow(xx)
of.write("%16s" % (constructid[x],))
for part in ll:
of.write("%16s" % (part,))
of.write('\n')
libr.append(ll)
if screen > 1:
screen *= 3 # if screening a pool, multiply by 3
libscr.append(screen)
of.close()
return libr, libscr
def save_seqs(outpath, constructid, libr, seql):
for c in range(0, len(constructid)):
seq = ''
for s in libr[c]:
if s != '':
seq += seql[s]
write_fasta(path.join(outpath, constructid[c]+'.fasta'), constructid[c], seq)
# If firstcolumn, the first column is the construct name
def pcad(f, rid=None, firstcolumn=True, label=True, predefined='predefined_colors.txt', clean=True, nolabel=False):
pcolors = {}
if predefined is not None:
for l in open(predefined):
m = l.rstrip().split()
pcolors[m[0]] = int(m[1])
i = 0
gl = []
gl1 = []
# Count how many levels each factor has
count = {}
for l in open(f):
m = l.rstrip().split('\t')
if firstcolumn:
m = m[1:]
for x in m:
v = x.split('_')
if v[0] not in gl:
gl.append(v[0])
if v[0] not in count:
count[v[0]] = set()
count[v[0]].add(x)
if x not in gl1:
gl1.append(x)
if nolabel:
nl = 'nl'
else:
nl = ' '
fl = []
labels = []
for l in open(f):
m = l.rstrip().split('\t')
if firstcolumn:
labels.append(m[0])
m = m[1:]
fn = f+'.pcad'+str(i)
fl.append(fn)
ow = open(fn, 'w')
m = l.split()
if firstcolumn:
m = m[1:]
for x in m:
v = x.split('_')
if x.startswith('promoter'):
# p1: assumes promoter absence
if v[1] != '1':
ow.write('t\n')
# For promoters, we just give promoter number and color it accordingly
if rid is not None and x in rid:
ow.write('p %s %d %s\n' % (rid[x],int(v[1])*2+2, nl))
else:
ow.write('p p%s %d %s\n' % (v[1],int(v[1])*2+2,nl))
elif x.startswith('plasmid'):
# ow.write('p %s %d\n' % (v[0],gl.index(v[0])+1))
# For promoters, we just give promoter number and color it accordingly
if rid is not None and x in rid:
ow.write('p %s %d %s\n' % (rid[x],int(v[1])*2+2, nl))
else:
ow.write('p p%s %d %s\n' % (v[1],int(v[1])*2+2, nl))
else:
# ow.write('c %s %d\n' % (v[0],gl.index(v[0])+1))
            try:
                color = gl1.index(x)+1
            except ValueError:
                # the level was never catalogued; fall back to the first color
                color = 1
if rid is not None and x in rid:
name = rid[x]
elif len(count[v[0]]) > 1:
name = x
else:
name = v[0]
if name in pcolors:
color = pcolors[name]
ow.write('c %s %d %s\n' % (name,color,nl))
ow.write('t\n')
ow.write('# Arcs\n')
ow.close()
i += 1
ofl = []
ofs = []
for i in range(0,len(fl)):
pcad = fl[i]
if label:
ofl.append("label:'"+labels[i]+"'")
of = pcad+'.png'
ofl.append(of)
ofsvg = pcad+'.svg'
ofs.append(ofsvg)
cmd = 'perl '+path.join(path.dirname(path.realpath(__file__)), 'piget.pl')+' '+pcad+' '+of
print(cmd)
system(cmd)
cmd = 'convert '+' '.join(ofl)+' -append -flatten '+f+'.png'
system(cmd)
if clean:
for x in fl+ofl+ofs:
if path.exists(x):
unlink(x)
def readJMP(jmp):
header = None
design = []
doejmp = {'design': {}}
for row in csv.reader(open(jmp, 'rU')):
if header is None:
header = row
continue
for i in range(0, len(header)):
if len(row[i]) == 0:
continue
fact = header[i]
if fact not in doejmp['design']:
doejmp['design'][fact] = []
try:
val = int(row[i])
except:
val = int( re.sub('^L', '', row[i]) )
doejmp['design'][fact].append( val )
design.append(doejmp)
return design
def readOptDes(optf):
header = None
design = []
doejmp = {'design': {}}
for row in csv.reader(open(optf)):
if header is None:
header = row
continue
for i in range(0, len(header)):
if len(row[i]) == 0:
continue
fact = header[i]
if fact not in doejmp['design']:
doejmp['design'][fact] = []
try:
val = int(row[i])
except:
val = int( re.sub('^L', '', row[i]) )
doejmp['design'][fact].append( val )
design.append(doejmp)
return design
def arguments():
parser = argparse.ArgumentParser(description='SBC-DeO. Pablo Carbonell, SYNBIOCHEM, 2016')
parser.add_argument('-p', action='store_true',
help='Full positional permutation (default: random latin square)')
parser.add_argument('f',
help='Input file with specifications (excel or txt format)')
parser.add_argument('-s', default=1,
help='Excel sheet number (default 1)')
parser.add_argument('-i', action='store_true',
help='Ignore segment calculations based on promoters')
parser.add_argument('-r', action='store_false',
help='No regular fractional factorial design')
parser.add_argument('-o', action='store_false',
help='No orthogonal array design')
parser.add_argument('-x', nargs='?', type=int, default=100,
help='Random seed (default 100) [or pick random number] for oa design')
parser.add_argument('id',
help='Design id')
parser.add_argument('-O',
help='Output path')
parser.add_argument('-b', action='store_false',
help='Do not generate sbol file')
parser.add_argument('-g', action='store_true',
help='Generate pigeon cad image')
parser.add_argument('-V', action='store_true',
help='Generate viscad diagram')
parser.add_argument('-c', action='store_true',
help='Generate construct fasta files')
parser.add_argument('-v',
help='Project description')
parser.add_argument('-I', action='store_true',
help='Register project in ICE [False]')
parser.add_argument('-j',
help='DoE from JMP')
    parser.add_argument('-optDes',
help='DoE from OptDes')
parser.add_argument('-w', action='store_true',
help='DoE from json (web version)')
parser.add_argument('-G',
help='Regenerate pigeon from file and exit')
parser.add_argument('-k', action='store_false',
help='Keep pigeon files')
parser.add_argument('-nolab', action='store_true',
help='Do not use labels in pigeon figures')
parser.add_argument('-blankPromoter', action='store_true',
help='Add blank promoter even if not explicitly given')
parser.add_argument('-bro', action='store_true',
help='Add file with full list of bridging oligos')
return parser
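# Example invocation (hypothetical file names and design id):
#   python doe.py design.xlsx SBCDE00001 -O output/ -g -v "Test project"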
def command_line(parser, args=None):
if args is None:
arg = parser.parse_args()
else:
arg = parser.parse_args(args)
return arg
def write_log(logfile, arg):
s = []
for x in arg:
if len(x.split(' ')) > 1:
s.append( '"{}"'.format(x) )
else:
s.append( x )
with open(logfile, 'a') as handler:
handler.write( ' '.join(s)+'\n' )
def run_doe(args=None):
parser = arguments()
arg = command_line(parser, args)
f = arg.f
p = arg.p
cfasta = arg.c
desid = arg.id
outpath = arg.O
sbolgen = arg.b
cad = arg.g
vcad = arg.V
project = arg.v
xarg = arg.x
if outpath is None or not path.exists(outpath):
outpath = path.dirname(f)
outfolder = path.join(outpath)
if not path.exists(outfolder):
mkdir(outfolder)
logfile = path.join(outfolder, desid+'.log')
write_log(logfile, sys.argv)
# with open(logfile, 'a') as handler:
# handler.write(' '.join(['"{}"'.format(x) for x in sys.argv])+'\n')
if arg.G is not None:
rid = {}
aa = []
bb = []
f1 = arg.G+'di0'
f2 = arg.G+'d0'
if path.exists(f1):
for l in open(f1):
m = l.rstrip().split('\t')
for k in m:
bb.append(k)
if path.exists(f2):
for l in open(f2):
ll = l.rstrip()
for k in range(0, len(ll), 16):
val = l[k:(k+16)]
aa.append(re.sub(' ', '', val))
for i in range(0, len(aa)):
rid[bb[i]] = aa[i]
pcad(f1, rid, clean=arg.k, nolabel=arg.nolab)
sys.exit()
if xarg is None:
seed = np.random.randint(1e6)
else:
seed = xarg
inputfile = path.join(outfolder, path.basename(f))
# try:
# shutil.copyfile(f, inputfile)
# except:
# raise Exception('Input file not found')
# pass
if args is None:
sys.argv[1] = '"'+path.basename(inputfile)+'"'
cmd = ' '.join(['"{}"'.format(x) for x in sys.argv])
else:
cmd = ' '.join(['"{}"'.format(x) for x in args])
s = int(arg.s)
if not arg.w:
try:
xct, seql, partinfo = read_excel(inputfile, s)
ct, rid = convert_construct(xct, AddBlankPromoter=arg.blankPromoter)
except:
# old txt format (needs update)
ct, cid = construct(inputfile)
seql = {}
else:
xct, partinfo, seed = read_json(inputfile)
ct, rid = convert_construct(xct, AddBlankPromoter=arg.blankPromoter)
if arg.c:
for s in seql:
write_fasta(path.join(outpath, outfolder, s+'.fasta'), s, seql[s])
wd = path.dirname(path.realpath(__file__))
conn = pyRserve.connect()
conn.r.source(path.join(wd, 'mydeo.r'))
# we keep only factors with more than one level
# npos are the factors that can be rearranged
factors, nlevels, npos = getfactors(ct)
lat = None
if len(npos) > 0:
factors.append('pos')
if not p:
lat = conn.r.permut(len(npos), ptype='latin')
else:
lat = np.array(conn.r.permut(len(npos), ptype='full'))
lat = lat.astype(int)
        # add the levels corresponding to the shuffling
nlevels.append(len(lat))
if not p:
designid = path.join(outfolder, desid)
else:
designid = path.join(outfolder, desid+'.full')
constructid = []
finfow = open(designid+'.info', 'w')
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
finfow.write('SBC-DoE; '+now+'\n')
finfow.write(' Command: '+cmd+'\n')
if not p:
dinfo = " Factors: %d; Levels: %d; Positional: %d [Latin square]" % (len(factors), np.prod(nlevels), len(npos))
else:
dinfo = " Factors: %d; Levels: %d; Positional: %d [Full permutations]" % (len(factors), np.prod(nlevels), len(npos))
print('SBC-DoE; '+dinfo)
finfow.write(dinfo+'\n')
if arg.j is not None or arg.optDes is not None: # Custom design (column separated)
if arg.j is not None:
if not path.exists(arg.j):
arg.j = path.join(outfolder, arg.j)
if not path.exists(arg.j):
raise Exception('DoE file not found')
jmp = arg.j
doeJMP = readJMP(jmp)
else:
if not path.exists(arg.optDes):
arg.optDes = path.join(outfolder, arg.optDes)
if not path.exists(arg.optDes):
raise Exception('DoE file not found')
doeJMP = readOptDes(arg.optDes)
for des in range(0, len(doeJMP)):
if rid is not None:
fname0 = designid+'.ji'+str(des)
csvname = re.sub('\.[^.]*$', '.txt', fname0)
libr, libscr = save_design(doeJMP[des], ct, fname0, lat, npos, rid=rid, designid=desid, constructid=constructid, RegisterinIce=arg.I, WriteCsv=csvname)
fname = designid+'.j'+str(des)
libr, libscr = save_design(doeJMP[des],ct, fname, lat, npos, rid=None, designid=desid, constructid=constructid, RegisterinIce=arg.I)
if vars(arg)['i']:
dinfor = " Custom design %d; Library size: %d" % (des, len(libr))
else:
dinfor = " Custom design %d; Library size: %d; Segments: %d; Screening size: %d" % (des, len(libr), len(segments(libr, ct)), np.sum(libscr))
print(dinfor)
finfow.write(dinfor+'\n')
if cad:
pcad(fname, rid, clean=arg.k, nolabel=arg.nolab)
if vcad:
viscad.runViscad(args=[fname, '-i', csvname, '-l', logfile])
if arg.r: # Regular fractional factorial design
# Trivial case, no need of calling planor package
if len(factors) == 1:
doe1 = [{'design': {factors[0]: range(1, nlevels[0]+1) } } ]
else:
doe1 = conn.r.doe1(factors=np.array(factors), nlevels=np.array(nlevels), timeout=30)
for des in range(0, len(doe1)):
fname = designid+'.d'+str(des)
libr, libscr = save_design(doe1[des], ct, fname, lat, npos, rid, desid, constructid, partinfo, project, RegisterinIce=arg.I)
if rid is not None:
fname = designid+'.di'+str(des)
libr, libscr = save_design(doe1[des], ct, fname, lat, npos, rid=None, designid=desid, constructid=constructid, RegisterinIce=arg.I)
if vars(arg)['i']:
dinfor = " Design %d; Model S^%d; Library size: %d" % (des, des+1, len(libr))
else:
dinfor = " Design %d; Model S^%d; Library size: %d; Segments: %d; Screening size: %d" % (des, des+1, len(libr), len(segments(libr, ct)), np.sum(libscr))
print(dinfor)
finfow.write(dinfor+'\n')
if cad:
pcad(fname, rid, clean=arg.k, nolabel=arg.nolab)
if arg.o: # Orthogonal arrays
doe2 = conn.r.doe2(factors=np.array(factors), nlevels=np.array(nlevels),
timeout=30, seed=seed)
for des in range(0, len(doe2)):
fname = designid+'.oad'+str(des)
libr, libscr = save_design(doe2[des], ct, fname, lat, npos, rid, desid, constructid, partinfo, project, RegisterinIce=arg.I)
if cfasta:
save_seqs(outfolder, constructid, libr, seql)
if sbolgen:
save_sbol(desid, libr, constructid, path.join(outfolder))
if rid is not None:
fname = designid+'.oadi'+str(des)
libr, libscr = save_design(doe2[des], ct, fname, lat, npos, rid=None, designid=desid, constructid=constructid, RegisterinIce=arg.I)
if vars(arg)['i']:
dinfor = " Orthogonal Array Design; Library size: %d; Seed: %d" % (len(libscr),seed)
else:
dinfor = " Orthogonal Array Design; Library size: %d; Segments: %d; Screening size: %d; Seed: %d" % (len(libr), len(segments(libr, ct)), np.sum(libscr), seed)
print(dinfor)
finfow.write(dinfor+'\n')
if cad:
pcad(fname, rid, clean=arg.k, nolabel=arg.nolab)
finfow.close()
if arg.bro:
        # Needs some improvement: not valid for all cases
try:
broFile = designid+'.bro'
brArgs = [csvname, fname, '-outFile', broFile, '-logFile', broFile+'.log']
brOrligos.run_bro( brArgs )
write_log(logfile, ['broligos'] + brArgs)
except:
pass
return outfolder, fname
if __name__ == '__main__':
run_doe()
|
{"hexsha": "74acd7b6f11ba2887a48bf3807fd957602547f5e", "size": 32266, "ext": "py", "lang": "Python", "max_stars_repo_path": "doe.py", "max_stars_repo_name": "pablocarb/sbc-doe", "max_stars_repo_head_hexsha": "3467a42765bae03aedfd24924e4c18213753d27a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doe.py", "max_issues_repo_name": "pablocarb/sbc-doe", "max_issues_repo_head_hexsha": "3467a42765bae03aedfd24924e4c18213753d27a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doe.py", "max_forks_repo_name": "pablocarb/sbc-doe", "max_forks_repo_head_hexsha": "3467a42765bae03aedfd24924e4c18213753d27a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9956616052, "max_line_length": 174, "alphanum_fraction": 0.5042149631, "include": true, "reason": "import numpy", "num_tokens": 8285}
|
using LinearAlgebra: dot  # `dot` needs an explicit import on Julia >= 0.7; `sigmoid` is assumed defined elsewhere in this package

function fwd_prop(X::Array{Float64,1},
nodes::Int64,
hidden_layer::Int64,
z,
layer,
layer_p1)
# number of used weights
j = 0
# number of covariates
D = length(X)
# build first hidden layer
for node in 1:nodes
i = j + 1
j = i + D
layer[node] = sigmoid(dot(z[i:(j-1)], X) + z[j])
end
# build other hidden layers
if hidden_layer > 1
for l in 2:hidden_layer
for node in 1:nodes
i = j + 1
j = i + nodes
layer_p1[node] = sigmoid(dot(z[i:(j-1)], layer) + z[j])
end
layer = copy(layer_p1)
end
end
# build output
i = j + 1
j = i + nodes
out = sigmoid(dot(z[i:(j-1)], layer) + z[j])
return out
end
|
{"hexsha": "40f74da609a38ca4e9b06079b5587d6a8b6d52bf", "size": 868, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/propagate.jl", "max_stars_repo_name": "jacobcvt12/BayesNN", "max_stars_repo_head_hexsha": "267a62d56b403deb586d630a8289f076f54e9064", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/propagate.jl", "max_issues_repo_name": "jacobcvt12/BayesNN", "max_issues_repo_head_hexsha": "267a62d56b403deb586d630a8289f076f54e9064", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-15T11:59:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-15T11:59:42.000Z", "max_forks_repo_path": "src/propagate.jl", "max_forks_repo_name": "jacobcvt12/BayesNN.jl", "max_forks_repo_head_hexsha": "267a62d56b403deb586d630a8289f076f54e9064", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7, "max_line_length": 71, "alphanum_fraction": 0.4447004608, "num_tokens": 248}
|
import os
import json
import pandas as pd
import numpy as np
import tensorflow as tf
from utils.progress import WorkSplitter
from utils.optimizers import Optimizer
from utils.modelnames import models
from utils.functions import get_attention_example_items, write_latex, read_template
from plots.rec_plots import multi_modes_histogram, multi_modes_count
import glob
def attention(Rtrain, Rvalid, Rtest, index_map, item_names, latex_path, fig_path, settings_df, template_path,
preference_analysis=False,
case_study=False,
gpu_on=True):
progress = WorkSplitter()
m, n = Rtrain.shape
Rtest = Rvalid + Rtest
for idx, row in settings_df.iterrows():
row = row.to_dict()
progress.section(json.dumps(row))
if 'optimizer' not in row.keys():
row['optimizer'] = 'Adam'
row['epoch'] = 30
mmup_model = models[row['model']](Rtrain,
embedded_matrix=np.empty((0)),
mode_dim=row['mode_dimension'],
key_dim=row['key_dimension'],
batch_size=row['batch_size'],
optimizer=row['optimizer'],
learning_rate=row['learning_rate'],
normalize=False,
iteration=row['iteration'],
epoch=row['epoch'],
rank=row['rank'],
corruption=row['corruption'],
gpu_on=gpu_on,
lamb=row['lambda'],
alpha=row['alpha'],
seed=1,
root=row['root'],
return_model=True)
train_batches = mmup_model.get_batches(Rtrain, 100)
test_batches = mmup_model.get_batches(Rtest, 100)
cmd = "rm {0}/*.pdf".format(fig_path)
os.system(cmd)
interaction_modes_counts = []
items = []
for i in index_map:
try:
name = item_names[item_names['ItemID'] == i]['Name'].values[0]
except:
name = "Unknown"
items.append(name)
items = np.array(items)
pop = np.squeeze(np.asarray(np.sum(Rtrain, axis=0)))
latex_template = read_template(template_path)
if preference_analysis:
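            # probe with a synthetic user who interacted with every item, exposing each mode's global attention profile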
full_interaction_user = np.ones((1, n))
attentions, kernels, predictions = mmup_model.interprate(full_interaction_user)
_, modes, _ = attentions.shape
results = []
for i in range(modes):
index = np.argsort(attentions[0][i])[::-1][:10]
#attentions[0][1][np.argpartition(-attentions[0][i], 10)[:10]].argsort()[::-1]
result = pd.DataFrame({'Item': items[index],
'Attention': attentions[0][i][index],
'Popularity': pop[index],
'Mode': i
})
results.append(result)
results = pd.concat(results)
            results.to_csv("preference_analysis.csv")
for i in range(len(train_batches)):
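            # per batch: attention weights, the mode (kernel) chosen per item, and predictions for 9 example items per user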
attentions, kernels, predictions = mmup_model.interprate(train_batches[i])
visualization_samples = get_attention_example_items(train_batches[i], predictions, test_batches[i], 9)
interaction_counts = np.sum(train_batches[i], axis=1).tolist()
for j in range(kernels.shape[0]):
if interaction_counts[j][0] > 200:
continue
interaction_modes_counts.append([interaction_counts[j][0],
len(np.unique(kernels[j][visualization_samples[j][1]]))])
if case_study:
write_latex(visualization_samples, attentions, kernels, items, latex_template, latex_path)
tex_files = glob.glob(latex_path + "/*.tex")
for tex in tex_files:
cmd = "pdflatex -halt-on-error -output-directory {0} {1}".format(fig_path, tex)
os.system(cmd)
cmd = "rm {0}/*.log".format(fig_path)
os.system(cmd)
cmd = "rm {0}/*.aux".format(fig_path)
os.system(cmd)
cmd = "rm {0}/*.tex".format(latex_path)
os.system(cmd)
interaction_modes_counts = pd.DataFrame(np.array(interaction_modes_counts), columns=['x', 'y'])
multi_modes_histogram(interaction_modes_counts)
multi_modes_count(interaction_modes_counts)
interaction_modes_counts.to_csv(template_path+"/modes_count.csv")
mmup_model.sess.close()
tf.reset_default_graph()
return
|
{"hexsha": "17aa5813b3d204725aa63ff78a9aa61db2f31911", "size": 5212, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment/attention.py", "max_stars_repo_name": "wuga214/MultiModesPreferenceEstimation", "max_stars_repo_head_hexsha": "f80c2feb196cb498a8b417f2037aadad151cceb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiment/attention.py", "max_issues_repo_name": "wuga214/MultiModesPreferenceEstimation", "max_issues_repo_head_hexsha": "f80c2feb196cb498a8b417f2037aadad151cceb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment/attention.py", "max_forks_repo_name": "wuga214/MultiModesPreferenceEstimation", "max_forks_repo_head_hexsha": "f80c2feb196cb498a8b417f2037aadad151cceb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2162162162, "max_line_length": 114, "alphanum_fraction": 0.5080583269, "include": true, "reason": "import numpy", "num_tokens": 965}
|
# Copyright 2021 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# E. van den Berg, "Efficient Bayesian phase estimation using mixed priors"
# arXiv:2007.11629.
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
import bpe
from generic import *
# ----------------------------------------------------------------------
# Plot G1 - Cyclic exponent, single eigenphase
# ----------------------------------------------------------------------
# Load the results
instancesA = loadInstances('./cache/experiment_hybrid_30.dat') # Normal, kMax = 1
instancesB = loadInstances('./cache/experiment_hybrid_36.dat') # Normal, kMax = 5
instancesC = loadInstances('./cache/experiment_hybrid_31.dat') # Mixed, kMax = 1
instancesD = loadInstances('./cache/experiment_hybrid_37.dat') # Mixed, kMax = 5
instancesE = loadInstances('./cache/experiment_hybrid_39.dat') # Mixed, kMax = 20
# Display the averate switch iterations along with standard deviation
for instances in [instancesC,instancesD,instancesE] :
data = [problem.switchIter for problem in instances]
width = max([len(s) for s in data])
switches = np.zeros((len(data),width),dtype=int)
for (i,d) in enumerate(data) :
switches[i,:len(d)] = d
print("Mean switch: %s (%s)" % (", ".join(["%d"%f for f in np.mean(switches,axis=0)]),
", ".join(["%.2f"%f for f in np.std(switches,axis=0)])))
# Plot
plotCollatedMu(instancesA,'-',alpha=0.4,color='C1')
plotCollatedMu(instancesB,'-',alpha=1.0,color='C1')
plotCollatedMu(instancesC,'-',alpha=0.3)
plotCollatedMu(instancesD,'-',alpha=0.6)
plotCollatedMu(instancesE,'-',alpha=1.0)
for (alpha,instances) in zip([0.3,0.6,1.0],[instancesC,instancesD,instancesE]) :
problem = instances[0]
error = np.zeros((len(instances),problem.phiMu.shape[0],problem.phiStar.size),dtype=np.double)
for i in range(len(instances)) :
problem = instances[i]
d = np.abs(problem.collatedPhi - problem.phiStar)
idx = d > np.pi
d[idx] = 2*np.pi - d[idx]
error[i,:,:] = d
medianError = np.nanmedian(error,axis=0)
switch = np.mean([problem.switchIter for problem in instances])
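    # linearly interpolate the median error at the (generally non-integer) mean switch iteration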
idx1 = np.argwhere(problem.phiIter <= switch); idx1 = idx1[-1]
idx2 = np.argwhere(problem.phiIter > switch); idx2 = idx2[0]
value = ((problem.phiIter[idx2] - switch) * medianError[idx1] +
(switch - problem.phiIter[idx1]) * medianError[idx2]) / (problem.phiIter[idx2] - problem.phiIter[idx1])
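    # blend C0 toward white by alpha, then darken, so the marker matches the curve's shade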
c = colors.to_rgba('C0')
c = tuple([(alpha * c[i] + (1-alpha) * 1) for i in range(3)])
c = [0.75 * c[i] for i in range(3)]
plt.plot(switch, value, color=[1,1,1], marker='.',markersize=15)
plt.plot(switch, value, color=c, marker='.',markersize=10)
plt.text(1.24e2,1e-1,'A',fontweight='medium', fontsize=fontsize)
plt.text(1.15e6,5.0e-1,'B',fontweight='medium', fontsize=fontsize)
plt.text(1.15e6,7.0e-4,'C',fontweight='medium', fontsize=fontsize)
plt.text(1.15e6,1.3e-4,'D',fontweight='medium', fontsize=fontsize)
plt.text(1.15e6,5.0e-5,'E',fontweight='medium', fontsize=fontsize)
plt.legend(['A','B','C','D','E'],
['Normal ($c_{\max}$ = 1)','Normal ($c_{\max}$ = 5)',
'Mixed ($c_{\max}$ = 1)','Mixed ($c_{\max}$ = 5)','Mixed ($c_{\max}$ = 20)'],
handler_map={str: StringObjectHandler()}, loc='lower left', fontsize=12)
plt.ylim([1e-5,5e0])
plt.xlabel('Iteration', fontsize=fontsize)
plt.ylabel('Median phase error', fontsize=fontsize)
setTickFontsize(fontsize)
exportFigure("FigExperimentHybrid_G1", label='4a')
# ----------------------------------------------------------------------
# Plot G2 - Phase transitions
# ----------------------------------------------------------------------
epsilon = 1e-4; kMax = 200
sigmaCritical = bpe.DensityFourier.normalCriticalSigma(None, epsilon, kMax=kMax)
print(sigmaCritical)
for i in [1,2,3,4,6,7] :
filename = './cache/experiment_transition_%d.dat' % i
instances = loadInstances(filename)
groups = []
for i in range(len(instances) // 100) :
groups.append(instances[i*100:(i+1)*100])
results = np.zeros(len(groups))
for i in range(len(groups)) :
instances = groups[i]
for j in range(len(instances)) :
error = np.abs(instances[j].phiMu[-1] - instances[j].phiStar[0])
error = np.minimum(error, 2*np.pi - error)
if (error >= 1e-1) :
results[i] += 1
plt.plot(np.arange(len(results)) * 0.01, 100 * results / 100)
plt.text(1.42, 64.4,'A',fontweight='medium', fontsize=fontsize)
plt.text(0.98, 64.4,'B',fontweight='medium', fontsize=fontsize)
plt.text(0.70, 72.0,'C',fontweight='medium', fontsize=fontsize)
plt.text(0.49, 81.0,'D',fontweight='medium', fontsize=fontsize) # 0.49, 82.0
plt.text(0.32, 82.5,'E',fontweight='medium', fontsize=fontsize) # 0.31, 83.0
plt.text(0.09, 80.0,'F',fontweight='medium', fontsize=fontsize)
plt.legend(['A','B','C','D','E','F'],
['$\sigma_{\epsilon}(200)$, $c_{\max}$ = 1','$\sigma_{\epsilon}(200)$, $c_{\max}$ = 2','$\sigma_{\epsilon}(200)$, $c_{\max}$ = 5',
'$\sigma_{\epsilon}(200)$, $c_{\max}$ = 10','$\sigma_{\epsilon}(1000)$, $c_{\max}$ = 10','$\sigma_{\epsilon}(5000)$, $c_{\max}$ = 10'],
handler_map={str: StringObjectHandler()}, loc='lower right', fontsize=12)
plt.xlabel('Bias', fontsize=fontsize)
plt.ylabel('Failure probability (%)', fontsize=fontsize)
setTickFontsize(fontsize)
exportFigure("FigExperimentHybrid_G2", label='4b')
|
{"hexsha": "6a84a10b6da79e33b278916ac6e95f4ae34757d8", "size": 6125, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_figure_4ab.py", "max_stars_repo_name": "ewoutvandenberg/Quantum-phase-estimation", "max_stars_repo_head_hexsha": "035a55a00ba24ae82e2d892cedbdffc36b52aa06", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_figure_4ab.py", "max_issues_repo_name": "ewoutvandenberg/Quantum-phase-estimation", "max_issues_repo_head_hexsha": "035a55a00ba24ae82e2d892cedbdffc36b52aa06", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_figure_4ab.py", "max_forks_repo_name": "ewoutvandenberg/Quantum-phase-estimation", "max_forks_repo_head_hexsha": "035a55a00ba24ae82e2d892cedbdffc36b52aa06", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2413793103, "max_line_length": 147, "alphanum_fraction": 0.6334693878, "include": true, "reason": "import numpy", "num_tokens": 1800}
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import threading
import time
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
import subprocess
class TensorRTInspectorTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[1, 16, 16], dtype="float32")
matmul_out = fluid.layers.matmul(
x=data,
y=data,
transpose_x=self.transpose_x,
transpose_y=self.transpose_y,
alpha=self.alpha)
out = fluid.layers.batch_norm(matmul_out, is_test=True)
self.feeds = {"data": np.ones([1, 16, 16]).astype("float32"), }
self.enable_trt = True
self.trt_parameters = InferencePassTest.TensorRTParam(
1 << 30, 1, 0, AnalysisConfig.Precision.Float32, False, False, True)
self.fetch_list = [out]
def set_params(self):
self.transpose_x = True
self.transpose_y = True
self.alpha = 2.0
def test_check_output(self):
if core.is_compiled_with_cuda():
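            # re-run this script as a subprocess with --build-engine so TensorRT's inspector output can be captured from stderr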
build_engine = subprocess.run(
[sys.executable, 'test_trt_inspector.py', '--build-engine'],
stderr=subprocess.PIPE)
engine_info = build_engine.stderr.decode('ascii')
trt_compile_version = paddle.inference.get_trt_compile_version()
trt_runtime_version = paddle.inference.get_trt_runtime_version()
valid_version = (8, 2, 0)
if trt_compile_version >= valid_version and trt_runtime_version >= valid_version:
self.assertTrue('====== engine info ======' in engine_info)
self.assertTrue('====== engine info end ======' in engine_info)
self.assertTrue('matmul' in engine_info)
self.assertTrue('LayerType: Scale' in engine_info)
self.assertTrue('batch_norm' in engine_info)
else:
self.assertTrue(
'Inspector needs TensorRT version 8.2 and after.' in
engine_info)
if __name__ == "__main__":
if '--build-engine' in sys.argv:
test = TensorRTInspectorTest()
test.setUp()
use_gpu = True
test.check_output_with_option(use_gpu)
else:
unittest.main()
|
{"hexsha": "3d4b2dc10c2b6fbec804a5982a356a0f9c0064d8", "size": 3180, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py", "max_stars_repo_name": "ZibinGuo/Paddle", "max_stars_repo_head_hexsha": "6e0892312de5e4ba76d980ff0e4322ac55ca0d07", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2016-08-15T07:02:27.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-24T09:34:00.000Z", "max_issues_repo_path": "python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py", "max_issues_repo_name": "ZibinGuo/Paddle", "max_issues_repo_head_hexsha": "6e0892312de5e4ba76d980ff0e4322ac55ca0d07", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-28T07:23:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T07:23:22.000Z", "max_forks_repo_path": "python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py", "max_forks_repo_name": "ZibinGuo/Paddle", "max_forks_repo_head_hexsha": "6e0892312de5e4ba76d980ff0e4322ac55ca0d07", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-02T11:36:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T11:36:03.000Z", "avg_line_length": 38.313253012, "max_line_length": 93, "alphanum_fraction": 0.6506289308, "include": true, "reason": "import numpy", "num_tokens": 686}
|
import networkx as nx
inputs = open('input.txt').read().split()
inputs = list(map(lambda _: _.split(')'), inputs))
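# Each input line "A)B" means B directly orbits A; store the orbits as directed edges A -> B.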
orbits = nx.DiGraph()
orbits.add_edges_from(inputs)
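# Part 1: the orbit-count checksum is the sum of each node's distance from COM.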
print(sum(nx.shortest_path_length(orbits, source='COM', target=node) for node in orbits.nodes))
#########################
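# Part 2: drop the shared COM-prefix of the two paths; the transfer count is the
# length of the two disjoint tails, minus 2 for the YOU and SAN nodes themselves.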
start = set(nx.shortest_path(orbits, source='COM', target='YOU'))
end = set(nx.shortest_path(orbits, source='COM', target='SAN'))
same = start & end
start -= same
end -= same
print(len(start)+len(end)-2)
|
{"hexsha": "72c04fcf75746ffedd19e19eb5718f7abbaf2919", "size": 497, "ext": "py", "lang": "Python", "max_stars_repo_path": "6-Universal_Orbit_Map.py", "max_stars_repo_name": "minhoryang/advent-of-code-2019", "max_stars_repo_head_hexsha": "f5b468ce583a14548346f8e415d6b05589ec564f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "6-Universal_Orbit_Map.py", "max_issues_repo_name": "minhoryang/advent-of-code-2019", "max_issues_repo_head_hexsha": "f5b468ce583a14548346f8e415d6b05589ec564f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "6-Universal_Orbit_Map.py", "max_forks_repo_name": "minhoryang/advent-of-code-2019", "max_forks_repo_head_hexsha": "f5b468ce583a14548346f8e415d6b05589ec564f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6111111111, "max_line_length": 95, "alphanum_fraction": 0.6700201207, "include": true, "reason": "import networkx", "num_tokens": 127}
|
import os
import psycopg2
import logging
import numpy as np
import pandas as pd
import multiprocessing as mp
from multiprocessing.pool import ThreadPool
from ibmfl.data.data_handler import DataHandler
logger = logging.getLogger(__name__)
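# Linear lookup of a patient's value in a list of (patientId, value) tuples; 0 when absent.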
def get_value_or_0(rows, patientId):
for (patId, value) in rows:
if patId == patientId:
return value
return 0
class PostgreSqlDataHandler(DataHandler):
"""
Data handler for PostgreSQL database access.
"""
def __init__(self, data_config=None, channels_first=False):
super().__init__()
self.host = data_config['host']
self.port = data_config['port']
self.database = data_config['database']
def get_respiratory_scores(self, cur):
'''
Function calculating the respiratory components of SOFA scores.
Returns: array of tuples like (patientId, sofaScore)
PaO2/FiO2 [mmHg (kPa)] SOFA score
≥ 400 (53.3) -> 0
< 400 (53.3) -> +1
< 300 (40) -> +2
< 200 (26.7) and mechanically ventilated -> +3
< 100 (13.3) and mechanically ventilated -> +4
#TODO is formula correct?
#TODO do we need mechanically ventilated?
'''
def to_sofa(ratio):
if ratio < 100: return 4
if ratio < 200: return 3
if ratio < 300: return 2
if ratio < 400: return 1
return 0
sql = """SELECT patientunitstayid, pao2 / (fio2/100) as ratio
FROM eicu_crd.apacheapsvar
WHERE pao2 > 0 AND fio2 > 0;"""
cur.execute(sql)
return [(patientId, to_sofa(ratio)) for (patientId, ratio) in cur.fetchall()]
def get_nervous_system_scores(self, cur):
'''
Function calculating the nervous system components of SOFA scores.
Returns: array of tuples like (patientId, sofaScore)
'''
sql = """SELECT patientunitstayid, eyes + verbal + motor as gcs
FROM eicu_crd.apacheapsvar
WHERE eyes > 0 AND verbal > 0 AND motor > 0;
"""
cur.execute(sql)
def to_sofa(gcs):
if gcs < 6: return 4
if gcs < 10: return 3
if gcs < 13: return 2
if gcs < 15: return 1
return 0
return [(patientId, to_sofa(gcs)) for (patientId, gcs) in cur.fetchall()]
def get_cardiovascular_system_scores(self, conn):
'''
Function calculating the cardiovascular system components of SOFA scores.
Returns: array of tuples like (patientId, sofaScore)
'''
pool = ThreadPool(mp.cpu_count())
mapSql = """SELECT patientunitstayid, MIN((systemicsystolic + 2 * systemicdiastolic)/3) AS map
FROM eicu_crd.vitalperiodic
WHERE systemicsystolic BETWEEN 50 AND 250
AND systemicdiastolic BETWEEN 25 AND 225
GROUP BY patientunitstayid
ORDER BY patientunitstayid"""
mapResults = pool.apply_async(self.run_query, args=(conn, mapSql))
def drug_query(drugname:str, conv_factor:str, max_rate:str):
"""Function to produce a drug query.
conv_factor is the factor to multiply the drugrate with to get to µg/kg/min.
max_rate is the outlier cutoff in the unit of the rows.
The queries return two columns: patientunitstayid and (converted) drugrate"""
return """SELECT patientunitstayid, MAX(drugrate::FLOAT * """+conv_factor+""") AS drugrate
FROM eicu_crd.infusiondrug
WHERE drugrate ~ E'^[\\\\d\\\\.]+$'
AND drugname = '"""+drugname+"""'
AND drugrate::FLOAT < """+max_rate+"""
GROUP BY patientunitstayid
ORDER BY patientunitstayid"""
dopamineResults = [
pool.apply_async(self.run_query, args=(conn, drug_query('Dopamine (mcg/kg/min)', '1', '100'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Dopamine (ml/hr)', '0.140', '500'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Dopamine ()', '0.105', '500'))),
]
dobutamineResults = [
pool.apply_async(self.run_query, args=(conn, drug_query('Dobutamine (mcg/kg/min)', '1', '100'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Dobutamine (ml/hr)', '0.066', '1000'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Dobutamine ()', '0.054', '1000'))),
]
epinephrineResults = [
pool.apply_async(self.run_query, args=(conn, drug_query('Epinephrine (mcg/kg/min)', '1', '10'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Epinephrine (ml/hr)', '0.00448', '500'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Epinephrine (mcg/min)', '0.03809', '75'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Epinephrine ()', '0.00291', '600'))),
]
norepinephrineResults = [
pool.apply_async(self.run_query, args=(conn, drug_query('Norepinephrine (mcg/kg/min)', '1', '2'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Norepinephrine (ml/hr)', '0.0037', '600'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Norepinephrine (mcg/min)', '0.018626', '150'))),
pool.apply_async(self.run_query, args=(conn, drug_query('Norepinephrine ()', '0.002218', '700'))),
]
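        # For each patient, keep the maximum (already unit-converted) rate seen
        # across the unit-specific queries for one drug.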
def merge_drug_results(results):
merged = []
for rows in [result.get() for result in results]:
for row in rows:
(patId, rate) = row
found = False
for idx, (patId2, rate2) in enumerate(merged):
if patId2 == patId:
found = True
if rate > rate2:
merged[idx] = row
break
if not found:
merged.append(row)
return merged
mapRows = mapResults.get()
dopamineRows = merge_drug_results(dopamineResults)
dobutamineRows = merge_drug_results(dobutamineResults)
epinephrineRows = merge_drug_results(epinephrineResults)
norepinephrineRows = merge_drug_results(norepinephrineResults)
pool.close()
def calculate_sofa_score(mapr, dopamineRate, dobutamineRate, epinephrineRate, norepinephrineRate):
if dopamineRate > 15 or epinephrineRate > 0.1 or norepinephrineRate > 0.1:
return 4
if dopamineRate > 5 or epinephrineRate > 0 or norepinephrineRate > 0:
return 3
if dopamineRate > 0 or dobutamineRate > 0:
return 2
if mapr < 70:
return 1
return 0
allPatientIds = [patId for (patId, _) in mapRows]
for drugRows in [dopamineRows, dobutamineRows, epinephrineRows, norepinephrineRows]:
for (patId, _) in drugRows:
if not patId in allPatientIds:
allPatientIds.append(patId)
print("nb map's: " + str(len(mapRows)))
print("nb dopamines: " + str(len(dopamineRows)))
print("nb dobutamines: " + str(len(dobutamineRows)))
print("nb ephinephrines: " + str(len(epinephrineRows)))
print("nb norepinephrines: " + str(len(norepinephrineRows)))
print("nb patients: " + str(len(allPatientIds)))
return [(patId, calculate_sofa_score(
get_value_or_0(mapRows, patId),
get_value_or_0(dopamineRows, patId),
get_value_or_0(dobutamineRows, patId),
get_value_or_0(epinephrineRows, patId),
get_value_or_0(norepinephrineRows, patId),
)) for patId in allPatientIds]
def get_liver_scores(self, cur):
'''
Function calculating the liver components of SOFA scores.
Returns: array of tuples like (patientId, sofaScore)
'''
def to_sofa(concentration):
if concentration > 12: return 4
if concentration > 6: return 3
if concentration > 2: return 2
if concentration > 1.2: return 1
return 0
sql = """SELECT patientunitstayid, MAX(bilirubin)
FROM eicu_crd.apacheapsvar
WHERE bilirubin > 0
GROUP BY patientunitstayid;"""
cur.execute(sql)
return [(patientId, to_sofa(bilirubin)) for (patientId, bilirubin) in cur.fetchall()]
def get_coagulation_scores(self, cur):
'''
Function calculating the coagulation components of SOFA scores.
Returns: array of tuples like (patientId, sofaScore)
'''
def to_sofa(count):
if count < 20: return 4
if count < 50: return 3
if count < 100: return 2
if count < 150: return 1
return 0
sql = """SELECT patientunitstayid, MIN(labresult)
FROM eicu_crd.lab
WHERE labname = 'platelet x 1000'
AND labresult > 0
GROUP BY patientunitstayid;"""
cur.execute(sql)
return [(patientId, to_sofa(count)) for (patientId, count) in cur.fetchall()]
def get_kidneys_scores(self, cur):
'''
Function calculating the kidney components of SOFA scores.
Returns: array of tuples like (patientId, sofaScore)
'''
def to_sofa(count):
if count > 5: return 4
if count > 3.5: return 3
if count > 2: return 2
if count > 1.2: return 1
return 0
sql = """SELECT patientunitstayid, MAX(creatinine)
FROM eicu_crd.apacheapsvar
WHERE creatinine > 0
GROUP BY patientunitstayid;"""
cur.execute(sql)
return [(patientId, to_sofa(count)) for (patientId, count) in cur.fetchall()]
def calc_patient_scores(self, *args):
'''
        Merges the component scores in args (each an array of tuples like (patientId, score)) into one
        total score per patient. A component score missing for a patient is assumed to be 0: abnormal
        values would likely have been measured for clinical purposes, so a missing measurement usually
        means nothing extraordinary was observed, making 0 a reasonable default.
        Returns the list of total SOFA scores, one entry per patient, e.g.
        args = ([(1, 2)], [(1, 3), (2, 1)]) yields [5, 1].
'''
allPatientIds = []
for componentscores in args:
for (patId, _) in componentscores:
if not patId in allPatientIds:
allPatientIds.append(patId)
patientScores = []
for patId in allPatientIds:
patScore = 0
for componentscores in args:
patScore += get_value_or_0(componentscores, patId)
patientScores.append(patScore)
return patientScores
def get_data(self):
"""
Executes query and calculates the sofa score based on it.
:return: A list with one dict like {'sourceDatabase': '...', 'sofaAvg': ..., 'sofaStd': ...} (train data) and None (test data).
"""
conn = self.connect()
cur = conn.cursor()
        # TODO are we allowed (depends on the DB) to query in parallel?
resp_scores = self.get_respiratory_scores(cur)
ns_scores = self.get_nervous_system_scores(cur)
cs_scores = self.get_cardiovascular_system_scores(conn)
liver_scores = self.get_liver_scores(cur)
coag_scores = self.get_coagulation_scores(cur)
kidney_scores = self.get_kidneys_scores(cur)
conn.close()
patientScores = self.calc_patient_scores(resp_scores, ns_scores, cs_scores, liver_scores, coag_scores, kidney_scores)
#np.savetxt("patient_scores.csv", patientScores, delimiter=",")
return [{'sourceDatabase': self.database,
'sofaAvg': np.average(patientScores),
'sofaStd': np.std(patientScores)}], None
def save_data(self, statement: str, rows):
"""Inserts the given rows (tuples) of data with the given INSERT statement.
This is usually not the job of a DataHandler, but it already has all the setup for database connections and is passed to the
        FusionHandler for other uses, so it was easier this way. Used on the aggregator side.
"""
conn = self.connect()
cur = conn.cursor()
cur.executemany(statement, rows)
conn.commit()
conn.close()
def connect(self):
return psycopg2.connect(host=self.host, port=self.port, database=self.database,
user=os.environ['DB_USER'], password=os.environ['DB_PASSWORD'],
sslmode='require')
def run_query(self,conn,sql:str):
print("\nrunning query: " + sql)
with conn.cursor() as cur:
cur.execute(sql)
tuples = cur.fetchall()
return tuples
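# --- Usage sketch (not part of the original handler; names and values are illustrative) ---
# Assuming DB_USER and DB_PASSWORD are set in the environment and an eICU database
# is reachable, the handler could be exercised roughly like this:
#
#   data_config = {'host': 'localhost', 'port': 5432, 'database': 'eicu'}
#   handler = PostgreSqlDataHandler(data_config)
#   train_data, _ = handler.get_data()
#   print(train_data)  # [{'sourceDatabase': 'eicu', 'sofaAvg': ..., 'sofaStd': ...}]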
|
{"hexsha": "405d3832e3ac86427bdda5d39be9bbf07ca4a12c", "size": 13322, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/PHR/data/sofa_postgresql_data_handler.py", "max_stars_repo_name": "imec-int/federated-learning-lib", "max_stars_repo_head_hexsha": "1b2dc964de01cd23f357edbce7527ec1bcfc2cd3", "max_stars_repo_licenses": ["IBM-pibs"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/PHR/data/sofa_postgresql_data_handler.py", "max_issues_repo_name": "imec-int/federated-learning-lib", "max_issues_repo_head_hexsha": "1b2dc964de01cd23f357edbce7527ec1bcfc2cd3", "max_issues_repo_licenses": ["IBM-pibs"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/PHR/data/sofa_postgresql_data_handler.py", "max_forks_repo_name": "imec-int/federated-learning-lib", "max_forks_repo_head_hexsha": "1b2dc964de01cd23f357edbce7527ec1bcfc2cd3", "max_forks_repo_licenses": ["IBM-pibs"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7617554859, "max_line_length": 135, "alphanum_fraction": 0.5864735025, "include": true, "reason": "import numpy", "num_tokens": 3233}
|
import numpy as np
from random import randint,random
import math
# Module-level caches for entropy terms (the `global` statements in the
# functions below refer to these).
H_feature = {}
H_label = {}
H_feature_label = {}
H_featureS = {}
feature_item = 1449  # columns with index < feature_item are features; the rest are labels
label_num = 45
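# Parse a sparse ARFF file (MULAN-style, hence the xml_path parameter, which is
# unused here) into two CSR-like (data, row, col, indptr) tuples: one for the
# feature columns and one for the label columns.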
def read_sparse_arff(f_path, xml_path):
global feature_item
f = open(f_path)
f_data = f.read().split('@data')
f.close()
column = [i.split(' ')[1] for i in f_data[0].split('@attribute')[1:]]
label_row, label_col, label_data, label_indptr = [], [], [], []
feature_row, feature_col, feature_data, feature_indptr = [], [], [], []
inil = 0
inil_label = 0
for l in enumerate(f_data[1].replace(' ', ':').split('\n')[1:-1]):
l_v_dict = eval(l[1])
col, data = [], []
col.extend(l_v_dict.keys())
data.extend(l_v_dict.values())
index = 0
feature_num = 0
for item in col:
if item < feature_item:
index += 1
feature_num += 1
else:
break
inil_label += len(l_v_dict) - feature_num
label_indptr.append(inil_label)
label_col.extend(col[index:])
label_data.extend(data[index:])
label_row.extend([l[0] for i in range(len(l_v_dict) - feature_num)])
inil += feature_num
feature_indptr.append(inil)
feature_col.extend(col[0:index])
feature_data.extend(data[0:index])
feature_row.extend([l[0] for i in range(feature_num)])
sparse_feature = (feature_data, feature_row, feature_col, feature_indptr)
sparse_label = (label_data, label_row, label_col, label_indptr)
return sparse_feature, sparse_label
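# Normalized mutual information between feature column i and label column j,
# estimated from the sparse tuples; entropy terms are cached in the module-level dicts.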
def NMI(feature,label,i,j,true_len):
global feature_item
add = feature_item
j = j + add
    global H_feature, H_label, H_feature_label
    if (i, j) in H_feature_label:
        je = H_feature_label[(i, j)]
else:
        # Upper bounds on the horizontal/vertical dimensions of the distribution
        # table; both are trimmed to the distinct value counts further below.
        diffDataCount_X = true_len
        diffDataCount_Y = true_len
distributionXY = np.zeros((diffDataCount_Y, diffDataCount_X))
before_f = 0
before_l = 0
indptr_f = feature[3]
indptr_l = label[3]
diffDataX = []
diffDataNumX = {}
diffDataNumY = {}
diffDataY = []
for (index_f, index_l) in zip(indptr_f, indptr_l):
data_f = feature[0][before_f:index_f]
col_f = feature[2][before_f:index_f]
before_f = index_f
data_l = label[0][before_l:index_l]
col_l = label[2][before_l:index_l]
before_l = index_l
            i_exit = col_f.count(i)  # i indexes a feature column
            j_exit = col_l.count(j)  # j indexes a label column
if (i_exit > 0):
data_i = data_f[col_f.index(i)]
else:
data_i = 0
if diffDataX.count(data_i) > 0:
distribution_x = diffDataX.index(data_i)
diffDataNumX[data_i] += 1
else:
diffDataX.append(data_i)
distribution_x = len(diffDataX) - 1
diffDataNumX[data_i] = 1
if (j_exit > 0):
data_j = data_l[col_l.index(j)]
else:
data_j = 0
if diffDataY.count(data_j) > 0:
distribution_y = diffDataY.index(data_j)
diffDataNumY[data_j] += 1
else:
diffDataY.append(data_j)
distribution_y = len(diffDataY) - 1
diffDataNumY[data_j] = 1
distributionXY[distribution_x][distribution_y] += 1
diffDataCount_X = len(diffDataX)
diffDataCount_Y = len(diffDataY)
distributionXY = distributionXY[:diffDataCount_X, :diffDataCount_Y]
distributionXY = distributionXY / true_len
je = JointEntropy(distributionXY)
        H_feature_label[(i, j)] = je
if i in H_feature:
HX = H_feature[i]
else:
HX = DataEntropy(true_len, diffDataNumX)
H_feature[i] = HX
if j in H_label:
HY = H_label[j]
else:
HY = DataEntropy(true_len, diffDataNumY)
H_label[j] = HY
if (HX == 0.0 or HY == 0.0):
return 0
mi = HX + HY - je
res = mi / math.sqrt(HX * HY)
return res
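# Shannon joint entropy of an empirical joint distribution table.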
def JointEntropy(distributionXY):
je = 0
[lenY, lenX] = np.shape(distributionXY)
for i in range(lenY):
for j in range(lenX):
if (distributionXY[i][j] != 0):
je = je - distributionXY[i][j] * math.log2(distributionXY[i][j])
return je
def DataEntropy(dataArrayLen, diffDataNum):
    # Shannon entropy of the empirical distribution given by the value counts.
    entropyVal = 0
    for i in diffDataNum:
        probVal = diffDataNum[i] / dataArrayLen
        if probVal != 0:
            entropyVal -= probVal * math.log2(probVal)
    return entropyVal
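# Information distance between two feature columns: 2 - (H(X) + H(Y)) / H(X, Y);
# joint entropies are cached in H_featureS.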
def distance(feature,i,j,true_len):
global H_feature,H_featureS
if (i,j) in H_featureS:
je=H_featureS[(i,j)]
else:
        # Upper bounds on the horizontal/vertical dimensions of the distribution
        # table; both are trimmed to the distinct value counts further below.
        diffDataCount_X = true_len
        diffDataCount_Y = true_len
distributionXY = np.zeros((diffDataCount_Y, diffDataCount_X))
before = 0
indptr = feature[3]
diffDataX = []
diffDataY = []
diffDataNumX = {}
diffDataNumY = {}
for index in indptr:
data = feature[0][before:index]
# row = feature[1][before:index]
col = feature[2][before:index]
before = index
i_exit = col.count(i)
j_exit = col.count(j)
            if (i_exit > 0):
                data_i = data[col.index(i)]
            else:
                data_i = 0
if diffDataX.count(data_i) > 0:
distribution_x = diffDataX.index(data_i)
diffDataNumX[data_i] += 1
else:
diffDataX.append(data_i)
distribution_x = len(diffDataX) - 1
diffDataNumX[data_i] = 1
            if (j_exit > 0):
                data_j = data[col.index(j)]
            else:
                data_j = 0
if diffDataY.count(data_j) > 0:
distribution_y = diffDataY.index(data_j)
diffDataNumY[data_j] += 1
else:
diffDataY.append(data_j)
distribution_y = len(diffDataY) - 1
diffDataNumY[data_j] = 1
distributionXY[distribution_x][distribution_y] += 1
diffDataCount_X = len(diffDataX)
diffDataCount_Y = len(diffDataY)
distributionXY = distributionXY[:diffDataCount_X, :diffDataCount_Y]
distributionXY = distributionXY / true_len
je = JointEntropy(distributionXY)
H_featureS[(i, j)] = je
if i in H_feature:
HX=H_feature[i]
else:
HX = DataEntropy(true_len, diffDataNumX)
H_feature[i]=HX
if j in H_feature:
HY=H_feature[j]
else:
HY = DataEntropy(true_len, diffDataNumY)
H_feature[j]=HY
if je == 0.0:
return 1
return 2 - (HX + HY) / je
def readNMI(address,row,col):
f = open(address)
nmi = np.zeros((row, col))
text=f.read()
lines=text.split("\n")
i,j=0,0
for line in lines:
tokens=line.split(" ")
for v in range(len(tokens)-1):
nmi[i][j]=float(tokens[v])
j+=1
i+=1
j=0
return nmi
def readDis(address,n):
f = open(address)
dis = np.zeros((n, n))
text = f.read()
lines = text.split("\n")
i, j = 0, 0
for line in lines:
j=i+1
tokens = line.split(" ")
for v in range(len(tokens) - 1):
dis[i][j] = float(tokens[v])
j += 1
i += 1
for i in range(1,n):
for j in range(i):
dis[i][j]=dis[j][i]
return dis
class GSEMO:
def __init__(self,**kwargs):
self.k = kwargs["k"]
self.n = kwargs["n"]
self.mylambda = kwargs["mylambda"]
self.top = kwargs["top"]
self.l = kwargs["l"]
        # Expected iteration budget for GSEMO: e * n * k^3 / 2.
        self.iterationTime = math.exp(1) * self.n * self.k ** 3 / 2
self.NMI = readNMI(kwargs["NMI"],kwargs["n"],kwargs["l"])
self.dis = readDis(kwargs["dis"],kwargs["n"])
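    # Standard bit-wise mutation: flip each of the n bits independently with probability 1/n.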
def mutation(self, s):
rand_rate = 1.0 / (self.n)
change = np.random.binomial(1, rand_rate, self.n)
return np.abs(s - change)
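    # GSEMO main loop: maintain a mutually non-dominated population w.r.t.
    # (objective value, subset size) and periodically log the best individual
    # with at most k selected items.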
def doGSEMO(self, path):
        population = np.mat(np.zeros([1, self.n], 'int8'))  # initialize the population
self.tempOptimum = []
fitness = np.mat(np.zeros([1, 2]))
popSize = 1
        t = 0  # the current iteration count
        iter = 0  # iterations since the last log snapshot
kn = int(self.k * self.n)
while t < self.iterationTime:
if iter == kn:
log = open(path, 'a')
iter = 0
resultIndex = -1
maxValue = float("-inf")
for p in range(0, popSize):
if fitness[p, 1] <= self.k and fitness[p, 0] > maxValue:
maxValue = fitness[p, 0]
resultIndex = p
self.tempOptimum.append(population[resultIndex])
res = population[resultIndex]
                f = self.calculate_true_value(res)
log.write(str(f))
log.write("\n")
index = np.nonzero(res)
linklist = []
for i, j in zip(index[0], index[1]):
linklist.append([i, j])
for item in linklist:
log.write(str(item[1] + 1))
log.write(' ')
log.write("\n")
log.close()
iter += 1
            s = population[randint(1, popSize) - 1, :]  # choose an individual from the population uniformly at random
offSpring = self.mutation(s) # every bit will be flipped with probability 1/n
offSpringFit = np.mat(np.zeros([1, 2])) # value, size
offSpringFit[0, 1] = offSpring[0, :].sum()
if offSpringFit[0, 1] == 0 or offSpringFit[0, 1] > self.k:
t += 1
continue
offSpringFit[0, 0] = self.evaluateObjective(offSpring)
isDominate = False
for i in range(0, popSize):
if (fitness[i, 0] > offSpringFit[0, 0] and fitness[i, 1] <= offSpringFit[0, 1]) or (
fitness[i, 0] >= offSpringFit[0, 0] and fitness[i, 1] < offSpringFit[0, 1]):
isDominate = True
break
            if not isDominate:  # no individual in the population dominates offSpring
Q = []
for j in range(0, popSize):
if offSpringFit[0, 0] >= fitness[j, 0] and offSpringFit[0, 1] <= fitness[j, 1]:
continue
else:
Q.append(j)
fitness = np.vstack((offSpringFit, fitness[Q, :])) # update fitness
population = np.vstack((offSpring, population[Q, :])) # update population
t = t + 1
popSize = np.shape(fitness)[0]
resultIndex = -1
maxValue = float("-inf")
for p in range(0, popSize):
if fitness[p, 1] <= self.k and fitness[p, 0] > maxValue:
maxValue = fitness[p, 0]
resultIndex = p
self.tempOptimum.append(population[resultIndex])
return self.tempOptimum
def sum_of_top(self,linklist,size,l):
values=[]
for i in range(size):
values.append(self.NMI[linklist[i][1]][l])
values.sort(reverse=True)
value=0
for i in range(min(self.top,size)):
value+=values[i]
return value
def metric(self,linklist,i,j):
return self.dis[linklist[i][1]][linklist[j][1]]
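    # Surrogate objective optimized during the run: a size-scaled relevance term g
    # plus lambda-weighted pairwise diversity (cf. calculate_true_value for the
    # unscaled objective).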
def evaluateObjective(self,offSpring):
index=np.nonzero(offSpring)
size=np.shape(index)[1]
linklist=[]
for i, j in zip(index[0], index[1]):
linklist.append([i, j])
g=0
for l in range(self.l):
g+= self.sum_of_top(linklist,size,l)
div=0
for i in range(size):
for j in range(i+1,size):
div+=self.metric(linklist,i,j)
res=0.5*(1+size/self.k)*g+self.mylambda*div
return res
    def calculate_true_value(self, res):
index = np.nonzero(res)
size = np.shape(index)[1]
linklist = []
for i, j in zip(index[0], index[1]):
linklist.append([i, j])
g = 0
for l in range(self.l):
g += self.sum_of_top(linklist, size, l)
div = 0
for i in range(size):
for j in range(i + 1, size):
div += self.metric(linklist, i, j)
res = g + self.mylambda * div
return res
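# --- Usage sketch (not part of the original file; all paths and sizes are illustrative) ---
# The keyword arguments mirror __init__ above; 'NMI' and 'dis' point at the
# precomputed matrices read by readNMI/readDis.
#
#   solver = GSEMO(k=20, n=1449, mylambda=0.5, top=3, l=45,
#                  NMI='nmi.txt', dis='dis.txt')
#   pareto_snapshots = solver.doGSEMO('gsemo_log.txt')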
|
{"hexsha": "4275f07552c75b19559f3a6c8dd51df09e45fd23", "size": 13423, "ext": "py", "lang": "Python", "max_stars_repo_path": "Experiment_enron_medical/medical_GSEMO.py", "max_stars_repo_name": "Danxuan-Liu/diversification-code", "max_stars_repo_head_hexsha": "53a2e0216e731d7d693086b81e32e2a61b322b4e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Experiment_enron_medical/medical_GSEMO.py", "max_issues_repo_name": "Danxuan-Liu/diversification-code", "max_issues_repo_head_hexsha": "53a2e0216e731d7d693086b81e32e2a61b322b4e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Experiment_enron_medical/medical_GSEMO.py", "max_forks_repo_name": "Danxuan-Liu/diversification-code", "max_forks_repo_head_hexsha": "53a2e0216e731d7d693086b81e32e2a61b322b4e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1438515081, "max_line_length": 102, "alphanum_fraction": 0.5339342919, "include": true, "reason": "import numpy", "num_tokens": 3505}
|
from vicero.algorithms.qlearning import Qlearning
import numpy as np
# DynaQ is an algorithm that is very similar to
# classical tabular Q-learning, with the one difference being that it
# keeps an internal model that simulates the consequences of actions
# based entirely on experience
# More details: S&B18 Chapter 8
class DynaQ(Qlearning):
def __init__(self, env, n_states, n_actions, epsilon, discretize, planning_steps=0):
super(DynaQ, self).__init__(env, n_states, n_actions, epsilon=epsilon, discretize=discretize)
self.model = {} # internal model for simulation, built from experience / exploration
self.planning_steps = planning_steps
def train(self, iterations):
        # Each iteration: one real environment step (direct RL), then
        # planning_steps simulated updates drawn from the learned model.
for _ in range(iterations):
state_old = self.env.state
action = self.exploratory_action(self.env.state)
state, reward, done, board = self.env.step(action)
self.update_q(state_old, action, reward, state)
self.model[(state_old, action)] = (reward, state)
for _ in range(self.planning_steps):
                # sample a previously observed (state, action) pair from the model
                keys = list(self.model.keys())
                sample_state_old, sample_action = keys[np.random.randint(len(keys))]
                sample_reward, sample_state = self.model[(sample_state_old, sample_action)]
self.update_q(sample_state_old, sample_action, sample_reward, sample_state)
if done:
self.env.reset()
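# --- Usage sketch (not part of the original file) ---
# Assumes an environment compatible with vicero's Qlearning base class
# (exposing .state, .step(action) and .reset()) and a discretize function
# mapping raw observations to table indices; all values are illustrative.
#
#   agent = DynaQ(env, n_states=64, n_actions=4, epsilon=0.1,
#                 discretize=my_discretize, planning_steps=10)
#   agent.train(10000)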
|
{"hexsha": "9e238beb352a053439714de7f662ca6d69dd851b", "size": 1443, "ext": "py", "lang": "Python", "max_stars_repo_path": "vicero/algorithms/dynaq.py", "max_stars_repo_name": "Jontahan/vicero", "max_stars_repo_head_hexsha": "678f4f139788cb9be149f6d9651d93ca737aeccd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-03-26T18:08:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-09T10:18:05.000Z", "max_issues_repo_path": "vicero/algorithms/dynaq.py", "max_issues_repo_name": "Jontahan/vicero", "max_issues_repo_head_hexsha": "678f4f139788cb9be149f6d9651d93ca737aeccd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vicero/algorithms/dynaq.py", "max_forks_repo_name": "Jontahan/vicero", "max_forks_repo_head_hexsha": "678f4f139788cb9be149f6d9651d93ca737aeccd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.7272727273, "max_line_length": 101, "alphanum_fraction": 0.6708246708, "include": true, "reason": "import numpy", "num_tokens": 314}
|