| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
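For orientation, a minimal sketch of how rows with this schema could be loaded and inspected; the Parquet file name below is a placeholder, not something specified by this dump:

```python
# Illustrative only: load rows with the schema above and peek at a few columns.
# "code_files.parquet" is a hypothetical local path.
import pandas as pd

df = pd.read_parquet("code_files.parquet")
print(df.dtypes.head(10))                      # column -> dtype, as listed above
python_rows = df[df["lang"] == "Python"]       # all four sample rows are Python
print(python_rows[["max_stars_repo_name", "size",
                   "avg_line_length", "alphanum_fraction"]].head())
```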
hexsha: d4bca411ec322bf0d2f4684e172c03b2076797b4 | size: 3,590 | ext: py | lang: Python
max_stars_repo_path: hypernet/src/thermophysicalModels/reactionThermo/mixture/multiComponent.py | max_stars_repo_name: christian-jacobsen/hypernet | max_stars_repo_head_hexsha: 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: hypernet/src/thermophysicalModels/reactionThermo/mixture/multiComponent.py | max_issues_repo_name: christian-jacobsen/hypernet | max_issues_repo_head_hexsha: 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: hypernet/src/thermophysicalModels/reactionThermo/mixture/multiComponent.py | max_forks_repo_name: christian-jacobsen/hypernet | max_forks_repo_head_hexsha: 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import numpy as np
from hypernet.src.general import const
from hypernet.src.general import utils
from hypernet.src.thermophysicalModels.reactionThermo.mixture import Basic
class MultiComponent(Basic):
# Initialization
###########################################################################
def __init__(
self,
specieThermos,
*args,
**kwargs
):
super(MultiComponent, self).__init__(specieThermos)
# Methods
###########################################################################
# Mixture properties ------------------------------------------------------
def update(self, XY, var='Y'):
# Update mass/molar fractions
for name, value in XY.items():
value = utils.check_XY(utils.convert_to_array(value))
setattr(self.spTh[name].specie, var, value)
# Update mixture/species properties
self.M = self.M_(var=var)
if var == 'Y':
self.Xi_()
elif var == 'X':
self.Yi_()
self.R = self.R_()
# Mixture properties ------------------------------------------------------
# Mass
def M_(self, var='Y'):
# [kg/mol]
if var == 'Y':
M = [spTh.specie.Y / spTh.specie.M for spTh in self.spTh.values()]
return 1./np.sum(np.concatenate(M))
elif var == 'X':
M = [spTh.specie.X * spTh.specie.M for spTh in self.spTh.values()]
return np.sum(np.concatenate(M))
# Specific gas constant
def R_(self):
R = [spTh.specie.Y * spTh.specie.R for spTh in self.spTh.values()]
return np.sum(np.concatenate(R))
# Pressure
def p_(self, rho, T):
return rho*self.R*T
# Density
def rho_(self, p, T):
return p/(self.R*T)
# Number density
def n_(self, rho):
self.ni_(rho=rho, var='Y')
n = [spTh.specie.n for spTh in self.spTh.values()]
return np.sum(np.concatenate(n))
# Enthalpy/Energy
def he_(self):
# [J/kg]
he = [spTh.specie.Y * spTh.thermo.he for spTh in self.spTh.values()]
return np.sum(np.concatenate(he))
def cpv_(self):
# [J/(kg K)]
cpv = [spTh.specie.Y * spTh.thermo.cpv for spTh in self.spTh.values()]
return np.sum(np.concatenate(cpv))
def dcpvdT_(self):
# [J/kg]
dcpvdT = [
spTh.specie.Y * spTh.thermo.dcpvdT for spTh in self.spTh.values()
]
return np.sum(np.concatenate(dcpvdT))
def dhedY_(self, dY):
# [J/kg]
dhedY = [
np.sum(dY[name] * spTh.thermo.he) \
for name, spTh in self.spTh.items()
]
return np.sum(dhedY)
# Species properties ------------------------------------------------------
def Yi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.Y = sp.X * sp.M / self.M
def Xi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.X = sp.Y * self.M / sp.M
def ni_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.n = sp.Y * rho / sp.M * const.UNA
elif var == 'X':
sp.n = sp.X * n
def rhoi_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.rho = sp.Y * rho
elif var == 'X':
sp.rho = sp.X * n * sp.M / const.UNA
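As a small aside (not part of the stored file content), the mixture formulas implemented by `M_` and `Xi_` above can be reproduced with plain NumPy; the O2/N2 numbers here are illustrative assumptions:

```python
# Illustrative sketch of the formulas used in MultiComponent.M_ and Xi_:
# mixture molar mass M = 1 / sum(Y_i / M_i), mole fractions X_i = Y_i * M / M_i.
import numpy as np

Y = {"O2": np.array([0.233]), "N2": np.array([0.767])}   # assumed mass fractions [-]
M = {"O2": 0.032, "N2": 0.028}                            # molar masses [kg/mol]

mix_M = 1.0 / np.sum(np.concatenate([Y[s] / M[s] for s in Y]))
X = {s: Y[s] * mix_M / M[s] for s in Y}
print(mix_M)   # ~0.0288 kg/mol for this air-like composition
print(X)       # mole fractions sum to 1
```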
avg_line_length: 30.423729 | max_line_length: 79 | alphanum_fraction: 0.479666
qsc_code_num_words_quality_signal: 446 | qsc_code_num_chars_quality_signal: 3,590 | qsc_code_mean_word_length_quality_signal: 3.778027 | qsc_code_frac_words_unique_quality_signal: 0.190583
qsc_code_frac_chars_top_2grams_quality_signal: 0.083086 | qsc_code_frac_chars_top_3grams_quality_signal: 0.071217 | qsc_code_frac_chars_top_4grams_quality_signal: 0.099703
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.422552 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.321068 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0.321068 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0.321068 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.321068 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0.321068
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.000404 | qsc_code_frac_chars_whitespace_quality_signal: 0.310585
qsc_code_size_file_byte_quality_signal: 3,590 | qsc_code_num_lines_quality_signal: 117 | qsc_code_num_chars_line_max_quality_signal: 80 | qsc_code_num_chars_line_mean_quality_signal: 30.683761
qsc_code_frac_chars_alphabet_quality_signal: 0.680404 | qsc_code_frac_chars_comments_quality_signal: 0.117549 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.202532
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.004335 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.189873 | qsc_codepython_cate_var_zero_quality_signal: false | qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.050633 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0.025316 | qsc_codepython_score_lines_no_logic_quality_signal: 0.379747 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_* raw columns (without the _quality_signal suffix, in schema order): all 0, except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
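The per-file statistics above are derived from the `content` string. The dataset's exact definitions are not given in this dump, but rough recomputations of the simpler ones might look like this sketch:

```python
# Rough, assumed recomputation of a few of the simpler per-file statistics;
# the dataset's own definitions may differ in details (e.g. newline handling).
def simple_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "size_file_byte": len(content.encode("utf-8")),
        "num_lines": len(lines),
        "avg_line_length": sum(len(l) for l in lines) / max(len(lines), 1),
        "max_line_length": max((len(l) for l in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }

# Usage: simple_stats(row["content"]) should land close to the values listed above.
```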
hexsha: d4c06417dd5e89491398d91b568c1842895c3961 | size: 14,779 | ext: py | lang: Python
max_stars_repo_path: tensorflow_probability/python/distributions/laplace_test.py | max_stars_repo_name: wataruhashimoto52/probability | max_stars_repo_head_hexsha: 12e3f256544eadea6e863868da825614f4423eb0 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-04-13T12:31:12.000Z | max_stars_repo_stars_event_max_datetime: 2020-04-13T12:31:12.000Z
max_issues_repo_path: tensorflow_probability/python/distributions/laplace_test.py | max_issues_repo_name: wataruhashimoto52/probability | max_issues_repo_head_hexsha: 12e3f256544eadea6e863868da825614f4423eb0 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tensorflow_probability/python/distributions/laplace_test.py | max_forks_repo_name: wataruhashimoto52/probability | max_forks_repo_head_hexsha: 12e3f256544eadea6e863868da825614f4423eb0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-12-19T13:05:15.000Z | max_forks_repo_forks_event_max_datetime: 2020-12-19T13:05:15.000Z
content:
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class LaplaceTest(test_util.TestCase):
def testLaplaceShape(self):
loc = tf.constant([3.0] * 5)
scale = tf.constant(11.0)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
self.assertEqual(self.evaluate(laplace.batch_shape_tensor()), (5,))
self.assertEqual(laplace.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(laplace.event_shape_tensor()), [])
self.assertEqual(laplace.event_shape, tf.TensorShape([]))
def testLaplaceLogPDF(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
log_pdf = laplace.log_prob(x)
self.assertEqual(log_pdf.shape, (6,))
expected_log_pdf = sp_stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
pdf = laplace.prob(x)
self.assertEqual(pdf.shape, (6,))
self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testLaplaceLogPDFMultidimensional(self):
batch_size = 6
loc = tf.constant([[2.0, 4.0]] * batch_size)
scale = tf.constant([[3.0, 4.0]] * batch_size)
loc_v = np.array([2.0, 4.0])
scale_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
log_pdf = laplace.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.shape, (6, 2))
pdf = laplace.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.shape, (6, 2))
expected_log_pdf = sp_stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testLaplaceLogPDFMultidimensionalBroadcasting(self):
batch_size = 6
loc = tf.constant([[2.0, 4.0]] * batch_size)
scale = tf.constant(3.0)
loc_v = np.array([2.0, 4.0])
scale_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
log_pdf = laplace.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.shape, (6, 2))
pdf = laplace.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.shape, (6, 2))
expected_log_pdf = sp_stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testLaplaceCDF(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
cdf = laplace.cdf(x)
self.assertEqual(cdf.shape, (6,))
expected_cdf = sp_stats.laplace.cdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testLaplaceLogCDF(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
cdf = laplace.log_cdf(x)
self.assertEqual(cdf.shape, (6,))
expected_cdf = sp_stats.laplace.logcdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testLaplaceQuantile(self):
qs = self.evaluate(
tf.concat(
[[0., 1],
samplers.uniform([10], minval=.1, maxval=.9,
seed=test_util.test_seed())],
axis=0))
d = tfd.Laplace(loc=1., scale=1.3, validate_args=True)
vals = d.quantile(qs)
self.assertAllClose([-np.inf, np.inf], vals[:2])
self.assertAllClose(qs[2:], d.cdf(vals[2:]))
def testLaplaceLogSurvivalFunction(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
sf = laplace.log_survival_function(x)
self.assertEqual(sf.shape, (6,))
expected_sf = sp_stats.laplace.logsf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(sf), expected_sf)
def testLaplaceMean(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.mean().shape, (3,))
expected_means = sp_stats.laplace.mean(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.mean()), expected_means)
def testLaplaceMode(self):
loc_v = np.array([0.5, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.mode().shape, (3,))
self.assertAllClose(self.evaluate(laplace.mode()), loc_v)
def testLaplaceVariance(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.variance().shape, (3,))
expected_variances = sp_stats.laplace.var(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.variance()), expected_variances)
def testLaplaceStd(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.stddev().shape, (3,))
expected_stddev = sp_stats.laplace.std(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.stddev()), expected_stddev)
def testLaplaceEntropy(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.entropy().shape, (3,))
expected_entropy = sp_stats.laplace.entropy(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.entropy()), expected_entropy)
def testLaplaceSample(self):
loc_v = 4.0
scale_v = 3.0
loc = tf.constant(loc_v)
scale = tf.constant(scale_v)
n = 100000
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
samples = laplace.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
sp_stats.laplace.mean(loc_v, scale=scale_v),
rtol=0.05,
atol=0.)
self.assertAllClose(
sample_values.var(),
sp_stats.laplace.var(loc_v, scale=scale_v),
rtol=0.05,
atol=0.)
self.assertTrue(self._kstest(loc_v, scale_v, sample_values))
def testLaplaceFullyReparameterized(self):
loc = tf.constant(4.0)
scale = tf.constant(3.0)
_, [grad_loc, grad_scale] = tfp.math.value_and_gradient(
lambda l, s: tfd.Laplace(loc=l, scale=s, validate_args=True).sample( # pylint: disable=g-long-lambda
100, seed=test_util.test_seed()), [loc, scale])
self.assertIsNotNone(grad_loc)
self.assertIsNotNone(grad_scale)
def testLaplaceSampleMultiDimensional(self):
loc_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
scale_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
n = 10000
samples = laplace.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(loc_v + scale_v) # 10 x 100
loc_bc = loc_v + zeros
scale_bc = scale_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
sp_stats.laplace.mean(loc_bc, scale=scale_bc),
rtol=0.35,
atol=0.)
self.assertAllClose(
sample_values.var(axis=0),
sp_stats.laplace.var(loc_bc, scale=scale_bc),
rtol=0.10,
atol=0.)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(loc_v, [-1])):
for bi, b in enumerate(np.reshape(scale_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, loc, scale, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = sp_stats.kstest(samples, sp_stats.laplace(loc, scale=scale).cdf)
# Return True when the test passes.
return ks < 0.02
def testLaplacePdfOfSampleMultiDims(self):
laplace = tfd.Laplace(loc=[7., 11.], scale=[[5.], [6.]], validate_args=True)
num = 50000
samples = laplace.sample(num, seed=test_util.test_seed())
pdfs = laplace.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.shape, (num, 2, 2))
self.assertEqual(pdfs.shape, (num, 2, 2))
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
self.assertAllClose(
sp_stats.laplace.mean(
[[7., 11.], [7., 11.]], scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
rtol=0.05,
atol=0.)
self.assertAllClose(
sp_stats.laplace.var([[7., 11.], [7., 11.]],
scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
rtol=0.05,
atol=0.)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testLaplaceNonPositiveInitializationParamsRaises(self):
loc_v = tf.constant(0.0, name='loc')
scale_v = tf.constant(-1.0, name='scale')
with self.assertRaisesOpError('Argument `scale` must be positive.'):
laplace = tfd.Laplace(
loc=loc_v, scale=scale_v, validate_args=True)
self.evaluate(laplace.mean())
loc_v = tf.constant(1.0, name='loc')
scale_v = tf.constant(0.0, name='scale')
with self.assertRaisesOpError('Argument `scale` must be positive.'):
laplace = tfd.Laplace(
loc=loc_v, scale=scale_v, validate_args=True)
self.evaluate(laplace.mean())
scale = tf.Variable([1., 2., -3.])
self.evaluate(scale.initializer)
with self.assertRaisesOpError('Argument `scale` must be positive.'):
d = tfd.Laplace(loc=0, scale=scale, validate_args=True)
self.evaluate(d.sample(seed=test_util.test_seed()))
def testLaplaceLaplaceKL(self):
batch_size = 6
event_size = 3
a_loc = np.array([[0.5] * event_size] * batch_size, dtype=np.float32)
a_scale = np.array([[0.1] * event_size] * batch_size, dtype=np.float32)
b_loc = np.array([[0.4] * event_size] * batch_size, dtype=np.float32)
b_scale = np.array([[0.2] * event_size] * batch_size, dtype=np.float32)
a = tfd.Laplace(loc=a_loc, scale=a_scale, validate_args=True)
b = tfd.Laplace(loc=b_loc, scale=b_scale, validate_args=True)
distance = tf.abs(a_loc - b_loc)
ratio = a_scale / b_scale
true_kl = (-tf.math.log(ratio) - 1 + distance / b_scale +
ratio * tf.exp(-distance / a_scale))
kl = tfd.kl_divergence(a, b)
x = a.sample(int(1e4), seed=test_util.test_seed())
kl_sample = tf.reduce_mean(a.log_prob(x) - b.log_prob(x), axis=0)
true_kl_, kl_, kl_sample_ = self.evaluate([true_kl, kl, kl_sample])
self.assertAllClose(true_kl_, kl_, atol=1e-5, rtol=1e-5)
self.assertAllClose(true_kl_, kl_sample_, atol=0., rtol=1e-1)
zero_kl = tfd.kl_divergence(a, a)
true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(true_kl), zero_kl])
self.assertAllEqual(true_zero_kl_, zero_kl_)
@test_util.tf_tape_safety_test
def testGradientThroughParams(self):
loc = tf.Variable([-5., 0., 5.])
scale = tf.Variable(2.)
d = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 3.])
grad = tape.gradient(loss, d.trainable_variables)
self.assertLen(grad, 2)
self.assertAllNotNone(grad)
def testAssertsPositiveScaleAfterMutation(self):
scale = tf.Variable([1., 2., 3.])
d = tfd.Laplace(loc=0., scale=scale, validate_args=True)
self.evaluate([v.initializer for v in d.variables])
with self.assertRaisesOpError('Argument `scale` must be positive.'):
with tf.control_dependencies([scale.assign([1., 2., -3.])]):
self.evaluate(tfd.Laplace(loc=0., scale=1.).kl_divergence(d))
def testAssertParamsAreFloats(self):
loc = tf.convert_to_tensor(0, dtype=tf.int32)
scale = tf.convert_to_tensor(1, dtype=tf.int32)
with self.assertRaisesRegexp(ValueError, 'Expected floating point'):
tfd.Laplace(loc=loc, scale=scale)
if __name__ == '__main__':
tf.test.main()
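The closed-form Laplace-to-Laplace KL divergence used in `testLaplaceLaplaceKL` above can be sanity-checked outside TensorFlow; a minimal NumPy/SciPy sketch with one illustrative parameter pair:

```python
# Illustrative check of the Laplace-Laplace KL closed form from the test above:
# KL(a || b) = -log(ratio) - 1 + distance / b_scale + ratio * exp(-distance / a_scale),
# with ratio = a_scale / b_scale and distance = |a_loc - b_loc|.
import numpy as np
from scipy import stats as sp_stats

a_loc, a_scale = 0.5, 0.1
b_loc, b_scale = 0.4, 0.2

distance = abs(a_loc - b_loc)
ratio = a_scale / b_scale
true_kl = -np.log(ratio) - 1 + distance / b_scale + ratio * np.exp(-distance / a_scale)

# Monte Carlo estimate of E_a[log p_a(x) - log p_b(x)]; should agree to ~1e-2.
x = sp_stats.laplace(a_loc, scale=a_scale).rvs(size=100_000, random_state=0)
kl_mc = np.mean(sp_stats.laplace.logpdf(x, a_loc, scale=a_scale)
                - sp_stats.laplace.logpdf(x, b_loc, scale=b_scale))
print(true_kl, kl_mc)
```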
avg_line_length: 38.790026 | max_line_length: 109 | alphanum_fraction: 0.66175
qsc_code_num_words_quality_signal: 2,258 | qsc_code_num_chars_quality_signal: 14,779 | qsc_code_mean_word_length_quality_signal: 4.164748 | qsc_code_frac_words_unique_quality_signal: 0.125332
qsc_code_frac_chars_top_2grams_quality_signal: 0.017439 | qsc_code_frac_chars_top_3grams_quality_signal: 0.035942 | qsc_code_frac_chars_top_4grams_quality_signal: 0.029775
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.554126 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.489792 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0.449171 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0.426733 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.38994 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0.378031
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.040776 | qsc_code_frac_chars_whitespace_quality_signal: 0.183571
qsc_code_size_file_byte_quality_signal: 14,779 | qsc_code_num_lines_quality_signal: 380 | qsc_code_num_chars_line_max_quality_signal: 110 | qsc_code_num_chars_line_mean_quality_signal: 38.892105
qsc_code_frac_chars_alphabet_quality_signal: 0.738604 | qsc_code_frac_chars_comments_quality_signal: 0.054808 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.370968
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.013121 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0.219355
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.077419 | qsc_codepython_cate_var_zero_quality_signal: false | qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.029032 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0 | qsc_codepython_score_lines_no_logic_quality_signal: 0.112903 | qsc_codepython_frac_lines_print_quality_signal: 0.003226
qsc_code_* raw columns (without the _quality_signal suffix, in schema order): all 0, except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: d4c0decddfc9adf11a583ac3c85b167de4ffaed9 | size: 33,707 | ext: py | lang: Python
max_stars_repo_path: selectinf/randomized/approx_reference_grouplasso.py | max_stars_repo_name: kevinbfry/selective-inference | max_stars_repo_head_hexsha: 4e846877b5c23969fc420b452f20cc3b16b6cb78 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 14 | max_stars_repo_stars_event_min_datetime: 2015-09-01T19:31:25.000Z | max_stars_repo_stars_event_max_datetime: 2021-11-26T08:47:10.000Z
max_issues_repo_path: selectinf/randomized/approx_reference_grouplasso.py | max_issues_repo_name: kevinbfry/selective-inference | max_issues_repo_head_hexsha: 4e846877b5c23969fc420b452f20cc3b16b6cb78 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 7 | max_issues_repo_issues_event_min_datetime: 2016-09-12T20:41:41.000Z | max_issues_repo_issues_event_max_datetime: 2018-06-26T02:10:30.000Z
max_forks_repo_path: selectinf/randomized/approx_reference_grouplasso.py | max_forks_repo_name: kevinbfry/selective-inference | max_forks_repo_head_hexsha: 4e846877b5c23969fc420b452f20cc3b16b6cb78 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 10 | max_forks_repo_forks_event_min_datetime: 2015-09-01T19:31:28.000Z | max_forks_repo_forks_event_max_datetime: 2021-02-23T01:16:20.000Z
content:
from __future__ import print_function
from scipy.linalg import block_diag
from scipy.stats import norm as ndist
from scipy.interpolate import interp1d
import collections
import numpy as np
from numpy import log
from numpy.linalg import norm, qr, inv, eig
import pandas as pd
import regreg.api as rr
from .randomization import randomization
from ..base import restricted_estimator
from ..algorithms.barrier_affine import solve_barrier_affine_py as solver
from ..distributions.discrete_family import discrete_family
class group_lasso(object):
def __init__(self,
loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso=True, # should lasso solver be used where applicable - defaults to True
perturb=None):
_check_groups(groups) # make sure groups looks sensible
# log likelihood : quadratic loss
self.loglike = loglike
self.nfeature = self.loglike.shape[0]
# ridge parameter
self.ridge_term = ridge_term
# group lasso penalty (from regreg)
# use regular lasso penalty if all groups are size 1
if use_lasso and groups.size == np.unique(groups).size:
# need to provide weights as an np.array rather than a dictionary
weights_np = np.array([w[1] for w in sorted(weights.items())])
self.penalty = rr.weighted_l1norm(weights=weights_np,
lagrange=1.)
else:
self.penalty = rr.group_lasso(groups,
weights=weights,
lagrange=1.)
# store groups as a class variable since the non-group lasso doesn't
self.groups = groups
self._initial_omega = perturb
# gaussian randomization
self.randomizer = randomizer
def fit(self,
solve_args={'tol': 1.e-12, 'min_its': 50},
perturb=None):
# solve the randomized version of group lasso
(self.initial_soln,
self.initial_subgrad) = self._solve_randomized_problem(perturb=perturb,
solve_args=solve_args)
# initialize variables
active_groups = [] # active group labels
active_dirs = {} # dictionary: keys are group labels, values are unit-norm coefficients
unpenalized = [] # selected groups with no penalty
overall = np.ones(self.nfeature, np.bool) # mask of active features
ordered_groups = [] # active group labels sorted by label
ordered_opt = [] # gamma's ordered by group labels
ordered_vars = [] # indices "ordered" by sorting group labels
tol = 1.e-20
_, self.randomizer_prec = self.randomizer.cov_prec
# now we are collecting the directions and norms of the active groups
for g in sorted(np.unique(self.groups)): # g is group label
group_mask = self.groups == g
soln = self.initial_soln # do not need to keep setting this
if norm(soln[group_mask]) > tol * norm(soln): # is group g appreciably nonzero
ordered_groups.append(g)
# variables in active group
ordered_vars.extend(np.flatnonzero(group_mask))
if self.penalty.weights[g] == 0:
unpenalized.append(g)
else:
active_groups.append(g)
active_dirs[g] = soln[group_mask] / norm(soln[group_mask])
ordered_opt.append(norm(soln[group_mask]))
else:
overall[group_mask] = False
self.selection_variable = {'directions': active_dirs,
'active_groups': active_groups} # kind of redundant with keys of active_dirs
self._ordered_groups = ordered_groups
# exception if no groups are selected
if len(self.selection_variable['active_groups']) == 0:
return np.sign(soln), soln
# otherwise continue as before
self.observed_opt_state = np.hstack(ordered_opt) # gammas as array
_beta_unpenalized = restricted_estimator(self.loglike, # refit OLS on E
overall,
solve_args=solve_args)
beta_bar = np.zeros(self.nfeature)
beta_bar[overall] = _beta_unpenalized # refit OLS beta with zeros
self._beta_full = beta_bar
X, y = self.loglike.data
W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar)) # all 1's for LS
opt_linearNoU = np.dot(X.T, X[:, ordered_vars] * W[:, np.newaxis])
for i, var in enumerate(ordered_vars):
opt_linearNoU[var, i] += self.ridge_term
opt_offset = self.initial_subgrad
self.observed_score_state = -opt_linearNoU.dot(_beta_unpenalized)
self.observed_score_state[~overall] += self.loglike.smooth_objective(beta_bar, 'grad')[~overall]
active_signs = np.sign(self.initial_soln)
active = np.flatnonzero(active_signs)
self.active = active
def compute_Vg(ug):
pg = ug.size # figure out size of g'th group
if pg > 1:
Z = np.column_stack((ug, np.eye(pg, pg - 1)))
Q, _ = qr(Z)
Vg = Q[:, 1:] # drop the first column
else:
Vg = np.zeros((1, 0)) # if the group is size one, the orthogonal complement is empty
return Vg
def compute_Lg(g):
pg = active_dirs[g].size
Lg = self.penalty.weights[g] * np.eye(pg)
return Lg
sorted_active_dirs = collections.OrderedDict(sorted(active_dirs.items()))
Vs = [compute_Vg(ug) for ug in sorted_active_dirs.values()]
V = block_diag(*Vs) # unpack the list
Ls = [compute_Lg(g) for g in sorted_active_dirs]
L = block_diag(*Ls) # unpack the list
XE = X[:, ordered_vars] # changed to ordered_vars
Q = XE.T.dot(self._W[:, None] * XE)
QI = inv(Q)
C = V.T.dot(QI).dot(L).dot(V)
self.XE = XE
self.Q = Q
self.QI = QI
self.C = C
U = block_diag(*[ug for ug in sorted_active_dirs.values()]).T
self.opt_linear = opt_linearNoU.dot(U)
self.active_dirs = active_dirs
self.opt_offset = opt_offset
self.ordered_vars = ordered_vars
self.linear_part = -np.eye(self.observed_opt_state.shape[0])
self.offset = np.zeros(self.observed_opt_state.shape[0])
return active_signs, soln
def _solve_randomized_problem(self,
perturb=None,
solve_args={'tol': 1.e-15, 'min_its': 100}):
# take a new perturbation if supplied
if perturb is not None:
self._initial_omega = perturb
if self._initial_omega is None:
self._initial_omega = self.randomizer.sample()
quad = rr.identity_quadratic(self.ridge_term,
0,
-self._initial_omega,
0)
problem = rr.simple_problem(self.loglike, self.penalty)
# if all groups are size 1, set up lasso penalty and run usual lasso solver... (see existing code)...
initial_soln = problem.solve(quad, **solve_args)
initial_subgrad = -(self.loglike.smooth_objective(initial_soln,
'grad') +
quad.objective(initial_soln, 'grad'))
return initial_soln, initial_subgrad
@staticmethod
def gaussian(X,
Y,
groups,
weights,
sigma=1.,
quadratic=None,
ridge_term=0.,
perturb=None,
use_lasso=True, # should lasso solver be used when applicable - defaults to True
randomizer_scale=None):
loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
n, p = X.shape
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(Y) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(Y) * np.sqrt(n / (n - 1.))
randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
return group_lasso(loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso,
perturb)
def _setup_implied_gaussian(self):
_, prec = self.randomizer.cov_prec
if np.asarray(prec).shape in [(), (0,)]:
cond_precision = self.opt_linear.T.dot(self.opt_linear) * prec
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T) * prec
else:
cond_precision = self.opt_linear.T.dot(prec.dot(self.opt_linear))
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T).dot(prec)
cond_mean = -logdens_linear.dot(self.observed_score_state + self.opt_offset)
self.cond_mean = cond_mean
self.cond_cov = cond_cov
self.cond_precision = cond_precision
self.logdens_linear = logdens_linear
return cond_mean, cond_cov, cond_precision, logdens_linear
def selective_MLE(self,
solve_args={'tol': 1.e-12},
level=0.9,
useJacobian=True,
dispersion=None):
"""Do selective_MLE for group_lasso
Note: this masks the selective_MLE inherited from query
because that is not adapted for the group_lasso. Also, assumes
you have already run the fit method since this uses results
from that method.
Parameters
----------
observed_target: from selected_targets
target_cov: from selected_targets
target_cov_score: from selected_targets
init_soln: (opt_state) initial (observed) value of optimization variables
cond_mean: conditional mean of optimization variables (model on _setup_implied_gaussian)
cond_cov: conditional variance of optimization variables (model on _setup_implied_gaussian)
logdens_linear: (model on _setup_implied_gaussian)
linear_part: like A_scaling (from lasso)
offset: like b_scaling (from lasso)
solve_args: passed on to solver
level: level of confidence intervals
useC: whether to use python or C solver
JacobianPieces: (use self.C defined in fitting)
"""
self._setup_implied_gaussian() # Calculate useful quantities
(observed_target, target_cov, target_score_cov, alternatives) = self.selected_targets(dispersion)
init_soln = self.observed_opt_state # just the gammas
cond_mean = self.cond_mean
cond_cov = self.cond_cov
logdens_linear = self.logdens_linear
linear_part = self.linear_part
offset = self.offset
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
observed_target = np.atleast_1d(observed_target)
prec_target = inv(target_cov)
prec_opt = self.cond_precision
score_offset = self.observed_score_state + self.opt_offset
# target_lin determines how the conditional mean of optimization variables
# vary with target
# logdens_linear determines how the argument of the optimization density
# depends on the score, not how the mean depends on score, hence the minus sign
target_linear = target_score_cov.T.dot(prec_target)
target_offset = score_offset - target_linear.dot(observed_target)
target_lin = - logdens_linear.dot(target_linear)
target_off = cond_mean - target_lin.dot(observed_target)
if np.asarray(self.randomizer_prec).shape in [(), (0,)]:
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
prec_opt).dot(
target_lin)
else:
_P = target_linear.T.dot(self.randomizer_prec).dot(target_offset)
_prec = prec_target + (target_linear.T.dot(self.randomizer_prec).dot(target_linear)) - target_lin.T.dot(
prec_opt).dot(target_lin)
C = target_cov.dot(_P - target_lin.T.dot(prec_opt).dot(target_off))
conjugate_arg = prec_opt.dot(cond_mean)
val, soln, hess = solve_barrier_affine_jacobian_py(conjugate_arg,
prec_opt,
init_soln,
linear_part,
offset,
self.C,
self.active_dirs,
useJacobian,
**solve_args)
final_estimator = target_cov.dot(_prec).dot(observed_target) \
+ target_cov.dot(target_lin.T.dot(prec_opt.dot(cond_mean - soln))) + C
unbiased_estimator = target_cov.dot(_prec).dot(observed_target) + target_cov.dot(
_P - target_lin.T.dot(prec_opt).dot(target_off))
L = target_lin.T.dot(prec_opt)
observed_info_natural = _prec + L.dot(target_lin) - L.dot(hess.dot(L.T))
observed_info_mean = target_cov.dot(observed_info_natural.dot(target_cov))
Z_scores = final_estimator / np.sqrt(np.diag(observed_info_mean))
pvalues = ndist.cdf(Z_scores)
pvalues = 2 * np.minimum(pvalues, 1 - pvalues)
alpha = 1 - level
quantile = ndist.ppf(1 - alpha / 2.)
intervals = np.vstack([final_estimator -
quantile * np.sqrt(np.diag(observed_info_mean)),
final_estimator +
quantile * np.sqrt(np.diag(observed_info_mean))]).T
log_ref = val + conjugate_arg.T.dot(cond_cov).dot(conjugate_arg) / 2.
result = pd.DataFrame({'MLE': final_estimator,
'SE': np.sqrt(np.diag(observed_info_mean)),
'Zvalue': Z_scores,
'pvalue': pvalues,
'lower_confidence': intervals[:, 0],
'upper_confidence': intervals[:, 1],
'unbiased': unbiased_estimator})
return result, observed_info_mean, log_ref
def selected_targets(self,
dispersion=None,
solve_args={'tol': 1.e-12, 'min_its': 50}):
X, y = self.loglike.data
n, p = X.shape
XE = self.XE
Q = self.Q
observed_target = restricted_estimator(self.loglike, self.ordered_vars, solve_args=solve_args)
_score_linear = -XE.T.dot(self._W[:, None] * X).T
alternatives = ['twosided'] * len(self.active)
if dispersion is None: # use Pearson's X^2
dispersion = ((y - self.loglike.saturated_loss.mean_function(
XE.dot(observed_target))) ** 2 / self._W).sum() / (n - XE.shape[1])
cov_target = self.QI * dispersion
crosscov_target_score = _score_linear.dot(self.QI).T * dispersion
return (observed_target,
cov_target,
crosscov_target_score,
alternatives)
class approximate_grid_inference(object):
def __init__(self,
query,
dispersion,
solve_args={'tol': 1.e-12},
useIP=True):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
query : `gaussian_query`
A Gaussian query which has information
to describe implied Gaussian.
observed_target : ndarray
Observed estimate of target.
target_cov : ndarray
Estimated covariance of target.
target_score_cov : ndarray
Estimated covariance of target and score of randomized query.
solve_args : dict, optional
Arguments passed to solver.
"""
self.solve_args = solve_args
result, inverse_info = query.selective_MLE(dispersion=dispersion)[:2]
self.linear_part = query.linear_part
self.offset = query.offset
self.logdens_linear = query.logdens_linear
self.cond_mean = query.cond_mean
self.prec_opt = np.linalg.inv(query.cond_cov)
self.cond_cov = query.cond_cov
self.C = query.C
self.active_dirs = query.active_dirs
(observed_target, target_cov, target_score_cov, alternatives) = query.selected_targets(dispersion)
self.observed_target = observed_target
self.target_score_cov = target_score_cov
self.target_cov = target_cov
self.init_soln = query.observed_opt_state
self.randomizer_prec = query.randomizer_prec
self.score_offset = query.observed_score_state + query.opt_offset
self.ntarget = ntarget = target_cov.shape[0]
_scale = 4 * np.sqrt(np.diag(inverse_info))
if useIP == False:
ngrid = 1000
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
else:
ngrid = 100
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
self.opt_linear = query.opt_linear
self.useIP = useIP
def summary(self,
alternatives=None,
parameter=None,
level=0.9):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
alternatives : [str], optional
Sequence of strings describing the alternatives,
should be values of ['twosided', 'less', 'greater']
parameter : np.array
Hypothesized value for parameter -- defaults to 0.
level : float
Confidence level.
"""
if parameter is not None:
pivots = self._approx_pivots(parameter,
alternatives=alternatives)
else:
pivots = None
pvalues = self._approx_pivots(np.zeros_like(self.observed_target),
alternatives=alternatives)
lower, upper = self._approx_intervals(level=level)
result = pd.DataFrame({'target': self.observed_target,
'pvalue': pvalues,
'lower_confidence': lower,
'upper_confidence': upper})
if not np.all(parameter == 0):
result.insert(4, 'pivot', pivots)
result.insert(5, 'parameter', parameter)
return result
def log_reference(self,
observed_target,
target_cov,
target_score_cov,
grid):
"""
Approximate the log of the reference density on a grid.
"""
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
prec_target = np.linalg.inv(target_cov)
target_lin = - self.logdens_linear.dot(target_score_cov.T.dot(prec_target))
ref_hat = []
for k in range(grid.shape[0]):
# in the usual D = N + Gamma theta.hat,
# target_lin is "something" times Gamma,
# where "something" comes from implied Gaussian
# cond_mean is "something" times D
# Gamma is target_score_cov.T.dot(prec_target)
num_opt = self.prec_opt.shape[0]
num_con = self.linear_part.shape[0]
cond_mean_grid = (target_lin.dot(np.atleast_1d(grid[k] - observed_target)) +
self.cond_mean)
#direction for decomposing o
eta = -self.prec_opt.dot(self.logdens_linear.dot(target_score_cov.T))
implied_mean = np.asscalar(eta.T.dot(cond_mean_grid))
implied_cov = np.asscalar(eta.T.dot(self.cond_cov).dot(eta))
implied_prec = 1./implied_cov
_A = self.cond_cov.dot(eta) * implied_prec
R = np.identity(num_opt) - _A.dot(eta.T)
A = self.linear_part.dot(_A).reshape((-1,))
b = self.offset-self.linear_part.dot(R).dot(self.init_soln)
conjugate_arg = implied_mean * implied_prec
val, soln, _ = solver(np.asarray([conjugate_arg]),
np.reshape(implied_prec, (1,1)),
eta.T.dot(self.init_soln),
A.reshape((A.shape[0],1)),
b,
**self.solve_args)
gamma_ = _A.dot(soln) + R.dot(self.init_soln)
log_jacob = jacobian_grad_hess(gamma_, self.C, self.active_dirs)
ref_hat.append(-val - ((conjugate_arg ** 2) * implied_cov)/ 2. + log_jacob[0])
return np.asarray(ref_hat)
def _construct_families(self):
self._construct_density()
self._families = []
for m in range(self.ntarget):
p = self.target_score_cov.shape[1]
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
var_target = 1. / ((self.precs[m])[0, 0])
log_ref = self.log_reference(observed_target_uni,
target_cov_uni,
target_score_cov_uni,
self.stat_grid[m])
if self.useIP == False:
logW = (log_ref - 0.5 * (self.stat_grid[m] - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(self.stat_grid[m],
np.exp(logW)))
else:
approx_fn = interp1d(self.stat_grid[m],
log_ref,
kind='quadratic',
bounds_error=False,
fill_value='extrapolate')
grid = np.linspace(self.stat_grid[m].min(), self.stat_grid[m].max(), 1000)
logW = (approx_fn(grid) -
0.5 * (grid - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(grid,
np.exp(logW)))
def _approx_pivots(self,
mean_parameter,
alternatives=None):
if not hasattr(self, "_families"):
self._construct_families()
if alternatives is None:
alternatives = ['twosided'] * self.ntarget
pivot = []
for m in range(self.ntarget):
family = self._families[m]
var_target = 1. / ((self.precs[m])[0, 0])
mean = self.S[m].dot(mean_parameter[m].reshape((1,))) + self.r[m]
_cdf = family.cdf((mean[0] - self.observed_target[m]) / var_target, x=self.observed_target[m])
print("variable completed ", m)
if alternatives[m] == 'twosided':
pivot.append(2 * min(_cdf, 1 - _cdf))
elif alternatives[m] == 'greater':
pivot.append(1 - _cdf)
elif alternatives[m] == 'less':
pivot.append(_cdf)
else:
raise ValueError('alternative should be in ["twosided", "less", "greater"]')
return pivot
def _approx_intervals(self,
level=0.9):
if not hasattr(self, "_families"):
self._construct_families()
lower, upper = [], []
for m in range(self.ntarget):
# construction of intervals from families follows `selectinf.learning.core`
family = self._families[m]
observed_target = self.observed_target[m]
l, u = family.equal_tailed_interval(observed_target,
alpha=1 - level)
var_target = 1. / ((self.precs[m])[0, 0])
lower.append(l * var_target + observed_target)
upper.append(u * var_target + observed_target)
return np.asarray(lower), np.asarray(upper)
### Private method
def _construct_density(self):
precs = {}
S = {}
r = {}
p = self.target_score_cov.shape[1]
for m in range(self.ntarget):
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
prec_target = 1. / target_cov_uni
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
target_linear = target_score_cov_uni.T.dot(prec_target)
target_offset = (self.score_offset - target_linear.dot(observed_target_uni)).reshape(
(target_linear.shape[0],))
target_lin = -self.logdens_linear.dot(target_linear)
target_off = (self.cond_mean - target_lin.dot(observed_target_uni)).reshape((target_lin.shape[0],))
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
self.prec_opt).dot(target_lin)
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_r = (1. / _prec).dot(target_lin.T.dot(self.prec_opt).dot(target_off) - _P)
_S = np.linalg.inv(_prec).dot(prec_target)
S[m] = _S
r[m] = _r
precs[m] = _prec
self.precs = precs
self.S = S
self.r = r
def solve_barrier_affine_jacobian_py(conjugate_arg,
precision,
feasible_point,
con_linear,
con_offset,
C,
active_dirs,
useJacobian=True,
step=1,
nstep=2000,
min_its=500,
tol=1.e-12):
"""
This needs to be updated to actually use the Jacobian information (in self.C)
arguments
conjugate_arg: \\bar{\\Sigma}^{-1} \bar{\\mu}
precision: \\bar{\\Sigma}^{-1}
feasible_point: gamma's from fitting
con_linear: linear part of affine constraint used for barrier function
con_offset: offset part of affine constraint used for barrier function
C: V^T Q^{-1} \\Lambda V
active_dirs:
"""
scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T)))
if feasible_point is None:
feasible_point = 1. / scaling
def objective(gs):
p1 = -gs.T.dot(conjugate_arg)
p2 = gs.T.dot(precision).dot(gs) / 2.
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[0]
else:
p3 = 0
p4 = log(1. + 1. / ((con_offset - con_linear.dot(gs)) / scaling)).sum()
return p1 + p2 + p3 + p4
def grad(gs):
p1 = -conjugate_arg + precision.dot(gs)
p2 = -con_linear.T.dot(1. / (scaling + con_offset - con_linear.dot(gs)))
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[1]
else:
p3 = 0
p4 = 1. / (con_offset - con_linear.dot(gs))
return p1 + p2 + p3 + p4
def barrier_hessian(gs): # contribution of barrier and jacobian to hessian
p1 = con_linear.T.dot(np.diag(-1. / ((scaling + con_offset - con_linear.dot(gs)) ** 2.)
+ 1. / ((con_offset - con_linear.dot(gs)) ** 2.))).dot(con_linear)
if useJacobian:
p2 = - jacobian_grad_hess(gs, C, active_dirs)[2]
else:
p2 = 0
return p1 + p2
current = feasible_point
current_value = np.inf
for itercount in range(nstep):
cur_grad = grad(current)
# make sure proposal is feasible
count = 0
while True:
count += 1
proposal = current - step * cur_grad
if np.all(con_offset - con_linear.dot(proposal) > 0):
break
step *= 0.5
if count >= 40:
raise ValueError('not finding a feasible point')
# make sure proposal is a descent
count = 0
while True:
count += 1
proposal = current - step * cur_grad
proposed_value = objective(proposal)
if proposed_value <= current_value:
break
step *= 0.5
if count >= 20:
if not (np.isnan(proposed_value) or np.isnan(current_value)):
break
else:
raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value))
# stop if relative decrease is small
if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= min_its:
current = proposal
current_value = proposed_value
break
current = proposal
current_value = proposed_value
if itercount % 4 == 0:
step *= 2
hess = inv(precision + barrier_hessian(current))
return current_value, current, hess
# Jacobian calculations
def calc_GammaMinus(gamma, active_dirs):
"""Calculate Gamma^minus (as a function of gamma vector, active directions)
"""
to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())]
return block_diag(*[i for gp in to_diag for i in gp])
def jacobian_grad_hess(gamma, C, active_dirs):
""" Calculate the log-Jacobian (scalar), gradient (gamma.size vector) and hessian (gamma.size square matrix)
"""
if C.shape == (0, 0): # when all groups are size one, C will be an empty array
return 0, 0, 0
else:
GammaMinus = calc_GammaMinus(gamma, active_dirs)
# eigendecomposition
#evalues, evectors = eig(GammaMinus + C)
# log Jacobian
#J = log(evalues).sum()
J = np.log(np.linalg.det(GammaMinus + C))
# inverse
#GpC_inv = evectors.dot(np.diag(1 / evalues).dot(evectors.T))
GpC_inv = np.linalg.inv(GammaMinus + C)
# summing matrix (gamma.size by C.shape[0])
S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()])
# gradient
grad_J = S.dot(GpC_inv.diagonal())
# hessian
hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T))
return J, grad_J, hess_J
def _check_groups(groups):
"""Make sure that the user-specific groups are ok
There are a number of assumptions that group_lasso makes about
how groups are specified. Specifically, we assume that
`groups` is a 1-d array_like of integers that are sorted in
increasing order, start at 0, and have no gaps (e.g., if there
is a group 2 and a group 4, there must also be at least one
feature in group 3).
This function checks the user-specified group scheme and
raises an exception if it finds any problems.
Sorting feature groups is potentially tedious for the user and
in future we might do this for them.
"""
# check array_like
agroups = np.array(groups)
# check dimension
if len(agroups.shape) != 1:
raise ValueError("Groups are not a 1D array_like")
# check sorted
if np.any(agroups[:-1] > agroups[1:]):
raise ValueError("Groups are not sorted")
# check integers
if not np.issubdtype(agroups.dtype, np.integer):
raise TypeError("Groups are not integers")
# check starts with 0
if not np.amin(agroups) == 0:
raise ValueError("First group is not 0")
# check for no skipped groups
if not np.all(np.diff(np.unique(agroups)) == 1):
raise ValueError("Some group is skipped")
avg_line_length: 37.830527 | max_line_length: 116 | alphanum_fraction: 0.556709
qsc_code_num_words_quality_signal: 3,985 | qsc_code_num_chars_quality_signal: 33,707 | qsc_code_mean_word_length_quality_signal: 4.505897 | qsc_code_frac_words_unique_quality_signal: 0.139523
qsc_code_frac_chars_top_2grams_quality_signal: 0.031967 | qsc_code_frac_chars_top_3grams_quality_signal: 0.014034 | qsc_code_frac_chars_top_4grams_quality_signal: 0.005792
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.275395 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.237358 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0.211796 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0.159557 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.112943 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0.099688
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.011184 | qsc_code_frac_chars_whitespace_quality_signal: 0.350076
qsc_code_size_file_byte_quality_signal: 33,707 | qsc_code_num_lines_quality_signal: 890 | qsc_code_num_chars_line_max_quality_signal: 117 | qsc_code_num_chars_line_mean_quality_signal: 37.873034
qsc_code_frac_chars_alphabet_quality_signal: 0.808463 | qsc_code_frac_chars_comments_quality_signal: 0.164447 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.219251
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.019895 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.040998 | qsc_codepython_cate_var_zero_quality_signal: false | qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.024955 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0 | qsc_codepython_score_lines_no_logic_quality_signal: 0.105169 | qsc_codepython_frac_lines_print_quality_signal: 0.003565
qsc_code_* raw columns (without the _quality_signal suffix, in schema order): all 0, except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: d4c1d2fbba6d7c550c2607f8f36af9eb36384e04 | size: 18,606 | ext: py | lang: Python
max_stars_repo_path: internals/states.py | max_stars_repo_name: mattjj/pyhsmm-collapsedinfinite | max_stars_repo_head_hexsha: 81a60c025beec6fb065bc9f4e23cea43b6f6725c | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: internals/states.py | max_issues_repo_name: mattjj/pyhsmm-collapsedinfinite | max_issues_repo_head_hexsha: 81a60c025beec6fb065bc9f4e23cea43b6f6725c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: internals/states.py | max_forks_repo_name: mattjj/pyhsmm-collapsedinfinite | max_forks_repo_head_hexsha: 81a60c025beec6fb065bc9f4e23cea43b6f6725c | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-10-06T15:12:44.000Z | max_forks_repo_forks_event_max_datetime: 2021-10-06T15:12:44.000Z
content:
from __future__ import division
import numpy as np
na = np.newaxis
import collections, itertools
import abc
from pyhsmm.util.stats import sample_discrete, sample_discrete_from_log, combinedata
from pyhsmm.util.general import rle as rle
# NOTE: assumes censoring. can make no censoring by adding to score of last
# segment
SAMPLING = -1 # special constant for indicating a state or state range that is being resampled
NEW = -2 # special constant indicating a potentially new label
ABIGNUMBER = 10000 # state labels are sampled uniformly from 0 to abignumber exclusive
####################
# States Classes #
####################
# TODO an array class that maintains its own rle
# must override set methods
# type(x).__setitem__(x,i) classmethod
# also has members norep and lens (or something)
# that are either read-only or also override setters
# for now, i'll just make sure outside that anything that sets self.stateseq
# also sets self.stateseq_norep and self.durations
# it should also call beta updates...
class collapsed_states(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def resample(self):
pass
@abc.abstractmethod
def _counts_from(self,k):
pass
@abc.abstractmethod
def _counts_to(self,k):
pass
@abc.abstractmethod
def _counts_fromto(self,k):
pass
def _new_label(self,ks):
assert SAMPLING not in ks
newlabel = np.random.randint(ABIGNUMBER)
while newlabel in ks:
newlabel = np.random.randint(ABIGNUMBER)
newweight = self.beta.betavec[newlabel] # instantiate, needed if new state at beginning of seq
return newlabel
def _data_withlabel(self,k):
assert k != SAMPLING
return self.data[self.stateseq == k]
def _occupied(self):
return set(self.stateseq) - set((SAMPLING,))
def plot(self,colors_dict):
from matplotlib import pyplot as plt
stateseq_norep, durations = rle(self.stateseq)
X,Y = np.meshgrid(np.hstack((0,durations.cumsum())),(0,1))
if colors_dict is not None:
C = np.array([[colors_dict[state] for state in stateseq_norep]])
else:
C = stateseq_norep[na,:]
plt.pcolor(X,Y,C,vmin=0,vmax=1)
plt.ylim((0,1))
plt.xlim((0,len(self.stateseq)))
plt.yticks([])
class collapsed_stickyhdphmm_states(collapsed_states):
def __init__(self,model,beta,alpha_0,kappa,obs,data=None,T=None,stateseq=None):
self.alpha_0 = alpha_0
self.kappa = kappa
self.model = model
self.beta = beta
self.obs = obs
self.data = data
if (data,stateseq) == (None,None):
# generating
assert T is not None, 'must pass in T when generating'
self._generate(T)
elif data is None:
self.T = stateseq.shape[0]
self.stateseq = stateseq
elif stateseq is None:
self.data = data
self._generate(data.shape[0])
else:
assert data.shape[0] == stateseq.shape[0]
self.stateseq = stateseq
self.data = data
self.T = data.shape[0]
def _generate(self,T):
self.T = T
alpha, kappa = self.alpha_0, self.kappa
betavec = self.beta.betavec
stateseq = np.zeros(T,dtype=np.int)
model = self.model
self.stateseq = stateseq[:0]
# NOTE: we have a choice of what state to start in; it's just a
# definition choice that isn't specified in the HDP-HMM
# Here, we choose just to sample from beta. Note that if this is the
# first chain being sampled in this model, this will always sample
# zero, since no states will be occupied.
ks = list(model._occupied()) + [None]
firststate = sample_discrete(np.arange(len(ks)))
if firststate == len(ks)-1:
stateseq[0] = self._new_label(ks)
else:
stateseq[0] = ks[firststate]
# runs a CRF with fixed weights beta forwards
for t in range(1,T):
self.stateseq = stateseq[:t]
ks = list(model._occupied() | self._occupied())
betarest = 1-sum(betavec[k] for k in ks)
# get the counts of new states coming out of our current state
# going to all other states
fromto_counts = np.array([model._counts_fromto(stateseq[t-1],k)
+ self._counts_fromto(stateseq[t-1],k)
for k in ks])
# for those states plus a new one, sample proportional to
scores = np.array([(alpha*betavec[k] + (kappa if k == stateseq[t-1] else 0) + ft)
for k,ft in zip(ks,fromto_counts)] + [alpha*betarest])
nextstateidx = sample_discrete(scores)
if nextstateidx == scores.shape[0]-1:
stateseq[t] = self._new_label(ks)
else:
stateseq[t] = ks[nextstateidx]
self.stateseq = stateseq
def resample(self):
model = self.model
for t in np.random.permutation(self.T):
# throw out old value
self.stateseq[t] = SAMPLING
ks = list(model._occupied())
self.beta.housekeeping(ks)
# form the scores and sample from them
scores = np.array([self._score(k,t) for k in ks]+[self._new_score(ks,t)])
idx = sample_discrete_from_log(scores)
# set the state
if idx == scores.shape[0]-1:
self.stateseq[t] = self._new_label(ks)
else:
self.stateseq[t] = ks[idx]
def _score(self,k,t):
alpha, kappa = self.alpha_0, self.kappa
betavec, model, o = self.beta.betavec, self.model, self.obs
data, stateseq = self.data, self.stateseq
score = 0
# left transition score
if t > 0:
score += np.log( (alpha*betavec[k] + (kappa if k == stateseq[t-1] else 0)
+ model._counts_fromto(stateseq[t-1],k))
/ (alpha+kappa+model._counts_from(stateseq[t-1])) )
# right transition score
if t < self.T - 1:
# indicators since we may need to include the left transition in
# counts (since we are scoring exchangeably, not independently)
another_from = 1 if t > 0 and stateseq[t-1] == k else 0
another_fromto = 1 if (t > 0 and stateseq[t-1] == k and stateseq[t+1] == k) else 0
score += np.log( (alpha*betavec[stateseq[t+1]] + (kappa if k == stateseq[t+1] else 0)
+ model._counts_fromto(k,stateseq[t+1]) + another_fromto)
/ (alpha+kappa+model._counts_from(k) + another_from) )
# observation score
score += o.log_predictive(data[t],model._data_withlabel(k))
return score
def _new_score(self,ks,t):
alpha, kappa = self.alpha_0, self.kappa
betavec, model, o = self.beta.betavec, self.model, self.obs
data, stateseq = self.data, self.stateseq
score = 0
# left transition score
if t > 0:
betarest = 1-sum(betavec[k] for k in ks)
score += np.log(alpha*betarest/(alpha+kappa+model._counts_from(stateseq[t-1])))
# right transition score
if t < self.T-1:
score += np.log(betavec[stateseq[t+1]])
# observation score
score += o.log_marginal_likelihood(data[t])
return score
def _counts_from(self,k):
assert k != SAMPLING
assert np.sum(self.stateseq == SAMPLING) in (0,1)
temp = np.sum(self.stateseq[:-1] == k)
if SAMPLING in self.stateseq[1:] and \
self.stateseq[np.where(self.stateseq == SAMPLING)[0]-1] == k:
temp -= 1
return temp
def _counts_to(self,k):
assert k != SAMPLING
assert np.sum(self.stateseq == SAMPLING) in (0,1)
temp = np.sum(self.stateseq[1:] == k)
if SAMPLING in self.stateseq[:-1] and \
self.stateseq[np.where(self.stateseq == SAMPLING)[0]+1] == k:
temp -= 1
return temp
def _counts_fromto(self,k1,k2):
assert k1 != SAMPLING and k2 != SAMPLING
if k1 not in self.stateseq or k2 not in self.stateseq:
return 0
else:
from_indices, = np.where(self.stateseq[:-1] == k1) # EXCEPT last
return np.sum(self.stateseq[from_indices+1] == k2)
class collapsed_hdphsmm_states(collapsed_states):
def __init__(self,model,beta,alpha_0,obs,dur,data=None,T=None,stateseq=None):
self.alpha_0 = alpha_0
self.model = model
self.beta = beta
self.obs = obs
self.dur = dur
self.data = data
if (data,stateseq) == (None,None):
# generating
assert T is not None, 'must pass in T when generating'
self._generate(T)
elif data is None:
self.T = stateseq.shape[0]
self.stateseq = stateseq
elif stateseq is None:
self.data = data
# self._generate(data.shape[0]) # initialized from the prior
# self.stateseq = self.stateseq[:self.T]
self.stateseq = np.random.randint(25,size=data.shape[0])
self.T = data.shape[0]
else:
assert data.shape[0] == stateseq.shape[0]
self.stateseq = stateseq
self.stateseq_norep, self.durations = rle(stateseq)
self.data = data
self.T = data.shape[0]
def _generate(self,T):
alpha = self.alpha_0
betavec = self.beta.betavec
model = self.model
self.stateseq = np.array([])
ks = list(model._occupied()) + [None]
firststateidx = sample_discrete(np.arange(len(ks)))
if firststateidx == len(ks)-1:
firststate = self._new_label(ks)
else:
firststate = ks[firststateidx]
self.dur.resample(combinedata((model._durs_withlabel(firststate),self._durs_withlabel(firststate))))
firststate_dur = self.dur.rvs()
self.stateseq = np.ones(firststate_dur,dtype=int)*firststate
t = firststate_dur
# run a family-CRF (CRF with durations) forwards
while t < T:
ks = list(model._occupied() | self._occupied())
betarest = 1-sum(betavec[k] for k in ks)
fromto_counts = np.array([model._counts_fromto(self.stateseq[t-1],k)
+ self._counts_fromto(self.stateseq[t-1],k)
for k in ks])
scores = np.array([((alpha*betavec[k] + ft) if k != self.stateseq[t-1] else 0)
for k,ft in zip(ks,fromto_counts)]
+ [alpha*(1-betavec[self.stateseq[t-1]])*betarest])
nextstateidx = sample_discrete(scores)
if nextstateidx == scores.shape[0]-1:
nextstate = self._new_label(ks)
else:
nextstate = ks[nextstateidx]
# now get the duration of nextstate!
self.dur.resample(combinedata((model._durs_withlabel(nextstate),self._durs_withlabel(nextstate))))
nextstate_dur = self.dur.rvs()
self.stateseq = np.concatenate((self.stateseq,np.ones(nextstate_dur,dtype=int)*nextstate))
t += nextstate_dur
self.T = len(self.stateseq)
def resample(self):
self.resample_label_version()
def _durs_withlabel(self,k):
assert k != SAMPLING
if len(self.stateseq) > 0:
stateseq_norep, durations = rle(self.stateseq)
return durations[stateseq_norep == k]
else:
return []
def _counts_fromto(self,k1,k2):
assert k1 != SAMPLING and k2 != SAMPLING
if k1 not in self.stateseq or k2 not in self.stateseq or k1 == k2:
return 0
else:
stateseq_norep, _ = rle(self.stateseq)
from_indices, = np.where(stateseq_norep[:-1] == k1) # EXCEPT last
return np.sum(stateseq_norep[from_indices+1] == k2)
def _counts_from(self,k):
assert k != SAMPLING
stateseq_norep, _ = rle(self.stateseq)
temp = np.sum(stateseq_norep[:-1] == k)
if SAMPLING in stateseq_norep[1:] and \
stateseq_norep[np.where(stateseq_norep == SAMPLING)[0]-1] == k:
temp -= 1
return temp
def _counts_to(self,k):
assert k != SAMPLING
stateseq_norep, _ = rle(self.stateseq)
temp = np.sum(stateseq_norep[1:] == k)
if SAMPLING in stateseq_norep[:-1] and \
stateseq_norep[np.where(stateseq_norep == SAMPLING)[0]+1] == k:
temp -= 1
return temp
### label sampler stuff
def resample_label_version(self):
# NOTE never changes first label: we assume the initial state
# distribution is a delta at that label
for t in (np.random.permutation(self.T-1)+1):
self.stateseq[t] = SAMPLING
ks = self.model._occupied()
self.beta.housekeeping(ks)
ks = list(ks)
# sample a new value
scores = np.array([self._label_score(t,k) for k in ks] + [self._new_label_score(t,ks)])
newlabelidx = sample_discrete_from_log(scores)
if newlabelidx == scores.shape[0]-1:
self.stateseq[t] = self._new_label(ks)
else:
self.stateseq[t] = ks[newlabelidx]
def _label_score(self,t,k):
assert t > 0
score = 0.
# unpack variables
model = self.model
alpha = self.alpha_0
beta = self.beta.betavec
stateseq = self.stateseq
obs, durs = self.obs, self.dur
# left transition (if there is one)
if stateseq[t-1] != k:
score += np.log(alpha * beta[k] + model._counts_fromto(stateseq[t-1],k)) \
- np.log(alpha * (1-beta[stateseq[t-1]]) + model._counts_from(stateseq[t-1]))
# right transition (if there is one)
if t < self.T-1 and stateseq[t+1] != k:
score += np.log(alpha * beta[stateseq[t+1]] + model._counts_fromto(k,stateseq[t+1])) \
- np.log(alpha * (1-beta[k]) + model._counts_from(k))
# predictive likelihoods
for (data,otherdata), (dur,otherdurs) in self._local_group(t,k):
score += obs.log_predictive(data,otherdata) + durs.log_predictive(dur,otherdurs)
return score
def _new_label_score(self,t,ks):
assert t > 0
score = 0.
# unpack
model = self.model
alpha = self.alpha_0
beta = self.beta.betavec
stateseq = self.stateseq
obs, durs = self.obs, self.dur
# left transition (only from counts, no to counts)
score += np.log(alpha) - np.log(alpha*(1.-beta[stateseq[t-1]])
+ model._counts_from(stateseq[t-1]))
# add in right transition (no counts)
if t < self.T-1:
score += np.log(beta[stateseq[t+1]])
# add in sum over k factor
if t < self.T-1:
betas = np.random.beta(1,self.beta.gamma_0,size=200)
betas[1:] *= (1-betas[:-1]).cumprod()
score += np.log(self.beta.remaining*(betas/(1-betas)).sum())
else:
score += np.log(self.beta.remaining)
# add in obs/dur scores of local pieces
for (data,otherdata), (dur,otherdurs) in self._local_group(t,NEW):
score += obs.log_predictive(data,otherdata) + durs.log_predictive(dur,otherdurs)
return score
def _local_group(self,t,k):
'''
returns a sequence of length between 1 and 3, where each sequence element is
((data,otherdata), (dur,otherdurs))
'''
# temporarily modifies members, like self.stateseq and maybe self.data
assert self.stateseq[t] == SAMPLING
orig_stateseq = self.stateseq.copy()
# temporarily set stateseq to hypothetical stateseq
# so that we can get the indicator sequence
# TODO if i write the special stateseq class, this will need fixing
self.stateseq[t] = k
wholegroup, pieces = self._local_slices(self.stateseq,t)
self.stateseq[t] = SAMPLING
# build local group of statistics
localgroup = []
self.stateseq[wholegroup] = SAMPLING
for piece, val in pieces:
# get all the other data
otherdata, otherdurs = self.model._data_withlabel(val), self.model._durs_withlabel(val)
# add a piece to our localgroup
localgroup.append(((self.data[piece],otherdata),(piece.stop-piece.start,otherdurs)))
# remove the used piece from the exclusion
self.stateseq[piece] = orig_stateseq[piece]
# restore original views
self.stateseq = orig_stateseq
# return
return localgroup
@classmethod
def _local_slices(cls,stateseq,t):
'''
returns slices: wholegroup, (piece1, ...)
'''
A,B = fill(stateseq,t-1), fill(stateseq,t+1)
if A == B:
return A, ((A,stateseq[A.start]),)
elif A.start <= t < A.stop or B.start <= t < B.stop:
return slice(A.start,B.stop), [(x,stateseq[x.start]) for x in (A,B) if x.stop - x.start > 0]
else:
It = slice(t,t+1)
return slice(A.start,B.stop), [(x,stateseq[x.start]) for x in (A,It,B) if x.stop - x.start > 0]
#######################
# Utility Functions #
#######################
def fill(seq,t):
if t < 0:
return slice(0,0)
elif t > seq.shape[0]-1:
return slice(seq.shape[0],seq.shape[0])
else:
endindices, = np.where(np.diff(seq) != 0) # internal end indices (not incl -1 and T-1)
startindices = np.concatenate(((0,),endindices+1,(seq.shape[0],))) # incl 0 and T
idx = np.where(startindices <= t)[0][-1]
return slice(startindices[idx],startindices[idx+1])
def canonize(seq):
seq = seq.copy()
canondict = collections.defaultdict(itertools.count().__next__)
for idx,s in enumerate(seq):
seq[idx] = canondict[s]
reversedict = {}
for k,v in canondict.items():
reversedict[v] = k
return seq, canondict, reversedict
class dummytrans(object):
def __init__(self,A):
self.A = A
def resample(self,*args,**kwargs):
pass
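# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a quick, hedged check
# of the slice/label utilities defined above; run this file directly to see it.
if __name__ == '__main__':
    _seq = np.array([0, 0, 1, 1, 1, 2, 2])
    # fill(seq, t) returns the slice covering the contiguous run containing t
    print(fill(_seq, 3))    # the run of 1s spans indices 2..4
    print(fill(_seq, -1))   # an index off the left edge yields an empty slice
    # canonize relabels states in order of first appearance
    print(canonize(np.array([5, 5, 2, 5, 9]))[0])   # -> [0 0 1 0 2]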
| 35.849711
| 110
| 0.577448
| 2,431
| 18,606
| 4.322912
| 0.143562
| 0.075364
| 0.028547
| 0.011514
| 0.501475
| 0.458274
| 0.413645
| 0.348463
| 0.332477
| 0.311257
| 0
| 0.015428
| 0.306729
| 18,606
| 518
| 111
| 35.918919
| 0.799287
| 0.148124
| 0
| 0.458689
| 0
| 0
| 0.003837
| 0
| 0
| 0
| 0
| 0.003861
| 0.051282
| 1
| 0.091168
| false
| 0.019943
| 0.019943
| 0.002849
| 0.196581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c20caa8c6caaf656d4639f0a7424aba4ba6e44
| 1,406
|
py
|
Python
|
exporters/contrib/writers/odo_writer.py
|
scrapinghub/exporters
|
b14f70530826bbbd6163d9e56e74345e762a9189
|
[
"BSD-3-Clause"
] | 41
|
2016-06-16T15:29:39.000Z
|
2021-08-06T03:29:13.000Z
|
exporters/contrib/writers/odo_writer.py
|
bbotella/fluxo
|
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
|
[
"BSD-3-Clause"
] | 52
|
2016-06-20T12:46:57.000Z
|
2018-02-08T12:22:03.000Z
|
exporters/contrib/writers/odo_writer.py
|
bbotella/fluxo
|
c9fb01db1771ada4672bbffd67cb46e1f7802ab9
|
[
"BSD-3-Clause"
] | 10
|
2016-06-23T08:49:36.000Z
|
2018-01-13T10:12:10.000Z
|
import six
import json
import gzip
from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter
class ODOWriter(BaseWriter):
"""
Writes items to an odo destination. https://odo.readthedocs.org/en/latest/
Needed parameters:
- schema (object)
schema object.
- odo_uri (str)
ODO valid destination uri.
"""
requirements = {
'schema': {'type': object, 'required': True},
'odo_uri': {'type': six.string_types, 'required': True}
}
def __init__(self, options):
super(ODOWriter, self).__init__(options)
from flatson import Flatson
schema = self.read_option('schema', None)
self.odo_uri = self.read_option('odo_uri', None)
self.flatson = Flatson(schema)
self.logger.info('ODOWriter has been initiated. Writing to: {}'.format(self.odo_uri))
@retry_long
def write(self, dump_path, group_key=''):
from odo import odo, resource, discover
import pandas as pd
with gzip.open(dump_path) as f:
lines = [json.loads(line.replace('\n', '')) for line in f.readlines()]
flattened_lines = (self.flatson.flatten(line) for line in lines)
pf = pd.DataFrame(flattened_lines, columns=self.flatson.fieldnames)
dshape = discover(pf)
odo(pf, resource(self.odo_uri), dshape=dshape)
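# Illustrative sketch (not part of the original module): the kind of configuration
# this writer expects, per the docstring above. The schema and odo URI below are
# hypothetical placeholders; how the dict is fed into BaseWriter depends on the
# exporters pipeline configuration and is not shown here.
EXAMPLE_WRITER_OPTIONS = {
    'schema': {  # a JSON schema describing the exported items (flattened by Flatson)
        '$schema': 'http://json-schema.org/draft-04/schema',
        'type': 'object',
        'properties': {'name': {'type': 'string'}, 'price': {'type': 'number'}},
    },
    'odo_uri': 'sqlite:///items.db::items',  # any odo-supported destination URI
}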
| 31.244444
| 93
| 0.642959
| 174
| 1,406
| 5.045977
| 0.482759
| 0.041002
| 0.034169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242532
| 1,406
| 44
| 94
| 31.954545
| 0.824413
| 0.138691
| 0
| 0
| 0
| 0
| 0.081702
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.296296
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c3adf62c8a44bad01c91e8ccec7e900d2597c3
| 1,573
|
py
|
Python
|
graphstar/utils.py
|
pengboomouch/graphstar
|
f7f3537aa92118765b358dd3a47b4fa5cea8587c
|
[
"MIT"
] | null | null | null |
graphstar/utils.py
|
pengboomouch/graphstar
|
f7f3537aa92118765b358dd3a47b4fa5cea8587c
|
[
"MIT"
] | null | null | null |
graphstar/utils.py
|
pengboomouch/graphstar
|
f7f3537aa92118765b358dd3a47b4fa5cea8587c
|
[
"MIT"
] | null | null | null |
"""
graphstar.utils
~~~~~~~~~~~~~~~
Cristian Cornea
A simple bidirectional graph with A* and breadth-first pathfinding.
Utils are either used by the search algorithm, or when needed :)
Pretty self-explanatory (I hope)
For more information see the examples and tests folder
"""
def smooth_path(p):
# If the path is only two nodes long, then
# we can’t smooth it, so return
if len(p) == 2:
return p
# Compile an output path
output = [p[0]]
# Keep track of where we are in the input path
# We start at 2, because we assume two adjacent
# nodes will pass the ray cast
i = 2
# Loop until we find the last item in the input
while i < len(p)-1:
# Do the ray cast
if not ray_clear(output[len(output)-1], p[i]):
# The ray cast failed, add the last node that
# passed to the output list
output.append(p[i-1])
# Consider the next node
i += 1
# We’ve reached the end of the input path, add the
# end node to the output and return it
output.append(p[len(p)-1])
return output
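# Illustrative sketch (not part of the original module): smooth_path relies on a
# ray_clear(a, b) predicate, provided elsewhere, that returns True when the straight
# segment between two nodes is unobstructed. The stand-in below and the sample path
# are hypothetical.
def _always_clear(a, b):
    return True


if __name__ == '__main__':
    ray_clear = _always_clear      # stand-in for the real visibility test
    print(smooth_path(['A', 'B', 'C', 'D']))   # every ray clear -> only the endpoints remain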
def clean_route_list(route_stack: list, goal_node_id: int):
"""
Creates an ordered route list from start to finish
with all node ids needed to traverse to the goal.
:param route_stack: All routes found until goal
:param goal_node_id: int ID of the goal node
:return: list An ordered list of node ids from start to goal
"""
r = []
next_node = goal_node_id
reversed_stack = reversed(route_stack)
for c in reversed_stack:
if c.to_node.id == next_node:
r.append(c.to_node.id)
r.append(c.from_node.id)
next_node = c.from_node.id
# preserve first-seen order while removing duplicates, then flip to start -> goal
return list(dict.fromkeys(r))[::-1]
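# Illustrative sketch (not part of the original module): clean_route_list walks the
# route stack backwards from the goal, so each entry needs .from_node.id and
# .to_node.id attributes. The tiny stand-in objects below are hypothetical.
if __name__ == '__main__':
    from types import SimpleNamespace

    def _edge(frm, to):
        return SimpleNamespace(from_node=SimpleNamespace(id=frm),
                               to_node=SimpleNamespace(id=to))

    stack = [_edge(1, 2), _edge(2, 3), _edge(3, 4)]
    print(clean_route_list(stack, goal_node_id=4))   # de-duplicated node ids from start to goal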
| 24.968254
| 68
| 0.688493
| 279
| 1,573
| 3.806452
| 0.419355
| 0.033898
| 0.018832
| 0.028249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007341
| 0.220598
| 1,573
| 62
| 69
| 25.370968
| 0.858891
| 0.625556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c411c2e8e16ded3277d3bfc3c35dd1f462b513
| 527
|
py
|
Python
|
jinchi/demo/foobar.py
|
jiz148/py-test
|
d976265d065c760f2e8b55302dedbfebd01bec28
|
[
"Apache-2.0"
] | null | null | null |
jinchi/demo/foobar.py
|
jiz148/py-test
|
d976265d065c760f2e8b55302dedbfebd01bec28
|
[
"Apache-2.0"
] | null | null | null |
jinchi/demo/foobar.py
|
jiz148/py-test
|
d976265d065c760f2e8b55302dedbfebd01bec28
|
[
"Apache-2.0"
] | 1
|
2019-01-07T18:42:53.000Z
|
2019-01-07T18:42:53.000Z
|
import os
def check_env(env_var_name):
"""
Check and return the type of an environment variable.
supported types:
None
Integer
String
@param env_var_name: environment variable name
@return: string of the type name.
"""
try:
val = os.getenv(env_var_name)
if val is None:
return 'None'
except Exception:
return 'None'
try:
int(val)
return 'Integer'
except ValueError:
return 'String'
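# Illustrative sketch (not part of the original module): quick usage with a
# hypothetical variable name.
if __name__ == '__main__':
    os.environ['DEMO_PORT'] = '8080'
    print(check_env('DEMO_PORT'))       # 'Integer'
    print(check_env('DEMO_MISSING'))    # 'None' (assuming the variable is not set)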
| 18.821429
| 57
| 0.578748
| 65
| 527
| 4.569231
| 0.492308
| 0.060606
| 0.10101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 527
| 27
| 58
| 19.518519
| 0.870968
| 0.351044
| 0
| 0.153846
| 0
| 0
| 0.069536
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c56f7b05d7fe221ca2f682d2bea0e270121b36
| 2,000
|
py
|
Python
|
tracking/utils.py
|
WGBH/django-tracking
|
80e8bc44521820eab956d2264d6df0b6987429e0
|
[
"MIT"
] | null | null | null |
tracking/utils.py
|
WGBH/django-tracking
|
80e8bc44521820eab956d2264d6df0b6987429e0
|
[
"MIT"
] | null | null | null |
tracking/utils.py
|
WGBH/django-tracking
|
80e8bc44521820eab956d2264d6df0b6987429e0
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.conf import settings
import pytz
def check_tracker(obj, simple=True):
if simple:
if obj.status > 0:
return True
return False
# we have a gatekeeper
now = datetime.now(pytz.utc)
if obj.tracker_publish_status < 0:
return False
if obj.tracker_publish_status > 0:
return True
# Checking live_as_of ...
# is live_as_of set?
if not obj.tracker_live_as_of: # No live_as_of --- bail
return False
# has it happened yet?
if now < obj.tracker_live_as_of: # live_as_of --- not yet!
return False
# is there an expiration date?
if obj.tracker_expires and now > obj.tracker_expires: # EXPIRED!
return False
# it's OK then
return True
DEFAULT_TRACKER_POSITIONS = [
('tracker-head-top', 'Head - near top'),
('tracker-head-bottom', 'Head - near bottom'),
('tracker-body-top', 'Body - near top'),
('tracker-body-bottom', 'Body - near bottom')
]
def get_tracker_position_options():
"""
This creates the dropdown in the Admin for where to put each tracker.
It defaults to the obvious 4 locations (top/bottom of the head/body);
however the user can create more by adding a list of 3-tuples in the settings
file under ADDITIONAL_TRACKER_POSITIONS, each of the form
(2-letter-code, description, block name), e.g.
('HN', 'Header Navigation', 'header-navigation-trackers')
would allow for the user to have tracking code in a navbar (no, I don't know
why they'd want this) if they put
{% block header-navigation-trackers %}{% generate_trackers 'HN' %}{% endblock %}
in their template.
"""
tracker_position_list = DEFAULT_TRACKER_POSITIONS
additional_tracker_positions = getattr(settings, "ADDITIONAL_TRACKER_POSITIONS", [])
full_list = list()
for x in (tracker_position_list + additional_tracker_positions):
full_list.append((x[0], x[1]))
return full_list
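# Illustrative sketch (not part of the original module): the shape of the setting
# described in the docstring above. In a hypothetical Django settings.py one might
# add
#
#     ADDITIONAL_TRACKER_POSITIONS = [
#         ('tracker-header-nav', 'Header - navigation bar', 'header-navigation-trackers'),
#     ]
#
# after which get_tracker_position_options() would return the four defaults followed
# by ('tracker-header-nav', 'Header - navigation bar'); only the first two elements
# of each entry feed the dropdown.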
| 35.087719
| 88
| 0.665
| 281
| 2,000
| 4.576512
| 0.423488
| 0.046656
| 0.037325
| 0.026439
| 0.130638
| 0.049767
| 0.049767
| 0
| 0
| 0
| 0
| 0.005274
| 0.2415
| 2,000
| 57
| 89
| 35.087719
| 0.842452
| 0.384
| 0
| 0.242424
| 0
| 0
| 0.139812
| 0.02387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.090909
| 0
| 0.424242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c5d7225aa1d551d6744fefbde6bc3d8b9f8cc2
| 3,220
|
py
|
Python
|
computation/Tests/Jetson/TF_model.py
|
y-x-c/Heliot
|
b98646966fd1d437e308abeed59668df640932de
|
[
"BSD-3-Clause"
] | 4
|
2019-09-19T15:36:22.000Z
|
2020-02-18T09:28:54.000Z
|
computation/Tests/Jetson/TF_model.py
|
y-x-c/Heliot
|
b98646966fd1d437e308abeed59668df640932de
|
[
"BSD-3-Clause"
] | null | null | null |
computation/Tests/Jetson/TF_model.py
|
y-x-c/Heliot
|
b98646966fd1d437e308abeed59668df640932de
|
[
"BSD-3-Clause"
] | 2
|
2020-04-14T19:11:32.000Z
|
2022-01-08T18:59:02.000Z
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import json
import time
import cv2
PATH_TO_FROZEN_GRAPH = '../data/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_frozen.pb'
info='Time taken to load Model into memory:'
start_time=time.time()
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
# Load the labels
#Load categories
categories = []
with open('../data/' + 'categories.txt', 'r') as f:
for line in f:
cat = line.split('\n')[0]
if cat != 'classes':
categories.append(cat)
f.close()
print('Number of categories:', len(categories))
# Load image size
with open('../data/' + 'inputsize.txt', 'r') as f:
reqsize = int(f.readline().split('\n')[0])
#print(reqsize)
#image_filename = '../data/' + 'image1.jpg'
def Load_and_process_img(image_filename):
img = cv2.imread(image_filename)#.astype(numpy.float32)
img = cv2.resize(img, (reqsize, reqsize))
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = img.astype(float)
#img values are scaled from -1 to 1
img /= 255.0
img -= 0.5
img *= 2.0
return img
sess=tf.Session(graph=detection_graph)
def run_inference_b1(key_name,image, graph,no_of_run):
#model output layer name
ops = graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
#print(all_tensor_names)
tensor_dict = {}
for key in [key_name]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = graph.get_tensor_by_name(tensor_name)
image=image.reshape(1,image.shape[0],image.shape[1],image.shape[2])
image_tensor = graph.get_tensor_by_name('input:0')
#Demo run, so that graph is loaded into TF memory
sess.run(tensor_dict,feed_dict={image_tensor: image})
# Run inference
info='Time taken to run inference: run_inference_b1:'+str(no_of_run)+' Times: '
start_time=time.time()
for i in range(no_of_run):
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: image})
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
#print(output_dict)
top_inds = output_dict[key_name][0].argsort()[::-1][:5]
result=[]
for i in range(5):
result.append([top_inds[i], categories[top_inds[i]], output_dict[key_name][0][top_inds[i]]])
return result, time_taken
image_filename = '../data/' + 'Tiger.jpg'
img = Load_and_process_img(image_filename)
key_name='MobilenetV2/Predictions/Reshape_1'
result,time_taken=run_inference_b1(key_name,img,detection_graph,1000)
print('Time Taken to run Inference is:',time_taken)
print(result)
| 26.178862
| 100
| 0.700621
| 495
| 3,220
| 4.341414
| 0.30101
| 0.04188
| 0.033504
| 0.015821
| 0.233597
| 0.120056
| 0.092136
| 0.092136
| 0.092136
| 0.053979
| 0
| 0.019985
| 0.176398
| 3,220
| 122
| 101
| 26.393443
| 0.790347
| 0.088199
| 0
| 0.102564
| 0
| 0
| 0.109134
| 0.031475
| 0.012821
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.205128
| 0
| 0.25641
| 0.064103
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c6aa1d03e45cbedd11a4f0d5c301600877fac8
| 1,326
|
py
|
Python
|
frappe/patches/v13_0/update_date_filters_in_user_settings.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 3
|
2017-12-09T22:05:11.000Z
|
2019-10-22T12:03:43.000Z
|
frappe/patches/v13_0/update_date_filters_in_user_settings.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 17
|
2021-03-22T18:47:14.000Z
|
2022-03-15T12:21:00.000Z
|
frappe/patches/v13_0/update_date_filters_in_user_settings.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 2
|
2021-05-06T06:14:40.000Z
|
2021-05-06T10:05:29.000Z
|
from __future__ import unicode_literals
import frappe, json
from frappe.model.utils.user_settings import update_user_settings, sync_user_settings
def execute():
users = frappe.db.sql("select distinct(user) from `__UserSettings`", as_dict=True)
for user in users:
user_settings = frappe.db.sql('''
select
* from `__UserSettings`
where
user="{user}"
'''.format(user = user.user), as_dict=True)
for setting in user_settings:
data = frappe.parse_json(setting.get('data'))
if data:
for key in data:
update_user_setting_filters(data, key, setting)
sync_user_settings()
def update_user_setting_filters(data, key, user_setting):
timespan_map = {
'1 week': 'week',
'1 month': 'month',
'3 months': 'quarter',
'6 months': '6 months',
'1 year': 'year',
}
period_map = {
'Previous': 'last',
'Next': 'next'
}
if data.get(key):
update = False
if isinstance(data.get(key), dict):
filters = data.get(key).get('filters')
if filters and isinstance(filters, list):
for f in filters:
if f[2] == 'Next' or f[2] == 'Previous':
update = True
f[3] = period_map[f[2]] + ' ' + timespan_map[f[3]]
f[2] = 'Timespan'
if update:
data[key]['filters'] = filters
update_user_settings(user_setting['doctype'], json.dumps(data), for_update=True)
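# Illustrative sketch (not part of the original patch): the rewrite performed above
# on a single hypothetical saved filter. An entry stored as
#
#     ['Sales Invoice', 'posting_date', 'Previous', '1 month']
#
# becomes
#
#     ['Sales Invoice', 'posting_date', 'Timespan', 'last month']
#
# since f[2] ('Previous') maps to 'last' via period_map and f[3] ('1 month') maps to
# 'month' via timespan_map before f[2] is overwritten with 'Timespan'.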
| 24.109091
| 85
| 0.659879
| 187
| 1,326
| 4.486631
| 0.31016
| 0.100119
| 0.035757
| 0.045292
| 0.073898
| 0.073898
| 0
| 0
| 0
| 0
| 0
| 0.011184
| 0.190799
| 1,326
| 54
| 86
| 24.555556
| 0.770736
| 0
| 0
| 0
| 0
| 0
| 0.181269
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.069767
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c78d441d23d25b49b17e8da38c99500cd4ebd4
| 3,993
|
py
|
Python
|
miniproject/train.py
|
peguerosdc/ml4phy-quantum-oscillators
|
5ce2cc8ea9ad00e23dab45d898e51f484fca5934
|
[
"MIT"
] | null | null | null |
miniproject/train.py
|
peguerosdc/ml4phy-quantum-oscillators
|
5ce2cc8ea9ad00e23dab45d898e51f484fca5934
|
[
"MIT"
] | null | null | null |
miniproject/train.py
|
peguerosdc/ml4phy-quantum-oscillators
|
5ce2cc8ea9ad00e23dab45d898e51f484fca5934
|
[
"MIT"
] | 1
|
2021-07-18T11:11:46.000Z
|
2021-07-18T11:11:46.000Z
|
import BoltzmannMachine as bm
import QHO as qho
import numpy as np
import datetime
# Visualization imports
from IPython.display import clear_output
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi']=300
def sigmoid(x):
return .5 * (1 + np.tanh(x / 2.))
# Set the quantum gas with N particles, a limit of 10 for the
# quantum numbers and default temperature and frequency
N = 10*10
gas = qho.QHOGas(N=N)
n_max = 10
training_size = 100000
# the amount of hidden units was set by trial and error
hidden_units = 70
# the recipe suggests setting the batch size to 10, though it can range
# from 10 to 100
batchsize = 10
# the recipe suggests a learning rate that makes the weight updates about
# 1e-3 times the weights (to within an order of magnitude)
eta = 0.005
# the amount of steps was set by trial and error
nsteps = 300000
# define the validation set to be used in training_visualization
validation_set = gas.generate(amount=20)
def training_visualization(machine, current_step, total_steps, eta, a, b, w, da, db, dw):
# Every now and then (every 50k steps), let us know that the training
# is still running
if current_step%50000 == 0:
print("{:08d} / {:08d}".format(current_step, total_steps), end=" \r")
# After 'checkpoint_steps', show the suggested plots
checkpoint_steps = 10000
if current_step%checkpoint_steps == 0 or current_step == total_steps-1:
print(f"Showing at step {current_step}.")
# Produce a sample starting from the validation set after 100 steps
v_prime = machine.generate(validation_set, 100, a=a, b=b, w=w)
# print useful plots for training
plot_training(validation_set, v_prime, eta, a, b, w, da, db, dw)
def plot_training(v, v_prime, eta, a, b, w, da, db, dw):
clear_output(wait=True)
# Show how the weights light up for the state v
hMean = sigmoid(np.dot(v, w) + b)
Image.fromarray((hMean * 255).astype(np.uint8)).show()
# Create the grid for all the other plots we want
plt.rcParams.update({'font.size': 2})
# plot histogram of initial vs generated
n = np.arange(0,10)
generated_quantum_numbers = np.rint(v_prime*10)
plt.hist( generated_quantum_numbers.flatten(), bins=np.arange(0,10), density=True, label="Sampled" )
plt.plot( n, gas.p_n(n), label="Theor." )
plt.xlabel('n')
plt.ylabel('P(n)')
plt.legend()
# plot histogram of visible, hidden, weights
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(ncols=3, nrows=2)
def plotit(axis, values, title):
axis.hist(values)
axis.set_title(f"{title}: mm = {np.mean(np.fabs(values))}")
plotit(fig.add_subplot(gs[0,0]), a, 'a')
plotit(fig.add_subplot(gs[0,1]), w.flatten(), 'w')
plotit(fig.add_subplot(gs[0,2]), b, 'b')
# plot histogram of d_visible, d_hidden, d_weights
plotit(fig.add_subplot(gs[1,0]), eta*da, 'da')
plotit(fig.add_subplot(gs[1,1]), eta*dw.flatten(), 'dw')
plotit(fig.add_subplot(gs[1,2]), eta*db, 'db')
# show free energies of the average of samples
x = lambda vv : b + np.dot(vv, w)
free_training = -np.dot(v, a) - np.sum( np.log(1 + np.exp(x(v))), axis=1)
free_validation = -np.dot(v_prime, a) - np.sum( np.log(1 + np.exp(x(v_prime))), axis=1)
print(f"\nF_training={np.average(free_training)} vs F_validation={np.average(free_validation)}\n")
# Show.
# CAUTION! This will freeze the execution
plt.show()
# Init the boltzmann machine and train it while visualizing the suggested plots
training_set = gas.generate(amount=training_size, n_max=n_max)
m = bm.BoltzmannMachine(num_hidden=hidden_units)
a,b,w = m.train(training_set, batchsize=batchsize, eta=eta, nsteps=nsteps, do_while_training=None)
# Store in a file
run_id = int(datetime.datetime.now().timestamp())
np.savetxt(f"a_{run_id}.csv", a, delimiter=',')
np.savetxt(f"b_{run_id}.csv", b, delimiter=',')
np.savetxt(f"w_{run_id}.csv", w, delimiter=',')
| 40.333333
| 104
| 0.69146
| 663
| 3,993
| 4.060332
| 0.3454
| 0.015602
| 0.026746
| 0.042348
| 0.096582
| 0.096582
| 0.031947
| 0.027489
| 0.027489
| 0.014116
| 0
| 0.030516
| 0.179314
| 3,993
| 98
| 105
| 40.744898
| 0.790967
| 0.289507
| 0
| 0
| 0
| 0
| 0.095695
| 0.038776
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063492
| false
| 0
| 0.126984
| 0.015873
| 0.206349
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c7b73306f8c0594f64a791f8292624d0ac8d82
| 11,237
|
py
|
Python
|
Tests/Marketplace/prepare_public_index_for_private_testing.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Tests/Marketplace/prepare_public_index_for_private_testing.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Tests/Marketplace/prepare_public_index_for_private_testing.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime
from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'
@contextmanager
def lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
try:
acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
yield
except Exception:
logging.exception("Error in dummy index lock context manager.")
finally:
release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
def change_pack_price_to_zero(path_to_pack_metadata):
with open(path_to_pack_metadata, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata['price'] = 0
with open(path_to_pack_metadata, 'w') as pack_metadata_file:
json.dump(pack_metadata, pack_metadata_file, indent=4)
def change_packs_price_to_zero(public_index_folder_path):
paths_to_packs_in_merged_index = [pack_dir.path for pack_dir in os.scandir(public_index_folder_path) if
pack_dir.is_dir()]
for path_to_pack in paths_to_packs_in_merged_index:
path_to_pack_metadata = os.path.join(path_to_pack, 'metadata.json')
change_pack_price_to_zero(path_to_pack_metadata)
def merge_private_index_into_public_index(public_index_folder_path, private_index_folder_path):
packs_in_private_index = [pack_dir.name for pack_dir in os.scandir(private_index_folder_path) if pack_dir.is_dir()]
for pack_name in packs_in_private_index:
path_to_pack_in_private_index = os.path.join(private_index_folder_path, pack_name)
path_to_pack_in_public_index = os.path.join(public_index_folder_path, pack_name)
shutil.copy(path_to_pack_in_private_index, path_to_pack_in_public_index)
def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
private_packs):
"""Upload updated index zip to cloud storage.
Args:
public_index_folder_path (str): public index folder full path.
extract_destination_path (str): extract folder full path.
public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
build_number (str): circleCI build number, used as an index revision.
private_packs (list): List of private packs and their price.
"""
with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
for private_pack in private_packs:
private_pack['price'] = 0
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'packs': private_packs
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(public_index_folder_path)
index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
public_ci_dummy_index_blob.reload()
public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
logging.success("Finished uploading index.zip to storage.")
except Exception:
logging.exception("Failed in uploading index. Mismatch in index file generation.")
sys.exit(1)
finally:
shutil.rmtree(public_index_folder_path)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index",
required=True)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs",
required=True)
parser.add_argument('-di', '--dummy_index_dir_path', help="Full path to the dummy index in the private CI bucket",
required=True)
# disable-secrets-detection-end
return parser.parse_args()
def is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
return dummy_index_lock_blob.exists()
def lock_dummy_index(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
with open(LOCK_FILE_PATH, 'w') as lock_file:
lock_file.write('locked')
with open(LOCK_FILE_PATH, 'rb') as lock_file:
dummy_index_lock_blob.upload_from_file(lock_file)
def acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
total_seconds_waited = 0
while is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
if total_seconds_waited >= MAX_SECONDS_TO_WAIT_FOR_LOCK:
logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")
sys.exit(1)
if total_seconds_waited % 60 == 0:
# Printing a message every minute to keep the machine from dying due to no output
logging.info("Waiting to acquire lock.")
total_seconds_waited += 10
time.sleep(10)
lock_dummy_index(public_storage_bucket, dummy_index_lock_path)
def release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
dummy_index_lock_blob.delete()
os.remove(LOCK_FILE_PATH)
def add_private_packs_from_dummy_index(private_packs, dummy_index_blob):
downloaded_dummy_index_path = 'current_dummy_index.zip'
extracted_dummy_index_path = 'dummy_index'
dummy_index_json_path = os.path.join(extracted_dummy_index_path, 'index', 'index.json')
dummy_index_blob.download_to_filename(downloaded_dummy_index_path)
os.mkdir(extracted_dummy_index_path)
if os.path.exists(downloaded_dummy_index_path):
with ZipFile(downloaded_dummy_index_path, 'r') as index_zip:
index_zip.extractall(extracted_dummy_index_path)
with open(dummy_index_json_path) as index_file:
index_json = json.load(index_file)
packs_from_dummy_index = index_json.get('packs', [])
for pack in private_packs:
is_pack_in_dummy_index = any(
[pack['id'] == dummy_index_pack['id'] for dummy_index_pack in packs_from_dummy_index])
if not is_pack_in_dummy_index:
packs_from_dummy_index.append(pack)
os.remove(downloaded_dummy_index_path)
shutil.rmtree(extracted_dummy_index_path)
return packs_from_dummy_index
def main():
install_logging('prepare_public_index_for_private_testing.log', logger=logging)
upload_config = option_handler()
service_account = upload_config.service_account
build_number = upload_config.ci_build_number
public_bucket_name = upload_config.public_bucket_name
private_bucket_name = upload_config.private_bucket_name
storage_base_path = upload_config.storage_base_path
extract_public_index_path = upload_config.extract_public_index_path
changed_pack = upload_config.pack_name
extract_destination_path = upload_config.extract_artifacts_path
packs_artifacts_path = upload_config.artifacts_path
dummy_index_dir_path = upload_config.dummy_index_dir_path
dummy_index_path = os.path.join(dummy_index_dir_path, 'index.zip')
dummy_index_lock_path = os.path.join(dummy_index_dir_path, 'lock.txt')
storage_client = init_storage_client(service_account)
public_storage_bucket = storage_client.bucket(public_bucket_name)
private_storage_bucket = storage_client.bucket(private_bucket_name)
dummy_index_blob = public_storage_bucket.blob(dummy_index_path)
with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
public_index_folder_path, public_index_blob, _ = download_and_extract_index(public_storage_bucket,
extract_public_index_path, storage_base_path)
# In order for the packs to be downloaded successfully, their price has to be 0
change_packs_price_to_zero(public_index_folder_path)
private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
public_index_folder_path,
changed_pack, True,
storage_base_path)
private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob)
upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number,
private_packs)
if __name__ == '__main__':
main()
| 48.021368
| 129
| 0.707128
| 1,484
| 11,237
| 4.931941
| 0.177898
| 0.101107
| 0.047821
| 0.034431
| 0.361525
| 0.272988
| 0.234185
| 0.181036
| 0.147425
| 0.116273
| 0
| 0.002172
| 0.221589
| 11,237
| 233
| 130
| 48.227468
| 0.834572
| 0.068435
| 0
| 0.094118
| 0
| 0
| 0.134499
| 0.015467
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070588
| false
| 0
| 0.076471
| 0
| 0.164706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4c9cb6d342d54eea3d53d2a8f44856dc1296577
| 2,843
|
py
|
Python
|
configs/_base_/datasets/flyingchairs_320x448.py
|
zhouzaida/mmflow
|
b34f0801061469f04a83133d7f5652dead1f93ce
|
[
"Apache-2.0"
] | 1
|
2021-11-16T12:32:54.000Z
|
2021-11-16T12:32:54.000Z
|
configs/_base_/datasets/flyingchairs_320x448.py
|
xiaokekeke/mmflow
|
c9ab798cec832d3472cbb06f04b2d64299802168
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/datasets/flyingchairs_320x448.py
|
xiaokekeke/mmflow
|
c9ab798cec832d3472cbb06f04b2d64299802168
|
[
"Apache-2.0"
] | 1
|
2022-03-24T06:46:05.000Z
|
2022-03-24T06:46:05.000Z
|
dataset_type = 'FlyingChairs'
data_root = 'data/FlyingChairs_release'
img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False)
global_transform = dict(
translates=(0.05, 0.05),
zoom=(1.0, 1.5),
shear=(0.86, 1.16),
rotate=(-10., 10.))
relative_transform = dict(
translates=(0.00375, 0.00375),
zoom=(0.985, 1.015),
shear=(1.0, 1.0),
rotate=(-1.0, 1.0))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='ColorJitter',
brightness=0.5,
contrast=0.5,
saturation=0.5,
hue=0.5),
dict(type='RandomGamma', gamma_range=(0.7, 1.5)),
dict(type='Normalize', **img_norm_cfg),
dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='RandomFlip', prob=0.5, direction='vertical'),
dict(
type='RandomAffine',
global_transform=global_transform,
relative_transform=relative_transform),
dict(type='RandomCrop', crop_size=(320, 448)),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['imgs', 'flow_gt'],
meta_keys=[
'img_fields', 'ann_fields', 'filename1', 'filename2',
'ori_filename1', 'ori_filename2', 'filename_flow',
'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg'
]),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='InputResize', exponent=6),
dict(type='Normalize', **img_norm_cfg),
dict(type='TestFormatBundle'),
dict(
type='Collect',
keys=['imgs'],
meta_keys=[
'flow_gt', 'filename1', 'filename2', 'ori_filename1',
'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
'scale_factor', 'pad_shape'
])
]
flyingchairs_train = dict(
type=dataset_type,
pipeline=train_pipeline,
data_root=data_root,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt')
data = dict(
train_dataloader=dict(
samples_per_gpu=1,
workers_per_gpu=2,
drop_last=True,
persistent_workers=True),
val_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
train=flyingchairs_train,
val=dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'),
test=dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'))
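# Illustrative note (not part of the original config): in the mm-style config system
# a downstream experiment config would typically inherit this file via a `_base_`
# entry, e.g.
#
#     _base_ = ['../_base_/datasets/flyingchairs_320x448.py']
#
# and then override individual keys such as data['train_dataloader'] as needed.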
| 31.241758
| 78
| 0.631375
| 351
| 2,843
| 4.85755
| 0.2849
| 0.098534
| 0.029326
| 0.024633
| 0.554839
| 0.497947
| 0.44868
| 0.379472
| 0.338416
| 0.231085
| 0
| 0.046188
| 0.215617
| 2,843
| 90
| 79
| 31.588889
| 0.718386
| 0
| 0
| 0.289157
| 0
| 0
| 0.236018
| 0.063665
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4cd4596ad7f6e0187f91e645753c131d68a9a4a
| 845
|
py
|
Python
|
python/orthogonal_test.py
|
davxy/numeric
|
1e8b44a72e1d570433a5ba81ae0795a750ce5921
|
[
"Unlicense"
] | 2
|
2020-05-03T17:02:44.000Z
|
2022-02-21T04:09:34.000Z
|
python/orthogonal_test.py
|
davxy/numeric
|
1e8b44a72e1d570433a5ba81ae0795a750ce5921
|
[
"Unlicense"
] | null | null | null |
python/orthogonal_test.py
|
davxy/numeric
|
1e8b44a72e1d570433a5ba81ae0795a750ce5921
|
[
"Unlicense"
] | null | null | null |
# Orthogonal linear system solver tests
from math import sqrt
import numpy as np
from orthogonal import orthogonal
################################################################################
# 2x2 orthogonal matrix
A = np.matrix('1 1;'
'1 -1', float)
A = A*1.0/sqrt(2.0)
# Known terms vector
b = np.matrix('2; 3')
# Solve the system
x = orthogonal(A, b, 1)
# Check
if not np.allclose(b, A*x):
raise Exception('Orthogonal test failure')
################################################################################
# 3x3 orthogonal matrix
A = np.matrix('2 -2 1;'
'1 2 2;'
'2 1 -2', float)
A = A*1.0/3.0
# Known terms vector
b = np.matrix('2; 3; 4')
# Solve the system
x = orthogonal(A, b)
# Check
if not np.allclose(b, A*x):
raise Exception('Orthogonal test failure')
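################################################################################
# Illustrative check (not part of the original tests): for an orthogonal matrix the
# inverse equals the transpose, so the solver's answer should coincide with A^T b
if not np.allclose(x, A.T * b):
    raise Exception('Orthogonal transpose identity failure')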
| 24.142857
| 80
| 0.498225
| 115
| 845
| 3.66087
| 0.313043
| 0.07601
| 0.064133
| 0.095012
| 0.72209
| 0.679335
| 0.546318
| 0.418052
| 0.418052
| 0.285036
| 0
| 0.045858
| 0.2
| 845
| 35
| 81
| 24.142857
| 0.576923
| 0.195266
| 0
| 0.222222
| 0
| 0
| 0.174168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4cecc18d5f88370e565ff6b3803a9cfe92f4765
| 11,056
|
py
|
Python
|
src/autonlp/project.py
|
adbmd/autonlp
|
8f7b5559d88775850b6818a09f178dc3407b2ab8
|
[
"Apache-2.0"
] | 1
|
2021-03-08T17:47:18.000Z
|
2021-03-08T17:47:18.000Z
|
src/autonlp/project.py
|
adbmd/autonlp
|
8f7b5559d88775850b6818a09f178dc3407b2ab8
|
[
"Apache-2.0"
] | null | null | null |
src/autonlp/project.py
|
adbmd/autonlp
|
8f7b5559d88775850b6818a09f178dc3407b2ab8
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional
from huggingface_hub import Repository
from loguru import logger
from prettytable import PrettyTable
from .splits import TEST_SPLIT, TRAIN_SPLIT, VALID_SPLIT
from .tasks import TASKS
from .utils import BOLD_TAG, CYAN_TAG, GREEN_TAG, PURPLE_TAG, RESET_TAG, YELLOW_TAG, http_get, http_post
from .validation import validate_file
FILE_STATUS = (
"☁ Uploaded",
"⌚ Queued",
"⚙ In Progress...",
"✅ Success!",
"❌ Failed: file not found",
"❌ Failed: unsupported file type",
"❌ Failed: server error",
"❌ Invalid column mapping, please fix it and re-upload the file.",
)
JOB_STATUS = (
("⌚", "queued"),
("🚀", "start"),
("⚙", "data_munging"),
("🏃", "model_training"),
("✅", "success"),
("❌", "failed"),
)
PROJECT_STATUS = (
("✨", "Created"),
("🚀", "Data processing started"),
("✅", "Data processing successful"),
("❌", "Failed to download data files from the huggingface hub"),
("❌", "Missing 'train' or 'valid' split in data files"),
("❌", "Failed to process data files"),
("❌", "Failed to upload processed data files to the huggingface hub"),
)
SPLITS = (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT)
@dataclass
class TrainingJob:
"""A training job in AutoNLP"""
job_id: int
status: str
status_emoji: str
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
job_id=json_resp["id"],
status_emoji=JOB_STATUS[json_resp["status"] - 1][0],
status=JOB_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📚 Model # {self.job_id}",
f" • {BOLD_TAG}Status{RESET_TAG}: {self.status_emoji} {self.status}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class UploadedFile:
"""A file uploaded to an AutoNLP project"""
file_id: int
filename: str
processing_status: str
split: str
col_mapping: Dict[str, str]
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
file_id=json_resp["data_file_id"],
filename=json_resp["fname"],
processing_status=FILE_STATUS[json_resp["download_status"] - 1],
split=SPLITS[json_resp["split"] - 1],
col_mapping=json_resp["col_mapping"],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📁 {CYAN_TAG}{self.filename}{RESET_TAG} (id # {self.file_id})",
f" • {BOLD_TAG}Split{RESET_TAG}: {self.split}",
f" • {BOLD_TAG}Processing status{RESET_TAG}: {self.processing_status}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class Project:
"""An AutoNLP project"""
_token: str
proj_id: int
name: str
user: str
task: str
status_emoji: str
status: str
language: str
created_at: datetime
updated_at: datetime
dataset_id: str
files: Optional[List[UploadedFile]] = None
training_jobs: Optional[List] = None
@classmethod
def from_json_resp(cls, json_resp: dict, token: str):
"""Build a Project from the API response, JSON-encoded"""
return cls(
proj_id=json_resp["id"],
name=json_resp["proj_name"],
user=json_resp["username"],
task=list(filter(lambda key: TASKS[key] == json_resp["task"], TASKS.keys()))[0],
status_emoji=PROJECT_STATUS[json_resp["status"] - 1][0],
status=PROJECT_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
dataset_id=json_resp["dataset_id"],
language=json_resp["config"]["language"],
_token=token,
)
def refresh(self):
"""Update information about uploaded files and models attached to the project"""
logger.info("🔄 Refreshing uploaded files information...")
resp = http_get(path=f"/projects/{self.proj_id}/data", token=self._token)
json_files = resp.json()
self.files = [UploadedFile.from_json_resp(file) for file in json_files]
logger.info("🔄 Refreshing models information...")
resp = http_get(path=f"/projects/{self.proj_id}/jobs", token=self._token)
json_jobs = resp.json()
self.training_jobs = [TrainingJob.from_json_resp(job) for job in json_jobs]
def upload(self, filepaths: List[str], split: str, col_mapping: Dict[str, str]):
"""Uploads files to the project"""
local_dataset_dir = os.path.expanduser(f"~/.huggingface/autonlp/projects/{self.dataset_id}")
if os.path.exists(local_dataset_dir):
if os.path.isdir(os.path.join(local_dataset_dir, "git")):
clone_from = None
else:
shutil.rmtree(local_dataset_dir)
clone_from = "https://huggingface.co/datasets/" + self.dataset_id
else:
clone_from = "https://huggingface.co/datasets/" + self.dataset_id
dataset_repo = Repository(
local_dir=local_dataset_dir,
clone_from=clone_from,
use_auth_token=self._token,
)
dataset_repo.git_pull()
for idx, file_path in enumerate(filepaths):
if not os.path.isfile(file_path):
logger.error(f"[{idx + 1}/{len(filepaths)}] ❌ '{file_path}' does not exist or is not a file!")
continue
file_name = os.path.basename(file_path)
file_extension = file_name.split(".")[-1]
src = os.path.expanduser(file_path)
dst = os.path.join(local_dataset_dir, "raw", file_name)
logger.info(f"[{idx + 1}/{len(filepaths)}] 📦 Copying {src} to {dst}...")
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copyfile(src, dst)
logger.info(f"[{idx + 1}/{len(filepaths)}] 🔎 Validating {dst} and column mapping...")
validate_file(path=dst, task=self.task, file_ext=file_extension, col_mapping=col_mapping)
dataset_repo.lfs_track(patterns=[f"raw/*.{file_extension}"])
dataset_repo.git_pull()
try:
logger.info("☁ Uploading files to the dataset hub...")
dataset_repo.push_to_hub(commit_message="Upload from AutoNLP CLI")
logger.info("✅ Successfully uploaded the files!")
except OSError as err:
if "nothing to commit, working tree clean" in err.args[0]:
logger.info("❔ Files did not change since last upload!")
dataset_repo.git_push()
return
logger.error("❌ Something went wrong when uploading the files!")
raise
for idx, file_path in enumerate(filepaths):
file_name = os.path.basename(file_path)
logger.info(f"[{idx + 1}/{len(filepaths)}] 📁 Registering file {file_name} into project '{file_name}'...")
payload = {
"split": split,
"col_mapping": col_mapping,
"data_files": [{"fname": file_name, "username": self.user}],
}
http_post(path=f"/projects/{self.proj_id}/data/add", payload=payload, token=self._token)
logger.info(f"[{idx + 1}/{len(filepaths)}] ✅ Success!")
def train(self):
"""Starts training on the models"""
http_get(path=f"/projects/{self.proj_id}/data/start_process", token=self._token)
logger.info("🔥🔥 Training started!")
def __str__(self):
header = "\n".join(
[
f"AutoNLP Project (id # {self.proj_id})",
"~" * 35,
f" • {BOLD_TAG}Name{RESET_TAG}: {PURPLE_TAG}{self.name}{RESET_TAG}",
f" • {BOLD_TAG}Owner{RESET_TAG}: {GREEN_TAG}{self.user}{RESET_TAG}",
f" • {BOLD_TAG}Status{RESET_TAG}: {BOLD_TAG}{self.status_emoji} {self.status}{RESET_TAG}",
f" • {BOLD_TAG}Task{RESET_TAG}: {YELLOW_TAG}{self.task.title().replace('_', ' ')}{RESET_TAG}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
"",
]
)
printout = [header]
# Uploaded files information
if self.files is None:
descriptions = ["❓ Files information unknown, update the project"]
else:
if len(self.files) == 0:
descriptions = ["🤷 No files uploaded yet!"]
else:
sorted_files = sorted(self.files, key=lambda file: file.split) # Sort by split
descriptions = [str(file) for file in sorted_files]
printout.append(
"\n".join(
[
"~" * 14 + f" {BOLD_TAG}Files{RESET_TAG} " + "~" * 14,
"",
"Dataset ID:",
f"{CYAN_TAG}{self.dataset_id}{RESET_TAG}",
"",
]
+ descriptions
)
)
# Training jobs information
if self.training_jobs is None:
jobs_str = "❓ Models information unknown, update the project"
else:
if len(self.training_jobs) == 0:
jobs_str = "🤷 No train jobs started yet!"
else:
model_table = PrettyTable(["", "ID", "Status", "Creation date", "Last update"])
for job in sorted(self.training_jobs, key=lambda job: job.job_id):
model_table.add_row(
[
job.status_emoji,
job.job_id,
job.status,
job.created_at.strftime("%Y-%m-%d %H:%M Z"),
job.updated_at.strftime("%Y-%m-%d %H:%M Z"),
]
)
jobs_str = str(model_table)
printout.append("\n".join(["", "~" * 12 + f" {BOLD_TAG}Models{RESET_TAG} " + "~" * 11, "", jobs_str]))
return "\n".join(printout)
| 38.256055
| 117
| 0.565213
| 1,361
| 11,056
| 4.43277
| 0.187362
| 0.039781
| 0.011934
| 0.017902
| 0.336151
| 0.311454
| 0.282778
| 0.216145
| 0.206365
| 0.154152
| 0
| 0.003864
| 0.297847
| 11,056
| 288
| 118
| 38.388889
| 0.766585
| 0.030391
| 0
| 0.222222
| 0
| 0.032922
| 0.284965
| 0.080135
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.049383
| 0.016461
| 0.230453
| 0.016461
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4d089a89ed2ccdb81f62b6a9415dbcedcf723fa
| 25,485
|
py
|
Python
|
demonstrations/tutorial_kernels_module.py
|
jamesellis1999/qml
|
33c9d66712b36861dc098f9c789ba2c3ab897fdb
|
[
"Apache-2.0"
] | 216
|
2020-08-01T03:18:37.000Z
|
2022-03-25T06:17:52.000Z
|
demonstrations/tutorial_kernels_module.py
|
jamesellis1999/qml
|
33c9d66712b36861dc098f9c789ba2c3ab897fdb
|
[
"Apache-2.0"
] | 173
|
2020-08-05T09:24:15.000Z
|
2022-03-30T13:37:05.000Z
|
demonstrations/tutorial_kernels_module.py
|
jamesellis1999/qml
|
33c9d66712b36861dc098f9c789ba2c3ab897fdb
|
[
"Apache-2.0"
] | 66
|
2020-08-01T05:02:45.000Z
|
2022-03-02T19:34:54.000Z
|
r"""Training and evaluating quantum kernels
===========================================
.. meta::
:property="og:description": Kernels and alignment training with Pennylane.
:property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png
.. related::
tutorial_kernel_based_training Kernel-based training with scikit-learn
tutorial_data_reuploading_classifier Classification with data reuploading
*Authors: Peter-Jan Derks, Paul Fährmann, Elies Gil-Fuster, Tom
Hubregtsen, Johannes Jakob Meyer and David Wierichs. Posted: 24 June 2021*
Kernel methods are one of the cornerstones of classical machine learning.
Here we are concerned with kernels that can be evaluated on quantum computers,
*quantum kernels* for short.
In this tutorial you will learn how to evaluate kernels, use them for classification
and train them with gradient-based optimization, and all that using the
functionality of PennyLane's
`kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__.
The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own
`QHack <https://qhack.ai/>`__ hackathon.
What are kernel methods?
------------------------
To understand what a kernel method does, let's first revisit
one of the simplest methods to assign binary labels to datapoints:
linear classification.
Imagine we want to discern two different classes of points that lie in
different corners of the plane. A linear classifier corresponds to
drawing a line and assigning different labels to the regions on opposing
sides of the line:
.. figure:: ../demonstrations/kernels_module/linear_classification.png
:align: center
:width: 30%
We can mathematically formalize this by assigning the label :math:`y`
via
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b).
The vector :math:`\boldsymbol{w}` is perpendicular to the line and
thus determines its slope. The independent term :math:`b` specifies the
line's position on the plane. In this form, linear classification can also be
extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a
line does not divide the entire space into two regions anymore. Instead
one needs a *hyperplane*. It is immediately clear that this method is
not very powerful, as datasets that are not separable by a hyperplane
can't be classified without error.
We can sneak around this limitation by performing a neat trick:
if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our
datapoints into a larger *feature space* and then perform linear
classification there, we can realise non-linear
classification in our original space!
.. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png
:align: center
:width: 65%
If we go back to the expression for our prediction and include the
embedding, we get
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b).
We will forgo one tiny step, but it can be shown that for the purpose
of optimal classification, we can choose the vector defining the
decision boundary as a linear combination of the embedded datapoints
:math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting
this into the formula yields
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right).
This rewriting might not seem useful at first, but notice that the above
formula only contains inner products between vectors in the embedding
space:
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle.
We call this function the *kernel*. It provides the advantage that we can often
find an explicit formula for the kernel :math:`k` that makes it
superfluous to actually perform the (potentially expensive) embedding
:math:`\phi`. Consider for example the following embedding and the
associated kernel:
.. math::
\phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\
k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2.
This means that by simply replacing the regular scalar product in our linear
classification with the map :math:`k`, we can express much more
intricate decision boundaries!
This is very important, because in many interesting cases the embedding :math:`\phi`
will be much costlier to compute than the kernel :math:`k`.
In this demo, we will explore one particular kind of kernel
that can be realized on near-term quantum computers, namely *Quantum
Embedding Kernels (QEKs)*. These are kernels that arise from embedding
data into the space of quantum states. We formalize this by considering
a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps
a datapoint :math:`\boldsymbol{x}` to the state
.. math::
|\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle.
The kernel value is then given by the *overlap* of the associated
embedded quantum states
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2.
"""
##############################################################################
# A toy problem
# -------------
# In this demo, we will treat a toy problem that showcases the
# inner workings of classification with quantum embedding kernels,
# training variational embedding kernels and the available functionalities
# to do both in PennyLane. We of course need to start with some imports:
from pennylane import numpy as np
import matplotlib as mpl
np.random.seed(1359)
##############################################################################
# And we proceed right away to create a dataset to work with, the
# ``DoubleCake`` dataset. Firstly, we define two functions to enable us to
# generate the data.
# The details of these functions are not essential for understanding the demo,
# so don't mind them if they are confusing.
def _make_circular_data(num_sectors):
"""Generate datapoints arranged in an even circle."""
center_indices = np.array(range(0, num_sectors))
sector_angle = 2 * np.pi / num_sectors
angles = (center_indices + 0.5) * sector_angle
x = 0.7 * np.cos(angles)
y = 0.7 * np.sin(angles)
labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1
return x, y, labels
def make_double_cake_data(num_sectors):
x1, y1, labels1 = _make_circular_data(num_sectors)
x2, y2, labels2 = _make_circular_data(num_sectors)
# x and y coordinates of the datapoints
x = np.hstack([x1, 0.5 * x2])
y = np.hstack([y1, 0.5 * y2])
# Canonical form of dataset
X = np.vstack([x, y]).T
labels = np.hstack([labels1, -1 * labels2])
# Canonical form of labels
Y = labels.astype(int)
return X, Y
##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:
def plot_double_cake_data(X, Y, ax, num_sectors=None):
"""Plot double cake data and corresponding sectors."""
x, y = X.T
cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")
if num_sectors is not None:
sector_angle = 360 / num_sectors
for i in range(num_sectors):
color = ["#FF0000", "#0000FF"][(i % 2)]
other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
1,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=color,
alpha=0.1,
width=0.5,
)
)
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
0.5,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=other_color,
alpha=0.1,
)
)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_aspect("equal")
ax.axis("off")
return ax
##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:
import matplotlib.pyplot as plt
num_sectors = 3
X, Y = make_double_cake_data(num_sectors)
ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)
##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as building block. Let's start by defining this layer:
import pennylane as qml
def layer(x, params, wires, i0=0, inc=1):
"""Building block of the embedding ansatz"""
i = i0
for j, wire in enumerate(wires):
qml.Hadamard(wires=[wire])
qml.RZ(x[i % len(x)], wires=[wire])
i += inc
qml.RY(params[0, j], wires=[wire])
qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])
##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each of them.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.
def ansatz(x, params, wires):
"""The embedding ansatz"""
for j, layer_params in enumerate(params):
layer(x, layer_params, wires, i0=j * len(wires))
adjoint_ansatz = qml.adjoint(ansatz)
def random_params(num_wires, num_layers):
"""Generate random variational parameters in the shape for the ansatz."""
return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)
##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.
dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()
##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.
@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
ansatz(x1, params, wires=wires)
adjoint_ansatz(x2, params, wires=wires)
return qml.probs(wires=wires)
##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:
def kernel(x1, x2, params):
return kernel_circuit(x1, x2, params)[0]
##############################################################################
#
# .. note::
# An alternative way to set up the kernel circuit in PennyLane would be
# to use the observable type
# `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
# This is shown in the
# `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more
# background information on the kernel circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.
init_params = random_params(num_wires=5, num_layers=6)
##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:
kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")
##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# method, which makes use of the symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.
init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)
with np.printoptions(precision=3, suppress=True):
print(K_init)
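##############################################################################
# To make explicit what this convenience function computes, here is a minimal
# by-hand construction of the same matrix (an illustrative addition, not part
# of the original demo). It only evaluates the upper triangle and mirrors it,
# and fills the diagonal with ones, just like the call above with
# ``assume_normalized_kernel=True``. This re-evaluates the circuit, so it is
# purely for illustration.
N_data = len(X)
K_manual = np.eye(N_data)
for i in range(N_data):
    for j in range(i + 1, N_data):
        K_manual[i, j] = K_manual[j, i] = init_kernel(X[i], X[j])
print(np.allclose(K_manual, K_init))  # the two constructions agree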
##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone cannot be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
from sklearn.svm import SVC
##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
#     This step does *not* modify the variational parameters in our circuit
#     ansatz. What it does is solve a different optimization task for the
#     :math:`\alpha` and :math:`b` parameters we introduced in the beginning.
svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)
##############################################################################
# To see how well our classifier performs we will measure which percentage
# of the dataset it classifies correctly.
def accuracy(classifier, X, Y_target):
return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)
accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")
##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex datasets. To this end we will introduce a
# second helper method.
def plot_decision_boundaries(classifier, ax, N_gridpoints=14):
_xx, _yy = np.meshgrid(np.linspace(-1, 1, N_gridpoints), np.linspace(-1, 1, N_gridpoints))
_zz = np.zeros_like(_xx)
for idx in np.ndindex(*_xx.shape):
_zz[idx] = classifier.predict(np.array([_xx[idx], _yy[idx]])[np.newaxis, :])
plot_data = {"_xx": _xx, "_yy": _yy, "_zz": _zz}
ax.contourf(
_xx,
_yy,
_zz,
cmap=mpl.colors.ListedColormap(["#FF0000", "#0000FF"]),
alpha=0.2,
levels=[-1, 0, 1],
)
plot_double_cake_data(X, Y, ax)
return plot_data
##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:
init_plot_data = plot_decision_boundaries(svm, plt.gca())
##############################################################################
# We see that the outer points in the dataset can be classified correctly, but
# we still struggle with the inner circle. But remember we have a circuit
# with many free parameters! It is reasonable to believe we can find
# values for those variational parameters that improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
# \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
# Seen from a more theoretical side, :math:`\operatorname{KA}`
# is nothing else than the cosine of the angle between the kernel
# matrices :math:`K_1` and :math:`K_2` if we see them as vectors
# in the space of matrices with the Hilbert-Schmidt (or
# Frobenius) scalar product
# :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
# reinforces the geometric picture of how this measure relates
# to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
# k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise, and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
# \operatorname{KTA}_{\boldsymbol{y}}(K)
# = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
# = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: having a high kernel-target alignment
# is only a necessary but not a sufficient condition for a good
# performance of the kernel [#Alignment]_. This means that good performance
# guarantees good alignment, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:
kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)
print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")
##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step; here we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
#     The function ``qml.kernels.target_alignment`` is not currently
#     differentiable, making it unfit for gradient descent optimization.
#     We therefore first define a differentiable version of this function.
def target_alignment(
X,
Y,
kernel,
assume_normalized_kernel=False,
rescale_class_labels=True,
):
"""Kernel-target alignment between kernel and labels."""
K = qml.kernels.square_kernel_matrix(
X,
kernel,
assume_normalized_kernel=assume_normalized_kernel,
)
if rescale_class_labels:
nplus = np.count_nonzero(np.array(Y) == 1)
nminus = len(Y) - nplus
_Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
else:
_Y = np.array(Y)
T = np.outer(_Y, _Y)
inner_product = np.sum(K * T)
norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
inner_product = inner_product / norm
return inner_product
params = init_params
opt = qml.GradientDescentOptimizer(0.2)
for i in range(500):
# Choose subset of datapoints to compute the KTA on.
subset = np.random.choice(list(range(len(X))), 4)
# Define the cost function for optimization
cost = lambda _params: -target_alignment(
X[subset],
Y[subset],
lambda x1, x2: kernel(x1, x2, _params),
assume_normalized_kernel=True,
)
# Optimization step
params = opt.step(cost, params)
# Report the alignment on the full dataset every 50 steps.
if (i + 1) % 50 == 0:
current_alignment = target_alignment(
X,
Y,
lambda x1, x2: kernel(x1, x2, params),
assume_normalized_kernel=True,
)
print(f"Step {i+1} - Alignment = {current_alignment:.3f}")
##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:
# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)
# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)
# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)
##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:
accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")
##############################################################################
# We have now achieved perfect classification! 🎆
#
# Following up on the result that SVMs tend to show good generalisation
# behavior, it will be interesting to inspect the decision boundaries of
# our classifier:
trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())
##############################################################################
# Indeed, we see that not only does every data instance now fall within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach benefits from both worlds: on
# the one hand it can adjust itself to the dataset, and on the other hand it
# is not expected to suffer from bad generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
# Thomas Hubregtsen, David Wierichs, Elies Gil-Fuster, Peter-Jan H. S. Derks,
# Paul K. Faehrmann, and Johannes Jakob Meyer.
# "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
# `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
# Wang, Tinghua, Dongyan Zhao, and Shengfeng Tian.
# "An overview of kernel alignment and its applications."
# `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
| 40.645933
| 157
| 0.655994
| 3,571
| 25,485
| 4.607113
| 0.227667
| 0.021396
| 0.012764
| 0.01167
| 0.12266
| 0.093059
| 0.061634
| 0.04662
| 0.037564
| 0.019572
| 0
| 0.013809
| 0.175947
| 25,485
| 626
| 158
| 40.710863
| 0.769535
| 0.655052
| 0
| 0.138554
| 0
| 0
| 0.066004
| 0.006675
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066265
| false
| 0
| 0.03012
| 0.012048
| 0.150602
| 0.042169
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4d1efc02f1792aaf622052d335ddc24c16d8ad6
| 5,465
|
py
|
Python
|
main.py
|
scottkaz/PyLoopover
|
8f11f559c09747400fe6bb520ab521dbafa90e97
|
[
"MIT"
] | null | null | null |
main.py
|
scottkaz/PyLoopover
|
8f11f559c09747400fe6bb520ab521dbafa90e97
|
[
"MIT"
] | null | null | null |
main.py
|
scottkaz/PyLoopover
|
8f11f559c09747400fe6bb520ab521dbafa90e97
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import pygame
import random
import time
##VARIABLES TO CHANGE
width = 500
height = 500
stats_height = 150
board_size = 5
window_name = "PyLoopover "+str(board_size)+"x"+str(board_size)
scramble_turns = 50
t_round = 3
FPS = 30
##DONT CHANGE THESE BOIS
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (32,200,32)
keys = {"w":0,"a":0,"s":0,"d":0,"q":0}
last_was_Q = False
class Tile:
def __init__(self,number,s):
self.number = number
n = number-1
self.color = ((n/s)*(255/s),(n%s)*(255/s),128)
def draw(self,screen,font,x,y,width,height):
pygame.draw.rect(screen,self.color,(x,y,width,height))
text = font.render(str(self.number),True,BLACK)
screen.blit(text,(x,y))
class Board:
content = []
start_t=0
end_t=0
game=False
moves = 0
def __init__(self,size):
self.size = size
for i in range(0,size):
self.content.append([])
for j in range(0,size):
self.content[i].append(None)
self.content[i][j] = Tile(i+j*size+1,size)
def rotate_left(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i-1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_right(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i+1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_down(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i-1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def rotate_up(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i+1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def draw(self,screen,font):
for i in range(0,self.size):
for j in range(0,self.size):
w = (width / self.size)
h = (height / self.size)
x = i * w
y = j * h
self.content[i][j].draw(screen,font,x,y,w,h)
def scramble(self,n):
for i in range(0,n):
o = random.randint(0,3)
if o == 0:
self.rotate_left(random.randint(0,board_size-1))
elif o == 1:
self.rotate_right(random.randint(0,board_size-1))
elif o == 2:
self.rotate_up(random.randint(0,board_size-1))
else:
self.rotate_down(random.randint(0,board_size-1))
self.game=False
self.moves=0
return True
def is_solved(self):
for i in range(0,self.size):
for j in range(0,self.size):
if self.content[i][j].number != i+j*self.size+1:
return False
return True
def start_time(self):
print("time has started")
self.start_t = time.monotonic()
self.game = True
return self.start_time
def end_time(self):
print("time has ended")
self.end_t = time.monotonic()
return self.end_time
def get_time(self):
if (not self.is_solved()) and self.game:
return (time.monotonic() - self.start_t , BLACK)
elif self.is_solved() and self.game:
return (self.end_t - self.start_t , GREEN)
else:
return (0 , BLACK)
def main():
gameboard = Board(board_size)
pygame.init()
pygame.mixer.quit() #weird workaroud
#name the window & size it.
pygame.display.set_caption(window_name)
screen = pygame.display.set_mode((width,height+stats_height),0,32)
#setup framerate
pygame.time.set_timer(pygame.USEREVENT+1,int((1/FPS)*1000))
#setup event que
pygame.event.set_allowed(None) #start with no events allowed
pygame.event.set_allowed(pygame.USEREVENT+1) #timer event
pygame.event.set_allowed(pygame.KEYDOWN)
pygame.event.set_allowed(pygame.QUIT) #4 quitters
#setup fonts
font = pygame.font.SysFont('mono',int((width/board_size)/1.14))
font2 = pygame.font.SysFont('mono',int(stats_height/2.3))
#main l00p
running = True
while running:
#eevveeentttss???
event = pygame.event.wait()
if event.type == pygame.USEREVENT+1:
#a fresh canvas
screen.fill(WHITE)
#draw stats
time = gameboard.get_time()
time_str = str( int( time[0] * (10 ** t_round) ) / (10 ** t_round) )
text_timer = font2.render("Time :"+time_str,True,time[1])
text_moves = font2.render("Moves:"+str(gameboard.moves),True,time[1])
screen.blit(text_timer,(0,height))
screen.blit(text_moves,(0,height+(stats_height/2)))
#draw board
gameboard.draw(screen,font)
#update da screeeeeen
pygame.display.update()
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
elif event.type == pygame.KEYDOWN:
k = chr(event.key) #gimme a CHAR, not some weird integer
domap = {
"w":"gameboard.rotate_up(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
"a":"gameboard.rotate_right(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
"s":"gameboard.rotate_down(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
"d":"gameboard.rotate_left(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
"q":"gameboard.scramble(scramble_turns)"
} #i guess?
if k in ['w','a','s','d','q']:
#starting game logic
if k == "q":
last_was_Q = True
else:
if last_was_Q:
gameboard.start_time()
last_was_Q = False
exec(domap[k])
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
#for quitters
elif event.type == pygame.QUIT:
print("Quitting...")
running = False
else:
print("err0r, bAd 3v3nt lol")
assert False
if __name__ == "__main__":
main()
| 27.882653
| 85
| 0.665691
| 920
| 5,465
| 3.843478
| 0.184783
| 0.047511
| 0.033937
| 0.03733
| 0.369344
| 0.305995
| 0.277149
| 0.260747
| 0.244344
| 0.202489
| 0
| 0.028966
| 0.166148
| 5,465
| 195
| 86
| 28.025641
| 0.746983
| 0.067155
| 0
| 0.236686
| 0
| 0
| 0.086734
| 0.06367
| 0
| 0
| 0
| 0
| 0.005917
| 1
| 0.08284
| false
| 0
| 0.017751
| 0
| 0.213018
| 0.023669
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4d42429c658c9fa5c1d797f95b772cf6d3bbc13
| 12,044
|
py
|
Python
|
csmpe/core_plugins/csm_install_operations/exr/package_lib.py
|
anushreejangid/csmpe-main
|
c62ecb3ce4e44b188ed480d06a6d9d21967c6a2a
|
[
"BSD-2-Clause"
] | null | null | null |
csmpe/core_plugins/csm_install_operations/exr/package_lib.py
|
anushreejangid/csmpe-main
|
c62ecb3ce4e44b188ed480d06a6d9d21967c6a2a
|
[
"BSD-2-Clause"
] | 8
|
2017-04-21T05:36:37.000Z
|
2017-04-27T15:55:33.000Z
|
csmpe/core_plugins/csm_install_operations/exr/package_lib.py
|
anushreejangid/csmpe-main
|
c62ecb3ce4e44b188ed480d06a6d9d21967c6a2a
|
[
"BSD-2-Clause"
] | null | null | null |
# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
"""
NCS4K
Production Packages
External Names Internal Names
ncs4k-full-x.iso-6.0.2
ncs4k-mini-x.iso-6.0.2
ncs4k-k9sec.pkg-6.0.2
ncs4k-mpls.pkg-6.0.2
ncs4k-mcast.pkg-6.0.2
ncs4k-mgbl.pkg-6.0.2
NCS6K
Production Packages
External Names Internal Names
ncs6k-doc.pkg-5.2.4 ncs6k-doc-5.2.4
ncs6k-li.pkg-5.2.4 ncs6k-li-5.2.4
ncs6k-mcast.pkg-5.2.4 ncs6k-mcast-5.2.4
ncs6k-mgbl.pkg-5.2.4 ncs6k-mgbl-5.2.4
ncs6k-mini-x.iso-5.2.4 ncs6k-mini-x-5.2.4
ncs6k-mpls.pkg-5.2.4 ncs6k-mpls-5.2.4
ncs6k-sysadmin.iso-5.2.4 ncs6k-sysadmin-5.2.4
ncs6k-full-x.iso-5.2.4 ncs6k-full-x-5.2.4
ncs6k-5.2.5.CSCuy47880.smu ncs6k-5.2.5.CSCuy47880-1.0.0 <- subversion added
Engineering Packages
External Names Internal Names
ncs6k-mcast.pkg-5.2.5.47I.DT_IMAGE ncs6k-mcast-5.2.5.47I
ncs6k-mini-x.iso-6.1.0.07I.DT_IMAGE ncs6k-xr-5.2.5.47I
ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i.smu ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i
ASR9K-64
Production Packages - not finalized yet
External Names Internal Names
asr9k-mcast-x64-2.0.0.0-r611.x86_64.rpm asr9k-mcast-x64-2.0.0.0-r611
asr9k-bgp-x64-1.0.0.0-r611.x86_64.rpm asr9k-bgp-x64-1.0.0.0-r611
asr9k-mgbl-x64-3.0.0.0-r611.x86_64.rpm asr9k-mgbl-x64-3.0.0.0-r611
asr9k-full-x64.iso-6.1.1 asr9k-xr-6.1.1
asr9k-mini-x64.iso-6.1.1 asr9k-xr-6.1.1
Engineering Packages
External Names Internal Names
asr9k-mcast-x64-2.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-mcast-x64-2.0.0.0-r61116I
asr9k-bgp-x64-1.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-bgp-x64-1.0.0.0-r61116I
asr9k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-mgbl-x64-3.0.0.0-r61116I
asr9k-full-x64.iso-6.1.1.16I.DT_IMAGE asr9k-full-x64-6.1.1.16I
asr9k-mini-x64.iso-6.1.1.16I.DT_IMAGE asr9k-mini-x64-6.1.1.16I
NCS5K
Production Packages
External Names Internal Names
ncs5k-sysadmin.iso-6.0.1 ncs5k-sysadmin-6.0.1
ncs5k-full-x.iso-6.0.1 ncs5k-xr-6.0.1
ncs5k-mini-x.iso-6.0.1 ncs5k-xr-6.0.1
ncs5k-mcast-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mcast-2.0.0.0-r601
ncs5k-mgbl-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mgbl-2.0.0.0-r601
ncs5k-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mpls-2.0.0.0-r601
ncs5k-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-k9sec-2.0.0.0-r601
ncs5k-isis-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-isis-2.0.0.0-r601
ncs5k-ospf-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-ospf-2.0.0.0-r601
Engineering Packages
External Names Internal Names
ncs5k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.0.1.16I.DT_IMAGE ncs5k-mgbl-3.0.0.0-r60116I
ncs5k-sysadmin.iso-6.0.1 ncs5k-sysadmin-6.0.1.26I
ncs5k-full-x.iso-6.0.1.16I.DT_IMAGE ncs5k-xr-6.0.1.16I
NCS5500
Production Packages
External Names Internal Names
ncs5500-eigrp-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-eigrp-2.0.0.0-r601
ncs5500-isis-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-isis-2.0.0.0-r601
ncs5500-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-k9sec-2.0.0.0-r601
ncs5500-m2m-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-m2m-2.0.0.0-r601
ncs5500-mgbl-3.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mgbl-3.0.0.0-r601
ncs5500-mini-x.iso-6.0.1 ncs5500-xr-6.0.1
ncs5500-mpls-te-rsvp-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mpls-te-rsvp-2.0.0.0-r601
ncs5500-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mpls-2.0.0.0-r601
ncs5500-ospf-1.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-ospf-1.0.0.0-r601
ncs5500-parser-1.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-parser-1.0.0.0-r601
"""
import re
platforms = ['asr9k', 'ncs1k', 'ncs4k', 'ncs5k', 'ncs5500', 'ncs6k', 'xrv9k']
version_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k": # 61117I or 611 or 6.1.1.17I or 6.1.1
re.compile("(?P<VERSION>(\d+\d+\d+(\d+\w+)?)|(\d+\.\d+\.\d+(\.\d+\w+)?)(?!\.\d)(?!-))"),
"ncs4k ncs6k": # 5.2.4 or 5.2.4.47I
re.compile("(?P<VERSION>\d+\.\d+\.\d+(\.\d+\w+)?)"),
}
smu_re = re.compile("(?P<SMU>CSC[a-z]{2}\d{5})")
subversion_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k":
re.compile("-(?P<SUBVERSION>\d+\.\d+\.\d+\.\d+)-"), # 2.0.0.0
"ncs4k ncs6k":
re.compile("CSC.*(?P<SUBVERSION>\d+\.\d+\.\d+?)"), # 0.0.4
}
class SoftwarePackage(object):
def __init__(self, package_name):
self.package_name = package_name
self._platform = None
self._package_type = None
self._version = None
self._smu = None
self._subversion = None
@property
def platform(self):
if not self._platform:
for platform in platforms:
if platform + "-" in self.package_name:
self._platform = platform
break
return self._platform
@property
def package_type(self):
if not self._package_type:
# For ASR9K-X64, NCS1K, NCS5K, NCS5500:
# Extract the package type string before X.X.X.X
# For NCS6K
# Extract the package type string before X.X.X
pattern = '-\d+\.\d+\.\d+' if self.platform == 'ncs6k' or \
self.platform == 'ncs4k' else '-\d\.\d\.\d.\d'
if self.platform and self.platform in self.package_name:
match = re.search(pattern, self.package_name)
# Special handling for mini, full, and sysadmin ISO on ASR9K-X64, NCS1K, NCS5K, NCS5500
# Example: ncs5500-mini-x.iso-6.0.1, asr9k-full-x64.iso-6.1.1
# Package type string is before the 3 part version string
# External Name: ncs5k-goldenk9-x.iso-6.3.1.11I.0, Internal Name: ncs5k-goldenk9-x-6.3.1.11I
if not match and sum([x in self.package_name for x in ['full', 'mini', 'sysadmin', 'goldenk9']]) > 0:
# Use the three part match for these ISO packages
match = re.search('-\d+\.\d+\.\d+', self.package_name)
if match:
# Extract the package type
self._package_type = self.package_name[0:match.start()].replace(self.platform + '-', '')
if self._package_type:
# Takes care the external to internal name matching
# Example, ncs6k-mgbl.pkg-5.2.5 -> mgbl, ncs5500-mini-x.iso-6.0.1 -> mini-x
self._package_type = self._package_type.replace('.pkg', '').replace('.iso', '')
return self._package_type
@property
def version(self):
if not self._version:
dict_values = self.get_values(version_dict, self.platform)
if self.platform and dict_values:
to_match = self.package_name.replace(self.platform, '')
result = re.search(dict_values, to_match)
if result:
self._version = result.group("VERSION")
return self._version
@property
def smu(self):
if not self._smu:
result = re.search(smu_re, self.package_name)
if result:
self._smu = result.group("SMU")
return self._smu
@property
def subversion(self):
if not self._subversion:
dict_values = self.get_values(subversion_dict, self.platform)
if self.platform and dict_values:
# For NCS6K, only need to consider subversion if it is a SMU.
if self.platform in ["asr9k", "ncs1k", "ncs5k", "ncs5500", "xrv9k"] or self.smu:
to_match = self.package_name.replace(self.platform, '')
result = re.search(dict_values, to_match)
if result:
self._subversion = result.group("SUBVERSION")
return self._subversion
def get_values(self, dictionary, key):
for keys in dictionary.keys():
if key in keys.split():
return dictionary.get(keys)
return None
def is_valid(self):
return self.platform and self.version and (self.package_type or self.smu)
def __eq__(self, other):
result = self.platform == other.platform and \
(self.package_type == other.package_type) and \
self.version == other.version and \
self.smu == other.smu and \
(self.subversion == other.subversion if self.subversion and other.subversion else True)
return result
def __hash__(self):
return hash("{}{}{}{}{}".format(
self.platform, self.package_type, self.version, self.smu, self.subversion))
@staticmethod
def from_show_cmd(cmd):
software_packages = set()
data = cmd.split()
for line in data:
software_package = SoftwarePackage(line)
if software_package.is_valid():
software_packages.add(software_package)
return software_packages
@staticmethod
def from_package_list(pkg_list):
software_packages = set()
for pkg in pkg_list:
software_package = SoftwarePackage(pkg)
if software_package.is_valid():
""" for debugging
print('package_name', software_package.package_name,
'platform', software_package.platform, 'package_type', software_package.package_type,
'version', software_package.version, 'smu', software_package.smu,
'subversion', software_package.subversion)
"""
software_packages.add(software_package)
return software_packages
def __repr__(self):
return self.package_name
def __str__(self):
return self.__repr__()
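# --- Illustrative usage (an addition, not part of the original module). ---
# A quick, hypothetical check of how an external package name is decomposed
# into its parts by the parsing logic above:
if __name__ == "__main__":
    pkg = SoftwarePackage("ncs6k-mgbl.pkg-5.2.4")
    print(pkg.platform, pkg.package_type, pkg.version)  # -> ncs6k mgbl 5.2.4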
| 42.86121
| 117
| 0.574643
| 1,780
| 12,044
| 3.802247
| 0.13764
| 0.027778
| 0.019947
| 0.031028
| 0.494976
| 0.39539
| 0.323286
| 0.259309
| 0.213505
| 0.176566
| 0
| 0.120247
| 0.28811
| 12,044
| 280
| 118
| 43.014286
| 0.669116
| 0.542843
| 0
| 0.238532
| 0
| 0.009174
| 0.092338
| 0.040472
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12844
| false
| 0
| 0.009174
| 0.036697
| 0.275229
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4d7101b172b777d4c47f40c60724b8fe87dbf67
| 4,374
|
py
|
Python
|
chirun/plastex/color/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 5
|
2021-12-06T15:57:24.000Z
|
2022-01-24T20:34:00.000Z
|
chirun/plastex/color/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 38
|
2021-12-09T13:16:46.000Z
|
2022-03-30T11:42:13.000Z
|
chirun/plastex/color/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 1
|
2022-01-17T17:41:35.000Z
|
2022-01-17T17:41:35.000Z
|
from plasTeX import Command, Environment
def ProcessOptions(options, document):
colors = {}
document.userdata.setPath('packages/color/colors', colors)
colors['red'] = latex2htmlcolor('1,0,0')
colors['green'] = latex2htmlcolor('0,1,0')
colors['blue'] = latex2htmlcolor('0,0,1')
colors['cyan'] = latex2htmlcolor('0,1,1')
colors['magenta'] = latex2htmlcolor('1,0,1')
colors['yellow'] = latex2htmlcolor('1,1,0')
colors['white'] = latex2htmlcolor('1')
colors['black'] = latex2htmlcolor('0')
colors['gray'] = latex2htmlcolor('0.9')
colors['darkred'] = latex2htmlcolor('0.8,0,0')
colors['middlered'] = latex2htmlcolor('0.9,0,0')
colors['lightred'] = latex2htmlcolor('1,0,0')
colors['darkgreen'] = latex2htmlcolor('0,0.6,0')
colors['middlegreen'] = latex2htmlcolor('0,0.8,0')
colors['lightgreen'] = latex2htmlcolor('0,1,0')
colors['darkblue'] = latex2htmlcolor('0,0,0.8')
colors['middleblue'] = latex2htmlcolor('0,0,0.9')
colors['lightblue'] = latex2htmlcolor('0,0,1')
colors['darkcyan'] = latex2htmlcolor('0.6,0.8,0.8')
colors['middlecyan'] = latex2htmlcolor('0,0.8,0.8')
colors['darkmagenta'] = latex2htmlcolor('0.8,0.6,0.8')
colors['middlemagenta'] = latex2htmlcolor('1,0,0.6')
colors['darkyellow'] = latex2htmlcolor('0.8,0.8,0.6')
colors['middleyellow'] = latex2htmlcolor('1,1,0.2')
colors['darkgray'] = latex2htmlcolor('0.5')
colors['middlegray'] = latex2htmlcolor('0.7')
colors['lightgray'] = latex2htmlcolor('0.9')
def latex2htmlcolor(arg, model='rgb', named=None):
named = named or {}
if model == 'named':
return named.get(arg, '')
if ',' in arg:
parts = [float(x) for x in arg.split(',')]
# rgb
if len(parts) == 3:
red, green, blue = parts
red = min(int(red * 255), 255)
green = min(int(green * 255), 255)
blue = min(int(blue * 255), 255)
# cmyk
elif len(parts) == 4:
c, m, y, k = parts
red, green, blue = [int(255 * x) for x in [1 - c * (1 - k) - k, 1 - m * (1 - k) - k, 1 - y * (1 - k) - k]]
else:
return arg.strip()
else:
try:
red = green = blue = float(arg)
except ValueError:
try:
return named[arg]
except KeyError:
return arg.strip()
return '#%.2X%.2X%.2X' % (int(red), int(green), int(blue))
class definecolor(Command):
args = 'name:str model:str color:str'
def invoke(self, tex):
a = self.parse(tex)
u = self.ownerDocument.userdata
colors = u.getPath('packages/color/colors')
colors[a['name']] = latex2htmlcolor(a['color'], a['model'], colors)
class textcolor(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class color(Environment):
args = '[ model:str ] color:str'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class pagecolor(Command):
args = '[ model:str ] color:str'
class colorbox(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class fcolorbox(Command):
args = '[ model:str ] bordercolor:str color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
self.style['border'] = ('1px solid %s'
% latex2htmlcolor(a['bordercolor'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors')))
class normalcolor(Command):
pass
| 36.45
| 118
| 0.560814
| 509
| 4,374
| 4.819253
| 0.21611
| 0.130453
| 0.054219
| 0.063596
| 0.39788
| 0.304117
| 0.29311
| 0.29311
| 0.29311
| 0.29311
| 0
| 0.049672
| 0.268176
| 4,374
| 119
| 119
| 36.756303
| 0.716651
| 0.001829
| 0
| 0.297872
| 0
| 0
| 0.193903
| 0.033692
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074468
| false
| 0.010638
| 0.010638
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4d8cf9487b5b92aa26fd31970eb23caa185f9d2
| 816
|
py
|
Python
|
swm-master/swm-master/calc/mean_e_calc.py
|
m2lines/subgrid
|
3de5d14c5525a62529d43cbafccda716c74e32df
|
[
"MIT"
] | 1
|
2021-11-03T01:27:16.000Z
|
2021-11-03T01:27:16.000Z
|
swm-master/swm-master/calc/mean_e_calc.py
|
m2lines/subgrid
|
3de5d14c5525a62529d43cbafccda716c74e32df
|
[
"MIT"
] | null | null | null |
swm-master/swm-master/calc/mean_e_calc.py
|
m2lines/subgrid
|
3de5d14c5525a62529d43cbafccda716c74e32df
|
[
"MIT"
] | 1
|
2021-06-24T15:58:32.000Z
|
2021-06-24T15:58:32.000Z
|
## PRODUCE MEAN CALCULATIONS AND EXPORT AS .NPY
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
# OPTIONS
runfolder = 15
print('Calculating subgrid-EKE means from run ' + str(runfolder))
## read data
runpath = path+'data/run%04i' % runfolder
skip = 5*365
e = np.load(runpath+'/e_sub.npy')[skip:,:,:]
print('run %i read.' % runfolder)
## create ouputfolder
try:
os.mkdir(runpath+'/analysis')
except:
pass
## U,V,H mean
em = e.mean(axis=0)
print('e mean done.')
## STORING
dic = dict()
all_var2export = ['em']
for v in all_var2export:
exec('dic[v] ='+v)
np.save(runpath+'/analysis/mean_e.npy',dic)
print('Everything stored.')
| 20.4
| 65
| 0.704657
| 125
| 816
| 4.528
| 0.6
| 0.053004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017291
| 0.14951
| 816
| 39
| 66
| 20.923077
| 0.798271
| 0.153186
| 0
| 0
| 0
| 0
| 0.247788
| 0.038348
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.04
| 0.24
| 0
| 0.24
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4db81ffa51e39a4b08cb2f618fbc4f85e8db0b8
| 3,442
|
py
|
Python
|
STANchap7.py
|
phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan
|
d708faab0fdd43800e8726e2c6dd99452c8dcedb
|
[
"Unlicense"
] | 1
|
2021-03-18T08:01:32.000Z
|
2021-03-18T08:01:32.000Z
|
STANchap7.py
|
phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan
|
d708faab0fdd43800e8726e2c6dd99452c8dcedb
|
[
"Unlicense"
] | null | null | null |
STANchap7.py
|
phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan
|
d708faab0fdd43800e8726e2c6dd99452c8dcedb
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns
from cmdstanpy import CmdStanModel
#%% load data
data = pd.read_csv("data/overfitting.csv", index_col = 'case_id')
data.columns
data.info()
feature_names = data.columns.str.startswith("var_")
predictors = data[data.columns[feature_names]]
labels = data["Target_Practice"]
ix_training = data.train == 1
training_data = predictors[ix_training]
training_labels = labels[ix_training]
ix_testing = data.train == 0
testing_data = predictors[ix_testing]
testing_labels = labels[ix_testing]
sns.displot(training_data.values.flatten(), bins = "sqrt", kde = True)
pca = prince.PCA(n_components = 2, as_array = False).fit(training_data)
pca.plot_row_coordinates(training_data, color_labels = training_labels)
pca.column_correlations(training_data).plot.scatter(x = 0, y = 1) # weird column name
#%% Roshan Sharma model
mdl_data = { # problem with JSON dump => cast to python native type
'N': ix_training.sum().tolist(),
'N2': ix_testing.sum().tolist(),
'K': feature_names.sum().tolist(),
'y': training_labels.values.tolist(),
'X': training_data.values.tolist(),
'new_X': testing_data.values.tolist(),
}
modelfile = "OverfittingRoshanSharma.stan"
with open(modelfile, "w") as file: file.write("""
data {
int N; // the number of training observations
int N2; // the number of test observations
int K; // the number of features
int y[N]; // the response
matrix[N,K] X; // the model matrix
matrix[N2,K] new_X; // the matrix for the predicted values
}
parameters { // regression parameters
real alpha;
vector[K] beta;
}
transformed parameters {
vector[N] linpred = alpha + X * beta;
}
model {
alpha ~ cauchy(0, 10); // prior for the intercept following Gelman 2008
beta ~ student_t(1, 0, 0.03);
y ~ bernoulli_logit(linpred);
}
generated quantities { // y values predicted by the model
vector[N2] y_pred = alpha + new_X * beta;
}
""")
var_name_array = ["alpha"] + [f"beta[{i+1}]" for i in range(mdl_data["K"])]
var_name_combi = ["alpha", "beta"]
sm = CmdStanModel(stan_file = modelfile)
# maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]
plt.plot(optim[var_name_array[1:]].values[0])
# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb.variational_params_pd[var_name_array]
vb.variational_sample[var_name_array]
# Markov chain Monte Carlo
fit = sm.sample(
data = mdl_data, show_progress = True, chains = 4,
iter_sampling = 50000, iter_warmup = 10000, thin = 5
)
fit.draws().shape # iterations, chains, parameters
fit.summary().loc[var_name_array] # pandas DataFrame
print(fit.diagnose())
posterior = {k: fit.stan_variable(k) for k in var_name_combi}
az_trace = az.from_cmdstanpy(fit)
az.summary(az_trace, var_names = var_name_combi) # pandas DataFrame
az.plot_trace(az_trace, var_names = ["alpha"])
az.plot_forest(az_trace, var_names = ["beta"])
sample_pred = fit.stan_variable('y_pred')
# Tim Salimans model: DOES NOT WORK yet
# need to figure out how to marginalize all discrete params
| 31.577982
| 109
| 0.70889
| 498
| 3,442
| 4.714859
| 0.38755
| 0.02385
| 0.025554
| 0.026831
| 0.02385
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012803
| 0.160372
| 3,442
| 108
| 110
| 31.87037
| 0.799654
| 0.105752
| 0
| 0
| 0
| 0
| 0.273342
| 0.01793
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.026316
| 0
| 0.026316
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4dcaac9477532add98d53c114feaaa486ee4a47
| 4,206
|
py
|
Python
|
watcher.py
|
factabulous/matgrindr
|
6f5d6d20e34f9b13950d654cf70afdb2e46f5d1e
|
[
"Apache-2.0"
] | 1
|
2018-03-31T12:15:07.000Z
|
2018-03-31T12:15:07.000Z
|
watcher.py
|
factabulous/matgrindr
|
6f5d6d20e34f9b13950d654cf70afdb2e46f5d1e
|
[
"Apache-2.0"
] | null | null | null |
watcher.py
|
factabulous/matgrindr
|
6f5d6d20e34f9b13950d654cf70afdb2e46f5d1e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import threading
import os
import time
import mats
import sys
import requests
import traceback
import re
from util import debug, error
class MatsLoader(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the file to async load
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.queue = queue
self.filename = filename
self.daemon = True
def run(self):
try:
m = mats.Materials(self.filename)
self.queue.put( { 'mats': m._materials } )
except:
self.queue.put( { 'error': 'Failed to load materials ' + str(sys.exc_info()[0]) } )
class MatsLoaderRemote(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the cache file - we only read the remote file
if the cache is old (or missing)
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.filename = filename
self.queue = queue
self.daemon = True
self.integerRe = re.compile(r'^-?\d+$')
self.floatRe = re.compile(r'^-?\d+(\.\d+)?$')
self.arrayRe = re.compile(r'^\[.*\]$')
def need_refresh(self):
"""
Returns True if the local cache needs a refresh.
"""
if not os.path.exists(self.filename):
return True
mtime = os.path.getmtime(self.filename)
now = time.time()
return mtime < now - 24 * 3600 # Daily update
def array_splitter(self, value):
return [ x[1:-1] for x in value[1:-1].split(", ") ]
def detect(self, value):
"""
Looks at a data value and converts into an appropriate type
(maybe should look at using ast instead)
"""
if self.integerRe.match(value):
return int(value)
elif self.floatRe.match(value):
return float(value)
elif self.arrayRe.match(value):
return self.array_splitter(value)
else:
return value
def parse(self, text):
"""
Parse a string field containing all the data ina TSV
into an array of dicts. Mainly split out so we can test
"""
lines = text.replace("\r", "").split("\n")
fields = lines[0].split("\t")
res = []
for entry in lines[1:]:
values = entry.split("\t")
if len(values) < len(fields):
continue
v = {}
for k in range(0, len(fields)):
v[fields[k]] = self.detect(values[k])
res.append(v)
return res
def run(self):
try:
if self.need_refresh():
r = requests.get("https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0")
res = self.parse(r.text)
if res:
with open(self.filename, "wt") as cache_file:
json.dump(res, cache_file)
self.queue.put( { 'mats': res } )
debug("Async remote mats loader from tsv is completed {} entries".format(len(res)))
else:
error("Async remote mats loader failed - zero records")
else:
with open(self.filename, "rt") as cache_file:
res = json.load(cache_file)
self.queue.put( { 'mats': res } )
debug("loader from cache is completed {} entries".format(len(res)))
except:
self.queue.put( { 'error': 'Failed to load tsv materials ' + str(sys.exc_info()[0]) + ' ' + traceback.format_exc() } )
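# --- Illustrative, self-contained check of the TSV parser (an addition, not
# --- part of the original module). The cache filename is a placeholder; parse()
# --- never touches it.
if __name__ == "__main__":
    import queue
    loader = MatsLoaderRemote("mats_cache.json", queue.Queue())
    sample = "Name\tX\tY\tTypes\nIron\t12.5\t-3\t[\"raw\", \"common\"]\n"
    print(loader.parse(sample))
    # [{'Name': 'Iron', 'X': 12.5, 'Y': -3, 'Types': ['raw', 'common']}]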
| 33.11811
| 195
| 0.551831
| 507
| 4,206
| 4.522682
| 0.329389
| 0.0471
| 0.026167
| 0.020933
| 0.297427
| 0.297427
| 0.251199
| 0.251199
| 0.191888
| 0.191888
| 0
| 0.015048
| 0.336424
| 4,206
| 126
| 196
| 33.380952
| 0.806521
| 0.186876
| 0
| 0.2625
| 0
| 0.0125
| 0.131718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.125
| 0.0125
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4dccf62068146e1f5c5000f7700eb596a2140ec
| 1,706
|
py
|
Python
|
luoxia/pipelines.py
|
pighui/luoxia
|
24daa0f1595fd2b18a4b251acf77321ef98eb534
|
[
"MIT"
] | 2
|
2019-11-07T09:27:59.000Z
|
2019-11-16T11:36:12.000Z
|
luoxia/pipelines.py
|
pighui/luoxia
|
24daa0f1595fd2b18a4b251acf77321ef98eb534
|
[
"MIT"
] | 5
|
2021-03-31T19:15:38.000Z
|
2022-03-02T14:57:57.000Z
|
luoxia/pipelines.py
|
pighui/luoxia
|
24daa0f1595fd2b18a4b251acf77321ef98eb534
|
[
"MIT"
] | 1
|
2019-11-12T12:59:22.000Z
|
2019-11-12T12:59:22.000Z
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline
from luoxia import settings
class LuoxiaPipeline(object):
def process_item(self, item, spider):
title= item['title']
bookname = item['bookname']
titlename = item['titlename']
text = item['text']
path = "books/%s/%s/" % (title, bookname)
if not os.path.exists(path):
os.makedirs(path)
with open(path+titlename+'.txt', 'a', encoding='utf-8') as f:
f.write(text)
return item
class LuoxiaImagePipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for url in item['image_urls']:
yield Request(url, meta={'title': item['title'],
'bookname': item['bookname']})
def item_completed(self, results, item, info):
        # Store the paths of the successfully downloaded images in the item
item['images'] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
        # Create a directory for each book to hold all of its images
title = request.meta['title']
bookname = request.meta['bookname']
book_dir = os.path.join(settings.IMAGES_STORE, title +'/'+ bookname)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
        # Extract the file extension from the URL
try:
ext_name = request.url.split(".")[-1]
except:
ext_name = 'jpg'
        # Return the relative path of the stored image
return '%s/%s/%s.%s' % (title, bookname, bookname, ext_name)
| 32.807692
| 76
| 0.601407
| 208
| 1,706
| 4.865385
| 0.456731
| 0.077075
| 0.027668
| 0.043478
| 0.126482
| 0.126482
| 0.059289
| 0
| 0
| 0
| 0
| 0.00241
| 0.270223
| 1,706
| 52
| 77
| 32.807692
| 0.810442
| 0.141266
| 0
| 0.057143
| 0
| 0
| 0.081731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.114286
| 0
| 0.371429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4df00044c8b020894b3ff8a98bbdaaae75f9a17
| 6,949
|
py
|
Python
|
aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 5
|
2019-01-19T23:53:35.000Z
|
2022-01-29T14:04:31.000Z
|
aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 4
|
2020-09-26T01:25:36.000Z
|
2021-08-25T16:10:50.000Z
|
aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 7
|
2020-03-04T22:23:51.000Z
|
2021-07-13T14:05:46.000Z
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import numpy as np
import tensorflow as tf
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
'classes': tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions['classes'])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def _load_training_data(base_dir):
x_train = np.load(os.path.join(base_dir, 'train_data.npy'))
y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))
return x_train, y_train
def _load_testing_data(base_dir):
x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))
y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))
return x_test, y_test
def _parse_args():
parser = argparse.ArgumentParser()
# Data, model, and output directories.
# model_dir is always passed in from SageMaker.
# By default this is a S3 path under the default bucket.
parser.add_argument('--model_dir', type=str)
parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
parser.add_argument('--hosts', type=list, default=json.loads(os.environ.get('SM_HOSTS')))
parser.add_argument('--current-host', type=str, default=os.environ.get('SM_CURRENT_HOST'))
return parser.parse_known_args()
def serving_input_fn():
inputs = {'x': tf.placeholder(tf.float32, [None, 784])}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
if __name__ == '__main__':
args, _ = _parse_args()
train_data, train_labels = _load_training_data(args.train)
eval_data, eval_labels = _load_testing_data(args.train)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=args.model_dir)
# Set up logging for predictions
# Log the values in the 'Softmax' tensor with label 'probabilities'
tensors_to_log = {'probabilities': 'softmax_tensor'}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True
)
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False
)
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=20000)
eval_spec = tf.estimator.EvalSpec(eval_input_fn)
tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)
if args.current_host == args.hosts[0]:
mnist_classifier.export_savedmodel(args.sm_model_dir, serving_input_fn)
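# A minimal sketch of how the SageMaker-style defaults read by _parse_args()
# could be satisfied for a local smoke test; every path and the host name below
# are assumptions for illustration, not SageMaker defaults.
#
#     import os
#     os.environ.setdefault('SM_MODEL_DIR', '/tmp/mnist/model')
#     os.environ.setdefault('SM_CHANNEL_TRAINING', '/tmp/mnist/data')
#     os.environ.setdefault('SM_HOSTS', '["algo-1"]')   # parsed with json.loads above
#     os.environ.setdefault('SM_CURRENT_HOST', 'algo-1')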
| 37.160428
| 94
| 0.689164
| 991
| 6,949
| 4.665994
| 0.283552
| 0.031142
| 0.048443
| 0.060554
| 0.293253
| 0.234429
| 0.218426
| 0.190311
| 0.156574
| 0.128893
| 0
| 0.031425
| 0.203195
| 6,949
| 186
| 95
| 37.360215
| 0.803684
| 0.361923
| 0
| 0.065934
| 0
| 0
| 0.059835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054945
| false
| 0
| 0.065934
| 0
| 0.197802
| 0.010989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4e1891a34dd9a85739bf4476b3f8a83de7af2b1
| 6,002
|
py
|
Python
|
common/util/autoware_debug_tools/scripts/stop_reason2pose.py
|
loop-perception/AutowareArchitectureProposal.iv
|
5d8dff0db51634f0c42d2a3e87ca423fbee84348
|
[
"Apache-2.0"
] | 12
|
2020-09-25T08:52:59.000Z
|
2020-10-05T02:39:31.000Z
|
common/util/autoware_debug_tools/scripts/stop_reason2pose.py
|
loop-perception/AutowareArchitectureProposal.iv
|
5d8dff0db51634f0c42d2a3e87ca423fbee84348
|
[
"Apache-2.0"
] | 7
|
2021-12-13T04:28:48.000Z
|
2022-03-14T13:53:15.000Z
|
common/util/autoware_debug_tools/scripts/stop_reason2pose.py
|
taikitanaka3/AutowareArchitectureProposal.iv
|
0d47ea532118c98458516a8c83fbdab3d27c6231
|
[
"Apache-2.0"
] | 9
|
2020-09-27T05:27:09.000Z
|
2020-10-08T03:14:25.000Z
|
#! /usr/bin/env python3
# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import sys
from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener
class StopReason2PoseNode(Node):
def __init__(self, options):
super().__init__("stop_reason2pose_node")
self._options = options
self._sub_pose = self.create_subscription(
StopReasonArray, self._options.topic_name, self._on_stop_reasons, 1
)
self._pub_pose_map = {}
self._idx_map = {}
self._pose_map = {}
self._self_pose_listener = SelfPoseListener()
self.timer = self.create_timer((1.0 / 100), self._self_pose_listener.get_current_pose)
def _on_stop_reasons(self, msg):
for stop_reason in msg.stop_reasons:
snake_case_stop_reason = pascal2snake(stop_reason.reason)
if len(stop_reason.stop_factors) == 0:
self.get_logger().warn("stop_factor is null")
return
for stop_factor in stop_reason.stop_factors:
pose = PoseStamped()
pose.header = msg.header
pose.pose = stop_factor.stop_pose
# Get nearest pose
th_dist = 1.0
nearest_pose_id = self._get_nearest_pose_id(
snake_case_stop_reason, pose.pose, th_dist
)
if nearest_pose_id:
self._update_pose(snake_case_stop_reason, pose.pose, nearest_pose_id)
pose_id = nearest_pose_id
else:
pose_id = self._register_pose(snake_case_stop_reason, pose.pose)
pose_topic_name = "{snake_case_stop_reason}_{pose_id}".format(**locals())
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
if pose_topic_name not in self._pub_pose_map:
self._pub_pose_map[pose_topic_name] = self.create_publisher(
PoseStamped, topic_ns + pose_topic_name, 1
)
self._pub_pose_map[pose_topic_name].publish(pose)
# Publish nearest stop_reason without number
nearest_pose = PoseStamped()
nearest_pose.header = msg.header
nearest_pose.pose = self._get_nearest_pose_in_array(
stop_reason, self._self_pose_listener.self_pose
)
if nearest_pose.pose:
if snake_case_stop_reason not in self._pub_pose_map:
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
self._pub_pose_map[snake_case_stop_reason] = self.create_publisher(
PoseStamped, topic_ns + snake_case_stop_reason, 1
)
self._pub_pose_map[snake_case_stop_reason].publish(nearest_pose)
def _get_nearest_pose_in_array(self, stop_reason, self_pose):
poses = [stop_factor.stop_pose for stop_factor in stop_reason.stop_factors]
if not poses:
return None
        distances = [StopReason2PoseNode.calc_distance2d(p, self_pose) for p in poses]
nearest_idx = np.argmin(distances)
return poses[nearest_idx]
def _find_nearest_pose_id(self, name, pose):
if name not in self._idx_map:
self._idx_map[name] = index.Index()
return self._idx_map[name].nearest(StopReason2PoseNode.pose2boundingbox(pose), 1)
def _get_nearest_pose_id(self, name, pose, th_dist):
nearest_pose_ids = list(self._find_nearest_pose_id(name, pose))
if not nearest_pose_ids:
return None
nearest_pose_id = nearest_pose_ids[0]
nearest_pose = self._get_pose(name, nearest_pose_id)
if not nearest_pose:
return None
dist = StopReason2PoseNode.calc_distance2d(pose, nearest_pose)
if dist > th_dist:
return None
return nearest_pose_id
def _get_pose(self, name, pose_id):
if name not in self._pose_map:
return None
return self._pose_map[name][pose_id]
def _update_pose(self, name, pose, pose_id):
        self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
def _register_pose(self, name, pose):
if name not in self._pose_map:
self._pose_map[name] = {}
pose_id = len(self._pose_map[name]) + 1
self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
return pose_id
@staticmethod
def calc_distance2d(pose1, pose2):
p1 = pose1.position
p2 = pose2.position
return math.hypot(p1.x - p2.x, p1.y - p2.y)
@staticmethod
def pose2boundingbox(pose):
return [pose.position.x, pose.position.y, pose.position.x, pose.position.y]
def main(args):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument("topic_name", type=str)
ns = parser.parse_args(args)
stop_reason2pose_node = StopReason2PoseNode(ns)
rclpy.spin(stop_reason2pose_node)
stop_reason2pose_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main(sys.argv[1:])
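# A minimal usage sketch: the script takes a single positional argument, the
# StopReasonArray topic to subscribe to. The topic below is a hypothetical
# example, not one defined in this file:
#
#     python3 stop_reason2pose.py /planning/stop_reasons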
| 35.72619
| 94
| 0.653615
| 774
| 6,002
| 4.724806
| 0.228682
| 0.075198
| 0.039103
| 0.04676
| 0.255674
| 0.232158
| 0.152858
| 0.091332
| 0.03883
| 0.03883
| 0
| 0.012745
| 0.267911
| 6,002
| 167
| 95
| 35.94012
| 0.819527
| 0.105465
| 0
| 0.111111
| 0
| 0
| 0.031758
| 0.024846
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094017
| false
| 0
| 0.094017
| 0.008547
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4e335bc88c686cd971644ea0114064bbbe14924
| 1,551
|
py
|
Python
|
US Flag.py
|
Code-Master1234/Turtle_Flags_File_Hub
|
d99f8bc05c4f2280f8c91cdda14005ef9c5d6236
|
[
"MIT"
] | null | null | null |
US Flag.py
|
Code-Master1234/Turtle_Flags_File_Hub
|
d99f8bc05c4f2280f8c91cdda14005ef9c5d6236
|
[
"MIT"
] | null | null | null |
US Flag.py
|
Code-Master1234/Turtle_Flags_File_Hub
|
d99f8bc05c4f2280f8c91cdda14005ef9c5d6236
|
[
"MIT"
] | null | null | null |
import turtle as t
def rectangle(horizontal, vertical, color):
t.pendown()
t.pensize(1)
t.color(color)
t.begin_fill()
for counter in range(2):
t.forward(horizontal)
t.right(90)
t.forward(vertical)
t.right(90)
t.end_fill()
t.penup()
def star(length, points, color):
sumangle = ((points*2)-2) * 180
oneangle = sumangle/points
smallangle = oneangle/3.5
bigangle = oneangle - smallangle
t.color(color)
t.pendown()
t.begin_fill()
t.penup()
for counter in range(points):
t.forward(length)
t.left(smallangle)
t.forward(length)
t.left(bigangle)
t.end_fill()
t.penup()
gotoy = 222
t.speed(0)
t.setup(988,520)
t.goto(494,260)
t.pendown()
for counter in range(7):
t.setheading(-90)
rectangle(40,988,'#B22234')
t.setheading(-90)
t.forward(80)
t.penup()
t.setheading(0)
t.goto(-494,260)
t.pendown()
rectangle(494,280,'#3C3B6E')
t.goto(-474,245)
for counter in range(4):
for counter in range(6):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.penup()
t.goto(-434,gotoy)
gotoy = gotoy - 28
t.pendown()
for counter in range(5):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.goto(-476,gotoy)
gotoy = gotoy - 28
for counter in range(6):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.penup()
t.hideturtle()
| 19.884615
| 44
| 0.550613
| 218
| 1,551
| 3.899083
| 0.288991
| 0.075294
| 0.098824
| 0.14
| 0.348235
| 0.270588
| 0.176471
| 0.176471
| 0.176471
| 0.176471
| 0
| 0.089732
| 0.30303
| 1,551
| 77
| 45
| 20.142857
| 0.696577
| 0
| 0
| 0.560606
| 0
| 0
| 0.019674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.015152
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4e4309129dbca39258000122d1486ad109432d7
| 1,107
|
py
|
Python
|
linked-list/delete_zero_sum_nodes.py
|
bryanlimy/technical-interview
|
f888a4fb2bc4d34dda6cd74b6e4215f46d5ce6d6
|
[
"MIT"
] | 3
|
2020-01-20T05:12:52.000Z
|
2022-02-09T15:21:42.000Z
|
linked-list/delete_zero_sum_nodes.py
|
bryanlimy/technical-interview
|
f888a4fb2bc4d34dda6cd74b6e4215f46d5ce6d6
|
[
"MIT"
] | null | null | null |
linked-list/delete_zero_sum_nodes.py
|
bryanlimy/technical-interview
|
f888a4fb2bc4d34dda6cd74b6e4215f46d5ce6d6
|
[
"MIT"
] | null | null | null |
# Given a linked list, remove consecutive nodes that sums up to zero
# https://www.careercup.com/question?id=5717797377146880
from util import *
def remove_zero_sum(head):
start = head
new = None
root = None
while start:
end = start.next
total = start.value
zero = False
while end:
total += end.value
if total == 0:
zero = True
start = end
break
end = end.next
if not zero and not new:
new = Node(start.value)
root = new
        elif not zero and new:
            new.next = Node(start.value)
            new = new.next  # advance the tail so later kept values are not overwritten
start = start.next
return root
if __name__ == "__main__":
s1 = [6, -6, 8, 4, -12, 9, 8, -8]
s2 = [4, 6 - 10, 8, 9, 10, -19, 10, -18, 20, 25]
s3 = [2, 3, -5, 10, 10, -5, -5, 20, 5, -5]
samples = [s1,s2,s3]
for sample in samples:
head = create_linked_list(sample)
print(linked_list_to_list(head))
result = remove_zero_sum(head)
print(linked_list_to_list(result))
print("\n")
| 26.357143
| 68
| 0.525745
| 153
| 1,107
| 3.673203
| 0.457516
| 0.071174
| 0.046263
| 0.060498
| 0.074733
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089362
| 0.363144
| 1,107
| 41
| 69
| 27
| 0.707801
| 0.109304
| 0
| 0
| 0
| 0
| 0.010173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.088235
| 0.088235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4e708b09e82bdf3236441c1829a0dda6f660d73
| 2,383
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/maps/custom.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 1
|
2019-11-15T17:28:05.000Z
|
2019-11-15T17:28:05.000Z
|
src/azure-cli/azure/cli/command_modules/maps/custom.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 2
|
2021-01-15T09:24:07.000Z
|
2021-01-15T09:30:10.000Z
|
src/azure-cli/azure/cli/command_modules/maps/custom.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 1
|
2019-11-25T19:33:05.000Z
|
2019-11-25T19:33:05.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from knack.prompting import prompt_y_n
from knack.util import CLIError
from azure.mgmt.maps.models import (
MapsAccountCreateParameters,
Sku)
ACCOUNT_LOCATION = 'global'
logger = get_logger(__name__)
def create_account(client, resource_group_name, account_name, sku_name='S0', tags=None, force=None):
terms = 'By creating an Azure Maps account, you agree that you have read and agree to the ' \
'\nLicense (https://azure.microsoft.com/support/legal/) and ' \
'\nPrivacy Statement (https://privacy.microsoft.com/privacystatement).'
hint = 'Please select.'
client_denied_terms = 'You must agree to the License and Privacy Statement to create an account.'
# Show ToS message to the user
logger.warning(terms)
# Prompt yes/no for the user, if --force parameter is not passed in.
if not force:
option = prompt_y_n(hint)
if not option:
raise CLIError(client_denied_terms)
# Submit query
sku = Sku(name=sku_name)
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=sku, tags=tags)
return client.create_or_update(resource_group_name, account_name, maps_account_create_params)
def list_accounts(client, resource_group_name=None):
# Retrieve accounts via subscription
if resource_group_name is None:
return client.list_by_subscription()
# Retrieve accounts via resource group
return client.list_by_resource_group(resource_group_name)
def generic_update_account(instance, sku_name=None, tags=None):
# Pre-populate with old instance
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=instance.sku,
tags=instance.tags)
# Update fields with new parameter values
if sku_name:
maps_account_create_params.sku.name = sku_name
if tags:
maps_account_create_params.tags = tags
return maps_account_create_params
| 40.389831
| 107
| 0.669744
| 289
| 2,383
| 5.304498
| 0.377163
| 0.059361
| 0.066536
| 0.09002
| 0.174821
| 0.123288
| 0.099152
| 0.099152
| 0.099152
| 0
| 0
| 0.000517
| 0.188418
| 2,383
| 58
| 108
| 41.086207
| 0.792141
| 0.246748
| 0
| 0
| 0
| 0
| 0.170499
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.117647
| 0
| 0.323529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4e813b035bc0fbeece6fd5910d8e62ac5025f2b
| 5,558
|
py
|
Python
|
examples/wsdm2022/run_seqreco_B.py
|
Leavingseason/wsdm2022-seqrecsys
|
4659edb93a96300d7a52bb0e1b9c912e3fae2a76
|
[
"MIT"
] | null | null | null |
examples/wsdm2022/run_seqreco_B.py
|
Leavingseason/wsdm2022-seqrecsys
|
4659edb93a96300d7a52bb0e1b9c912e3fae2a76
|
[
"MIT"
] | null | null | null |
examples/wsdm2022/run_seqreco_B.py
|
Leavingseason/wsdm2022-seqrecsys
|
4659edb93a96300d7a52bb0e1b9c912e3fae2a76
|
[
"MIT"
] | null | null | null |
import sys
import os
from tempfile import TemporaryDirectory
import numpy as np
import tensorflow.compat.v1 as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from recommenders.utils.timer import Timer
from recommenders.utils.constants import SEED
from recommenders.models.deeprec.deeprec_utils import (
prepare_hparams
)
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab
from recommenders.datasets.download_utils import maybe_download
from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel
# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel
# from recommenders.models.deeprec.models.sequential.caser import CaserModel as SeqModel
# from recommenders.models.deeprec.models.sequential.gru4rec import GRU4RecModel as SeqModel
# from recommenders.models.deeprec.models.sequential.sum import SUMModel as SeqModel
#from recommenders.models.deeprec.models.sequential.nextitnet import NextItNetModel
from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
#from recommenders.models.deeprec.io.nextitnet_iterator import NextItNetIterator
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
yaml_file = '/home/jialia/wsdm/src/recommenders/examples/wsdm2022/sli_rec_B.yaml'
RANDOM_SEED = SEED # Set None for non-deterministic result
# data_path = os.path.join("tests", "resources", "deeprec", "slirec")
# data_path = '/home/jialia/wsdm/seq_datasets/B_full_feature_v2'
data_path = sys.argv[1]
print(os.path.abspath(data_path))  ## resolved relative to the directory the command is run from
# for test
train_file = os.path.join(data_path, r'train_instances.txt')
valid_file = os.path.join(data_path, r'valid_instances.txt')
test_file = os.path.join(data_path, r'valid.tsv')
pred_file = os.path.join(data_path, r'inter_test.tsv')
final_pred_file = os.path.join(data_path, r'final_test.tsv')
user_vocab = os.path.join(data_path, r'user_vocab.pkl')
item_vocab = os.path.join(data_path, r'item_vocab.pkl')
cate_vocab = os.path.join(data_path, r'category_vocab.pkl')
output_file = os.path.join(data_path, r'inter_test_output.txt')
submit_file = os.path.join(data_path, r'final_test_output.txt')
train_num_ngs = 9 # number of negative instances with a positive instance for training
valid_num_ngs = 9 # number of negative instances with a positive instance for validation
test_num_ngs = 9 # number of negative instances with a positive instance for testing
_create_vocab(
[train_file, valid_file],
user_vocab, item_vocab, cate_vocab
)
### NOTE:
### remember to use `_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)` to generate the user_vocab, item_vocab and cate_vocab files, if you are using your own dataset rather than using our demo Amazon dataset.
hparams = prepare_hparams(yaml_file,
# user_dropout=False,
embed_l2=0.,
layer_l2=0.,
enable_BN=True, ##-- True
learning_rate=0.001, # set to 0.01 if batch normalization is disable else 0.001
epochs=100000,
EARLY_STOP=40000,
batch_size=400,
show_step=5000,
MODEL_DIR=os.path.join(data_path, "model/"),
SUMMARIES_DIR=os.path.join(data_path, "summary/"),
user_vocab=user_vocab,
item_vocab=item_vocab,
cate_vocab=cate_vocab,
need_sample=False,
train_num_ngs=train_num_ngs, # provides the number of negative instances for each positive instance for loss computation.
loss='log_loss', #'log_loss', 'softmax'
max_seq_length=50,
cont_feat_len=85,
use_cont_feat=False,
init_item_emb=False,
shuffle=True
)
print(hparams.values)
input_creator = SequentialIterator
model = SeqModel(hparams, input_creator, seed=RANDOM_SEED)
# model.load_model(os.path.join(data_path, "model_20220118_20k_0.8923", 'step_20000'))
with Timer() as train_time:
model = model.fit(train_file, valid_file, valid_num_ngs=9, eval_metric='auc')
print('Time cost for training is {0:.2f} mins'.format(train_time.interval/60.0))
### model = model.fit(test_file, test_file, valid_num_ngs=9, eval_metric='auc') ##-- quick test
model.load_model(os.path.join(data_path, "model", 'best_model'))
res_syn = model.run_eval(test_file, num_ngs=9)
print(res_syn)
model.predict(pred_file, output_file)
model.predict(final_pred_file, submit_file)
# print('Job finished. B, continue training = 20k, seq=50')
# print('Job finished. B_v2, epoch=50k, seq=100')
## ASVD: 0.867497
## GRU: 0.877529
## SLi-Rec: 0.892736
## B_v4: 0.8937
print("Job:B_full_feature_v2, with BN, no cont feat, seq=50, shuffle=True")
## B_full_feature_v2 no cont_feat, with BN
##5k: 0.8778
##10k: 0.8827
##20k: 0.8848
##25k: 0.8824
##35k: 0.8878
##40k: 0.8903
##45k: 0.8876
##50k: 0.8925
##55k: 0.8903
##60k: 0.8894
##65k: 0.8904
##70k: 0.8814
##75k: 0.8896
##80k: 0.8871
##85k: 0.8920
## with shuffle:
##5k: 0.8793
##10k: 0.8884
##15k: 0.8898
##20k: 0.8923
##25k: 0.8908
##30k: 0.8895
##35k: 0.8888
##40k: 0.8913
##45k: 0.8909
##50k: 0.8876
##65k: 0.8881
| 37.302013
| 221
| 0.690896
| 803
| 5,558
| 4.579078
| 0.332503
| 0.039162
| 0.040794
| 0.053304
| 0.299429
| 0.276312
| 0.251292
| 0.208594
| 0.100626
| 0.045689
| 0
| 0.067206
| 0.199532
| 5,558
| 149
| 222
| 37.302013
| 0.759272
| 0.368658
| 0
| 0
| 0
| 0
| 0.12291
| 0.038428
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.164384
| 0
| 0.164384
| 0.09589
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4e8209a5a512c6f4d48304a062ee3d210b0266c
| 11,222
|
py
|
Python
|
ctypesgen/ctypedescs.py
|
fgrie/ctypesgen
|
bc1627648a1479cefd1a2c3c261dd0471358cfff
|
[
"BSD-2-Clause"
] | null | null | null |
ctypesgen/ctypedescs.py
|
fgrie/ctypesgen
|
bc1627648a1479cefd1a2c3c261dd0471358cfff
|
[
"BSD-2-Clause"
] | null | null | null |
ctypesgen/ctypedescs.py
|
fgrie/ctypesgen
|
bc1627648a1479cefd1a2c3c261dd0471358cfff
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
ctypesgen.ctypedescs contains classes to represent a C type. All of these
classes are subclasses of CtypesType.
Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
completely independent of the parser module.
The most important method of CtypesType and its subclasses is the py_string
method. str(ctype) returns a string which, when evaluated in the wrapper
at runtime, results in a ctypes type object.
For example, a CtypesType
representing an array of four integers could be created using:
>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)
str(ctype) would evaluate to "c_int * 4".
"""
import warnings
__docformat__ = "restructuredtext"
ctypes_type_map = {
# typename signed longs
("void", True, 0): "None",
("int", True, 0): "c_int",
("int", False, 0): "c_uint",
("int", True, 1): "c_long",
("int", False, 1): "c_ulong",
("char", True, 0): "c_char",
("char", False, 0): "c_ubyte",
("short", True, 0): "c_short",
("short", False, 0): "c_ushort",
("float", True, 0): "c_float",
("double", True, 0): "c_double",
("double", True, 1): "c_longdouble",
("int8_t", True, 0): "c_int8",
("__int8", True, 0): "c_int8",
("int16_t", True, 0): "c_int16",
("__int16", True, 0): "c_int16",
("int32_t", True, 0): "c_int32",
("__int32", True, 0): "c_int32",
("int64_t", True, 0): "c_int64",
("__int64", True, 0): "c_int64",
("uint8_t", True, 0): "c_uint8",
("uint16_t", True, 0): "c_uint16",
("uint32_t", True, 0): "c_uint32",
("uint64_t", True, 0): "c_uint64",
("_Bool", True, 0): "c_bool",
}
ctypes_type_map_python_builtin = {
("int", True, 2): "c_longlong",
("int", False, 2): "c_ulonglong",
("size_t", True, 0): "c_size_t",
("apr_int64_t", True, 0): "c_int64",
("off64_t", True, 0): "c_int64",
("apr_uint64_t", True, 0): "c_uint64",
("wchar_t", True, 0): "c_wchar",
("ptrdiff_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("ssize_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("va_list", True, 0): "c_void_p",
}
# This protocol is used for walking type trees.
class CtypesTypeVisitor(object):
def visit_struct(self, struct):
pass
def visit_enum(self, enum):
pass
def visit_typedef(self, name):
pass
def visit_error(self, error, cls):
pass
def visit_identifier(self, identifier):
# This one comes from inside ExpressionNodes. There may be
# ExpressionNode objects in array count expressions.
pass
def visit_type_and_collect_info(ctype):
class Visitor(CtypesTypeVisitor):
def visit_struct(self, struct):
structs.append(struct)
def visit_enum(self, enum):
enums.append(enum)
def visit_typedef(self, typedef):
typedefs.append(typedef)
def visit_error(self, error, cls):
errors.append((error, cls))
def visit_identifier(self, identifier):
identifiers.append(identifier)
structs = []
enums = []
typedefs = []
errors = []
identifiers = []
v = Visitor()
ctype.visit(v)
return structs, enums, typedefs, errors, identifiers
# Remove one level of indirection from function pointer; needed for typedefs
# and function parameters.
def remove_function_pointer(t):
if type(t) == CtypesPointer and type(t.destination) == CtypesFunction:
return t.destination
elif type(t) == CtypesPointer:
t.destination = remove_function_pointer(t.destination)
return t
else:
return t
class CtypesType(object):
def __init__(self):
super(CtypesType, self).__init__()
self.errors = []
def __repr__(self):
return '<Ctype (%s) "%s">' % (type(self).__name__, self.py_string())
def error(self, message, cls=None):
self.errors.append((message, cls))
def visit(self, visitor):
for error, cls in self.errors:
visitor.visit_error(error, cls)
class CtypesSimple(CtypesType):
"""Represents a builtin type, like "char" or "int"."""
def __init__(self, name, signed, longs):
super(CtypesSimple, self).__init__()
self.name = name
self.signed = signed
self.longs = longs
def py_string(self, ignore_can_be_ctype=None):
return ctypes_type_map[(self.name, self.signed, self.longs)]
class CtypesSpecial(CtypesType):
def __init__(self, name):
super(CtypesSpecial, self).__init__()
self.name = name
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesTypedef(CtypesType):
"""Represents a type defined by a typedef."""
def __init__(self, name):
super(CtypesTypedef, self).__init__()
self.name = name
def visit(self, visitor):
if not self.errors:
visitor.visit_typedef(self.name)
super(CtypesTypedef, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesBitfield(CtypesType):
def __init__(self, base, bitfield):
super(CtypesBitfield, self).__init__()
self.base = base
self.bitfield = bitfield
def visit(self, visitor):
self.base.visit(visitor)
super(CtypesBitfield, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.base.py_string()
class CtypesPointer(CtypesType):
def __init__(self, destination, qualifiers):
super(CtypesPointer, self).__init__()
self.destination = destination
self.qualifiers = qualifiers
def visit(self, visitor):
if self.destination:
self.destination.visit(visitor)
super(CtypesPointer, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "POINTER(%s)" % self.destination.py_string()
class CtypesArray(CtypesType):
def __init__(self, base, count):
super(CtypesArray, self).__init__()
self.base = base
self.count = count
def visit(self, visitor):
self.base.visit(visitor)
if self.count:
self.count.visit(visitor)
super(CtypesArray, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
if self.count is None:
return "POINTER(%s)" % self.base.py_string()
if type(self.base) == CtypesArray:
return "(%s) * int(%s)" % (self.base.py_string(), self.count.py_string(False))
else:
return "%s * int(%s)" % (self.base.py_string(), self.count.py_string(False))
class CtypesNoErrorCheck(object):
def py_string(self, ignore_can_be_ctype=None):
return "None"
def __bool__(self):
return False
__nonzero__ = __bool__
class CtypesPointerCast(object):
def __init__(self, target):
self.target = target
def py_string(self, ignore_can_be_ctype=None):
return "lambda v,*a : cast(v, {})".format(self.target.py_string())
class CtypesFunction(CtypesType):
def __init__(self, restype, parameters, variadic, attrib=dict()):
super(CtypesFunction, self).__init__()
self.restype = restype
self.errcheck = CtypesNoErrorCheck()
# Don't allow POINTER(None) (c_void_p) as a restype... causes errors
# when ctypes automagically returns it as an int.
# Instead, convert to POINTER(c_void). c_void is not a ctypes type,
# you can make it any arbitrary type.
if (
type(self.restype) == CtypesPointer
and type(self.restype.destination) == CtypesSimple
and self.restype.destination.name == "void"
):
# we will provide a means of converting this to a c_void_p
self.restype = CtypesPointer(CtypesSpecial("c_ubyte"), ())
self.errcheck = CtypesPointerCast(CtypesSpecial("c_void_p"))
# Return "String" instead of "POINTER(c_char)"
if self.restype.py_string() == "POINTER(c_char)":
if "const" in self.restype.qualifiers:
self.restype = CtypesSpecial("c_char_p")
else:
self.restype = CtypesSpecial("String")
self.argtypes = [remove_function_pointer(p) for p in parameters]
self.variadic = variadic
self.attrib = attrib
def visit(self, visitor):
self.restype.visit(visitor)
for a in self.argtypes:
a.visit(visitor)
super(CtypesFunction, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "CFUNCTYPE(UNCHECKED(%s), %s)" % (
self.restype.py_string(),
", ".join([a.py_string() for a in self.argtypes]),
)
last_tagnum = 0
def anonymous_struct_tagnum():
global last_tagnum
last_tagnum += 1
return last_tagnum
def fmt_anonymous_struct_tag(num):
return "anon_%d" % num
def anonymous_struct_tag():
return fmt_anonymous_struct_tag(anonymous_struct_tagnum())
class CtypesStruct(CtypesType):
def __init__(self, tag, attrib, variety, members, src=None):
super(CtypesStruct, self).__init__()
self.tag = tag
self.attrib = attrib
self.variety = variety # "struct" or "union"
self.members = members
if type(self.tag) == int or not self.tag:
if type(self.tag) == int:
self.tag = fmt_anonymous_struct_tag(self.tag)
else:
self.tag = anonymous_struct_tag()
self.anonymous = True
else:
self.anonymous = False
if self.members == None:
self.opaque = True
else:
self.opaque = False
self.src = src
def get_required_types(self):
types = super(CtypesStruct, self).get_required_types()
types.add((self.variety, self.tag))
return types
def visit(self, visitor):
visitor.visit_struct(self)
if not self.opaque:
for name, ctype in self.members:
ctype.visit(visitor)
super(CtypesStruct, self).visit(visitor)
def get_subtypes(self):
if self.opaque:
return set()
else:
return set([m[1] for m in self.members])
def py_string(self, ignore_can_be_ctype=None):
return "%s_%s" % (self.variety, self.tag)
last_tagnum = 0
def anonymous_enum_tag():
global last_tagnum
last_tagnum += 1
return "anon_%d" % last_tagnum
class CtypesEnum(CtypesType):
def __init__(self, tag, enumerators, src=None):
super(CtypesEnum, self).__init__()
self.tag = tag
self.enumerators = enumerators
if not self.tag:
self.tag = anonymous_enum_tag()
self.anonymous = True
else:
self.anonymous = False
if self.enumerators == None:
self.opaque = True
else:
self.opaque = False
self.src = src
def visit(self, visitor):
visitor.visit_enum(self)
super(CtypesEnum, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "enum_%s" % self.tag
| 28.848329
| 90
| 0.621012
| 1,400
| 11,222
| 4.753571
| 0.175714
| 0.008715
| 0.023441
| 0.015778
| 0.310744
| 0.212021
| 0.164538
| 0.154621
| 0.1429
| 0.1429
| 0
| 0.011639
| 0.257352
| 11,222
| 388
| 91
| 28.92268
| 0.786897
| 0.125379
| 0
| 0.279245
| 0
| 0
| 0.070947
| 0.002453
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192453
| false
| 0.018868
| 0.003774
| 0.05283
| 0.350943
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4e9e1912fd06e0dea7f2e62b354d4050bf65bf1
| 1,769
|
py
|
Python
|
app/volume/admin_process.py
|
cleve/varidb
|
fc1b10aa4d708cee1c83909f10773948cee0c539
|
[
"Apache-2.0"
] | null | null | null |
app/volume/admin_process.py
|
cleve/varidb
|
fc1b10aa4d708cee1c83909f10773948cee0c539
|
[
"Apache-2.0"
] | 6
|
2020-11-05T02:18:15.000Z
|
2022-03-12T00:50:09.000Z
|
app/volume/admin_process.py
|
cleve/pulzar
|
fc1b10aa4d708cee1c83909f10773948cee0c539
|
[
"Apache-2.0"
] | null | null | null |
from pulzarutils.utils import Utils
from pulzarutils.utils import Constants
from pulzarutils.messenger import Messenger
from pulzarcore.core_db import DB
class AdminProcess:
"""Handle admin operations from manage
"""
def __init__(self, logger):
self.TAG = self.__class__.__name__
self.logger = logger
self.utils = Utils()
self.messenger = Messenger()
self.mark_of_local_verification = b'varidb_execute_file_verification'
def process_request(self, url_path):
"""Get request type, checking for key value.
"""
regex_result = self.utils.get_search_regex(
url_path, Constants.RE_ADMIN)
if regex_result:
try:
call_path_list = regex_result.groups()[0].split('/')
call_path_list = [x for x in call_path_list if x != '']
# All nodes
if len(call_path_list) == 1 and call_path_list[0] == 'start_backup':
db_backup = DB(Constants.DB_BACKUP)
db_backup.update_or_insert_value(
self.mark_of_local_verification, b'1')
self.messenger.code_type = Constants.BACKUP_SCHEDULED
self.messenger.set_message = 'backup scheduled'
except Exception as err:
self.logger.exception('{}:{}'.format(self.TAG, err))
self.messenger.code_type = Constants.PULZAR_ERROR
self.messenger.set_message = str(err)
self.messenger.mark_as_failed()
else:
self.messenger.code_type = Constants.USER_ERROR
self.messenger.set_message = 'wrong request'
self.messenger.mark_as_failed()
return self.messenger
| 38.456522
| 84
| 0.611645
| 203
| 1,769
| 5.029557
| 0.389163
| 0.127326
| 0.058766
| 0.061704
| 0.246817
| 0.054848
| 0
| 0
| 0
| 0
| 0
| 0.003247
| 0.303561
| 1,769
| 45
| 85
| 39.311111
| 0.825487
| 0.05766
| 0
| 0.058824
| 0
| 0
| 0.048309
| 0.019324
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4ea75a1746392a1bad32c927e9dd06c16722c29
| 2,767
|
py
|
Python
|
tests/ssg_test_suite/profile.py
|
fduthilleul/scap-security-guide
|
f9b67869600f6c20dcb0ba83801578cec1a51bba
|
[
"BSD-3-Clause"
] | null | null | null |
tests/ssg_test_suite/profile.py
|
fduthilleul/scap-security-guide
|
f9b67869600f6c20dcb0ba83801578cec1a51bba
|
[
"BSD-3-Clause"
] | null | null | null |
tests/ssg_test_suite/profile.py
|
fduthilleul/scap-security-guide
|
f9b67869600f6c20dcb0ba83801578cec1a51bba
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2
from __future__ import print_function
import atexit
import logging
import sys
import ssg_test_suite.oscap
import ssg_test_suite.virt
from ssg_test_suite.rule import get_viable_profiles
from ssg_test_suite.virt import SnapshotStack
logging.getLogger(__name__).addHandler(logging.NullHandler())
def perform_profile_check(options):
"""Perform profile check.
    Iterate over the profiles in the datastream and scan the unaltered VM with
    every profile according to the input. Also perform a remediation run.
    The return value is not defined; the textual output and the generated
    reports are the result.
"""
dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
options.domain_name)
if dom is None:
sys.exit(1)
snapshot_stack = SnapshotStack(dom)
atexit.register(snapshot_stack.clear)
snapshot_stack.create('origin')
ssg_test_suite.virt.start_domain(dom)
domain_ip = ssg_test_suite.virt.determine_ip(dom)
has_worked = False
profiles = get_viable_profiles(options.target,
options.datastream,
options.benchmark_id)
if len(profiles) > 1:
snapshot_stack.create('profile')
for profile in profiles:
logging.info("Evaluation of profile {0}.".format(profile))
has_worked = True
runner = options.remediate_using
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'initial',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'remediation',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'final',
options.datastream,
options.benchmark_id,
runner=runner)
snapshot_stack.revert(delete=False)
if not has_worked:
logging.error("Nothing has been tested!")
snapshot_stack.delete()
# depending on number of profiles we have either "origin" snapshot
# still to be reverted (multiple profiles) or we are reverted
# completely (only one profile was run)
| 38.971831
| 76
| 0.553668
| 272
| 2,767
| 5.411765
| 0.422794
| 0.047554
| 0.081522
| 0.054348
| 0.205163
| 0.181386
| 0.181386
| 0.149457
| 0.149457
| 0.120924
| 0
| 0.00236
| 0.387423
| 2,767
| 70
| 77
| 39.528571
| 0.866077
| 0.155403
| 0
| 0.313725
| 0
| 0
| 0.037278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.156863
| 0
| 0.176471
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4eb283ef9b63b6cf71ae47aefac07d2d47fad48
| 4,218
|
py
|
Python
|
lib/wtforms/ext/appengine/fields.py
|
solidaritreebiz/Solidaritree
|
15cc2e10e4cec56eb4fe218166d4157fcce9bf8d
|
[
"MIT"
] | 43
|
2015-01-02T11:59:27.000Z
|
2021-06-03T18:47:09.000Z
|
wtforms/ext/appengine/fields.py
|
skorokithakis/landing-page
|
d800decb3a36519e2dd86826f660f5fa4f62cf5c
|
[
"MIT"
] | 1
|
2018-07-17T11:46:14.000Z
|
2018-07-17T11:46:14.000Z
|
wtforms/ext/appengine/fields.py
|
skorokithakis/landing-page
|
d800decb3a36519e2dd86826f660f5fa4f62cf5c
|
[
"MIT"
] | 6
|
2018-07-14T04:58:02.000Z
|
2018-08-06T18:02:27.000Z
|
import decimal
import operator
import warnings
from wtforms import fields, widgets
class ReferencePropertyField(fields.SelectFieldBase):
"""
A field for ``db.ReferenceProperty``. The list items are rendered in a
select.
:param reference_class:
A db.Model class which will be used to generate the default query
to make the list of items. If this is not specified, The `query`
property must be overridden before validation.
:param get_label:
If a string, use this attribute on the model class as the label
associated with each option. If a one-argument callable, this callable
        will be passed the model instance and is expected to return the label text.
Otherwise, the model object's `__str__` or `__unicode__` will be used.
:param allow_blank:
If set to true, a blank choice will be added to the top of the list
to allow `None` to be chosen.
:param blank_text:
Use this to override the default blank option's label.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, reference_class=None,
label_attr=None, get_label=None, allow_blank=False,
blank_text=u'', **kwargs):
super(ReferencePropertyField, self).__init__(label, validators,
**kwargs)
if label_attr is not None:
warnings.warn('label_attr= will be removed in WTForms 1.1, use get_label= instead.', DeprecationWarning)
self.get_label = operator.attrgetter(label_attr)
elif get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, basestring):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is not None:
self.query = reference_class.all()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
if str(obj.key()) == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for obj in self.query:
key = str(obj.key())
label = self.get_label(obj)
            yield (key, label, self.data and (self.data.key() == obj.key()))
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.query:
if str(self.data.key()) == str(obj.key()):
break
else:
raise ValueError(self.gettext(u'Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return self.data and unicode("\n".join(self.data)) or u''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = valuelist[0].splitlines()
except ValueError:
raise ValueError(self.gettext(u'Not a valid list'))
class GeoPtPropertyField(fields.TextField):
def process_formdata(self, valuelist):
if valuelist:
try:
lat, lon = valuelist[0].split(',')
self.data = u'%s,%s' % (decimal.Decimal(lat.strip()), decimal.Decimal(lon.strip()),)
except (decimal.InvalidOperation, ValueError):
raise ValueError(u'Not a valid coordinate location')
| 35.745763
| 116
| 0.598151
| 525
| 4,218
| 4.657143
| 0.272381
| 0.045808
| 0.02454
| 0.014724
| 0.16319
| 0.13865
| 0.130061
| 0.091616
| 0.025358
| 0.025358
| 0
| 0.00242
| 0.314367
| 4,218
| 117
| 117
| 36.051282
| 0.843015
| 0.210526
| 0
| 0.220779
| 0
| 0
| 0.046986
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116883
| false
| 0
| 0.051948
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4eb7fe555f324704c58058f0e711c3b4fd6b7fe
| 3,947
|
py
|
Python
|
mtrainsimulator.py
|
trevor-wieland/MTrainAI
|
47bab3bf3af9e5426a822a7d14586f1798674cd7
|
[
"MIT"
] | null | null | null |
mtrainsimulator.py
|
trevor-wieland/MTrainAI
|
47bab3bf3af9e5426a822a7d14586f1798674cd7
|
[
"MIT"
] | null | null | null |
mtrainsimulator.py
|
trevor-wieland/MTrainAI
|
47bab3bf3af9e5426a822a7d14586f1798674cd7
|
[
"MIT"
] | null | null | null |
import mtrain
import numpy as np
import pandas as pd
import random
def simulate_games(num_players=4, domino_size=12, num_games=250, collect_data=True,
debug=False, players=["Random", "Greedy", "Probability", "Neural"],
file_name="PlayData/data4_12_250"):
"""
Runs the mexican train game repeatedly with different combinations of players to
generate data to be used in testing and training the neural net.
If collect_data is on, the play data is retrieved and stored into a .xlsx file for later use
The format for the file name for this is as follows:
PlayData/data + num_players + _ + domino_size + _ + num_games + .xlsx
This spreadsheet is to be used when training the neural net.
This script has no required parameters, and will run the game with the default params if
unchanged.
If collect_data is on, the players are selected randomly each game from:
["Random", "Greedy", "Probability"]
If collect_data is off, the players are selected in order from the parameter players.
When collect_data is off: len(players) must equal num_players
Returns a tuple of lists: (score_averages, win_percentage) corresponding to the players
"""
#Sets column names for building dataframe later on
column_names = ["round_number", "turn_number", "player_number", "play",
"t_num", "hand", "unknown", "potential_plays", "points"]
#Depending on mode of use, sets players and checks validity of player values
modes = []
if collect_data:
modes = ["Random", "Greedy", "Probability"]
else:
if not len(players) == num_players:
raise RuntimeError("len(players) must equal num_players when collect_data is off")
modes = players
#Simulates num_games of games
scores = np.ndarray((num_players, num_games))
wins = np.ndarray((num_players, num_games))
full_data = pd.DataFrame(columns=column_names)
current_index = 0
for game_num in range(0, num_games):
#Randomize players if in collect_data mode
game_modes = []
if collect_data:
for select in range(0, num_players):
game_modes.append(random.choice(modes))
else:
game_modes = modes
#Run game with parameters
results = mtrain.mexicantrain(num_players, domino_size, debug=debug,
modes=game_modes,
data_collection=collect_data,
data_index=current_index, file_name=file_name)
#If collecting data, data is stored into the dataframe
if collect_data:
current_index = results[2].index[-1] + 1
full_data = pd.concat([full_data, results[2]])
#Scores and wins are recorded into their respective arrays
for player_num in range(0, num_players):
scores[player_num, game_num] = results[0][player_num]
if results[1] == player_num:
wins[player_num, game_num] = 1
else:
wins[player_num, game_num] = 0
#Calculates performance of the players
score_averages = np.ndarray((num_players))
win_percentage = np.ndarray((num_players))
for player_num in range(0, num_players):
score_averages[player_num] = np.mean(scores[player_num, :])
win_percentage[player_num] = np.mean(wins[player_num, :])
#If collecting data, prints data to a .xlsx file
if collect_data:
filename = "PlayData/data" + str(num_players) + "_" + str(domino_size) + "_" + str(num_games) + ".xlsx"
writer = pd.ExcelWriter(filename)
full_data.to_excel(writer, "Sheet1")
writer.save()
#Prints results and returns them as well
if debug: print(score_averages)
if debug: print(win_percentage)
return score_averages, win_percentage
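# A minimal usage sketch based on the docstring above; with these defaults the
# collected play data is written to "PlayData/data4_12_250.xlsx" (the PlayData
# directory is assumed to already exist):
#
#     scores, wins = simulate_games(num_players=4, domino_size=12, num_games=250)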
| 42.902174
| 111
| 0.64682
| 520
| 3,947
| 4.730769
| 0.307692
| 0.056911
| 0.036992
| 0.030894
| 0.134553
| 0.105285
| 0.02439
| 0.02439
| 0
| 0
| 0
| 0.009037
| 0.271092
| 3,947
| 92
| 112
| 42.902174
| 0.84602
| 0.335191
| 0
| 0.169811
| 0
| 0
| 0.092476
| 0.008229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.075472
| 0
| 0.113208
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4ec2af4e9b7cc307999482d71c793953e387022
| 3,336
|
py
|
Python
|
licenseplates/dataset.py
|
VaranRohila/apn
|
dbb5b814233accbbb49b9bfe12b7162402e3b267
|
[
"MIT"
] | null | null | null |
licenseplates/dataset.py
|
VaranRohila/apn
|
dbb5b814233accbbb49b9bfe12b7162402e3b267
|
[
"MIT"
] | null | null | null |
licenseplates/dataset.py
|
VaranRohila/apn
|
dbb5b814233accbbb49b9bfe12b7162402e3b267
|
[
"MIT"
] | null | null | null |
##############################################################################
#
# The code below is inspired by
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/pascal_voc.py
# --------------------------------------------------------
# Detectron2
# Licensed under the Apache 2.0 license.
# --------------------------------------------------------
from fvcore.common.file_io import PathManager
import os
import numpy as np
import xml.etree.ElementTree as ET
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
__all__ = ["register_licenseplates_voc"]
CLASS_NAMES = [
"license_plate",
]
def load_voc_instances(dirname: str, split: str):
"""
Load licenseplates VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "annotations", "images"
split (str): one of "train", "test"
"""
with PathManager.open(os.path.join(dirname, split + ".txt")) as f:
fileids = np.loadtxt(f, dtype=np.str)
dicts = []
for fileid in fileids:
anno_file = os.path.join(dirname, "annotations", fileid + ".xml")
jpeg_file = os.path.join(dirname, "images", fileid + ".jpg")
tree = ET.parse(anno_file)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
instances.append(
{"category_id": CLASS_NAMES.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
return dicts
def register_licenseplates_voc(name, dirname, split):
DatasetCatalog.register(name,
lambda: load_voc_instances(dirname, split))
MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
dirname=dirname,
split=split)
if __name__ == "__main__":
import random
import cv2
from detectron2.utils.visualizer import Visualizer
import argparse
# Parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("--split", default="train")
ap.add_argument("--samples", type=int, default=10)
ap.add_argument("--scale", type=float, default=1.0)
args = ap.parse_args()
dataset_name = f"licenseplates_{args.split}"
register_licenseplates_voc(dataset_name, "datasets/licenseplates", args.split)
dataset_dicts = DatasetCatalog.get(dataset_name)
for d in random.sample(dataset_dicts, args.samples):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1],
metadata=MetadataCatalog.get(dataset_name),
scale=args.scale)
vis = visualizer.draw_dataset_dict(d)
cv2.imshow(dataset_name, vis.get_image()[:, :, ::-1])
# Exit? Press ESC
if cv2.waitKey(0) & 0xFF == 27:
break
cv2.destroyAllWindows()
| 32.705882
| 100
| 0.579436
| 364
| 3,336
| 5.162088
| 0.428571
| 0.029271
| 0.038318
| 0.027142
| 0.022352
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010317
| 0.244604
| 3,336
| 101
| 101
| 33.029703
| 0.735317
| 0.147482
| 0
| 0
| 0
| 0
| 0.10293
| 0.027106
| 0
| 0
| 0.001465
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.15625
| 0
| 0.203125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4ece3f334aeba88cd76ec065663f9e04ac41d64
| 354
|
py
|
Python
|
docs/examples/pytorch/resnet50/scripts/test_read_speed.py
|
RogerChern/DALI
|
be143c3bb35458549e273608f1683a99ae41968e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
docs/examples/pytorch/resnet50/scripts/test_read_speed.py
|
RogerChern/DALI
|
be143c3bb35458549e273608f1683a99ae41968e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
docs/examples/pytorch/resnet50/scripts/test_read_speed.py
|
RogerChern/DALI
|
be143c3bb35458549e273608f1683a99ae41968e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import glob
import time
import random
filelist = glob.glob('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')
random.shuffle(filelist)
begin = time.time()
for i, f in enumerate(filelist):
if i == 10000:
break
with open(f, "rb") as fin:
result = fin.read()
end = time.time()
print("%.1f images/s" % (10000 / (end - begin)))
| 20.823529
| 75
| 0.641243
| 50
| 354
| 4.54
| 0.66
| 0.070485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 0.194915
| 354
| 17
| 76
| 20.823529
| 0.754386
| 0
| 0
| 0
| 0
| 0
| 0.185915
| 0.143662
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4eced841f40608be5ce0f25f32b14e3f8c5be34
| 12,864
|
py
|
Python
|
ocellaris/solver_parts/boundary_conditions/dirichlet.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | 1
|
2017-11-07T12:19:44.000Z
|
2017-11-07T12:19:44.000Z
|
ocellaris/solver_parts/boundary_conditions/dirichlet.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | null | null | null |
ocellaris/solver_parts/boundary_conditions/dirichlet.py
|
TormodLandet/Ocellaris
|
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
|
[
"Apache-2.0"
] | 2
|
2018-05-02T17:17:01.000Z
|
2019-03-11T13:09:40.000Z
|
# Copyright (C) 2015-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
import dolfin
from . import register_boundary_condition, BoundaryConditionCreator
from ocellaris.utils import (
CodedExpression,
OcellarisCppExpression,
OcellarisError,
verify_field_variable_definition,
)
class OcellarisDirichletBC(dolfin.DirichletBC):
def __init__(
self, simulation, V, value, subdomain_marker, subdomain_id, updater=None
):
"""
A simple storage class for Dirichlet boundary conditions
"""
super().__init__(
V, value, subdomain_marker, subdomain_id, method='geometric'
)
self.simulation = simulation
self._value = value
self.subdomain_marker = subdomain_marker
self.subdomain_id = subdomain_id
self._updater = updater
def func(self):
"""
The boundary value derivative function
"""
return self._value
def ds(self):
"""
Returns the ds measure of the subdomain
"""
return self.simulation.data['ds'](self.subdomain_id)
def copy_and_change_function_space(self, V):
"""
Return a copy with a new function space. Used when converting from
BCs for a segregated solver (default) to BCs for a coupled solver
"""
return OcellarisDirichletBC(
self.simulation, V, self._value, self.subdomain_marker, self.subdomain_id
)
def update(self):
"""
Update the time and other parameters used in the BC.
This is used every timestep and for all RK substeps
"""
if self._updater:
self._updater(
self.simulation.timestep, self.simulation.time, self.simulation.dt
)
def __repr__(self):
return '<OcellarisDirichletBC on subdomain %d>' % self.subdomain_id
@register_boundary_condition('ConstantValue')
class ConstantDirichletBoundary(BoundaryConditionCreator):
description = 'A prescribed constant value Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with constant value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
value = inp_dict.get_value('value', required_type='any')
if isinstance(value, list):
assert len(value) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, value[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, value, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, value, subdomains, subdomain_id):
"""
Add a Dirichlet condition to this variable
"""
if not isinstance(value, (float, int)):
raise OcellarisError(
'Error in ConstantValue BC for %s' % var_name,
'The value %r is not a number' % value,
)
df_value = dolfin.Constant(value)
# Store the boundary condition for use in the solver
bc = OcellarisDirichletBC(
self.simulation, self.func_space, df_value, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Constant value %r for %s' % (value, var_name))
@register_boundary_condition('CodedValue')
class CodedDirichletBoundary(BoundaryConditionCreator):
description = 'A coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
description = 'coded value boundary condition for %s' % name
sub_code = inp_dict.get_value('code/%d' % d, required_type='string')
expr = CodedExpression(simulation, sub_code, description)
self.register_dirichlet_condition(name, expr, subdomains, subdomain_id)
else:
description = 'coded value boundary condition for %s' % var_name
expr = CodedExpression(simulation, code, description)
self.register_dirichlet_condition(var_name, expr, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Coded value for %s' % var_name)
@register_boundary_condition('CppCodedValue')
class CppCodedDirichletBoundary(BoundaryConditionCreator):
description = 'A C++ coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with C++ coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('cpp_code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
sub_code = inp_dict.get_value('cpp_code/%d' % d, required_type='string')
self.register_dirichlet_condition(
name, sub_code, subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, code, subdomains, subdomain_id)
def register_dirichlet_condition(
self, var_name, cpp_code, subdomains, subdomain_id
):
"""
Store the boundary condition for use in the solver
"""
description = 'boundary condition for %s' % var_name
P = self.func_space.ufl_element().degree()
expr, updater = OcellarisCppExpression(
self.simulation, cpp_code, description, P, return_updater=True
)
bc = OcellarisDirichletBC(
self.simulation,
self.func_space,
expr,
subdomains,
subdomain_id,
updater=updater,
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' C++ coded value for %s' % var_name)
@register_boundary_condition('FieldFunction')
class FieldFunctionDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition with values from a field function'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
description = 'boundary condition for %s' % var_name
if isinstance(vardef, list):
assert len(vardef) == simulation.ndim
exprs = [
verify_field_variable_definition(simulation, vd, description)
for vd in vardef
]
else:
expr = verify_field_variable_definition(simulation, vardef, description)
if expr.ufl_shape != ():
assert expr.ufl_shape == (
simulation.ndim,
), 'Expected shape %r got %r' % ((simulation.ndim,), expr.ufl_shape)
exprs = [expr[d] for d in range(simulation.ndim)]
else:
exprs = [expr]
# Register BCs
if len(exprs) > 1:
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, exprs[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(
var_name, exprs[0], subdomains, subdomain_id
)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
assert expr.ufl_shape == ()
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field function value for %s' % var_name)
@register_boundary_condition('FieldVelocityValve')
class FieldVelocityValveDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition that compensates for non-zero total flux of a known velocity field'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
# A var_name like "u0" should be given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
description = 'boundary condition for %s' % var_name
self.velocity = verify_field_variable_definition(
simulation, vardef, description
)
field = simulation.fields[vardef.split('/')[0]]
# The expression value is updated as the field is changed
inp_dict.get_value('function', required_type='any')
field.register_dependent_field(self)
self.flux = dolfin.Constant(1.0)
# Create the boundary condition object
bc = OcellarisDirichletBC(
self.simulation, self.func_space, self.flux, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field velocity valve for %s' % var_name)
# Compute the region area, then update the flux
mesh = simulation.data['mesh']
self.area = dolfin.assemble(self.flux * bc.ds()(domain=mesh))
self.region_names = inp_dict.get_value('regions', required_type='list(string)')
self.update()
def update(self, timestep_number=None, t=None, dt=None):
"""
The main field has changed; update our flux so that the total flux sums to zero
"""
regions = self.simulation.data['boundary']
mesh = self.simulation.data['mesh']
n = dolfin.FacetNormal(mesh)
flux = 0
count = 0
for region in regions:
if region.name in self.region_names:
f = dolfin.dot(self.velocity, n) * region.ds()
flux += dolfin.assemble(f)
count += 1
assert count == len(self.region_names)
# FIXME: assumes n is pointing outwards along the axis in the positive
# direction in this boundary region
self.flux.assign(dolfin.Constant(-flux / self.area))
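The register_boundary_condition decorator above makes this module pluggable; as a hypothetical illustration (not part of Ocellaris), a creator that always imposes a zero value would follow the same shape as the creators defined here:
# Hypothetical sketch of the plugin pattern used in this module. Only names
# defined above and dolfin.Constant are used; 'ZeroValue' is an invented key.
@register_boundary_condition('ZeroValue')
class ZeroDirichletBoundary(BoundaryConditionCreator):
    description = 'A homogeneous (zero) Dirichlet condition'

    def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
        self.simulation = simulation
        # Strip a trailing component index ("u0" -> "u") to find the function space
        V = simulation.data['V%s' % var_name.rstrip('0123456789')]
        bc = OcellarisDirichletBC(
            simulation, V, dolfin.Constant(0.0), subdomains, subdomain_id
        )
        simulation.data['dirichlet_bcs'].setdefault(var_name, []).append(bc)
        simulation.log.info('    Zero value for %s' % var_name)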
| 38.981818
| 108
| 0.615127
| 1,464
| 12,864
| 5.232924
| 0.148907
| 0.049341
| 0.060305
| 0.014097
| 0.598747
| 0.553975
| 0.516121
| 0.475134
| 0.464822
| 0.452813
| 0
| 0.003504
| 0.290112
| 12,864
| 329
| 109
| 39.100304
| 0.835414
| 0.143968
| 0
| 0.35023
| 0
| 0
| 0.091354
| 0.001976
| 0
| 0
| 0
| 0.00304
| 0.032258
| 1
| 0.073733
| false
| 0
| 0.013825
| 0.004608
| 0.156682
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4ee6e97a2bc58c8bc3ccf8cb1ebf6364e70cd9d
| 3,906
|
py
|
Python
|
python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tempfile
import os
from unittest import TestCase
import numpy as np
import tensorflow as tf
def create_data(tf_data=False, batch_size=32):
train_num_samples = 1000
test_num_samples = 400
input_feature_num = 10
output_feature_num = 2
past_seq_len = 10
future_seq_len = 2
def get_x_y(num_sample):
x = np.random.randn(num_sample, past_seq_len, input_feature_num)
y = np.random.randn(num_sample, future_seq_len, output_feature_num)
return x, y
train_data = get_x_y(train_num_samples)
test_data = get_x_y(test_num_samples)
if tf_data:
from_tensor_slices = tf.data.Dataset.from_tensor_slices
train_data = from_tensor_slices(train_data).cache()\
.shuffle(train_num_samples)\
.batch(batch_size)\
.prefetch(tf.data.AUTOTUNE)
test_data = from_tensor_slices(test_data).cache()\
.batch(batch_size)\
.prefetch(tf.data.AUTOTUNE)
return train_data, test_data
@pytest.mark.skipif(tf.__version__ < '2.0.0', reason="Run only when tf > 2.0.0.")
class TestSeq2SeqForecaster(TestCase):
def setUp(self):
from bigdl.chronos.forecaster.tf.seq2seq_forecaster import Seq2SeqForecaster
self.forecaster = Seq2SeqForecaster(past_seq_len=10,
future_seq_len=2,
input_feature_num=10,
output_feature_num=2)
def tearDown(self):
pass
def test_seq2seq_fit_predict_evaluate(self):
train_data, test_data = create_data()
self.forecaster.fit(train_data,
epochs=2,
batch_size=32)
yhat = self.forecaster.predict(test_data[0])
assert yhat.shape == (400, 2, 2)
mse = self.forecaster.evaluate(test_data, multioutput="raw_values")
assert mse[0].shape == test_data[-1].shape[1:]
def test_seq2seq_fit_tf_data(self):
train_data, test_data = create_data(tf_data=True)
self.forecaster.fit(train_data,
epochs=2)
yhat = self.forecaster.predict(test_data)
assert yhat.shape == (400, 2, 2)
def test_seq2seq_save_load(self):
train_data, test_data = create_data()
self.forecaster.fit(train_data,
epochs=2,
batch_size=32)
yhat = self.forecaster.predict(test_data[0])
with tempfile.TemporaryDirectory() as tmp_dir_file:
tmp_dir_file = os.path.join(tmp_dir_file, 'seq2seq.ckpt')
self.forecaster.save(tmp_dir_file)
self.forecaster.load(tmp_dir_file)
from bigdl.chronos.model.tf2.Seq2Seq_keras import LSTMSeq2Seq
assert isinstance(self.forecaster.internal, LSTMSeq2Seq)
load_model_yhat = self.forecaster.predict(test_data[0])
assert yhat.shape == (400, 2, 2)
np.testing.assert_almost_equal(yhat, load_model_yhat, decimal=5)
if __name__ == '__main__':
pytest.main([__file__])
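Outside the test suite, the same fit/predict flow can be driven directly; a hedged sketch reusing create_data and the constructor arguments exercised above:
# Sketch (not part of the tests): plain usage of the forecaster API shown above.
def demo_forecast():
    from bigdl.chronos.forecaster.tf.seq2seq_forecaster import Seq2SeqForecaster
    train_data, test_data = create_data()
    forecaster = Seq2SeqForecaster(past_seq_len=10,
                                   future_seq_len=2,
                                   input_feature_num=10,
                                   output_feature_num=2)
    forecaster.fit(train_data, epochs=2, batch_size=32)
    return forecaster.predict(test_data[0])  # expected shape: (400, 2, 2)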
| 38.294118
| 84
| 0.615463
| 491
| 3,906
| 4.627291
| 0.325866
| 0.045775
| 0.022007
| 0.02993
| 0.305018
| 0.263644
| 0.240317
| 0.180458
| 0.128521
| 0.128521
| 0
| 0.0279
| 0.302611
| 3,906
| 101
| 85
| 38.673267
| 0.806167
| 0.142089
| 0
| 0.253521
| 0
| 0
| 0.017991
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 1
| 0.098592
| false
| 0.014085
| 0.112676
| 0
| 0.253521
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4eeb6ee82889a7b906d047189dd7b8bb9659a33
| 1,922
|
py
|
Python
|
examples/SubOrbitalFlight.py
|
nicolaikd/sl-ksp
|
cc1e239570e10428d11a41a26b33947b54f7f0ec
|
[
"MIT"
] | 7
|
2021-01-11T15:39:56.000Z
|
2021-08-21T18:44:04.000Z
|
examples/SubOrbitalFlight.py
|
nicolaikd/sl-ksp
|
cc1e239570e10428d11a41a26b33947b54f7f0ec
|
[
"MIT"
] | 1
|
2021-04-17T13:07:41.000Z
|
2021-04-21T16:21:35.000Z
|
examples/SubOrbitalFlight.py
|
nicolaikd/sl-ksp
|
cc1e239570e10428d11a41a26b33947b54f7f0ec
|
[
"MIT"
] | 2
|
2021-03-17T16:36:23.000Z
|
2021-05-05T14:40:59.000Z
|
import time
import krpc
conn = krpc.connect(name='Sub-orbital flight')
vessel = conn.space_center.active_vessel
vessel.auto_pilot.target_pitch_and_heading(90, 90)
vessel.auto_pilot.engage()
vessel.control.throttle = 1
time.sleep(1)
print('Launch!')
vessel.control.activate_next_stage()
fuel_amount = conn.get_call(vessel.resources.amount, 'SolidFuel')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(fuel_amount),
conn.krpc.Expression.constant_float(0.1))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Booster separation')
vessel.control.activate_next_stage()
mean_altitude = conn.get_call(getattr, vessel.flight(), 'mean_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(mean_altitude),
conn.krpc.Expression.constant_double(10000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Gravity turn')
vessel.auto_pilot.target_pitch_and_heading(60, 90)
apoapsis_altitude = conn.get_call(getattr, vessel.orbit, 'apoapsis_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(apoapsis_altitude),
conn.krpc.Expression.constant_double(100000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Launch stage separation')
vessel.control.throttle = 0
time.sleep(1)
vessel.control.activate_next_stage()
vessel.auto_pilot.disengage()
srf_altitude = conn.get_call(getattr, vessel.flight(), 'surface_altitude')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(srf_altitude),
conn.krpc.Expression.constant_double(1000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
vessel.control.activate_next_stage()
while vessel.flight(vessel.orbit.body.reference_frame).vertical_speed < -0.1:
print('Altitude = %.1f meters' % vessel.flight().surface_altitude)
time.sleep(1)
print('Landed!')
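The script repeats one pattern several times: build a server-side expression from a telemetry call and block until it fires. A hedged refactoring of that pattern into a helper (the helper name is ours; the kRPC calls are the ones already used above):
# Sketch: one helper for the repeated "wait until a value drops below X" pattern.
def wait_until_below(conn, call, threshold):
    expr = conn.krpc.Expression.less_than(
        conn.krpc.Expression.call(call),
        conn.krpc.Expression.constant_double(threshold))
    event = conn.krpc.add_event(expr)
    with event.condition:
        event.wait()

# e.g. the final descent wait above becomes:
# srf_altitude = conn.get_call(getattr, vessel.flight(), 'surface_altitude')
# wait_until_below(conn, srf_altitude, 1000)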
| 30.03125
| 77
| 0.774714
| 270
| 1,922
| 5.32963
| 0.277778
| 0.09451
| 0.150104
| 0.069493
| 0.612231
| 0.52328
| 0.417651
| 0.314802
| 0.314802
| 0.314802
| 0
| 0.018933
| 0.093132
| 1,922
| 63
| 78
| 30.507937
| 0.806655
| 0
| 0
| 0.442308
| 0
| 0
| 0.084287
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4efd4c2ab810bf4c725de159e2f410b24aea731
| 18,031
|
py
|
Python
|
ramp-database/ramp_database/tools/leaderboard.py
|
kegl/ramp-board
|
6373bf02efc096e02b26320e4f11edd00f9e5752
|
[
"BSD-3-Clause"
] | null | null | null |
ramp-database/ramp_database/tools/leaderboard.py
|
kegl/ramp-board
|
6373bf02efc096e02b26320e4f11edd00f9e5752
|
[
"BSD-3-Clause"
] | null | null | null |
ramp-database/ramp_database/tools/leaderboard.py
|
kegl/ramp-board
|
6373bf02efc096e02b26320e4f11edd00f9e5752
|
[
"BSD-3-Clause"
] | null | null | null |
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team
from .team import get_event_team_by_name
from .submission import get_bagged_scores
from .submission import get_scores
from .submission import get_submission_max_ram
from .submission import get_time
width = -1 if LooseVersion(pd.__version__) < LooseVersion("1.0.0") else None
pd.set_option('display.max_colwidth', width)
def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
with_links=True):
"""Format the leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to built.
event_name : str
The name of the event.
with_links : bool
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : dataframe
The leaderboard in a dataframe format.
"""
record_score = []
event = session.query(Event).filter_by(name=event_name).one()
map_score_precision = {score_type.name: score_type.precision
for score_type in event.score_types}
for sub in submissions:
# keep only the scores from the highest n_bag level
df_scores_bag = get_bagged_scores(session, sub.id)
highest_level = df_scores_bag.index.get_level_values('n_bag').max()
df_scores_bag = df_scores_bag.loc[(slice(None), highest_level), :]
df_scores_bag.index = df_scores_bag.index.droplevel('n_bag')
df_scores_bag = df_scores_bag.round(map_score_precision)
df_scores = get_scores(session, sub.id)
df_scores = df_scores.round(map_score_precision)
df_time = get_time(session, sub.id)
df_time = df_time.stack().to_frame()
df_time.index = df_time.index.set_names(['fold', 'step'])
df_time = df_time.rename(columns={0: 'time'})
df_time = df_time.sum(axis=0, level="step").T
df_scores_mean = df_scores.groupby('step').mean()
df_scores_std = df_scores.groupby('step').std()
# select only the validation and testing steps and rename them to
# public and private
map_renaming = {'valid': 'public', 'test': 'private'}
df_scores_mean = (df_scores_mean.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_std = (df_scores_std.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_bag = (df_scores_bag.rename(index=map_renaming)
.stack().to_frame().T)
df = pd.concat([df_scores_bag, df_scores_mean, df_scores_std], axis=1,
keys=['bag', 'mean', 'std'])
df.columns = df.columns.set_names(['stat', 'set', 'score'])
# change the multi-index into a stacked index
df.columns = df.columns.map(lambda x: " ".join(x))
# add the aggregated time information
df_time.index = df.index
df_time = df_time.rename(
columns={'train': 'train time [s]',
'valid': 'validation time [s]',
'test': 'test time [s]'}
)
df = pd.concat([df, df_time], axis=1)
if leaderboard_type == 'private':
df['submission ID'] = sub.basename.replace('submission_', '')
df['team'] = sub.team.name
df['submission'] = sub.name_with_link if with_links else sub.name
df['contributivity'] = int(round(100 * sub.contributivity))
df['historical contributivity'] = int(round(
100 * sub.historical_contributivity))
df['max RAM [MB]'] = get_submission_max_ram(session, sub.id)
df['submitted at (UTC)'] = pd.Timestamp(sub.submission_timestamp)
record_score.append(df)
# stack all the records
df = pd.concat(record_score, axis=0, ignore_index=True, sort=False)
# keep only second precision for the time stamp
df['submitted at (UTC)'] = df['submitted at (UTC)'].astype('datetime64[s]')
# reorder the columns
stats_order = (['bag', 'mean', 'std'] if leaderboard_type == 'private'
else ['bag'])
dataset_order = (['public', 'private'] if leaderboard_type == 'private'
else ['public'])
score_order = ([event.official_score_name] +
[score_type.name for score_type in event.score_types
if score_type.name != event.official_score_name])
score_list = [
'{} {} {}'.format(stat, dataset, score)
for dataset, score, stat in product(dataset_order,
score_order,
stats_order)
]
# Only display train and validation time for the public leaderboard
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_ordered = (
['team', 'submission'] +
score_list +
['contributivity', 'historical contributivity'] +
time_list +
['max RAM [MB]', 'submitted at (UTC)']
)
if leaderboard_type == "private":
col_ordered = ["submission ID"] + col_ordered
df = df[col_ordered]
# drop the contributivity columns if they are all zero
contrib_columns = ['contributivity', 'historical contributivity']
if (df[contrib_columns] == 0).all(axis=0).all():
df = df.drop(columns=contrib_columns)
df = df.sort_values(
"bag {} {}".format(leaderboard_type, event.official_score_name),
ascending=event.get_official_score_type(session).is_lower_the_better
)
# rename the column name for the public leaderboard
if leaderboard_type == 'public':
df = df.rename(columns={
key: value for key, value in zip(score_list, score_order)
})
return df
def _compute_competition_leaderboard(session, submissions, leaderboard_type,
event_name):
"""Format the competition leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to built.
event_name : str
The name of the event.
Returns
-------
competition_leaderboard : dataframe
The competition leaderboard in a dataframe format.
"""
event = session.query(Event).filter_by(name=event_name).one()
score_type = event.get_official_score_type(session)
score_name = event.official_score_name
private_leaderboard = _compute_leaderboard(session, submissions, 'private',
event_name, with_links=False)
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_selected_private = (['team', 'submission'] +
['bag private ' + score_name,
'bag public ' + score_name] +
time_list +
['submitted at (UTC)'])
leaderboard_df = private_leaderboard[col_selected_private]
leaderboard_df = leaderboard_df.rename(
columns={'bag private ' + score_name: 'private ' + score_name,
'bag public ' + score_name: 'public ' + score_name}
)
# select best submission for each team
best_df = (leaderboard_df.groupby('team').min()
if score_type.is_lower_the_better
else leaderboard_df.groupby('team').max())
best_df = best_df[['public ' + score_name]].reset_index()
best_df['best'] = True
# merge to get a best indicator column then select best
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'public ' + score_name],
right_on=['team', 'public ' + score_name]
)
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# dealing with ties: we need the lowest timestamp
best_df = leaderboard_df.groupby('team').min()
best_df = best_df[['submitted at (UTC)']].reset_index()
best_df['best'] = True
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'submitted at (UTC)'],
right_on=['team', 'submitted at (UTC)'])
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# sort by public score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['public ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['public rank'] = np.arange(len(leaderboard_df)) + 1
# sort by private score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['private ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['private rank'] = np.arange(len(leaderboard_df)) + 1
leaderboard_df['move'] = \
leaderboard_df['public rank'] - leaderboard_df['private rank']
leaderboard_df['move'] = [
'{:+d}'.format(m) if m != 0 else '-' for m in leaderboard_df['move']]
col_selected = (
[leaderboard_type + ' rank', 'team', 'submission',
leaderboard_type + ' ' + score_name] +
time_list +
['submitted at (UTC)']
)
if leaderboard_type == 'private':
col_selected.insert(1, 'move')
df = leaderboard_df[col_selected]
df = df.rename(columns={
leaderboard_type + ' ' + score_name: score_name,
leaderboard_type + ' rank': 'rank'
})
df = df.sort_values(by='rank')
return df
def get_leaderboard(session, leaderboard_type, event_name, user_name=None,
with_links=True):
"""Get a leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
leaderboard_type : {'public', 'private', 'failed', 'new', \
'public competition', 'private competition'}
The type of leaderboard to generate.
event_name : str
The event name.
user_name : None or str, default is None
The user name. If None, scores from all users will be queried. This
parameter is discarded when requesting the competition leaderboard.
with_links : bool, default is True
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : str
The leaderboard in HTML format.
"""
q = (session.query(Submission)
.filter(Event.id == EventTeam.event_id)
.filter(Team.id == EventTeam.team_id)
.filter(EventTeam.id == Submission.event_team_id)
.filter(Event.name == event_name))
if user_name is not None:
q = q.filter(Team.name == user_name)
submissions = q.all()
submission_filter = {'public': 'is_public_leaderboard',
'private': 'is_private_leaderboard',
'failed': 'is_error',
'new': 'is_new',
'public competition': 'is_in_competition',
'private competition': 'is_in_competition'}
submissions = [sub for sub in submissions
if (getattr(sub, submission_filter[leaderboard_type]) and
sub.is_not_sandbox)]
if not submissions:
return None
if leaderboard_type in ['public', 'private']:
df = _compute_leaderboard(
session, submissions, leaderboard_type, event_name,
with_links=with_links
)
elif leaderboard_type in ['new', 'failed']:
if leaderboard_type == 'new':
columns = ['team', 'submission', 'submitted at (UTC)', 'state']
else:
columns = ['team', 'submission', 'submitted at (UTC)', 'error']
# we rely on the zip function to ignore the submission state if the error
# column was not appended
data = [{
column: value for column, value in zip(
columns,
[sub.event_team.team.name,
sub.name_with_link,
pd.Timestamp(sub.submission_timestamp),
(sub.state_with_link if leaderboard_type == 'failed'
else sub.state)])
} for sub in submissions]
df = pd.DataFrame(data, columns=columns)
else:
# make some extra filtering
submissions = [sub for sub in submissions if sub.is_public_leaderboard]
if not submissions:
return None
competition_type = ('public' if 'public' in leaderboard_type
else 'private')
df = _compute_competition_leaderboard(
session, submissions, competition_type, event_name
)
df_html = df.to_html(escape=False, index=False, max_cols=None,
max_rows=None, justify='left')
df_html = '<thead> {} </tbody>'.format(
df_html.split('<thead>')[1].split('</tbody>')[0]
)
return df_html
def update_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
if not new_only:
event.private_leaderboard_html = get_leaderboard(
session, 'private', event_name
)
event.public_leaderboard_html_with_links = get_leaderboard(
session, 'public', event_name
)
event.public_leaderboard_html_no_links = get_leaderboard(
session, 'public', event_name, with_links=False
)
event.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name
)
event.public_competition_leaderboard_html = get_leaderboard(
session, 'public competition', event_name
)
event.private_competition_leaderboard_html = get_leaderboard(
session, 'private competition', event_name
)
event.new_leaderboard_html = get_leaderboard(
session, 'new', event_name
)
session.commit()
def update_user_leaderboards(session, event_name, user_name,
new_only=False):
"""Update the of a user leaderboards for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
user_name : str
The user name. If None, scores from all users will be queried.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event_team = get_event_team_by_name(session, event_name, user_name)
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
def update_all_user_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for all users for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
event_teams = session.query(EventTeam).filter_by(event=event).all()
for event_team in event_teams:
user_name = event_team.team.name
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
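As a hedged usage sketch (how the SQLAlchemy session is obtained depends on the deployment and is not shown in this module), refreshing everything for one event reduces to:
# Sketch: typical call sequence for this module; `session` is assumed to be a
# ramp-database SQLAlchemy session obtained elsewhere.
def refresh_event(session, event_name):
    update_leaderboards(session, event_name)            # event-wide leaderboards
    update_all_user_leaderboards(session, event_name)   # per-user leaderboards
    # returns None when the event has no eligible submissions
    return get_leaderboard(session, 'public', event_name, with_links=False)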
| 39.455142
| 79
| 0.619378
| 2,147
| 18,031
| 4.984164
| 0.117373
| 0.032801
| 0.019624
| 0.026726
| 0.560508
| 0.469489
| 0.412765
| 0.366601
| 0.3552
| 0.34165
| 0
| 0.001917
| 0.276801
| 18,031
| 456
| 80
| 39.541667
| 0.818712
| 0.217071
| 0
| 0.236934
| 0
| 0
| 0.111516
| 0.003134
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020906
| false
| 0
| 0.045296
| 0
| 0.083624
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f07209eebdfab152cf385342225e58c7210495
| 623
|
py
|
Python
|
projects/boring_stuff/03_functions/ZigZag.py
|
SavantLogics/Visual_Studio_Python_Scripts-master
|
9e3c5f8a8f685f9ae51045af9260ccc28f89d72f
|
[
"MIT"
] | null | null | null |
projects/boring_stuff/03_functions/ZigZag.py
|
SavantLogics/Visual_Studio_Python_Scripts-master
|
9e3c5f8a8f685f9ae51045af9260ccc28f89d72f
|
[
"MIT"
] | null | null | null |
projects/boring_stuff/03_functions/ZigZag.py
|
SavantLogics/Visual_Studio_Python_Scripts-master
|
9e3c5f8a8f685f9ae51045af9260ccc28f89d72f
|
[
"MIT"
] | null | null | null |
#Automate the Boring Stuff with Python
import time, sys
indent = 0 # How many spaces to indent
indent_Increasing = True # Whether the indentation is increasing or not
try:
while True: # The main program loop
print(' ' * indent, end='')
print('********')
time.sleep(0.1) # Pause for 1/10th of a second
if indent_Increasing:
indent = indent + 1
if indent == 20:
indent_Increasing = False
else:
indent = indent - 1
if indent == 0:
indent_Increasing = True
except KeyboardInterrupt:
sys.exit()
| 27.086957
| 71
| 0.569823
| 74
| 623
| 4.743243
| 0.594595
| 0.182336
| 0.11396
| 0.08547
| 0.119658
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026764
| 0.340289
| 623
| 23
| 72
| 27.086957
| 0.827251
| 0.255217
| 0
| 0.111111
| 0
| 0
| 0.019565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f145e4f5e9df82c3ed3f3cc3dee6abaad4fc6c
| 838
|
py
|
Python
|
setup.py
|
sequentialchaos/i3-workspace-swap
|
86646066b9f971c1ff130a642a914ab2db8f9ae6
|
[
"MIT"
] | null | null | null |
setup.py
|
sequentialchaos/i3-workspace-swap
|
86646066b9f971c1ff130a642a914ab2db8f9ae6
|
[
"MIT"
] | null | null | null |
setup.py
|
sequentialchaos/i3-workspace-swap
|
86646066b9f971c1ff130a642a914ab2db8f9ae6
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="i3-workspace-swap",
description='A python utility swap the content of two workplaces in i3wm',
long_description=long_description,
long_description_content_type="text/markdown",
version="1.1.0",
url='https://github.com/einzigartigername/i3-workspace-swap',
license='MIT',
author='Nelson Gillo',
author_email='nelson.gillo@gmx.de',
packages=setuptools.find_packages(),
scripts=['i3-workspace-swap'],
install_requires=['i3ipc'],
classifiers=[
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
'Programming Language :: Python :: 3'
],
python_requires='>=3.6',
)
| 27.032258
| 78
| 0.658711
| 98
| 838
| 5.530612
| 0.693878
| 0.110701
| 0.083026
| 0.110701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01632
| 0.195704
| 838
| 30
| 79
| 27.933333
| 0.787834
| 0
| 0
| 0
| 0
| 0
| 0.434368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.041667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f450e40179e22e5b7878cbc391794da9f23b06
| 14,026
|
py
|
Python
|
Cogs/Actions.py
|
MrAngelDo6pa/MedBotS
|
89e19d831507e20d0898114502967b2ad8ecf957
|
[
"MIT"
] | 2
|
2021-09-28T10:40:10.000Z
|
2021-11-07T14:49:07.000Z
|
Cogs/Actions.py
|
ddoskid/lol12
|
35c097bbebeca3043a939b902b07474473344a3c
|
[
"MIT"
] | null | null | null |
Cogs/Actions.py
|
ddoskid/lol12
|
35c097bbebeca3043a939b902b07474473344a3c
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
import random
import datetime
from discord.ext import commands
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
# Add the bot
bot.add_cog(Actions(bot))
class Actions(commands.Cog):
## class that handles storing and computing action messages
class actionable:
## these should be filled in the override class. any {} are replaced with target member's name
nothingList = [] # when you call without any arguments
botList = [] # when the action is done at the bot
selfList = [] # when the action is done at the user who called it
memberList = [] # when the action is done toward another member
itemList = [] # when the action is done on a string of text that is not a member
def computeAction(self, bot, ctx, target):
'''return a message based on the context and argument of the command'''
mesg = ""
if not target: # no arguments
mesg = random.choice(self.nothingList)
else:
targetMember = DisplayName.memberForName(target, ctx.message.guild)
if targetMember:
if self.botList and targetMember.id == bot.user.id: # actioning the bot
mesg = random.choice(self.botList) # if botList is empty we fail over to the member list
elif self.selfList and targetMember.id == ctx.message.author.id: # actioning themselves
mesg = random.choice(self.selfList)
else: # actioning another user
mesg = random.choice(self.memberList).replace("{}",DisplayName.name(targetMember))
else: # actioning an item
mesg = random.choice(self.itemList)
if '{}' in mesg:
mesg = mesg.format(target)
mesgFull = '*{}*, {}'.format(DisplayName.name(ctx.message.author), mesg)
mesgFull = Nullify.clean(mesgFull)
return mesgFull
## static definitions of all the action messages
class eating(actionable):
nothingList = [ 'you sit quietly and eat *nothing*...',
'you\'re *sure* there was something to eat, so you just chew on nothingness...',
'there comes a time when you need to realize that you\'re just chewing nothing for the sake of chewing. That time is now.']
botList = [ 'you try to eat *me* - but unfortunately, I saw it coming - your jaw hangs open as I deftly sidestep.',
'your mouth hangs open for a brief second before you realize that *I\'m* eating *you*.',
'I\'m a bot. You can\'t eat me.',
'your jaw clamps down on... wait... on nothing, because I\'m *digital!*.',
'what kind of bot would I be if I let you eat me?']
selfList = ['you clamp down on your own forearm - not surprisingly, it hurts.',
'you place a finger into your mouth, but *just can\'t* force yourself to bite down.',
'you happily munch away, but can now only wave with your left hand.',
'wait - you\'re not a sandwich!',
'you might not be the smartest...']
memberList = [ 'you unhinge your jaw and consume *{}* in one bite.',
'you try to eat *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you take a quick bite out of *{}*. They probably didn\'t even notice.',
'you sink your teeth into *{}\'s* shoulder - they turn to face you, eyes wide as you try your best to scurry away and hide.',
'your jaw clamps down on *{}* - a satisfying *crunch* emanates as you finish your newest meal.']
itemList = [ 'you take a big chunk out of *{}*. *Delicious.*',
'your teeth sink into *{}* - it tastes satisfying.',
'you rip hungrily into *{}*, tearing it to bits!',
'you just can\'t bring yourself to eat *{}* - so you just hold it for awhile...',
'you attempt to bite into *{}*, but you\'re clumsier than you remember - and fail...']
class drinking(actionable):
nothingList = [ 'you stare at your glass full of *nothing*...',
'that cup must\'ve had something in it, so you drink *nothing*...',
'you should probably just go get a drink.',
'that desk looks pretty empty',
'are you sure you know what drinking is?',
'you desperatly search for something to drink']
botList = [ 'you try to drink *me*, but I dodge your straw.',
'You search for me, only to realise that *I* am already drinking you!',
'I\'m a bot. You can\'t drink me.',
'you stick a straw in... wait... in nothing, because I\'m *digital!*.',
'what do you think I am to let you drink me?',
'I don\'t think you would like the taste of me.',
'you can\'t drink me, I\'m a machine!']
selfList = ['you stab yourself with a straw - not surprisingly, it hurts.',
'you fit yourself in to a cup, but you just can\'t do it.',
'you happily drink away, but you are now very floppy.',
'wait - you\'re not a drink!',
'you might not be the smartest...',
'you might have some issues.',
'you try to drink yourself.',
'why would you drink yourself?']
memberList = [ 'you grab your lucky straw and empty *{}* in one sip.',
'you try to drink *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you drink a small sip of *{}*. They probably didn\'t even notice.',
'you stab your straw into *{}\'s* shoulder - You run away as they run after you.',
'you happily drink away - *{}* starts to look like an empty Capri Sun package.',
'you are thirsty - *{}* sacrifices themself involuntarily.',
'somehow you end up emptying *{}*.']
itemList = ['you take a big sip of *{}*. *Delicious.*',
'your straw sinks into *{}* - it tastes satisfying.',
'you thirstly guzzle *{}*, it\'s lovely!',
'you just can\'t bring yourself to drink *{}* - so you just hold it for awhile...',
'you attempt to drain *{}*, but you\'re clumsier than you remember - and fail...',
'you drink *{}*.',
'*{}* dries up from your drinking.',
'*{}* starts resembling the Aral Sea.']
class booping(actionable):
nothingList = [ 'you stretch out your hand in the air, but there\'s nothing there...',
'you try and find someone to boop, but there\'s no one there.',
'you look around the channel for someone to boop.',
'you eye all the heads in the room, just waiting to be booped.',
'are you sure you have someone to boop?',
'I get it. You want to boop *someone*.']
selfList = ['you boop yourself on the nose with your finger.',
'you try to boop your head, but your hand gets lost along the way.',
'you happily boop yourself, but you are now very giddy.',
'wait - are you sure you want to boop yourself?',
'you might not be the smartest...',
'you might have some issues.',
'you try to boop yourself.',
'why would you boop yourself?']
memberList = [ 'you outstretch your lucky finger and boop *{}* in one go.',
'you try to boop *{}*, but you just can\'t quite do it - you miss their head, the taste of failure hanging stuck to your hand...',
'you sneak a boop onto *{}*. They probably didn\'t even notice.',
'you poke your hand onto *{}\'s* hand - You run away as they run after you.',
'you happily drum your fingers away - *{}* starts to look annoyed.',
'you\'re feeling boopy - *{}* sacrifices themself involuntarily.',
'somehow you end up booping *{}*.',
'you climb *{}*\'s head and use it as a bouncy castle... they feel amused.']
itemList = ['you put your hand onto *{}*\'s head. *Bliss.*',
'your hand touches *{}*\'s snoot - it feels satisfying.',
'you happily boop *{}*, it\'s lovely!',
'you just can\'t bring yourself to boop *{}* - so you just let your hand linger...',
'you attempt to boop *{}*, but you\'re clumsier than you remember - and fail...',
'you boop *{}*.',
'*{}* feels annoyed from your booping.',
'*{}* starts resembling a happy pupper.']
class spooky(actionable):
nothingList = [ 'you spook no one but yourself',
'you spook nothing, sp00py...',
'sadly, no one got spooked',
'it is sp00... you can\'t spook air']
botList = [ 'you scared the living pumpkin out of me!',
'you spooked me so hard, I got the Heebie-jeebies...', # https://www.myenglishteacher.eu/blog/idioms-for-being-afraid/
'you sp00p me? But I\'m a bot... I can\'t be spooked!',
'sorry, but I cannot let you spook me; My digital emotions will get all messed up!',
'aaaaaaaaaah! Don\'t you scare me like that again!']
selfList = ['go watch a scary movie to be absolutely sp00ped!',
'boo! Did you scare you?',
'you look yourself in the mirror and get a little scared...',
'get spooked by... yourself?',
'sp00py, but why spook yourself?']
memberList = [ 'you sp00p *{}* so hard that they start screaming!',
'you tried to sneak up on *{}*, but they heard you sneakin\' and fail...',
'it is sp00py time! Hey *{}*, boo!',
'congrats, *{}* dun sp00ked.',
'get spook3d *{}*!']
itemList = ['you spook *{}* with no reaction, leaving you looking weird...',
'*{}* got sp00p3d so hard, it ran away!',
'you trick or treat *{}* without any reaction...',
'you do your best to sp00p *{}*, but fail...',
'sp00py time! *{}* gets sp00ped harder than you thought and starts crying!']
class highfives(actionable):
nothingList = [ 'you stand alone for an eternity, hand raised up - desperate for any sort of recognition...',
'with a wild swing you throw your hand forward - the momentum carries you to the ground and you just lay there - high fiveless...',
'the only sound you hear as a soft *whoosh* as your hand connects with nothing...']
botList = [ 'the sky erupts with 1\'s and 0\'s as our hands meet in an epic high five of glory!',
'you beam up to the cloud and receive a quick high five from me before downloading back to Earth.',
'I unleash a fork-bomb of high five processes!',
'01001000011010010110011101101000001000000100011001101001011101100110010100100001']
selfList = ['ahh - high fiving yourself, classy...',
'that\'s uh... that\'s just clapping...',
'you run in a large circle - *totally* high fiving all your friends...',
'now you\'re at both ends of a high five!']
memberList = [ 'you and *{}* jump up for an epic high five - freeze-framing as the credits roll and some wicked 80s synth plays out.',
'you and *{}* elevate to a higher plane of existence in wake of that tremendous high five!',
'a 2 hour, 3 episode anime-esque fight scene unfolds as you and *{}* engage in a world-ending high five!',
'it *was* tomorrow - before you and *{}* high fived with enough force to spin the Earth in reverse!',
'like two righteous torpedoes - you and *{}* connect palms, subsequently deafening everyone in a 300-mile radius!']
itemList = ['neat... you just high fived *{}*.',
'your hand flops through the air - hitting *{}* with a soft thud.',
'you reach out a hand, gently pressing your palm to *{}*. A soft *"high five"* escapes your lips as a tear runs down your cheek...',
'like an open-handed piston of ferocity - you drive your palm into *{}*.']
class petting(actionable): # meow
nothingList = [ 'you absentmindedly wave your hand in the air.',
'you could have sworn there was a cat there!',
'you remember that there are no cats here.',
'you try to pet the cat, but miss because the cat is gone.']
botList = [ 'I may be electronic but I still appreciate pets.',
'*purrrrrrrrrrrrrrr*.',
'you electrocute yourself trying to pet a computer.']
selfList = ['you give yourself a nice pat on the head.',
'too bad there\'s no one else to pet you.',
'in lieu of anything else to pet, you pet yourself.',
'your hair is warm and soft.']
memberList = [ 'you give *{}* a pat on the head.',
'you rub your hand through *{}\'s* hair.',
'*{}* smiles from your petting.',
'you try to pet *{}*, but miss because they hid under the bed.',
'*{}* purrs from your petting.',
'you pet *{}* but they bite your hand',
'you try to pet *{}* but they hiss and run away.']
itemList = ['you rub *{}* but it doesn\'t feel like a cat.',
'you don\'t hear any purring from *{}*.',
'you hurt your hand trying to pet *{}*.']
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def eat(self, ctx, *, member : str = None):
"""Eat like a boss."""
msg = self.eating.computeAction(self.eating, self.bot, ctx, member) #python is silly and makes me do this for uninitialized classes
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def drink(self, ctx, *, member : str = None):
"""Drink like a boss."""
msg = self.drinking.computeAction(self.drinking, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def boop(self, ctx, *, member : str = None):
"""Boop da snoot."""
msg = self.booping.computeAction(self.booping, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def spook(self, ctx, *, member : str = None):
"""sp00ktober by camiel."""
if datetime.date.today().month == 10:
# make it extra sp00py because it is spooktober
await ctx.message.add_reaction("🎃")
msg = self.spooky.computeAction(self.spooky, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def highfive(self, ctx, *, member : str = None):
"""High five like a boss."""
msg = self.highfives.computeAction(self.highfives, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def pet(self, ctx, *, member : str = None):
"""pet kitties."""
msg = self.petting.computeAction(self.petting, self.bot, ctx, member)
await ctx.channel.send(msg)
return
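Adding another action to this cog follows the same two steps as the existing ones: define an actionable subclass holding the message lists, then expose a command that calls computeAction. A hypothetical "hug" action (not in the original cog) would look like this, placed inside the Actions class alongside the others:
# Hypothetical sketch of a new action following the pattern above.
class hugging(actionable):
    nothingList = ['you hug the empty air...']
    botList = ['you hug me - my circuits feel warm.']
    selfList = ['you wrap your arms around yourself.']
    memberList = ['you give *{}* a warm hug.']
    itemList = ['you hug *{}* tightly.']

@commands.command(pass_context=True)
async def hug(self, ctx, *, member : str = None):
    """Hug like a boss."""
    msg = self.hugging.computeAction(self.hugging, self.bot, ctx, member)
    await ctx.channel.send(msg)
    return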
| 51.566176
| 138
| 0.654855
| 2,115
| 14,026
| 4.336643
| 0.277069
| 0.012211
| 0.009594
| 0.008395
| 0.222634
| 0.159834
| 0.15133
| 0.125818
| 0.115351
| 0.108373
| 0
| 0.011069
| 0.220662
| 14,026
| 271
| 139
| 51.756458
| 0.827921
| 0.063454
| 0
| 0.107296
| 0
| 0.025751
| 0.527412
| 0.006169
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012876
| false
| 0.025751
| 0.030043
| 0
| 0.107296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f46e1bb0a2bc679bb20e6fc52d23194cb01643
| 7,830
|
py
|
Python
|
marltoolbox/examples/tune_function_api/lola_pg_official.py
|
tobiasbaumann1/amd
|
cb6190be92dea54db04ef9202d381b96f6f6218b
|
[
"MIT"
] | null | null | null |
marltoolbox/examples/tune_function_api/lola_pg_official.py
|
tobiasbaumann1/amd
|
cb6190be92dea54db04ef9202d381b96f6f6218b
|
[
"MIT"
] | null | null | null |
marltoolbox/examples/tune_function_api/lola_pg_official.py
|
tobiasbaumann1/amd
|
cb6190be92dea54db04ef9202d381b96f6f6218b
|
[
"MIT"
] | null | null | null |
##########
# Additional dependencies are needed:
# Follow the LOLA installation described in the tune_class_api/lola_pg_official.py file
##########
import os
import ray
from ray import tune
import marltoolbox.algos.lola.envs as lola_envs
import marltoolbox.algos.lola_dice.envs as lola_dice_envs
from marltoolbox.algos.lola import train_cg, train_exact, train_pg
from marltoolbox.envs.vectorized_coin_game import CoinGame, AsymCoinGame
from marltoolbox.utils import log
def trainer_fn(exp_name, num_episodes, trace_length, exact, pseudo, grid_size,
lr, lr_correction, batch_size, bs_mul, simple_net, hidden, reg,
gamma, lola_update, opp_model, mem_efficient, seed, set_zero,
warmup, changed_config, ac_lr, summary_len, use_MAE,
use_toolbox_env, clip_lola_update_norm, clip_loss_norm, entropy_coeff,
weigth_decay, **kwargs):
# Instantiate the environment
if exp_name == "IPD":
env = lola_envs.IPD(trace_length)
elif exp_name == "IMP":
env = lola_envs.IMP(trace_length)
elif exp_name == "CoinGame":
if use_toolbox_env:
env = CoinGame(config={
"batch_size": batch_size,
"max_steps": trace_length,
"grid_size": grid_size,
"get_additional_info": True,
"add_position_in_epi": False,
})
else:
env = lola_dice_envs.CG(trace_length, batch_size, grid_size)
env.seed(seed)
elif exp_name == "AsymCoinGame":
if use_toolbox_env:
env = AsymCoinGame(config={
"batch_size": batch_size,
"max_steps": trace_length,
"grid_size": grid_size,
"get_additional_info": True,
"add_position_in_epi": False,
})
else:
env = lola_dice_envs.AsymCG(trace_length, batch_size, grid_size)
env.seed(seed)
else:
raise ValueError(f"exp_name: {exp_name}")
# Dispatch to the right training function
if exact:
train_exact.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
simple_net=simple_net,
corrections=lola_update,
pseudo=pseudo,
num_hidden=hidden,
reg=reg,
lr=lr,
lr_correction=lr_correction,
gamma=gamma)
elif exp_name in ("IPD", "IMP"):
train_pg.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
gamma=gamma,
set_zero=set_zero,
lr=lr,
corrections=lola_update,
simple_net=simple_net,
hidden=hidden,
mem_efficient=mem_efficient)
elif exp_name in ("CoinGame", "AsymCoinGame"):
train_cg.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
bs_mul=bs_mul,
gamma=gamma,
grid_size=grid_size,
lr=lr,
corrections=lola_update,
opp_model=opp_model,
hidden=hidden,
mem_efficient=mem_efficient,
asymmetry=exp_name == "AsymCoinGame",
warmup=warmup,
changed_config=changed_config,
ac_lr=ac_lr,
summary_len=summary_len,
use_MAE=use_MAE,
use_toolbox_env=use_toolbox_env,
clip_lola_update_norm=clip_lola_update_norm,
clip_loss_norm=clip_loss_norm,
entropy_coeff=entropy_coeff,
weigth_decay=weigth_decay,
)
else:
raise ValueError(f"exp_name: {exp_name}")
def lola_training(config):
trainer_fn(**config)
def get_tune_config(full_config: dict) -> dict:
# Sanity
assert full_config['exp_name'] in {"CoinGame", "IPD", "IMP", "AsymCoinGame"}
if full_config['exact']:
assert full_config['exp_name'] != "CoinGame", "Can't run CoinGame with --exact."
assert full_config['exp_name'] != "AsymCoinGame", "Can't run AsymCoinGame with --exact."
# Resolve default parameters
if full_config['exact']:
full_config['num_episodes'] = 50 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 200 if full_config['trace_length'] is None else full_config['trace_length']
full_config['lr'] = 1. if full_config['lr'] is None else full_config['lr']
elif full_config['exp_name'] in {"IPD", "IMP"}:
full_config['num_episodes'] = 600000 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 150 if full_config['trace_length'] is None else full_config['trace_length']
full_config['batch_size'] = 4000 if full_config['batch_size'] is None else full_config['batch_size']
full_config['lr'] = 1. if full_config['lr'] is None else full_config['lr']
elif full_config['exp_name'] == "CoinGame" or full_config['exp_name'] == "AsymCoinGame":
full_config['num_episodes'] = 100000 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 150 if full_config['trace_length'] is None else full_config['trace_length']
full_config['batch_size'] = 4000 if full_config['batch_size'] is None else full_config['batch_size']
full_config['lr'] = 0.005 if full_config['lr'] is None else full_config['lr']
if full_config['exp_name'] in ("IPD", "CoinGame", "AsymCoinGame"):
full_config['gamma'] = 0.96 if full_config['gamma'] is None else full_config['gamma']
elif full_config['exp_name'] == "IMP":
full_config['gamma'] = 0.9 if full_config['gamma'] is None else full_config['gamma']
return full_config
def main(debug):
exp_name, _ = log.log_in_current_day_dir(f"LOLA_PG")
tune_hparams = {
"exp_name": exp_name,
# Dynamically set
"num_episodes": 3 if debug else None,
"trace_length": 6 if debug else None,
"lr": None,
"gamma": None,
"batch_size": 12 if debug else None,
# "exp_name": "IPD",
# "exp_name": "IMP",
"exp_name": "CoinGame",
# "exp_name": "AsymCoinGame",
"pseudo": False,
"grid_size": 3,
"lola_update": True,
"opp_model": False,
"mem_efficient": True,
"lr_correction": 1,
"bs_mul": 1 / 10,
"simple_net": True,
"hidden": 32,
"reg": 0,
"set_zero": 0,
"exact": False,
"warmup": 1,
"seed": 1,
"changed_config": False,
"ac_lr": 1.0,
"summary_len": 1,
"use_MAE": False,
"use_toolbox_env": True,
"clip_loss_norm": False,
"clip_lola_update_norm": False,
"clip_lola_correction_norm": 3.0,
"clip_lola_actor_norm": 10.0,
"entropy_coeff": 0.001,
"weigth_decay": 0.03,
}
tune_config = get_tune_config(tune_hparams)
ray.init(num_cpus=os.cpu_count(), num_gpus=0)
tune_analysis = tune.run(lola_training, name=tune_hparams["exp_name"], config=tune_config)
ray.shutdown()
return tune_analysis
if __name__ == "__main__":
debug_mode = True
main(debug_mode)
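A minimal sketch of how get_tune_config resolves the dynamically set defaults above; the dict below only carries the keys the function touches and its values are illustrative, not taken from the script.
example_config = {
    "exp_name": "IPD",
    "exact": False,
    "num_episodes": None,
    "trace_length": None,
    "batch_size": None,
    "lr": None,
    "gamma": None,
}
resolved = get_tune_config(example_config)
# For the non-exact IPD case the defaults resolve as follows:
assert resolved["num_episodes"] == 600000 and resolved["batch_size"] == 4000
assert resolved["lr"] == 1.0 and resolved["gamma"] == 0.96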
| 37.826087
| 116
| 0.577395
| 937
| 7,830
| 4.493063
| 0.168623
| 0.12114
| 0.045606
| 0.04323
| 0.471734
| 0.4
| 0.354632
| 0.346556
| 0.313777
| 0.295249
| 0
| 0.013686
| 0.318774
| 7,830
| 206
| 117
| 38.009709
| 0.775591
| 0.038314
| 0
| 0.30303
| 0
| 0
| 0.153887
| 0.006134
| 0
| 0
| 0
| 0
| 0.018182
| 1
| 0.024242
| false
| 0
| 0.048485
| 0
| 0.084848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f5c78a68ce3ab44360536293de688747eefa47
| 1,327
|
py
|
Python
|
moto/dynamodbstreams/responses.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 3
|
2020-08-04T20:29:41.000Z
|
2020-11-09T09:28:19.000Z
|
moto/dynamodbstreams/responses.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 17
|
2020-08-28T12:53:56.000Z
|
2020-11-10T01:04:46.000Z
|
moto/dynamodbstreams/responses.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 2
|
2017-03-02T05:59:52.000Z
|
2020-09-03T13:25:44.000Z
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import dynamodbstreams_backends
from six import string_types
class DynamoDBStreamsHandler(BaseResponse):
@property
def backend(self):
return dynamodbstreams_backends[self.region]
def describe_stream(self):
arn = self._get_param("StreamArn")
return self.backend.describe_stream(arn)
def list_streams(self):
table_name = self._get_param("TableName")
return self.backend.list_streams(table_name)
def get_shard_iterator(self):
arn = self._get_param("StreamArn")
shard_id = self._get_param("ShardId")
shard_iterator_type = self._get_param("ShardIteratorType")
sequence_number = self._get_param("SequenceNumber")
# per the API documentation, the SequenceNumber parameter arrives as a string; coerce it to int for the backend
if isinstance(sequence_number, string_types):
sequence_number = int(sequence_number)
return self.backend.get_shard_iterator(
arn, shard_id, shard_iterator_type, sequence_number
)
def get_records(self):
arn = self._get_param("ShardIterator")
limit = self._get_param("Limit")
if limit is None:
limit = 1000
return self.backend.get_records(arn, limit)
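A hedged usage sketch for the handler above, assuming moto exposes a mock_dynamodbstreams decorator (the region and function name are illustrative, not taken from this file).
import boto3
from moto import mock_dynamodbstreams

@mock_dynamodbstreams
def list_stream_arns():
    # while mocked, boto3 calls are served by DynamoDBStreamsHandler above
    client = boto3.client("dynamodbstreams", region_name="us-east-1")
    return [s["StreamArn"] for s in client.list_streams()["Streams"]]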
| 32.365854
| 75
| 0.699322
| 155
| 1,327
| 5.670968
| 0.367742
| 0.063709
| 0.109215
| 0.047782
| 0.085324
| 0.063709
| 0
| 0
| 0
| 0
| 0
| 0.00388
| 0.22306
| 1,327
| 40
| 76
| 33.175
| 0.848691
| 0.048983
| 0
| 0.066667
| 0
| 0
| 0.065873
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.133333
| 0.033333
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f6462a075ffe065a5c5d813a1e145ed305cf7d
| 962
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py
|
ytorzuk-altran/openvino
|
68d460a3bb578a738ba0e4d0e1f2e321afa73ab0
|
[
"Apache-2.0"
] | 1
|
2021-04-20T08:14:51.000Z
|
2021-04-20T08:14:51.000Z
|
tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py
|
ytorzuk-altran/openvino
|
68d460a3bb578a738ba0e4d0e1f2e321afa73ab0
|
[
"Apache-2.0"
] | 55
|
2020-11-16T09:55:29.000Z
|
2022-03-28T13:18:15.000Z
|
tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py
|
ytorzuk-altran/openvino
|
68d460a3bb578a738ba0e4d0e1f2e321afa73ab0
|
[
"Apache-2.0"
] | 1
|
2021-02-15T01:13:57.000Z
|
2021-02-15T01:13:57.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.const import Const
class ZerosFrontExtractor(FrontExtractorOp):
op = '_zeros'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
shape = list(attrs.tuple('shape', int, None))
zero_shapes = []
for i, s in enumerate(shape):
if s == 0:
shape[i] = 1
zero_shapes.append(i)
update_attrs = {
'shape': np.ndarray(shape),
'value': np.zeros(shape),
'zero_shapes': zero_shapes
}
# update the attributes of the node
Const.update_node_stat(node, update_attrs)
return cls.enabled
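A small sketch of the shape handling inside extract() above: zero-sized dimensions are recorded in zero_shapes and replaced by 1 so a constant tensor can be materialized. The numbers are illustrative.
shape = [0, 3, 0]          # as parsed from the MXNet symbol attributes
zero_shapes = []
for i, s in enumerate(shape):
    if s == 0:
        shape[i] = 1
        zero_shapes.append(i)
assert shape == [1, 3, 1] and zero_shapes == [0, 2]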
| 28.294118
| 80
| 0.637214
| 120
| 962
| 4.975
| 0.558333
| 0.067002
| 0.085427
| 0.095477
| 0.080402
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017021
| 0.267152
| 962
| 33
| 81
| 29.151515
| 0.829787
| 0.115385
| 0
| 0
| 0
| 0
| 0.03778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f6ca3a52378c092fed2c8021d1ffb5c3d7441c
| 882
|
py
|
Python
|
SimpleSimulator/samuelator.py
|
Anindya-Prithvi/CO_M21_Assignment
|
524bd2b866dd58a6358354cda65e2136ecd46e50
|
[
"Apache-2.0"
] | 3
|
2021-09-11T05:58:46.000Z
|
2021-12-21T14:03:20.000Z
|
SimpleSimulator/samuelator.py
|
sc0rp10n-py/CO_M21_Assignment
|
524bd2b866dd58a6358354cda65e2136ecd46e50
|
[
"Apache-2.0"
] | null | null | null |
SimpleSimulator/samuelator.py
|
sc0rp10n-py/CO_M21_Assignment
|
524bd2b866dd58a6358354cda65e2136ecd46e50
|
[
"Apache-2.0"
] | 3
|
2021-09-05T12:55:38.000Z
|
2022-03-18T02:51:29.000Z
|
import sys
import warnings
import matplotlib.pyplot as plt
from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot
warnings.filterwarnings("ignore")
MEM = IMACC(sys.stdin.read()) # Load memory from stdin
PC = PROGC(0) # Start from the first instruction
RF = REGFLPC() # initialize register and flags
EE = ExecE(MEM)
IM = IMG()
halted = False
cycle = 0
if MEM.inst_mem == ["0" * 16 for i in range(256)]:
halted = True
while not halted:
Instruction = MEM.getData(PC) # Get current instruction
IM.imgx.append(cycle)
IM.imgy.append(PC.PC)
halted, new_PC, new_regs = EE.execute(Instruction, RF.asdct(), IM, cycle)
# Update RF compute new_PC
RF.update(new_regs, new_PC)
PC.dump()
# Print PC
RF.dump()
# Print RF state
PC.update(new_PC)
# Update PC
cycle += 1
MEM.dump() # Print memory state
# plotting
plot(plt, IM)
| 22.615385
| 77
| 0.672336
| 134
| 882
| 4.373134
| 0.492537
| 0.03413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012968
| 0.213152
| 882
| 38
| 78
| 23.210526
| 0.831412
| 0.222222
| 0
| 0
| 0
| 0
| 0.01037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f722d8fa5429ebec246908bcfdfc1e45bff80b
| 5,884
|
py
|
Python
|
utils/converters.py
|
LiReNa00/JDBot
|
c85b31e272d5394ba5debc26b8b5357fb9d3d844
|
[
"MIT"
] | null | null | null |
utils/converters.py
|
LiReNa00/JDBot
|
c85b31e272d5394ba5debc26b8b5357fb9d3d844
|
[
"MIT"
] | null | null | null |
utils/converters.py
|
LiReNa00/JDBot
|
c85b31e272d5394ba5debc26b8b5357fb9d3d844
|
[
"MIT"
] | null | null | null |
import discord
import re
import emoji
import contextlib
import typing
import datetime
from discord.ext import commands
from discord.http import Route
class BetterMemberConverter(commands.Converter):
async def convert(self, ctx, argument):
try:
user = await commands.MemberConverter().convert(ctx, argument)
except commands.MemberNotFound:
user = None
if user is None:
tag = re.match(r"#?(\d{4})", argument)
if tag:
if ctx.guild:
test = discord.utils.get(ctx.guild.members, discriminator=tag.group(1))
user = test or ctx.author
if ctx.guild is None:
user = await BetterUserconverter().convert(ctx, argument)
user = user or ctx.author
return user
class BetterUserconverter(commands.Converter):
async def convert(self, ctx, argument):
try:
user = await commands.UserConverter().convert(ctx, argument)
except commands.UserNotFound:
user = None
if not user and ctx.guild:
try:
user = await commands.MemberConverter().convert(ctx, argument)
except commands.MemberNotFound:
user = None
if user is None:
role = None
with contextlib.suppress(commands.RoleNotFound, commands.NoPrivateMessage):
role = await commands.RoleConverter().convert(ctx, argument)
if role:
if role.is_bot_managed():
user = role.tags.bot_id
user = await ctx.bot.try_user(user)
if user is None:
tag = re.match(r"#?(\d{4})", argument)
if tag and ctx.bot.users:  # search the cached users only when the cache is populated
test = discord.utils.get(ctx.bot.users, discriminator=tag.group(1))
user = test or ctx.author
return user
class EmojiBasic:
def __init__(self, id: int, url: str):
self.id = id
self.url = url
@classmethod
async def convert(cls, ctx, argument):
match = re.match(r"(?P<id>[0-9]{15,21})", argument)
if match:
emoji_id = match.group(0)
extentions = ["gif", "png"]
for x in extentions:
response = await ctx.bot.session.get(f"https://cdn.discordapp.com/emojis/{emoji_id}.{x}")
if response.ok:
return cls(emoji_id, response.real_url)
else:
return None
class EmojiConverter(commands.Converter):
async def convert(self, ctx: commands.Context, arg: str):
emojis = emoji.unicode_codes.EMOJI_UNICODE["en"].values()
try:
return await commands.PartialEmojiConverter().convert(ctx, arg)
except commands.PartialEmojiConversionFailure:
pass
if arg.rstrip("\N{variation selector-16}") in emojis or arg in emojis:
return discord.PartialEmoji(name=arg)
else:
raise commands.BadArgument(f"{arg} is not an emoji")
class ColorConverter(commands.Converter):
async def convert(self, ctx, argument):
try:
color = await commands.ColourConverter().convert(ctx, argument)
except commands.BadColourArgument:
color = None
if not color and not argument.isdigit():
argument = list(s for s in argument.split(" ") if s)
if color and argument.isdigit():
argument = int(argument)
if isinstance(argument, int):
if argument > 16777215:
await ctx.send(f"{argument} is not valid color, 16777215 will be used instead.")
argument = 16777215
color = discord.Colour(argument)
if isinstance(argument, list):
argument = sorted(filter(lambda x: x.isdigit(), argument))
argument = [int(n) for n in argument][:3]
try:
color = discord.Colour.from_rgb(*argument)
except TypeError:
color = None
if color:
if color.value > 16777215:
color = discord.Colour(16777215)
return color
def generate_snowflake(dt: typing.Optional[datetime.datetime] = None) -> int:
"""Returns a numeric snowflake pretending to be created at the given date but more accurate and random than time_snowflake.
If No dt is not passed, it makes one from the current time using utcnow.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
dt = dt or discord.utils.utcnow()
return int(dt.timestamp() * 1000 - 1420070400000) << 22 | 0x3FFFFF
class ObjectPlus(discord.Object):
@property
def worker_id(self) -> int:
""":class:`int`: Returns the worker id that made the snowflake."""
return (self.id & 0x3E0000) >> 17
@property
def process_id(self) -> int:
""":class:`int`: Returns the process id that made the snowflake."""
return (self.id & 0x1F000) >> 12
@property
def increment_id(self) -> int:
""":class:`int`: Returns the increment id that made the snowflake."""
return self.id & 0xFFF
class ObjectPlusConverter(commands.converter.IDConverter[commands.Converter]):
async def convert(self, ctx: commands.Context, argument: str) -> ObjectPlus:
match = self._get_id_match(argument) or re.match(r"<(?:@(?:!|&)?|#)([0-9]{15,20})>$", argument)
if match is None:
raise discord.errors.ObjectNotFound(argument)
result = int(match.group(1))
return ObjectPlus(id=result)
# remove if edpy adds my pull request into the master.
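A worked example of the bit layout used by generate_snowflake and ObjectPlus above: the low 22 bits hold the worker, process and increment fields, and the remaining bits are the millisecond offset from Discord's epoch (2015-01-01). Pure arithmetic, no discord.py needed; the offset value is illustrative.
DISCORD_EPOCH_MS = 1420070400000
snowflake = (1234567890 << 22) | 0x3FFFFF          # same shape as generate_snowflake's result
worker_id = (snowflake & 0x3E0000) >> 17           # -> 31
process_id = (snowflake & 0x1F000) >> 12           # -> 31
increment_id = snowflake & 0xFFF                   # -> 4095
timestamp_ms = (snowflake >> 22) + DISCORD_EPOCH_MS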
| 31.132275
| 127
| 0.593644
| 683
| 5,884
| 5.080527
| 0.29429
| 0.0317
| 0.025937
| 0.036023
| 0.274352
| 0.243228
| 0.231412
| 0.208069
| 0.178674
| 0.10951
| 0
| 0.023926
| 0.303875
| 5,884
| 188
| 128
| 31.297872
| 0.823242
| 0.114208
| 0
| 0.262295
| 0
| 0
| 0.045472
| 0.006218
| 0
| 0
| 0.005441
| 0
| 0
| 1
| 0.040984
| false
| 0.008197
| 0.065574
| 0
| 0.262295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4f91839d0ba937bffd97ff3a607f1dad1fc55ad
| 1,690
|
py
|
Python
|
distanceProfile.py
|
ZiyaoWei/pyMatrixProfile
|
1c88e1558e2bc5210d328d253572f5ff7fab1a5e
|
[
"MIT"
] | 29
|
2017-08-13T04:24:16.000Z
|
2021-12-24T07:51:08.000Z
|
Matrix Profile/Implementation/pyMatrixProfile-master/distanceProfile.py
|
rakesh-lagare/Thesis_Work
|
733285eae31a3fd8b613ec30d9e2ab9befd57614
|
[
"Apache-2.0"
] | 2
|
2018-02-12T11:58:53.000Z
|
2018-08-20T19:51:47.000Z
|
Matrix Profile/Implementation/pyMatrixProfile-master/distanceProfile.py
|
rakesh-lagare/Thesis_Work
|
733285eae31a3fd8b613ec30d9e2ab9befd57614
|
[
"Apache-2.0"
] | 15
|
2017-08-19T23:16:45.000Z
|
2019-09-21T04:53:43.000Z
|
import numpy as np
from util import *
def naiveDistanceProfile(tsA, idx, m, tsB = None):
"""Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.
>>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
distanceProfile = []
n = len(tsB)
for i in range(n - m + 1):
distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
if selfJoin:
trivialMatchRange = (max(0, idxToProcess - m / 2), min(idxToProcess + m / 2 + 1, len(tsB)))
distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
def stampDistanceProfile(tsA, idx, m, tsB = None):
"""
>>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
n = len(tsB)
distanceProfile = mass(query, tsB)
if selfJoin:
# exclude the trivial match zone around the query (the original referenced an undefined idxToProcess)
trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
if __name__ == "__main__":
import doctest
doctest.testmod()
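A hedged usage sketch for the functions above, assuming util provides zNormalizedEuclideanDistance (and mass for the STAMP variant); only the self-join path is shown and the series values are illustrative.
ts = np.array([0.0, 1.0, -1.0, 0.0, 1.0, -1.0, 0.5])
dp, idxs = stampDistanceProfile(ts, 0, 4)   # tsB omitted -> self-join
# entries around index 0 are set to np.inf, excluding the trivial match with the query itself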
| 34.489796
| 112
| 0.56213
| 227
| 1,690
| 4.14978
| 0.277533
| 0.029724
| 0.019108
| 0.021231
| 0.602972
| 0.573248
| 0.573248
| 0.573248
| 0.573248
| 0.573248
| 0
| 0.054694
| 0.275148
| 1,690
| 48
| 113
| 35.208333
| 0.714286
| 0.264497
| 0
| 0.645161
| 0
| 0
| 0.006683
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.096774
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4fae683109b51c37a205d6ed228be7bbb86f029
| 7,868
|
py
|
Python
|
vnTrader/uiMainWindow.py
|
bttt123/TradeSim
|
2374b0925d34d8fb299095250c5c8834192848ce
|
[
"Apache-2.0"
] | null | null | null |
vnTrader/uiMainWindow.py
|
bttt123/TradeSim
|
2374b0925d34d8fb299095250c5c8834192848ce
|
[
"Apache-2.0"
] | null | null | null |
vnTrader/uiMainWindow.py
|
bttt123/TradeSim
|
2374b0925d34d8fb299095250c5c8834192848ce
|
[
"Apache-2.0"
] | 1
|
2022-03-29T21:57:31.000Z
|
2022-03-29T21:57:31.000Z
|
# encoding: UTF-8
from builtins import str
import psutil
# import sys
# PyQt 4/5 compatibility
try:
from PyQt4.QtGui import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt4 import QtCore
except ImportError:
from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt5 import QtCore
from uiBasicWidget import *
import uiBasicWidget as wgs
#from . import uiBasicWidget as wgs
########################################################################
class MainWindow(QMainWindow):
"""主窗口"""
signalStatusBar = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, app, sheets):
"""Constructor"""
super(MainWindow, self).__init__()
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.app = app
self.sheets = sheets
self.widgetDict = {}  # dict holding references to child windows
self.initUi()
self.eventEngine.register(EVENT_TITLE, self.updateTitle)
self.sid = None
def updateTitle(self, event):
(user, stratid) = event.dict_['data']
#self.setWindowTitle('VnTrader: ' + str(user) + "/" + str(stratid))
self.sid = stratid
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle('VnTrader')
self.initCentral()
self.initMenu()
# self.initStatusBar()
def showLogin(self):
self.connectQuantOS()
# ----------------------------------------------------------------------
def initCentral(self):
"""初始化中心区域"""
widgetTradingW, dockTradingW = self.createDock(wgs.TradingWidget, u'交易', QtCore.Qt.LeftDockWidgetArea)
widgetMarketM, dockMarketM = self.createDock(wgs.MarketMonitor, u'行情', QtCore.Qt.RightDockWidgetArea)
widgetPositionM, dockPositionM = self.createDock(wgs.PositionMonitor, u'持仓', QtCore.Qt.RightDockWidgetArea)
widgetAccountM, dockAccountM = self.createDock(wgs.AccountMonitor, u'资金', QtCore.Qt.BottomDockWidgetArea)
widgetContractM, dockContractM = self.createDock(wgs.ContractMonitor, u'合约', QtCore.Qt.BottomDockWidgetArea)
widgetLogM, dockLogM = self.createDock(wgs.LogMonitor, u'日志', QtCore.Qt.BottomDockWidgetArea)
widgetTradeM, dockTradeM = self.createDock(wgs.TradeMonitor, u'成交', QtCore.Qt.BottomDockWidgetArea)
widgetOrderM, dockOrderM = self.createDock(wgs.OrderMonitor, u'委托', QtCore.Qt.BottomDockWidgetArea)
self.tabifyDockWidget(dockContractM, dockTradeM)
self.tabifyDockWidget(dockTradeM, dockOrderM)
self.tabifyDockWidget(dockAccountM, dockLogM)
dockOrderM.raise_()
dockLogM.raise_()
# connect signals between widgets
widgetPositionM.itemDoubleClicked.connect(widgetTradingW.closePosition)
widgetMarketM.itemDoubleClicked.connect(widgetTradingW.fillSymbol)
# ----------------------------------------------------------------------
def initMenu(self):
"""初始化菜单"""
# create actions
connectQuantOSAction = QAction(u'连接和切换策略', self)
connectQuantOSAction.triggered.connect(self.connectQuantOS)
exitAction = QAction(u'退出', self)
exitAction.triggered.connect(self.close)
aboutAction = QAction(u'关于', self)
aboutAction.triggered.connect(self.openAbout)
colorAction = QAction(u'变色', self)
colorAction.triggered.connect(self.changeColor)
# create the menu bar
menubar = self.menuBar()
# only show gateways that actually exist
sysMenu = menubar.addMenu(u'系统')
if 'quantos' in self.mainEngine.gatewayDict:
sysMenu.addAction(connectQuantOSAction)
sysMenu.addSeparator()
sysMenu.addAction(exitAction)
# help menu
helpMenu = menubar.addMenu(u'帮助')
helpMenu.addAction(aboutAction)
helpMenu.addAction(colorAction)
# ----------------------------------------------------------------------
def initStatusBar(self):
"""初始化状态栏"""
self.statusLabel = QLabel()
self.statusLabel.setAlignment(QtCore.Qt.AlignLeft)
self.statusBar().addPermanentWidget(self.statusLabel)
self.statusLabel.setText(self.getCpuMemory())
self.sbCount = 0
self.sbTrigger = 10  # refresh every 10 seconds
self.signalStatusBar.connect(self.updateStatusBar)
self.eventEngine.register(EVENT_TIMER, self.signalStatusBar.emit)
# ----------------------------------------------------------------------
def updateStatusBar(self, event):
"""在状态栏更新CPU和内存信息"""
self.sbCount += 1
if self.sbCount == self.sbTrigger:
self.sbCount = 0
self.statusLabel.setText(self.getCpuMemory())
# ----------------------------------------------------------------------
def getCpuMemory(self):
"""获取CPU和内存状态信息"""
cpuPercent = psutil.cpu_percent()
memoryPercent = psutil.virtual_memory().percent
return u'CPU使用率:%d%% 内存使用率:%d%%' % (cpuPercent, memoryPercent)
# ----------------------------------------------------------------------
def connectQuantOS(self):
self.mainEngine.connect('quantos')
# ----------------------------------------------------------------------
def openAbout(self):
"""打开关于"""
try:
self.widgetDict['aboutW'].show()
except KeyError:
self.widgetDict['aboutW'] = AboutWidget(self)
self.widgetDict['aboutW'].show()
# ----------------------------------------------------------------------
def closeEvent(self, event):
"""关闭事件"""
reply = QMessageBox.question(self, u'退出',
u'确认退出?', QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
for widget in list(self.widgetDict.values()):
widget.close()
self.mainEngine.exit()
event.accept()
else:
event.ignore()
# ----------------------------------------------------------------------
def createDock(self, widgetClass, widgetName, widgetArea):
"""创建停靠组件"""
widget = widgetClass(self.mainEngine, self.eventEngine)
dock = QDockWidget(widgetName)
dock.setWidget(widget)
dock.setObjectName(widgetName)
dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
self.addDockWidget(widgetArea, dock)
return widget, dock
def changeColor(self):
self.app.setStyleSheet(self.sheets[1])
self.sheets = [self.sheets[1], self.sheets[0]]
########################################################################
class AboutWidget(QDialog):
"""显示关于信息"""
# ----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(AboutWidget, self).__init__(parent)
self.initUi()
# ----------------------------------------------------------------------
def initUi(self):
""""""
self.setWindowTitle(u'关于VnTrader')
text = u"""
quantos trade client
"""
label = QLabel()
label.setText(text)
label.setMinimumWidth(500)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.setLayout(vbox)
| 35.441441
| 121
| 0.521734
| 596
| 7,868
| 6.848993
| 0.360738
| 0.017638
| 0.033317
| 0.017148
| 0.070554
| 0.041646
| 0.041646
| 0.041646
| 0.041646
| 0.041646
| 0
| 0.003401
| 0.252669
| 7,868
| 221
| 122
| 35.60181
| 0.690816
| 0.159507
| 0
| 0.09375
| 0
| 0
| 0.025738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.070313
| 0
| 0.234375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4fb4e3677b230700c8377c0c0d538eea2ac4e41
| 9,431
|
py
|
Python
|
line_notify_core.py
|
ficgra/PChome-alertor
|
5f4e798e3130c170eb75e03215128590ed02dcf9
|
[
"Apache-2.0"
] | 1
|
2021-06-16T00:36:22.000Z
|
2021-06-16T00:36:22.000Z
|
line_notify_core.py
|
ficgra/PChome-alertor
|
5f4e798e3130c170eb75e03215128590ed02dcf9
|
[
"Apache-2.0"
] | null | null | null |
line_notify_core.py
|
ficgra/PChome-alertor
|
5f4e798e3130c170eb75e03215128590ed02dcf9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import requests
import json
import re
from flask import Flask, request, abort
import mysql.connector as mariadb
from mysql.connector import Error
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage, FollowEvent,
)
app = Flask(__name__)
line_bot_api = LineBotApi('')
handler = WebhookHandler('')
@app.route("/", methods=['GET'])
def index():
return 'OK!'
# LINE official-account /callback test event
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
# Event fired when the LINE official account receives a message
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
get_message = event.message.text
print(get_message)
user_id = event.source.user_id
register_url = 'https://notify-bot.line.me/oauth/authorize?response_type=code&scope=notify&response_mode=form_post&client_id="id"&redirect_uri=https://line.husan.cc/register&state=' + user_id
mage = re.split(r'[\s]\s*',get_message)
try:
if mage[0] == "註冊":
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=register_url))
elif 'add' == mage[0]:
try:
notice = add_item(mage[1],user_id,mage[2])
except:
notice = add_item(mage[1],user_id,None)
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'del' == mage[0]:
notice = del_item(mage[1],user_id)
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'list' == mage[0]:
item_list ,price_list= search_sub(user_id)
notice = '您訂閱的項目有:'
for i in range(len(item_list)):
notice+='\n'
notice=notice + item_list[i] +'\t' +str(price_list[i])
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'send' == mage[0]:
acc_token = get_notify_id(user_id)
status = sent_message(mage[1],acc_token)
if status == 200:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='send OK!'))
else:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='請輸入指令:\nlist \n└查詢通知項目。\nadd 商品ID 價格 \n└新增商品通知,低於設定價格時通知。\nEX:add DYAJID-A900AVJ4G 500\ndel 商品ID \n└刪除商品通知。\nEX:del DYAJID-A900AVJ4G'))
except BaseException as e:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='指令錯誤,請重新確認!'))
print(e)
# get user id when reply
user_id = event.source.user_id
print("user_id =", user_id)
profile = line_bot_api.get_profile(user_id)
# LINE Notify POSTs to /register during registration
@app.route("/register",methods=['POST']) #註冊事件
def register():
if request.method == 'POST':
code = request.form.get('code')  # exchange this code for an access_token
print("code = ", code)
state = request.form.get('state')  # state carries the user_id
print("user_id = ",state)
profile = line_bot_api.get_profile(state)
user_name = profile.display_name
print("username = ",user_name) #帳號名稱
access_token = get_token(code)  # obtain the access_token used to push messages to this user
print("access_token = ",access_token)
r_code = send_test_message(access_token)  # send a test notification
if r_code == 200:
save_profile(user_name, code, state, access_token)  # persist to the database
return '發送成功'
else:
return '發送失敗'
# send a notification when the user adds the bot as a friend
@handler.add(FollowEvent)
def handle_follow(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="感謝訂閱!請輸入\"註冊\"啟動服務。"))
# POST the user's code to notify-bot to obtain an access_token
def get_token(code):
headers = {
"Content-Type":"application/x-www-form-urlencoded"
}
params = {
"grant_type":"authorization_code",
"code": code,
"redirect_uri":"https://line.husan.cc/register", # host_ip
"client_id":"client_id", #notify client_id
"client_secret":"client_secret" #notify client_secret
}
r = requests.post('https://notify-bot.line.me/oauth/token',headers=headers,params=params)
source = json.loads(r.text)
access_token = source['access_token']
return access_token
# send a test message to the user's LINE Notify
def send_test_message(access_token):
headers = {
"Authorization":"Bearer " + str(access_token),
"Content-Type":"application/x-www-form-urlencoded",
"notificationDisabled":"True"
}
params = {
"message":"\n帳號連結成功"
}
r = requests.post("https://notify-api.line.me/api/notify",headers=headers,params=params)
return r.status_code
# store the user profile in the database
def save_profile(username, code, user_id, access_token):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
db_Info = connection.get_server_info()
print("資料庫版本:", db_Info)
cursor = connection.cursor()
cursor.execute("INSERT INTO user_info (id, username, code, user_id, access_token) VALUES (null,'%s','%s','%s','%s')"%(username, code, user_id, access_token))
connection.commit()  # commit
cursor.execute("SELECT * FROM user_info")
# list the queried rows
for i in cursor:
print(i)
except Error as e:
print("資料庫連接失敗0:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
#print("資料庫連線已關閉")
# add a subscription item
def add_item(item_id, user_id,w_price):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
acc_token = get_notify_id(user_id)
try:
cursor.execute("INSERT INTO sub_list (item_id, w_price ,user_id, acc_token) VALUES ('%s','%d','%s','%s')"%(item_id, int(w_price) ,user_id, acc_token))
except:
cursor.execute("INSERT INTO sub_list (item_id,user_id, acc_token) VALUES ('%s','%s','%s')"%(item_id ,user_id, acc_token))
connection.commit()  # commit
return 'Add Done!'
except Error as e:
print("資料庫連接失敗2:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# delete a subscription item
def del_item(item_id, user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("DELETE FROM sub_list WHERE item_id = '%s' AND user_id = '%s'"%(item_id,user_id))
connection.commit()  # commit
return 'Delete Done!'
except Error as e:
print("資料庫連接失敗3:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# look up the user's subscribed items
def search_sub(user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("SELECT item_id , w_price FROM sub_list WHERE user_id LIKE '%s'"%(user_id))
sub_item = cursor.fetchall()
price_list = [item[1] for item in sub_item]
item_list = [item[0] for item in sub_item]
return item_list,price_list
except Error as e:
print("資料庫連接失敗1:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# get the user's LINE Notify access_token
def get_notify_id(user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
cursor.execute("SELECT access_token FROM user_info WHERE user_id LIKE '%s'"%(user_id))
acc_token = cursor.fetchall()
return acc_token[0][0]
except Error as e:
print("資料庫連接失敗4:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# send a message
def sent_message(message,access_token):
headers = {
"Authorization":"Bearer " + access_token,
"Content-Type":"application/x-www-form-urlencoded"
}
params = {
"message":message
}
r = requests.post("https://notify-api.line.me/api/notify",headers=headers,params=params)
print(r.status_code)
return r.status_code
if __name__ == "__main__":
app.run('0.0.0.0',port=3000)
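The database helpers above build SQL with Python string interpolation; a hedged alternative sketch using mysql.connector's parameter binding is shown below (the function name is illustrative and the connection is assumed to be opened as in the code above).
def find_access_token(connection, user_id):
    # %s placeholders let mysql.connector escape values instead of relying on string formatting
    cursor = connection.cursor()
    cursor.execute("SELECT access_token FROM user_info WHERE user_id = %s", (user_id,))
    row = cursor.fetchone()
    cursor.close()
    return row[0] if row else None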
| 35.190299
| 214
| 0.626021
| 1,173
| 9,431
| 4.846547
| 0.208014
| 0.035884
| 0.019349
| 0.040457
| 0.480739
| 0.441337
| 0.358487
| 0.321196
| 0.299384
| 0.281442
| 0
| 0.015811
| 0.24218
| 9,431
| 267
| 215
| 35.322097
| 0.779208
| 0.054183
| 0
| 0.364055
| 0
| 0.018433
| 0.199887
| 0.019584
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059908
| false
| 0.023041
| 0.041475
| 0.004608
| 0.152074
| 0.073733
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4fd04698f7477aacd1d458ba68e94970c4579ef
| 1,143
|
py
|
Python
|
sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py
|
MachineLP/SFC_models
|
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
|
[
"Apache-2.0"
] | 21
|
2016-11-03T12:30:50.000Z
|
2022-03-24T06:54:14.000Z
|
sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py
|
MachineLP/SFC_models
|
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
|
[
"Apache-2.0"
] | 1
|
2019-04-02T02:01:27.000Z
|
2019-04-07T21:07:10.000Z
|
sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py
|
MachineLP/SFC_models
|
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
|
[
"Apache-2.0"
] | 12
|
2016-11-03T12:30:57.000Z
|
2021-09-14T23:08:23.000Z
|
# coding=utf-8
from sfc_models.objects import *
from sfc_models.examples.Quick2DPlot import Quick2DPlot
register_standard_logs('output', __file__)
mod = Model()
country = Country(mod, 'CO')
Household(country, 'HH')
ConsolidatedGovernment(country, 'GOV')
FixedMarginBusiness(country, 'BUS', profit_margin=.025)
Market(country, 'GOOD')
Market(country, 'LAB')
TaxFlow(country, 'TAX', taxrate=.2)
# At time period 25, cut spending to 17 (from 20)
mod.AddExogenous('GOV', 'DEM_GOOD', [20.,]* 25 + [17.,]*20)
mod.AddGlobalEquation('DEBT_GDP', 'DEBT-TO-GDP RATIO', '-100.*GOV__F/BUS__SUP_GOOD')
mod.AddGlobalEquation('DEFICIT', 'DEFICIT', '-1.*GOV__INC')
mod.EquationSolver.MaxTime = 40
mod.main()
k = mod.GetTimeSeries('k')
Rat = mod.GetTimeSeries('DEBT_GDP')
Def = mod.GetTimeSeries('GOV__INC')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')
p = Quick2DPlot([k, k], [spend, Def], title='Spending and Deficit', filename='intro_X_XX_multiplier_deficit.png',
run_now=False)
p.Legend = ['G', 'Deficit']
p.LegendPos = 'center left'
p.DoPlot()
Quick2DPlot(k, Rat, title='Debt-to-GDP Ratio', filename='intro_X_XX_multiplier_debt_gdp.png')
| 34.636364
| 113
| 0.727909
| 161
| 1,143
| 4.956522
| 0.515528
| 0.080201
| 0.032581
| 0.035088
| 0.065163
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028293
| 0.103237
| 1,143
| 32
| 114
| 35.71875
| 0.750244
| 0.052493
| 0
| 0
| 0
| 0
| 0.247222
| 0.086111
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d4fe0f781e9f3139abc2757c5c86104cc2181049
| 4,135
|
py
|
Python
|
auth_framework/settings.py
|
DrChai/django-auth-framework
|
4f9a108de66fe102ff28518b6597ad26b5855518
|
[
"BSD-2-Clause"
] | null | null | null |
auth_framework/settings.py
|
DrChai/django-auth-framework
|
4f9a108de66fe102ff28518b6597ad26b5855518
|
[
"BSD-2-Clause"
] | null | null | null |
auth_framework/settings.py
|
DrChai/django-auth-framework
|
4f9a108de66fe102ff28518b6597ad26b5855518
|
[
"BSD-2-Clause"
] | null | null | null |
from importlib import import_module
from django.conf import settings
from django.core.signals import setting_changed
SOCIALACCOUNT_MODEL = getattr(settings, "REST_AUTH_SOCIALACCOUNT_MODEL", "auth_framework.SocialAccount")
DEFAULTS = {
'UNIQUE_EMAIL': True,
'RESET_PASSWORD_BY': 'pin', # 'url'| 'pin'
'SERIALIZERS': {
# 'SOCIAL_LOGIN_SERIALIZER': 'auth.social.serializers.DefaultSocialLoginSerializer',
'SIGNUP_SERIALIZER': 'auth_framework.serializers.signup_serializers.DefaultSignUpSerializer',
'USERINFO_SERIALIZER': None
},
'SOCIALACCOUNT_MODEL': SOCIALACCOUNT_MODEL,
'SOCIALACCOUNT_ADMIN_CLASS': "auth_framework.admin.SocialAccountAdmin",
# SOCIAL LOGINS
'SOCIAL_CALLBACK_URL': None, # eg: 'https://developers.google.com/oauthplayground'
'SOCIAL_AUTO_SIGNUP': False,
# SIGN UP
# 'SIGNUP_EMAIL_VERIFICATION': 'none',  # email-verification Celery task was trimmed out of the open-source release (rarely used)
'SIGNUP_USERNAME_REQUIRED': False,
'SIGNUP_USERNAME_VALIDATORS': [],
'USE_PASSWORD_TWICE_VALIDATION': True,
# ADVANCES
'USE_PHONENUMBER_FIELD': False,
'USE_CELERY_EMAIL': False,
'USE_ID_TOKEN': True,
'OAUTH_SAVE_ID_TOKEN': False
}
def import_callable(path_or_callable):
if path_or_callable is None:
return None
if hasattr(path_or_callable, '__call__'):
return path_or_callable
else:
assert isinstance(path_or_callable, str)
package, attr = path_or_callable.rsplit('.', 1)
return getattr(import_module(package), attr)
class AuthSettings:
"""
"""
def __init__(self, user_settings=None, defaults=None):
if user_settings:
self._user_settings = user_settings
self.defaults = defaults or DEFAULTS
self._cached_attrs = set()
@property
def user_settings(self):
if not hasattr(self, '_user_settings'):
self._user_settings = getattr(settings, 'AUTH_FRAMEWORK', {})
return self._user_settings
@property
def username_validators(self):
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model
validators = self.user_settings.get("SIGNUP_USERNAME_VALIDATORS", None)
if validators:
ret = []
if not isinstance(validators, list):
raise ImproperlyConfigured(
"SIGNUP_USERNAME_VALIDATORS is expected to be a list"
)
for path in validators:
pkg, attr = path.rsplit(".", 1)
validator = getattr(import_module(pkg), attr)
ret.append(validator())
else:
ret = (
get_user_model()._meta.get_field('username').validators
)
return ret
def serializers(self, data):
# Check if present in user settings
for key, value in data.items():
data[key] = import_callable(value)
return data
def __getattr__(self, attr):
if attr not in self.defaults:
raise AttributeError("Invalid setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
if isinstance(val, dict):
val = self.defaults[attr].copy()
val.update(self.user_settings[attr])
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
if attr == 'SERIALIZERS':
val = self.serializers(val)
# Cache the result
self._cached_attrs.add(attr)
setattr(self, attr, val)
return val
def reload(self):
for attr in self._cached_attrs:
delattr(self, attr)
self._cached_attrs.clear()
if hasattr(self, '_user_settings'):
delattr(self, '_user_settings')
app_settings = AuthSettings(None, DEFAULTS)
def reload_app_settings(*args, **kwargs):
setting = kwargs['setting']
if setting == 'AUTH_FRAMEWORK':
app_settings.reload()
setting_changed.connect(reload_app_settings)
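A minimal sketch of how a host project would override these defaults from its Django settings module; the keys come from DEFAULTS above, while the values and serializer path are illustrative.
# settings.py of the host project (illustrative values)
AUTH_FRAMEWORK = {
    "RESET_PASSWORD_BY": "url",
    "SERIALIZERS": {
        "USERINFO_SERIALIZER": "myapp.serializers.UserInfoSerializer",  # hypothetical path
    },
}
# app_settings.RESET_PASSWORD_BY -> "url"; keys not listed fall back to DEFAULTS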
| 33.08
| 117
| 0.641112
| 450
| 4,135
| 5.628889
| 0.322222
| 0.071062
| 0.063166
| 0.015792
| 0.044216
| 0.022108
| 0
| 0
| 0
| 0
| 0
| 0.000657
| 0.264087
| 4,135
| 124
| 118
| 33.346774
| 0.831745
| 0.095768
| 0
| 0.042553
| 0
| 0
| 0.181965
| 0.092059
| 0
| 0
| 0
| 0
| 0.010638
| 1
| 0.085106
| false
| 0.021277
| 0.095745
| 0
| 0.265957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be0099fd02ee40c6a15038fa8158d18b025dd23d
| 3,218
|
py
|
Python
|
tests/test_sqlite_wrapper.py
|
Privex/python-db
|
3b46b34b4310973e2e2a30a66adaa853fd10340d
|
[
"X11"
] | 1
|
2019-12-19T13:12:53.000Z
|
2019-12-19T13:12:53.000Z
|
tests/test_sqlite_wrapper.py
|
Privex/python-db
|
3b46b34b4310973e2e2a30a66adaa853fd10340d
|
[
"X11"
] | 9
|
2020-02-24T20:14:53.000Z
|
2021-04-30T21:51:04.000Z
|
tests/test_sqlite_wrapper.py
|
Privex/python-db
|
3b46b34b4310973e2e2a30a66adaa853fd10340d
|
[
"X11"
] | null | null | null |
"""
Tests related to :class:`.SqliteWrapper` / :class:`.ExampleWrapper`
"""
# from unittest import TestCase
from tests.base import *
class TestSQLiteWrapper(PrivexDBTestBase):
def test_tables_created(self):
w = self.wrp
self.assertEqual(w.db, ':memory:')
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
def test_tables_drop(self):
w = self.wrp
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
w.drop_schemas()
tables = w.list_tables()
self.assertNotIn('users', tables)
self.assertNotIn('items', tables)
def test_insert_find_user(self):
w = self.wrp
w.query_mode = 'flat'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user[1], 'John')
self.assertEqual(user[2], 'Doe')
def test_action_update(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
last_id = res.lastrowid
rows = w.action("UPDATE users SET last_name = ? WHERE first_name = ?", ['Smith', 'John'])
self.assertEqual(rows, 1)
john = w.find_user(last_id)
self.assertEqual(john['last_name'], 'Smith')
def test_find_user_dict_mode(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'John')
self.assertEqual(user['last_name'], 'Doe')
def test_find_user_nonexistent(self):
w = self.wrp
user = w.find_user(99)
self.assertIsNone(user)
def test_get_users_tuple(self):
w = self.wrp
w.query_mode = 'flat'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0][1], 'John')
self.assertEqual(users[1][1], 'Jane')
self.assertEqual(users[1][2], 'Doe')
self.assertEqual(users[2][2], 'Johnson')
def test_get_users_dict(self):
w = self.wrp
w.query_mode = 'dict'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0]['first_name'], 'John')
self.assertEqual(users[1]['first_name'], 'Jane')
self.assertEqual(users[1]['last_name'], 'Doe')
self.assertEqual(users[2]['last_name'], 'Johnson')
def test_insert_helper(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert('users', first_name='Dave', last_name='Johnson')
self.assertEqual(res.lastrowid, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'Dave')
self.assertEqual(user['last_name'], 'Johnson')
| 31.242718
| 97
| 0.579863
| 402
| 3,218
| 4.4801
| 0.164179
| 0.183232
| 0.044975
| 0.059967
| 0.576346
| 0.445308
| 0.445308
| 0.445308
| 0.401999
| 0.401999
| 0
| 0.009362
| 0.269733
| 3,218
| 102
| 98
| 31.54902
| 0.757021
| 0.030454
| 0
| 0.512821
| 0
| 0
| 0.115681
| 0
| 0
| 0
| 0
| 0
| 0.371795
| 1
| 0.115385
| false
| 0
| 0.012821
| 0
| 0.141026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be00d24937df6595d3c59f1ae767515161b8f7ef
| 5,320
|
py
|
Python
|
var/spack/repos/builtin/packages/strumpack/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/strumpack/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/strumpack/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Strumpack(CMakePackage, CudaPackage):
"""STRUMPACK -- STRUctured Matrix PACKage - provides linear solvers
for sparse matrices and for dense rank-structured matrices, i.e.,
matrices that exhibit some kind of low-rank property. It provides a
distributed memory fully algebraic sparse solver and
preconditioner. The preconditioner is mostly aimed at large sparse
linear systems which result from the discretization of a partial
differential equation, but is not limited to any particular type of
problem. STRUMPACK also provides preconditioned GMRES and BiCGStab
iterative solvers."""
homepage = "http://portal.nersc.gov/project/sparse/strumpack"
url = "https://github.com/pghysels/STRUMPACK/archive/v4.0.0.tar.gz"
git = "https://github.com/pghysels/STRUMPACK.git"
maintainers = ['pghysels']
version('master', branch='master')
version('5.0.0', sha256='bdfd1620ff7158d96055059be04ee49466ebaca8213a2fdab33e2d4571019a49')
version('4.0.0', sha256='a3629f1f139865c74916f8f69318f53af6319e7f8ec54e85c16466fd7d256938')
version('3.3.0', sha256='499fd3b58656b4b6495496920e5372895861ebf15328be8a7a9354e06c734bc7')
version('3.2.0', sha256='34d93e1b2a3b8908ef89804b7e08c5a884cbbc0b2c9f139061627c0d2de282c1')
version('3.1.1', sha256='c1c3446ee023f7b24baa97b24907735e89ce4ae9f5ef516645dfe390165d1778')
variant('shared', default=False, description='Build shared libraries')
variant('mpi', default=True, description='Use MPI')
variant('openmp', default=True,
description='Enable thread parallellism via tasking with OpenMP')
variant('cuda', default=True,
description='Enable CUDA support')
variant('parmetis', default=True,
description='Enable use of ParMetis')
variant('scotch', default=False,
description='Enable use of Scotch')
variant('butterflypack', default=True,
description='Enable use of ButterflyPACK')
variant('zfp', default=True,
description='Build with support for compression using ZFP')
variant('c_interface', default=True,
description='Enable C interface')
variant('count_flops', default=False,
description='Build with flop counters')
variant('task_timers', default=False,
description='Build with timers for internal routines')
variant('build_dev_tests', default=False,
description='Build developer test routines')
variant('build_tests', default=False,
description='Build test routines')
# TODO: add a slate variant
depends_on('cmake@3.11:', type='build')
depends_on('mpi', when='+mpi')
depends_on('blas')
depends_on('lapack')
depends_on('scalapack', when='+mpi')
depends_on('metis')
depends_on('parmetis', when='+parmetis')
depends_on('scotch~metis', when='+scotch')
depends_on('scotch~metis+mpi', when='+scotch+mpi')
depends_on('butterflypack@1.1.0', when='@3.3.0:3.9.999 +butterflypack+mpi')
depends_on('butterflypack@1.2.0:', when='@4.0.0: +butterflypack+mpi')
depends_on('cuda', when='@4.0.0: +cuda')
depends_on('zfp', when='+zfp')
conflicts('+parmetis', when='~mpi')
conflicts('+butterflypack', when='~mpi')
conflicts('+butterflypack', when='@:3.2.0')
conflicts('+cuda', when='@:3.9.999')
conflicts('+zfp', when='@:3.9.999')
patch('intel-19-compile.patch', when='@3.1.1')
def cmake_args(self):
spec = self.spec
def on_off(varstr):
return 'ON' if varstr in spec else 'OFF'
args = [
'-DSTRUMPACK_USE_MPI=%s' % on_off('+mpi'),
'-DSTRUMPACK_USE_OPENMP=%s' % on_off('+openmp'),
'-DTPL_ENABLE_PARMETIS=%s' % on_off('+parmetis'),
'-DTPL_ENABLE_SCOTCH=%s' % on_off('+scotch'),
'-DTPL_ENABLE_BPACK=%s' % on_off('+butterflypack'),
'-DSTRUMPACK_COUNT_FLOPS=%s' % on_off('+count_flops'),
'-DSTRUMPACK_TASK_TIMERS=%s' % on_off('+task_timers'),
'-DSTRUMPACK_DEV_TESTING=%s' % on_off('+build_dev_tests'),
'-DSTRUMPACK_BUILD_TESTS=%s' % on_off('+build_tests'),
'-DTPL_BLAS_LIBRARIES=%s' % spec['blas'].libs.joined(";"),
'-DTPL_LAPACK_LIBRARIES=%s' % spec['lapack'].libs.joined(";"),
'-DTPL_SCALAPACK_LIBRARIES=%s' % spec['scalapack'].
libs.joined(";"),
]
if spec.satisfies('@:3.9.999'):
if '+mpi' in spec:
args.extend([
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc
])
args.extend([
'-DSTRUMPACK_C_INTERFACE=%s' % on_off('+c_interface'),
])
if spec.satisfies('@4.0.0:'):
args.extend([
'-DSTRUMPACK_USE_CUDA=%s' % on_off('+cuda')
])
args.extend([
'-DBUILD_SHARED_LIBS=%s' % on_off('+shared')
])
return args
| 42.56
| 95
| 0.638346
| 607
| 5,320
| 5.47117
| 0.329489
| 0.03523
| 0.02168
| 0.042156
| 0.112014
| 0.019874
| 0
| 0
| 0
| 0
| 0
| 0.074003
| 0.217669
| 5,320
| 124
| 96
| 42.903226
| 0.723931
| 0.141541
| 0
| 0.086957
| 0
| 0.01087
| 0.422762
| 0.171934
| 0
| 0
| 0
| 0.008065
| 0
| 1
| 0.021739
| false
| 0
| 0.01087
| 0.01087
| 0.108696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be011eb0f4bc43a928140f63592325792f0414b5
| 6,318
|
py
|
Python
|
actionserver/actions/action_feedbackform.py
|
Ajju2211/frendy-bot
|
b86a7a3cb3fb54b300ad9b870defb947f22dc146
|
[
"Apache-2.0"
] | null | null | null |
actionserver/actions/action_feedbackform.py
|
Ajju2211/frendy-bot
|
b86a7a3cb3fb54b300ad9b870defb947f22dc146
|
[
"Apache-2.0"
] | null | null | null |
actionserver/actions/action_feedbackform.py
|
Ajju2211/frendy-bot
|
b86a7a3cb3fb54b300ad9b870defb947f22dc146
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction
# from rasa_core.events import (UserUtteranceReverted, UserUttered,
# ActionExecuted, Event)
from rasa_sdk.events import AllSlotsReset, SlotSet
from rasa.core.constants import REQUESTED_SLOT
from rasa.core.slots import Slot
import pandas as pd
import json
from actionserver.utils import utilities as util
from actionserver.controllers.faqs.faq import FAQ
from actionserver.controllers.constants.orderForm import *
import logging
from actionserver.utils.utilities import INVALID_VALUE
product_list = []
quant_list = [] # takes quantity from user
logger = logging.getLogger(__name__)
with open(r'./actionserver/custom_payload.json') as f:
frendy_product_menu = json.load(f)
# Code snippet for global back
# return [Restarted(), UserUttered(text="/get_started", parse_data={
# "intent": {"confidence": 1.0, "name": "get_started"},
# "entities": []
# }), FollowupAction(name="utter_greet")]
def query_back(dispatcher):
dispatcher.utter_message("Going back to queries!!!")
greet_utter = UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
})
query_utter = UserUttered(text="/query_init", parse_data={
"intent": {"confidence": 1.0, "name": "query_init"},
"entities": []
})
return [
greet_utter,
FollowupAction(name="utter_greet"),
query_utter,
FollowupAction(name="utter_query_type")
]
def greet_back(dispatcher):
dispatcher.utter_message("Going back!!!")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Welcome back to Frendy Shopping"
});
return [UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
}), FollowupAction(name="utter_greet")]
class FeedbackForm(FormAction):
def name(self):
return "feedback_form"
@staticmethod
def required_slots(tracker):
if tracker.get_slot("rating"):
return ["rating", "feedback_text"]
else:
return ["rating"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
# return {"rating": [self.from_entity("rating"),self.from_entity("any_thing")],"feedback_text": [self.from_entity(entity="any_thing"),self.from_entity(entity="navigation")]}
return {"rating": [self.from_entity("rating"), self.from_text()], "feedback_text": [self.from_text(), self.from_entity(entity="navigation")]}
def validate_rating(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
ratings = ['1', '2', '3', '4', '5']
try:
value = value.strip()
if value == "back1" or value.lower() == "back":
return {"rating": INVALID_VALUE, "feedback_text": INVALID_VALUE}
# ratings "1"-"5" are accepted; anything else leaves rating as None
elif value in ratings:
return {"rating": value, "feedback_text": None}
else:
dispatcher.utter_message("Please enter valid option.")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Please enter valid option"
});
return {"rating": None, "feedback_text": None}
except Exception as e:
print(e)
dispatcher.utter_message("Please enter valid option.")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Please enter valid option"
});
return {"rating": None, "feedback_text": None}
def validate_feedback_text(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
if value == "back2" or value.lower() == "back":
return {"rating": None, "feedback_text": None}
else:
return {"feedback_text": value}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
if tracker.get_slot("rating") != INVALID_VALUE:
with open("./actionserver/customer_queries.json", "r") as queriesRef:
rating = tracker.get_slot("rating")
feedback = tracker.get_slot("feedback_text")
feedbackObj = json.load(queriesRef)
feedbackObj["feedback"].append({
"createdOn": util.timestamp(),
"complaint_area": rating,
"complaint": feedback
})
with open("./actionserver/customer_queries.json", "w") as queriesRefWrite:
json.dump(feedbackObj, queriesRefWrite, indent=4)
dispatcher.utter_message("Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback))
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback)
});
else:
dispatcher.utter_message("Feedback form closed")
li = [SlotSet("rating", None), SlotSet("feedback_text", None)]
li.extend(query_back(dispatcher))
return li
return [SlotSet("rating", None), SlotSet("feedback_text", None)]
| 37.832335
| 181
| 0.597341
| 658
| 6,318
| 5.600304
| 0.264438
| 0.042334
| 0.059701
| 0.027137
| 0.461058
| 0.395387
| 0.353596
| 0.290638
| 0.252374
| 0.252374
| 0
| 0.003933
| 0.275562
| 6,318
| 166
| 182
| 38.060241
| 0.80118
| 0.113485
| 0
| 0.386364
| 0
| 0.015152
| 0.193933
| 0.01914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.113636
| 0.007576
| 0.287879
| 0.007576
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be01e27689f95fbc7033b6a5da2ab015674dada0
| 2,909
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AppServiceCertificateResource(Resource):
"""Key Vault container ARM resource for a certificate that is purchased
through Azure.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values
include: 'Initialized', 'WaitingOnCertificateOrder', 'Succeeded',
'CertificateOrderFailed', 'OperationNotPermittedOnKeyVault',
'AzureServiceUnauthorizedToAccessKeyVault', 'KeyVaultDoesNotExist',
'KeyVaultSecretDoesNotExist', 'UnknownError', 'ExternalPrivateKey',
'Unknown'
:vartype provisioning_state: str or
~azure.mgmt.web.models.KeyVaultSecretStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'KeyVaultSecretStatus'},
}
def __init__(self, **kwargs):
super(AppServiceCertificateResource, self).__init__(**kwargs)
self.key_vault_id = kwargs.get('key_vault_id', None)
self.key_vault_secret_name = kwargs.get('key_vault_secret_name', None)
self.provisioning_state = None
| 38.786667
| 102
| 0.625645
| 309
| 2,909
| 5.757282
| 0.391586
| 0.062957
| 0.055087
| 0.060708
| 0.023609
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000429
| 0.198006
| 2,909
| 74
| 103
| 39.310811
| 0.762109
| 0.544517
| 0
| 0
| 0
| 0
| 0.334451
| 0.100587
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be045e37a15278ad4b76fd0b0f607b024e9f6bee
| 925
|
py
|
Python
|
parsers/rss10.py
|
side-beach-city/SBCLinkCopyTool
|
12ec16eefddac215e6a2be92464fde75677c8548
|
[
"Apache-2.0"
] | null | null | null |
parsers/rss10.py
|
side-beach-city/SBCLinkCopyTool
|
12ec16eefddac215e6a2be92464fde75677c8548
|
[
"Apache-2.0"
] | 2
|
2021-06-28T01:52:31.000Z
|
2021-06-28T02:21:18.000Z
|
parsers/rss10.py
|
side-beach-city/SBCLinkCopyTool
|
12ec16eefddac215e6a2be92464fde75677c8548
|
[
"Apache-2.0"
] | null | null | null |
import urllib.request
import xml.etree.ElementTree
class RSS10Parser:
def __init__(self, url: str) -> None:
self.url = url
def getlist(self) -> list[dict[str, str]]:
ENTRY = r"{http://www.w3.org/2005/Atom}"
MEDIA = r"{http://search.yahoo.com/mrss/}"
YOUTUBE = r"{http://www.youtube.com/xml/schemas/2015}"
result = []
with urllib.request.urlopen(self.url) as res:
data = xml.etree.ElementTree.fromstring(res.read())
for child in data.iter(f"{ENTRY}entry"):
result.append({
"title": child.find(f"{ENTRY}title").text,
"link": child.find(f"{ENTRY}link").attrib["href"],
"description": child.find(f"{MEDIA}group").find(f"{MEDIA}description").text,
})
return result
if __name__ == "__main__":
import pprint
pprint.pprint(RSS10Parser("https://www.youtube.com/feeds/videos.xml?playlist_id=PLrPVslFukDQo7l5RCqAZtKDl6tUyMAFWH").getlist())
| 37
| 129
| 0.655135
| 122
| 925
| 4.860656
| 0.540984
| 0.033727
| 0.05059
| 0.05059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020806
| 0.168649
| 925
| 25
| 129
| 37
| 0.750325
| 0
| 0
| 0
| 0
| 0
| 0.307775
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.318182
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be04c82cd5f62929d01752841a8ec17a1254d468
| 291
|
py
|
Python
|
exercises/pt/exc_01_03_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085
|
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/exc_01_03_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79
|
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/exc_01_03_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361
|
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
# Import the English language class and create an nlp object
from ____ import ____
nlp = ____
# Process the text
doc = ____("I like tree kangaroos and narwhals.")
# Select the first token
first_token = doc[____]
# Print the first token's text
print(first_token.____)
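# A possible completed version of the exercise above (illustrative only; it assumes
# spaCy is installed and that the blanks refer to spaCy's English language class).
from spacy.lang.en import English

nlp = English()

# Process the text
doc = nlp("I like tree kangaroos and narwhals.")

# Select the first token and print its text
first_token = doc[0]
print(first_token.text)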
| 22.384615
| 69
| 0.75945
| 41
| 291
| 4.756098
| 0.780488
| 0.061538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175258
| 291
| 12
| 70
| 24.25
| 0.8125
| 0.508591
| 0
| 0
| 0
| 0
| 0.253623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be04f5e587c1b673bb12feefbad95d55e8558e6e
| 3,946
|
py
|
Python
|
tests/integration/mci/test_happy_path.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | null | null | null |
tests/integration/mci/test_happy_path.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | 8
|
2020-03-24T15:24:18.000Z
|
2022-03-02T04:32:56.000Z
|
tests/integration/mci/test_happy_path.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | null | null | null |
from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
class TestHappyPath(IntegrationTestCase):
def test_happy_path_203(self):
self.happy_path('0203', '1')
def test_happy_path_205(self):
self.happy_path('0205', '1')
def happy_path(self, form_type_id, eq_id):
# Get a token
token = create_token(form_type_id, eq_id)
resp = self.client.get('/session?token=' + token.decode(), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the landing page
content = resp.get_data(True)
self.assertRegex(content, '<title>Introduction</title>')
self.assertRegex(content, '>Start survey<')
self.assertRegex(content, 'Monthly Business Survey - Retail Sales Index')
# We proceed to the questionnaire
post_data = {
'action[start_questionnaire]': 'Start Questionnaire'
}
resp = self.client.post('/questionnaire/' + eq_id + '/' + form_type_id + '/789/introduction', data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
block_one_url = resp.location
resp = self.client.get(block_one_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are in the Questionnaire
content = resp.get_data(True)
self.assertRegex(content, '<title>Survey</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, "What are the dates of the sales period you are reporting for?")
self.assertRegex(content, ">Save and continue<")
# check we have some guidance
self.assertRegex(content, "alcoholic drink")
# We fill in our answers
form_data = {
# Start Date
"period-from-day": "01",
"period-from-month": "4",
"period-from-year": "2016",
# End Date
"period-to-day": "30",
"period-to-month": "04",
"period-to-year": "2016",
# Total Turnover
"total-retail-turnover": "100000",
# User Action
"action[save_continue]": "Save & Continue"
}
# We submit the form
resp = self.client.post(block_one_url, data=form_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
# There are no validation errors
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/summary$')
summary_url = resp.location
resp = self.client.get(summary_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are on the review answers page
content = resp.get_data(True)
self.assertRegex(content, '<title>Summary</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, '>Your responses<')
self.assertRegex(content, 'Please check carefully before submission.')
self.assertRegex(content, '>Submit answers<')
# We submit our answers
post_data = {
"action[submit_answers]": "Submit answers"
}
resp = self.client.post(summary_url, data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/thank-you$')
resp = self.client.get(resp.location, follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the thank you page
content = resp.get_data(True)
self.assertRegex(content, '<title>Submission Successful</title>')
self.assertRegex(content, '(?s)Monthly Business Survey - Retail Sales Index.*?Monthly Business Survey - Retail Sales Index')
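# Illustrative note (not part of the original test file): integration tests like the
# one above would typically be run with pytest, e.g.
#   python -m pytest tests/integration/mci/test_happy_path.py -q
# assuming the project's usual test setup and dependencies are in place.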
| 40.265306
| 141
| 0.639635
| 469
| 3,946
| 5.247335
| 0.255864
| 0.103616
| 0.134092
| 0.071109
| 0.466477
| 0.4551
| 0.42503
| 0.399025
| 0.399025
| 0.35514
| 0
| 0.023139
| 0.244298
| 3,946
| 97
| 142
| 40.680412
| 0.802146
| 0.084389
| 0
| 0.241935
| 0
| 0
| 0.251599
| 0.04476
| 0
| 0
| 0
| 0
| 0.387097
| 1
| 0.048387
| false
| 0
| 0.032258
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be0508937eb9d9d5130de65137f4cd2a7335c162
| 70,784
|
py
|
Python
|
src/transformers/models/hubert/modeling_tf_hubert.py
|
OllieBroadhurst/transformers
|
12428f0ef15bb3631e7a5f04672ddb05f363de97
|
[
"Apache-2.0"
] | 1
|
2022-03-25T01:33:40.000Z
|
2022-03-25T01:33:40.000Z
|
src/transformers/models/hubert/modeling_tf_hubert.py
|
OllieBroadhurst/transformers
|
12428f0ef15bb3631e7a5f04672ddb05f363de97
|
[
"Apache-2.0"
] | 1
|
2022-03-23T19:49:13.000Z
|
2022-03-23T19:49:13.000Z
|
src/transformers/models/hubert/modeling_tf_hubert.py
|
erichan1/transformers
|
12428f0ef15bb3631e7a5f04672ddb05f363de97
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TensorFlow Hubert model."""
import inspect
import warnings
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput
from ...modeling_tf_utils import TFPreTrainedModel, booleans_processing, get_initializer, keras_serializable
from ...tf_utils import shape_list
from ...tokenization_utils_base import BatchEncoding
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_hubert import HubertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "HubertConfig"
TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/hubert-base-ls960",
# See all Hubert models at https://huggingface.co/models?filter=hubert
]
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.input_values_processing
def input_values_processing(func, config, input_values, **kwargs):
"""
Process the input of each TensorFlow model, including the booleans. In case of a list of symbolic inputs, each
input has to be named according to the parameter names, i.e. `input_values = tf.keras.Input(shape=(128,),
dtype='float32', name="input_values")`, otherwise the order of the tensors will not be guaranteed during
training.
Args:
func (`callable`):
The callable function of the TensorFlow model.
config ([`PretrainedConfig`]):
The config of the running model.
**kwargs:
The inputs of the model.
Returns:
A dictionary mapping each expected parameter name to its processed input value.
"""
signature = dict(inspect.signature(func).parameters)
signature.pop("kwargs", None)
signature.pop("self", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_values, (tuple, list)):
for i, input in enumerate(input_values):
# EagerTensors don't allow using the .name property, so we check for a real Tensor
if type(input) == tf.Tensor:
# Tensor names always have the pattern `name:id`, so we check only the
# `name` part
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
)
elif isinstance(input_values, (dict, BatchEncoding)):
if "inputs" in input_values:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead.",
FutureWarning,
)
output["input_values"] = input_values.pop("inputs")
if "decoder_cached_states" in input_values:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_values.pop("decoder_cached_states")
for k, v in dict(input_values).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
logger.warning(
f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
)
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_values, tf.Tensor) or input_values is None:
output[parameter_names[0]] = input_values
else:
raise ValueError(
f"Data of type {type(input_values)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
)
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
# When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
# So to respect the proper output we have to add this exception
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_values`
output["input_values"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(booleans_processing(config=config, **boolean_dict))
return output
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement
def _sample_without_replacement(distribution, num_samples):
"""
Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
https://github.com/tensorflow/tensorflow/issues/9260 for more info
"""
z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
_, indices = tf.nn.top_k(distribution + z, num_samples)
return indices
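# Illustrative sketch (not part of the original module): draw 3 distinct column
# indices per row from a flat 10-way "distribution" using the helper above. The
# tensors and sizes here are arbitrary example values.
_example_scores = tf.ones((2, 10))
_example_indices = _sample_without_replacement(_example_scores, num_samples=3)
# `_example_indices` has shape (2, 3) and contains no repeated index within a row.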
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices
def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
"""
Scatter function as in PyTorch with indices in format (batch_dim, indices)
"""
indices_shape = shape_list(batch_indices)
# broadcast batch dim to indices_shape
broad_casted_batch_dims = tf.reshape(
tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
)
# transform batch_indices to pair_indices
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
# scatter values to pair indices
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
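# Illustrative sketch (not part of the original module): scatter per-row values into
# a (2, 5) zero tensor using (batch, index) pairs, as done for the SpecAugment mask below.
_example_values = tf.constant([[1, 1], [1, 1]])
_example_positions = tf.constant([[0, 3], [1, 4]])
_example_scattered = _scatter_values_on_batch_indices(_example_values, _example_positions, (2, 5))
# -> [[1, 0, 0, 1, 0],
#     [0, 1, 0, 0, 1]]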
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
min_masks: int = 0,
) -> tf.Tensor:
"""
Computes random mask spans for a given shape
Args:
shape: the shape for which to compute masks.
should be of size 2, where the first element is the batch size and the second is the number of timesteps
attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob:
probability for each token to be chosen as the start of a span to be masked. This will be multiplied by the
number of timesteps divided by the length of the mask span, to mask approximately this percentage of all elements.
However, due to overlaps, the actual number will be smaller (unless no_overlap is True).
mask_length: size of the mask
min_masks: minimum number of masked spans
Adapted from [fairseq's
data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}"
)
# compute number of masked spans in batch
num_masked_spans = int(mask_prob * sequence_length / mask_length + tf.random.uniform((1,)))
num_masked_spans = max(num_masked_spans, min_masks)
# make sure num masked indices <= sequence_length
if num_masked_spans * mask_length > sequence_length:
num_masked_spans = sequence_length // mask_length
# SpecAugment mask to fill
spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
# uniform distribution to sample from, make sure that offset samples are < sequence_length
uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))
# get random indices to mask
spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)
# expand masked indices to masked spans
spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))
offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# scatter indices to mask
spec_aug_mask = _scatter_values_on_batch_indices(
tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, spec_aug_mask.shape
)
return spec_aug_mask
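# Illustrative sketch (not part of the original module): a SpecAugment-style mask for
# a batch of 2 sequences of 50 timesteps, masking roughly 10% in spans of length 5.
_example_mask = _compute_mask_indices((2, 50), mask_prob=0.1, mask_length=5)
# `_example_mask` is an int32 tensor of shape (2, 50) with ones marking masked positions.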
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
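# Illustrative sketch (not part of the original module): a padding mask for two
# sequences of lengths 3 and 2 (out of 4) expanded to the additive attention format.
_example_padding_mask = tf.constant([[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]])
_example_additive_mask = _expand_mask(_example_padding_mask)
# `_example_additive_mask` has shape (2, 1, 4, 4); padded positions receive LARGE_NEGATIVE.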
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert
class TFHubertGroupNorm(tf.keras.layers.Layer):
"""
From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization
"""
def __init__(
self,
groups: int = 32,
axis: int = -1,
epsilon: float = 1e-3,
center: bool = True,
scale: bool = True,
beta_initializer: tf.keras.initializers.Initializer = "zeros",
gamma_initializer: tf.keras.initializers.Initializer = "ones",
beta_regularizer: tf.keras.regularizers.Regularizer = None,
gamma_regularizer: tf.keras.regularizers.Regularizer = None,
beta_constraint: tf.keras.constraints.Constraint = None,
gamma_constraint: tf.keras.constraints.Constraint = None,
**kwargs,
):
super().__init__(**kwargs)
self.supports_masking = True
self.groups = groups
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = tf.keras.initializers.get(beta_initializer)
self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)
self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)
self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)
self.beta_constraint = tf.keras.constraints.get(beta_constraint)
self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)
self._check_axis()
def build(self, input_shape):
self._check_if_input_shape_is_none(input_shape)
self._set_number_of_groups_for_instance_norm(input_shape)
self._check_size_of_dimensions(input_shape)
self._create_input_spec(input_shape)
self._add_gamma_weight(input_shape)
self._add_beta_weight(input_shape)
self.built = True
super().build(input_shape)
def call(self, inputs):
input_shape = tf.keras.backend.int_shape(inputs)
tensor_input_shape = tf.shape(inputs)
reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
outputs = tf.reshape(normalized_inputs, tensor_input_shape)
else:
outputs = normalized_inputs
return outputs
def get_config(self):
config = {
"groups": self.groups,
"axis": self.axis,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": tf.keras.initializers.serialize(self.beta_initializer),
"gamma_initializer": tf.keras.initializers.serialize(self.gamma_initializer),
"beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": tf.keras.regularizers.serialize(self.gamma_regularizer),
"beta_constraint": tf.keras.constraints.serialize(self.beta_constraint),
"gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shape):
return input_shape
def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
group_shape[self.axis] = input_shape[self.axis] // self.groups
group_shape.insert(self.axis, self.groups)
group_shape = tf.stack(group_shape)
reshaped_inputs = tf.reshape(inputs, group_shape)
return reshaped_inputs, group_shape
else:
return inputs, group_shape
def _apply_normalization(self, reshaped_inputs, input_shape):
group_shape = tf.keras.backend.int_shape(reshaped_inputs)
group_reduction_axes = list(range(1, len(group_shape)))
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
axis = -2 if self.axis == -1 else self.axis - 1
else:
axis = -1 if self.axis == -1 else self.axis - 1
group_reduction_axes.pop(axis)
mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
gamma, beta = self._get_reshaped_weights(input_shape)
normalized_inputs = tf.nn.batch_normalization(
reshaped_inputs,
mean=mean,
variance=variance,
scale=gamma,
offset=beta,
variance_epsilon=self.epsilon,
)
return normalized_inputs
def _get_reshaped_weights(self, input_shape):
broadcast_shape = self._create_broadcast_shape(input_shape)
gamma = None
beta = None
if self.scale:
gamma = tf.reshape(self.gamma, broadcast_shape)
if self.center:
beta = tf.reshape(self.beta, broadcast_shape)
return gamma, beta
def _check_if_input_shape_is_none(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError(
"Axis " + str(self.axis) + " of "
"input tensor should have a defined dimension "
"but the layer received an input with shape " + str(input_shape) + "."
)
def _set_number_of_groups_for_instance_norm(self, input_shape):
dim = input_shape[self.axis]
if self.groups == -1:
self.groups = dim
def _check_size_of_dimensions(self, input_shape):
dim = input_shape[self.axis]
if dim < self.groups:
raise ValueError(
"Number of groups (" + str(self.groups) + ") cannot be "
"more than the number of channels (" + str(dim) + ")."
)
if dim % self.groups != 0:
raise ValueError(
"Number of groups (" + str(self.groups) + ") must be a "
"multiple of the number of channels (" + str(dim) + ")."
)
def _check_axis(self):
if self.axis == 0:
raise ValueError(
"You are trying to normalize your batch axis. Do you want to "
"use tf.layer.batch_normalization instead"
)
def _create_input_spec(self, input_shape):
dim = input_shape[self.axis]
self.input_spec = tf.keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim})
def _add_gamma_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
)
else:
self.gamma = None
def _add_beta_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.center:
self.beta = self.add_weight(
shape=shape,
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
)
else:
self.beta = None
def _create_broadcast_shape(self, input_shape):
broadcast_shape = [1] * len(input_shape)
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
broadcast_shape.insert(self.axis, self.groups)
else:
broadcast_shape[self.axis] = self.groups
return broadcast_shape
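# Illustrative sketch (not part of the original module): apply the group norm layer
# above to a random (batch, time, channels) activation, normalizing 16 channels in 4 groups.
_example_group_norm = TFHubertGroupNorm(groups=4)
_example_normalized = _example_group_norm(tf.random.normal((2, 10, 16)))
# Output shape matches the input: (2, 10, 16).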
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert
class TFHubertWeightNormConv1D(tf.keras.layers.Conv1D):
"""Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm"""
def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs):
super().__init__(
filters=filters,
kernel_size=kernel_size,
groups=groups,
padding="valid",
use_bias=True,
bias_initializer="he_normal",
**kwargs,
)
self.explicit_padding = explicit_padding
self.filter_axis = 2
self.initialized = False
self.kernel_norm_axes = tf.constant([0, 1])
def _init_norm(self):
"""Set the norm of the weight vector."""
kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes))
self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis])
def _normalize_kernel(self):
"""Generate normalized weights."""
kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g)
self.kernel = tf.transpose(kernel)
def build(self, input_shape):
if not self.built:
input_shape = input_shape.as_list()
# Conv1D output shapes are checked at build time since TF 2.7, so we need to account for padding
input_shape[-2] += self.explicit_padding * 2
super().build(input_shape)
self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True)
self.weight_v = self.kernel
self.weight_g = self.add_weight(
name="weight_g",
shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1),
initializer="ones",
dtype=self.weight_v.dtype,
trainable=True,
)
self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True)
def call(self, inputs):
if not self.initialized:
self._init_norm()
self.initialized = True
self._normalize_kernel()
padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0)))
output = super().call(padded_inputs)
return output
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertNoLayerNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertLayerNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.layer_norm = tf.keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
class TFHubertGroupNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.activation = get_tf_activation(config.feat_extract_activation)
self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm")
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
class TFHubertPositionalConvEmbedding(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.conv = TFHubertWeightNormConv1D(
filters=config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
groups=config.num_conv_pos_embedding_groups,
explicit_padding=config.num_conv_pos_embeddings // 2,
name="conv",
)
self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert
class TFHubertSamePadLayer(tf.keras.layers.Layer):
def __init__(self, num_conv_pos_embeddings, **kwargs):
super().__init__(**kwargs)
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def call(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, : -self.num_pad_remove, :]
return hidden_states
class TFHubertFeatureEncoder(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
super().__init__(**kwargs)
if config.feat_extract_norm == "group":
conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [
TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}")
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}")
for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = conv_layers
def call(self, input_values):
hidden_states = tf.expand_dims(input_values, -1)
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
class TFHubertFeatureExtractor(TFHubertFeatureEncoder):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
warnings.warn(
f"The class `{self.__class__.__name__}` has been deprecated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
class TFHubertFeatureProjection(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.projection = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="projection",
)
self.dropout = tf.keras.layers.Dropout(rate=config.feat_proj_dropout)
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
return hidden_states
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert
class TFHubertAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
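# Illustrative sketch (not part of the original module): run the attention layer above
# on a small random batch; embed_dim, num_heads, and the input sizes are arbitrary.
_example_attention = TFHubertAttention(embed_dim=16, num_heads=4)
_example_attn_output, _example_attn_weights, _ = _example_attention(tf.random.normal((2, 6, 16)))
# `_example_attn_output` has shape (2, 6, 16); `_example_attn_weights` has shape (2, 4, 6, 6).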
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert
class TFHubertFeedForward(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.intermediate_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.intermediate_dense = tf.keras.layers.Dense(
units=config.intermediate_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="intermediate_dense",
)
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
self.output_dense = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="output_dense",
)
self.output_dropout = tf.keras.layers.Dropout(config.hidden_dropout)
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states, training=training)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states, training=training)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert
class TFHubertEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFHubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
name="attention",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
self.final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="final_layer_norm"
)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
training: bool = False,
) -> Tuple[tf.Tensor]:
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, training=training
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
class TFHubertEncoderLayerStableLayerNorm(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFHubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
name="attention",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
self.final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="final_layer_norm"
)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
training: bool = False,
) -> Tuple[tf.Tensor]:
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, training=training
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert
class TFHubertEncoder(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
attention_mask = _expand_mask(attention_mask)
else:
attention_mask = None
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
if training and (dropout_probability < self.config.layerdrop): # skip the layer
continue
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
class TFHubertEncoderStableLayerNorm(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer = [
TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
attention_mask = _expand_mask(attention_mask)
else:
attention_mask = None
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states, training=training)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
if training and (dropout_probability < self.config.layerdrop): # skip the layer
continue
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@keras_serializable
class TFHubertMainLayer(tf.keras.layers.Layer):
config_class = HubertConfig
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor")
self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection")
if config.do_stable_layer_norm:
self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder")
else:
self.encoder = TFHubertEncoder(config, name="encoder")
def build(self, input_shape: tf.TensorShape):
self.masked_spec_embed = self.add_weight(
shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed"
)
super().build(input_shape)
def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (input_length - kernel_size) // stride + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: Optional[tf.Tensor] = None):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
batch_size, sequence_length, hidden_size = shape_list(hidden_states)
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states = tf.where(
tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
hidden_states,
)
elif self.config.mask_time_prob > 0:
# generate indices & apply SpecAugment along time axis
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
min_masks=2,
)
hidden_states = tf.where(
tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
hidden_states,
)
# apply SpecAugment along feature axis
if self.config.mask_feature_prob > 0:
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
)
hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0)
return hidden_states
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[tf.Tensor] = None,
output_hidden_states: Optional[tf.Tensor] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs: Any,
):
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
hidden_states = self.feature_extractor(
tf.cast(inputs["input_values"], tf.float32), training=inputs["training"]
)
if inputs["attention_mask"] is not None:
# compute real output lengths according to convolution formula
output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(inputs["attention_mask"], -1))
attention_mask = tf.sequence_mask(
output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype
)
hidden_states = self.feature_projection(hidden_states, training=inputs["training"])
mask_time_indices = kwargs.get("mask_time_indices", None)
if inputs["training"]:
hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = encoder_outputs[0]
if not inputs["return_dict"]:
return (hidden_states,) + encoder_outputs[1:]
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class TFHubertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = HubertConfig
base_model_prefix = "hubert"
main_input_name = "input_values"
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
pad_token = 0.0
input_values = tf.convert_to_tensor(np.random.rand(1, 16000), tf.float32)
dummy_inputs = {
"input_values": input_values,
"attention_mask": tf.cast(tf.not_equal(input_values, pad_token), tf.float32),
}
return dummy_inputs
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
logger.warning(
f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
"to train/fine-tune this model, you need a GPU or a TPU"
)
@tf.function
def serving(self, inputs):
output = self.call(input_values=inputs, training=False)
return self.serving_output(output)
HUBERT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument:
- a single Tensor with `input_values` only and nothing else: `model(input_values)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_values": input_values, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
HUBERT_INPUTS_DOCSTRING = r"""
Args:
input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_values` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attention tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode; in graph mode the value will always be set to `True`.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top.",
HUBERT_START_DOCSTRING,
)
class TFHubertModel(TFHubertPreTrainedModel):
def __init__(self, config: HubertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.config = config
self.hubert = TFHubertMainLayer(config, name="hubert")
@add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
"""
Returns:
Example:
```python
>>> from transformers import Wav2Vec2Processor, TFHubertModel
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h")
>>> model = TFHubertModel.from_pretrained("facebook/hubert-base-960h")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
>>> hidden_states = model(input_values).last_hidden_state
```"""
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
inputs["output_hidden_states"] = (
inputs["output_hidden_states"] if inputs["output_hidden_states"] else self.config.output_hidden_states
)
inputs["output_attentions"] = (
inputs["output_attentions"] if inputs["output_attentions"] else self.config.output_attentions
)
inputs["return_dict"] = inputs["return_dict"] if inputs["return_dict"] else self.config.return_dict
outputs = self.hubert(
input_values=inputs["input_values"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
HUBERT_START_DOCSTRING,
)
class TFHubertForCTC(TFHubertPreTrainedModel):
def __init__(self, config: HubertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.hubert = TFHubertMainLayer(config, name="hubert")
self.dropout = tf.keras.layers.Dropout(config.final_dropout)
self.lm_head = tf.keras.layers.Dense(config.vocab_size, name="lm_head")
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.hubert.feature_extractor.trainable = False
@add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
labels: Optional[tf.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the connectionist temporal classification (CTC) loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_values` docstring). Tokens with indices set to `-100` are ignored (masked);
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> import tensorflow as tf
>>> from transformers import Wav2Vec2Processor, TFHubertForCTC
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h")
>>> model = TFHubertForCTC.from_pretrained("facebook/hubert-base-960h")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
>>> logits = model(input_values).logits
>>> predicted_ids = tf.argmax(logits, axis=-1)
>>> transcription = processor.decode(predicted_ids[0])
>>> # compute loss
>>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST"
>>> # wrap processor as target processor to encode labels
>>> with processor.as_target_processor():
...     labels = processor(target_transcription, return_tensors="tf").input_ids
>>> loss = model(input_values, labels=labels).loss
```"""
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
outputs = self.hubert(
input_values=inputs["input_values"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states, training=inputs["training"])
logits = self.lm_head(hidden_states)
if labels is not None:
if tf.reduce_max(labels) >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
attention_mask = (
inputs["attention_mask"]
if inputs["attention_mask"] is not None
else tf.ones_like(inputs["input_values"], dtype=tf.float32)
)
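# the per-sample attention mask over the raw audio is converted into the number of
# frames the feature extractor produces, which is what CTC needs as logit_length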
input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1))
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = tf.cast(labels >= 0, tf.int32)
target_lengths = tf.reduce_sum(labels_mask, axis=-1)
loss = tf.nn.ctc_loss(
logits=logits,
labels=labels,
logit_length=input_lengths,
label_length=target_lengths,
blank_index=self.config.pad_token_id,
logits_time_major=False,
)
if self.config.ctc_loss_reduction == "sum":
loss = tf.reduce_sum(loss)
if self.config.ctc_loss_reduction == "mean":
loss = tf.reduce_mean(loss)
else:
loss = None
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
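# A minimal, self-contained sketch of the tf.nn.ctc_loss call made in
# TFHubertForCTC.call above. The shapes, vocabulary size, label values and
# blank index below are illustrative assumptions, not values taken from the model.
import tensorflow as tf

batch_size, time_steps, vocab_size = 2, 50, 32
logits = tf.random.normal((batch_size, time_steps, vocab_size))
labels = tf.constant([[5, 8, 2, 9], [4, 7, 1, 3]], dtype=tf.int32)
logit_length = tf.fill([batch_size], time_steps)
label_length = tf.fill([batch_size], 4)

ctc = tf.nn.ctc_loss(
    labels=labels,
    logits=logits,
    label_length=label_length,
    logit_length=logit_length,
    blank_index=0,  # the model above uses config.pad_token_id as the blank
    logits_time_major=False,  # logits are laid out as (batch, time, vocab)
)
mean_loss = tf.reduce_mean(ctc)  # mirrors config.ctc_loss_reduction == "mean"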
| 42.461908
| 164
| 0.656575
| 8,641
| 70,784
| 5.12892
| 0.094318
| 0.058214
| 0.0132
| 0.02112
| 0.520206
| 0.459555
| 0.415194
| 0.393985
| 0.36833
| 0.353167
| 0
| 0.008215
| 0.25024
| 70,784
| 1,666
| 165
| 42.487395
| 0.826873
| 0.146333
| 0
| 0.418089
| 0
| 0.020478
| 0.15486
| 0.01481
| 0
| 0
| 0
| 0
| 0.003413
| 1
| 0.06058
| false
| 0.000853
| 0.010239
| 0.00256
| 0.127986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be071e34802c8618edb66a1241ddd2e7d443b843
| 3,316
|
py
|
Python
|
image-generation/slegan/args.py
|
AaratiAkkapeddi/nnabla-examples
|
db9e5ad850303c158773aeb275e5c3821b4a3935
|
[
"Apache-2.0"
] | 228
|
2017-11-20T06:05:56.000Z
|
2022-03-23T12:40:05.000Z
|
image-generation/slegan/args.py
|
AaratiAkkapeddi/nnabla-examples
|
db9e5ad850303c158773aeb275e5c3821b4a3935
|
[
"Apache-2.0"
] | 36
|
2018-01-11T23:26:20.000Z
|
2022-03-12T00:53:38.000Z
|
image-generation/slegan/args.py
|
AaratiAkkapeddi/nnabla-examples
|
db9e5ad850303c158773aeb275e5c3821b4a3935
|
[
"Apache-2.0"
] | 76
|
2017-11-22T22:00:00.000Z
|
2022-03-28T05:58:57.000Z
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_args(batch_size=8, image_size=256, max_iter=100000):
"""
Get command line arguments.
Arguments set the default values of command line arguments.
"""
import argparse
import os
description = "Example of Lightweight GAN."
parser = argparse.ArgumentParser(description)
parser.add_argument("-d", "--device-id", type=str, default="0",
help="Device id.")
parser.add_argument("-c", "--context", type=str, default="cudnn",
help="Context.")
parser.add_argument("--type-config", "-t", type=str, default='float',
help='Type of computation. e.g. "float", "half".')
parser.add_argument("--img-path", type=str,
default="~/AnimalFace-dog",
help="Image path.")
parser.add_argument("--image-size", type=int, default=image_size,
help="Image size.")
parser.add_argument("--batch-size", "-b", type=int, default=batch_size,
help="Batch size.")
parser.add_argument("--max-iter", "-i", type=int, default=max_iter,
help="Max iterations.")
parser.add_argument("--save-interval", type=int, default=50000,
help="Interval for saving models.")
parser.add_argument("--test-interval", type=int, default=5000,
help="Interval for testing models.")
parser.add_argument("--latent", type=int, default=256,
help="Number of latent variables.")
parser.add_argument("--monitor-path", type=str, default="./result/tmp",
help="Monitor path.")
parser.add_argument("--model-load-path", type=str, default=".",
help="Path to load parameters from")
parser.add_argument("--train-samples", type=int, default=-1,
help="Number of data to be used. When -1 is set all data is used.")
parser.add_argument("--lr", type=float, default=2e-4,
help="Learning rate")
parser.add_argument("--aug-list", nargs="+",
default=["lrflip", "translation", "color"])
args = parser.parse_args()
return args
def save_args(args, mode="train"):
from nnabla import logger
import os
if not os.path.exists(args.monitor_path):
os.makedirs(args.monitor_path)
path = "{}/Arguments-{}.txt".format(args.monitor_path, mode)
logger.info("Arguments are saved to {}.".format(path))
with open(path, "w") as fp:
for k, v in sorted(vars(args).items()):
logger.info("{}={}".format(k, v))
fp.write("{}={}\n".format(k, v))
| 42.512821
| 91
| 0.606454
| 416
| 3,316
| 4.769231
| 0.423077
| 0.068044
| 0.128528
| 0.027218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015751
| 0.253317
| 3,316
| 77
| 92
| 43.064935
| 0.785541
| 0.205971
| 0
| 0.040816
| 0
| 0
| 0.246538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.081633
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be09ed482ae6fd03e6f106d0795f2a118eb2425c
| 2,332
|
py
|
Python
|
test/integration_tests/test_integration_datasets_client.py
|
self-host/selfhost-python-client
|
95797ef819099174d916b10e82878c370b1cd972
|
[
"MIT"
] | null | null | null |
test/integration_tests/test_integration_datasets_client.py
|
self-host/selfhost-python-client
|
95797ef819099174d916b10e82878c370b1cd972
|
[
"MIT"
] | null | null | null |
test/integration_tests/test_integration_datasets_client.py
|
self-host/selfhost-python-client
|
95797ef819099174d916b10e82878c370b1cd972
|
[
"MIT"
] | null | null | null |
import uuid
from typing import List, Dict, Any
import unittest
from selfhost_client import SelfHostClient, DatasetType
class TestIntegrationDatasetsClient(unittest.TestCase):
"""
Run these tests individually because Self-Host will return HTTP 429 Too Many Requests otherwise.
"""
@classmethod
def setUpClass(cls) -> None:
cls.client: SelfHostClient = SelfHostClient(
base_url='http://127.0.0.1:8080',
username='test',
password='root'
)
cls.unique_name: str = str(uuid.uuid4())
cls.created_dataset: DatasetType = cls.client.create_dataset(
name=cls.unique_name,
dataset_format='ini',
content='aGVsbG8sIHdvcmxkIQ==',
tags=['test_tag']
)
@classmethod
def tearDownClass(cls) -> None:
cls.client.delete_dataset(cls.created_dataset['uuid'])
def test_get_datasets(self) -> None:
params: Dict[str, int] = {
'limit': 20,
'offset': 0
}
datasets: List[DatasetType] = self.client.get_datasets(**params)
self.assertIsNotNone(datasets)
def test_create_and_delete_dataset(self) -> None:
# Create and delete happens in setup and teardown methods.
self.assertEqual(self.created_dataset['name'], self.unique_name)
def test_get_dataset(self) -> None:
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], self.created_dataset['name'])
def test_update_dataset(self) -> None:
self.client.update_dataset(
dataset_uuid=self.created_dataset['uuid'],
name=f'{self.created_dataset["name"]} Updated',
dataset_format='json',
tags=['updated']
)
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], f'{self.created_dataset["name"]} Updated')
self.assertEqual(fetched_dataset['format'], 'json')
self.assertEqual(fetched_dataset['tags'], ['updated'])
def test_get_dataset_raw_content(self) -> None:
fetched_content: Any = self.client.get_dataset_raw_content(self.created_dataset['uuid'])
self.assertIsNotNone(fetched_content)
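# A small sketch for running one test at a time, as the class docstring asks,
# to avoid HTTP 429 from Self-Host; the module path in the command below is an
# assumed example:
#   python -m unittest test_integration_datasets_client.TestIntegrationDatasetsClient.test_get_datasets
if __name__ == '__main__':
    unittest.main()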
| 36.4375
| 100
| 0.653945
| 260
| 2,332
| 5.673077
| 0.311538
| 0.094915
| 0.097627
| 0.059661
| 0.226441
| 0.178983
| 0.178983
| 0.135593
| 0.135593
| 0.135593
| 0
| 0.01
| 0.22813
| 2,332
| 63
| 101
| 37.015873
| 0.809444
| 0.066038
| 0
| 0.083333
| 0
| 0
| 0.099445
| 0.027752
| 0
| 0
| 0
| 0
| 0.145833
| 1
| 0.145833
| false
| 0.020833
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be0a74b4d28b5ee5afbbd8993134c1568bbdff10
| 6,516
|
py
|
Python
|
metaspace/engine/sm/engine/tests/test_fdr.py
|
METASPACE2020/METASPACE
|
e1acd9a409f84a78eed7ca9713258c09b0e137ca
|
[
"Apache-2.0"
] | null | null | null |
metaspace/engine/sm/engine/tests/test_fdr.py
|
METASPACE2020/METASPACE
|
e1acd9a409f84a78eed7ca9713258c09b0e137ca
|
[
"Apache-2.0"
] | null | null | null |
metaspace/engine/sm/engine/tests/test_fdr.py
|
METASPACE2020/METASPACE
|
e1acd9a409f84a78eed7ca9713258c09b0e137ca
|
[
"Apache-2.0"
] | null | null | null |
from itertools import product
from unittest.mock import patch
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers
FDR_CONFIG = {'decoy_sample_size': 2}
@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H', '+K', '[M]+'],
analysis_version=1,
)
exp_target_decoy_df = pd.DataFrame(
[
('H2O', '+H', '+He'),
('H2O', '+H', '+Li'),
('H2O', '+K', '+He'),
('H2O', '+K', '+Li'),
('H2O', '', '+He'),
('H2O', '', '+Li'),
],
columns=['formula', 'tm', 'dm'],
)
fdr.decoy_adducts_selection(target_formulas=['H2O'])
assert_frame_equal(
fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
)
@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs):
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=analysis_version,
)
fdr.fdr_levels = [0.2, 0.8]
fdr.td_df = pd.DataFrame(
[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
['H2O', '+Cu', 0.5],
['H2O', '+Co', 0.5],
['C2H2', '+Ag', 0.75],
['C2H2', '+Ar', 0.0],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
],
columns=['formula', 'modifier', 'msm'],
).assign(fdr=expected_fdrs)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_estimate_fdr_digitize_works():
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=1,
)
fdr.fdr_levels = [0.4, 0.8]
fdr.td_df = pd.DataFrame(
[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['C1', '+H', 1.0],
['C2', '+H', 0.75],
['C3', '+H', 0.5],
['C4', '+H', 0.25],
['C1', '+Cu', 0.75],
['C2', '+Ag', 0.3],
['C3', '+Cl', 0.25],
['C4', '+Co', 0.1],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['C1', '+H', 1.0, 0.4],
['C2', '+H', 0.75, 0.4],
['C3', '+H', 0.5, 0.4],
['C4', '+H', 0.25, 0.8],
],
columns=['formula', 'modifier', 'msm', 'fdr'],
)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_ions():
formulas = ['H2O', 'C5H2OH']
target_adducts = ['+H', '+Na']
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
assert (
len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
< len(ions)
<= len(formulas) * len(target_adducts) * decoy_sample_size
+ len(formulas) * len(target_adducts)
)
target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_chem_mods_and_neutral_losses():
formulas = ['H2O', 'C5H2OH']
chem_mods = ['-H+C']
neutral_losses = ['-O', '-C']
target_adducts = ['+H', '+Na', '[M]+']
target_modifiers = [
format_modifiers(cm, nl, ta)
for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
]
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=chem_mods,
neutral_losses=neutral_losses,
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
min_count = len(formulas) * len(target_modifiers)
max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
assert min_count < len(ions) <= max_count
target_ions = list(product(formulas, target_modifiers))
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_run_fdr_ranking():
target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
expected_fdr = n_decoys / n_targets
expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)
expected_fdr_mono = pd.Series(
[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
)
fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)
fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)
assert np.isclose(fdr, expected_fdr).all()
assert np.isclose(fdr_ros, expected_fdr_ros).all()
assert np.isclose(fdr_mono, expected_fdr_mono).all()
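def test_run_fdr_ranking_single_point_by_hand():
    # A minimal sketch spelling out one point of the ranking above by hand:
    # at target score 0.8 there are 3 targets and 1 decoy with score >= 0.8,
    # so the plain FDR at that position is 1 / 3. This simply restates
    # expected_fdr[2] from test_run_fdr_ranking with the same inputs.
    target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
    decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
    fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
    assert np.isclose(np.asarray(fdr)[2], 1 / 3)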
| 32.58
| 121
| 0.558778
| 876
| 6,516
| 3.921233
| 0.173516
| 0.044541
| 0.048035
| 0.041921
| 0.554294
| 0.512955
| 0.483261
| 0.447162
| 0.395924
| 0.350218
| 0
| 0.045529
| 0.255064
| 6,516
| 199
| 122
| 32.743719
| 0.662134
| 0.035451
| 0
| 0.372781
| 0
| 0
| 0.087711
| 0.010825
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.035503
| false
| 0
| 0.047337
| 0
| 0.08284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be0c9d39fc49b73642a31f8fb89de4fff31f8d63
| 4,576
|
py
|
Python
|
umigame/nlp/labelling.py
|
penguinwang96825/Umigame
|
98d647ab6f40df08fe31d6b3bc444afe229a914e
|
[
"Apache-2.0"
] | null | null | null |
umigame/nlp/labelling.py
|
penguinwang96825/Umigame
|
98d647ab6f40df08fe31d6b3bc444afe229a914e
|
[
"Apache-2.0"
] | null | null | null |
umigame/nlp/labelling.py
|
penguinwang96825/Umigame
|
98d647ab6f40df08fe31d6b3bc444afe229a914e
|
[
"Apache-2.0"
] | 1
|
2021-11-01T14:35:32.000Z
|
2021-11-01T14:35:32.000Z
|
import math
import numpy as np
import pandas as pd
def fixed_time_horizon(df, column='close', lookback=20):
"""
Fixed-time Horizon
As it relates to finance, virtually all ML papers label observations using the fixed-time horizon method.
Fixed-time horizon is presented as one of the main procedures to label data when it comes to processing
financial time series for machine learning.
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
lookback: int
The number of days to look ahead.
References
----------
1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html
2. https://arxiv.org/pdf/1603.08604.pdf
3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/
4. De Prado, Advances in financial machine learning, 2018
5. Dixon et al., Classification-based financial markets prediction using deep neural networks, 2017
"""
price = df[column]
label = (price.shift(-lookback) / price > 1).astype(int)
return label
def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True):
"""
Triple Barrier
The idea is to consider the full dynamics of a trading strategy and not a simple performance proxy.
The rationale for this extension is that often money managers implement P&L triggers that cash in
when gains are sufficient or opt out to stop their losses. Upon inception of the strategy,
three barriers are fixed (De Prado, 2018).
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
ub: float
It stands for upper bound, e.g. 0.07 is a 7% profit taking.
lb: float
It stands for lower bound, e.g. 0.03 is a 3% stop loss.
lookback: int
Maximum holding time.
References
----------
1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/
2. http://www.mlfactor.com/Data.html#the-triple-barrier-method
3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/
4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e
5. De Prado, Advances in financial machine learning, 2018
"""
ub = 1 + ub
lb = 1 - lb
def end_price(s):
return np.append(s[(s / s[0] > ub) | (s / s[0] < lb)], s[-1])[0]/s[0]
r = np.array(range(lookback))
def end_time(s):
return np.append(r[(s / s[0] > ub) | (s / s[0] < lb)], lookback-1)[0]
price = df[column]
p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1)
t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1)
t = pd.Series(
[t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT')
for i, k in enumerate(t)], index=t.index
).dropna()
label = pd.Series(0, p.index)
label.loc[p > ub] = 1
label.loc[p < lb] = -1
if binary_classification:
label = np.where(label == 1, 1, 0)
return pd.Series(label, index=price.index)
def get_continuous_trading_signals(df, column='close', lookahead=5):
"""
Continuous Trading Signal
A hybrid stock trading framework integrating technical analysis with machine learning techniques.
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
lookahead: int
The number of days to look ahead.
References
----------
1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf
2. Dash and Dash, A hybrid stock trading framework integrating technical analysis with machine learning techniques, 2016
"""
price = df[column]
OTr = []
trends = []
for idx in range(len(price)-lookahead+1):
arr_window = price[idx:(idx+lookahead)]
if price[idx+lookahead-1] > price[idx]:
coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
y_t = coef * 0.5 + 0.5
elif price[idx+lookahead-1] <= price[idx]:
coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
y_t = coef * 0.5
OTr.append(y_t)
OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr))))
trends = (OTr >= np.mean(OTr)).astype(int)
return pd.Series(OTr, index=price.index), pd.Series(trends, index=price.index)
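# A minimal usage sketch on a synthetic random-walk price series; the column
# name, random seed and parameter values below are illustrative assumptions,
# not taken from the original project.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    prices = 100 + np.cumsum(rng.normal(0, 1, 250))
    df = pd.DataFrame({"close": prices}, index=pd.date_range("2020-01-01", periods=250))
    fth_labels = fixed_time_horizon(df, column="close", lookback=20)
    tb_labels = triple_barrier(df, column="close", ub=0.07, lb=0.03, lookback=20)
    print(fth_labels.value_counts())
    print(tb_labels.value_counts())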
| 37.508197
| 124
| 0.647072
| 671
| 4,576
| 4.375559
| 0.342772
| 0.040872
| 0.027248
| 0.024523
| 0.302793
| 0.272139
| 0.257153
| 0.257153
| 0.219687
| 0.219687
| 0
| 0.035524
| 0.21875
| 4,576
| 122
| 125
| 37.508197
| 0.785734
| 0.520542
| 0
| 0.090909
| 0
| 0
| 0.009293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.068182
| 0.045455
| 0.295455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be0d1242d33adfcfc290ba70e3637aa993c895e3
| 4,164
|
py
|
Python
|
mayan/apps/converter/api.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | 3
|
2020-02-03T11:58:51.000Z
|
2020-10-20T03:52:21.000Z
|
mayan/apps/converter/api.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/converter/api.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | 2
|
2020-10-24T11:10:06.000Z
|
2021-03-03T20:05:38.000Z
|
from __future__ import absolute_import
import hashlib
import logging
import os
from django.utils.encoding import smart_str
from common.conf.settings import TEMPORARY_DIRECTORY
from common.utils import fs_cleanup
from .exceptions import OfficeConversionError, UnknownFileFormat
from .literals import (DEFAULT_PAGE_NUMBER,
DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT)
from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE,
TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR,
FILE_FORMATS)
from .runtime import backend, office_converter
HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()
logger = logging.getLogger(__name__)
def cache_cleanup(input_filepath, *args, **kwargs):
try:
os.remove(create_image_cache_filename(input_filepath, *args, **kwargs))
except OSError:
pass
def create_image_cache_filename(input_filepath, *args, **kwargs):
if input_filepath:
hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)]))
return os.path.join(TEMPORARY_DIRECTORY, hash_value)
else:
return None
def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs):
size = kwargs.get('size')
file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT)
zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL)
rotation = kwargs.get('rotation', DEFAULT_ROTATION)
page = kwargs.get('page', DEFAULT_PAGE_NUMBER)
transformations = kwargs.get('transformations', [])
if transformations is None:
transformations = []
if output_filepath is None:
output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs)
if os.path.exists(output_filepath):
return output_filepath
if office_converter:
try:
office_converter.convert(input_filepath, mimetype=mimetype)
if office_converter.exists:
input_filepath = office_converter.output_filepath
mimetype = 'application/pdf'
else:
# Recycle the already detected mimetype
mimetype = office_converter.mimetype
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
if size:
transformations.append(
{
'transformation': TRANSFORMATION_RESIZE,
'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR)))
}
)
if zoom != 100:
transformations.append(
{
'transformation': TRANSFORMATION_ZOOM,
'arguments': {'percent': zoom}
}
)
if rotation != 0 and rotation != 360:
transformations.append(
{
'transformation': TRANSFORMATION_ROTATE,
'arguments': {'degrees': rotation}
}
)
try:
backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype)
finally:
if cleanup_files:
fs_cleanup(input_filepath)
return output_filepath
def get_page_count(input_filepath):
logger.debug('office_converter: %s' % office_converter)
if office_converter:
try:
office_converter.convert(input_filepath)
logger.debug('office_converter.exists: %s' % office_converter.exists)
if office_converter.exists:
input_filepath = office_converter.output_filepath
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
return backend.get_page_count(input_filepath)
def get_available_transformations_choices():
result = []
for transformation in backend.get_available_transformations():
result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label']))
return result
def get_format_list():
return [(format, FILE_FORMATS.get(format, u'')) for format in backend.get_format_list()]
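# A usage sketch kept as a comment, because the module assumes the Mayan Django
# settings and converter backend are configured. A caller asking for a resized,
# zoomed and rotated page image would look roughly like:
#
#   cached_path = convert(
#       '/tmp/document.pdf',   # assumed input path
#       size='800x600',        # appended as a TRANSFORMATION_RESIZE
#       zoom=150,              # appended as a TRANSFORMATION_ZOOM
#       rotation=90,           # appended as a TRANSFORMATION_ROTATE
#       page=1,
#       file_format='jpg',
#   )
#
# The result is cached under TEMPORARY_DIRECTORY via create_image_cache_filename(),
# so repeated calls with the same arguments return the same file path.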
| 32.53125
| 180
| 0.68828
| 435
| 4,164
| 6.321839
| 0.264368
| 0.087273
| 0.024727
| 0.033455
| 0.230182
| 0.216727
| 0.193091
| 0.14
| 0.122909
| 0.047273
| 0
| 0.003105
| 0.226465
| 4,164
| 127
| 181
| 32.787402
| 0.850667
| 0.008886
| 0
| 0.221053
| 0
| 0
| 0.062788
| 0.005818
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063158
| false
| 0.010526
| 0.115789
| 0.010526
| 0.252632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be0d8286d98d561dd73b8ad4757e80b16c93f068
| 2,798
|
py
|
Python
|
LogisticRegression/learn.py
|
ValYouW/DeepLearningCourse
|
d7d9edc60075f9078ec3f41074c958eaa7854964
|
[
"MIT"
] | null | null | null |
LogisticRegression/learn.py
|
ValYouW/DeepLearningCourse
|
d7d9edc60075f9078ec3f41074c958eaa7854964
|
[
"MIT"
] | null | null | null |
LogisticRegression/learn.py
|
ValYouW/DeepLearningCourse
|
d7d9edc60075f9078ec3f41074c958eaa7854964
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils
def plot_data(x_mat, y, db_x, db_y):
plt.figure()
plt.title('Data')
admitted = (y == 1).flatten()
rejected = (y == 0).flatten()
# plot decision boundary
plt.plot(db_x, db_y)
# plot admitted
plt.scatter(x_mat[admitted, 0], x_mat[admitted, 1], color='blue', marker='+')
# plot rejected
plt.scatter(x_mat[rejected, 0], x_mat[rejected, 1], edgecolors='red', facecolors='none', marker='o')
plt.xlabel('exam 1 score')
plt.ylabel('exam 2 score')
plt.legend(['boundary', 'admitted', 'rejected'])
def main():
print('Loading dataset...')
# data is: exam 1 score, exam 2 score, bool whether admitted
frame = pd.read_csv('ex2data1.csv', header=None)
data = frame.values
x_mat = data[:, 0:2] # exam scores
y = data[:, 2:3] # admitted or not
# normalize input (input has large values which cause sigmoid to always be 1 or 0)
x_mean = np.mean(x_mat, axis=0)
x_std = np.std(x_mat, axis=0)
x_norm = (x_mat - x_mean) / x_std
# add intercept
x_norm = np.insert(x_norm, 0, 1, axis=1)
# Learn model
print('starting to learn...')
(loss, reg_loss, theta) = utils.learn(x_norm, y, 5000, 0.1)
print('Final loss %s' % loss[-1])
print('Final theta \n%s' % theta)
# predict for student
joe = np.array([[45, 85]])
joe_norm = (joe - x_mean) / x_std
joe_norm = np.insert(joe_norm, 0, 1, axis=1)
p = utils.sigmoid(joe_norm.dot(theta))
print('Student with grades %s and %s has admission probability: %s' % (45, 85, p[0, 0]))
# Predict on train set
prediction = (utils.sigmoid(x_norm.dot(theta)) >= 0.5)
actual = (y == 1)
predict_success = np.sum(prediction == actual)
print('Model evaluation on training set has success of %s/%s' % (predict_success, y.shape[0]))
# calc decision boundary
# The decision boundary is the threshold line that separates true/false predictions,
# this means that on this line the prediction is exactly 0.5, meaning:
# p = sigmoid(x_mat.dot(theta)) = 0.5 ====> x_mat.dot(theta) = 0
# so our line equation is: theta0 + theta1*x1 + theta2*x2 = 0
# x2 = -theta0 / theta2 - (theta1/theta2)*x1
theta = theta.flatten()
# calc 2 points on the line
plot_x = np.array([np.min(x_norm[:, 1]), np.max(x_norm[:, 1])])
plot_y = -1 * (theta[0] / theta[2]) - (theta[1] / theta[2]) * plot_x
# denormalize the points
plot_x = plot_x * x_std[0] + x_mean[0]
plot_y = plot_y * x_std[1] + x_mean[1]
plot_data(x_mat, y, plot_x, plot_y)
utils.plot_loss(loss)
plt.show()
if __name__ == '__main__':
main()
| 32.534884
| 105
| 0.605075
| 439
| 2,798
| 3.71754
| 0.316629
| 0.029412
| 0.016544
| 0.014706
| 0.057598
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036139
| 0.248392
| 2,798
| 85
| 106
| 32.917647
| 0.739895
| 0.241601
| 0
| 0
| 0
| 0
| 0.130887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.125
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be0d8c6e88406117103733f22d2fc8dd5f14eae8
| 30,231
|
py
|
Python
|
ignite/handlers/time_profilers.py
|
iamhardikat11/ignite
|
0666b407f7cdba81842014c6026e33b66113bb94
|
[
"BSD-3-Clause"
] | 4,119
|
2017-11-23T18:10:37.000Z
|
2022-03-31T05:31:27.000Z
|
ignite/handlers/time_profilers.py
|
iamhardikat11/ignite
|
0666b407f7cdba81842014c6026e33b66113bb94
|
[
"BSD-3-Clause"
] | 1,838
|
2017-11-24T11:19:25.000Z
|
2022-03-31T09:08:18.000Z
|
ignite/handlers/time_profilers.py
|
iamhardikat11/ignite
|
0666b407f7cdba81842014c6026e33b66113bb94
|
[
"BSD-3-Clause"
] | 691
|
2017-11-24T10:57:33.000Z
|
2022-03-29T02:19:44.000Z
|
import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor]
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we can not use BasicTimeProfiler in this case."
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
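# For every profiled event, a timer-reset handler is inserted before all user
# handlers and a timer-read handler is appended after them, so the measured
# span covers exactly the user handlers attached to that event.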
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
# compute on non-zero data:
data = data[data > 0]
out = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
if len(data) > 1:
out += [
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
return OrderedDict(out)
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
) # type: Union[int, torch.Tensor]
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)] # type: ignore[list-item]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
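# Run-level and epoch-level event times are repeat_interleave'd below so that
# every per-iteration row of the CSV also carries the corresponding run- and
# epoch-level handler times.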
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler.
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
EVENT_FILTER_THESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = [] # type: List[float]
self.processing_times = [] # type: List[float]
self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]]
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered" # type: Union[str, float]
min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
mean = "None" # type: Union[str, float]
std = "None" # type: Union[str, float]
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
# adopted implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
[record] hexsha: be0e7ba87c886d267ec11352e01c184c5af3e8dc | path: bellmanford.py | repo: asmodehn/aiokraken @ b260bd41d5aa091e6a4f1818328426fbe6f625c0 | license: MIT | size: 9,671
"""
Bellman Ford Arbitrage implementation over websocket API.
"""
from __future__ import annotations
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
from math import log
import pandas as pd
import numpy as np
import asyncio
import typing
from aiokraken.model.assetpair import AssetPair
from aiokraken.rest import AssetPairs, Assets
from aiokraken.model.asset import Asset
from aiokraken.rest.client import RestClient
from aiokraken.websockets.publicapi import ticker
import networkx as nx
client = RestClient()
async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix):
# For required pairs, get ticker updates
if isinstance(pairs, AssetPairs): # TODO : we need to unify iterable of pairs somehow...
properpairs = pairs
pairs = [p for p in pairs.values()]
else:
properpairs = AssetPairs({p.wsname: p for p in pairs})
tkrs = await client.ticker(pairs=[p for p in pairs])
# TODO : build price matrix
for p, tk in tkrs.items():
# retrieve the actual pair
pair = properpairs[p]
fee = pair.fees[0].get('fee')
# TODO : pick the right fee depending on total traded volume !
await pmatrix(base=pair.base, quote=pair.quote, ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee)
# TODO : 2 levels :
# - slow updates with wide list of pairs and potential interest (no fees - small data for quick compute)
# - websockets with potential arbitrage (including fees - detailed data & precise compute)
async for upd in ticker(pairs=pairs, restclient=client):
print(f"wss ==> tick: {upd}")
# update pricematrix
base = upd.pairname.base
quote = upd.pairname.quote
fee = properpairs[upd.pairname].fees[0].get('fee')
await pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee)
class PriceMatrix:
# Note This matrix is square
# since we want to do arbitrage and find cycles...
df: pd.DataFrame
# we also need to be careful that only one writer can modify data at a time...
wlock: asyncio.Lock
assets: typing.Optional[Assets]
def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]):
self.wlock = asyncio.Lock()
if isinstance(assets, Assets):
assets = [a for a in assets.values()]
self.df = pd.DataFrame(data={c.restname: {c.restname: None for c in assets} for c in assets}, columns=[c.restname for c in assets], dtype='float64')
self.assets = None
async def __call__(self, base: Asset, ask_price: Decimal, quote: Asset, bid_price: Decimal, fee_pct: Decimal):
if self.assets is None: # retrieve assets for filtering calls params, only once.
self.assets = await client.retrieve_assets()
async with self.wlock: # careful with concurrent control.
if not isinstance(base, Asset):
base = self.assets[base].restname
if not isinstance(quote, Asset):
quote = self.assets[quote].restname
# These are done with decimal, but stored as numpy floats for faster compute
self.df[quote][base] = bid_price * ((100 - fee_pct) /100) # bid price to get: quote_curr -- (buy_price - fee) --> base_curr
self.df[base][quote] = ((100 - fee_pct)/100) / ask_price # ask price to get: base_curr -- (sell_price - fee) --> quote_curr
def __getitem__(self, item):
if item not in self.df.columns:
raise KeyError(f"{item} not found")
if item not in self.df:
return pd.Series(dtype=pd.dtype('decimal'))
return self.df[item]
def __len__(self):
return len(self.df.columns)
def __str__(self):
return self.df.to_string()
def neglog(self):
if not self.assets:
return False
newpm = PriceMatrix(assets=[self.assets[c] for c in self.df.columns])
# copy all values and take -log()
for c in self.df.columns:
# TODO : fix this : is it on row, or columns ? which is best ??
newpm.df[c] = np.negative(np.log(self.df[c]))
return newpm
def to_graph(self):
G = nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph)
# from bokeh.io import output_file, show
# from bokeh.plotting import figure, from_networkx
#
# plot = figure(title="Networkx Integration Demonstration", x_range=(-1.1, 1.1), y_range=(-1.1, 1.1),
# tools="", toolbar_location=None)
#
# graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0))
# plot.renderers.append(graph)
#
# output_file("networkx_graph.html")
# show(plot)
return G
def test_pricematrix_mapping():
# testing with string for simplicity for now
pm = PriceMatrix(["EUR", "BTC"])
pm["EUR"]["BTC"] = Decimal(1.234)
pm["BTC"]["EUR"] = Decimal(4.321)
assert pm["EUR"]["BTC"] == Decimal(1.234)
assert pm["BTC"]["EUR"] == Decimal(4.321)
async def arbiter(user_assets):
assets = await client.retrieve_assets()
proper_userassets = Assets(assets_as_dict={assets[a].restname: assets[a] for a in user_assets})
assetpairs = await client.retrieve_assetpairs()
proper_userpairs = AssetPairs(assetpairs_as_dict={p.wsname:p for p in assetpairs.values()
if p.wsname is not None and (
p.base in proper_userassets or p.quote in proper_userassets
)})
# retrieving widely related assets
related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote] for p in proper_userpairs.values())
proper_related_assets = Assets({a.restname: a for a in related_assets})
pmtx = PriceMatrix(assets=proper_related_assets)
# running ticker updates in background
bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx))
try:
# observe pricematrix changes
while True:
# TODO : efficient TUI lib !
# print(pmtx)
# pricegraph = pmtx.to_graph() # display...
neglog = pmtx.neglog()
if neglog:
negcycle = bellmanford(neglog)
if len(negcycle):
amnt = 1 # arbitrary starting amount
pred = negcycle[-1]
dscr = f"{amnt} {pred}"
for cn in reversed(negcycle[:-1]):
amnt = amnt * pmtx[pred][cn]
pred = cn
dscr = dscr + f" -> {amnt} {pred}"
print(f"ARBITRAGE POSSIBLE: {dscr}")
# TODO : from these we can extract market making opportunities ??
# Another way :
# negloggraph = neglog.to_graph()
#
# negcycle = list()
#
# if nx.negative_edge_cycle(negloggraph):
# # find it !
# print("NEGATIVE CYCLE FOUND !")
#
# # Now find it
# print(f"computing cycles... {datetime.now()}")
#
# for cycle in nx.simple_cycles(negloggraph):
# # for cycle in nx.cycle_basis(negloggraph): # NOT implemented !
# # find negative weight sum (cycle need to be more than one node)
# if sum(negloggraph[n][m].get('weight') for n, m in zip(cycle, cycle[1:])) < 0:
# print(f"Found one: {cycle}")
# negcycle.append(cycle)
# print(negcycle)
# print(f"computing cycles DONE ! {datetime.now()}")
await asyncio.sleep(5)
finally:
# in every case cancel the background task now
bgtsk.cancel()
# TODO: react !
def bellmanford(pmatrix_neglog: PriceMatrix, source='ZEUR'):
n = len(pmatrix_neglog)
min_dist = {source: 0}
min_pred = {}
# Relax edges |V - 1| times
for i in range(n - 1): # iterations
for v in pmatrix_neglog.df.columns: # vertex source
if v in min_dist.keys(): # otherwise distance infinite until we know it...
for w in pmatrix_neglog.df.columns: # vertex target
if w not in min_dist.keys() or min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
min_dist[w] = min_dist[v] + pmatrix_neglog[v][w]
min_pred[w] = v
# If we can still relax edges, then we have a negative cycle
for v in pmatrix_neglog.df.columns:
if v in min_dist.keys(): # otherwise node is not yet relevant here
for w in pmatrix_neglog.df.columns:
if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
# print(f"{min_dist[w]} > {min_dist[v]} + {pmatrix_neglog[v][w]}")
path = (w, min_pred[w])
while len(set(path)) == len(path): # while no duplicates, cycle is not complete...
path = (*path, min_pred[path[-1]])
# First cycle retrieved is *likely* (?) to be the minimal one -> the only one we are interested in
return path[path.index(path[-1]):]
return ()
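# A minimal, self-contained sketch (not part of the original module) of why the -log
# transform used by PriceMatrix.neglog() turns a multiplicative arbitrage cycle into a
# negative cycle: r1 * r2 * r3 > 1  <=>  -log(r1) - log(r2) - log(r3) < 0.
# The currencies and rates below are made up purely for illustration.
def _toy_neglog_demo():
    rates = {
        ("EUR", "USD"): 1.10,
        ("USD", "JPY"): 150.0,
        ("JPY", "EUR"): 1.0 / 160.0,  # product of the three rates ≈ 1.031 > 1
    }
    weights = {edge: -log(rate) for edge, rate in rates.items()}
    cycle_weight = sum(weights.values())
    assert cycle_weight < 0  # negative total weight <=> arbitrage opportunity
    return cycle_weight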
if __name__ == '__main__':
asyncio.run(arbiter(user_assets=["XTZ", "ETH", "XBT", "EUR"]), debug=True)
[record] hexsha: be139101ad7d93480666b4065956e230585c96d9 | path: src/fetchcode/vcs/pip/_internal/utils/entrypoints.py | repo: quepop/fetchcode @ ac2461bdf7a249d8815987b4d421dbc615c043b9 | license: Apache-2.0 | size: 1,180
import sys
from fetchcode.vcs.pip._internal.cli.main import main
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List
def _wrapper(args=None):
# type: (Optional[List[str]]) -> int
"""Central wrapper for all old entrypoints.
Historically pip has had several entrypoints defined. Because of issues
arising from PATH, sys.path, multiple Pythons, their interactions, and most
of them having a pip installed, users suffer every time an entrypoint gets
moved.
To alleviate this pain, and provide a mechanism for warning users and
directing them to an appropriate place for help, we now define all of
our old entrypoints as wrappers for the current one.
"""
sys.stderr.write(
"WARNING: pip is being invoked by an old script wrapper. This will "
"fail in a future version of pip.\n"
"Please see https://github.com/pypa/pip/issues/5599 for advice on "
"fixing the underlying issue.\n"
"To avoid this problem you can invoke Python with '-m pip' instead of "
"running pip directly.\n"
)
return main(args)
[record] hexsha: be145918e072dc9949c9e4a6667701e412064948 | path: Support/Make_Documentation.py | repo: bvbohnen/x4-projects @ 2c9db75a720ddb52ddb9e4160c330d7bb1986aa3 | license: MIT | size: 7,896
'''
Support for generating documentation readmes for the extensions.
Extracts from decorated lua block comments and xml comments.
'''
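# Illustrative sketch of the decoration this module scans for (markup shown here is an
# assumption based on the extraction code below, not taken from the project docs):
#   lua:  -- @doc-title My Extension
#         --[[ @doc-overview
#         Free text describing the extension.
#         ]]
#   xml:  an XML comment containing @doc-cue placed directly before the <cue name="..."> node
#         it documents; the cue's name attribute becomes the section label.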
from pathlib import Path
from lxml import etree
import sys
from itertools import chain
project_dir = Path(__file__).resolve().parents[1]
# Set up an import from the customizer for some text processing.
x4_customizer_dir = str(project_dir.parent / 'X4_Customizer')
if x4_customizer_dir not in sys.path:
sys.path.append(x4_customizer_dir)
from Framework.Make_Documentation import Merge_Lines
#from Framework.Make_Documentation import Get_BB_Text
# Grab the project specifications.
from Release_Specs import release_specs
def Make():
for spec in release_specs:
# Update all of the content.xml files.
spec.Update_Content_Version()
# Make each of the doc files (if any).
# (Note: this function is not included in the class methods to avoid
# import issues with the text helper functions below.)
for rel_path, file_list in spec.doc_specs.items():
# Set up the full path.
doc_path = spec.root_path / rel_path
# Get lines for all files.
doc_lines = []
for file_path in file_list:
if file_path.suffix == '.xml':
doc_lines += Get_XML_Cue_Text(file_path)
elif file_path.suffix == '.lua':
doc_lines += Get_Lua_Text(file_path)
with open(doc_path, 'w') as file:
file.write('\n'.join(doc_lines))
return
def Sections_To_Lines(doc_text_sections):
'''
Converts a dict of {section label: text} to a list of text lines,
with labelling and formatting applied.
Expects the input to start with a 'title', then 'overview', then
a series of names of cues or functions.
'''
# Transfer to annotated/indented lines.
functions_started = False
title = ''
ret_text_lines = []
for key, text in doc_text_sections:
# Extract the title and continue; this isn't printed directly.
if key == 'title':
title = text.strip()
continue
# Header gets an 'overview' label.
if key == 'overview':
ret_text_lines += ['', '### {} Overview'.format(title), '']
indent = ''
# Lua functions are in one lump, like overview.
elif key == 'functions':
ret_text_lines += ['', '### {} Functions'.format(title), '']
indent = ''
# Sections may be multiple.
elif key == 'section':
ret_text_lines += ['','']
indent = ''
# Otherwise these are md cues.
else:
indent = ' '
# Stick a label line when starting the function section.
if not functions_started:
functions_started = True
ret_text_lines += ['', '### {} Cues'.format(title), '']
# Bullet the function name.
ret_text_lines.append('* **{}**'.format(key))
# Process the text a bit.
text = Merge_Lines(text)
# Add indents to functions, and break into convenient lines.
text_lines = [indent + line for line in text.splitlines()]
# Record for output.
ret_text_lines += text_lines
return ret_text_lines
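# Hypothetical usage (labels invented for illustration): passing
#   [("title", "My Ext"), ("overview", "What it does."), ("Some_Cue", "Fires on game start.")]
# yields lines containing a "### My Ext Overview" header, a "### My Ext Cues" header, and a
# bulleted "* **Some_Cue**" entry with its description indented beneath it.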
def Get_XML_Cue_Text(xml_path):
'''
Returns a list of lines holding the documentation extracted
from a decorated MD xml file.
'''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Read the xml and pick out the cues.
tree = etree.parse(str(xml_path))
root = tree.xpath('/*')[0]
cues = tree.xpath('/*/cues')[0]
# Stride through comments/cues in the list.
# Looking for decorated comments.
for node in chain(root.iterchildren(), cues.iterchildren()):
# Skip non-comments.
# Kinda awkward how lxml checks this (isinstance doesn't work).
if node.tag is not etree.Comment:
continue
# Handle title declarations.
if '@doc-title' in node.text:
label = 'title'
text = node.text.replace('@doc-title','')
elif '@doc-overview' in node.text:
label = 'overview'
text = node.text.replace('@doc-overview','')
elif '@doc-section' in node.text:
label = 'section'
text = node.text.replace('@doc-section','')
elif '@doc-cue' in node.text:
label = node.getnext().get('name')
text = node.text.replace('@doc-cue','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
def Get_Lua_Text(lua_path):
'''
Extract documentation text from a decorated lua file.
'''
text = lua_path.read_text()
ret_text_lines = []
# Extract non-indented comments.
# TODO: maybe regex this.
comment_blocks = []
lua_lines = text.splitlines()
i = 0
while i < len(lua_lines):
this_line = lua_lines[i]
if this_line.startswith('--[['):
# Scan until the closing ]].
these_lines = []
# Record the first line.
these_lines.append(this_line.replace('--[[',''))
i += 1
# Only search to the end of the doc.
while i < len(lua_lines):
next_line = lua_lines[i]
if next_line.startswith(']]'):
# Found the last line; skip it.
break
these_lines.append(next_line)
i += 1
comment_blocks.append('\n'.join(these_lines))
# Check single-line comments after block comments, to avoid
# -- confusion.
elif this_line.startswith('--'):
comment_blocks.append(this_line.replace('--',''))
# Always one increment per loop.
i += 1
# Title to put on label lines.
# Starts blank, filled by decorator.
title = ''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Go through the comments looking for decorators.
for comment in comment_blocks:
# Handle title declarations.
if '@doc-title' in comment:
label = 'title'
text = comment.replace('@doc-title','')
# Text blocks are either overview or cue.
elif '@doc-overview' in comment:
label = 'overview'
text = comment.replace('@doc-overview','')
# For now, all functions are lumped together in one comment.
elif '@doc-functions' in comment:
label = 'functions'
text = comment.replace('@doc-functions','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
#-Removed; generally avoiding putting main docs on the forum.
#def Make_BB_Code(doc_dir, header_lines = []):
# '''
# Turn the ext_dir's readme into a bbcode txt file.
# Output is placed in the release folder.
# '''
# release_dir = project_dir / 'Release'
# if not release_dir.exists():
# release_dir.mkdir()
#
# # Grab the readme contents.
# doc_lines = (doc_dir / 'Readme.md').read_text().splitlines()
# # Generate a bbcode version, prefixing with custom header.
# bb_lines = header_lines + Get_BB_Text(doc_lines)
# (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\n'.join(bb_lines))
# return
if __name__ == '__main__':
Make()
[record] hexsha: be15fa91cd3274065ddb261552f8c0f2ea292fcd | path: curso 1/04 - caixa de texto/a4.py | repo: andersonssh/aprendendo-pyqt5 @ d15ad7378d4573410c11fc39042df19048c656e4 | license: MIT | size: 2,960
import sys
from PyQt5.QtWidgets import (QApplication,
QMainWindow,
QPushButton,
QToolTip,
QLabel,
QLineEdit)
from PyQt5 import QtGui
class Janela(QMainWindow):
def __init__(self):
super().__init__()
self.topo = 50
self.esquerda = 50
self.largura = 800
self.altura = 600
self.titulo = 'Primeira janela'
self.gera_labels()
self.gera_botoes()
self.gera_imagens()
self.gera_caixas_de_texto()
def carregar_janela(self):
self.setGeometry(self.esquerda, self.topo, self.largura, self.altura)
self.setWindowTitle(self.titulo)
self.show()
def gera_botoes(self):
# buttons
botao1 = QPushButton('Botao 1', self)
botao1.move(100, 100)
botao1.resize(100, 50)
botao1.setStyleSheet(
'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange; font-weight: 600;}')
botao1.clicked.connect(self.b1)
botao2 = QPushButton('Botao 2', self)
botao2.move(300, 100)
botao2.resize(100, 50)
botao2.setStyleSheet(
'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')
botao2.clicked.connect(self.b2)
botao3 = QPushButton('Texto', self)
botao3.move(500, 100)
botao3.resize(100, 50)
botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')
botao3.clicked.connect(self.b3)
def gera_labels(self):
self.l1 = QLabel(self)
self.l1.setText('Clique em um botao')
self.l1.move(50, 50)
self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}')
self.l1.resize(250, 50)
self.l2 = QLabel(self)
self.l2.setText('Digitou: ')
self.l2.move(300, 30)
self.l2.resize(260, 50)
self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}')
def gera_imagens(self):
self.carro = QLabel(self)
self.carro.move(25, 200)
self.carro.resize(450, 337)
self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))
def gera_caixas_de_texto(self):
self.caixa_texto = QLineEdit(self)
self.caixa_texto.move(25, 10)
self.caixa_texto.resize(150, 50)
def b1(self):
# approach 1
self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))
def b2(self, l):
# approach 2
self.carro.setPixmap(QtGui.QPixmap('carro2.jpg'))
def b3(self):
conteudo = self.caixa_texto.text()
self.l2.setText('Digitou: {}'.format(conteudo))
if __name__ == '__main__':
app = QApplication(sys.argv)
janela = Janela()
janela.carregar_janela()
sys.exit(app.exec_())
[record] hexsha: be1d04203f18e6f16b60a723e614122b48a08671 | path: data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py | repo: harshp8l/deep-learning-lang-detection @ 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | license: MIT | size: 1,097
import os
from kombu import Queue, Exchange
## Broker settings.
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672')
#BROKER_URL = "amqp://guest:guest@localhost:5672/"
#BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379')
#BROKER_HOST = "localhost"
#BROKER_PORT = 27017
#BROKER_TRANSPORT = 'mongodb'
#BROKER_VHOST = 'celery'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', exchange=Exchange('default'), routing_key='default'),
# Queue('aws_uploads', routing_key='video.uploads'),
)
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_IMPORTS = ('celeryservice.tasks',)
#CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')
## Using the database to store task state and results.
#CELERY_RESULT_BACKEND = "mongodb"
#CELERY_MONGODB_BACKEND_SETTINGS = {
# "host": "localhost",
# "port": 27017,
# "database": "celery",
# "taskmeta_collection": "celery_taskmeta",
#}
[record] hexsha: be1d72eb89ee80a827a9a1150e2c759579770b36 | path: timesheet.py | repo: dgollub/timesheet-google-thingy @ 3ffab402444dba520ff3416b2327f6d2ceeeac39 | license: MIT | size: 21,106
# -*- coding: utf-8 -*-
#
#
from __future__ import print_function
import csv
import os
import re
import sys
import arrow
from gsheets import Sheets
CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
DEBUG = os.environ.get('DEBUG', "0") == "1"
AS_CSV = os.environ.get('CSV', "0") == "1"
COL_DATE = 0
COL_WEEKDAY = 1
COL_TIME_START = 2
COL_TIME_END = 3
COL_LUNCH = 4
COL_TIME = 5 # includes lunch
COL_TIME_FIXED = 6 # does not include lunch
COL_MOVE = 7
COL_WORK_FROM_HOME = 8
COL_NOTES = 9
COL_TASKS_START = 10
SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"]
SATURDAY = 5
SUNDAY = 6
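# calc() below parses "H:MM"-style duration strings into an (hours, minutes) tuple,
# e.g. calc("7:30") -> (7, 30); with half_it=True both parts are halved.
# A bare value such as "7" yields (7, 0); split_char allows parsing dotted values like "1.5"
# (used for task durations elsewhere in this file).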
def calc(hour, half_it=False, split_char = ":"):
parts = str(hour).split(split_char)
try:
local_hours = int(parts[0])
local_minutes = int(parts[1])
if half_it:
local_hours = local_hours / 2
local_minutes = local_minutes / 2
return local_hours, local_minutes
except:
if len(parts) == 1:
try:
return int(parts[0]), 0
except:
return 0, 0
def get_client_secret_filenames():
filename = os.path.join(CURRENT_PATH, "client-secrets.json")
cachefile = os.path.join(CURRENT_PATH, "client-secrets-cache.json")
if not os.path.exists(filename):
filename = os.path.expanduser(os.path.join("~", "client-secrets.json"))
cachefile = os.path.expanduser(os.path.join("~", "client-secrets-cache.json"))
if not os.path.exists(filename):
raise Exception("Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart")
return filename, cachefile
def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')):
print("Opening timesheet for %s ..." % (date))
sheets = api.get(timesheet_url)
sheet = sheets.sheets[0]
print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..." % (sheets.title or "???", sheet.title or "???"))
rows = sheet.values()
return rows
def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):
now = arrow.now()
today = now.format('YYYYMMDD')
try:
other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')
except arrow.parser.ParserError:
other_date = today
use_date = other_date
rows = load_first_sheet_rows(api, timesheet_url, use_date)
timesheet = get_timesheet_for_date(rows, use_date, user_full_name)
if timesheet:
print("\n\n")
print("Timesheet for %s" % (use_date))
print(timesheet)
print("\n")
else:
print("No entry found for %s" % use_date)
def get_timesheet_for_date(rows, date, user_full_name):
# find the row with the first column that has today's date in it
result_rows = [row for row in rows if row and str(row[COL_DATE]) == date]
if result_rows is None or not result_rows:
return None
if len(result_rows) != 1:
print("More than one entry (%d) found for date %s! Please fix your sheet!" % (len(result_rows), date))
return None
found_row = result_rows[0]
found_index = rows.index(found_row)
start_val = found_row[COL_TIME_START]
end_val = found_row[COL_TIME_END]
duration_val = found_row[COL_TIME_FIXED]
max_cols = len(found_row)
if not start_val:
if start_val in SPECIAL_VALUES:
print("You forgot to add your start time.")
return None
if not end_val:
if end_val in SPECIAL_VALUES:
print("You forgot to add your end time.")
return None
#if max_cols >= COL_NOTES:
# print("No notes/tasks entered yet.")
# return None
def parse_hours(val):
try:
return arrow.get(val, "HH:mm")
except arrow.parser.ParserError:
return arrow.get(val, "H:mm")
start = parse_hours(start_val).format("HH:mm")
end = parse_hours(end_val).format("HH:mm")
duration = str(duration_val)
notes_str = found_row[COL_NOTES]
notes = notes_str.split('\n')
# check the previous Friday entry (if today is not Friday), to see what work from home
# days were selected
weekday = (found_row[COL_WEEKDAY] or "").lower()
check_start_index = found_index if weekday.startswith("fr") else found_index - 7
check_row = found_row
while (check_start_index < found_index):
check_row = rows[check_start_index]
if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or "").lower().startswith("fr"):
break
check_start_index += 1
is_same_day = None
if check_start_index != found_index:
# print("HA! GOT PREVS FRIDAY.")
is_same_day = False
else:
# print("SAME DAY")
is_same_day = True
wfh = u"" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME]
wfh = wfh.replace("Mon", "Monday")
wfh = wfh.replace("Tue", "Tuesday")
wfh = wfh.replace("Wed", "Wednesday")
wfh = wfh.replace("Thu", "Thursday")
wfh = wfh.replace("Fri", "Friday")
wfh = wfh.replace(", ", ",").replace(",", " and ")
wfh_extra = "Next week" if is_same_day else "This week"
wfh_info = """%s %s""" % (wfh_extra, wfh) if wfh != "" else "all days"
# 2021-01-04 just make this the default for now
wfh_info = "at all times, unless mentioned otherwise below"
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
total_time_minutes_from_tasks = 0
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = found_row[idx].strip()
if task:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, task_duration] = g
hours, half_hours = calc(task_duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
total_time_minutes_from_tasks += minutes
other_lines = task.split('\n')[1:]
tasks.append("%s %s\n%s" % (task_number.strip(), task_details[:-2].strip(), '\n'.join(other_lines)))
def format_tasks(tasks):
if not tasks:
return ''
result = 'Tasks:\n'
for task in tasks:
if '\n' in task:
sub_tasks = task.split('\n')
if len(sub_tasks) > 1:
result += '\n* ' + sub_tasks[0] # main task
for sub_task in sub_tasks[1:]: # actual sub tasks
result += '\n\t' + sub_task
result += '\n'
else:
result += '\n* ' + task
else:
result += '\n* ' + task
return result
def format_notes(notes):
if not notes or (len(notes) == 1 and not notes[0]):
return ''
result = 'Additional Notes:\n'
for note in notes:
result += '\n* ' + note
return result
total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2)
total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2)
total_duration = "%s:%s" % (total_hours, total_minutes)
test_duration = duration
if len(test_duration) <= 4:
test_duration = "0%s" % duration
if total_duration != test_duration:
print("")
print("")
print("The task times do not add up! Tasks vs time entered: %s != %s" % (total_duration, test_duration))
print("")
print("")
# Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s])
msg = """
[Daily Report] %(date)s
WFH: %(wfh_info)s
Hi,
Daily Report for Date: %(date)s
%(tasks)s
%(notes)s
Kind regards,
%(user_full_name)s
""".strip() % {
"date": date,
"user_full_name": user_full_name,
"start": start,
"end": end,
"duration": duration,
"wfh_info": wfh_info,
"tasks": format_tasks(tasks) if tasks else "",
"notes": format_notes(notes) if notes else "",
"total_hours": total_hours,
"total_minutes": total_minutes,
}
print("Total time for all tasks (%s): %s - %s:%s" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes))
return msg
def _load_sheet_data(api, timesheet_url, arg_date=None):
try:
date = arrow.get(arg_date, 'YYYYMM')
except Exception: # pylint: disable=W0703
now = arrow.now()
date = now.format('YYYYMM')
rows = load_first_sheet_rows(api, timesheet_url, date)
date_str = str(date.format('YYYYMM'))
return (rows, date_str)
def export_csv(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
csv_filename = os.path.join(os.getcwd(), "%s.csv" % (arg_date))
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
print("Writing to %s" % (csv_filename))
with open(csv_filename, mode='w') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# f.writerow(['John Smith', 'Accounting', 'November'])
f.writerow(["username", "date", "task", "duration", "work_type", "details"])
def w(task, duration_minutes, details = ""):
work_type = "Meeting" if "meeting" in details.lower() else "Development"
# Needed CSV columns
# username|date|task|duration|work_type|details
f.writerow(["daniel", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, "%dm" % (duration_minutes), work_type, details])
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if time_start is None or time_end is None or date is None:
continue
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
if len(tasks) == 0:
print("%s: no tasks found! %s" % (date, time_start))
continue
print("%s: %d tasks found!" % (date, len(tasks)))
for task in tasks:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, duration] = g
hours, half_hours = calc(duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
if DEBUG:
print("time: %s, %s $ %s $ %s" % (hours, half_hours, duration, minutes))
details = "%s %s" % (task_number, task_details[:-1].strip())
w(task_number, minutes, details.strip())
print("")
print("CSV output to: %s" % (csv_filename))
def calc_daily_hours_for_month(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
minutes = 0
days = 0
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
worked_at = row[COL_MOVE] if max_cols >= COL_MOVE else None
notes = row[COL_NOTES] if max_cols >= COL_NOTES else ""
if time_start is None or time_end is None or date is None:
continue
start_hours, start_minutes = calc(time_start)
end_hours, end_minutes = calc(time_end)
if start_hours == 0:
print("%s: Day off because of %s" % (date, "whatever" if time_start == 0 else time_start))
continue
extra_info = ""
the_date = arrow.get(str(date), 'YYYYMMDD')
if the_date.weekday() in [SATURDAY, SUNDAY]:
extra_info += " - Weekend work"
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
extra_info += " - half day PTO"
if worked_at in ['o', 'O'] or "OFFICE" in notes.upper():
extra_info += " - Commute to office"
minutes_day = abs(end_hours - start_hours) * 60
minutes_day += end_minutes - start_minutes
minutes += minutes_day
hours_day = int(minutes_day / 60)
hours_day_without_lunch = hours_day - 1
minutes_day = minutes_day % 60
total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2)
days += 1
no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2)
print("%s: %s to %s = %s (without lunch: %s)%s" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info))
hours = str(minutes / 60).zfill(2)
minutes = str(minutes % 60).zfill(2)
lunch_hours = str(int(float(hours)) - days).zfill(2)
print("")
print("Total days worked: %s" % str(days))
print("Total hours: %s:%s (with 1 hour lunch: %s:%s)" % (hours, minutes, lunch_hours, minutes))
print("")
def calc_stats(api, timesheet_url, arg_date=None):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
# find the rows for the given month
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
if not AS_CSV:
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
dates, hours = [], []
half_days = {}
first = None
last = None
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if day_type is None:
continue
if day_type in SPECIAL_VALUES:
time = day_type
hours.append(time)
dates.append(date)
continue
elif not tasks:
continue
# If it was a half day, meaning I took half a day off, then only count half the time
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
half_days[date] = time
hours.append(time)
dates.append(date)
if first is None:
first = row
else:
last = row
total_hours, total_minutes, total_time = 0, 0, ""
for index, hour in enumerate(hours):
date = dates[index]
local_hours, local_minutes = calc(hour, date in half_days)
total_hours += local_hours
total_minutes += local_minutes
if total_minutes >= 60:
total_hours += (total_minutes / 60)
total_minutes = total_minutes % 60
total_time = "%d:%d hours:minutes" % (total_hours, total_minutes)
expected = 0
actual_h, actual_m = 0, 0
if not AS_CSV:
print("*" * 50)
print("")
print("Valid hours entries: %s\t[required vs actual]" % len(hours))
deduct_work_hours = 0
work_hours = 0
work_minutes = 0
days = 0
expected_hours_accumulated_total = 0
for index, worked_date in enumerate(dates):
days += 1
if hours[index] in SPECIAL_VALUES:
if not AS_CSV:
print(" %s: Off, because %s" % (worked_date, hours[index]))
else:
pass
else:
half_day = worked_date in half_days
# each workday has 8 hours of work, but on half days it is only half of 8, aka 4.
work_hours_for_the_day = 8 if not half_day else 4
expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day)
expected_minutes_accumulated_total = expected_hours_accumulated_total * 60
# hours[index] is the actual time worked, e.g. 6:30 means 6 hours and 30 minutes
local_h, local_m = calc(hours[index])
work_hours += local_h
work_minutes += local_m
actual_h = work_hours
# 330 minutes = 6 hours and 30 minutes
actual_h += int(work_minutes / 60)
actual_m = work_minutes % 60
if AS_CSV:
print("%s;%s;" % (worked_date, hours[index]))
else:
print(" %s: %s\t[%s:00 vs %s:%s] %s" % (worked_date, hours[index], expected_hours_accumulated_total,
str(actual_h).zfill(2), str(actual_m).zfill(2),
"Half day" if half_day else ""))
if not AS_CSV:
print("")
print("First:", "<first> not found" if first is None else first[COL_DATE])
print("Last:", "<last> not found" if last is None else last[COL_DATE])
print("")
print("Total time in %s: %s" % (date, total_time))
print("")
print("*" * 50)
def main():
# print("Checking environment variable TIMESHEET_URL for spreadsheet URL...")
timesheet_url = os.environ.get('TIMESHEET_URL', "").strip()
if not timesheet_url:
raise Exception("Please set the TIMESHEET_URL environment variable accordingly.")
# print("Checking environment variable USER_FULL_NAME for spreadsheet URL...")
user_full_name = os.environ.get('USER_FULL_NAME', "").strip()
if not user_full_name:
print("Warning: USER_FULL_NAME environment variable not set!")
user_full_name = "Herman Toothrot"
print("")
print("Usage: python timesheet.py [command|date] [date]")
print("Example: python timesheet.py stats 202011")
print("Example: python timesheet.py 20201130")
print("")
print("Available commands:")
print("- stats: show summed up hours and minutes for the given/current month")
print(" use \"CSV=1 python timesheet.py stats\" to format the output")
print(" as CSV")
print("- daily: same as stats, except ready to email to HR")
print("- csv: task breakdown for the month and time spend on each task")
print("")
print("""Tip: use "DEBUG=1 timesheet <parameter>" to enable debug output""")
print("")
print("Trying to load client-secrets.json file ...")
secrets_file, cache_file = get_client_secret_filenames()
sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False)
print("Success.")
date = None if len(sys.argv) < 3 else sys.argv[2].strip()
arg = "read today" if len(sys.argv) < 2 else sys.argv[1].strip()
if arg == "stats":
calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "daily":
calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "csv":
export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
else:
date_to_use = "read today" if arg == '' else arg
load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name)
print("Done.")
if __name__ == "__main__":
main()
[record] hexsha: be1da4c3a9cd8b6f92a68b6f9d9dd0277f9d55ce | path: league/game.py | repo: Orpheon/All-in @ 016901953904250226f388422318ef2f739bf82e | license: MIT | size: 7,578
import numpy as np
import pickle
import treys
import constants
FULL_DECK = np.array(treys.Deck.GetFullDeck())
class GameEngine:
def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger):
self.BATCH_SIZE = BATCH_SIZE
self.INITIAL_CAPITAL = INITIAL_CAPITAL
self.SMALL_BLIND = SMALL_BLIND
self.BIG_BLIND = BIG_BLIND
self.logger = logger
self.N_PLAYERS = 6
def generate_cards(self):
cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1))
for i in range(self.BATCH_SIZE):
cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])]
community_cards = cards[:, :5]
hole_cards = np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2))
return community_cards, hole_cards
def run_game(self, players):
if len(players) != self.N_PLAYERS:
raise ValueError('Only {} players allowed'.format(self.N_PLAYERS))
community_cards, hole_cards = self.generate_cards()
folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool)
prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int)
for player in players:
player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS)
# Pre-flop
bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0])
prev_round_investment += bets
# Flop
bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3])
prev_round_investment += bets
# Turn
bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4])
prev_round_investment += bets
# River
bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards)
prev_round_investment += bets
# Showdown
pool = np.sum(prev_round_investment, axis=1)
total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float)
hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded))
ranks = np.argsort(hand_scores, axis=1)
sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1)
# Get everyone who has the best hand; the pot will be split among them
participants = hand_scores == sorted_hands[:, 0][:, None]
# Get the number of times each pot will be split
n_splits_per_game = participants.sum(axis=1)
# Split and distribute the money
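# (Illustrative numbers: with a 300-chip pool and two tied best hands, gains is 150 per
#  winner; every player's own investment is then subtracted below, so losers end up
#  negative and winners net the difference.)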
gains = pool / n_splits_per_game
total_winnings += participants * gains[:, None]
total_winnings -= prev_round_investment
self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in players], folded, hole_cards))
self.logger.save_to_file()
for player_idx, player in enumerate(players):
round, current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state
player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded, last_raiser,
hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx])
return total_winnings
def run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards):
"""
:param players: [Player]
:param prev_round_investment: np.ndarray(batchsize, n_players) = int
:param folded: np.ndarray(batchsize, n_players) = bool
:param round: int ∈ {0..3}
:param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card
:param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card
:return: current_bets: np.ndarray(batchsize, n_players)=int {0-200}
"""
current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int)
max_bets = np.zeros(self.BATCH_SIZE, dtype=int)
min_raise = np.zeros(self.BATCH_SIZE, dtype=int)
min_raise[:] = self.BIG_BLIND
last_raiser = np.zeros(self.BATCH_SIZE, dtype=int)
player_order = list(enumerate(players))
round_countdown = np.zeros(self.BATCH_SIZE, dtype=int)
round_countdown[:] = self.N_PLAYERS
if round == constants.PRE_FLOP:
current_bets[:, 0] = self.SMALL_BLIND
current_bets[:, 1] = self.BIG_BLIND
max_bets[:] = self.BIG_BLIND
player_order = player_order[2:] + player_order[:2]
while True:
running_games = np.nonzero(round_countdown > 0)[0]
for player_idx, player in player_order:
actions, amounts = player.act(player_idx, round, round_countdown > 0, current_bets, min_raise,
prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :],
community_cards)
# Disabled when not necessary because it bloats the log size (by ~500 kB or so, which triples the size)
# self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:, player_idx]))
# People who have already folded continue to fold
actions[folded[:, player_idx] == 1] = constants.FOLD
# People who have gone all-in continue to be all-in
actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL
###########
# CALLING #
###########
calls = np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0]
if calls.size > 0:
investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls])
# Reset the bets and countdown
current_bets[calls, player_idx] = investment
###########
# RAISING #
###########
raises = np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0]
if raises.size > 0:
# print("True raises", raises, amounts[raises])
investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises])
min_raise[raises] = investment - max_bets[raises]
max_bets[raises] = investment
# Reset the bets and countdown
current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx])
round_countdown[raises] = self.N_PLAYERS
last_raiser[raises] = player_idx
###########
# FOLDING #
###########
folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx] = 1
round_countdown[running_games] -= 1
#TODO: if all folded stops game, improves performance but breaks tests
# test is not broken, is there another reason?
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0
if np.max(round_countdown[running_games]) <= 0:
return current_bets, (round, current_bets, min_raise, prev_round_investment, folded, last_raiser)
def evaluate_hands(self, community_cards, hole_cards, contenders):
evaluator = treys.Evaluator()
# 7463 = 1 lower than the lowest score a hand can have (scores are descending to 1)
results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int)
for game_idx,community in enumerate(community_cards):
for player_idx,hole in enumerate(hole_cards[game_idx]):
if contenders[game_idx, player_idx]:
results[game_idx, player_idx] = evaluator.evaluate(community.tolist(), hole.tolist())
return results
[record] hexsha: be1dddb28d3c0ea4aa8ef940a579e9c73af88093 | path: cms/admin/views.py | repo: miloprice/django-cms @ c6f548f0983a7488609e07a57552b47675d8d78e | license: BSD-3-Clause | size: 2,487
# -*- coding: utf-8 -*-
from cms.models import Page, Title, CMSPlugin, Placeholder
from cms.utils import get_language_from_request
from django.http import Http404
from django.shortcuts import get_object_or_404
def revert_plugins(request, version_id, obj):
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
cms_plugin_list = []
placeholders = {}
plugin_list = []
titles = []
others = []
page = obj
lang = get_language_from_request(request)
for rev in revs:
obj = rev.object
if obj.__class__ == Placeholder:
placeholders[obj.pk] = obj
if obj.__class__ == CMSPlugin:
cms_plugin_list.append(obj)
elif hasattr(obj, 'cmsplugin_ptr_id'):
plugin_list.append(obj)
elif obj.__class__ == Page:
pass
#page = obj #Page.objects.get(pk=obj.pk)
elif obj.__class__ == Title:
titles.append(obj)
else:
others.append(rev)
if not page.has_change_permission(request):
raise Http404
current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page))
for pk, placeholder in placeholders.items():
# admin has already created the placeholders; get them instead
try:
placeholders[pk] = page.placeholders.get(slot=placeholder.slot)
except Placeholder.DoesNotExist:
placeholders[pk].save()
page.placeholders.add(placeholders[pk])
for plugin in cms_plugin_list:
# connect plugins to the correct placeholder
plugin.placeholder = placeholders[plugin.placeholder_id]
plugin.save(no_signals=True)
for plugin in cms_plugin_list:
plugin.save()
for p in plugin_list:
if int(p.cmsplugin_ptr_id) == int(plugin.pk):
plugin.set_base_attr(p)
p.save()
for old in current_plugins:
if old.pk == plugin.pk:
plugin.save()
current_plugins.remove(old)
for title in titles:
title.page = page
try:
title.save()
except Exception:
title.pk = Title.objects.get(page=page, language=title.language).pk
title.save()
for other in others:
other.object.save()
for plugin in current_plugins:
plugin.delete()
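# A hypothetical wiring sketch (the view name and URL parameters below are
# illustrative, not part of django-cms): an admin view would look up the page
# being reverted and delegate the plugin/title rollback to revert_plugins() above.
from django.shortcuts import redirect
def revert_page(request, page_id, version_id):
    page = get_object_or_404(Page, pk=page_id)
    revert_plugins(request, version_id, page)
    return redirect(page.get_absolute_url())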
| 36.573529
| 101
| 0.62686
| 300
| 2,487
| 4.99
| 0.29
| 0.04676
| 0.034736
| 0.029392
| 0.062792
| 0.032064
| 0
| 0
| 0
| 0
| 0
| 0.007299
| 0.283876
| 2,487
| 68
| 102
| 36.573529
| 0.83324
| 0.065943
| 0
| 0.129032
| 0
| 0
| 0.006903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0.016129
| 0.080645
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be1f5618419f3d6206980e4841ac306ca5a5ac13
| 854
|
py
|
Python
|
数据分析/matplotlib/03.demo.py
|
likedeke/python-spider-study
|
09bee3cbe833234a86efcc28d62ace000e2fbb4b
|
[
"Apache-2.0"
] | 1
|
2021-08-20T11:47:51.000Z
|
2021-08-20T11:47:51.000Z
|
数据分析/matplotlib/03.demo.py
|
likedeke/python-spider-study
|
09bee3cbe833234a86efcc28d62ace000e2fbb4b
|
[
"Apache-2.0"
] | null | null | null |
数据分析/matplotlib/03.demo.py
|
likedeke/python-spider-study
|
09bee3cbe833234a86efcc28d62ace000e2fbb4b
|
[
"Apache-2.0"
] | null | null | null |
# - - - - - - - - - - -
# @author like
# @since 2021-02-23 11:08
# @email 980650920@qq.com
# Temperature changes from ten o'clock to twelve o'clock
from matplotlib import pyplot as plt
from matplotlib import rc
from matplotlib import font_manager
import random
x = range(0, 120)
y = [random.randint(20, 35) for i in range(120)]
plt.figure(figsize=(20, 8), dpi=80)
plt.plot(x, y)
# Chinese font
chFont = font_manager.FontProperties(family="SimHei") # SimHei
# chFont = font_manager.FontProperties(fname="C:/Windows/Fonts/SIMHEI.TTF")
# Tick-related settings
step = 10
xLabels = ["10点,{}分".format(i) for i in range(60)]
xLabels += ["11点,{}分".format(i) for i in range(60)]
plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont)
# Add descriptive information (labels and title)
plt.xlabel("时间", fontProperties=chFont)
plt.ylabel("温度 单位(℃)", fontProperties=chFont)
plt.title("10点到12点每分钟的气温变化", fontProperties=chFont)
plt.show()
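# A minimal English-label variant (illustrative data, not from the original demo)
# of the same tick-thinning pattern: slice the tick positions and the label list
# with the same step so they stay aligned. Imports are repeated so the sketch
# stands alone.
from matplotlib import pyplot as plt
import random
x = range(0, 120)
y = [random.randint(20, 35) for _ in x]
labels = ["10:{:02d}".format(i) for i in range(60)] + ["11:{:02d}".format(i) for i in range(60)]
plt.figure(figsize=(20, 8), dpi=80)
plt.plot(x, y)
plt.xticks(list(x)[::10], labels[::10], rotation=25)
plt.xlabel("time")
plt.ylabel("temperature (C)")
plt.title("Temperature per minute from 10:00 to 12:00")
plt.show()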
| 23.722222
| 80
| 0.696721
| 128
| 854
| 4.640625
| 0.578125
| 0.13468
| 0.10101
| 0.055556
| 0.070707
| 0.070707
| 0.070707
| 0.070707
| 0
| 0
| 0
| 0.071429
| 0.131148
| 854
| 35
| 81
| 24.4
| 0.726415
| 0.228337
| 0
| 0
| 0
| 0
| 0.069444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be1f96521bb4c93e3fbc514880ddde1a151dfa0d
| 1,351
|
py
|
Python
|
testing/vcs/test_vcs_isoline_labels.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 62
|
2018-03-30T15:46:56.000Z
|
2021-12-08T23:30:24.000Z
|
testing/vcs/test_vcs_isoline_labels.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 114
|
2018-03-21T01:12:43.000Z
|
2021-07-05T12:29:54.000Z
|
testing/vcs/test_vcs_isoline_labels.py
|
CDAT/uvcdat
|
5133560c0c049b5c93ee321ba0af494253b44f91
|
[
"BSD-3-Clause"
] | 14
|
2018-06-06T02:42:47.000Z
|
2021-11-26T03:27:00.000Z
|
import os, sys, cdms2, vcs, vcs.testing.regression as regression
dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data = dataset("clt")
canvas = regression.init()
isoline = canvas.createisoline()
isoline.label="y"
texts=[]
colors = []
for i in range(10):
text = canvas.createtext()
text.color = 50 + 12 * i
text.height = 12
colors.append(100 + 12 * i)
if i%2 == 0:
texts.append(text.name)
else:
texts.append(text)
isoline.text = texts
# First test using isoline.text[...].color
canvas.plot(data, isoline, bg=1)
baseline = os.path.splitext(sys.argv[1])
baselineImage = "%s%s"%baseline
ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage)
# Now set isoline.linecolors and test again.
canvas.clear()
isoline.linecolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels2.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
# Now set isoline.textcolors and test again.
canvas.clear()
isoline.textcolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels3.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
sys.exit(ret)
| 29.369565
| 87
| 0.721688
| 197
| 1,351
| 4.86802
| 0.370558
| 0.025026
| 0.043796
| 0.065693
| 0.451512
| 0.451512
| 0.32951
| 0.32951
| 0.32951
| 0.114703
| 0
| 0.024639
| 0.128793
| 1,351
| 45
| 88
| 30.022222
| 0.790144
| 0.093264
| 0
| 0.2
| 0
| 0
| 0.089271
| 0.067977
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028571
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be204f98e2c8943df601cdf5f75bb96f08fc6392
| 34,671
|
py
|
Python
|
src/Python_version/ICE_py36.py
|
ds-utilities/ICE
|
9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f
|
[
"MIT"
] | 2
|
2019-08-05T08:26:38.000Z
|
2020-05-16T14:10:00.000Z
|
src/Python_version/ICE_py36.py
|
postyear/ICE
|
9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f
|
[
"MIT"
] | null | null | null |
src/Python_version/ICE_py36.py
|
postyear/ICE
|
9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f
|
[
"MIT"
] | 2
|
2020-05-16T14:10:01.000Z
|
2021-02-09T20:05:46.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 05:47:03 2018
@author: zg
"""
import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing
def RWR(A, nSteps, laziness, p0 = None):
'''
% the random walk with restart algorithm.
% A is the input net matrix, with the diagonal set to 0.
% nSteps: how many steps to walk
% laziness: the probability of restarting (going back to the start node).
% p0: the initial probability. Usually it is a zero matrix with the diagonal
% set to 1.
%
% for example, A could be:
% A = [0,2,2,0,0,0,0;...
% 2,0,1,1,0,0,0;...
% 2,1,0,0,1,0,0;...
% 0,1,0,0,0,1,1;...
% 0,0,1,0,0,0,0;...
% 0,0,0,1,0,0,1;...
% 0,0,0,1,0,1,0]
%
% if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
% [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
% 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
% 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
% 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
% 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
%
% Each column represents the probability for each node. Each element in the
% column means the probability to go to that node.
% This algorithm will converge. For example, for the above matrix, nSteps =
% 100, 1000 or 10000, will give the same result.
'''
n = len(A)
if p0 is None:
p0 = np.eye(n)
'''
% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
% 0.2500 0 0 0 0 0 0
% 0 0.2500 0 0 0 0 0
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0
% 0 0 0 0 1.0000 0 0
% 0 0 0 0 0 0.5000 0
% 0 0 0 0 0 0 0.5000
% W will be:
% 0 0.5000 0.5000 0 0 0 0
% 0.5000 0 0.2500 0.3333 0 0 0
% 0.5000 0.2500 0 0 1.0000 0 0
% 0 0.2500 0 0 0 0.5000 0.5000
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0.5000
% 0 0 0 0.3333 0 0.5000 0
'''
#W = A * spdiags(sum(A)'.^(-1), 0, n, n);
#W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
0, n, n).toarray() )
p = p0
pl2norm = np.inf
unchanged = 0
for i in range(1, nSteps+1):
if i % 100 == 0:
print(' done rwr ' + str(i-1) )
pnew = (1-laziness) * W.dot(p) + laziness * p0
l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
p = pnew
if l2norm < np.finfo(float).eps:
break
else:
if l2norm == pl2norm:
unchanged = unchanged +1
if unchanged > 10:
break
else:
unchanged = 0
pl2norm = l2norm
return p
# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
[2,0,1,1,0,0,0],\
[2,1,0,0,1,0,0],\
[0,1,0,0,0,1,1],\
[0,0,1,0,0,0,0],\
[0,0,0,1,0,0,1],\
[0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
# test
#dst = distance.euclidean(A)
# correct, the same as in Matlab
def f_sim_2_aRankNet(sim, k=3):
'''
% Convert the similarity matrix to a network graph where each node
% has k edges to other nodes (aRank).
'''
# delete the diagonal values.
# sim = sim-diag(diag(sim) );
np.fill_diagonal(sim, 0)
# [~, I] = sort(sim-diag(diag(sim) ) );
I = np.argsort(sim, kind='mergesort') + 1
# [~, I2] = sort(I);
I2 = (np.argsort(I, kind='mergesort').T + 1).T
# for every column, just keep the top k edges.
#aRankNet = (I2 >length(sim)-k);
aRankNet = I2 > (len(sim) - k)
# make the matrix symmetric
# aRankNet = max(aRankNet, aRankNet');
aRankNet = np.logical_or(aRankNet, aRankNet.T)
# remove the diagonal 1s.
# aRankNet = aRankNet-diag(diag(aRankNet) );
np.fill_diagonal(aRankNet, False)
return aRankNet
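# A tiny numpy sketch (toy symmetric similarity matrix, not project data) of the
# double-argsort trick used above: argsort of argsort yields each element's rank
# along a row, so thresholding the ranks keeps the k largest similarities per node
# before the graph is symmetrized.
import numpy as np
_sim = np.array([[0.0, 0.9, 0.1, 0.5],
                 [0.9, 0.0, 0.4, 0.2],
                 [0.1, 0.4, 0.0, 0.8],
                 [0.5, 0.2, 0.8, 0.0]])
_k = 2
_ranks = np.argsort(np.argsort(_sim, axis=1), axis=1)  # 0-based rank within each row
_net = _ranks >= (len(_sim) - _k)                      # keep the k largest per row
_net = np.logical_or(_net, _net.T)                     # make the graph undirected
np.fill_diagonal(_net, False)                          # drop self-edges
print(_net.astype(int))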
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
# [0.5566, 0, -0.0842, -0.0170], \
# [0.6448, -0.0842, 0, 0.8405], \
# [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False, True, True, False],
# [ True, False, False, False],
# [ True, False, False, True],
# [False, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, False],
# [ True, False, False, True],
# [ True, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, True],
# [ True, False, False, True],
# [ True, True, True, False]])
def f_find_centers_rwMat(rw_mat, k):
'''
% on the rw_mat matrix, find some nodes as the centroids for soft
% clustering. If we just randomly pick some nodes as centroids, that is
% not good for fuzzy clusters.
% k is the number of centroids.
'''
ixs = []
# 1. find the most connected center node as the first centroid.
a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
# % most connected node.
ix = np.argmax(a)
ixs.append(ix)
# % 2. iteratively find the rest nodes
for i in range(1, k):
tmp = rw_mat[:, ixs]
b = np.sum(tmp, axis=1)
b[ixs] = np.inf
# % find the farthest node
ix = np.argmin(b)
ixs.append(ix)
return ixs
# test
#tmp = f_find_centers_rwMat(rw_mat, 10)
def getCutoff(rw_mat, avgNeighborsSize):
tmp = rw_mat.flatten('F')
a = np.flip(np.sort(tmp), 0)
len1 = len(rw_mat)
#cutoffs = []
all_neibs = int( avgNeighborsSize * len1 )
print( all_neibs)
ct = a[all_neibs]
return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''
def f_len_of_each_ele(c1):
#% Assume c1 is a 1-dimension cell array, and each element is a 1d double
#% array. This function counts the length of each double array.
lens = np.zeros(len(c1))
for i in range(0, len(c1)):
lens[i] = len(c1[i])
return lens
def f_eu_dist(X):
'''
calculate the euclidean distance between instances
'''
sim = np.zeros(( len(X), len(X) ))
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
np.fill_diagonal(sim, 0)
return sim
#test
#sim = f_eu_dist(X)
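# A quick sanity check (random toy data, not project data) mirroring the commented
# Matlab lines in f_fuzzy_rwr_clusters() below: f_eu_dist(X) equals the negated
# squareform(pdist(X)).
from scipy.spatial.distance import pdist, squareform
_X_demo = np.random.rand(5, 3)
assert np.allclose(f_eu_dist(_X_demo), -squareform(pdist(_X_demo)))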
def f_eu_dist2(X1, X2):
'''
calculate the euclidean distance between instances from two datasets
'''
sim = np.zeros(( len(X1), len(X2) ))
for i in range(0, len(X1) ):
for j in range(0, len(X2) ):
tmp = distance.euclidean(X1[i], X2[j])
sim[i][j] = tmp
sim = -sim
return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
# X: data
# k: number of clusters
'''
The return variable clus stores the instance indices for each cluster.
However, with this data structure it is not easy to find, for a given instance,
which clusters it belongs to, thus we also need to convert clus to a
true-false matrix.
'''
if each_clus_sz is None:
# on average, how many clusters does one inst belong to.
#overlap_factor = 2;
# the estimated size of each cluster. default is a third of the number of
# instances.
each_clus_sz=len(X)/3
print('RWR-based fuzzy clustering starts...')
print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )
# sim = squareform(pdist(X));
# sim = -sim;
sim = np.zeros((len(X), len(X) ) )
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
print(' done calculating the Euclidean distance matrix')
# ---------------------------------------------------------------
aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
print(' done calculating the A-rank KNN graph')
# % -------- RWR --------
nSteps = 1000
lazi = 0.3
rw = RWR(ori_graph, nSteps, lazi)
# remove probability of returning start node
np.fill_diagonal(rw, 0)
rw_mat = rw
print(' done RWR')
# ---------------------------------------------------------------
ixs_centers = f_find_centers_rwMat(rw_mat, k)
ct = getCutoff(rw_mat, each_clus_sz)
rw_net = rw_mat > ct
# % set the diagonal to 1
np.fill_diagonal(rw_net, True)
clus = []
for i in range(0, k):
tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
clus.append(tmp)
# ---------------------------------------------------------------
# % sort the clusters
lens = f_len_of_each_ele(clus)
ix = np.argsort(lens)[::-1]
clus_ordered = [clus[i] for i in ix]
print(' center inst. index of each cluster: ')
ixs_centers = np.array(ixs_centers)
print(ixs_centers[ix])
print(' size of each cluster: ')
print(lens[ix])
print(' done RWR clustering')
return clus_ordered
#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass
def f_clus_to_tfs(clus, n_inst):
#% convert the cluster information from cell array to mat. But for each
#% instance, the rank of clusters information will be lost - you won't know
#% what is the top 1/2/3 cluster it belongs to.
#%
#% clus e.g:
#% 1x5 cell
#% 1x195 double 1x193 double 1x169 double 1x161 double 1x62 double
#%
#% tfs e.g:
#% 295x5 double
#% 1 0 0 0 0
#% 1 1 1 1 0
#% 1 1 1 0 0
#% 1 1 0 0 0
#% 1 1 1 1 0
#% ...
#% 1 1 1 1 1
#% 1 0 0 0 0
#% 1 1 1 0 0
tfs = np.zeros((n_inst, len(clus)), dtype=bool)
for i in range(0, len(clus)):
tfs[clus[i], i] = True
return tfs
# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass
def f_tfs_2_instClus(tfs):
'''
convert the boolean table representation of the clustering result into, for
each instance, the list of clusters it belongs to.
'''
inst_clus = []
for i in range(0, len(tfs)):
row = list( np.where(tfs[i, :] ) [0] )
inst_clus.append(row)
return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)
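# A tiny round-trip sketch (toy clusters, not project data) tying the two helpers
# together: cluster membership lists -> boolean matrix -> per-instance cluster ids.
_clus_demo = [np.array([0, 1, 3]), np.array([1, 2])]  # two overlapping clusters over 4 instances
_tfs_demo = f_clus_to_tfs(_clus_demo, 4)
print(_tfs_demo.astype(int))        # rows are instances, columns are clusters
print(f_tfs_2_instClus(_tfs_demo))  # instance 1 belongs to both clusters, the rest to one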
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''
#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
# '''
# corresponds to f_weka_bg_svm_tr_te() in Matlab version
# '''
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
def f_tr(X_tr, y_tr, model):
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
return model_inner
def f_te(X_te, model):
y_pred = model.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
return y_pred
def f_tr_te(X_tr, y_tr, X_te, model):
'''
corresponds to f_weka_bg_svm_tr_te() in Matlab version
'''
#bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
y_pred = model_inner.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
#auc = roc_auc_score(y_te.flatten(), y_pred)
return y_pred
def f_k_fo(X, y, model, k_fold=10):
'''
corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
'''
y = y.flatten()
y_pred = np.zeros(y.size)
skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
skf.get_n_splits(X, y)
for train_index, test_index in skf.split(X, y):
#print("TRAIN: ", train_index, " TEST: ", test_index)
X_tr, X_te = X[train_index], X[test_index]
#y_tr, y_te = y[train_index], y[test_index]
y_tr = y[train_index]
if np.unique(y_tr).size == 1:
y_pred_fo = np.zeros( len(test_index) )
#print len(X_te)
#print len(test_index)
#print y_pred_fo
y_pred_fo.fill(np.unique(y_tr)[0] )
#print y_pred_fo
else:
y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
y_pred[test_index] = y_pred_fo
#auc = roc_auc_score(y.flatten(), y_pred)
return y_pred
# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.
def f_quantileNorm(templete, target):
'''
templete (the template) is the standard; change the target to the values in
the templete. Target may have a very different range than the templete.
templete and target should be 1d arrays of length n.
f_my_quantileNorm()
'''
ix_target = np.argsort(target, kind='mergesort')
ix_templete = np.argsort(templete, kind='mergesort')
target[ix_target] = templete[ix_templete]
new = target
return new
# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
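# A small numpy illustration (toy arrays, not project data) of the rank-based
# mapping done by f_quantileNorm(): the target keeps its own ordering but takes
# on the templete's values.
_templete = np.array([0.1, 0.2, 0.3, 0.4])
_target = np.array([40.0, 10.0, 30.0, 20.0])
print(f_quantileNorm(_templete, _target))  # -> [0.4 0.1 0.3 0.2]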
#def f_bg_k_fo_3(X, y, k_fold=10):
# '''
# corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
# corresponds to f_k_fo()
# '''
# y_pred = np.zeros((y.size, 1))
#
# skf = StratifiedKFold(n_splits=k_fold)
# skf.get_n_splits(X, y)
#
# for train_index, test_index in skf.split(X, y):
# #print("TRAIN:", train_index, "TEST:", test_index)
# X_tr, X_te = X[train_index], X[test_index]
# y_tr, y_te = y[train_index], y[test_index]
def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
'''
% use each cluster's data to predict all instances, while self
% prediction is done using k-fold CV.
corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
'''
n_clusters = len(clus)
y_pred_multi = np.zeros((y.size, n_clusters) )
models = []
for j in range(0, n_clusters):
# for each cluster
Xj = X[clus[j].flatten(), :]
yj = y[clus[j].flatten() ]
model_a_clust = copy.deepcopy(model)
print(' Cluster '+str(j)+' started...')
#if len(yj) > 10:
if len(yj) > 15 and np.unique(yj).size != 1:
# ------------------ for self ------------------
#if np.unique(yj).size == 1:
# y_pred = np.zeros(yj.size)
# y_pred.fill(np.unique(yj)[0])
#else:
try:
y_pred = f_k_fo(Xj, yj, model, fo_inner)
# quantileNorm
templete = y_pred_whole[clus[j].flatten()]
target = y_pred
y_pred = f_quantileNorm(templete, target)
# copy the normed prediction to the whole data.
y_pred_multi[clus[j].flatten(), j] = y_pred
print(' c-'+str(j)+' done predicting local instances')
# ------------------ for other -----------------
ix_other = set(range(0, y.size)) - set(clus[j].flatten())
ix_other = list(ix_other)
#print ix_other
X_other = X[ix_other , :]
#y_other = y[ix_other ]
# predict
#y_pred = f_tr_te(Xj, yj, X_other, model)
#if np.unique(yj).size != 1:
model_a_clust.fit(Xj, yj)
y_pred = model_a_clust.predict_proba(X_other)
y_pred = y_pred[:, 1].flatten()
# quantileNorm
templete = y_pred_whole[ix_other]
target = y_pred
y_pred = f_quantileNorm(templete, target)
#else:
# y_pred = np.zeros(X_other.size)
# y_pred.fill(np.unique(yj)[0])
# copy to the whole array
y_pred_multi[ix_other, j] = y_pred
print(' c-'+str(j)+' done predicting remote instances')
except ValueError as e:
print(e)
print(' skip this cluster')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
else:
if len(yj) <= 15:
print (' '+str(len(yj))+' insts in cluster, <= 15, skip...')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
if np.unique(yj).size == 1:
print (' warning, #unique class label(s) == 1')
y_pred = np.zeros(y.size)
y_pred.fill(np.unique(yj)[0])
y_pred_multi[:, j] = y_pred
model_a_clust = np.unique(yj)[0]
models.append(model_a_clust)
return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)
#def f_dec_tab_4_bg_svm(X, y, clus):
# '''
# Calculate the decision table
# % This version changed from the cluster-cluster dec_mat to instance-cluster
# % dec_mat. This solution will avoid the case where one wrong cluster decision
# % leads to the entire cluster prediction being wrong, which is the reason
# % for instability. However, we cannot use a systematic evaluation criterion
# % such as AUC, I will try using the predicted prob at first.
#
# % This version 3 adds the support for fuzzy clustering - one instance may
# % belongs to more than one cluster.
# % This updated version also outputs the predicted values of y.
# % support more than 3 clusters
# % normalization take place in y_pred_self and y_pred_other, thus do not
# % need normalization when predict y_pred_ICE.
# % ixsp is another cluster form.
#
# corresponds to f_dec_tab_4_bg_svm() in Matlab version
# '''
# #n_clusters = len(clus)
# ## dec_mat stores the prediction error.
# #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# #
# ## k_fold of inner cross-validation
# #fo_inner = 10
# # --------------------------- WHOLE -------------------------
#
# # --------------------------- SELF -------------------------
def f_err_mat(X, y, clus, model):
'''
Calculate the decision table
corresponds to f_dec_tab_4_bg_svm() in Matlab version
'''
n_clusters = len(clus)
# err_mat stores the prediction error.
pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# col 0 to col n_clusters-1 store the predictions by each cluster
# the last col stores the pred by whole data
#models = []
# k_fold of inner cross-validation
fo_inner = 5
# --------------------------- WHOLE -------------------------
# Predict each cluster using the whole data.
model_whole = copy.deepcopy(model)
y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
model_whole.fit(X, y) # fit a model using all data rather than only a fold
pred_prob_mat[:, n_clusters] = y_pred_whole
print (' Done evaluation using whole instances')
print (' Start to evaluate each cluster ')
# --------------------------- SELF -------------------------
# predict the whole instances using each cluster data, while self
# prediction using 10-fold CV.
[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
y_pred_whole, model, fo_inner)
print (' Done evaluation using each cluster')
models.append(model_whole)
pred_prob_mat[:, 0:n_clusters] = y_pred_multi
# make a tmp array a stores y
tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
err_mat = abs(pred_prob_mat - tmp )
print (' Done calculating error table and fitting ICE models')
return [err_mat, models]
"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
# '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""
def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
'''
Convert the err table to decision table.
'''
dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)
# dec_ixs: for each instance, which clusters should be used.
dec_ixs = []
inst_clus = f_tfs_2_instClus(tfs)
for i in range(0, len(err_mat)):
# Matlab code:
#dec_row = dec_mat(cur_nb_ix, :);
#dec_row(:, end ) = dec_row(:, end ) - adv_whole;
#dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
row = np.copy( err_mat[i, :] )
#print row
row[-1] = row[-1] - adv_whole
inst_i_clus = inst_clus[i]
if len(inst_i_clus) > 0:
row[inst_i_clus] = row[inst_i_clus] - adv_self
#print row
ix_good_clus = list( np.where( row < row[-1] ) [0] )
#print ix_good_clus
if len(ix_good_clus) > 0:
dec_mat[i, ix_good_clus] = True
dec_ixs.append(ix_good_clus)
else:
dec_ixs.append([])
return [dec_mat, dec_ixs]
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
'''
Use the training data to predict the testing data.
Use whole training data to predict
Use each cluster of training data to predict the testing data.
'''
y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
# the first col is the prediction using the whole data
model_whole = models[-1]
y_pred_all[:, 0] = f_te(X_te, model_whole)
#y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
#print 'whole model good '
# start from the second col, the result is by each cluster
for i in range(0, len(clus)):
#Xi = X_tr[clus[i].flatten(), :]
#yi = y_tr[clus[i].flatten() ]
model_i = models[i]
#model_a_clust = copy.deepcopy(model)
try:
y_pred_te = f_te(X_te, model_i)
except :
if model_i == 0:
y_pred_te = np.zeros(len(X_te))
elif model_i == 1:
y_pred_te = np.ones(len(X_te))
else:
y_pred_te = np.zeros(len(X_te))
y_pred_te.fill(np.nan)
#except NotFittedError as e:
# print(repr(e))
# y_pred_te = np.zeros(len(X_te))
# y_pred_te.fill(np.nan)
#print 'model '+str(i)+' good '
#y_pred_te = f_tr_te(Xi, yi, X_te, model)
if doNorm:
templete = y_pred_all[:, 0]
target = y_pred_te
y_pred = f_quantileNorm(templete, target)
else:
y_pred = y_pred_te
y_pred_all[:, i+1] = y_pred
return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)
def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
print (' Done calculating decision table')
return [clus, models, dec_ixs]
#def_deal_miss_v_1(d):
'''
deal with missing values by replacing them by mean.
'''
def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
This version uses the err mat for re-clustering
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# ******************** re-clustering ********************
n_iter = 2
for i in range(0, n_iter):
clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
tfs = f_clus_to_tfs(clus, len(X_tr))
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# *******************************************************
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
print (' Done calculating decision table')
return [clus, models, dec_ixs]
def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
'''
clus and inst_clus contain the same information: clus holds the instance
ids for each cluster, while inst_clus stores, for each instance, which
cluster(s) it belongs to.
dec_ixs stores the good cluster(s) for each instance, which may even
include a remote cluster. An instance's entry in dec_ixs does not contain
the whole set of clusters.
'''
# the first col is the prediction using the whole data
# start from the second col, the result is by each cluster
y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
y_pred_ICE = np.zeros( len(X_te) )
neighbour_mat = f_eu_dist2(X_tr, X_te)
# ---------- for each testing instance ----------
#n_partials = np.zeros( len(X_te) )
#n_wholes = np.zeros( len(X_te) )
for j in range(0, len(X_te) ):
# for each testing instance
# find the top N neighbors for each test instance
neighbour_col = neighbour_mat[:, j].flatten()
ix = np.argsort(neighbour_col )
ix = ix[::-1]
ix_top_neighbors = ix[0:N]
#print 'testing inst ' + str(j)
#print ' ix of top neighbors:'
#print ix_top_neighbors
# ---------- find all neighbors' picks ----------
clus_ids_to_use = []
nei_labels = []
for cur_nb in range(0, N):
# for each neighbour
# find each neighbour's pick
cur_nb_ix = ix_top_neighbors[cur_nb]
clus_id_to_use = list( dec_ixs[cur_nb_ix] )
clus_ids_to_use = clus_ids_to_use + clus_id_to_use
# also find neighbor's label. maybe will be used later as KNN pred
# instead of using whole to pred.
nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
#print ' clus_ids_to_use:'
#print clus_ids_to_use
# cluster id + 1 to make the ix fit the col id in y_pred_all
a = clus_ids_to_use
a = list( np.array(a) + 1 )
clus_ids_to_use = a
# number of partial models used
n_partial = len(clus_ids_to_use)
# number of whole models used, based on parameters alpha, beta and N.
n_whole = int( round( alpha*n_partial + beta*N ) )
clus_ids_to_use = clus_ids_to_use + [0] * n_whole
#print ' clus_ids_to_use:'
#print clus_ids_to_use
#print nei_labels
y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])
print ('Done predicting testing instances.')
return y_pred_ICE
# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []
# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')
#for j in range(1, 50):
for j in range(1, 49):
try:
X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
#imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
#plt.show()
#sim = np.corrcoef(X)
#np.fill_diagonal(sim, 0)
#n_clus = 100
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
model = svm.SVC(kernel='linear', probability = True)
skf = StratifiedKFold(n_splits=k_fold)
skf.get_n_splits(X, y)
y_preds_ICE = np.zeros( y.size )
y_preds_whole = np.zeros( y.size )
fold_i = 1
for train_index, test_index in skf.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
X_tr, X_te = X[train_index], X[test_index]
y_tr, y_te = y[train_index], y[test_index]
[clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
#[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)
y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
y_preds_ICE[test_index] = y_pred_ICE
y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
y_preds_whole[test_index] = y_pred_whole
print( j)
print( 'fold ' + str(fold_i) + ' finished')
fold_i = fold_i + 1
auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
print (auc_ICE, auc_whole)
aucs_ICE.append(auc_ICE)
aucs_whole.append(auc_whole)
f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n')
except Exception:
continue
| 31.348101
| 93
| 0.551123
| 5,133
| 34,671
| 3.526593
| 0.116111
| 0.013811
| 0.013424
| 0.01149
| 0.459397
| 0.417191
| 0.359297
| 0.324936
| 0.294443
| 0.277373
| 0
| 0.043658
| 0.309625
| 34,671
| 1,106
| 94
| 31.348101
| 0.712609
| 0.416602
| 0
| 0.243243
| 0
| 0
| 0.052622
| 0.003356
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059459
| false
| 0
| 0.032432
| 0
| 0.151351
| 0.078378
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be20c61ee255e8ce67c5713e68e8dff144cc5ef4
| 44,105
|
py
|
Python
|
xc/common/utils/prjxray_routing_import.py
|
FireFox317/symbiflow-arch-defs
|
f0e7b4212544e1d55da776fb7a2ff79117e01454
|
[
"ISC"
] | 1
|
2020-09-23T17:57:07.000Z
|
2020-09-23T17:57:07.000Z
|
xc/common/utils/prjxray_routing_import.py
|
tcal-x/symbiflow-arch-defs
|
1e513ac778371608c51fa86a98e54279e3c74752
|
[
"ISC"
] | null | null | null |
xc/common/utils/prjxray_routing_import.py
|
tcal-x/symbiflow-arch-defs
|
1e513ac778371608c51fa86a98e54279e3c74752
|
[
"ISC"
] | null | null | null |
#!/usr/bin/env python3
""" Imports 7-series routing fabric to the rr graph.
For ROI configurations, this also connects the synthetic IO tiles to the routing
node specified.
Rough structure:
Add rr_nodes for CHANX and CHANY from the database. IPIN and OPIN rr_nodes
should already be present from the input rr_graph.
Create a mapping between database graph_nodes and IPIN, OPIN, CHANX and CHANY
rr_node ids in the rr_graph.
Add rr_edge for each row in the graph_edge table.
Import channel XML node from connection database and serialize output to
rr_graph XML.
"""
import argparse
import os.path
from hilbertcurve.hilbertcurve import HilbertCurve
import math
import prjxray.db
from prjxray.roi import Roi
import prjxray.grid as grid
from lib.rr_graph import graph2
from lib.rr_graph import tracks
from lib.connection_database import get_wire_pkey, get_track_model
import lib.rr_graph_capnp.graph2 as capnp_graph2
from prjxray_constant_site_pins import feature_when_routed
from prjxray_tile_import import remove_vpr_tile_prefix
import simplejson as json
from lib import progressbar_utils
import datetime
import re
import functools
import pickle
import sqlite3
now = datetime.datetime.now
HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+')
CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)')
CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)')
CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*')
BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+')
BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+')
CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+')
HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)')
IOI_OCLK = re.compile('IOI_OCLK_([01])')
# Regex for [LR]IOI_SING tiles
IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_']
IOI_SING_REGEX = re.compile(
r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\.IOI_)({})([01])(.*)'.format(
"|".join(IOI_SITE_PIPS)
)
)
def reduce_connection_box(box):
""" Reduce the number of connection boxes by merging some.
Examples:
>>> reduce_connection_box('IMUX0')
'IMUX'
>>> reduce_connection_box('IMUX1')
'IMUX'
>>> reduce_connection_box('IMUX10')
'IMUX'
>>> reduce_connection_box('BRAM_ADDR')
'IMUX'
>>> reduce_connection_box('A_L10')
'A'
>>> reduce_connection_box('B')
'B'
>>> reduce_connection_box('B_L')
'B'
"""
box = CONNECTION_BOX_FILTER.match(box).group(1)
if 'BRAM_ADDR' in box:
box = 'IMUX'
if box.endswith('_L'):
box = box.replace('_L', '')
return box
REBUF_NODES = {}
REBUF_SOURCES = {}
def get_clk_hrow_and_rebuf_tiles_sorted(cur):
"""
Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles.
Returns them in a list sorted according to their Y coordinates.
"""
cur.execute(
"""
SELECT name
FROM phy_tile
WHERE
name LIKE "CLK_HROW_BOT_R_%"
OR
name LIKE "CLK_HROW_TOP_R_%"
OR
name LIKE "CLK_BUFG_REBUF_%"
ORDER BY grid_y DESC;
"""
)
return [t[0] for t in cur.fetchall()]
def populate_bufg_rebuf_map(conn):
global REBUF_NODES
REBUF_NODES = {}
global REBUF_SOURCES
REBUF_SOURCES = {}
rebuf_wire_regexp = re.compile(
'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)'
)
cur = conn.cursor()
# Find CLK_HROW_BOT_R, CLK_HROW_TOP_R and REBUF tiles.
rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur)
# Append None on both ends of the list to simplify the code below.
rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None]
def maybe_get_clk_hrow(i):
"""
Returns the name of a CLK_HROW tile only if it is there on the list.
"""
tile = rebuf_and_hrow_tiles[i]
if tile is not None and tile.startswith("CLK_HROW"):
return tile
return None
# Assign each REBUF tile its above and below CLK_HROW tile. Note that in
# VPR coordinate terms "above" and "below" mean the opposite...
rebuf_to_hrow_map = {}
for i, tile_name in enumerate(rebuf_and_hrow_tiles):
if tile_name is not None and tile_name.startswith("CLK_BUFG_REBUF"):
rebuf_to_hrow_map[tile_name] = {
"above": maybe_get_clk_hrow(i - 1),
"below": maybe_get_clk_hrow(i + 1),
}
# Find nodes touching rebuf wires.
cur.execute(
"""
WITH
rebuf_wires(wire_in_tile_pkey) AS (
SELECT pkey
FROM wire_in_tile
WHERE
name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_BOT"
OR
name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_TOP"
),
rebuf_nodes(node_pkey) AS (
SELECT DISTINCT node_pkey
FROM wire
WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
)
SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name
FROM rebuf_nodes
INNER JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
ORDER BY rebuf_nodes.node_pkey;"""
)
for node_pkey, rebuf_tile, rebuf_wire_name in cur:
if node_pkey not in REBUF_NODES:
REBUF_NODES[node_pkey] = []
m = rebuf_wire_regexp.fullmatch(rebuf_wire_name)
if m.group(2) == 'TOP':
REBUF_NODES[node_pkey].append(
'{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1))
)
hrow_tile = rebuf_to_hrow_map[rebuf_tile]["below"]
if hrow_tile is not None:
REBUF_NODES[node_pkey].append(
"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
hrow_tile, m.group(1)
)
)
elif m.group(2) == 'BOT':
REBUF_NODES[node_pkey].append(
'{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1))
)
hrow_tile = rebuf_to_hrow_map[rebuf_tile]["above"]
if hrow_tile is not None:
REBUF_NODES[node_pkey].append(
"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
hrow_tile, m.group(1)
)
)
else:
assert False, (rebuf_tile, rebuf_wire_name)
for node_pkey in REBUF_NODES:
cur.execute(
"""
SELECT phy_tile.name, wire_in_tile.name
FROM wire
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
WHERE wire.node_pkey = ?;""", (node_pkey, )
)
for tile, wire_name in cur:
REBUF_SOURCES[(tile, wire_name)] = node_pkey
HCLK_CMT_TILES = {}
def populate_hclk_cmt_tiles(db):
global HCLK_CMT_TILES
HCLK_CMT_TILES = {}
grid = db.grid()
_, x_max, _, _ = grid.dims()
for tile in grid.tiles():
gridinfo = grid.gridinfo_at_tilename(tile)
if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']:
continue
hclk_x, hclk_y = grid.loc_of_tilename(tile)
hclk_cmt_x = hclk_x
hclk_cmt_y = hclk_y
while hclk_cmt_x > 0:
hclk_cmt_x -= 1
gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
if gridinfo.tile_type == 'HCLK_CMT':
HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc(
(hclk_cmt_x, hclk_cmt_y)
)
break
hclk_cmt_x = hclk_x
while hclk_cmt_x < x_max:
hclk_cmt_x += 1
gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
if gridinfo.tile_type == 'HCLK_CMT_L':
HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc(
(hclk_cmt_x, hclk_cmt_y)
)
break
def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number):
if (hclk_tile, lr) not in HCLK_CMT_TILES:
return []
hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)]
return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)]
def check_feature(feature):
""" Check if enabling this feature requires other features to be enabled.
Some pips imply other features. Example:
.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10
implies:
.ENABLE_BUFFER.HCLK_CK_BUFHCLK10
"""
# IOI_SING tiles have bits in common with the IOI tiles.
#
# The difference is that the TOP IOI_SING tile shares bits with
# the bottom half of a normal IOI tile, while the BOTTOM IOI_SING
# shares bits with the top half of a normal IOI TILE.
#
# The following is to change the edge feature to accommodate this
# need, as the IOI_SING tiles have the same wire and pip names
# regardless of whether they are found at the TOP or BOTTOM of an IOI column
m = IOI_SING_REGEX.fullmatch(feature)
if m:
# Each clock region spans a total of 50 IOBs.
# The IOI_SING are found on top or bottom of the whole
# IOI/IOB column. The Y coordinate identified with the
# second capture group is divided by 50 to get the relative
# position of the IOI_SING within the clock region column
is_bottom_sing = int(m.group(2)) % 50 == 0
# This is the value to attach to the source pip name that
# changes based on which IOI_SING is selected (top or bottom)
#
# Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1
src_value = '1' if is_bottom_sing else '0'
# This is the value to attach to the IOI_SITE_PIPS names
# in the destination wire of the pip
#
# Example: IOI_OLOGIC0 -> IOI_OLOGIC1
dst_value = '0' if is_bottom_sing else '1'
unchanged_feature = "{}{}{}{}".format(
m.group(1), m.group(2), m.group(3), m.group(4)
)
src_wire = m.group(6).replace('_SING', '')
for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']:
if pip in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(src_value))
if 'IOI_OCLK' in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(dst_value))
changed_feature = "{}{}".format(dst_value, src_wire)
feature = "{}{}".format(unchanged_feature, changed_feature)
feature_path = feature.split('.')
# IOB_DIFFO_OUT0->IOB_DIFFO_IN1
#
# When this PIP is active the IOB operates in the differential output mode.
# There is no feature associated with that PIP in the prjxray db but there
# is a tile-wide feature named "DIFF_OUT".
#
# The "DIFF_OUT" cannot be set in the architecture as it is defined one
# level up in the hierarchy (it is tile-wide, not site-wide). So here we
# map the PIP's feature to "DIFF_OUT"
if feature_path[2] == "IOB_DIFFO_OUT0" and \
feature_path[1] == "IOB_DIFFO_IN1":
return '{}.OUT_DIFF'.format(feature_path[0])
# IOB_PADOUT0->IOB_DIFFI_IN1
# IOB_PADOUT1->IOB_DIFFI_IN0
#
# These connections are hard wires that connect IOB33M and IOB33S sites.
# They are used in differential input mode.
#
# Vivado does not report this connection as a PIP but in the prjxray db it
# is a pip. Instead of making it a pseudo-pip we simply reject fasm
# features here.
if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1":
return ''
if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0":
return ''
# REBUF stuff
rebuf_key = (feature_path[0], feature_path[1])
if rebuf_key in REBUF_SOURCES:
return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]])
m = IOI_OCLK.fullmatch(feature_path[1])
if m:
enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format(
feature_path[0], m.group(1), feature_path[-1]
)
return ' '.join((feature, enable_oclkm_feature))
if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]):
enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_buffer_feature))
# BUFHCE sites are now routed through without the need to place them; therefore,
# when the corresponding pip is traversed, the correct fasm feature needs to be added.
# The relevant features are:
# - IN_USE: to enable the BUFHCE site
# - ZINV_CE: to disable the inverter on CE input which is connected to VCC.
# This sets the CE signal to constant 1
m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1])
if m:
x_loc_str = m.group(1)
if 'L' in x_loc_str:
x_loc = 0
elif 'R' in x_loc_str:
x_loc = 1
else:
assert False, "Impossible to determine X location of BUFHCE"
y_loc = m.group(2)
bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc)
enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format(
feature_path[0], bufhce_loc
)
enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\'b1'.format(
feature_path[0], bufhce_loc
)
return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce))
if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]):
features = [feature]
features.append(
'{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1])
)
features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1]))
return ' '.join(features)
m = HCLK_OUT.fullmatch(feature_path[-1])
if m:
return ' '.join(
[feature] + find_hclk_cmt_hclk_feature(
feature_path[0], m.group(1), m.group(2)
)
)
m = CASCOUT_REGEX.fullmatch(feature_path[-2])
if m:
enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format(
feature_path[0], m.group(1)
)
return ' '.join((feature, enable_cascout))
parts = feature.split('.')
wire_feature = feature_when_routed(parts[1])
if wire_feature is not None:
return '{} {}.{}'.format(feature, parts[0], wire_feature)
return feature
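# A hedged illustration (the feature string below is a made-up example, not a real
# fasm feature from a design) of how IOI_SING_REGEX splits a pip feature so that
# check_feature() above can rewrite the wire indices for IOI_SING tiles.
_m = IOI_SING_REGEX.fullmatch('RIOI3_SING_X43Y99.IOI_OLOGIC0_D1.IOI_IMUX34_0')
assert _m is not None
# groups: ('RIOI3_SING_X43Y', '99', '.IOI_', 'OLOGIC', '0', '_D1.IOI_IMUX34_0')
print(_m.groups())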
# CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1)
PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$')
def set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
):
""" Assign a connection box to an IPIN node. """
node_dict = graph.nodes[node_idx]._asdict()
node_dict['connection_box'] = graph2.ConnectionBox(
x=grid_x,
y=grid_y,
id=box_id,
site_pin_delay=site_pin_delay,
)
graph.nodes[node_idx] = graph2.Node(**node_dict)
def update_connection_box(
conn, graph, graph_node_pkey, node_idx, connection_box_map
):
""" Update connection box of IPIN node if needed. """
cur = conn.cursor()
cur.execute(
"""
SELECT connection_box_wire_pkey
FROM graph_node WHERE pkey = ?""", (graph_node_pkey, )
)
connection_box_wire_pkey = cur.fetchone()[0]
if connection_box_wire_pkey is not None:
cur.execute(
"""
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
SELECT phy_tile_pkey FROM wire WHERE pkey = ?
)""", (connection_box_wire_pkey, )
)
grid_x, grid_y = cur.fetchone()
cur.execute(
"SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?",
(connection_box_wire_pkey, )
)
wire_in_tile_pkey = cur.fetchone()[0]
box_id = connection_box_map[wire_in_tile_pkey]
cur.execute(
"""
SELECT switch.intrinsic_delay
FROM switch
WHERE pkey = (
SELECT site_pin_switch_pkey
FROM wire_in_tile
WHERE pkey = (
SELECT wire_in_tile_pkey
FROM wire
WHERE pkey = (
SELECT site_wire_pkey
FROM node
WHERE pkey = (
SELECT node_pkey
FROM graph_node
WHERE pkey = ?
)
)
)
)""", (graph_node_pkey, )
)
site_pin_delay = cur.fetchone()[0]
set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
)
def create_get_tile_and_site_as_tile_pkey(cur):
tiles = {}
for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute("""
SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;"""):
tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey)
def get_tile_and_site_as_tile_pkey(x, y):
return tiles[(x, y)]
return get_tile_and_site_as_tile_pkey
def create_get_site_as_tile_wire(cur):
@functools.lru_cache(maxsize=0)
def get_site_from_site_as_tile(site_as_tile_pkey):
cur.execute(
"""
SELECT site.site_type_pkey, site_as_tile.site_pkey
FROM site_as_tile
INNER JOIN site ON site.pkey = site_as_tile.site_pkey
WHERE site_as_tile.pkey = ?""", (site_as_tile_pkey, )
)
results = cur.fetchall()
assert len(results) == 1, site_as_tile_pkey
return results[0]
@functools.lru_cache(maxsize=0)
def get_site_as_tile_wire(site_as_tile_pkey, pin):
site_type_pkey, site_pkey = get_site_from_site_as_tile(
site_as_tile_pkey
)
cur.execute(
"""
SELECT
pkey
FROM
wire_in_tile
WHERE
site_pin_pkey = (
SELECT
pkey
FROM
site_pin
WHERE
site_type_pkey = ?
AND name = ?
)
AND
site_pkey = ?
;""", (site_type_pkey, pin, site_pkey)
)
results = cur.fetchall()
assert len(results) == 1
wire_in_tile_pkey = results[0][0]
return wire_in_tile_pkey
return get_site_as_tile_wire
def import_graph_nodes(conn, graph, node_mapping, connection_box_map):
cur = conn.cursor()
get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur)
get_site_as_tile_wire = create_get_site_as_tile_wire(cur)
for node_idx, node in enumerate(graph.nodes):
if node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN):
continue
gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)]
pin_name = graph.pin_ptc_to_name_map[
(gridloc.block_type_id, node.loc.ptc)]
# Synthetic blocks are handled below.
if pin_name.startswith('SYN-'):
set_connection_box(
graph,
node_idx,
node.loc.x_low,
node.loc.y_low,
box_id=graph.maybe_add_connection_box('IMUX'),
site_pin_delay=0.,
)
continue
m = PIN_NAME_TO_PARTS.match(pin_name)
assert m is not None, pin_name
tile_type = m.group(1)
tile_type = remove_vpr_tile_prefix(tile_type)
pin = m.group(2)
tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey(
node.loc.x_low, node.loc.y_low
)
if site_as_tile_pkey is not None:
wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin)
else:
cur.execute(
"""
SELECT
pkey
FROM
wire_in_tile
WHERE
name = ?
AND
phy_tile_type_pkey IN (
SELECT tile_type_pkey FROM phy_tile WHERE pkey IN (
SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ?
)
);""", (pin, tile_pkey)
)
results = cur.fetchall()
assert len(results) == 1
wire_in_tile_pkey = results[0][0]
tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1])
cur.execute(
"""
SELECT
top_graph_node_pkey, bottom_graph_node_pkey,
left_graph_node_pkey, right_graph_node_pkey FROM wire
WHERE
wire_in_tile_pkey = ? AND tile_pkey = ?;""",
(wire_in_tile_pkey, tile_pkey)
)
result = cur.fetchone()
assert result is not None, (wire_in_tile_pkey, tile_pkey)
(
top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey,
right_graph_node_pkey
) = result
side = node.loc.side
if side == tracks.Direction.LEFT:
assert left_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[left_graph_node_pkey] = node.id
update_connection_box(
conn, graph, left_graph_node_pkey, node_idx, connection_box_map
)
elif side == tracks.Direction.RIGHT:
assert right_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[right_graph_node_pkey] = node.id
update_connection_box(
conn, graph, right_graph_node_pkey, node_idx,
connection_box_map
)
elif side == tracks.Direction.TOP:
assert top_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[top_graph_node_pkey] = node.id
update_connection_box(
conn, graph, top_graph_node_pkey, node_idx, connection_box_map
)
elif side == tracks.Direction.BOTTOM:
assert bottom_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[bottom_graph_node_pkey] = node.id
update_connection_box(
conn, graph, bottom_graph_node_pkey, node_idx,
connection_box_map
)
else:
assert False, side
def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id):
cur = conn.cursor()
cur2 = conn.cursor()
for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low,
y_high, ptc, capacitance,
resistance) in progressbar_utils.progressbar(cur.execute("""
SELECT
pkey,
track_pkey,
graph_node_type,
x_low,
x_high,
y_low,
y_high,
ptc,
capacitance,
resistance
FROM
graph_node WHERE track_pkey IS NOT NULL;""")):
if track_pkey not in alive_tracks:
continue
cur2.execute(
"""
SELECT name FROM segment WHERE pkey = (
SELECT segment_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
)
result = cur2.fetchone()
if result is not None:
segment_name = result[0]
segment_id = graph.get_segment_id_from_name(segment_name)
else:
segment_id = default_segment_id
node_type = graph2.NodeType(graph_node_type)
if node_type == graph2.NodeType.CHANX:
direction = 'X'
x_low = max(x_low, 1)
elif node_type == graph2.NodeType.CHANY:
direction = 'Y'
y_low = max(y_low, 1)
else:
assert False, node_type
canonical_loc = None
cur2.execute(
"""
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
SELECT canon_phy_tile_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
)
result = cur2.fetchone()
if result:
canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1])
track = tracks.Track(
direction=direction,
x_low=x_low,
x_high=x_high,
y_low=y_low,
y_high=y_high,
)
assert graph_node_pkey not in node_mapping
node_mapping[graph_node_pkey] = graph.add_track(
track=track,
segment_id=segment_id,
ptc=ptc,
timing=graph2.NodeTiming(
r=resistance,
c=capacitance,
),
canonical_loc=canonical_loc
)
def create_track_rr_graph(
conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
):
cur = conn.cursor()
cur.execute("""SELECT count(*) FROM track;""")
(num_channels, ) = cur.fetchone()
print('{} Import alive tracks'.format(now()))
alive_tracks = set()
for (track_pkey,
) in cur.execute("SELECT pkey FROM track WHERE alive = 1;"):
alive_tracks.add(track_pkey)
print('{} Importing alive tracks'.format(now()))
import_tracks(conn, alive_tracks, node_mapping, graph, segment_id)
print('original {} final {}'.format(num_channels, len(alive_tracks)))
def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles):
cur = conn.cursor()
delayless_switch = graph.get_switch_id('__vpr_delayless_switch__')
for tile_name, synth_tile in synth_tiles['tiles'].items():
num_inpad = len(
list(
filter(
lambda t: t['port_type'] == 'output', synth_tile['pins']
)
)
)
num_outpad = len(
list(
filter(
lambda t: t['port_type'] == 'input', synth_tile['pins']
)
)
)
for pin in synth_tile['pins']:
if pin['port_type'] in ['input', 'output']:
wire_pkey = get_wire_pkey(conn, tile_name, pin['wire'])
cur.execute(
"""
SELECT
track_pkey
FROM
node
WHERE
pkey = (
SELECT
node_pkey
FROM
wire
WHERE
pkey = ?
);""", (wire_pkey, )
)
(track_pkey, ) = cur.fetchone()
assert track_pkey is not None, (
tile_name, pin['wire'], wire_pkey
)
elif pin['port_type'] == 'VCC':
cur.execute('SELECT vcc_track_pkey FROM constant_sources')
(track_pkey, ) = cur.fetchone()
elif pin['port_type'] == 'GND':
cur.execute('SELECT gnd_track_pkey FROM constant_sources')
(track_pkey, ) = cur.fetchone()
else:
assert False, pin['port_type']
tracks_model, track_nodes = get_track_model(conn, track_pkey)
option = list(
tracks_model.get_tracks_for_wire_at_coord(
tuple(synth_tile['loc'])
).values()
)
assert len(option) > 0, (pin, len(option))
if pin['port_type'] == 'input':
tile_type = synth_tile['tile_name']
wire = 'outpad'
elif pin['port_type'] == 'output':
tile_type = synth_tile['tile_name']
wire = 'inpad'
elif pin['port_type'] == 'VCC':
tile_type = 'SYN-VCC'
wire = 'VCC'
elif pin['port_type'] == 'GND':
tile_type = 'SYN-GND'
wire = 'GND'
else:
assert False, pin
track_node = track_nodes[option[0]]
assert track_node in node_mapping, (track_node, track_pkey)
if wire == 'inpad' and num_inpad > 1:
pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
tile_type, pin['z_loc'], wire
)
elif wire == 'outpad' and num_outpad > 1:
pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
tile_type, (pin['z_loc'] - num_inpad), wire
)
else:
pin_name = graph.create_pin_name_from_tile_type_and_pin(
tile_type, wire
)
pin_node = graph.get_nodes_for_pin(
tuple(synth_tile['loc']), pin_name
)
if pin['port_type'] == 'input':
graph.add_edge(
src_node=node_mapping[track_node],
sink_node=pin_node[0][0],
switch_id=delayless_switch,
name='synth_{}_{}'.format(tile_name, pin['wire']),
)
elif pin['port_type'] in ['VCC', 'GND', 'output']:
graph.add_edge(
src_node=pin_node[0][0],
sink_node=node_mapping[track_node],
switch_id=delayless_switch,
name='synth_{}_{}'.format(tile_name, pin['wire']),
)
else:
assert False, pin
def get_switch_name(conn, graph, switch_name_map, switch_pkey):
assert switch_pkey is not None
if switch_pkey not in switch_name_map:
cur = conn.cursor()
cur.execute(
"""SELECT name FROM switch WHERE pkey = ?;""", (switch_pkey, )
)
(switch_name, ) = cur.fetchone()
switch_id = graph.get_switch_id(switch_name)
switch_name_map[switch_pkey] = switch_id
else:
switch_id = switch_name_map[switch_pkey]
return switch_id
def create_get_tile_name(conn):
cur = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_tile_name(tile_pkey):
cur.execute(
"""
SELECT name FROM phy_tile WHERE pkey = ?;
""", (tile_pkey, )
)
return cur.fetchone()[0]
return get_tile_name
def create_get_pip_wire_names(conn):
cur = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_pip_wire_names(pip_pkey):
cur.execute(
"""SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey
FROM pip_in_tile WHERE pkey = ?;""", (pip_pkey, )
)
src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone()
cur.execute(
"""SELECT name FROM wire_in_tile WHERE pkey = ?;""",
(src_wire_in_tile_pkey, )
)
(src_net, ) = cur.fetchone()
cur.execute(
"""SELECT name FROM wire_in_tile WHERE pkey = ?;""",
(dest_wire_in_tile_pkey, )
)
(dest_net, ) = cur.fetchone()
return (src_net, dest_net)
return get_pip_wire_names
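# Illustrative sketch (not used elsewhere in this script): the two factories
# above share a memoised-lookup pattern - bind one cursor in a closure and let
# functools.lru_cache answer repeated lookups for the same primary key without
# re-querying SQLite. The table name below is a placeholder, not part of the
# real schema.
def create_get_name_example(conn, table='phy_tile'):
    cur = conn.cursor()

    @functools.lru_cache(maxsize=None)
    def get_name(pkey):
        cur.execute('SELECT name FROM {} WHERE pkey = ?;'.format(table), (pkey, ))
        return cur.fetchone()[0]

    return get_name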
def get_number_graph_edges(conn, graph, node_mapping):
num_edges = len(graph.edges)
print('{} Counting edges.'.format(now()))
cur = conn.cursor()
cur.execute("SELECT count() FROM graph_edge;" "")
for src_graph_node, dest_graph_node in cur.execute("""
SELECT
src_graph_node_pkey,
dest_graph_node_pkey
FROM
graph_edge;
"""):
if src_graph_node not in node_mapping:
continue
if dest_graph_node not in node_mapping:
continue
num_edges += 1
return num_edges
def import_graph_edges(conn, graph, node_mapping):
# First yield existing edges
print('{} Importing existing edges.'.format(now()))
for edge in graph.edges:
yield (edge.src_node, edge.sink_node, edge.switch_id, None)
# Then yield edges from database.
cur = conn.cursor()
cur.execute("SELECT count() FROM graph_edge;" "")
(num_edges, ) = cur.fetchone()
get_tile_name = create_get_tile_name(conn)
get_pip_wire_names = create_get_pip_wire_names(conn)
switch_name_map = {}
print('{} Importing edges from database.'.format(now()))
with progressbar_utils.ProgressBar(max_value=num_edges) as bar:
for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey,
pip_pkey, backward) in enumerate(cur.execute("""
SELECT
src_graph_node_pkey,
dest_graph_node_pkey,
switch_pkey,
phy_tile_pkey,
pip_in_tile_pkey,
backward
FROM
graph_edge;
""")):
if src_graph_node not in node_mapping:
continue
if dest_graph_node not in node_mapping:
continue
if pip_pkey is not None:
tile_name = get_tile_name(phy_tile_pkey)
src_net, dest_net = get_pip_wire_names(pip_pkey)
if not backward:
pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net)
else:
pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net)
else:
pip_name = None
switch_id = get_switch_name(
conn, graph, switch_name_map, switch_pkey
)
src_node = node_mapping[src_graph_node]
sink_node = node_mapping[dest_graph_node]
if pip_name is not None:
feature = check_feature(pip_name)
if feature:
yield (
src_node, sink_node, switch_id,
(('fasm_features', feature), )
)
else:
yield (src_node, sink_node, switch_id, ())
else:
yield (src_node, sink_node, switch_id, ())
if idx % 1024 == 0:
bar.update(idx)
def create_channels(conn):
cur = conn.cursor()
cur.execute(
"""
SELECT chan_width_max, x_min, x_max, y_min, y_max FROM channel;"""
)
chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone()
cur.execute('SELECT idx, info FROM x_list;')
x_list = []
for idx, info in cur:
x_list.append(graph2.ChannelList(idx, info))
cur.execute('SELECT idx, info FROM y_list;')
y_list = []
for idx, info in cur:
y_list.append(graph2.ChannelList(idx, info))
return graph2.Channels(
chan_width_max=chan_width_max,
x_min=x_min,
y_min=y_min,
x_max=x_max,
y_max=y_max,
x_list=x_list,
y_list=y_list,
)
def create_connection_boxes(conn, graph):
""" Assign connection box ids for all connection box types. """
cur = conn.cursor()
cur.execute(
"""
SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN (
SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN (
SELECT connection_box_wire_pkey FROM graph_node
WHERE connection_box_wire_pkey IS NOT NULL
)
);"""
)
connection_box_map = {}
for wire_in_tile_pkey, tile_type_pkey, wire_name in cur:
connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box(
reduce_connection_box(wire_name)
)
return connection_box_map
def yield_nodes(nodes):
with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar:
for idx, node in enumerate(nodes):
yield node
if idx % 1024 == 0:
bar.update(idx)
def phy_grid_dims(conn):
""" Returns physical grid dimensions. """
cur = conn.cursor()
cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;")
x_max = cur.fetchone()[0]
cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;")
y_max = cur.fetchone()[0]
return x_max + 1, y_max + 1
def find_constant_network(graph):
""" Find VCC and GND tiles and create synth_tiles input.
All arches should have these synthetic tiles; search the input rr graph
for the SYN-GND and SYN-VCC tiles.
"""
block_types = {}
for block_type in graph.block_types:
block_types[block_type.name] = block_type.id
assert 'SYN-GND' in block_types
assert 'SYN-VCC' in block_types
gnd_block_id = block_types['SYN-GND']
vcc_block_id = block_types['SYN-VCC']
gnd_loc = None
vcc_loc = None
for grid_loc in graph.grid:
if gnd_block_id == grid_loc.block_type_id:
assert gnd_loc is None
gnd_loc = (grid_loc.x, grid_loc.y)
if vcc_block_id == grid_loc.block_type_id:
assert vcc_loc is None
vcc_loc = (grid_loc.x, grid_loc.y)
assert gnd_loc is not None
assert vcc_loc is not None
synth_tiles = {
'tiles':
{
"VCC":
{
'loc':
vcc_loc,
'pins':
[
{
'wire': 'VCC',
'pad': 'VCC',
'port_type': 'VCC',
'is_clock': False,
},
],
},
"GND":
{
'loc':
gnd_loc,
'pins':
[
{
'wire': 'GND',
'pad': 'GND',
'port_type': 'GND',
'is_clock': False,
},
],
},
}
}
return synth_tiles
def create_node_remap(nodes, channels_obj):
N = 2
p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max)))
point_map = {}
for node in nodes:
x = node.loc.x_low
y = node.loc.y_low
if (x, y) not in point_map:
point_map[(x, y)] = []
point_map[(x, y)].append(node.id)
hilbert_curve = HilbertCurve(p, N)
idx = 0
id_map = {}
for h in range(hilbert_curve.max_h + 1):
coord = tuple(hilbert_curve.coordinates_from_distance(h))
if coord not in point_map:
continue
for old_id in point_map[coord]:
id_map[old_id] = idx
idx += 1
del point_map[coord]
return lambda x: id_map[x]
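# Illustrative sketch (not used by main()): the remap above works because
# sorting by Hilbert distance keeps spatially adjacent (x, y) locations close
# together in the new numbering. This assumes the hilbertcurve 1.x API used
# above (HilbertCurve(p, n), distance_from_coordinates, coordinates_from_distance);
# newer releases of the package renamed these methods.
def hilbert_order_example(points, p=4, n=2):
    curve = HilbertCurve(p, n)
    # e.g. (0, 0) and (0, 1) receive nearby indices, while (7, 7) lands far away.
    return sorted(points, key=lambda pt: curve.distance_from_coordinates(list(pt)))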
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--db_root', required=True, help='Project X-Ray Database'
)
parser.add_argument('--part', required=True, help='FPGA part')
parser.add_argument(
'--read_rr_graph', required=True, help='Input rr_graph file'
)
parser.add_argument(
'--write_rr_graph', required=True, help='Output rr_graph file'
)
parser.add_argument(
'--write_rr_node_map',
required=True,
help='Output map of graph_node_pkey to rr inode file'
)
parser.add_argument(
'--connection_database',
help='Database of fabric connectivity',
required=True
)
parser.add_argument(
'--synth_tiles',
help='If using an ROI, synthetic tile definition from prjxray-arch-import'
)
parser.add_argument(
'--graph_limit',
help='Limit grid to specified dimensions in x_min,y_min,x_max,y_max',
)
parser.add_argument(
'--vpr_capnp_schema_dir',
help='Directory containing VPR schema files',
)
print('{} Starting routing import'.format(now()))
args = parser.parse_args()
db = prjxray.db.Database(args.db_root, args.part)
populate_hclk_cmt_tiles(db)
synth_tiles = None
if args.synth_tiles:
use_roi = True
with open(args.synth_tiles) as f:
synth_tiles = json.load(f)
roi = Roi(
db=db,
x1=synth_tiles['info']['GRID_X_MIN'],
y1=synth_tiles['info']['GRID_Y_MIN'],
x2=synth_tiles['info']['GRID_X_MAX'],
y2=synth_tiles['info']['GRID_Y_MAX'],
)
print('{} Generating routing graph for ROI.'.format(now()))
elif args.graph_limit:
use_roi = True
x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(','))
roi = Roi(
db=db,
x1=x_min,
y1=y_min,
x2=x_max,
y2=y_max,
)
else:
use_roi = False
roi = None
synth_tiles = None
capnp_graph = capnp_graph2.Graph(
rr_graph_schema_fname=os.path.join(
args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp'
),
input_file_name=args.read_rr_graph,
progressbar=progressbar_utils.progressbar,
output_file_name=args.write_rr_graph,
)
graph = capnp_graph.graph
if synth_tiles is None:
synth_tiles = find_constant_network(graph)
with sqlite3.connect("file:{}?mode=ro".format(args.connection_database),
uri=True) as conn:
populate_bufg_rebuf_map(conn)
cur = conn.cursor()
for name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \
switch_type in cur.execute("""
SELECT
name,
internal_capacitance,
drive_resistance,
intrinsic_delay,
penalty_cost,
switch_type
FROM
switch;"""):
# Add back missing switches, which were unused in the arch xml, and so
# were not emitted in the rr graph XML.
#
# TODO: This can be removed once
# https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354
# is fixed.
try:
graph.get_switch_id(name)
continue
except KeyError:
capnp_graph.add_switch(
graph2.Switch(
id=None,
name=name,
type=graph2.SwitchType[switch_type.upper()],
timing=graph2.SwitchTiming(
r=drive_resistance,
c_in=0.0,
c_out=0.0,
c_internal=internal_capacitance,
t_del=intrinsic_delay,
p_cost=penalty_cost,
),
sizing=graph2.SwitchSizing(
mux_trans_size=0,
buf_size=0,
),
)
)
# Mapping of graph_node.pkey to rr node id.
node_mapping = {}
print('{} Creating connection box list'.format(now()))
connection_box_map = create_connection_boxes(conn, graph)
# Match site pin rr nodes with graph_nodes in the connection database.
print('{} Importing graph nodes'.format(now()))
import_graph_nodes(conn, graph, node_mapping, connection_box_map)
# Walk all track graph nodes and add them.
print('{} Creating tracks'.format(now()))
segment_id = graph.get_segment_id_from_name('dummy')
create_track_rr_graph(
conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
)
# Set of (src, sink, switch_id) tuples for pip edges that have been sent to
# VPR. VPR cannot handle duplicate paths with the same switch id.
print('{} Adding synthetic edges'.format(now()))
add_synthetic_edges(conn, graph, node_mapping, synth_tiles)
print('{} Creating channels.'.format(now()))
channels_obj = create_channels(conn)
node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj)
x_dim, y_dim = phy_grid_dims(conn)
connection_box_obj = graph.create_connection_box_object(
x_dim=x_dim, y_dim=y_dim
)
num_edges = get_number_graph_edges(conn, graph, node_mapping)
print('{} Serializing to disk.'.format(now()))
capnp_graph.serialize_to_capnp(
channels_obj=channels_obj,
connection_box_obj=connection_box_obj,
num_nodes=len(capnp_graph.graph.nodes),
nodes_obj=yield_nodes(capnp_graph.graph.nodes),
num_edges=num_edges,
edges_obj=import_graph_edges(conn, graph, node_mapping),
node_remap=node_remap,
)
for k in node_mapping:
node_mapping[k] = node_remap(node_mapping[k])
print('{} Writing node map.'.format(now()))
with open(args.write_rr_node_map, 'wb') as f:
pickle.dump(node_mapping, f)
print('{} Done writing node map.'.format(now()))
if __name__ == '__main__':
main()
| 30.375344
| 90
| 0.590114
| 5,808
| 44,105
| 4.156853
| 0.101067
| 0.023195
| 0.016568
| 0.016816
| 0.4224
| 0.343619
| 0.282856
| 0.256513
| 0.20888
| 0.174212
| 0
| 0.008274
| 0.312187
| 44,105
| 1,451
| 91
| 30.396278
| 0.787579
| 0.105793
| 0
| 0.227807
| 0
| 0
| 0.098637
| 0.017309
| 0
| 0
| 0
| 0.000689
| 0.028877
| 1
| 0.034225
| false
| 0
| 0.035294
| 0.00107
| 0.108021
| 0.017112
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be20fd972c9533d7359e606c8ff9c31f5c519ad2
| 17,854
|
py
|
Python
|
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
|
andrewbowen19/ClusterEclipsingBinaries
|
e554cb6bb613e0d3703314e50fcf5289f50bf572
|
[
"MIT"
] | null | null | null |
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
|
andrewbowen19/ClusterEclipsingBinaries
|
e554cb6bb613e0d3703314e50fcf5289f50bf572
|
[
"MIT"
] | null | null | null |
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
|
andrewbowen19/ClusterEclipsingBinaries
|
e554cb6bb613e0d3703314e50fcf5289f50bf572
|
[
"MIT"
] | null | null | null |
#########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate files
# Doing this so we don't have to rerun the analysis each time
# Can write a separate script for the p-ecc plots
# Quest paths in this version of script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
def file_len(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
return Phs.decompose().to(units.day)
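# Written out, the hard-soft boundary period computed above is
#   P_hs = (pi * G / sqrt(2)) * (m1 * m2 / m3)**(3/2) * (m1 + m2)**(-1/2) * sigma**(-3)
# with m1, m2 the binary component masses, m3 a typical perturber mass and
# sigma the cluster velocity dispersion; the astropy units machinery converts
# the result to days.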
#binary fraction similar to the field, but limited by the hard-soft boundary
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
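# Usage note: fitRagfb() returns an astropy PowerLaw1D model, which is callable,
# so the field binary fraction at primary mass m (in Msun) is simply fit(m);
# that is how it is applied per mass bin in the __main__ block below (fbFit(m1val)).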
def RagNormal(x, cdf = False):
mean = 5.03
std = 2.28
if (cdf):
return scipy.stats.norm.cdf(x,mean,std)
return scipy.stats.norm.pdf(x,mean,std)
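# Presumably mean = 5.03 and std = 2.28 are the log10(P/day) parameters of the
# Raghavan et al. (2010) field log-normal period distribution; the cdf=True
# branch is used below to keep only the fraction of binaries with periods
# shorter than the hard-soft boundary.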
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']):
c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
c2 = '#A62B1F' #Dali Red
c3 = '#BF8A26' #Dali Beige
fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2
histAll = np.insert(histAll,0,0)
histObs = np.insert(histObs,0,0)
for f in filters:
histRec[f] = np.insert(histRec[f],0,0)
#PDF
ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
for f in filters:
lw = 1
if (f == 'all'):
lw = 0.5
ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
ax1.set_ylabel('PDF')
ax1.set_yscale('log')
ax1.set_title('M67 - Baseline', fontsize = 16)
ax1.set_xlabel(xtitle)
#CDF
#cdfAll = []
#cdfObs = []
#cdfRec = dict()
#for f in filters:
# cdfRec[f] = []
# for i in range(len(histAll)):
# cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
# for i in range(len(histObs)):
# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
# for f in filters:
# for i in range(len(histRec[f])):
# cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
#ax2.step(bin_edges, cdfAll, color=c1)
#ax2.step(bin_edges, cdfObs, color=c2)
#for f in filters:
# lw = 1
# if (f == 'all'):
# lw = 0.5
# ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
#ax2.set_ylabel('CDF')
#ax2.set_xlabel(xtitle)
fig.subplots_adjust(hspace=0)
fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight')
#write to a text file
with open('./eblsst_files/' + fname+'.csv','w') as fl:
outline = 'binEdges,histAll,histObs'
for f in filters:
outline += ','+f+'histRec'
outline += '\n'
fl.write(outline)
for i in range(len(bin_edges)):
outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
for f in filters:
outline += ','+str(histRec[f][i])
outline += '\n'
fl.write(outline)
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
# Dataframes to write to files later; 3 files for each sub-population - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
fb *= RagNormal(np.log10(Phs), cdf = True)
print("fb, Phs = ", fb, Phs)
Nmult *= fb
m1hAll += m1hAll0/Nall*Nmult
qhAll += qhAll0/Nall*Nmult
ehAll += ehAll0/Nall*Nmult
lphAll += lphAll0/Nall*Nmult
dhAll += dhAll0/Nall*Nmult
maghAll += maghAll0/Nall*Nmult
rhAll += rhAll0/Nall*Nmult
#Obs
obs = data.loc[data['LSM_PERIOD'] != -999]
Nobs = len(obs.index)
prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
NobsPrsa = len(prsaObs.index)
# Appending for Andrew's files
eccObs.append(prsaObs['e'].values)
pObs.append(prsaObs['p'].values)
if (Nobs >= Nlim):
m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
ehObs0, eb = np.histogram(obs["e"], bins=ebins)
lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
dhObs0, db = np.histogram(obs["d"], bins=dbins)
maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
m1hObs += m1hObs0/Nall*Nmult
qhObs += qhObs0/Nall*Nmult
ehObs += ehObs0/Nall*Nmult
lphObs += lphObs0/Nall*Nmult
dhObs += dhObs0/Nall*Nmult
maghObs += maghObs0/Nall*Nmult
rhObs += rhObs0/Nall*Nmult
#Rec
recCombined = pd.DataFrame()
prsaRecCombined = pd.DataFrame()
for filt in filters:
key = filt+'LSS_PERIOD'
if (filt == 'all'):
key = 'LSM_PERIOD'
fullP = abs(data[key] - data['p'])/data['p']
halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
Nrec = len(rec.index)
#I'd like to account for all filters here to have more accurate numbers
recCombined = recCombined.append(rec)
prsaRecCombined = prsaRecCombined.append(prsaRec)
# Going to use prsaRecCombined for ecc-p plots to account for all filters
eccRec.append(prsaRec['e'].values)
pRec.append(prsaRec['p'].values)
if (filt == 'all'):
recCombined.drop_duplicates(inplace=True)
prsaRecCombined.drop_duplicates(inplace=True)
if (Nrec >= Nlim):
m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
ehRec0, eb = np.histogram(rec["e"], bins=ebins)
lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
dhRec0, db = np.histogram(rec["d"], bins=dbins)
maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
m1hRec[filt] += m1hRec0/Nall*Nmult
qhRec[filt] += qhRec0/Nall*Nmult
ehRec[filt] += ehRec0/Nall*Nmult
lphRec[filt] += lphRec0/Nall*Nmult
dhRec[filt] += dhRec0/Nall*Nmult
maghRec[filt] += maghRec0/Nall*Nmult
rhRec[filt] += rhRec0/Nall*Nmult
#for the mollweide
if (filt == 'all'):
Nrec = len(recCombined.index)
rF = Nrec/Nall
rN = Nrec/Nall*Nmult
raN = Nmult
obN = Nobs/Nall*Nmult
fiN = Nall
fioN = Nobs
firN = Nrec
NrecPrsa = len(prsaRecCombined.index)
NrecPrsa = NrecPrsa/Nall*Nmult
NobsPrsa = NobsPrsa/Nall*Nmult
NallPrsa = NallPrsa/Nall*Nmult
recFrac.append(rF)
recN.append(rN)
rawN.append(raN)
obsN.append(obN)
fileN.append(fiN)
fileObsN.append(fioN)
fileRecN.append(firN)
allNPrsa.append(NallPrsa)
obsNPrsa.append(NobsPrsa)
recNPrsa.append(NrecPrsa)
#print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
# Concatenating p and ecc lists
eccAll = np.concatenate(eccAll)
eccObs = np.concatenate(eccObs)
eccRec = np.concatenate(eccRec)
pAll = np.concatenate(pAll)
pObs = np.concatenate(pObs)
pRec = np.concatenate(pRec)
# print('Ecc lists:', eccAll, eccObs, eccRec)
# print('P lists:', pAll, pObs, pRec)
# Appending lists with all the p/ecc values to our dataframes
# All dataframe
peccAll['e'] = eccAll
peccAll['p'] = pAll
# Observable dataframe
peccObs['e'] = eccObs
peccObs['p'] = pObs
# Recovered dataframe
peccRec['e'] = eccRec
peccRec['p'] = pRec
# print('Final Dataframes:', peccAll, peccObs, peccRec)
# print(peccRec.columns)
# 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])
peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p'])
peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p'])
#plot and save the histograms
saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')
#make the mollweide
coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs')
lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
RAwrap = coords.ra.wrap_at(180.*units.degree).degree
Decwrap = coords.dec.wrap_at(180.*units.degree).degree
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'% recovered')
f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight')
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'log10(N) recovered')
f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight')
if (doIndividualPlots):
fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight')
fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight')
fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight')
flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight')
fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight')
fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight')
frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight')
print("###################")
print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN)))
print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN)))
print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN)))
print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.)
print("###################")
print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN)))
print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN)))
print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN)))
print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.)
print("###################")
print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))
print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa)))
print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa)))
print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
| 35.284585
| 213
| 0.641089
| 2,717
| 17,854
| 4.167832
| 0.195068
| 0.022519
| 0.020399
| 0.014129
| 0.271459
| 0.172995
| 0.117361
| 0.106853
| 0.098552
| 0.096344
| 0
| 0.039301
| 0.163437
| 17,854
| 505
| 214
| 35.354455
| 0.718867
| 0.179456
| 0
| 0.080692
| 0
| 0.008646
| 0.117994
| 0.010474
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014409
| false
| 0.002882
| 0.028818
| 0
| 0.057637
| 0.051873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be23cbbbbbb53c2c62b109846cda81e757eb1b58
| 14,527
|
py
|
Python
|
tests/engine/knowledge_base.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
tests/engine/knowledge_base.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
tests/engine/knowledge_base.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the knowledge base."""
import unittest
from plaso.containers import artifacts
from plaso.engine import knowledge_base
from tests import test_lib as shared_test_lib
class KnowledgeBaseTest(shared_test_lib.BaseTestCase):
"""Tests for the knowledge base."""
# pylint: disable=protected-access
_MACOS_PATHS = [
'/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions',
('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/'
'apdfllckaahabafndbhieahigkjlhalf'),
'/private/var/log/system.log',
'/Users/frank/Library/Application Data/Google/Chrome/Default',
'/Users/hans/Library/Application Data/Google/Chrome/Default',
('/Users/frank/Library/Application Data/Google/Chrome/Default/'
'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'),
'/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions']
_MACOS_USERS = [
{'name': 'root', 'path': '/var/root', 'sid': '0'},
{'name': 'frank', 'path': '/Users/frank', 'sid': '4052'},
{'name': 'hans', 'path': '/Users/hans', 'sid': '4352'},
{'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}]
_WINDOWS_PATHS = [
'C:\\Users\\Dude\\SomeFolder\\Chrome\\Default\\Extensions',
('C:\\Users\\Dude\\SomeNoneStandardFolder\\Chrome\\Default\\Extensions\\'
'hmjkmjkepdijhoojdojkdfohbdgmmhki'),
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'blpcfgokakmgnkcojhhkbfbldkacnbeo'),
'C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions',
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'icppfcnhkcmnfdhfhphakoifcfokfdhg'),
'C:\\Windows\\System32',
'C:\\Stuff/with path separator\\Folder']
_WINDOWS_USERS = [
{'name': 'dude', 'path': 'C:\\Users\\dude', 'sid': 'S-1'},
{'name': 'frank', 'path': 'C:\\Users\\frank', 'sid': 'S-2'}]
def _SetUserAccounts(self, knowledge_base_object, users):
"""Sets the user accounts in the knowledge base.
Args:
knowledge_base_object (KnowledgeBase): knowledge base.
users (list[dict[str,str])): users.
"""
for user in users:
identifier = user.get('sid', user.get('uid', None))
if not identifier:
continue
user_account = artifacts.UserAccountArtifact(
identifier=identifier, user_directory=user.get('path', None),
username=user.get('name', None))
knowledge_base_object.AddUserAccount(user_account)
def testCodepageProperty(self):
"""Tests the codepage property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.codepage, 'cp1252')
def testHostnameProperty(self):
"""Tests the hostname property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.hostname, '')
def testOperatingSystemProperty(self):
"""Tests the operating_system property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertIsNone(operating_system)
knowledge_base_object.SetValue('operating_system', 'Windows')
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertEqual(operating_system, 'Windows')
def testTimezoneProperty(self):
"""Tests the timezone property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.timezone.zone, 'UTC')
def testUserAccountsProperty(self):
"""Tests the user accounts property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(len(knowledge_base_object.user_accounts), 0)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertEqual(len(knowledge_base_object.user_accounts), 1)
def testYearProperty(self):
"""Tests the year property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.year, 0)
def testAddUserAccount(self):
"""Tests the AddUserAccount function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
with self.assertRaises(KeyError):
knowledge_base_object.AddUserAccount(user_account)
def testAddEnvironmentVariable(self):
"""Tests the AddEnvironmentVariable function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
with self.assertRaises(KeyError):
knowledge_base_object.AddEnvironmentVariable(environment_variable)
def testGetEnvironmentVariable(self):
"""Tests the GetEnvironmentVariable functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'SystemRoot')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'sYsTeMrOoT')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'Bogus')
self.assertIsNone(test_environment_variable)
def testGetEnvironmentVariables(self):
"""Tests the GetEnvironmentVariables function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='WinDir', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variables = knowledge_base_object.GetEnvironmentVariables()
self.assertEqual(len(environment_variables), 2)
def testGetHostname(self):
"""Tests the GetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, '')
# TODO: add tests for GetMountPoint.
def testGetSourceConfigurationArtifacts(self):
"""Tests the GetSourceConfigurationArtifacts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
source_configurations = (
knowledge_base_object.GetSourceConfigurationArtifacts())
self.assertEqual(len(source_configurations), 1)
self.assertIsNotNone(source_configurations[0])
system_configuration = source_configurations[0].system_configuration
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
def testGetSystemConfigurationArtifact(self):
"""Tests the _GetSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
system_configuration = (
knowledge_base_object._GetSystemConfigurationArtifact())
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
# TODO: add tests for GetTextPrepend.
def testGetUsernameByIdentifier(self):
"""Tests the GetUsernameByIdentifier function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
username = knowledge_base_object.GetUsernameByIdentifier('1000')
self.assertEqual(username, 'testuser')
username = knowledge_base_object.GetUsernameByIdentifier(1000)
self.assertEqual(username, '')
username = knowledge_base_object.GetUsernameByIdentifier('1001')
self.assertEqual(username, '')
def testGetUsernameForPath(self):
"""Tests the GetUsernameForPath function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[4])
self.assertEqual(username, 'hans')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertIsNone(username)
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[2])
self.assertEqual(username, 'frank')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[2])
self.assertIsNone(username)
def testGetSetValue(self):
"""Tests the Get and SetValue functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
expected_value = 'test value'
knowledge_base_object.SetValue('Test', expected_value)
value = knowledge_base_object.GetValue('Test')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('tEsT')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('Bogus')
self.assertIsNone(value)
def testHasUserAccounts(self):
"""Tests the HasUserAccounts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertFalse(knowledge_base_object.HasUserAccounts())
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertTrue(knowledge_base_object.HasUserAccounts())
def testReadSystemConfigurationArtifact(self):
"""Tests the ReadSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.hostname = artifacts.HostnameArtifact(
name='myhost.mydomain')
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
system_configuration.user_accounts.append(user_account)
knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration)
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, 'myhost.mydomain')
def testSetActiveSession(self):
"""Tests the SetActiveSession function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a')
self.assertEqual(
knowledge_base_object._active_session,
'ddda05bedf324cbd99fa8c24b8a0037a')
knowledge_base_object.SetActiveSession(
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
self.assertEqual(
knowledge_base_object._active_session,
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
def testSetCodepage(self):
"""Tests the SetCodepage function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetCodepage('cp1252')
with self.assertRaises(ValueError):
knowledge_base_object.SetCodepage('bogus')
def testSetHostname(self):
"""Tests the SetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
# TODO: add tests for SetMountPoint.
# TODO: add tests for SetTextPrepend.
def testSetTimeZone(self):
"""Tests the SetTimeZone function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
time_zone_artifact = artifacts.TimeZoneArtifact(
localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112',
name='Eastern Standard Time')
knowledge_base_object.AddAvailableTimeZone(time_zone_artifact)
# Set an IANA time zone name.
knowledge_base_object.SetTimeZone('Europe/Zurich')
self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich')
# Set a Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern Standard Time')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a localized Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a MUI form Windows time zone name.
knowledge_base_object.SetTimeZone('@tzres.dll,-112')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
with self.assertRaises(ValueError):
knowledge_base_object.SetTimeZone('Bogus')
if __name__ == '__main__':
unittest.main()
| 37.153453
| 79
| 0.748537
| 1,441
| 14,527
| 7.288688
| 0.149896
| 0.151004
| 0.170047
| 0.061316
| 0.641817
| 0.614777
| 0.601638
| 0.536799
| 0.460535
| 0.394459
| 0
| 0.00933
| 0.144145
| 14,527
| 390
| 80
| 37.248718
| 0.835438
| 0.095684
| 0
| 0.477551
| 0
| 0
| 0.150493
| 0.074938
| 0
| 0
| 0
| 0.002564
| 0.187755
| 1
| 0.093878
| false
| 0
| 0.016327
| 0
| 0.130612
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be247dcc0b3afb4ed9e9527cdfcf9da7e14edb83
| 2,244
|
py
|
Python
|
Problems/Dynamic Programming/140. Word Break II.py
|
BYJRK/LeetCode-Solutions
|
008467e1717309066a519acb8623d2f84071b64a
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/140. Word Break II.py
|
BYJRK/LeetCode-Solutions
|
008467e1717309066a519acb8623d2f84071b64a
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/140. Word Break II.py
|
BYJRK/LeetCode-Solutions
|
008467e1717309066a519acb8623d2f84071b64a
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/word-break-ii/
from typing import List
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
# Quick check: if s contains any letter that appears in none of the words, return immediately
set1 = set(s)
set2 = set(''.join(wordDict))
if not set1.issubset(set2):
return []
# dp[i] records the ways in which the prefix s[:i] can be segmented
# [[]] marks the start (the empty prefix)
# None means the position has not been reached yet, or there is no way to segment it
# e.g. [['a', 'b'], ['ab']] means two ways of building this prefix have been found so far
dp = [None] * (len(s) + 1)
dp[0] = [[]]
for i in range(len(s) + 1):
# Skip if the current prefix cannot be segmented
if dp[i] is None:
continue
tmp = s[i:]
for w in wordDict:
idx = len(w) + i
if idx > len(s):
continue
if tmp.startswith(w):
if dp[idx] is None:
dp[idx] = []
# Copy every existing segmentation to the new position, appending the current word to each
for dic in dp[i]:
dp[idx].append(dic + [w])
if dp[-1] is None:
return []
return [' '.join(res) for res in dp[-1]]
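# Worked example (illustrative): s = 'catsand', wordDict = ['cat', 'cats', 'and', 'sand']
#   dp[0] = [[]]
#   dp[3] = [['cat']]     ('cat' matches at i = 0)
#   dp[4] = [['cats']]    ('cats' matches at i = 0)
#   dp[7] = [['cat', 'sand'], ['cats', 'and']]
# so the method returns ['cat sand', 'cats and'].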
def wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]:
def dfs(s: str, memo={}):
if s in memo:
return memo[s]
if len(s) == 0:
return [[]]
res = []
for w in wordDict:
if s.startswith(w):
tmp = s[len(w):]
combos = dfs(tmp, memo)
for combo in combos:
res.append([w] + combo)
memo[s] = res
return res
return dfs(s)
s = Solution()
print(s.wordBreak_dfs('catsanddog', ["cat", "cats", "and", "sand", "dog"]))
print(s.wordBreak_dfs('pineapplepenapple', [
"apple", "pen", "applepen", "pine", "pineapple"]))
# text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
# words = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa",
# "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"]
# print(s.wordBreak(text, words))
| 29.142857
| 162
| 0.483512
| 237
| 2,244
| 4.565401
| 0.396624
| 0.025878
| 0.04159
| 0.029575
| 0.055453
| 0.055453
| 0.055453
| 0.055453
| 0
| 0
| 0
| 0.007138
| 0.375668
| 2,244
| 76
| 163
| 29.526316
| 0.765168
| 0.251783
| 0
| 0.133333
| 0
| 0
| 0.044418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.022222
| 0
| 0.266667
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be260edf2b0780a31f443fdc8e024043c1398df0
| 30,595
|
py
|
Python
|
neutron/tests/unit/db/test_migration.py
|
banhr/neutron
|
4b3e73648327ce9f4d3437986a8663372f577f1b
|
[
"Apache-2.0"
] | 1
|
2018-07-04T07:59:31.000Z
|
2018-07-04T07:59:31.000Z
|
neutron/tests/unit/db/test_migration.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/db/test_migration.py
|
weiqiLee/neutron
|
ddc72ebd41a0e7804b33a21583d3add008191229
|
[
"Apache-2.0"
] | 1
|
2018-08-28T17:13:16.000Z
|
2018-08-28T17:13:16.000Z
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import re
import sys
import textwrap
from alembic.autogenerate import api as alembic_ag_api
from alembic import config as alembic_config
from alembic.operations import ops as alembic_ops
from alembic import script as alembic_script
import fixtures
import mock
from neutron_lib.utils import helpers
from oslo_utils import fileutils
import pkg_resources
import sqlalchemy as sa
from testtools import matchers
from neutron.conf.db import migration_cli
from neutron.db import migration
from neutron.db.migration import autogen
from neutron.db.migration import cli
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import testlib_api
class FakeConfig(object):
service = ''
class FakeRevision(object):
path = 'fakepath'
def __init__(self, labels=None, down_revision=None, is_branch_point=False):
if not labels:
labels = set()
self.branch_labels = labels
self.down_revision = down_revision
self.is_branch_point = is_branch_point
self.revision = helpers.get_random_string(10)
self.module = mock.MagicMock()
class MigrationEntrypointsMemento(fixtures.Fixture):
'''Create a copy of the migration entrypoints map so it can be restored
during test cleanup.
'''
def _setUp(self):
self.ep_backup = {}
for proj, ep in migration_cli.migration_entrypoints.items():
self.ep_backup[proj] = copy.copy(ep)
self.addCleanup(self.restore)
def restore(self):
migration_cli.migration_entrypoints = self.ep_backup
class TestDbMigration(base.BaseTestCase):
def setUp(self):
super(TestDbMigration, self).setUp()
mock.patch('alembic.op.get_bind').start()
self.mock_alembic_is_offline = mock.patch(
'alembic.context.is_offline_mode', return_value=False).start()
self.mock_alembic_is_offline.return_value = False
self.mock_sa_inspector = mock.patch(
'sqlalchemy.engine.reflection.Inspector').start()
def _prepare_mocked_sqlalchemy_inspector(self):
mock_inspector = mock.MagicMock()
mock_inspector.get_table_names.return_value = ['foo', 'bar']
mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
{'name': 'bar_column'}]
self.mock_sa_inspector.from_engine.return_value = mock_inspector
def test_schema_has_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_table('foo'))
def test_schema_has_table_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
def test_schema_has_column_missing_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column('meh', 'meh'))
def test_schema_has_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
def test_schema_has_column_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_column,
'foo', 'foo_col')
def test_schema_has_column_missing_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column(
'foo', column_name='meh'))
class TestCli(base.BaseTestCase):
def setUp(self):
super(TestCli, self).setUp()
self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
self.do_alembic_cmd = self.do_alembic_cmd_p.start()
self.mock_alembic_err = mock.patch('alembic.util.err').start()
self.mock_alembic_warn = mock.patch('alembic.util.warn').start()
self.mock_alembic_err.side_effect = SystemExit
def mocked_root_dir(cfg):
return os.path.join('/fake/dir', cli._get_project_base(cfg))
mock_root = mock.patch.object(cli, '_get_package_root_dir').start()
mock_root.side_effect = mocked_root_dir
# Avoid creating fake directories
mock.patch('oslo_utils.fileutils.ensure_tree').start()
# Set up some configs and entrypoints for tests to chew on
self.configs = []
self.projects = ('neutron', 'networking-foo', 'neutron-fwaas')
ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')
self.useFixture(MigrationEntrypointsMemento())
migration_cli.migration_entrypoints = {}
for project in self.projects:
config = alembic_config.Config(ini)
config.set_main_option('neutron_project', project)
module_name = project.replace('-', '_') + '.db.migration'
attrs = ('alembic_migrations',)
script_location = ':'.join([module_name, attrs[0]])
config.set_main_option('script_location', script_location)
self.configs.append(config)
entrypoint = pkg_resources.EntryPoint(project,
module_name,
attrs=attrs)
migration_cli.migration_entrypoints[project] = entrypoint
def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]):
with mock.patch.object(sys, 'argv', argv),\
mock.patch.object(cli, 'run_sanity_checks'),\
mock.patch.object(cli, 'validate_revisions'):
cli.main()
def _append_version_path(args):
args = copy.copy(args)
if 'autogenerate' in args and not args['autogenerate']:
args['version_path'] = mock.ANY
return args
self.do_alembic_cmd.assert_has_calls(
[mock.call(mock.ANY, func_name, **_append_version_path(kwargs))
for kwargs in exp_kwargs]
)
def test_stamp(self):
self._main_test_helper(
['prog', 'stamp', 'foo'],
'stamp',
[{'revision': 'foo', 'sql': False}]
)
self._main_test_helper(
['prog', 'stamp', 'foo', '--sql'],
'stamp',
[{'revision': 'foo', 'sql': True}]
)
def _validate_cmd(self, cmd):
self._main_test_helper(
['prog', cmd],
cmd,
[{'verbose': False}])
self._main_test_helper(
['prog', cmd, '--verbose'],
cmd,
[{'verbose': True}])
def test_branches(self):
self._validate_cmd('branches')
def test_current(self):
self._validate_cmd('current')
def test_history(self):
self._validate_cmd('history')
def test_heads(self):
self._validate_cmd('heads')
def test_check_migration(self):
with mock.patch.object(cli, 'validate_head_files') as validate:
self._main_test_helper(['prog', 'check_migration'], 'branches')
self.assertEqual(len(self.projects), validate.call_count)
def _test_database_sync_revision(self, separate_branches=True):
with mock.patch.object(cli, 'update_head_files') as update:
if separate_branches:
mock.patch('os.path.exists').start()
expected_kwargs = [{
'message': 'message', 'sql': False, 'autogenerate': True,
}]
self._main_test_helper(
['prog', 'revision', '--autogenerate', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': True,
'autogenerate': False,
'head': cli._get_branch_head(branch)
} for branch in cli.MIGRATION_BRANCHES]
for kwarg in expected_kwargs:
kwarg['autogenerate'] = False
kwarg['sql'] = True
self._main_test_helper(
['prog', 'revision', '--sql', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': False,
'autogenerate': False,
'head': 'expand@head'
}]
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--expand'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
for kwarg in expected_kwargs:
kwarg['head'] = 'contract@head'
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--contract'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
def test_database_sync_revision(self):
self._test_database_sync_revision()
def test_database_sync_revision_no_branches(self):
# Test that the old branchless approach is still supported
self._test_database_sync_revision(separate_branches=False)
def test_upgrade_revision(self):
self._main_test_helper(
['prog', 'upgrade', '--sql', 'head'],
'upgrade',
[{'desc': None, 'revision': 'heads', 'sql': True}]
)
def test_upgrade_delta(self):
self._main_test_helper(
['prog', 'upgrade', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': '+3', 'sql': False}]
)
def test_upgrade_revision_delta(self):
self._main_test_helper(
['prog', 'upgrade', 'kilo', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': 'kilo+3', 'sql': False}]
)
def test_upgrade_expand(self):
self._main_test_helper(
['prog', 'upgrade', '--expand'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': 'expand@head',
'sql': False}]
)
def test_upgrade_expand_contract_are_mutually_exclusive(self):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--expand --contract'], 'upgrade')
def _test_upgrade_conflicts_with_revision(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')
def _test_upgrade_conflicts_with_delta(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s +3' % mode], 'upgrade')
def _test_revision_autogenerate_conflicts_with_branch(self, branch):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'revision', '--autogenerate', '--%s' % branch],
'revision')
def test_revision_autogenerate_conflicts_with_expand(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.EXPAND_BRANCH)
def test_revision_autogenerate_conflicts_with_contract(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.CONTRACT_BRANCH)
def test_upgrade_expand_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('expand')
def test_upgrade_contract_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('contract')
def test_upgrade_expand_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('expand')
def test_upgrade_contract_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('contract')
def test_upgrade_contract(self):
self._main_test_helper(
['prog', 'upgrade', '--contract'],
'upgrade',
[{'desc': cli.CONTRACT_BRANCH,
'revision': 'contract@head',
'sql': False}]
)
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_upgrade_milestone_expand_before_contract(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
self._main_test_helper(
['prog', '--subproject', 'neutron', 'upgrade', 'liberty'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': e_revs[3].revision,
'sql': False},
{'desc': cli.CONTRACT_BRANCH,
'revision': c_revs[1].revision,
'sql': False}]
)
def assert_command_fails(self, command):
# Avoid cluttering stdout with argparse error messages
mock.patch('argparse.ArgumentParser._print_message').start()
with mock.patch.object(sys, 'argv', command), mock.patch.object(
cli, 'run_sanity_checks'):
self.assertRaises(SystemExit, cli.main)
def test_downgrade_fails(self):
self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])
def test_upgrade_negative_relative_revision_fails(self):
self.assert_command_fails(['prog', 'upgrade', '-2'])
def test_upgrade_negative_delta_fails(self):
self.assert_command_fails(['prog', 'upgrade', '--delta', '-2'])
def test_upgrade_rejects_delta_with_relative_revision(self):
self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3'])
def _test_validate_head_files_helper(self, heads, contract_head='',
expand_head=''):
fake_config = self.configs[0]
head_files_not_exist = (contract_head == expand_head == '')
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
mock.patch('os.path.exists') as os_mock:
if head_files_not_exist:
os_mock.return_value = False
else:
os_mock.return_value = True
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
fake_config), contract_head + '\n')).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
fake_config), expand_head + '\n')).mock_open
if contract_head in heads and expand_head in heads:
cli.validate_head_files(fake_config)
elif head_files_not_exist:
cli.validate_head_files(fake_config)
self.assertTrue(self.mock_alembic_warn.called)
else:
self.assertRaises(
SystemExit,
cli.validate_head_files,
fake_config
)
self.assertTrue(self.mock_alembic_err.called)
if contract_head in heads and expand_head in heads:
mock_open_ex.assert_called_with(
cli._get_expand_head_file_path(fake_config))
mock_open_con.assert_called_with(
cli._get_contract_head_file_path(fake_config))
if not head_files_not_exist:
fc.assert_called_once_with(fake_config)
def test_validate_head_files_success(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='a',
expand_head='b')
def test_validate_head_files_missing_file(self):
self._test_validate_head_files_helper(['a', 'b'])
def test_validate_head_files_wrong_contents(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='c',
expand_head='d')
@mock.patch.object(fileutils, 'delete_if_exists')
def test_update_head_files_success(self, *mocks):
heads = ['a', 'b']
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
self.configs[0]))).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
self.configs[0]))).mock_open
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
cli.update_head_files(self.configs[0])
mock_open_con.return_value.write.assert_called_with(
heads[0] + '\n')
mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n')
old_head_file = cli._get_head_file_path(
self.configs[0])
old_heads_file = cli._get_heads_file_path(
self.configs[0])
delete_if_exists = mocks[0]
self.assertIn(mock.call(old_head_file),
delete_if_exists.call_args_list)
self.assertIn(mock.call(old_heads_file),
delete_if_exists.call_args_list)
def test_get_project_base(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
proj_base = cli._get_project_base(config)
self.assertEqual('a', proj_base)
def test_get_root_versions_dir(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
versions_dir = cli._get_root_versions_dir(config)
self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)
def test_get_subproject_script_location(self):
foo_ep = cli._get_subproject_script_location('networking-foo')
expected = 'networking_foo.db.migration:alembic_migrations'
self.assertEqual(expected, foo_ep)
def test_get_subproject_script_location_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_script_location, 'not-installed')
def test_get_subproject_base_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_base, 'not-installed')
def test__compare_labels_ok(self):
labels = {'label1', 'label2'}
fake_revision = FakeRevision(labels)
cli._compare_labels(fake_revision, {'label1', 'label2'})
def test__compare_labels_fail_unexpected_labels(self):
labels = {'label1', 'label2', 'label3'}
fake_revision = FakeRevision(labels)
self.assertRaises(
SystemExit,
cli._compare_labels, fake_revision, {'label1', 'label2'})
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branchless_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(script_dir, fake_revision,
label=None)
expected_labels = set()
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branches_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(
script_dir, fake_revision, label='fakebranch')
expected_labels = {'fakebranch'}
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branches(self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
branch = cli.MIGRATION_BRANCHES[0]
fake_revision.path = os.path.join('/fake/path', branch)
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(
script_dir, fake_revision, label=branch)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branchless_migrations(
self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(script_dir, fake_revision)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_walks_thru_all_revisions(
self, walk_mock, validate_mock):
revisions = [FakeRevision() for i in range(10)]
walk_mock.return_value = revisions
cli.validate_revisions(self.configs[0])
validate_mock.assert_has_calls(
[mock.call(mock.ANY, revision) for revision in revisions]
)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_fails_on_multiple_branch_points(
self, walk_mock, validate_mock):
revisions = [FakeRevision(is_branch_point=True) for i in range(2)]
walk_mock.return_value = revisions
self.assertRaises(
SystemExit, cli.validate_revisions, self.configs[0])
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__get_branch_points(self, walk_mock):
revisions = [FakeRevision(is_branch_point=tools.get_random_boolean)
for i in range(50)]
walk_mock.return_value = revisions
script_dir = alembic_script.ScriptDirectory.from_config(
self.configs[0])
self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),
set(cli._get_branch_points(script_dir)))
@mock.patch.object(cli, '_get_version_branch_path')
def test_autogen_process_directives(self, get_version_branch_path):
get_version_branch_path.side_effect = lambda cfg, release, branch: (
"/foo/expand" if branch == 'expand' else "/foo/contract")
migration_script = alembic_ops.MigrationScript(
'eced083f5df',
# these directives will be split into separate
# expand/contract scripts
alembic_ops.UpgradeOps(
ops=[
alembic_ops.CreateTableOp(
'organization',
[
sa.Column('id', sa.Integer(), primary_key=True),
sa.Column('name', sa.String(50), nullable=False)
]
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.AddColumnOp(
'user',
sa.Column('organization_id', sa.Integer())
),
alembic_ops.CreateForeignKeyOp(
'org_fk', 'user', 'organization',
['organization_id'], ['id']
),
alembic_ops.DropConstraintOp(
'user', 'uq_user_org'
),
alembic_ops.DropColumnOp(
'user', 'organization_name'
)
]
)
]
),
# these will be discarded
alembic_ops.DowngradeOps(
ops=[
alembic_ops.AddColumnOp(
'user', sa.Column(
'organization_name', sa.String(50), nullable=True)
),
alembic_ops.CreateUniqueConstraintOp(
'uq_user_org', 'user',
['user_name', 'organization_name']
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.DropConstraintOp('org_fk', 'user'),
alembic_ops.DropColumnOp('user', 'organization_id')
]
),
alembic_ops.DropTableOp('organization')
]
),
message='create the organization table and '
'replace user.organization_name'
)
directives = [migration_script]
autogen.process_revision_directives(
mock.Mock(), mock.Mock(), directives
)
expand = directives[0]
contract = directives[1]
self.assertEqual("/foo/expand", expand.version_path)
self.assertEqual("/foo/contract", contract.version_path)
self.assertTrue(expand.downgrade_ops.is_empty())
self.assertTrue(contract.downgrade_ops.is_empty())
def _get_regex(s):
s = textwrap.dedent(s)
s = re.escape(s)
# alembic 0.8.9 added additional leading '# ' before comments
return s.replace('\\#\\#\\#\\ ', '(# )?### ')
expected_regex = ("""\
### commands auto generated by Alembic - please adjust! ###
op.create_table('organization',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.add_column('user', """
"""sa.Column('organization_id', sa.Integer(), nullable=True))
op.create_foreign_key('org_fk', 'user', """
"""'organization', ['organization_id'], ['id'])
### end Alembic commands ###""")
self.assertThat(
alembic_ag_api.render_python_code(expand.upgrade_ops),
matchers.MatchesRegex(_get_regex(expected_regex)))
expected_regex = ("""\
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('user', 'uq_user_org', type_=None)
op.drop_column('user', 'organization_name')
### end Alembic commands ###""")
self.assertThat(
alembic_ag_api.render_python_code(contract.upgrade_ops),
matchers.MatchesRegex(_get_regex(expected_regex)))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_one_branch(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.CONTRACT_BRANCH)
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.EXPAND_BRANCH)
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_two_branches(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(2, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_branchless(self, walk_mock):
revisions = [FakeRevision() for r in range(5)]
revisions[2].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = revisions
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
class TestSafetyChecks(base.BaseTestCase):
def test_validate_revisions(self, *mocks):
cli.validate_revisions(cli.get_neutron_config())
| 40.469577
| 79
| 0.610557
| 3,337
| 30,595
| 5.267006
| 0.126161
| 0.023498
| 0.015931
| 0.019458
| 0.583921
| 0.508421
| 0.440658
| 0.379267
| 0.344447
| 0.312358
| 0
| 0.004247
| 0.284295
| 30,595
| 755
| 80
| 40.523179
| 0.79842
| 0.034581
| 0
| 0.348185
| 0
| 0
| 0.117687
| 0.030498
| 0
| 0
| 0
| 0
| 0.087459
| 1
| 0.117162
| false
| 0
| 0.037954
| 0.00165
| 0.173267
| 0.00165
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be26276b9a7545ff4607b3e77287b80155ccbf7d
| 959
|
py
|
Python
|
withdrawal/floor_ceiling.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 23
|
2016-09-07T06:13:37.000Z
|
2022-02-17T23:49:03.000Z
|
withdrawal/floor_ceiling.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | null | null | null |
withdrawal/floor_ceiling.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 12
|
2016-06-30T17:27:39.000Z
|
2021-12-12T07:54:27.000Z
|
from decimal import Decimal
from .abc import WithdrawalStrategy
# Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money
class FloorCeiling(WithdrawalStrategy):
def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25):
super().__init__(portfolio, harvest_strategy)
self.floor = Decimal(floor)
self.ceiling = Decimal(ceiling)
self.rate = Decimal(rate)
def start(self):
amount = self.rate * self.portfolio.value
self.initial_amount = amount
return amount
def next(self):
amount = self.rate * self.portfolio.value
initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation
floor = initial_amount_inflation_adjusted * self.floor
ceiling = initial_amount_inflation_adjusted * self.ceiling
amount = max(amount, floor)
amount = min(amount, ceiling)
return amount
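# Illustrative sketch (not part of the original file): a standalone rework of the
# same floor-to-ceiling clamp, assuming the default 5% rate, 0.9 floor and 1.25
# ceiling; the portfolio values and inflation factor below are made-up numbers.
from decimal import Decimal

def clamped_withdrawal(portfolio_value, initial_amount, cumulative_inflation,
                       rate=Decimal('0.05'), floor=Decimal('0.9'),
                       ceiling=Decimal('1.25')):
    amount = rate * portfolio_value
    base = initial_amount * cumulative_inflation  # inflation-adjusted first withdrawal
    return min(max(amount, base * floor), base * ceiling)

# Year 1: 1,000,000 portfolio -> first withdrawal of 50,000.
initial = Decimal('0.05') * Decimal('1000000')
# Year 2: portfolio fell to 800,000 with 3% cumulative inflation; the raw 5%
# withdrawal (40,000) is below the floor of 0.9 * 51,500, so the floor is paid.
print(clamped_withdrawal(Decimal('800000'), initial, Decimal('1.03')))  # 46350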
| 30.935484
| 91
| 0.693431
| 113
| 959
| 5.690265
| 0.380531
| 0.101089
| 0.102644
| 0.139969
| 0.270607
| 0.111975
| 0.111975
| 0
| 0
| 0
| 0
| 0.008075
| 0.225235
| 959
| 30
| 92
| 31.966667
| 0.857335
| 0.077164
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be2647506be1ffc3fcefa8eacc15a737776b73ab
| 8,288
|
py
|
Python
|
20190426/6_BME280_WiFi/bme280.py
|
rcolistete/MicroPython_MiniCurso_ProjOrientado
|
c82affe833587141c4c05ee08ea84b095bfe845f
|
[
"MIT"
] | null | null | null |
20190426/6_BME280_WiFi/bme280.py
|
rcolistete/MicroPython_MiniCurso_ProjOrientado
|
c82affe833587141c4c05ee08ea84b095bfe845f
|
[
"MIT"
] | null | null | null |
20190426/6_BME280_WiFi/bme280.py
|
rcolistete/MicroPython_MiniCurso_ProjOrientado
|
c82affe833587141c4c05ee08ea84b095bfe845f
|
[
"MIT"
] | null | null | null |
"""
MicroPython driver for Bosch BME280 temperature, pressure and humidity I2C sensor:
https://www.bosch-sensortec.com/bst/products/all_products/bme280
Authors: Nelio Goncalves Godoi, Roberto Colistete Jr
Version: 3.1.2 @ 2018/04
License: MIT License (https://opensource.org/licenses/MIT)
"""
import time
from ustruct import unpack, unpack_from
from array import array
# BME280 default address
BME280_I2CADDR = 0x76
# BME280_I2CADDR = 0x77
OSAMPLE_0 = 0
OSAMPLE_1 = 1
OSAMPLE_2 = 2
OSAMPLE_4 = 3
OSAMPLE_8 = 4
OSAMPLE_16 = 5
BME280_REGISTER_STATUS = 0xF3
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
BME280_REGISTER_CONTROL_IIR = 0xF5
FILTER_OFF = 0
FILTER_2 = 1
FILTER_4 = 2
FILTER_8 = 3
FILTER_16 = 4
CELSIUS = 'C'
FAHRENHEIT = 'F'
KELVIN = 'K'
class BME280(object):
def __init__(self,
temperature_mode=OSAMPLE_2,
pressure_mode=OSAMPLE_16,
humidity_mode=OSAMPLE_1,
temperature_scale=CELSIUS,
iir=FILTER_16,
address=BME280_I2CADDR,
i2c=None):
osamples = [
OSAMPLE_0,
OSAMPLE_1,
OSAMPLE_2,
OSAMPLE_4,
OSAMPLE_8,
OSAMPLE_16]
msg_error = 'Unexpected {} operating mode value {}.'
if temperature_mode not in osamples:
raise ValueError(msg_error.format("temperature", temperature_mode))
self.temperature_mode = temperature_mode
if pressure_mode not in osamples:
raise ValueError(msg_error.format("pressure", pressure_mode))
self.pressure_mode = pressure_mode
if humidity_mode not in osamples:
raise ValueError(msg_error.format("humidity", humidity_mode))
self.humidity_mode = humidity_mode
msg_error = 'Unexpected low pass IIR filter setting value {0}.'
if iir not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]:
raise ValueError(msg_error.format(iir))
self.iir = iir
msg_error = 'Unexpected temperature scale value {0}.'
if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]:
raise ValueError(msg_error.format(temperature_scale))
self.temperature_scale = temperature_scale
del msg_error
self.address = address
if i2c is None:
raise ValueError('An I2C object is required.')
self.i2c = i2c
dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \
self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \
self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \
_, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
self.dig_H2, self.dig_H3 = unpack("<hB", dig_e1_e7)
e4_sign = unpack_from("<b", dig_e1_e7, 3)[0]
self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF)
e6_sign = unpack_from("<b", dig_e1_e7, 5)[0]
self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >> 4)
self.dig_H6 = unpack_from("<b", dig_e1_e7, 6)[0]
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL,
bytearray([0x24]))
time.sleep(0.002)
self.t_fine = 0
self._l1_barray = bytearray(1)
self._l8_barray = bytearray(8)
self._l3_resultarray = array("i", [0, 0, 0])
self._l1_barray[0] = self.iir << 2
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL_IIR,
self._l1_barray)
time.sleep(0.002)
self._l1_barray[0] = self.humidity_mode
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL_HUM,
self._l1_barray)
def read_raw_data(self, result):
self._l1_barray[0] = (
self.pressure_mode << 5 |
self.temperature_mode << 2 | 1)
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL,
self._l1_barray)
osamples_1_16 = [
OSAMPLE_1,
OSAMPLE_2,
OSAMPLE_4,
OSAMPLE_8,
OSAMPLE_16]
sleep_time = 1250
if self.temperature_mode in osamples_1_16:
sleep_time += 2300*(1 << self.temperature_mode)
if self.pressure_mode in osamples_1_16:
sleep_time += 575 + (2300*(1 << self.pressure_mode))
if self.humidity_mode in osamples_1_16:
sleep_time += 575 + (2300*(1 << self.humidity_mode))
time.sleep_us(sleep_time)
while (unpack('<H',
self.i2c.readfrom_mem(
self.address,
BME280_REGISTER_STATUS, 2))[0] & 0x08):
time.sleep(0.001)
self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
readout = self._l8_barray
raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
raw_hum = (readout[6] << 8) | readout[7]
result[0] = raw_temp
result[1] = raw_press
result[2] = raw_hum
def read_compensated_data(self, result=None):
""" Get raw data and compensa the same """
self.read_raw_data(self._l3_resultarray)
raw_temp, raw_press, raw_hum = self._l3_resultarray
var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)
var2 = (raw_temp >> 4) - self.dig_T1
var2 = var2 * ((raw_temp >> 4) - self.dig_T1)
var2 = ((var2 >> 12) * self.dig_T3) >> 14
self.t_fine = var1 + var2
temp = (self.t_fine * 5 + 128) >> 8
var1 = self.t_fine - 128000
var2 = var1 * var1 * self.dig_P6
var2 = var2 + ((var1 * self.dig_P5) << 17)
var2 = var2 + (self.dig_P4 << 35)
var1 = (((var1 * var1 * self.dig_P3) >> 8) +
((var1 * self.dig_P2) << 12))
var1 = (((1 << 47) + var1) * self.dig_P1) >> 33
if var1 == 0:
pressure = 0
else:
p = 1048576 - raw_press
p = (((p << 31) - var2) * 3125) // var1
var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25
var2 = (self.dig_P8 * p) >> 19
pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)
h = self.t_fine - 76800
h = (((((raw_hum << 14) - (self.dig_H4 << 20) -
(self.dig_H5 * h)) + 16384)
>> 15) * (((((((h * self.dig_H6) >> 10) *
(((h * self.dig_H3) >> 11) + 32768)) >> 10) +
2097152) * self.dig_H2 + 8192) >> 14))
h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4)
h = 0 if h < 0 else h
h = 419430400 if h > 419430400 else h
humidity = h >> 12
if result:
result[0] = temp
result[1] = pressure
result[2] = humidity
return result
return array("i", (temp, pressure, humidity))
@property
def values(self):
temp, pres, humi = self.read_compensated_data()
temp = temp/100
if self.temperature_scale == 'F':
temp = 32 + (temp*1.8)
elif self.temperature_scale == 'K':
temp = temp + 273.15
pres = pres/256
humi = humi/1024
return (temp, pres, humi)
@property
def formated_values(self):
t, p, h = self.values
temp = "{} "+self.temperature_scale
return (temp.format(t), "{} Pa".format(p), "{} %".format(h))
@property
def temperature(self):
t, _, _ = self.values
return t
@property
def pressure(self):
_, p, _ = self.values
return p
@property
def pressure_precision(self):
_, p, _ = self.read_compensated_data()
pi = float(p // 256)
pd = (p % 256)/256
return (pi, pd)
@property
def humidity(self):
_, _, h = self.values
return h
def altitude(self, pressure_sea_level=1013.25):
pi, pd = self.pressure_precision
return 44330*(1-((float(pi+pd)/100)/pressure_sea_level)**(1/5.255))
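# Illustrative sketch (not part of the original driver): minimal usage on a
# MicroPython board, assuming the common ESP32 I2C pins (SCL=22, SDA=21); the
# pin numbers are an assumption for illustration only.
from machine import I2C, Pin

i2c = I2C(scl=Pin(22), sda=Pin(21), freq=100000)
sensor = BME280(i2c=i2c)          # defaults: 2x temp, 16x pressure, 1x humidity oversampling
temperature, pressure, humidity = sensor.values
print(sensor.formated_values)     # e.g. ('24.3 C', '101325.0 Pa', '45.2 %')
print(sensor.altitude())          # metres above the 1013.25 hPa reference level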
| 33.554656
| 81
| 0.558518
| 1,075
| 8,288
| 4.064186
| 0.205581
| 0.060884
| 0.033646
| 0.026322
| 0.211032
| 0.18265
| 0.140993
| 0.124971
| 0.124971
| 0.036164
| 0
| 0.094477
| 0.320584
| 8,288
| 246
| 82
| 33.691057
| 0.681407
| 0.04404
| 0
| 0.156098
| 0
| 0
| 0.028456
| 0
| 0
| 0
| 0.005438
| 0
| 0
| 1
| 0.04878
| false
| 0.004878
| 0.014634
| 0
| 0.112195
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be2674ce54565aac0c872fd9c167bb04e3da2fda
| 9,749
|
py
|
Python
|
airflow/contrib/secrets/hashicorp_vault.py
|
colpal/airfloss
|
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/contrib/secrets/hashicorp_vault.py
|
colpal/airfloss
|
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 7
|
2020-10-05T18:20:16.000Z
|
2022-02-01T00:54:35.000Z
|
airflow/contrib/secrets/hashicorp_vault.py
|
colpal/airfloss
|
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-10-21T03:22:43.000Z
|
2020-10-21T03:22:43.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Objects relating to sourcing connections & variables from Hashicorp Vault
"""
from typing import Optional
import hvac
from cached_property import cached_property
from hvac.exceptions import InvalidPath, VaultError
from airflow.exceptions import AirflowException
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
class VaultBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connections and Variables from Hashicorp Vault
Configurable via ``airflow.cfg`` as follows:
.. code-block:: ini
[secrets]
backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend
backend_kwargs = {
"connections_path": "connections",
"url": "http://127.0.0.1:8200",
"mount_point": "airflow"
}
For example, if your keys are under ``connections`` path in ``airflow`` mount_point, this
would be accessible if you provide ``{"connections_path": "connections"}`` and request
conn_id ``smtp_default``.
:param connections_path: Specifies the path of the secret to read to get Connections.
(default: 'connections')
:type connections_path: str
:param variables_path: Specifies the path of the secret to read to get Variables.
(default: 'variables')
:type variables_path: str
:param config_path: Specifies the path of the secret to read Airflow Configurations
(default: 'configs').
:type config_path: str
:param url: Base URL for the Vault instance being addressed.
:type url: str
:param auth_type: Authentication Type for Vault (one of 'token', 'ldap', 'userpass', 'approle',
'github', 'gcp', 'kubernetes'). Default is ``token``.
:type auth_type: str
:param mount_point: The "path" the secret engine was mounted on. (Default: ``secret``)
:type mount_point: str
:param token: Authentication token to include in requests sent to Vault.
(for ``token`` and ``github`` auth_type)
:type token: str
:param kv_engine_version: Select the version of the engine to run (``1`` or ``2``, default: ``2``)
:type kv_engine_version: int
:param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type)
:type username: str
:param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type)
:type password: str
:param role_id: Role ID for Authentication (for ``approle`` auth_type)
:type role_id: str
:param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type)
:type kubernetes_role: str
:param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
``/var/run/secrets/kubernetes.io/serviceaccount/token``)
:type kubernetes_jwt_path: str
:param secret_id: Secret ID for Authentication (for ``approle`` auth_type)
:type secret_id: str
:param gcp_key_path: Path to GCP Credential JSON file (for ``gcp`` auth_type)
:type gcp_key_path: str
:param gcp_scopes: Comma-separated string containing GCP scopes (for ``gcp`` auth_type)
:type gcp_scopes: str
"""
def __init__( # pylint: disable=too-many-arguments
self,
connections_path='connections', # type: str
variables_path='variables', # type: str
config_path='config', # type: str
url=None, # type: Optional[str]
auth_type='token', # type: str
mount_point='secret', # type: str
kv_engine_version=2, # type: int
token=None, # type: Optional[str]
username=None, # type: Optional[str]
password=None, # type: Optional[str]
role_id=None, # type: Optional[str]
kubernetes_role=None, # type: Optional[str]
kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str
secret_id=None, # type: Optional[str]
gcp_key_path=None, # type: Optional[str]
gcp_scopes=None, # type: Optional[str]
**kwargs
):
super(VaultBackend, self).__init__()
self.connections_path = connections_path.rstrip('/')
if variables_path is not None:
self.variables_path = variables_path.rstrip('/')
else:
self.variables_path = variables_path
self.config_path = config_path.rstrip('/')
self.url = url
self.auth_type = auth_type
self.kwargs = kwargs
self.token = token
self.username = username
self.password = password
self.role_id = role_id
self.kubernetes_role = kubernetes_role
self.kubernetes_jwt_path = kubernetes_jwt_path
self.secret_id = secret_id
self.mount_point = mount_point
self.kv_engine_version = kv_engine_version
self.gcp_key_path = gcp_key_path
self.gcp_scopes = gcp_scopes
@cached_property
def client(self):
# type: () -> hvac.Client
"""
Return an authenticated Hashicorp Vault client
"""
_client = hvac.Client(url=self.url, **self.kwargs)
if self.auth_type == "token":
if not self.token:
raise VaultError("token cannot be None for auth_type='token'")
_client.token = self.token
elif self.auth_type == "ldap":
_client.auth.ldap.login(
username=self.username, password=self.password)
elif self.auth_type == "userpass":
_client.auth_userpass(username=self.username, password=self.password)
elif self.auth_type == "approle":
_client.auth_approle(role_id=self.role_id, secret_id=self.secret_id)
elif self.auth_type == "kubernetes":
if not self.kubernetes_role:
raise VaultError("kubernetes_role cannot be None for auth_type='kubernetes'")
with open(self.kubernetes_jwt_path) as f:
jwt = f.read()
_client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt)
elif self.auth_type == "github":
_client.auth.github.login(token=self.token)
elif self.auth_type == "gcp":
from airflow.contrib.utils.gcp_credentials_provider import (
get_credentials_and_project_id,
_get_scopes
)
scopes = _get_scopes(self.gcp_scopes)
credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes)
_client.auth.gcp.configure(credentials=credentials)
else:
raise AirflowException("Authentication type '{}' not supported".format(self.auth_type))
if _client.is_authenticated():
return _client
else:
raise VaultError("Vault Authentication Error!")
def get_conn_uri(self, conn_id):
# type: (str) -> Optional[str]
"""
Get secret value from Vault. Store the secret in the form of URI
:param conn_id: connection id
:type conn_id: str
"""
response = self._get_secret(self.connections_path, conn_id)
return response.get("conn_uri") if response else None
def get_variable(self, key):
# type: (str) -> Optional[str]
"""
Get Airflow Variable
:param key: Variable Key
:return: Variable Value
"""
if self.variables_path is None:
return None
else:
response = self._get_secret(self.variables_path, key)
return response.get("value") if response else None
def _get_secret(self, path_prefix, secret_id):
# type: (str, str) -> Optional[dict]
"""
Get secret value from Vault.
:param path_prefix: Prefix for the Path to get Secret
:type path_prefix: str
:param secret_id: Secret Key
:type secret_id: str
"""
secret_path = self.build_path(path_prefix, secret_id)
try:
if self.kv_engine_version == 1:
response = self.client.secrets.kv.v1.read_secret(
path=secret_path, mount_point=self.mount_point
)
else:
response = self.client.secrets.kv.v2.read_secret_version(
path=secret_path, mount_point=self.mount_point)
except InvalidPath:
self.log.info("Secret %s not found in Path: %s", secret_id, secret_path)
return None
return_data = response["data"] if self.kv_engine_version == 1 else response["data"]["data"]
return return_data
def get_config(self, key):
# type: (str) -> Optional[str]
"""
Get Airflow Configuration
:param key: Configuration Option Key
:type key: str
:rtype: str
:return: Configuration Option Value retrieved from the vault
"""
response = self._get_secret(self.config_path, key)
return response.get("value") if response else None
| 40.452282
| 102
| 0.647656
| 1,206
| 9,749
| 5.063847
| 0.190713
| 0.031439
| 0.02358
| 0.028001
| 0.232848
| 0.158343
| 0.131816
| 0.121991
| 0.068937
| 0.047486
| 0
| 0.003041
| 0.257975
| 9,749
| 240
| 103
| 40.620833
| 0.841167
| 0.441379
| 0
| 0.080357
| 0
| 0
| 0.072175
| 0.014676
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0.044643
| 0.071429
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be27d0cf506bd514ef2b8fd412eba196789b1b66
| 6,347
|
py
|
Python
|
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
|
AdamCoscia/eve-trajectory-mining
|
134f142a5665f66fbf92aada8dd6252fab64ddff
|
[
"MIT"
] | null | null | null |
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
|
AdamCoscia/eve-trajectory-mining
|
134f142a5665f66fbf92aada8dd6252fab64ddff
|
[
"MIT"
] | null | null | null |
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
|
AdamCoscia/eve-trajectory-mining
|
134f142a5665f66fbf92aada8dd6252fab64ddff
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Computes distance between killmails by text similarity.
Edit Distance Metrics
- Levenshtein Distance
- Damerau-Levenshtein Distance
- Jaro Distance
- Jaro-Winkler Distance
- Match Rating Approach Comparison
- Hamming Distance
Vector Distance Metrics
- Jaccard Similarity
- Cosine Distance
Written By: Adam Coscia
Updated On: 11/09/2019
"""
# Start timing
import time
start = time.time()
total = 0
def lap(msg):
"""Records time elapsed."""
global start, total
elapsed = (time.time() - start) - total
total = time.time() - start
if elapsed > 3600:
print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')
elif elapsed > 60:
if total > 3600:
print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')
else:
print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')
else:
if total > 3600:
print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')
elif total > 60:
print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')
else:
print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')
lap("Importing modules...")
from ast import literal_eval
from functools import reduce
import os
import sys
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def get_long_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of long text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
def get_short_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of short text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical and 0
being completely different.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
# Load CSV from local file
lap("Loading CSV data from local file...")
df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8')
df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill'])
df = df.dropna()
# Convert items column to correct data type
lap("Converting 'item' column value types...")
df['items'] = df['items'].apply(literal_eval)
# Group DataFrame by character_id and compute distance series for each group
lap("Computing cosine distances and change in kd by grouping character_id's...")
groupby = df.groupby('character_id') # group dataframe by character_id
num_groups = len(groupby) # get number of groups
count = 0 # current group number out of number of groups
groups = [] # list to append modified group dataframes to
for name, gp in groupby:
# Order the observations and prepare the dataframe
gp = (gp.sort_values(by=['killmail_id'])
.reset_index()
.drop('index', axis=1))
# Generate change in kills over change in deaths and change in kd ratio
kills1 = gp['k_count']
kills2 = gp['k_count'].shift()
deaths1 = gp['d_count']
deaths2 = gp['d_count'].shift()
idx = len(gp.columns)
gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1))
gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift())
# Generate pairs of observations sequentially to compare
pairs = []
items1 = gp['items']
items2 = gp['items'].shift()
for i in range(1, len(gp)): # Start from 1 to avoid adding nan pair
los1 = items1.iloc[i]
los2 = items2.iloc[i]
pairs.append((los2, los1))
# Generate distance series using pairs list and different metrics
# start distance series with nan due to starting range at 1
cos_dist_lt = [np.nan] # cosine distance b/w long text BoW
cos_dist_st = [np.nan] # cosine distance b/w short text BoW
for pair in pairs:
cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1]))
cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1]))
idx = len(gp.columns)
gp.insert(idx, 'cos_dist_lt', cos_dist_lt)
gp.insert(idx, 'cos_dist_st', cos_dist_st)
groups.append(gp)
# Record progress
count += 1
print(f"Progress {count/num_groups:2.1%}", end="\r")
lap("Concatenating resulting groups and writing to file...")
df_res = pd.concat(groups)
df_res.to_csv(f'data/useable_victims_distancesAndKD.csv')
lap("Exit")
| 37.556213
| 92
| 0.669293
| 949
| 6,347
| 4.401475
| 0.272919
| 0.043572
| 0.018674
| 0.014364
| 0.490065
| 0.463012
| 0.445296
| 0.389753
| 0.378262
| 0.378262
| 0
| 0.031019
| 0.212699
| 6,347
| 168
| 93
| 37.779762
| 0.804883
| 0.396565
| 0
| 0.223404
| 0
| 0.031915
| 0.218547
| 0.088124
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031915
| false
| 0
| 0.106383
| 0
| 0.202128
| 0.074468
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be28146fdfcf8ed2a16239294869650841f46a74
| 1,181
|
py
|
Python
|
src/chess/utils.py
|
Dalkio/custom-alphazero
|
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
|
[
"MIT"
] | null | null | null |
src/chess/utils.py
|
Dalkio/custom-alphazero
|
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
|
[
"MIT"
] | 6
|
2020-08-13T13:02:58.000Z
|
2022-02-10T02:21:49.000Z
|
src/chess/utils.py
|
Dalkio/custom-alphazero
|
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
|
[
"MIT"
] | null | null | null |
import numpy as np
from itertools import product
from typing import List
from src.config import ConfigChess
from src.chess.board import Board
from src.chess.move import Move
def get_all_possible_moves() -> List[Move]:
all_possible_moves = set()
array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype("int8")
for i, j, piece in product(
range(ConfigChess.board_size), range(ConfigChess.board_size), ["Q", "N"]
):
array[i][j] = Board.piece_symbol_to_int(piece)
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[i][j] = 0
# underpromotion moves
array[1, :] = Board.piece_symbol_to_int("P")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[0, :] = Board.piece_symbol_to_int("p")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
# no need to add castling moves: they have already been added with queen moves under UCI notation
return sorted(list(all_possible_moves))
| 36.90625
| 99
| 0.686706
| 172
| 1,181
| 4.546512
| 0.354651
| 0.084399
| 0.122762
| 0.069054
| 0.391304
| 0.36445
| 0.36445
| 0.36445
| 0.36445
| 0.36445
| 0
| 0.00418
| 0.18967
| 1,181
| 31
| 100
| 38.096774
| 0.812957
| 0.096528
| 0
| 0.230769
| 0
| 0
| 0.007519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.230769
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be2868ed0261dc37f256c2a99990b52d127544a4
| 1,845
|
py
|
Python
|
multirotor.py
|
christymarc/mfac
|
29449a0c79e618059fa6f67ae7ab76711543c513
|
[
"MIT"
] | null | null | null |
multirotor.py
|
christymarc/mfac
|
29449a0c79e618059fa6f67ae7ab76711543c513
|
[
"MIT"
] | null | null | null |
multirotor.py
|
christymarc/mfac
|
29449a0c79e618059fa6f67ae7ab76711543c513
|
[
"MIT"
] | 1
|
2022-03-01T05:00:02.000Z
|
2022-03-01T05:00:02.000Z
|
from random import gauss
class MultiRotor:
"""Simple vertical dynamics for a multirotor vehicle."""
GRAVITY = -9.81
def __init__(
self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1
):
"""
Args:
altitude (float): initial altitude of the vehicle
velocity (float): initial velocity of the vehicle
mass (float): mass of the vehicle
emc (float): electromechanical constant for the vehicle
dt (float): simulation time step
noise (float): standard deviation of normally distributed simulation noise
"""
self.y0 = altitude
self.y1 = velocity
self.mass = mass
self.emc = emc
self.dt = dt
self.noise = noise
def step(self, effort):
"""Advance the multirotor simulation and apply motor forces.
Args:
effort (float): related to the upward thrust of the vehicle,
it must be >= 0
Return:
The current state (altitude, velocity) of the vehicle.
"""
effort = max(0, effort)
scaled_effort = self.emc / self.mass * effort
net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort
# Don't let the vehicle fall through the ground
if self.y0 <= 0 and net_acceleration < 0:
y0dot = 0
y1dot = 0
else:
y0dot = self.y1
y1dot = net_acceleration
self.y0 += y0dot * self.dt
self.y1 += y1dot * self.dt
self.y0 += gauss(0, self.noise)
return self.y0, self.y1
def get_altitude(self):
"""Return the current altitude."""
return self.y0
def get_delta_time(self):
"""Return the simulation time step."""
return self.dt
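# Illustrative sketch (not part of the original module): drive the simulation
# toward a made-up target altitude with a naive proportional controller; the
# gain and hover bias are illustrative values only.
if __name__ == '__main__':
    vehicle = MultiRotor(altitude=10, noise=0.0)
    target = 15.0
    altitude = vehicle.get_altitude()
    for _ in range(200):                          # 200 steps * 0.05 s = 10 s simulated
        effort = 1.5 + 0.5 * (target - altitude)  # hover bias + proportional term
        altitude, velocity = vehicle.step(effort)
    print(round(altitude, 2), round(velocity, 2))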
| 27.132353
| 86
| 0.566938
| 225
| 1,845
| 4.595556
| 0.351111
| 0.058027
| 0.058027
| 0.038685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036454
| 0.345799
| 1,845
| 67
| 87
| 27.537313
| 0.820215
| 0.384824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.033333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be286e006cd7ef8775677a3d599b4cc9bc55f723
| 6,329
|
py
|
Python
|
stpmex/client.py
|
cuenca-mx/stpmex-python
|
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
|
[
"MIT"
] | 37
|
2019-01-06T02:52:38.000Z
|
2022-03-17T21:19:48.000Z
|
stpmex/client.py
|
cuenca-mx/stpmex-python
|
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
|
[
"MIT"
] | 204
|
2018-09-05T22:55:33.000Z
|
2022-03-31T23:21:13.000Z
|
stpmex/client.py
|
cuenca-mx/stpmex-python
|
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
|
[
"MIT"
] | 20
|
2018-09-17T15:29:51.000Z
|
2022-02-03T06:29:32.000Z
|
import re
from typing import Any, ClassVar, Dict, List, NoReturn, Union
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from requests import Response, Session
from .exc import (
AccountDoesNotExist,
BankCodeClabeMismatch,
ClaveRastreoAlreadyInUse,
DuplicatedAccount,
InvalidAccountType,
InvalidAmount,
InvalidField,
InvalidInstitution,
InvalidPassphrase,
InvalidRfcOrCurp,
InvalidTrackingKey,
MandatoryField,
NoOrdenesEncontradas,
NoServiceResponse,
PldRejected,
SameAccount,
SignatureValidationError,
StpmexException,
)
from .resources import CuentaFisica, Orden, Resource, Saldo
from .version import __version__ as client_version
DEMO_HOST = 'https://demo.stpmex.com:7024'
PROD_HOST = 'https://prod.stpmex.com'
class Client:
base_url: str
soap_url: str
session: Session
# resources
cuentas: ClassVar = CuentaFisica
ordenes: ClassVar = Orden
saldos: ClassVar = Saldo
def __init__(
self,
empresa: str,
priv_key: str,
priv_key_passphrase: str,
demo: bool = False,
base_url: str = None,
soap_url: str = None,
timeout: tuple = None,
):
self.timeout = timeout
self.session = Session()
self.session.headers['User-Agent'] = f'stpmex-python/{client_version}'
if demo:
host_url = DEMO_HOST
self.session.verify = False
else:
host_url = PROD_HOST
self.session.verify = True
self.base_url = base_url or f'{host_url}/speiws/rest'
self.soap_url = (
soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices'
)
try:
self.pkey = serialization.load_pem_private_key(
priv_key.encode('utf-8'),
priv_key_passphrase.encode('ascii'),
default_backend(),
)
except (ValueError, TypeError, UnsupportedAlgorithm):
raise InvalidPassphrase
Resource.empresa = empresa
Resource._client = self
def post(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('post', endpoint, data)
def put(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('put', endpoint, data)
def delete(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('delete', endpoint, data)
def request(
self, method: str, endpoint: str, data: Dict[str, Any], **kwargs: Any
) -> Union[Dict[str, Any], List[Any]]:
url = self.base_url + endpoint
response = self.session.request(
method,
url,
json=data,
timeout=self.timeout,
**kwargs,
)
self._check_response(response)
resultado = response.json()
if 'resultado' in resultado: # Some responses are enveloped
resultado = resultado['resultado']
return resultado
@staticmethod
def _check_response(response: Response) -> None:
if not response.ok:
response.raise_for_status()
resp = response.json()
if isinstance(resp, dict):
try:
_raise_description_error_exc(resp)
except KeyError:
...
try:
assert resp['descripcion']
_raise_description_exc(resp)
except (AssertionError, KeyError):
...
response.raise_for_status()
def _raise_description_error_exc(resp: Dict) -> NoReturn:
id = resp['resultado']['id']
error = resp['resultado']['descripcionError']
if id == 0 and error == 'No se recibió respuesta del servicio':
raise NoServiceResponse(**resp['resultado'])
elif id == 0 and error == 'Error validando la firma':
raise SignatureValidationError(**resp['resultado'])
elif id == 0 and re.match(r'El campo .+ es obligatorio', error):
raise MandatoryField(**resp['resultado'])
elif id == -1 and re.match(
r'La clave de rastreo .+ ya fue utilizada', error
):
raise ClaveRastreoAlreadyInUse(**resp['resultado'])
elif id == -7 and re.match(r'La cuenta .+ no existe', error):
raise AccountDoesNotExist(**resp['resultado'])
elif id == -9 and re.match(r'La Institucion \d+ no es valida', error):
raise InvalidInstitution(**resp['resultado'])
elif id == -11 and re.match(r'El tipo de cuenta \d+ es invalido', error):
raise InvalidAccountType(**resp['resultado'])
elif id == -20 and re.match(r'El monto {.+} no es válido', error):
raise InvalidAmount(**resp['resultado'])
elif id == -22 and 'no coincide para la institucion operante' in error:
raise BankCodeClabeMismatch(**resp['resultado'])
elif id == -24 and re.match(r'Cuenta {\d+} - {MISMA_CUENTA}', error):
raise SameAccount(**resp['resultado'])
elif id == -34 and 'Clave rastreo invalida' in error:
raise InvalidTrackingKey(**resp['resultado'])
elif id == -100 and error.startswith('No se encontr'):
raise NoOrdenesEncontradas
elif id == -200 and 'Se rechaza por PLD' in error:
raise PldRejected(**resp['resultado'])
else:
raise StpmexException(**resp['resultado'])
def _raise_description_exc(resp: Dict) -> NoReturn:
id = resp['id']
desc = resp['descripcion']
if id == 0 and 'Cuenta en revisión' in desc:
# STP returns this response when an account is
# registered. No exception is raised because
# every account goes through this status.
...
elif id == 1 and desc == 'rfc/curp invalido':
raise InvalidRfcOrCurp(**resp)
elif id == 1 and re.match(r'El campo \w+ es invalido', desc):
raise InvalidField(**resp)
elif id == 3 and desc == 'Cuenta Duplicada':
raise DuplicatedAccount(**resp)
elif id == 5 and re.match(r'El campo .* obligatorio \w+', desc):
raise MandatoryField(**resp)
else:
raise StpmexException(**resp)
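# Illustrative sketch (not part of the original module): constructing a client
# against the demo host; the empresa name, key file and passphrase are
# placeholders, not real credentials.
if __name__ == '__main__':
    with open('stp_private_key.pem') as fp:       # hypothetical PEM file
        private_key = fp.read()
    client = Client(
        empresa='MI_EMPRESA',
        priv_key=private_key,
        priv_key_passphrase='my-passphrase',
        demo=True,                                # use DEMO_HOST and skip TLS verification
    )
    # After construction the Resource subclasses (cuentas, ordenes, saldos)
    # share this client through Resource._client.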
| 34.026882
| 78
| 0.618739
| 705
| 6,329
| 5.466667
| 0.299291
| 0.024909
| 0.048521
| 0.054229
| 0.144006
| 0.110535
| 0.06876
| 0.052932
| 0.052932
| 0.052932
| 0
| 0.006931
| 0.270501
| 6,329
| 185
| 79
| 34.210811
| 0.82781
| 0.026544
| 0
| 0.123457
| 0
| 0
| 0.136474
| 0.016247
| 0
| 0
| 0
| 0
| 0.012346
| 1
| 0.049383
| false
| 0.024691
| 0.055556
| 0.018519
| 0.17284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be2a7a241325332e4117c63de7ba8c5d1c491871
| 332
|
py
|
Python
|
metasync/params.py
|
dstarikov/metavault
|
1933cc6cd828ee9c594a45a78238a9a319de0143
|
[
"MIT"
] | 1
|
2019-05-28T15:59:35.000Z
|
2019-05-28T15:59:35.000Z
|
metasync/params.py
|
dstarikov/metavault
|
1933cc6cd828ee9c594a45a78238a9a319de0143
|
[
"MIT"
] | null | null | null |
metasync/params.py
|
dstarikov/metavault
|
1933cc6cd828ee9c594a45a78238a9a319de0143
|
[
"MIT"
] | null | null | null |
# config params
KB = 1024
MB = 1024*KB
GB = 1024*MB
# name of meta root dir
META_DIR = ".metasync"
# batching time for daemon
SYNC_WAIT = 3
# blob size
BLOB_UNIT = 32*MB
# Increase of Paxos proposal number
PAXOS_PNUM_INC = 10
# authentication directory
import os
AUTH_DIR = os.path.join(os.path.expanduser("~"), ".metasync")
| 15.090909
| 61
| 0.713855
| 53
| 332
| 4.358491
| 0.716981
| 0.051948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.180723
| 332
| 21
| 62
| 15.809524
| 0.786765
| 0.391566
| 0
| 0
| 0
| 0
| 0.097436
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be2c413f1972d5571cb52206e64c8dffe9762a99
| 2,503
|
py
|
Python
|
hitnet/hitnet.py
|
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
|
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
|
[
"MIT"
] | 38
|
2021-09-05T13:59:11.000Z
|
2022-03-28T14:18:30.000Z
|
hitnet/hitnet.py
|
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
|
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
|
[
"MIT"
] | 3
|
2021-11-25T08:21:01.000Z
|
2022-03-07T08:22:11.000Z
|
hitnet/hitnet.py
|
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
|
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
|
[
"MIT"
] | 5
|
2021-09-05T23:15:10.000Z
|
2022-02-10T08:32:00.000Z
|
import tensorflow as tf
import numpy as np
import time
import cv2
from hitnet.utils_hitnet import *
drivingStereo_config = CameraConfig(0.546, 1000)
class HitNet():
def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config):
self.fps = 0
self.timeLastPrediction = time.time()
self.frameCounter = 0
self.camera_config = camera_config
# Initialize model
self.model = self.initialize_model(model_path, model_type)
def __call__(self, left_img, right_img):
return self.estimate_disparity(left_img, right_img)
def initialize_model(self, model_path, model_type):
self.model_type = model_type
with tf.io.gfile.GFile(model_path, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
loaded = graph_def.ParseFromString(f.read())
# Wrap frozen graph to ConcreteFunctions
if self.model_type == ModelType.flyingthings:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs=["reference_output_disparity:0","secondary_output_disparity:0"])
else:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs="reference_output_disparity:0")
return model
def estimate_disparity(self, left_img, right_img):
input_tensor = self.prepare_input(left_img, right_img)
# Perform inference on the image
if self.model_type == ModelType.flyingthings:
left_disparity, right_disparity = self.inference(input_tensor)
self.disparity_map = left_disparity
else:
self.disparity_map = self.inference(input_tensor)
return self.disparity_map
def get_depth(self):
return self.camera_config.f*self.camera_config.baseline/self.disparity_map
def prepare_input(self, left_img, right_img):
if (self.model_type == ModelType.eth3d):
# Shape (1, None, None, 2)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
left_img = np.expand_dims(left_img,2)
right_img = np.expand_dims(right_img,2)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
else:
# Shape (1, None, None, 6)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32)
def inference(self, input_tensor):
output = self.model(input_tensor)
return np.squeeze(output)
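# Illustrative sketch (not part of the original module): estimating a disparity
# map from a stereo pair; the model path and image files are placeholders.
if __name__ == '__main__':
    hitnet = HitNet("models/eth3d.pb", ModelType.eth3d)
    left_img = cv2.imread("left.png")
    right_img = cv2.imread("right.png")
    disparity = hitnet(left_img, right_img)   # __call__ -> estimate_disparity
    depth = hitnet.get_depth()                # f * baseline / disparity
    print(disparity.shape, depth.min(), depth.max())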
| 25.804124
| 96
| 0.742709
| 362
| 2,503
| 4.856354
| 0.256906
| 0.051763
| 0.047782
| 0.059727
| 0.33504
| 0.263936
| 0.222981
| 0.222981
| 0.14562
| 0.14562
| 0
| 0.023596
| 0.153416
| 2,503
| 96
| 97
| 26.072917
| 0.806041
| 0.054335
| 0
| 0.203704
| 0
| 0
| 0.042463
| 0.035669
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12963
| false
| 0
| 0.092593
| 0.037037
| 0.351852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
076ca6ec3c064417c645687635c5d40cf01c07b7
| 29,159
|
py
|
Python
|
code/trainer.py
|
mazzaAnt/StackGAN-v2
|
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
|
[
"MIT"
] | 1
|
2019-02-04T20:45:51.000Z
|
2019-02-04T20:45:51.000Z
|
code/trainer.py
|
mazzaAnt/StackGAN-v2
|
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
|
[
"MIT"
] | null | null | null |
code/trainer.py
|
mazzaAnt/StackGAN-v2
|
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from six.moves import range
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
import numpy as np
import os
import json
import time
from PIL import Image, ImageFont, ImageDraw
from copy import deepcopy
from miscc.config import cfg
from miscc.utils import mkdir_p
from CaptionDatasets import *
from tensorboard import summary
from tensorboard import FileWriter
from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3
# ################## Shared functions ###################
def compute_mean_covariance(img):
batch_size = img.size(0)
channel_num = img.size(1)
height = img.size(2)
width = img.size(3)
num_pixels = height * width
# batch_size * channel_num * 1 * 1
mu = img.mean(2, keepdim=True).mean(3, keepdim=True)
# batch_size * channel_num * num_pixels
img_hat = img - mu.expand_as(img)
img_hat = img_hat.view(batch_size, channel_num, num_pixels)
# batch_size * num_pixels * channel_num
img_hat_transpose = img_hat.transpose(1, 2)
# batch_size * channel_num * channel_num
covariance = torch.bmm(img_hat, img_hat_transpose)
covariance = covariance / num_pixels
return mu, covariance
def KL_loss(mu, logvar):
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
if m.bias is not None:
m.bias.data.fill_(0.0)
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
def compute_inception_score(predictions, num_splits=1):
# print('predictions', predictions.shape)
scores = []
for i in range(num_splits):
istart = i * predictions.shape[0] // num_splits
iend = (i + 1) * predictions.shape[0] // num_splits
part = predictions[istart:iend, :]
kl = part * \
(np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
def negative_log_posterior_probability(predictions, num_splits=1):
# print('predictions', predictions.shape)
scores = []
for i in range(num_splits):
istart = i * predictions.shape[0] // num_splits
iend = (i + 1) * predictions.shape[0] // num_splits
part = predictions[istart:iend, :]
result = -1. * np.log(np.max(part, 1))
result = np.mean(result)
scores.append(result)
return np.mean(scores), np.std(scores)
def load_network(gpus):
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=gpus)
print(netG)
netsD = []
if cfg.TREE.BRANCH_NUM > 0:
netsD.append(D_NET64())
if cfg.TREE.BRANCH_NUM > 1:
netsD.append(D_NET128())
if cfg.TREE.BRANCH_NUM > 2:
netsD.append(D_NET256())
if cfg.TREE.BRANCH_NUM > 3:
netsD.append(D_NET512())
if cfg.TREE.BRANCH_NUM > 4:
netsD.append(D_NET1024())
# TODO: if cfg.TREE.BRANCH_NUM > 5:
for i in range(len(netsD)):
netsD[i].apply(weights_init)
netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
# print(netsD[i])
print('# of netsD', len(netsD))
count = 0
if cfg.TRAIN.NET_G != '':
state_dict = torch.load(cfg.TRAIN.NET_G)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
istart = cfg.TRAIN.NET_G.rfind('_') + 1
iend = cfg.TRAIN.NET_G.rfind('.')
count = cfg.TRAIN.NET_G[istart:iend]
count = int(count) + 1
if cfg.TRAIN.NET_D != '':
for i in range(len(netsD)):
print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
netsD[i].load_state_dict(state_dict)
inception_model = INCEPTION_V3()
if cfg.CUDA:
netG.cuda()
for i in range(len(netsD)):
netsD[i].cuda()
inception_model = inception_model.cuda()
inception_model.eval()
return netG, netsD, len(netsD), inception_model, count
def define_optimizers(netG, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
# G_opt_paras = []
# for p in netG.parameters():
# if p.requires_grad:
# G_opt_paras.append(p)
optimizerG = optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
return optimizerG, optimizersD
def save_model(netG, avg_param_G, netsD, epoch, model_dir):
load_params(netG, avg_param_G)
torch.save(
netG.state_dict(),
'%s/netG_%d.pth' % (model_dir, epoch))
for i in range(len(netsD)):
netD = netsD[i]
torch.save(
netD.state_dict(),
'%s/netD%d.pth' % (model_dir, i))
print('Save G/Ds models.')
def save_real(imgs_tcpu, image_dir):
num = cfg.TRAIN.VIS_COUNT
# The range of real_img (i.e., self.imgs_tcpu[i][0:num])
# is changed to [0, 1] by function vutils.save_image
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples.png' % (image_dir),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
sup_real_img = summary.image('real_img', real_img_set)
def save_img_results(imgs_tcpu, fake_imgs, num_imgs,
count, image_dir, summary_writer):
num = cfg.TRAIN.VIS_COUNT
# The range of real_img (i.e., self.imgs_tcpu[i][0:num])
# is changed to [0, 1] by function vutils.save_image
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples.png' % (image_dir),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
sup_real_img = summary.image('real_img', real_img_set)
summary_writer.add_summary(sup_real_img, count)
for i in range(num_imgs):
fake_img = fake_imgs[i][0:num]
# The range of fake_img.data (i.e., self.fake_imgs[i][0:num])
# is still [-1, 1]...
vutils.save_image(
fake_img.data, '%s/count_%09d_fake_samples_%d.png' %
(image_dir, count, i), normalize=True)
fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
fake_img_set = np.transpose(fake_img_set, (1, 2, 0))
fake_img_set = (fake_img_set + 1) * 255 / 2
fake_img_set = fake_img_set.astype(np.uint8)
sup_fake_img = summary.image('fake_img%d' % i, fake_img_set)
summary_writer.add_summary(sup_fake_img, count)
summary_writer.flush()
# ################# Text to image task############################ #
class condGANTrainer(object):
def __init__(self, output_dir, data_loader, imsize):
if cfg.TRAIN.FLAG:
self.model_dir = os.path.join(output_dir, 'Model')
self.image_dir = os.path.join(output_dir, 'Image')
self.log_dir = os.path.join(output_dir, 'Log')
mkdir_p(self.model_dir)
mkdir_p(self.image_dir)
mkdir_p(self.log_dir)
self.summary_writer = FileWriter(self.log_dir)
s_gpus = cfg.GPU_ID.split(',')
self.gpus = [int(ix) for ix in s_gpus]
self.num_gpus = len(self.gpus)
torch.cuda.set_device(self.gpus[0])
cudnn.benchmark = True
self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
self.max_epoch = cfg.TRAIN.MAX_EPOCH
self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
self.data_loader = data_loader
self.num_batches = len(self.data_loader)
def prepare_data(self, data):
imgs, w_imgs, t_embedding, _ = data
real_vimgs, wrong_vimgs = [], []
if cfg.CUDA:
vembedding = Variable(t_embedding).cuda()
else:
vembedding = Variable(t_embedding)
for i in range(self.num_Ds):
if cfg.CUDA:
real_vimgs.append(Variable(imgs[i]).cuda())
wrong_vimgs.append(Variable(w_imgs[i]).cuda())
else:
real_vimgs.append(Variable(imgs[i]))
wrong_vimgs.append(Variable(w_imgs[i]))
return imgs, real_vimgs, wrong_vimgs, vembedding
def train_Dnet(self, idx, count):
flag = count % 100
batch_size = self.real_imgs[0].size(0)
criterion, mu = self.criterion, self.mu
netD, optD = self.netsD[idx], self.optimizersD[idx]
real_imgs = self.real_imgs[idx]
wrong_imgs = self.wrong_imgs[idx]
fake_imgs = self.fake_imgs[idx]
#
netD.zero_grad()
# Forward
real_labels = self.real_labels[:batch_size]
fake_labels = self.fake_labels[:batch_size]
# for real
real_logits = netD(real_imgs, mu.detach())
wrong_logits = netD(wrong_imgs, mu.detach())
fake_logits = netD(fake_imgs.detach(), mu.detach())
#
errD_real = criterion(real_logits[0], real_labels)
errD_wrong = criterion(wrong_logits[0], fake_labels)
errD_fake = criterion(fake_logits[0], fake_labels)
if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(real_logits[1], real_labels)
errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(wrong_logits[1], real_labels)
errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(fake_logits[1], fake_labels)
#
errD_real = errD_real + errD_real_uncond
errD_wrong = errD_wrong + errD_wrong_uncond
errD_fake = errD_fake + errD_fake_uncond
#
errD = errD_real + errD_wrong + errD_fake
else:
errD = errD_real + 0.5 * (errD_wrong + errD_fake)
# backward
errD.backward()
# update parameters
optD.step()
# log
if flag == 0:
summary_D = summary.scalar('D_loss%d' % idx, errD.item())
self.summary_writer.add_summary(summary_D, count)
return errD
def train_Gnet(self, count):
self.netG.zero_grad()
errG_total = 0
flag = count % 100
batch_size = self.real_imgs[0].size(0)
criterion, mu, logvar = self.criterion, self.mu, self.logvar
real_labels = self.real_labels[:batch_size]
for i in range(self.num_Ds):
outputs = self.netsD[i](self.fake_imgs[i], mu)
errG = criterion(outputs[0], real_labels)
if len(outputs) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\
criterion(outputs[1], real_labels)
errG = errG + errG_patch
errG_total = errG_total + errG
if flag == 0:
summary_D = summary.scalar('G_loss%d' % i, errG.item())
self.summary_writer.add_summary(summary_D, count)
# Compute color consistency losses
if cfg.TRAIN.COEFF.COLOR_LOSS > 0:
if self.num_Ds > 1:
mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1])
mu2, covariance2 = \
compute_mean_covariance(self.fake_imgs[-2].detach())
like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
nn.MSELoss()(covariance1, covariance2)
errG_total = errG_total + like_mu2 + like_cov2
if flag == 0:
sum_mu = summary.scalar('G_like_mu2', like_mu2.item())
self.summary_writer.add_summary(sum_mu, count)
sum_cov = summary.scalar('G_like_cov2', like_cov2.item())
self.summary_writer.add_summary(sum_cov, count)
if self.num_Ds > 2:
mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2])
mu2, covariance2 = \
compute_mean_covariance(self.fake_imgs[-3].detach())
like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
nn.MSELoss()(covariance1, covariance2)
errG_total = errG_total + like_mu1 + like_cov1
if flag == 0:
sum_mu = summary.scalar('G_like_mu1', like_mu1.item())
self.summary_writer.add_summary(sum_mu, count)
sum_cov = summary.scalar('G_like_cov1', like_cov1.item())
self.summary_writer.add_summary(sum_cov, count)
kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL
errG_total = errG_total + kl_loss
# Postpone the backward propagation
# errG_total.backward()
# self.optimizerG.step()
return kl_loss, errG_total
def train(self):
self.netG, self.netsD, self.num_Ds,\
self.inception_model, start_count = load_network(self.gpus)
avg_param_G = copy_G_params(self.netG)
self.optimizerG, self.optimizersD = \
define_optimizers(self.netG, self.netsD)
self.criterion = nn.BCELoss()
self.SATcriterion = nn.CrossEntropyLoss()
self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1))
self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0))
self.gradient_one = torch.FloatTensor([1.0])
self.gradient_half = torch.FloatTensor([0.5])
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(self.batch_size, nz))
fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1))
# Data parameters
data_folder = 'birds_output' # folder with data files saved by create_input_files.py
data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data files
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Show, Attend, and Tell Dataloader
train_loader = torch.utils.data.DataLoader(
CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])),
batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True)
if cfg.CUDA:
self.criterion.cuda()
self.SATcriterion.cuda() # Compute SATloss
self.real_labels = self.real_labels.cuda()
self.fake_labels = self.fake_labels.cuda()
self.gradient_one = self.gradient_one.cuda()
self.gradient_half = self.gradient_half.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
predictions = []
count = start_count
start_epoch = start_count // (self.num_batches)
for epoch in range(start_epoch, self.max_epoch):
start_t = time.time()
# for step, data in enumerate(self.data_loader, 0):
for step, data in enumerate(zip(self.data_loader, train_loader), 0):
data_1 = data[0]
_, caps, caplens = data[1]
data = data_1
#######################################################
# (0) Prepare training data
######################################################
self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \
self.txt_embedding = self.prepare_data(data)
# Testing line for real samples
if epoch == start_epoch and step == 0:
print ('Checking real samples at first...')
save_real(self.imgs_tcpu, self.image_dir)
#######################################################
# (1) Generate fake images
######################################################
noise.data.normal_(0, 1)
self.fake_imgs, self.mu, self.logvar = \
self.netG(noise, self.txt_embedding)
# len(self.fake_imgs) = NUM_BRANCHES
# self.fake_imgs[0].shape = [batch_size, 3, 64, 64]
# self.fake_imgs[1].shape = [batch_size, 3, 128, 128]
# self.fake_imgs[2].shape = [batch_size, 3, 256, 256]
#######################################################
# (*) Forward fake images to SAT
######################################################
from SATmodels import Encoder, DecoderWithAttention
from torch.nn.utils.rnn import pack_padded_sequence
fine_tune_encoder = False
# Read word map
word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
with open(word_map_file, 'r') as j:
word_map = json.load(j)
# Define the encoder/decoder structure for SAT model
decoder = DecoderWithAttention(attention_dim=512,
embed_dim=512,
decoder_dim=512,
vocab_size=len(word_map),
dropout=0.5).cuda()
decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
lr=4e-4)
encoder = Encoder().cuda()
encoder.fine_tune(fine_tune_encoder)
encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),
lr=1e-4) if fine_tune_encoder else None
SATloss = 0
# Compute the SAT loss after forwarding the SAT model
for idx in range(len(self.fake_imgs)):
img = encoder(self.fake_imgs[idx])
scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(img, caps, caplens)
targets = caps_sorted[:, 1:]
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda()
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda()
SATloss += self.SATcriterion(scores, targets) + 1 * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Set zero_grad for encoder/decoder
decoder_optimizer.zero_grad()
if encoder_optimizer is not None:
encoder_optimizer.zero_grad()
#######################################################
# (2) Update D network
######################################################
errD_total = 0
for i in range(self.num_Ds):
errD = self.train_Dnet(i, count)
errD_total += errD
#######################################################
# (3) Update G network: maximize log(D(G(z)))
######################################################
kl_loss, errG_total = self.train_Gnet(count)
for p, avg_p in zip(self.netG.parameters(), avg_param_G):
avg_p.mul_(0.999).add_(0.001, p.data)
# Combine with G and SAT first, then back propagation
errG_total += SATloss
errG_total.backward()
self.optimizerG.step()
#######################################################
# (*) Update SAT network:
######################################################
# Update weights
decoder_optimizer.step()
if encoder_optimizer is not None:
encoder_optimizer.step()
#######################################################
# (*) Prediction and Inception score:
######################################################
pred = self.inception_model(self.fake_imgs[-1].detach())
predictions.append(pred.data.cpu().numpy())
if count % 100 == 0:
summary_D = summary.scalar('D_loss', errD_total.item())
summary_G = summary.scalar('G_loss', errG_total.item())
summary_KL = summary.scalar('KL_loss', kl_loss.item())
self.summary_writer.add_summary(summary_D, count)
self.summary_writer.add_summary(summary_G, count)
self.summary_writer.add_summary(summary_KL, count)
count += 1
#######################################################
# (*) Save Images/Log/Model per SNAPSHOT_INTERVAL:
######################################################
if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
# Save images
backup_para = copy_G_params(self.netG)
load_params(self.netG, avg_param_G)
#
self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding)
save_img_results(self.imgs_tcpu, self.fake_imgs, self.num_Ds,
count, self.image_dir, self.summary_writer)
#
load_params(self.netG, backup_para)
# Compute inception score
if len(predictions) > 500:
predictions = np.concatenate(predictions, 0)
mean, std = compute_inception_score(predictions, 10)
# print('mean:', mean, 'std', std)
m_incep = summary.scalar('Inception_mean', mean)
self.summary_writer.add_summary(m_incep, count)
#
mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10)
m_nlpp = summary.scalar('NLPP_mean', mean_nlpp)
self.summary_writer.add_summary(m_nlpp, count)
#
predictions = []
end_t = time.time()
print('''[%d/%d][%d]
Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time: %.2fs
''' # D(real): %.4f D(wrong):%.4f D(fake) %.4f
% (epoch, self.max_epoch, self.num_batches,
errD_total.item(), errG_total.item(),
kl_loss.item(), end_t - start_t))
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
self.summary_writer.close()
def save_superimages(self, images_list, filenames,
save_dir, split_dir, imsize):
batch_size = images_list[0].size(0)
num_sentences = len(images_list)
for i in range(batch_size):
s_tmp = '%s/super/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
#
savename = '%s_%d.png' % (s_tmp, imsize)
super_img = []
for j in range(num_sentences):
img = images_list[j][i]
# print(img.size())
img = img.view(1, 3, imsize, imsize)
# print(img.size())
super_img.append(img)
# break
super_img = torch.cat(super_img, 0)
vutils.save_image(super_img, savename, nrow=10, normalize=True)
def save_singleimages(self, images, filenames,
save_dir, split_dir, sentenceID, imsize):
for i in range(images.size(0)):
s_tmp = '%s/single_samples/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
fullpath = '%s_%d_sentence%d.png' % (s_tmp, imsize, sentenceID)
# range from [-1, 1] to [0, 255]
img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()
ndarr = img.permute(1, 2, 0).data.cpu().numpy()
im = Image.fromarray(ndarr)
im.save(fullpath)
def evaluate(self, split_dir):
if cfg.TRAIN.NET_G == '':
print('Error: the path for models is not found!')
else:
# Build and load the generator
if split_dir == 'test':
split_dir = 'valid'
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
print(netG)
# state_dict = torch.load(cfg.TRAIN.NET_G)
state_dict = \
torch.load(cfg.TRAIN.NET_G,
map_location=lambda storage, loc: storage)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
# the path to save generated images
s_tmp = cfg.TRAIN.NET_G
istart = s_tmp.rfind('_') + 1
iend = s_tmp.rfind('.')
iteration = int(s_tmp[istart:iend])
s_tmp = s_tmp[:s_tmp.rfind('/')]
save_dir = '%s/iteration%d' % (s_tmp, iteration)
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(self.batch_size, nz))
if cfg.CUDA:
netG.cuda()
noise = noise.cuda()
# switch to evaluate mode
netG.eval()
for step, data in enumerate(self.data_loader, 0):
imgs, t_embeddings, filenames = data
if cfg.CUDA:
t_embeddings = Variable(t_embeddings).cuda()
else:
t_embeddings = Variable(t_embeddings)
# print(t_embeddings[:, 0, :], t_embeddings.size(1))
embedding_dim = t_embeddings.size(1)
batch_size = imgs[0].size(0)
noise.data.resize_(batch_size, nz)
noise.data.normal_(0, 1)
fake_img_list = []
for i in range(embedding_dim):
fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :])
if cfg.TEST.B_EXAMPLE:
# fake_img_list.append(fake_imgs[0].data.cpu())
# fake_img_list.append(fake_imgs[1].data.cpu())
fake_img_list.append(fake_imgs[2].data.cpu())
else:
self.save_singleimages(fake_imgs[-1], filenames,
save_dir, split_dir, i, 256)
# self.save_singleimages(fake_imgs[-2], filenames,
# save_dir, split_dir, i, 128)
# self.save_singleimages(fake_imgs[-3], filenames,
# save_dir, split_dir, i, 64)
# break
if cfg.TEST.B_EXAMPLE:
# self.save_superimages(fake_img_list, filenames,
# save_dir, split_dir, 64)
# self.save_superimages(fake_img_list, filenames,
# save_dir, split_dir, 128)
self.save_superimages(fake_img_list, filenames,
save_dir, split_dir, 256)
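# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# The trainer is normally constructed by a main script that has already loaded
# cfg and built a torch DataLoader; schematically:
#
#   algo = condGANTrainer(output_dir, dataloader, imsize)
#   if cfg.TRAIN.FLAG:
#       algo.train()              # joint GAN + SAT update loop defined above
#   else:
#       algo.evaluate(split_dir)  # dump generated samples for 'test'/'valid'
#
# output_dir, dataloader, imsize and split_dir come from that caller and are
# placeholders here.
# ---------------------------------------------------------------------------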
| 41.360284
| 116
| 0.540245
| 3,524
| 29,159
| 4.233541
| 0.124291
| 0.018768
| 0.013674
| 0.010322
| 0.386688
| 0.320665
| 0.275957
| 0.241035
| 0.196528
| 0.166097
| 0
| 0.019728
| 0.322028
| 29,159
| 704
| 117
| 41.419034
| 0.734939
| 0.091258
| 0
| 0.230461
| 0
| 0
| 0.027423
| 0.003497
| 0
| 0
| 0
| 0.00142
| 0
| 1
| 0.04008
| false
| 0
| 0.044088
| 0
| 0.106212
| 0.026052
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
076cc2a993643184f8804f5d69cb1769c80c9cee
| 5,654
|
py
|
Python
|
spletni_vmesnik.py
|
LeaHolc/recepcija
|
bff9f804e795e45c2da214432042c0ae067783b0
|
[
"MIT"
] | 1
|
2021-11-11T08:20:13.000Z
|
2021-11-11T08:20:13.000Z
|
spletni_vmesnik.py
|
LeaHolc/recepcija
|
bff9f804e795e45c2da214432042c0ae067783b0
|
[
"MIT"
] | null | null | null |
spletni_vmesnik.py
|
LeaHolc/recepcija
|
bff9f804e795e45c2da214432042c0ae067783b0
|
[
"MIT"
] | null | null | null |
from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file
import bottle
import controller
from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna
import datetime as dt
@bottle.get('/')
def root():
redirect('/domov')
@bottle.get('/domov')
def index():
parcele = dobi_parcele_za_prikaz(dt.date.today())
return template("domov", parcele=parcele, hide_header_back=True)
@bottle.get("/parcela/<id_parcele>")
def parcela(id_parcele):
'Check the current state of the parcel'
rez, gostje = dobi_info_parcele(id_parcele, dt.date.today())
if rez is not None:
stanje = "Parcela je trenutno zasedena"
else:
stanje = "Parcela je trenutno na voljo"
return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje)
@bottle.get("/naredi-rezervacijo/<id_parcele>")
def nova_rezervacija(id_parcele=None):
print(id_parcele)
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow)
@bottle.post("/naredi-rezervacijo")
def naredi_novo_rezervacijo():
" In the model, create a new reservation and add the first guest to it"
# Read the submitted fields from the form
ime = request.forms.ime#get("")
priimek = request.forms.priimek#get("")
emso = request.forms.emso#get("")
drzava = request.forms.drzava#get("")
id_parcele = request.forms.id_parcele#get("")
od = request.forms.zacetek#get("")
do = request.forms.konec#get("")
print(ime, priimek)
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
print("Napaka pri pretvorbi datumov")
return redirect("/naredi-rezervacijo")
rezervacija = naredi_rezervacijo(id_parcele)
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
}, datum_od, datum_do)
return redirect(f"/parcela/{id_parcele}")
@bottle.get("/dodaj-gosta/<id_rezervacije>")
def get_dodaj_gosta_na_rezervacijo(id_rezervacije):
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
return template("dodajanje_gosta", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow)
@bottle.post("/dodaj-gosta-na-rezervacijo")
def post_dodaj_gosta_na_rezervacijo():
" In the model, add a guest to the reservation"
# Read the submitted fields from the form
ime = request.forms.ime
priimek = request.forms.priimek
emso = request.forms.emso#get("")
drzava = request.forms.drzava#get("")
id_rezervacije = request.forms.rez#get("")
od = request.forms.zacetek#get("")
do = request.forms.konec#get("")
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
print("Napaka pri pretvorbi datumov")
return redirect("/dodaj-gosta")
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
},datum_od,datum_do)
print(id_rezervacije)
return redirect(f"/parcela/{rezervacija.id_parcele}")
@bottle.get("/predracun/<id_rezervacije>")
def predracun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = dobi_postavke_racuna(rezervacija)
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.get("/zakljuci/<id_rezervacije>")
def racun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today())
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.error(404)
def napaka404(a):
return template("error", sporocilo="Stran ne obstaja!", naslov="404")
@bottle.error(500)
def napaka500(a):
return template("error", sporocilo="Napaka streznika!", naslov="500")
bottle.run(reloader=True, debug=True)
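# ---------------------------------------------------------------------------
# Hedged usage note (added for illustration; not part of the original file).
# bottle.run() above starts the development server on bottle's defaults
# (127.0.0.1:8080). The template() calls resolve names such as "domov",
# "parcela", "nova_rezervacija" and "racun" against .tpl files found on
# bottle's TEMPLATE_PATH, which is assumed to be set up alongside this script.
# ---------------------------------------------------------------------------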
| 37.693333
| 196
| 0.703926
| 711
| 5,654
| 5.416315
| 0.188467
| 0.067515
| 0.019995
| 0.035835
| 0.598026
| 0.548429
| 0.529733
| 0.529733
| 0.529733
| 0.504804
| 0
| 0.005096
| 0.166961
| 5,654
| 149
| 197
| 37.946309
| 0.812527
| 0.045278
| 0
| 0.548387
| 0
| 0
| 0.147508
| 0.039287
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08871
| false
| 0
| 0.040323
| 0.016129
| 0.258065
| 0.056452
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
076da057376eccf60a978162dbf694687eba8ff6
| 1,233
|
py
|
Python
|
espnet/nets/pytorch_backend/transducer/initializer.py
|
magictron/espnet
|
075cee8d586957241be3e54c47846fbb12a32310
|
[
"Apache-2.0"
] | 2
|
2020-06-21T11:15:10.000Z
|
2021-12-03T08:08:45.000Z
|
espnet/nets/pytorch_backend/transducer/initializer.py
|
magictron/espnet
|
075cee8d586957241be3e54c47846fbb12a32310
|
[
"Apache-2.0"
] | 1
|
2021-03-05T10:43:49.000Z
|
2021-03-05T10:43:49.000Z
|
espnet/nets/pytorch_backend/transducer/initializer.py
|
magictron/espnet
|
075cee8d586957241be3e54c47846fbb12a32310
|
[
"Apache-2.0"
] | 2
|
2021-03-30T06:02:08.000Z
|
2021-08-06T06:59:22.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parameter initialization for transducer RNN/Transformer parts."""
import six
from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters
from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one
from espnet.nets.pytorch_backend.transformer.initializer import initialize
def initializer(model, args):
"""Initialize transducer model.
Args:
model (torch.nn.Module): transducer instance
args (Namespace): argument Namespace containing options
"""
if args.dtype != "transformer":
if args.etype == "transformer":
initialize(model.encoder, args.transformer_init)
lecun_normal_init_parameters(model.dec)
else:
lecun_normal_init_parameters(model)
model.dec.embed.weight.data.normal_(0, 1)
for l in six.moves.range(len(model.dec.decoder)):
set_forget_bias_to_one(model.dec.decoder[l].bias_ih)
else:
if args.etype == "transformer":
initialize(model, args.transformer_init)
else:
lecun_normal_init_parameters(model.encoder)
initialize(model.decoder, args.transformer_init)
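# ---------------------------------------------------------------------------
# Hedged smoke-test sketch (added for illustration; not part of the original
# module). It exercises only the dtype == "transformer" / etype != "transformer"
# branch above with a toy stand-in model; a real espnet transducer is built by
# the surrounding training code, not here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace

    import torch

    class ToyTransducer(torch.nn.Module):
        """Minimal stand-in exposing the .encoder / .decoder attributes used above."""

        def __init__(self):
            super().__init__()
            self.encoder = torch.nn.Linear(16, 8)
            self.decoder = torch.nn.Linear(8, 4)

    toy_args = Namespace(dtype="transformer", etype="rnn", transformer_init="pytorch")
    initializer(ToyTransducer(), toy_args)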
| 31.615385
| 83
| 0.697486
| 146
| 1,233
| 5.69863
| 0.417808
| 0.052885
| 0.072115
| 0.120192
| 0.399038
| 0.286058
| 0.115385
| 0
| 0
| 0
| 0
| 0.004086
| 0.206002
| 1,233
| 38
| 84
| 32.447368
| 0.845761
| 0.203569
| 0
| 0.25
| 0
| 0
| 0.034664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.2
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
077018ad315b121efadde62952dbcb47369a343a
| 2,368
|
py
|
Python
|
benchmarks/eval.py
|
rom1mouret/anoflows
|
42381c06b8897e4510e73cda87ea97ea3f4a5579
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/eval.py
|
rom1mouret/anoflows
|
42381c06b8897e4510e73cda87ea97ea3f4a5579
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/eval.py
|
rom1mouret/anoflows
|
42381c06b8897e4510e73cda87ea97ea3f4a5579
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import logging
import yaml
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.impute import SimpleImputer
from anoflows.hpo import find_best_flows
from data_loading import load_data
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) == 1:
logging.error("YAML data specification missing from the command line arguments")
exit(1)
spec_file = sys.argv[1]
df, spec = load_data(spec_file)
max_rows = min(len(df), spec.get("max_rows", 40000))
novelty_detection = spec.get("novelty", True)
normal_classes = spec["normal_classes"]
precision = defaultdict(list)
for rounds in range(spec.get("rounds", 1)):
# random sampling
df = df.sample(n=max_rows, replace=False)
label_col = spec["label_column"]
y = df[label_col].values
other = df.drop(label_col, inplace=False, axis=1)
X = other.values
# imputing
X = SimpleImputer(copy=False).fit_transform(X)
# train/test split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, shuffle=False, test_size=0.5)
if novelty_detection:
keep = np.where(np.isin(y_train, normal_classes))[0]
X_train = X_train[keep, :]
y_train = y_train[keep]
# training
#flows, loss = find_best_flows(X_train, device='cpu', n_trials=1)
from anoflows.anoflow_bagging import AnoFlowBagging
flows = AnoFlowBagging()
flows.fit(X_train)
iforest = IsolationForest().fit(X_train)
# prediction
pred = {
"anoflows": flows.likelihood(X_test),
"iforest": iforest.decision_function(X_test)
}
# evaluation
y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0]
ref = np.zeros(len(y_test))
ref[y_true] = 1
k = len(y_true)
for name, y_pred in pred.items():
anomaly_indices = y_pred.argsort()[:k]
prec = ref[anomaly_indices].sum() / k
logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test)))
precision[name].append(prec)
logging.info("* SUMMARY %s", spec_file)
for name, prec in precision.items():
prec = 100 * np.array(prec)
mean = np.mean(prec)
std = np.std(prec)
logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
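# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original script).
# The benchmark is driven entirely by a YAML spec passed on the command line;
# a minimal spec matching the spec[...] / spec.get() accesses above might be:
#
#   label_column: "class"
#   normal_classes: [0]
#   anomaly_classes: [1]
#   max_rows: 20000
#   novelty: true
#   rounds: 3
#
# Invocation (file name is illustrative):
#   python benchmarks/eval.py my_spec.yaml
# ---------------------------------------------------------------------------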
| 29.234568
| 94
| 0.678209
| 347
| 2,368
| 4.463977
| 0.37464
| 0.027114
| 0.027114
| 0.019367
| 0.018076
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013584
| 0.191723
| 2,368
| 80
| 95
| 29.6
| 0.795716
| 0.066723
| 0
| 0
| 0
| 0
| 0.09673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07702a9eb4e9374ca232b483bdbecbfbdb1917c5
| 840
|
py
|
Python
|
pydantic/version.py
|
jamescurtin/pydantic
|
4f8f9396906a094626b770fb7cc8eecf03770ffe
|
[
"MIT"
] | 1
|
2020-02-25T15:28:47.000Z
|
2020-02-25T15:28:47.000Z
|
pydantic/version.py
|
jamescurtin/pydantic
|
4f8f9396906a094626b770fb7cc8eecf03770ffe
|
[
"MIT"
] | 1
|
2020-01-17T17:12:45.000Z
|
2020-01-17T17:12:45.000Z
|
pydantic/version.py
|
jamescurtin/pydantic
|
4f8f9396906a094626b770fb7cc8eecf03770ffe
|
[
"MIT"
] | 1
|
2020-12-19T18:00:19.000Z
|
2020-12-19T18:00:19.000Z
|
__all__ = ['VERSION', 'version_info']
VERSION = '1.4a1'
def version_info() -> str:
import platform
import sys
from importlib import import_module
from pathlib import Path
from .main import compiled
optional_deps = []
for p in ('typing-extensions', 'email-validator', 'devtools'):
try:
import_module(p.replace('-', '_'))
except ImportError:
continue
optional_deps.append(p)
info = {
'pydantic version': VERSION,
'pydantic compiled': compiled,
'install path': Path(__file__).resolve().parent,
'python version': sys.version,
'platform': platform.platform(),
'optional deps. installed': optional_deps,
}
return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())
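# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# version_info() is intended to be called against an installed package, e.g.
#
#   python -c "import pydantic.version; print(pydantic.version.version_info())"
#
# which prints one right-aligned "key: value" line per entry of the info dict
# assembled above.
# ---------------------------------------------------------------------------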
| 27.096774
| 101
| 0.589286
| 91
| 840
| 5.263736
| 0.527473
| 0.100209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008065
| 0.261905
| 840
| 30
| 102
| 28
| 0.764516
| 0
| 0
| 0
| 0
| 0
| 0.204762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.291667
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0770f2a922548842dd4151e55d3fc69c6cf5b84c
| 2,319
|
py
|
Python
|
spire/core/registry.py
|
siq/spire
|
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
|
[
"Linux-OpenIB"
] | null | null | null |
spire/core/registry.py
|
siq/spire
|
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
|
[
"Linux-OpenIB"
] | 1
|
2016-09-15T16:19:27.000Z
|
2016-09-15T16:20:06.000Z
|
spire/core/registry.py
|
siq/spire
|
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
|
[
"Linux-OpenIB"
] | null | null | null |
from scheme import Structure
__all__ = ('Configurable', 'Registry')
class Configurable(object):
"""A sentry class which indicates that subclasses can establish a configuration chain."""
class Registry(object):
"""The unit registry."""
dependencies = {}
schemas = {}
units = {}
@classmethod
def is_configurable(cls, obj):
return (obj is not Configurable and issubclass(obj, Configurable) and
Configurable not in obj.__bases__)
@classmethod
def purge(cls):
cls.schemas = {}
cls.units = {}
@classmethod
def register_dependency(cls, dependency):
token = dependency.token
if not token:
return
if token not in cls.dependencies:
cls.dependencies[token] = type(dependency)
if not dependency.configurable:
return
configuration = dependency.unit.configuration
if token in cls.schemas:
structure = cls.schemas[token]
if configuration.required and not dependency.optional and not structure.required:
structure.required = True
else:
schema = dependency.construct_schema(generic=True, name=token)
if dependency.optional:
schema = schema.clone(required=False)
cls.schemas[token] = schema
@classmethod
def register_unit(cls, unit):
cls.units[unit.identity] = unit
if cls.is_configurable(unit):
queue = [(unit, [unit.identity], None)]
while queue:
subject, tokens, dependency = queue.pop(0)
if subject.configuration:
token = '/'.join(tokens)
if dependency:
structure = dependency.construct_schema(name=token)
if dependency.token and structure.required:
structure = structure.clone(required=False)
else:
structure = subject.configuration.schema.clone(required=False,
name=token)
cls.schemas[token] = structure
for attr, subdependency in subject.dependencies.iteritems():
queue.append((subdependency.unit, tokens + [attr], subdependency))
| 34.61194
| 93
| 0.581285
| 219
| 2,319
| 6.091324
| 0.296804
| 0.037481
| 0.033733
| 0.031484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000647
| 0.333765
| 2,319
| 66
| 94
| 35.136364
| 0.862783
| 0.043984
| 0
| 0.150943
| 0
| 0
| 0.009519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.018868
| 0.018868
| 0.245283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0771ae571980aa4669298ae5f48b1ac83a19af96
| 2,953
|
py
|
Python
|
scripts/extract.py
|
nng555/fairseq
|
c9730a125825a85f33042e1b9fd1959b8ca829e5
|
[
"MIT"
] | 2
|
2020-10-05T08:52:01.000Z
|
2021-03-03T15:26:35.000Z
|
scripts/extract.py
|
nng555/fairseq
|
c9730a125825a85f33042e1b9fd1959b8ca829e5
|
[
"MIT"
] | null | null | null |
scripts/extract.py
|
nng555/fairseq
|
c9730a125825a85f33042e1b9fd1959b8ca829e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
from sacrebleu import extract_ngrams
def get_phrase(words, index, length):
assert index < len(words) - length + 1
phr = " ".join(words[index : index + length])
for i in range(index, index + length):
words.pop(index)
return phr
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if "\t" in line:
source, target = line.split("\t")
if args.add_sos:
target = f"<s> {target}"
if args.add_eos:
target = f"{target} </s>"
if len(target.split()) >= args.len:
words = [target]
num = args.number
choices = {}
for i in range(num):
if len(words) == 0:
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = " ".join(
tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
)
for j in range(
phrase_index, min(len(tokens), phrase_index + args.len)
):
tokens.pop(phrase_index)
if phrase_index > 0:
words.append(" ".join(tokens[0:phrase_index]))
if phrase_index + 1 < len(tokens):
words.append(" ".join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
# mask out with spaces
target = target.replace(choice, " " * len(choice), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
print(source, *constraints, sep="\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
main(args)
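# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original script).
# The script reads references (optionally "source<TAB>target" pairs) from stdin
# and writes "source<TAB>constraint..." lines to stdout, e.g.
#
#   paste source.txt target.txt | python scripts/extract.py -n 2 -l 3 -s 1 > constraints.tsv
#
# File names are placeholders; --add-sos/--add-eos wrap the target in
# <s> ... </s> before phrases are sampled.
# ---------------------------------------------------------------------------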
| 31.752688
| 88
| 0.529292
| 330
| 2,953
| 4.639394
| 0.336364
| 0.071848
| 0.055519
| 0.01437
| 0.184193
| 0.128021
| 0.128021
| 0.128021
| 0.128021
| 0.074461
| 0
| 0.0052
| 0.348798
| 2,953
| 92
| 89
| 32.097826
| 0.790952
| 0.088385
| 0
| 0.03125
| 0
| 0
| 0.061194
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 1
| 0.046875
| false
| 0
| 0.0625
| 0
| 0.125
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|