/* dyntest - fiddle with boost UTF init_unit_test_suite

   CPATH=../libboost-1.55.inst/include/ g++ -c -o dyntest.o dyntest.cpp -DYYTEXT_POINTER=1 -fPIC -g -O0 -W -Wall -Wextra -Wnon-virtual-dtor -ansi -std=c++98 -pipe -Wno-empty-body -Wno-missing-field-initializers -Wwrite-strings -Wno-deprecated -Wno-unused -Wno-non-virtual-dtor -Wno-variadic-macros -ftemplate-depth-128 -fno-merge-constants
   LIBRARY_PATH=../libboost-1.55.inst/lib/ g++ -o dyntest dyntest.o -lboost_unit_test_framework
   LD_LIBRARY_PATH=../libboost-1.55.inst/lib/ dyntest --log_level=all "--run_test=a/1" ab 12
 */

#include "tests/SPARQLTest.hpp"

#ifndef BOOST_TEST_DYN_LINK
#define BOOST_TEST_DYN_LINK
#endif

#include <boost/test/unit_test.hpp>
#include "TSVmanifest.hpp"

w3c_sw_DEBUGGING_FUNCTIONS();

bool init_function () {
    boost::unit_test::framework::master_test_suite().p_name.value = "Trig tests";
    int argc = boost::unit_test::framework::master_test_suite().argc;
    char** argv = boost::unit_test::framework::master_test_suite().argv;
    boost::unit_test::test_suite* ts = BOOST_TEST_SUITE("WG");
    boost::unit_test::framework::master_test_suite().add(ts);
    w3c_sw::TSVmanifest::readAndQueue("TrigEvalPos.tsv", w3c_sw::TSVmanifest::Expect, ts);
    w3c_sw::TSVmanifest::readAndQueue("TrigSynPos.tsv" , w3c_sw::TSVmanifest::Parse , ts);
    w3c_sw::TSVmanifest::readAndQueue("TrigSynNeg.tsv" , w3c_sw::TSVmanifest::Fail  , ts);
    w3c_sw::TSVmanifest::readAndQueue("TrigEvalNeg.tsv", w3c_sw::TSVmanifest::Fail  , ts);
    return true;
}

int main (int argc, char* argv[]) {
    return ::boost::unit_test::unit_test_main( &init_function, argc, argv );
}

/* accounting
   +-------------------------------------------------------+-----+
   | ?t                                                    | ?c  |
   | <http://www.w3.org/ns/rdftest#TestTrigEval>           | 136 |
   | <http://www.w3.org/ns/rdftest#TestTrigNegativeSyntax> | 99  |
   | <http://www.w3.org/ns/rdftest#TestTrigPositiveSyntax> | 96  |
   | <http://www.w3.org/ns/rdftest#TestTrigNegativeEval>   | 4   |
   +-------------------------------------------------------+-----+

   ../bin/sparql -d TriGTests/manifest.ttl -e '
   PREFIX mf: <http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#>
   PREFIX qt: <http://www.w3.org/2001/sw/DataAccess/tests/test-query#>
   PREFIX rdft: <http://www.w3.org/ns/rdftest#>
   SELECT ?name ?trig ?ntriples {
     ?m a mf:Manifest ;
        mf:entries MEMBERS(?entry) .
     ?entry a rdft:TestTrigEval ;
        mf:name ?name ;
        mf:action ?trig ;
        mf:result ?ntriples
   }' -l tsv | sed 1d > TrigEvalPos.tsv  # 136 lines

   ../bin/sparql -d TriGTests/manifest.ttl -e '
   PREFIX mf: <http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#>
   PREFIX qt: <http://www.w3.org/2001/sw/DataAccess/tests/test-query#>
   PREFIX rdft: <http://www.w3.org/ns/rdftest#>
   SELECT ?name ?trig {
     ?m a mf:Manifest ;
        mf:entries MEMBERS(?entry) .
     ?entry a rdft:TestTrigPositiveSyntax ;
        mf:name ?name ;
        mf:action ?trig
   }' -l tsv | sed 1d > TrigSynPos.tsv  # 96 lines

   # rdft:TestTrigNegativeSyntax > TrigSynNeg.tsv with 99 lines
   # rdft:TestTrigNegativeEval   > TrigEvalNeg.tsv with 4 lines
 */
{"hexsha": "97a6d3416dfd361360e4195257ad5dcb111948e3", "size": 3116, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/test_Trig.cpp", "max_stars_repo_name": "ericprud/SWObjects", "max_stars_repo_head_hexsha": "c2ceae74a9e20649dac84f1da1a4b0d2bd9ddce6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2015-06-29T17:17:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-21T12:05:40.000Z", "max_issues_repo_path": "tests/test_Trig.cpp", "max_issues_repo_name": "ericprud/SWObjects", "max_issues_repo_head_hexsha": "c2ceae74a9e20649dac84f1da1a4b0d2bd9ddce6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2016-01-17T20:12:24.000Z", "max_issues_repo_issues_event_max_datetime": "2016-12-20T20:32:52.000Z", "max_forks_repo_path": "tests/test_Trig.cpp", "max_forks_repo_name": "ericprud/SWObjects", "max_forks_repo_head_hexsha": "c2ceae74a9e20649dac84f1da1a4b0d2bd9ddce6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2015-04-08T19:12:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-28T08:52:16.000Z", "avg_line_length": 45.1594202899, "max_line_length": 337, "alphanum_fraction": 0.6729781772, "num_tokens": 993}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The :mod:`samplesizelib.linear.statistical` contains the following classes:

- :class:`samplesizelib.linear.statistical.LagrangeEstimator`
- :class:`samplesizelib.linear.statistical.LikelihoodRatioEstimator`
- :class:`samplesizelib.linear.statistical.WaldEstimator`
"""
from __future__ import print_function

__docformat__ = 'restructuredtext'

import numpy as np
import scipy.stats as sps
from scipy.optimize import minimize
from scipy.linalg import fractional_matrix_power

from ..shared.estimator import SampleSizeEstimator
from .models import RegressionModel, LogisticModel


class LagrangeEstimator(SampleSizeEstimator):
    r"""
    Description of Lagrange Method

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param ind_u: boolean mask that marks the model parameters under test
    :type ind_u: numpy.ndarray
    :param epsilon: offset added to the tested parameters to define the alternative hypothesis
    :type epsilon: float
    :param alpha: significance level (type I error probability)
    :type alpha: float
    :param beta: type II error probability (one minus the desired power)
    :type beta: float
    """

    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel

        self.ind_u = kwards.pop('ind_u', None)
        if self.ind_u is not None and not isinstance(self.ind_u, np.ndarray):
            raise ValueError(
                "The ind_u should be numpy.ndarray but get {}".format(
                    self.ind_u))

        self.epsilon = kwards.pop('epsilon', 0.3)
        if self.epsilon <= 0:
            raise ValueError(
                "The epsilon must be positive value but get {}".format(
                    self.epsilon))

        self.alpha = kwards.pop('alpha', 0.05)
        if self.alpha < 0 or self.alpha > 1:
            raise ValueError(
                "The alpha must be between 0 and 1 but get {}".format(
                    self.alpha))

        self.beta = kwards.pop('beta', 0.05)
        if self.beta < 0 or self.beta > 1:
            raise ValueError(
                "The beta must be between 0 and 1 but get {}".format(
                    self.beta))

    def _fix_variables(self, f, x1, ind_1, dim=0):
        r"""
        Fix the variables selected by ind_1 at the values x1 and return f as a
        function of the remaining variables (dim selects scalar, gradient or
        hessian output).
        """
        ind_2 = (ind_1 == False)
        if dim == 0:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))
        elif dim == 1:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))[ind_2]
        elif dim == 2:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))[ind_2][:, ind_2]
        else:
            raise ValueError(
                'dim must be between 0 and 2 but get {}'.format(dim))

    @staticmethod
    def _negative_func(f):
        r"""
        Return a function that computes -f.
        """
        return lambda x, *args: -f(x, *args)

    @staticmethod
    def _stitch_vectors(x1, x2, ind_1):
        r"""
        Merge x1 (at the positions where ind_1 is True) and x2 (elsewhere)
        into a single vector.
        """
        x = np.zeros(ind_1.size)
        x[ind_1] = x1
        x[ind_1 == False] = x2
        return x

    @staticmethod
    def _get_gamma(ind_u, alpha, beta):
        r"""
        Return the shift gamma at which the beta-quantile of the shifted
        chi-square distribution equals the (1 - alpha) quantile of the
        central chi-square distribution.
        """
        k = ind_u.sum()
        f = lambda x: np.abs(sps.chi2(k, loc=x).ppf(beta) - sps.chi2(k).ppf(1 - alpha))
        gamma = minimize(f, 0.)['x'][0]
        return gamma

    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.

        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.

        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        y, X = target, features
        m, n = features.shape
        if self.ind_u is None:
            ind_u = np.concatenate(
                [np.ones(n // 2), np.zeros(n - n // 2)]).astype(bool)
        else:
            ind_u = self.ind_u
        ind_v = ind_u == False

        model = self.statmodel(y, X)
        w_hat = model.fit()
        mu = model.predict(w_hat)

        if len(np.unique(y)) == 2:
            v = mu * (1 - mu)
        else:
            v = np.ones_like(y) * (mu - y).var()

        wu0 = w_hat[ind_u] + self.epsilon
        wv_hat = minimize(
            self._fix_variables(self._negative_func(model.loglike_fixed), wu0, ind_u),
            np.zeros(X.shape[1] - ind_u.sum()),
            jac=self._fix_variables(self._negative_func(model.score_fixed), wu0, ind_u, 1),
            hess=self._fix_variables(self._negative_func(model.hessian_fixed), wu0, ind_u, 2),
            method='Newton-CG')['x']
        w_0 = self._stitch_vectors(wu0, wv_hat, ind_u)

        I = -model.hessian_fixed(w_0)
        I_muv = I[ind_u][:, ind_v]
        I_mvv = I[ind_v][:, ind_v]

        Z_star = (X[:, ind_u].T - I_muv @ np.linalg.inv(I_mvv) @ X[:, ind_v].T).T
        Z_star_matrices = np.asarray(
            [Z_star[i, None].T @ Z_star[i, None] for i in range(m)])

        delta = np.ones_like(y)
        mu_star = model.predict(w_0)

        xi_m = (((mu - mu_star) * delta[None, :]).T * Z_star).sum(0)
        Sigma_m = ((v * delta**2).reshape(-1, 1, 1) * Z_star_matrices).sum(0)

        gamma_0 = (xi_m @ np.linalg.inv(Sigma_m) @ xi_m) / m
        gamma = self._get_gamma(ind_u, self.alpha, self.beta)

        m_star = np.ceil(gamma / gamma_0).astype(int)

        self._set_status(100.)
        return {'m*': m_star}


class LikelihoodRatioEstimator(SampleSizeEstimator):
    r"""
    Description of Likelihood Ratio Method

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param ind_u: boolean mask that marks the model parameters under test
    :type ind_u: numpy.ndarray
    :param epsilon: offset added to the tested parameters to define the alternative hypothesis
    :type epsilon: float
    :param alpha: significance level (type I error probability)
    :type alpha: float
    :param beta: type II error probability (one minus the desired power)
    :type beta: float
    """

    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel

        self.ind_u = kwards.pop('ind_u', None)
        if self.ind_u is not None and not isinstance(self.ind_u, np.ndarray):
            raise ValueError(
                "The ind_u should be numpy.ndarray but get {}".format(
                    self.ind_u))

        self.epsilon = kwards.pop('epsilon', 0.3)
        if self.epsilon <= 0:
            raise ValueError(
                "The epsilon must be positive value but get {}".format(
                    self.epsilon))

        self.alpha = kwards.pop('alpha', 0.05)
        if self.alpha < 0 or self.alpha > 1:
            raise ValueError(
                "The alpha must be between 0 and 1 but get {}".format(
                    self.alpha))

        self.beta = kwards.pop('beta', 0.05)
        if self.beta < 0 or self.beta > 1:
            raise ValueError(
                "The beta must be between 0 and 1 but get {}".format(
                    self.beta))

    def _fix_variables(self, f, x1, ind_1, dim=0):
        r"""
        Fix the variables selected by ind_1 at the values x1 and return f as a
        function of the remaining variables (dim selects scalar, gradient or
        hessian output).
        """
        ind_2 = (ind_1 == False)
        if dim == 0:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))
        elif dim == 1:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))[ind_2]
        elif dim == 2:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))[ind_2][:, ind_2]
        else:
            raise ValueError(
                'dim must be between 0 and 2 but get {}'.format(dim))

    @staticmethod
    def _negative_func(f):
        r"""
        Return a function that computes -f.
        """
        return lambda x, *args: -f(x, *args)

    @staticmethod
    def _stitch_vectors(x1, x2, ind_1):
        r"""
        Merge x1 (at the positions where ind_1 is True) and x2 (elsewhere)
        into a single vector.
        """
        x = np.zeros(ind_1.size)
        x[ind_1] = x1
        x[ind_1 == False] = x2
        return x

    @staticmethod
    def _get_gamma(ind_u, alpha, beta):
        r"""
        Return the shift gamma at which the beta-quantile of the shifted
        chi-square distribution equals the (1 - alpha) quantile of the
        central chi-square distribution.
        """
        k = ind_u.sum()
        f = lambda x: np.abs(sps.chi2(k, loc=x).ppf(beta) - sps.chi2(k).ppf(1 - alpha))
        gamma = minimize(f, 0.)['x'][0]
        return gamma

    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.

        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.

        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        y, X = target, features
        m, n = features.shape
        if self.ind_u is None:
            ind_u = np.concatenate(
                [np.ones(n // 2), np.zeros(n - n // 2)]).astype(bool)
        else:
            ind_u = self.ind_u

        model = self.statmodel(y, X)
        w_hat = model.fit()

        wu0 = w_hat[ind_u] + self.epsilon
        wv_hat = minimize(
            self._fix_variables(self._negative_func(model.loglike_fixed), wu0, ind_u),
            np.zeros(X.shape[1] - ind_u.sum()),
            jac=self._fix_variables(self._negative_func(model.score_fixed), wu0, ind_u, 1),
            hess=self._fix_variables(self._negative_func(model.hessian_fixed), wu0, ind_u, 2),
            method='Newton-CG')['x']
        w_0 = self._stitch_vectors(wu0, wv_hat, ind_u)

        theta = X @ w_hat
        theta_star = X @ w_0

        if len(np.unique(y)) == 2:
            a = 1.
            b = lambda w: -np.log(1 - model.predict(w) + 1e-30)
            grad_b = lambda w: model.predict(w)
        else:
            a = 2 * ((y - theta).std())**2
            b = lambda w: model.predict(w)**2
            grad_b = lambda w: 2 * model.predict(w)

        delta_star = 2 * (1 / a) * (
            (theta - theta_star) * grad_b(w_hat) - b(w_hat) + b(w_0)).mean()

        gamma_star = self._get_gamma(ind_u, self.alpha, self.beta)

        m_star = np.ceil(gamma_star / delta_star).astype(int)

        self._set_status(100.)
        return {'m*': m_star}


class WaldEstimator(SampleSizeEstimator):
    r"""
    Description of Wald Method

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param ind_u: boolean mask that marks the model parameters under test
    :type ind_u: numpy.ndarray
    :param epsilon: offset added to the tested parameters to define the alternative hypothesis
    :type epsilon: float
    :param alpha: significance level (type I error probability)
    :type alpha: float
    :param beta: type II error probability (one minus the desired power)
    :type beta: float
    """

    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel

        self.ind_u = kwards.pop('ind_u', None)
        if self.ind_u is not None and not isinstance(self.ind_u, np.ndarray):
            raise ValueError(
                "The ind_u should be numpy.ndarray but get {}".format(
                    self.ind_u))

        self.epsilon = kwards.pop('epsilon', 0.3)
        if self.epsilon <= 0:
            raise ValueError(
                "The epsilon must be positive value but get {}".format(
                    self.epsilon))

        self.alpha = kwards.pop('alpha', 0.05)
        if self.alpha < 0 or self.alpha > 1:
            raise ValueError(
                "The alpha must be between 0 and 1 but get {}".format(
                    self.alpha))

        self.beta = kwards.pop('beta', 0.05)
        if self.beta < 0 or self.beta > 1:
            raise ValueError(
                "The beta must be between 0 and 1 but get {}".format(
                    self.beta))

    @staticmethod
    def _fix_alpha(alpha, Sigma, Sigma_star):
        r"""
        Return the corrected significance level alpha_star that accounts for
        the mismatch between the covariance estimates Sigma and Sigma_star
        (moment-matched chi-square approximation).
        """
        p = Sigma.shape[0]
        Sigma_12 = fractional_matrix_power(Sigma, 0.5)
        matrix = Sigma_12.T @ np.linalg.inv(Sigma_star) @ Sigma_12
        lambdas = np.real(np.linalg.eigvals(matrix))

        factorials = [1, 1, 2, 8]
        k = np.asarray(
            [factorials[r] * np.sum(lambdas**r) for r in [1, 1, 2, 3]])

        t1 = 4 * k[1] * k[2]**2 + k[3] * (k[2] - k[1]**2)
        t2 = k[3] * k[1] - 2 * k[2]**2

        chi_quantile = sps.chi2(p).ppf(1 - alpha)

        if t1 < 10**(-5):
            a_new = 2 + (k[1]**2) / (k[2]**2)
            b_new = (k[1]**3) / k[2] + k[1]
            alpha_star = 1 - sps.invgamma(a_new, scale=b_new).cdf(chi_quantile)
        elif t2 < 10**(-5):
            a_new = (k[1]**2) / k[2]
            b_new = k[2] / k[1]
            alpha_star = 1 - sps.gamma(a_new, scale=b_new).cdf(chi_quantile)
        else:
            a1 = 2 * k[1] * (k[3] * k[1] + k[2] * k[1]**2 - k[2]**2) / t1
            a2 = 3 + 2 * k[2] * (k[2] + k[1]**2) / t2
            alpha_star = 1 - sps.f(2 * a1, 2 * a2).cdf(a2 * t2 * chi_quantile / (a1 * t1))
        return alpha_star

    def _fix_variables(self, f, x1, ind_1, dim=0):
        r"""
        Fix the variables selected by ind_1 at the values x1 and return f as a
        function of the remaining variables (dim selects scalar, gradient or
        hessian output).
        """
        ind_2 = (ind_1 == False)
        if dim == 0:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))
        elif dim == 1:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))[ind_2]
        elif dim == 2:
            return lambda x2: f(self._stitch_vectors(x1, x2, ind_1))[ind_2][:, ind_2]
        else:
            raise ValueError(
                'dim must be between 0 and 2 but get {}'.format(dim))

    @staticmethod
    def _negative_func(f):
        r"""
        Return a function that computes -f.
        """
        return lambda x, *args: -f(x, *args)

    @staticmethod
    def _stitch_vectors(x1, x2, ind_1):
        r"""
        Merge x1 (at the positions where ind_1 is True) and x2 (elsewhere)
        into a single vector.
        """
        x = np.zeros(ind_1.size)
        x[ind_1] = x1
        x[ind_1 == False] = x2
        return x

    @staticmethod
    def _get_gamma(ind_u, alpha, beta):
        r"""
        Return the shift gamma at which the beta-quantile of the shifted
        chi-square distribution equals the (1 - alpha) quantile of the
        central chi-square distribution.
        """
        k = ind_u.sum()
        f = lambda x: np.abs(sps.chi2(k, loc=x).ppf(beta) - sps.chi2(k).ppf(1 - alpha))
        gamma = minimize(f, 0.)['x'][0]
        return gamma

    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.

        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.

        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        y, X = target, features
        m, n = features.shape
        if self.ind_u is None:
            ind_u = np.concatenate(
                [np.ones(n // 2), np.zeros(n - n // 2)]).astype(bool)
        else:
            ind_u = self.ind_u

        model = self.statmodel(y, X)
        w_hat = model.fit()

        wu0 = w_hat[ind_u] + self.epsilon
        wv_hat = minimize(
            self._fix_variables(self._negative_func(model.loglike_fixed), wu0, ind_u),
            np.zeros(X.shape[1] - ind_u.sum()),
            jac=self._fix_variables(self._negative_func(model.score_fixed), wu0, ind_u, 1),
            hess=self._fix_variables(self._negative_func(model.hessian_fixed), wu0, ind_u, 2),
            method='Newton-CG')['x']
        w_0 = self._stitch_vectors(wu0, wv_hat, ind_u)

        V = np.array(
            np.linalg.inv(-model.hessian_fixed(w_hat))[ind_u][:, ind_u], ndmin=2)
        V_star = np.array(
            np.linalg.inv(-model.hessian_fixed(w_0))[ind_u][:, ind_u], ndmin=2)

        Sigma = m * V
        Sigma_star = m * V_star

        w_u = w_hat[ind_u]
        w_u0 = w_0[ind_u]

        delta = np.dot((w_u - w_u0), np.linalg.inv(Sigma) @ (w_u - w_u0))

        classification = len(np.unique(y)) == 2
        if classification:
            alpha_star = self._fix_alpha(self.alpha, Sigma, Sigma_star)
        else:
            alpha_star = self.alpha

        gamma_star = self._get_gamma(ind_u, alpha_star, self.beta)

        m_star = np.ceil(gamma_star / delta).astype(int)

        self._set_status(100.)
        return {'m*': m_star}
{"hexsha": "646d66880864443f2610d9442bb85d0f83203667", "size": 16230, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/samplesizelib/linear/statistical.py", "max_stars_repo_name": "andriygav/SampleSizeEstimation", "max_stars_repo_head_hexsha": "079959711a46201e08ae3e0d41815bcb70d7efc4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-16T18:24:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T11:52:24.000Z", "max_issues_repo_path": "src/samplesizelib/linear/statistical.py", "max_issues_repo_name": "andriygav/SampleSizeEstimation", "max_issues_repo_head_hexsha": "079959711a46201e08ae3e0d41815bcb70d7efc4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-08-16T17:53:49.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-18T19:57:40.000Z", "max_forks_repo_path": "src/samplesizelib/linear/statistical.py", "max_forks_repo_name": "andriygav/SampleSizeEstimation", "max_forks_repo_head_hexsha": "079959711a46201e08ae3e0d41815bcb70d7efc4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1901840491, "max_line_length": 136, "alphanum_fraction": 0.5454097351, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 4468}
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.11.2
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/tylerlum/ufc_automated_scoring_system/blob/main/UFC_Automated_Scoring_System.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="TPiJGcdUYaoU"
# # UFC Data Scraping
#
# The goal of this notebook is to:
# * Explore the FightMetrics webpage to scrape the fight and fighter information we need
# * Preprocess the data
# * Store the fight and fighter data into csv files
#
# Functional as of April 2021

# + [markdown] id="Qx-IhTkrwliQ"
# ## Set parameters for dataset creation
#
# NUM_EVENTS_INPUT: Integer number of UFC events to get fights from or "All" for all events. There are about 10 fights per event.
#
# DATA_MODE_INPUT: Either "Summary" or "Round by Round". Either get data with columns that are summaries of the whole fight, or summaries round-by-round (more columns).

# + cellView="form" id="tYkaAlJNfhul"
# NUM_EVENTS_INPUT = "All"  #@param {type:"string"}
NUM_EVENTS_INPUT = "20"  #@param {type:"string"}
DATA_MODE_INPUT = "Summary"  #@param {type:"string"}
# -

NUM_EVENTS = None if NUM_EVENTS_INPUT == "All" else int(NUM_EVENTS_INPUT)
ROUND_BY_ROUND = (DATA_MODE_INPUT == "Round by Round")

# + [markdown] id="7897ryXiaoCv"
# ## Get information about all fighters

# + id="ioQESt2oZPXz"
import pandas as pd
from tqdm import tqdm
import numpy as np
import re
from string import ascii_lowercase

# + id="0WtkXEs0LNry"
def get_all_fighters():
    '''Get pandas table of all UFC fighters (Name, Height, Weight, Reach, Record, etc.)'''
    all_fighters_tables = []
    for c in tqdm(ascii_lowercase):
        all_fighters_url = f"http://ufcstats.com/statistics/fighters?char={c}&page=all"
        all_fighters_table = pd.read_html(all_fighters_url)[0]
        all_fighters_tables.append(all_fighters_table)
    all_fighters = pd.concat(all_fighters_tables)
    return all_fighters

# + colab={"base_uri": "https://localhost:8080/", "height": 216} id="AkCCSiuUa4lu" outputId="de1fcb58-1d27-4f12-8dd7-5644ddfb2ec6"
ALL_FIGHTERS = get_all_fighters()
ALL_FIGHTERS.head()

# + colab={"base_uri": "https://localhost:8080/"} id="Zoayc5Ad3tKm" outputId="629230e2-28a8-4ff8-d72f-1fa366f4f2fb"
ALL_FIGHTERS.dtypes

# + [markdown] id="bIDzcIHz3mpC"
# ## Clean fighter data
#
# TODO: Convert height, weight, reach to floats.

# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="gdNWtyaB5cTi" outputId="db88f028-18b9-460d-e998-3a4c887c085f"
ALL_FIGHTERS = ALL_FIGHTERS.replace("^-+", np.nan, regex=True)  # Replace -- and --- with nan
ALL_FIGHTERS = ALL_FIGHTERS.dropna(subset=["First", "Last"], how='all')  # Remove rows with no name
ALL_FIGHTERS.head()

# + [markdown] id="HBvmzviJ625s"
# ## Helper functions

# + id="_KC8TRhZSW58"
def get_fighters(fighters_string):
    '''Parses string containing two fighter names.
    Uses ALL_FIGHTERS global to remove ambiguity in parsing. Returns each fighter name
    Eg. "Robert Whittaker Kelvin Gastelum" => ("Robert Whittaker", "Kelvin Gastelum")'''
    # Default to None so we return cleanly when no known fighter name matches
    first_fighter, second_fighter = None, None
    for i, row in ALL_FIGHTERS.iterrows():
        fighter_name = f'{row["First"]} {row["Last"]}'
        if fighters_string.startswith(fighter_name):
            first_fighter = fighter_name
            second_fighter = fighters_string[len(fighter_name)+1:]
            break
    return first_fighter, second_fighter

def remove_duplicates_keep_order(list_):
    '''Removes duplicates while keeping same order'''
    return list(dict.fromkeys(list_))

# + [markdown] id="hYGOutwI-g9H"
# ## Get a list of all UFC events

# + id="UqZSSQ8r-rMm"
from urllib.request import urlopen
from string import ascii_uppercase
from dateutil import parser
from datetime import datetime

# + id="t-X7tWV_NiBo"
ALL_PAST_EVENTS_URL = "http://ufcstats.com/statistics/events/completed?page=all"

# + id="JOvgx_9Z1SOv"
def get_all_events(all_past_events_url):
    '''Takes in URL to all past events.
    Returns list of urls, each one representing a UFC event'''
    all_past_events_html = urlopen(all_past_events_url).read().decode("utf-8")

    # Regex for "http://ufcstats.com/event-details/<alphanumeric>"
    # Eg. "http://ufcstats.com/event-details/27541033b97c076d"
    pattern = "\"http://ufcstats.com/event-details/[a-zA-Z0-9_]+\""
    all_urls = re.findall(pattern, all_past_events_html)

    # Remove quotes and duplicates
    all_urls = [url.strip("\"") for url in all_urls]
    all_urls = remove_duplicates_keep_order(all_urls)
    return all_urls

# + colab={"base_uri": "https://localhost:8080/"} id="zzxASuh8_Vnh" outputId="74c88906-ed19-4754-9b28-dde55d6bba45"
# Events
ALL_EVENT_URLS = get_all_events(ALL_PAST_EVENTS_URL)
print(f"Got {len(ALL_EVENT_URLS)} events")
print()
print("Removing the most recent event, since it might not have happened yet")
ALL_EVENT_URLS = ALL_EVENT_URLS[1:]
print(f"Now got {len(ALL_EVENT_URLS)} events")
print(ALL_EVENT_URLS)

# + [markdown] id="QoU8LAPK_dbQ"
# ## Get a list of UFC fights
#
# TODO: Right now only sees if result is win. Else sets winner to None. See if this can be improved.

# + id="2nDi4mo6CLn_"
def get_all_fights_in_event(past_event_url, get_results=False):
    '''Takes in a single URL to a past event.
    If get_results=True, returns fight_urls, winners, methods
    else, return fight_urls'''
    # Regex for "http://ufcstats.com/fight-details/<alphanumeric>"
    # Eg. "http://ufcstats.com/fight-details/f67aa0b16e16a9ea"
    past_event_html = urlopen(past_event_url).read().decode("utf-8")
    pattern = "\"http://ufcstats.com/fight-details/[a-zA-Z0-9_]+\""
    fight_urls = re.findall(pattern, past_event_html)

    # Remove quotes and duplicates
    fight_urls = [url.strip("\"") for url in fight_urls]
    fight_urls = remove_duplicates_keep_order(fight_urls)

    # Get the winner and method (dec or KO or sub) of each fight
    past_event_table = pd.read_html(past_event_url)[0]  # Will be length 1 list
    winners, methods = [], []
    for _, row in past_event_table.iterrows():
        # TODO: Improve this processing of result
        result = row["W/L"].split(' ')[0]
        if result == "win":
            winner, _ = get_fighters(row["Fighter"])
        else:
            winner = None
        winners.append(winner)
        methods.append(row["Method"])

    if get_results:
        return fight_urls, winners, methods
    else:
        return fight_urls

# + id="7_pNUnyMPxkM"
def get_all_fights(all_event_urls, num_events=None):
    '''Takes in list of URLs to past events.
    Returns 3 lists: urls, winners, methods, each representing a UFC fight.
    Set num_events to be the number of events to get fights from. Set to None if want all.'''
    if num_events is None:
        num_events = len(all_event_urls)

    all_fight_urls, all_winners, all_methods = [], [], []
    for i, event_url in enumerate(tqdm(all_event_urls[:num_events])):
        # For each event, get the fight urls and winners
        fight_urls, winners, methods = get_all_fights_in_event(event_url, get_results=True)
        all_fight_urls.extend(fight_urls)
        all_winners.extend(winners)
        all_methods.extend(methods)
    return all_fight_urls, all_winners, all_methods

# + colab={"base_uri": "https://localhost:8080/"} id="Q66lyNtAF-Vo" outputId="c7eb935e-443a-4498-8690-f09c8e8be3ab"
FIGHT_URLS, WINNERS, METHODS = get_all_fights(ALL_EVENT_URLS, num_events=NUM_EVENTS)
print(f"Got {len(FIGHT_URLS)} fights")
print(FIGHT_URLS)
print(WINNERS)
print(METHODS)
assert(len(FIGHT_URLS) == len(WINNERS))
assert(len(FIGHT_URLS) == len(METHODS))

# + [markdown] id="CzlsyBU6DdRE"
# ## Get fight tables

# + id="zJjLUhEyDcSs"
def get_labeled_fight_tables(fight_url):
    '''Convert fight url to dictionary of pandas tables of information.
    Before, gave a list of tables that was hard to understand.
    Now have Totals, Per Round Totals, Significant Strikes, Per Round Significant Strikes'''
    fight_tables = pd.read_html(fight_url)
    labeled_fight_tables = {}
    labeled_fight_tables['Totals'] = fight_tables[0]
    labeled_fight_tables['Per Round Totals'] = fight_tables[1]
    labeled_fight_tables['Significant Strikes'] = fight_tables[2]
    labeled_fight_tables['Per Round Significant Strikes'] = fight_tables[3]
    return labeled_fight_tables

# + id="08jcNbZaDlBE"
RAW_FIGHT_TABLES_LIST = []
for url in tqdm(FIGHT_URLS):
    RAW_FIGHT_TABLES_LIST.append(get_labeled_fight_tables(url))

# + id="c9msProI12dH"
RAW_FIGHT_TABLES_LIST[0]['Totals'].head()

# + id="5_IIeQRx13WJ"
RAW_FIGHT_TABLES_LIST[0]['Per Round Totals'].head()

# + id="b8vW4zw818TK"
RAW_FIGHT_TABLES_LIST[0]['Significant Strikes'].head()

# + id="LtCciS5g16MB"
RAW_FIGHT_TABLES_LIST[0]['Per Round Significant Strikes'].head()

# + [markdown] id="r6YwJd-fAOwd"
# ## Clean fight information
#
# Separate each fighter's information into a different column
#
# TODO: Lots of stuff to improve. Smarter use of Totals, round by round, and significant strikes. Can also use non integer information, total attempted strikes (not just landed), fighter information, etc. All of those being ignored right now. Find nice way to parse new information round by round. Handle no winner case better. May need to add ignore_index=True for pd.concat

# + id="-PfTg13LB3ck"
def parse_string(row_string):
    '''Break string into two parts: one for fighter 0 and one for fighter 1
    Eg. 150 of 284  62 of 209 => (150 of 284, 62 of 209)'''
    if not isinstance(row_string, str):
        return "0", "0"

    # Note: assumes the two stats are separated by a double space, so the
    # middle token of the split is empty and skipped by the +1 below
    string_split = row_string.split(" ")
    first_fighter_stat = " ".join(string_split[:len(string_split)//2])
    second_fighter_stat = " ".join(string_split[len(string_split)//2+1:])
    return first_fighter_stat, second_fighter_stat

# + id="dqnRE1IfMY9k"
def convert_to_int_or_double_if_possible(string):
    '''Convert string to int or double if possible
    If has a percent sign, tries to remove it and continue.'''
    def isfloat(value):
        try:
            float(value)
            return True
        except ValueError:
            return False

    # If input is not string, then return it unchanged
    if not isinstance(string, str):
        return string

    # Remove %
    if "%" in string:
        string = string.strip("%")

    # Convert to int or float
    if isfloat(string) and float(string).is_integer():
        return int(string)
    if isfloat(string):
        return float(string)
    return string

# + id="ORlZYocyRO4M"
def process_fight(raw_fight_table):
    '''Takes in a raw, one-row pandas fight table.
    Returns a pandas dataframe representing the fight statistics'''
    # Break up columns.
    # Eg. "Name" => "Fighter 0 Name", "Fighter 1 Name"
    #     "KD" => "Fighter 0 KD", "Fighter 1 KD"
    new_columns = []
    for column in raw_fight_table.columns:
        new_columns.append(f"Fighter 0 {column}")
        new_columns.append(f"Fighter 1 {column}")

    # Go through each row and break up the data into the columns
    new_rows = []
    for i, row in raw_fight_table.iterrows():
        new_row = []
        for column in raw_fight_table.columns:
            # Split string at the center space
            stat1, stat2 = parse_string(row[column])

            # TODO: Update this to capture more information
            # Has "100 of 120" type stat. Just store first number
            if " of " in stat1:
                stat1 = stat1.split(" of ")[0]
            if " of " in stat2:
                stat2 = stat2.split(" of ")[0]

            # Has "2:32" type stat (min:sec). Convert to sec.
            if len(re.findall("^[0-9]+:[0-9]+$", stat1)) > 0:
                min1, sec1 = stat1.split(":")[0], stat1.split(":")[1]
                stat1 = convert_to_int_or_double_if_possible(min1)*60 + convert_to_int_or_double_if_possible(sec1)
            if len(re.findall("^[0-9]+:[0-9]+$", stat2)) > 0:
                min2, sec2 = stat2.split(":")[0], stat2.split(":")[1]
                stat2 = convert_to_int_or_double_if_possible(min2)*60 + convert_to_int_or_double_if_possible(sec2)

            # Convert string to float or int if possible
            stat1 = convert_to_int_or_double_if_possible(stat1)
            stat2 = convert_to_int_or_double_if_possible(stat2)

            # Add to row
            new_row.append(stat1)
            new_row.append(stat2)
        new_rows.append(new_row)

    # Bring together into new dataframe, then only store the numerical values
    # TODO: Process better to keep more info, not throw so much away
    df = pd.DataFrame(new_rows, columns=new_columns)

    # Add in names, using smarter parsing
    df = df.drop(columns=['Fighter 0 Fighter', 'Fighter 1 Fighter'])
    fighters_string = raw_fight_table["Fighter"][0]  # Only 1 row table
    fighter0, fighter1 = get_fighters(fighters_string)
    df['Fighter 0 Name'] = fighter0
    df['Fighter 1 Name'] = fighter1
    return df

# + id="5oIdF2niZpag"
def process_raw_fight_tables(raw_fight_tables, winner, method, round_by_round=False):
    '''Takes in set of raw fight table (one fight), the name of the fight winner,
    and the method of winning. Returns a cleaned pandas table.
    Set round_by_round=True to use the round-by-round data. Otherwise, uses full fight stats.'''
    def create_aggregated_fight_table(raw_fight_tables):
        # Aggregate data from multiple tables
        fight_table = process_fight(raw_fight_tables["Totals"])
        fight_table2 = process_fight(raw_fight_tables["Significant Strikes"])

        # Rename column names with identical data to match
        fight_table2 = fight_table2.rename(columns={"Fighter 0 Sig. str": "Fighter 0 Sig. str.",
                                                    "Fighter 1 Sig. str": "Fighter 1 Sig. str."})

        # Bring tables together, then remove duplicates
        fight_table = pd.concat([fight_table, fight_table2], axis=1)
        fight_table = fight_table.loc[:,~fight_table.columns.duplicated()]
        return fight_table

    def create_aggregated_round_by_round_fight_table(raw_fight_tables):
        ##### Aggregate data totals table
        tables = []
        for i, row in raw_fight_tables["Per Round Totals"].iterrows():
            # Get df of one round
            df = pd.DataFrame(row)
            values = list(df[i].to_dict().values())
            cols = list(raw_fight_tables["Totals"].columns)
            df = pd.DataFrame([values], columns=cols)

            # Update columns with round number
            new_cols = [f"Round {i+1} {c}" if c != "Fighter" else c for c in cols]
            df.columns = new_cols
            tables.append(process_fight(df))

        # Concatenate round-by-round horizontally, so each row is for 1 fight.
        # Then remove duplicates
        totals_df = pd.concat(tables, axis=1)
        totals_df = totals_df.loc[:,~totals_df.columns.duplicated()]

        ##### Aggregate data significant strikes table
        tables = []
        for i, row in raw_fight_tables["Per Round Significant Strikes"].iterrows():
            # Get df of one round
            df = pd.DataFrame(row)
            values = list(df[i].to_dict().values())
            cols = list(raw_fight_tables["Significant Strikes"].columns)
            if len(values) != len(cols):
                values = values[:-1]  # Remove last column values, as shown above, has extra column for no reason
            df = pd.DataFrame([values], columns=cols)

            # Update columns with round number
            new_cols = [f"Round {i+1} {c}" if c != "Fighter" else c for c in cols]
            df.columns = new_cols
            tables.append(process_fight(df))

        # Concatenate round-by-round horizontally, so each row is for 1 fight
        # Then remove duplicates
        sig_strikes_df = pd.concat(tables, axis=1)
        sig_strikes_df = sig_strikes_df.loc[:,~sig_strikes_df.columns.duplicated()]

        ##### Bring tables together, then remove duplicates
        fight_table = pd.concat([totals_df, sig_strikes_df], axis=1)
        fight_table = fight_table.loc[:,~fight_table.columns.duplicated()]
        return fight_table

    if round_by_round:
        fight_table = create_aggregated_round_by_round_fight_table(raw_fight_tables)
    else:
        fight_table = create_aggregated_fight_table(raw_fight_tables)

    if fight_table["Fighter 0 Name"][0] == winner:
        label = 0
    elif fight_table["Fighter 1 Name"][0] == winner:
        label = 1
    else:
        print(f'ERROR: fight_table["Fighter 0 Name"]={fight_table["Fighter 0 Name"]}, fight_table["Fighter 1 Name"]={fight_table["Fighter 1 Name"]}, winner={winner}')
        label = -1
    fight_table['Winner'] = label
    fight_table['Method'] = method
    return fight_table

# + id="BUyy5MUhNTkJ"
FIGHT_TABLE = []
for i in tqdm(range(len(RAW_FIGHT_TABLES_LIST))):
    FIGHT_TABLE.append(process_raw_fight_tables(RAW_FIGHT_TABLES_LIST[i], WINNERS[i], METHODS[i], round_by_round=ROUND_BY_ROUND))
FIGHT_TABLE = pd.concat(FIGHT_TABLE, ignore_index=True)
FIGHT_TABLE = FIGHT_TABLE.replace("^-+", np.nan, regex=True)  # Replace -- and --- with nan

# + id="G9EhqLLcAWs-"
FIGHT_TABLE.head()

# + id="7hQjO9B2RDoZ"
FIGHT_TABLE.tail()

# + [markdown] id="pCMOvzM0efI4"
# ## Augment dataset by flipping around columns
#
# The system should work the same no matter what order we pass in the fighters. Let fighters be A and B. We want
#
# winner(fighter0=A, fighter1=B) = winner(fighter0=B, fighter1=A)

# + id="kM2b_cAif7rM"
def create_flipped_table(table):
    '''Rearranges columns of table so that each fight has two rows.
    Let fighters be A and B. One row has (Fighter 0 = A, Fighter 1 = B).
    One row has (Fighter 0 = B, Fighter 1 = A)
    Ensure same column order, as column names not looked at when passed to ML model'''
    # Get columns in flipped order, which moves the columns around, but changes column name order too
    flipped_columns = []
    for column in table.columns:
        if "Fighter 0" in column:
            flipped_columns.append(column.replace("Fighter 0", "Fighter 1"))
        elif "Fighter 1" in column:
            flipped_columns.append(column.replace("Fighter 1", "Fighter 0"))
        else:
            flipped_columns.append(column)
    flipped_table = table[flipped_columns].copy()  # copy to avoid SettingWithCopyWarning

    # Flips winners around
    if 'Winner' in flipped_table.columns:
        flipped_table['Winner'] = flipped_table['Winner'].replace([0, 1], [1, 0])

    # Change column names back to normal
    flipped_table.columns = table.columns
    return flipped_table

# + id="KQcGgKW6k-ba"
def add_rows_of_flipped_columns(table):
    flipped_table = create_flipped_table(table)
    new_table = pd.concat([table, flipped_table])
    return new_table

# + id="HnwZdNiplLF3"
FULL_FIGHT_TABLE = add_rows_of_flipped_columns(FIGHT_TABLE)

# + id="PlnOp-fbjknE"
FULL_FIGHT_TABLE.head()

# + [markdown] id="gu7-RmZOkP68"
# ## Example of augmented data

# + id="PHsGqr0_joHn"
FULL_FIGHT_TABLE[(FULL_FIGHT_TABLE['Fighter 0 Name'] == "Robert Whittaker") & (FULL_FIGHT_TABLE['Fighter 1 Name'] == "Kelvin Gastelum")]

# + id="samSx7Olj3vQ"
FULL_FIGHT_TABLE[(FULL_FIGHT_TABLE['Fighter 1 Name'] == "Robert Whittaker") & (FULL_FIGHT_TABLE['Fighter 0 Name'] == "Kelvin Gastelum")]

# + [markdown] id="3OOgguk84RJl"
# ## Additional data cleaning
#
# TODO: See if something better than replacing nan with 0. See if something better for labels than 0 and 1. Could remove fights with no winner, or handle them differently. Could remove fights that don't go to decision by removing based on Method.

# + id="RIS0yarnbTmj"
X = FIGHT_TABLE.drop(['Winner', 'Fighter 0 Name', 'Fighter 1 Name', 'Method'], axis=1).fillna(0)
y = FIGHT_TABLE[['Winner']]

# + id="QxOiDLXHfgDx"
X.head()

# + id="N5qqnw6Efh8K"
y.head()

# + [markdown] id="JwvrHfOCf1mh"
# ## Setup train/validate/test split
# Can't blindly use full fight table train/validate/test split, because the augmented data must stay together. If in train we know winner(A, B) = A, then we don't want to have winner(B, A) in the validation/test set.

# + id="CwlwAWNRcwJ1"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.33, random_state=0)
X_train, y_train = add_rows_of_flipped_columns(X_train), add_rows_of_flipped_columns(y_train)
X_valid, y_valid = add_rows_of_flipped_columns(X_valid), add_rows_of_flipped_columns(y_valid)
X_test, y_test = add_rows_of_flipped_columns(X_test), add_rows_of_flipped_columns(y_test)

# + id="aFmWIOydoJXd"
# Expect equal number of examples in Fighter 0 as Fighter 1
assert(len(y_train[y_train['Winner'] == 0]) == len(y_train[y_train['Winner'] == 1]))
assert(len(y_valid[y_valid['Winner'] == 0]) == len(y_valid[y_valid['Winner'] == 1]))
assert(len(y_test[y_test['Winner'] == 0]) == len(y_test[y_test['Winner'] == 1]))

# + id="jekwTdNAk3rE"
X_train.head()

# + id="BUeeqFtHpZQw"
y_train.head()

# + id="75PnIBkYpabr"
print(f"X_train.shape = {X_train.shape}")
print(f"X_valid.shape = {X_valid.shape}")
print(f"X_test.shape = {X_test.shape}")
print(f"y_train.shape = {y_train.shape}")
print(f"y_valid.shape = {y_valid.shape}")
print(f"y_test.shape = {y_test.shape}")

# + [markdown] id="ARUH8kxCbJpG"
# ## ML Models

# + id="0_v4cnEFbKp3"
from sklearn.ensemble import RandomForestClassifier

# + id="6gOrDS8AbPqM"
# Train
clf = RandomForestClassifier(max_depth=5, random_state=0)
clf.fit(X_train, y_train)

# Validate
accuracy_train = clf.score(X_train, y_train)
accuracy_valid = clf.score(X_valid, y_valid)
print(f"accuracy_train = {accuracy_train}")
print(f"accuracy_valid = {accuracy_valid}")

# + id="dn1Njq7ecfAT"
import matplotlib.pyplot as plt

# Visualize importances
plt.rcParams.update({'font.size': 8})
plt.barh(X_train.columns, clf.feature_importances_)

# + id="GifEEZiTq2yL"
# MLP
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
accuracy_train = clf.score(X_train, y_train)
accuracy_valid = clf.score(X_valid, y_valid)
print(f"accuracy_train = {accuracy_train}")
print(f"accuracy_valid = {accuracy_valid}")

# + id="r6tiCNo3rEE0"
# SVM
from sklearn.svm import SVC
clf = SVC(random_state=1).fit(X_train, y_train)
accuracy_train = clf.score(X_train, y_train)
accuracy_valid = clf.score(X_valid, y_valid)
print(f"accuracy_train = {accuracy_train}")
print(f"accuracy_valid = {accuracy_valid}")

# + id="KNxPPw2DrbpW"
# FFN
import tensorflow as tf

model = tf.keras.models.Sequential()
model.add(tf.keras.Input(shape=X_train.shape[1:]))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR")

# + id="agRwGSv2IEKa"
model.summary()

# + id="Wo9gE7_HtQhl"
model.fit(X_train, y_train, epochs=100, validation_data=(X_valid, y_valid))

# + id="RWTGwJUVtalk"
model.evaluate(X_train, y_train)
model.evaluate(X_valid, y_valid)

# + [markdown] id="ZOwm0hZTxZyr"
# ## Test out model manually

# + id="OWIypYX-uryi"
idx = 6

# + id="ofKgNtPPuC0V"
X_test.iloc[idx]

# + id="4tEAEW59ulsz"
# 0 means fighter 0 won. 1 means fighter 1 won.
y_test.iloc[idx]

# + id="67EbW0E1uXGi"
X_test.shape

# + id="3FWZP5LfuYYJ"
X_test.iloc[idx].shape

# + id="W19rlXXouGfs"
model.predict(np.expand_dims(X_test.iloc[idx], 0))

# + [markdown]
# ## Save data
#
# Store beginning file parameters.
# Use current date and time to save files uniquely.

# +
from datetime import datetime

now = datetime.now()
dt_string = now.strftime("%d-%m-%Y_%H:%M:%S")
print("dt_string =", dt_string)
# -

parameters_string = f"NUM_EVENTS_{NUM_EVENTS_INPUT}_DATA_MODE_{DATA_MODE_INPUT}"
print("parameters_string =", parameters_string)

import pickle

filename1 = f"FULL_FIGHT_TABLE_{parameters_string}_{dt_string}.csv"
filename2 = f"FIGHT_TABLE_{parameters_string}_{dt_string}.csv"
filename3 = f"ALL_FIGHTERS_{parameters_string}_{dt_string}.csv"
filename4 = f"RAW_FIGHT_TABLES_LIST_{parameters_string}_{dt_string}.pkl"
print(f"Saving to {filename1} and {filename2} and {filename3} and {filename4}")

FULL_FIGHT_TABLE.to_csv(filename1, index=False)
FIGHT_TABLE.to_csv(filename2, index=False)
ALL_FIGHTERS.to_csv(filename3, index=False)
with open(filename4, 'wb') as handle:
    pickle.dump(RAW_FIGHT_TABLES_LIST, handle, protocol=pickle.HIGHEST_PROTOCOL)

new = pd.read_csv(filename1)
new

with open(filename4, 'rb') as pickle_file:
    new2 = pickle.load(pickle_file)
len(new2[0])

# ## Experimental: Get detailed fighter information
#
# TODO: Get more detailed information about fighters, so we can change the task to fight prediction using fighter stats only. http://ufcstats.com/statistics/fighters?char=a&page=all has little information compared to http://ufcstats.com/fighter-details/33a331684283900f. Still lots to improve. Better features like strikes per minute. Handling nans better. Handling non win/losses better.

def get_all_fighters_detailed():
    '''Get pandas table with detailed information about all UFC fighters (KO's, strikes, etc.)'''
    fighter_detailed_tables = []

    # For each letter of the alphabet, get the fighters
    for c in tqdm(ascii_lowercase):
        # Each page has a list of fighter detail urls
        all_fighters_url = f"http://ufcstats.com/statistics/fighters?char={c}&page=all"
        all_fighters_html = urlopen(all_fighters_url).read().decode("utf-8")

        # Regex for "http://ufcstats.com/fighter-details/<alphanumeric>"
        # Eg. "http://ufcstats.com/fighter-details/27541033b97c076d"
        pattern = "\"http://ufcstats.com/fighter-details/[a-zA-Z0-9_]+\""
        urls = re.findall(pattern, all_fighters_html)

        # Remove quotes and duplicates
        urls = [url.strip("\"") for url in urls]
        urls = remove_duplicates_keep_order(urls)

        # For each fighter detail url, merge together their record information
        # Initially in form "Eddie Alvarez Rafael Dos Anjos", "0 0", "1:10, 0:00"
        # Want just "Eddie Alvarez", "0", "1:10", then convert to numbers
        # Just need to get the first value of each one, then average/sum/aggregate this together
        for url in urls:
            fighter_table = pd.read_html(url)[0].dropna(subset=["Time"], how='all')  # Drop initial row of nans

            # If no fight information, add empty dataframe
            if fighter_table.shape[0] == 0:
                df = pd.DataFrame()
                fighter_detailed_tables.append(df)
                continue

            # Preprocess certain values for consistency
            # TODO: Handle this better, perhaps keep more information
            fighter_table = fighter_table.drop(columns=["Method", "Event"])
            fighter_table.loc[~fighter_table['W/L'].isin(['win', 'loss']), 'W/L'] = "-1 -1"
            fighter_table.loc[fighter_table['W/L'] == 'win', 'W/L'] = "1 1"
            fighter_table.loc[fighter_table['W/L'] == 'loss', 'W/L'] = "0 0"
            times = [int(min_) * 60 + int(sec) for min_, sec in fighter_table['Time'].str.split(':')]
            fighter_table['Time'] = [f"{t} {t}" for t in times]

            # Parse each row to remove the other fighter's information
            new_rows = []
            for i, row in fighter_table.iterrows():
                # Get df of one round
                df = pd.DataFrame(row, columns=fighter_table.columns)
                values = [row[col] for col in df.columns]
                df = pd.DataFrame([values], columns=fighter_table.columns)
                df = process_fight(df)
                new_rows.append(df)

            # Put rows together, then only keep Fighter 0, then remove "Fighter 0 "
            totals_df = pd.concat(new_rows)
            totals_df = totals_df.loc[:, totals_df.columns.str.contains('Fighter 0')]
            totals_df.columns = [col.replace("Fighter 0 ", "") for col in totals_df.columns]
            totals_df = totals_df.replace("^-+", np.nan, regex=True)  # Replace -- and --- with nan

            # Summarize fighter in 1 row
            new_columns = []
            new_row = []
            for col in totals_df.columns:
                if col == "Name":
                    new_columns.append(col)
                    new_row.append(totals_df[col].iloc[0])
                else:
                    total_col = f"{col} Total"
                    avg_col = f"{col} Avg"
                    new_columns.extend([total_col, avg_col])
                    total = totals_df[col].sum()
                    avg = totals_df[col].mean()
                    new_row.extend([total, avg])
            totals_df = pd.DataFrame([new_row], columns=new_columns)
            fighter_detailed_tables.append(totals_df)
        break  # Remove this when ready

    all_fighters = pd.concat(fighter_detailed_tables)
    return all_fighters

x = get_all_fighters_detailed()
x.head()
{"hexsha": "4ed7cd5b5cff2f457d68acfd20d6d88a658dcede", "size": 29411, "ext": "py", "lang": "Python", "max_stars_repo_path": "UFC_data_scraping.py", "max_stars_repo_name": "tylerlum/ufc_automated_scoring_system", "max_stars_repo_head_hexsha": "130e87365c2856f8dcc1bf00f5afbcf1159c41f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "UFC_data_scraping.py", "max_issues_repo_name": "tylerlum/ufc_automated_scoring_system", "max_issues_repo_head_hexsha": "130e87365c2856f8dcc1bf00f5afbcf1159c41f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "UFC_data_scraping.py", "max_forks_repo_name": "tylerlum/ufc_automated_scoring_system", "max_forks_repo_head_hexsha": "130e87365c2856f8dcc1bf00f5afbcf1159c41f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3955613577, "max_line_length": 388, "alphanum_fraction": 0.6815137194, "include": true, "reason": "import numpy", "num_tokens": 8012}
# Do not use packages that are not in standard distribution of python
import numpy as np

from ._base_network import _baseNetwork


class SoftmaxRegression(_baseNetwork):
    def __init__(self, input_size=28*28, num_classes=10):
        '''
        A single layer softmax regression. The network is composed by:
        a linear layer without bias => ReLU activation => Softmax
        :param input_size: the input dimension
        :param num_classes: the number of classes in total
        '''
        super().__init__(input_size, num_classes)
        self._weight_init()
        self.X = None
        self.y = None

    def _weight_init(self):
        '''
        initialize weights of the single layer regression network. No bias term included.
        :return: None; self.weights is filled based on method
        - W1: The weight matrix of the linear layer of shape (input_size, num_classes)
        '''
        np.random.seed(1024)
        self.weights['W1'] = 0.001 * np.random.randn(self.input_size, self.num_classes)
        self.gradients['W1'] = np.zeros((self.input_size, self.num_classes))

    def forward(self, X, y, mode='train'):
        '''
        Compute loss and gradients using softmax with vectorization.

        :param X: a batch of image (N, 28x28)
        :param y: labels of images in the batch (N,)
        :return:
            loss: the loss associated with the batch
            accuracy: the accuracy of the batch
        '''
        # 1) Forward process: linear layer -> ReLU -> softmax,
        #    then compute the Cross-Entropy loss
        self.X = np.array(X)
        self.y = np.array(y)
        X = self.X
        y = self.y
        N = X.shape[0]

        z = X @ self.weights['W1']  # linear scores, shape (N, num_classes)

        y_hot = np.zeros((len(y), self.num_classes))  # one-hot encode the labels
        y_hot[np.arange(len(y)), y] = 1

        y_hat = self.ReLU(z)         # ReLU activation
        y_til = self.softmax(y_hat)  # class probabilities

        loss = self.cross_entropy_loss(y_til, y)
        accuracy = self.compute_accuracy(y_til, y)

        if mode != 'train':
            return loss, accuracy

        # 2) Backward process: chain rule through the softmax + cross-entropy
        #    gradient (y_til - y_hot) and the ReLU derivative; store in self.gradients
        self.gradients['W1'] = (1 / N) * np.dot(X.T, self.ReLU_dev(z) * (y_til - y_hot))

        return loss, accuracy
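
# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original file; kept as
# comments because the relative import prevents running this module
# directly). Assumes _baseNetwork provides ReLU, ReLU_dev, softmax,
# cross_entropy_loss and compute_accuracy as used above; the random batch is
# hypothetical.
#
#   np.random.seed(0)
#   X_batch = np.random.rand(64, 28 * 28)
#   y_batch = np.random.randint(0, 10, size=64)
#   model = SoftmaxRegression()
#   loss, acc = model.forward(X_batch, y_batch, mode='train')
#   print(loss, acc, model.gradients['W1'].shape)  # -> scalar, scalar, (784, 10)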
{"hexsha": "fd8cd9de24e22744179c9db52b6b89efb4526356", "size": 2629, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw1 Two-layer-network/models/softmax_regression.py", "max_stars_repo_name": "mtang1001/ML-Exploration", "max_stars_repo_head_hexsha": "6fec422eca127210e948945e6d15526947bfae8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw1 Two-layer-network/models/softmax_regression.py", "max_issues_repo_name": "mtang1001/ML-Exploration", "max_issues_repo_head_hexsha": "6fec422eca127210e948945e6d15526947bfae8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw1 Two-layer-network/models/softmax_regression.py", "max_forks_repo_name": "mtang1001/ML-Exploration", "max_forks_repo_head_hexsha": "6fec422eca127210e948945e6d15526947bfae8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5697674419, "max_line_length": 89, "alphanum_fraction": 0.5652339293, "include": true, "reason": "import numpy", "num_tokens": 605}
import wf_core_data.utils import pandas as pd import numpy as np import inflection import collections import itertools import copy import os import logging logger = logging.getLogger(__name__) TIME_FRAME_ID_VARIABLES = [ 'school_year', 'term' ] STUDENT_ID_VARIABLES = [ 'legal_entity', 'student_id_nwea' ] STUDENT_INFO_VARIABLES = [ 'first_name', 'last_name' ] STUDENT_ASSIGNMENT_VARIABLES = [ 'school', 'teacher_last_first', 'classroom', 'grade' ] ASSESSMENT_ID_VARIABLES = [ 'subject', 'course' ] RESULTS_VARIABLES = [ 'test_date', 'rit_score', 'rit_score_sem', 'percentile', 'percentile_se' ] TERMS = ( 'Fall', 'Winter', 'Spring' ) ASSESSMENTS = collections.OrderedDict(( ('Language Arts', [ 'Reading', 'Reading (Spanish)', 'Language Usage' ]), ('Mathematics', [ 'Math K-12' ]) )) SUBJECTS = list(ASSESSMENTS.keys()) COURSES=list(itertools.chain(*ASSESSMENTS.values())) DEFAULT_MIN_GROWTH_DAYS = 60 DEFAULT_SCHOOL_YEAR_DURATION_MONTHS = 9 def fetch_results_local_directory( path, file_extensions=['.csv', '.CSV'] ): if not os.path.exists(path): raise ValueError('Path \'{}\' not found'.format(path)) if not os.path.isdir(path): raise ValueError('Object at \'{}\' is not a directory'.format(path)) paths = list() for directory_entry in os.listdir(path): file_path = os.path.join( path, directory_entry ) if not os.path.isfile(file_path): continue file_extension = os.path.splitext(os.path.normpath(file_path))[1] if file_extension not in file_extensions: continue paths.append(file_path) if len(paths) == 0: raise ValueError('No files of type {} found in directory'.format(file_extensions)) results = fetch_results_local_files(paths) return results def fetch_results_local_files( paths ): results_list = list() for path in paths: results_file = fetch_results_local_file( path=path ) results_list.append(results_file) results = pd.concat(results_list) return results def fetch_results_local_file( path ): if not os.path.exists(path): raise ValueError('File \'{}\' not found'.format(path)) if not os.path.isfile(path): raise ValueError('Object at \'{}\' is not a file'.format(path)) results = pd.read_csv( path, dtype='object' ) return results def parse_results(results): test_events = extract_test_events(results) student_info, student_info_changes = extract_student_info(results) student_assignments = extract_student_assignments(results) return test_events, student_info, student_info_changes, student_assignments def extract_test_events( results ): test_events = ( results .rename(columns={ 'TermTested': 'term_school_year', 'DistrictName': 'legal_entity', 'Subject': 'subject', 'Course': 'course', 'StudentID': 'student_id_nwea', 'TestDate': 'test_date', 'StartRIT': 'rit_score', 'StartRITSEM': 'rit_score_sem', 'StartPercentile': 'percentile', 'StartPercentileSE': 'percentile_se' }) ) test_events['term'] = test_events['term_school_year'].apply(lambda x: x.split(' ')[0]) test_events['school_year'] = test_events['term_school_year'].apply(lambda x: x.split(' ')[1]) test_events['term'] = pd.Categorical( test_events['term'], categories=TERMS, ordered=True ) test_events['subject'] = pd.Categorical( test_events['subject'], categories=SUBJECTS, ordered=True ) test_events['course'] = pd.Categorical( test_events['course'], categories=COURSES, ordered=True ) test_events['test_date'] = test_events['test_date'].apply(wf_core_data.utils.to_date) test_events['rit_score'] = pd.to_numeric(test_events['rit_score']).astype('float') test_events['rit_score_sem'] = pd.to_numeric(test_events['rit_score_sem']).astype('float') test_events['percentile'] = 
pd.to_numeric(test_events['percentile']).astype('float') test_events['percentile_se'] = pd.to_numeric(test_events['percentile_se'].replace('<1', 0.5)).astype('float') test_events = test_events.reindex(columns=list(itertools.chain( TIME_FRAME_ID_VARIABLES, ASSESSMENT_ID_VARIABLES, STUDENT_ID_VARIABLES, RESULTS_VARIABLES ))) test_events = ( test_events .drop_duplicates() .set_index(list(itertools.chain( TIME_FRAME_ID_VARIABLES, ASSESSMENT_ID_VARIABLES, STUDENT_ID_VARIABLES ))) .sort_index() ) return test_events def extract_student_info( results ): student_info = ( results .rename(columns= { 'TermTested': 'term_school_year', 'DistrictName': 'legal_entity', 'StudentID': 'student_id_nwea', 'StudentLastName': 'last_name', 'StudentFirstName': 'first_name' }) ) student_info['term'] = student_info['term_school_year'].apply(lambda x: x.split(' ')[0]) student_info['school_year'] = student_info['term_school_year'].apply(lambda x: x.split(' ')[1]) student_info = ( student_info .reindex(columns=list(itertools.chain( STUDENT_ID_VARIABLES, TIME_FRAME_ID_VARIABLES, STUDENT_INFO_VARIABLES ))) .drop_duplicates() ) student_info_changes = ( student_info .groupby(STUDENT_ID_VARIABLES) .filter(lambda group: len(group.drop_duplicates(subset=STUDENT_INFO_VARIABLES)) > 1) ) student_info = ( student_info .sort_values(TIME_FRAME_ID_VARIABLES) .drop(columns=TIME_FRAME_ID_VARIABLES) .groupby(STUDENT_ID_VARIABLES) .tail(1) .set_index(STUDENT_ID_VARIABLES) .sort_index() ) return student_info, student_info_changes def extract_student_assignments( results ): student_assignments = ( results .rename(columns= { 'TermTested': 'term_school_year', 'DistrictName': 'legal_entity', 'StudentID': 'student_id_nwea', 'SchoolName': 'school', 'Teacher': 'teacher_last_first', 'ClassName': 'classroom', 'StudentGrade': 'grade' }) ) student_assignments['term'] = student_assignments['term_school_year'].apply(lambda x: x.split(' ')[0]) student_assignments['school_year'] = student_assignments['term_school_year'].apply(lambda x: x.split(' ')[1]) student_assignments = ( student_assignments .reindex(columns=list(itertools.chain( STUDENT_ID_VARIABLES, TIME_FRAME_ID_VARIABLES, STUDENT_ASSIGNMENT_VARIABLES ))) .drop_duplicates() .set_index(list(itertools.chain( STUDENT_ID_VARIABLES, TIME_FRAME_ID_VARIABLES ))) .sort_index() ) return student_assignments def summarize_by_test( test_events, student_assignments, grouping_variables = [ 'school_year', 'legal_entity', 'school', 'classroom', 'subject', 'course', 'term' ], filter_dict=None, select_dict=None ): tests = ( test_events .join( student_assignments, how='left', on=[ 'legal_entity', 'student_id_nwea', 'school_year', 'term' ] ) .groupby(grouping_variables) .agg( num_test_events=('test_date', 'count'), num_valid_rit_score=('rit_score', 'count'), num_valid_percentile=('percentile', 'count') ) ) tests = tests.loc[tests['num_test_events'] > 0].copy() if filter_dict is not None: tests = wf_core_data.utils.filter_dataframe( dataframe=tests, filter_dict=filter_dict ) if select_dict is not None: tests = wf_core_data.utils.select_from_dataframe( dataframe=tests, select_dict=select_dict ) return tests def summarize_by_student( test_events, student_info, student_assignments, new_time_index=['school_year'], min_growth_days=DEFAULT_MIN_GROWTH_DAYS, school_year_duration_months=DEFAULT_SCHOOL_YEAR_DURATION_MONTHS, filter_dict=None, select_dict=None ): new_index_variables = list(itertools.chain( new_time_index, ASSESSMENT_ID_VARIABLES, STUDENT_ID_VARIABLES )) unstack_variables = copy.deepcopy(TIME_FRAME_ID_VARIABLES) 
for new_time_index_variable in new_time_index: unstack_variables.remove(new_time_index_variable) students = ( test_events .unstack(unstack_variables) ) students.columns = ['_'.join([inflection.underscore(variable_name) for variable_name in x]) for x in students.columns] underlying_data_columns = list(students.columns) rit_scores = ( test_events .dropna(subset=['rit_score']) .sort_values('test_date') .groupby(new_index_variables) .agg( rit_score_starting_date=('test_date', lambda x: x.dropna().iloc[0]), rit_score_ending_date=('test_date', lambda x: x.dropna().iloc[-1]), starting_rit_score=('rit_score', lambda x: x.dropna().iloc[0]), starting_rit_score_sem=('rit_score_sem', lambda x: x.dropna().iloc[0]), ending_rit_score=('rit_score', lambda x: x.dropna().iloc[-1]), ending_rit_score_sem=('rit_score_sem', lambda x: x.dropna().iloc[-1]) ) ) percentiles = ( test_events .dropna(subset=['percentile']) .sort_values('test_date') .groupby(new_index_variables) .agg( percentile_starting_date=('test_date', lambda x: x.dropna().iloc[0]), percentile_ending_date=('test_date', lambda x: x.dropna().iloc[-1]), starting_percentile=('percentile', lambda x: x.dropna().iloc[0]), starting_percentile_se=('percentile_se', lambda x: x.dropna().iloc[0]), ending_percentile=('percentile', lambda x: x.dropna().iloc[-1]), ending_percentile_se=('percentile_se', lambda x: x.dropna().iloc[-1]) ) ) students = ( students .join( rit_scores, how='left' ) .join( percentiles, how='left' ) ) students['rit_score_num_days'] = ( np.subtract( students['rit_score_ending_date'], students['rit_score_starting_date'] ) .apply(lambda x: x.days) ) students['rit_score_growth'] = np.subtract( students['ending_rit_score'], students['starting_rit_score'] ) students['rit_score_growth_se'] = np.sqrt(np.add( np.square(students['starting_rit_score_sem']), np.square(students['ending_rit_score_sem']) )) students.loc[students['rit_score_num_days'] < min_growth_days, 'rit_score_growth'] = np.nan students.loc[students['rit_score_num_days'] < min_growth_days, 'rit_score_growth_se'] = np.nan students['rit_score_growth_per_school_year'] = 365.25*(school_year_duration_months/12)*students['rit_score_growth']/students['rit_score_num_days'] students['rit_score_growth_per_school_year_se'] = 365.25*(school_year_duration_months/12)*students['rit_score_growth_se']/students['rit_score_num_days'] students['percentile_num_days'] = ( np.subtract( students['percentile_ending_date'], students['percentile_starting_date'] ) .apply(lambda x: x.days) ) students['percentile_growth'] = np.subtract( students['ending_percentile'], students['starting_percentile'] ) students['percentile_growth_se'] = np.sqrt(np.add( np.square(students['starting_percentile_se']), np.square(students['ending_percentile_se']) )) students.loc[students['percentile_num_days'] < min_growth_days, 'percentile_growth'] = np.nan students.loc[students['percentile_num_days'] < min_growth_days, 'percentile_growth_se'] = np.nan students['percentile_growth_per_school_year'] = 365.25*(school_year_duration_months/12)*students['percentile_growth']/students['percentile_num_days'] students['percentile_growth_per_school_year_se'] = 365.25*(school_year_duration_months/12)*students['percentile_growth_se']/students['percentile_num_days'] students = students.join( student_info, how='left', on=['legal_entity', 'student_id_nwea'] ) latest_student_assignments = ( student_assignments .reset_index() .sort_values(['school_year', 'term']) .groupby(list(itertools.chain( STUDENT_ID_VARIABLES, new_time_index ))) .tail(1) 
.set_index(list(itertools.chain( STUDENT_ID_VARIABLES, new_time_index ))) ) students = students.join( latest_student_assignments, how='left', on=latest_student_assignments.index.names ) students = students.reindex(columns=list(itertools.chain( STUDENT_INFO_VARIABLES, STUDENT_ASSIGNMENT_VARIABLES, underlying_data_columns, [ 'rit_score_starting_date', 'rit_score_ending_date', 'rit_score_num_days', 'starting_rit_score', 'starting_rit_score_sem', 'ending_rit_score', 'ending_rit_score_sem', 'rit_score_growth', 'rit_score_growth_se', 'rit_score_growth_per_school_year', 'rit_score_growth_per_school_year_se', 'percentile_starting_date', 'percentile_ending_date', 'percentile_num_days', 'starting_percentile', 'starting_percentile_se', 'ending_percentile', 'ending_percentile_se', 'percentile_growth', 'percentile_growth_se', 'percentile_growth_per_school_year', 'percentile_growth_per_school_year_se' ] ))) if filter_dict is not None: students = wf_core_data.utils.filter_dataframe( dataframe=students, filter_dict=filter_dict ) if select_dict is not None: students = wf_core_data.utils.select_from_dataframe( dataframe=students, select_dict=select_dict ) return students def summarize_by_group( students, grouping_variables=[ 'school_year', 'legal_entity', 'subject', 'course' ], filter_dict=None, select_dict=None ): groups = ( students .reset_index() .groupby(grouping_variables) .agg( num_test_results=('student_id_nwea', 'count'), num_valid_starting_rit_score=('starting_rit_score', 'count'), mean_starting_rit_score=('starting_rit_score', 'mean'), starting_rit_score_sd=('starting_rit_score', 'std'), mean_starting_rit_score_se=( 'starting_rit_score_sem', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ), num_valid_ending_rit_score=('ending_rit_score', 'count'), mean_ending_rit_score=('ending_rit_score', 'mean'), ending_rit_score_sd=('ending_rit_score', 'std'), mean_ending_rit_score_se=( 'ending_rit_score_sem', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ), num_valid_rit_score_growth=('rit_score_growth', 'count'), mean_rit_score_growth=('rit_score_growth', 'mean'), rit_score_growth_sd=('rit_score_growth', 'std'), mean_rit_score_growth_se=( 'rit_score_growth_se', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ), mean_rit_score_growth_per_school_year=('rit_score_growth_per_school_year', 'mean'), rit_score_growth_per_school_year_sd=('rit_score_growth_per_school_year', 'std'), mean_rit_score_growth_per_school_year_se=( 'rit_score_growth_per_school_year_se', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ), num_valid_starting_percentile=('starting_percentile', 'count'), mean_starting_percentile=('starting_percentile', 'mean'), starting_percentile_sd=('starting_percentile', 'std'), mean_starting_percentile_se=( 'starting_percentile_se', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ), num_valid_ending_percentile=('ending_percentile', 'count'), mean_ending_percentile=('ending_percentile', 'mean'), ending_percentile_sd=('ending_percentile', 'std'), mean_ending_percentile_se=( 'ending_percentile_se', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ), num_valid_percentile_growth=('percentile_growth', 'count'), 
mean_percentile_growth=('percentile_growth', 'mean'), percentile_growth_sd=('percentile_growth', 'std'), mean_percentile_growth_se=( 'percentile_growth_se', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ), mean_percentile_growth_per_school_year=('percentile_growth_per_school_year', 'mean'), percentile_growth_per_school_year_sd=('percentile_growth_per_school_year', 'std'), mean_percentile_growth_per_school_year_se=( 'percentile_growth_per_school_year_se', lambda x: np.sqrt(np.nansum(np.square(x))/np.sum(np.isfinite(x))**2) if np.sum(np.isfinite(x)) > 0 else np.nan ) ) .dropna(how='all') ) groups = groups.loc[groups['num_test_results'] > 0].copy() groups['mean_starting_rit_score_sem'] = np.divide( groups['starting_rit_score_sd'], np.sqrt(groups['num_valid_starting_rit_score']) ) groups['mean_ending_rit_score_sem'] = np.divide( groups['ending_rit_score_sd'], np.sqrt(groups['num_valid_ending_rit_score']) ) groups['mean_rit_score_growth_sem'] = np.divide( groups['rit_score_growth_sd'], np.sqrt(groups['num_valid_rit_score_growth']) ) groups['mean_rit_score_growth_per_school_year_sem'] = np.divide( groups['rit_score_growth_per_school_year_sd'], np.sqrt(groups['num_valid_rit_score_growth']) ) groups['mean_starting_percentile_sem'] = np.divide( groups['starting_percentile_sd'], np.sqrt(groups['num_valid_starting_percentile']) ) groups['mean_ending_percentile_sem'] = np.divide( groups['ending_percentile_sd'], np.sqrt(groups['num_valid_ending_percentile']) ) groups['mean_percentile_growth_sem'] = np.divide( groups['percentile_growth_sd'], np.sqrt(groups['num_valid_percentile_growth']) ) groups['mean_percentile_growth_per_school_year_sem'] = np.divide( groups['percentile_growth_per_school_year_sd'], np.sqrt(groups['num_valid_percentile_growth']) ) groups = groups.reindex(columns=[ 'num_test_results', 'num_valid_starting_rit_score', 'mean_starting_rit_score', 'starting_rit_score_sd', 'mean_starting_rit_score_sem', 'mean_starting_rit_score_se', 'num_valid_ending_rit_score', 'mean_ending_rit_score', 'ending_rit_score_sd', 'mean_ending_rit_score_sem', 'mean_ending_rit_score_se', 'num_valid_rit_score_growth', 'mean_rit_score_growth', 'rit_score_growth_sd', 'mean_rit_score_growth_sem', 'mean_rit_score_growth_se', 'mean_rit_score_growth_per_school_year', 'rit_score_growth_per_school_year_sd', 'mean_rit_score_growth_per_school_year_sem', 'mean_rit_score_growth_per_school_year_se', 'num_valid_starting_percentile', 'mean_starting_percentile', 'starting_percentile_sd', 'mean_starting_percentile_sem', 'mean_starting_percentile_se', 'num_valid_ending_percentile', 'mean_ending_percentile', 'ending_percentile_sd', 'mean_ending_percentile_sem', 'mean_ending_percentile_se', 'num_valid_percentile_growth', 'mean_percentile_growth', 'percentile_growth_sd', 'mean_percentile_growth_sem', 'mean_percentile_growth_se', 'mean_percentile_growth_per_school_year', 'percentile_growth_per_school_year_sd', 'mean_percentile_growth_per_school_year_sem', 'mean_percentile_growth_per_school_year_se' ]) if filter_dict is not None: groups = wf_core_data.utils.filter_dataframe( dataframe=groups, filter_dict=filter_dict ) if select_dict is not None: groups = wf_core_data.utils.select_from_dataframe( dataframe=groups, select_dict=select_dict ) return groups
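# Hypothetical end-to-end sketch (illustrative only; `load_results` and the raw
# `results` frame are assumptions, not part of this module):
#
#     results = load_results(...)                          # raw NWEA MAP export
#     student_info, info_changes = extract_student_info(results)
#     student_assignments = extract_student_assignments(results)
#     students = summarize_by_student(test_events, student_info, student_assignments)
#     groups = summarize_by_group(students)
#
# `test_events` comes from the test-event extraction earlier in this module.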
{"hexsha": "81aba13078c34d47b70fa337ffa7a00a55daf841", "size": 22003, "ext": "py", "lang": "Python", "max_stars_repo_path": "nwea_utils/analysis.py", "max_stars_repo_name": "WildflowerSchools/wf-nwea-utils", "max_stars_repo_head_hexsha": "f3b35baa5b03d36ea7b351c0173037055879d926", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nwea_utils/analysis.py", "max_issues_repo_name": "WildflowerSchools/wf-nwea-utils", "max_issues_repo_head_hexsha": "f3b35baa5b03d36ea7b351c0173037055879d926", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nwea_utils/analysis.py", "max_forks_repo_name": "WildflowerSchools/wf-nwea-utils", "max_forks_repo_head_hexsha": "f3b35baa5b03d36ea7b351c0173037055879d926", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8148734177, "max_line_length": 159, "alphanum_fraction": 0.6296414125, "include": true, "reason": "import numpy", "num_tokens": 5025}
#' Group input by rows #' #' \Sexpr[results=rd, stage=render]{dplyr:::lifecycle("questioning")} #' #' See [this repository](https://github.com/jennybc/row-oriented-workflows) #' for alternative ways to perform row-wise operations #' #' `rowwise()` is used for the results of [do()] when you #' create list-variables. It is also useful to support arbitrary #' complex operations that need to be applied to each row. #' #' Currently, rowwise grouping only works with data frames. Its #' main impact is to allow you to work with list-variables in #' [summarise()] and [mutate()] without having to #' use \code{[[1]]}. This makes `summarise()` on a rowwise tbl #' effectively equivalent to [plyr::ldply()]. #' #' @param data Input data frame. #' @export #' @examples #' df <- expand.grid(x = 1:3, y = 3:1) #' df_done <- df %>% rowwise() %>% do(i = seq(.$x, .$y)) #' df_done #' df_done %>% summarise(n = length(i)) rowwise <- function(data) { stopifnot(is.data.frame(data)) assert_all_allow_list(data) structure(data, class = c("rowwise_df", "tbl_df", "tbl", "data.frame")) } setOldClass(c("rowwise_df", "tbl_df", "tbl", "data.frame")) #' @export print.rowwise_df <- function(x, ..., n = NULL, width = NULL) { cat("Source: local data frame ", dim_desc(x), "\n", sep = "") cat("Groups: <by row>\n") cat("\n") print(trunc_mat(x, n = n, width = width)) invisible(x) } #' @export ungroup.rowwise_df <- function(x, ...) { class(x) <- c("tbl_df", "tbl", "data.frame") x } #' @export as.data.frame.rowwise_df <- function(x, row.names, optional, ...) { class(x) <- "data.frame" x } #' @export group_size.rowwise_df <- function(x) { rep.int(1L, nrow(x)) } #' @export n_groups.rowwise_df <- function(x) { nrow(x) } #' @export group_by.rowwise_df <- function(.data, ..., add = FALSE, .drop = group_by_drop_default(.data)) { warn("Grouping rowwise data frame strips rowwise nature") .data <- ungroup(.data) groups <- group_by_prepare(.data, ..., add = add) grouped_df(groups$data, groups$group_names, .drop) } #' @export group_by_.rowwise_df <- function(.data, ..., .dots = list(), add = FALSE, .drop = FALSE) { dots <- compat_lazy_dots(.dots, caller_env(), ...) group_by(.data, !!!dots, add = add, .drop = .drop) } # Do --------------------------------------------------------------------------- #' @export do.rowwise_df <- function(.data, ...) { # Create ungroup version of data frame suitable for subsetting group_data <- ungroup(.data) args <- quos(...) named <- named_args(args) # Create new environment, inheriting from parent, with an active binding # for . that resolves to the current subset. `_i` is found in environment # of this function because of usual scoping rules. mask <- new_data_mask(new_environment()) current_row <- function() lapply(group_data[`_i`, , drop = FALSE], "[[", 1) env_bind_do_pronouns(mask, current_row) n <- nrow(.data) m <- length(args) out <- replicate(m, vector("list", n), simplify = FALSE) names(out) <- names(args) p <- progress_estimated(n * m, min_time = 2) for (`_i` in seq_len(n)) { for (j in seq_len(m)) { out[[j]][`_i`] <- list(eval_tidy(args[[j]], mask)) p$tick()$print() } } if (!named) { label_output_dataframe(NULL, out, groups(.data), group_by_drop_default(.data)) } else { label_output_list(NULL, out, groups(.data)) } } #' @export do_.rowwise_df <- function(.data, ..., .dots = list()) { dots <- compat_lazy_dots(.dots, caller_env(), ...) do(.data, !!!dots) }
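# Illustrative example (not from the original source): `rowwise()` also lets
# `mutate()` work one row at a time, e.g. a pairwise minimum of two columns:
#   df <- expand.grid(x = 1:3, y = 3:1)
#   df %>% rowwise() %>% mutate(z = min(x, y))
# Here `min()` sees a single row per call, so `z` is the element-wise minimum.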
{"hexsha": "21805c7cd3bc7bf0beedca6533bf24dd14bf48aa", "size": 3527, "ext": "r", "lang": "R", "max_stars_repo_path": "R/rowwise.r", "max_stars_repo_name": "rensa/dplyr", "max_stars_repo_head_hexsha": "1da8c88293bc5e26de3d100b83a9b92e216becf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-19T03:05:41.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-19T03:05:41.000Z", "max_issues_repo_path": "R/rowwise.r", "max_issues_repo_name": "rensa/dplyr", "max_issues_repo_head_hexsha": "1da8c88293bc5e26de3d100b83a9b92e216becf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/rowwise.r", "max_forks_repo_name": "rensa/dplyr", "max_forks_repo_head_hexsha": "1da8c88293bc5e26de3d100b83a9b92e216becf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1487603306, "max_line_length": 96, "alphanum_fraction": 0.627161894, "num_tokens": 1015}
import numpy as np
from sklearn.metrics import classification_report
from preprocessing import Preprocessor
import time

class NaiveBayes:
    def __init__(self):
        self.labels = set()
        self.word_counts = {}
        self.priors = {}
        self.likelihoods = {}

    def train(self, train_x, train_y):
        self.labels = set(train_y)
        self.word_counts = {label:{} for label in self.labels}
        self.priors = {label:0 for label in self.labels}
        self.likelihoods = {label:{} for label in self.labels}
        self.total_words = 0
        for tweet,label in zip(train_x,train_y):
            self.priors[label] += 1
            for word in tweet.split():
                self.total_words += 1
                if word in self.word_counts[label]:
                    self.word_counts[label][word] += 1
                else:
                    self.word_counts[label][word] = 1
        self.priors = {l:self.priors[l]/len(train_y) for l in self.labels}
        # add-one smoothing; the denominator mixes the corpus-wide word total with
        # the per-class count, matching the unseen-word fallback used in predict()
        self.likelihoods = { l :
                                { w : (self.word_counts[l][w]+1)/(self.total_words+sum(self.word_counts[l].values())) \
                                    for w in self.word_counts[l] }
                            for l in self.labels }

    def predict(self, tweet):
        # a smaller negative-log-probability score means a more likely label
        best = {'label':None, 'score':np.inf}
        for l in self.labels:
            score = -np.log(self.priors[l])
            for w in tweet.split():
                if w in self.likelihoods[l]:
                    score += -np.log(self.likelihoods[l][w])
                else:
                    score += -np.log(1.0/(self.total_words+sum(self.word_counts[l].values())))
            if score < best['score']:
                best['score'],best['label'] = score,l
        return best['label']

    def test(self, test_x):
        return [self.predict(tweet) for tweet in test_x]

    def acc(self, y_preds, y_true):
        return sum([yp==yt for yp,yt in zip(y_preds,y_true)])/len(y_preds)

if __name__ == "__main__":
    train_options = {
        "train_data_path": "data/OLIDv1.0/olid-training-v1.0_clean.tsv",
        "test_tweet_path": "data/OLIDv1.0/testset-levela_clean.tsv",
        "test_label_path": "data/OLIDv1.0/labels-levela.csv",
        "sample_size":1,
        "seed":1
    }

    print('='*40)
    print('task A')  # level-a data and subtask_a are used below
    print('loading data...')
    pp = Preprocessor()
    train_x, train_y = pp.get_train_data(train_options["train_data_path"],
                                         sample=train_options['sample_size'],
                                         seed=train_options['seed'],
                                         task='subtask_a')
    test_x, test_y = pp.get_test_data(train_options['test_tweet_path'], train_options['test_label_path'])
    print('='*40)
    print()

    print('='*40)
    print('training model...')
    start = time.time()
    model = NaiveBayes()
    model.train(train_x=train_x, train_y=train_y)
    end = time.time()
    print(f'took {round(end-start,2)}s')
    print('predicting...')
    start = time.time()
    preds = model.test(test_x=test_x)
    end = time.time()
    print(f'took {round(end-start,2)}s')
    print('='*40)
    print()

    results = classification_report(y_true=test_y, y_pred=preds, output_dict=True, digits=4)
    print('='*40)
    print('testing')
    print(f'accuracy: \t{model.acc(preds,test_y)}')
    print(f'precision: \t{results["macro avg"]["precision"]}')
    print(f'recall: \t{results["macro avg"]["recall"]}')
    print(f'F1-score: \t{results["macro avg"]["f1-score"]}')
    print('='*40)
    print()

    # ========================================
    # task A
    # loading data...
    # cleaning data...
    # took 0.26s
    # ========================================
    # ========================================
    # training model...
    # took 2.8s
    # predicting...
    # took 0.36s
    # ========================================
    # ========================================
    # testing
    # accuracy:     0.7395348837209302
    # precision:    0.7725511898173769
    # recall:       0.5397177419354838
    # F1-score:     0.5019184825888655
    # ========================================
{"hexsha": "ef67cb40641b773595fde5a5c0f932e390840587", "size": 4190, "ext": "py", "lang": "Python", "max_stars_repo_path": "naive_bayes.py", "max_stars_repo_name": "radhe2205/abusive_lang_detection", "max_stars_repo_head_hexsha": "330066f505bb75222bdfcf95d29e105aa6282d11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "naive_bayes.py", "max_issues_repo_name": "radhe2205/abusive_lang_detection", "max_issues_repo_head_hexsha": "330066f505bb75222bdfcf95d29e105aa6282d11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "naive_bayes.py", "max_forks_repo_name": "radhe2205/abusive_lang_detection", "max_forks_repo_head_hexsha": "330066f505bb75222bdfcf95d29e105aa6282d11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.480620155, "max_line_length": 101, "alphanum_fraction": 0.5200477327, "include": true, "reason": "import numpy", "num_tokens": 1005}
\section{Results \& Discussion}
The aim of this work was to produce a well-parallelised software package capable of quickly producing starting structures for later MD simulations of multiple micellar species from SAS data.

\subsection{Parallelisation}
The parallelisation of a software package is commonly assessed using two metrics, strong and weak scaling.
These assess the CPU-bound\footnote{Central processing unit} efficiency and memory-bound efficiency of the software respectively.
A perfectly parallelised software package would present a strong and weak scaling efficiency of 1 regardless of the number of processors.
In order to determine the strong scaling relationship for \texttt{fitoog}, a system was set up with a population size of 128 and was run for 5000 iterations.
This system was run on a range of processor counts, from 1 to 128,\footnote{Increasing in a $\log_2$ fashion.} on the SCARF cluster of STFC.
Figure~\ref{fig:scale}(a) shows the strong scaling relationship for \texttt{fitoog} running on up to 128 cores.
The weak scaling was probed by increasing the population size alongside the number of processors; both were varied in the same range as for the strong scaling, e.g. a population of 1 on a single core up to a population of 128 over 128 cores.
The weak scaling relationship is shown in Figure~\ref{fig:scale}(b).
%
\begin{figure}[t]
\centering
\includegraphics[width=\textwidth]{smallangle/scaling}
\caption{The (a) strong and (b) weak scaling relationships of \texttt{fitoog} running on up to 128 cores of the SCARF cluster. The slight increase between \num{1} and \num{2} cores is most likely due to small differences in the speed of the different processors.}
\label{fig:scale}
\end{figure}
%
It can be seen that both the strong and weak parallel efficiency of \texttt{fitoog} are relatively good, with the efficiency not dropping below \SI{80}{\percent} even when spread over 128 cores.
The speedup of \texttt{fitoog} is shown in Figure~\ref{fig:speedup}.\footnote{This is generated from the strong scaling relationship.}
Amdahl's law\autocite{amdahl_validity_1967} gives the maximum speedup on $N$ processors as
%
\begin{equation*}
S(N) = \frac{1}{(1 - p) + p/N},
\end{equation*}
%
where $p$ is the fraction of the computation that is parallel; fitting this expression to the measured speedup shows that the parallel component of a given \texttt{fitoog} run makes up \SI{99.8}{\percent} of the computation.
This suggests that the parallelisation methodology currently implemented is successful, and it would not be advantageous to utilise a more sophisticated parallelisation method.
%
\begin{figure}[t]
\forcerectofloat
\centering
\includegraphics[width=\textwidth]{smallangle/speedup}
\caption{The speedup of \texttt{fitoog} running on up to 128 cores of the SCARF cluster: the blue dots show the speedup at different compute sizes, the orange line indicates the theoretical maximum, and the blue line shows the fit of Amdahl's law to the measured speedup.}
\label{fig:speedup}
\end{figure}
%
The high efficiency in both strong and weak scaling regimes and the indication that the serial component of the computation is very small indicate that it is sensible to utilise high-performance computing resources for this software package.
In the real data application of this work, the \texttt{fitoog} software was run on 48 cores of the SCARF cluster.
This was chosen as it would spread the computation over exactly two nodes of the SCARF cluster,\footnote{Each node contains 24 cores.} such that both nodes were used to their full capacity.

\subsection{Test system}
In order to assess the PSO implementation, a simple test system was defined.
This consisted of a coordinate cell that contained four surfactant molecules at four corners of a \SI{20}{\angstrom} cube, each orientated in a different direction, see Figures~\ref{fig:test}(a-c).
The scattering intensity was calculated from the cell, with the blue beads given a scattering length of \SI{100}{\femto\meter} and the grey beads a scattering length of \SI{20}{\femto\meter}, to ensure the presence of intense scattering.
The scattering was calculated using the Debye equation\autocite{debye_zerstreuung_1915} (the form of which is given at the close of this subsection) for values of $q$ in a range from \SIrange{0.3}{1.5}{\per\angstrom} with 100 data points; this profile is shown in Figure~\ref{fig:test}(d).
%
\begin{figure}[t]
\forceversofloat
\centering
\includegraphics[width=\textwidth]{smallangle/fake_box}
\includegraphics[width=\textwidth]{smallangle/fake}
\caption{Test system coordinate cell observed down the (a) \emph{x}-, (b) \emph{y}-, and (c) \emph{z}-axis, and the calculated scattering data from the Debye equation.}
\label{fig:test}
\end{figure}
%
\texttt{fitoog} was used to fit the ``experimental'' data; a population size of 100 was iterated over 5000 steps.
Ten repetitions of the \texttt{fitoog} run were performed,\footnote{The random seed and therefore the initial starting configuration varied between each run.} taking around two and a half minutes per run on a workstation computer with four cores.
Figure~\ref{fig:test_assess} shows the optimised scattering profile obtained from each of the runs and compares it with the ``experimental'' data.
It is clear that some of the runs agree well with the data, in particular runs \num{1} and \num{2}; the resulting coordinate cells for these profiles are also shown in Figure~\ref{fig:test_assess}.
%
\begin{figure}
\centering
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess1}
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess2} \\
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess3}
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess4} \\
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess5}
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess6} \\
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess7}
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess8} \\
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess9}
\includegraphics[width=0.49\textwidth]{smallangle/fake_assess10} \\
\includegraphics[width=\textwidth]{smallangle/fake_result}
\caption{The best fit from the \texttt{fitoog} run (orange line) is compared with the ``real experimental'' data (blue line) for each of the ten runs. The results of runs 1 (a, b, and c) and 2 (d, e, and f) along each axis for the test system coordinate cell.}
\label{fig:test_assess}
\end{figure}
%
This agreement with the ``experimental'' data in the test case is a positive result, and the resulting cells shown in Figure~\ref{fig:test_assess} appear to show quantitative agreement\footnote{The molecules are in similar locations.} with the coordinate cell from which the ``experimental'' data was generated.
In particular, the agreement between the simulated and ``experimental'' data is sufficient to suggest that, in a more disordered example, it may be possible to form a realistic starting structure.
Therefore, the use of a PSO method was continued and applied to the real experimental data.
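For reference, the Debye equation used for these calculations relates the scattering intensity to the pairwise distances between the beads; in its standard form,
%
\begin{equation*}
I(q) = \sum_{i}\sum_{j} b_i b_j \frac{\sin(q r_{ij})}{q r_{ij}},
\end{equation*}
%
where $b_i$ is the scattering length of bead $i$ and $r_{ij}$ is the distance between beads $i$ and $j$. Any normalisation applied within \texttt{fitoog} is not reproduced here.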
\subsection{Real data}
\label{sec:real_data}
The real experimental data consisted of a single SANS profile for the hydrogenated \emph{n}-decyltrimethylammonium micelle, with nitrate counter ions,\footnote{Abbreviated to \ce{C_{10}TA+} and \ce{NO3-} respectively.} in \ce{D2O}.
It was assumed that this data was completely background subtracted, such that the scattering present was a result of the micelles alone.
Figure~\ref{fig:expdata} shows the scattering profile that was being modelled.
%
\begin{figure}[t]
\centering
\includegraphics[width=\textwidth]{smallangle/exp_data}
\caption{The experimental SANS data, from a solution of hydrogenated \ce{C_{10}TANO_3} in \ce{D2O} at a concentration of \SI{\sim0.15}{\mol\deci\meter^{-3}}, to which the real \texttt{fitoog} run was attempting to fit.}
\label{fig:expdata}
\end{figure}
%
The aim of the application to real data was to attempt to quickly produce a system with multiple micellar species.
Therefore, the simulation cell was substantially larger: containing 500 MARTINI coarse-grained \ce{C_{10}TA+} and \ce{NO3-} molecules, the available cell was a cube with a side of \SI{177}{\angstrom}.\footnote{No solvent was included in the box as the scattering was considered to have arisen from the micelle scattering alone. The concentration of the solution was \SI{\sim0.15}{\mol\deci\meter^{-3}}, which is nearly three times the cmc for \ce{C_{10}TA+} from \cite{rodriguez_surface_2007}.}
In order to assess the utility of the PSO, two \texttt{fitoog} runs were performed.\footnote{Each with five repetitions, and a population size of 96 over 500 iterations.}
The PSO method was compared with a random method, where at each iteration a new random population was generated.
Figure~\ref{fig:chi} shows the variation in the figure of merit, $\zeta$, in each of these optimisations.
Figure~\ref{fig:chi}(a-c) shows the best structure that was obtained from the \texttt{fitoog} runs; notably, it is from the randomisation-based run, and it shows no evidence of the formation of micelle-like species.
From this, there is no clear benefit to the use of the PSO method over simply selecting random structures.
%
\begin{figure}
\centering
\includegraphics[width=\textwidth]{smallangle/comparechi}
\includegraphics[width=\textwidth]{smallangle/best_real_bad}
\caption{The quality of agreement between the \texttt{fitoog} model and the experimental data, where the blue lines are the different PSO runs and the orange are the randomisation runs. Result of the best outcome from the real data example, observed down the (a) \emph{x}-, (b) \emph{y}-, and (c) \emph{z}-axis.}
\label{fig:chi}
\end{figure}
%
This inability of the PSO method to optimise the structure of the micellar species could be due to a wide variety of reasons, some of which could be acted on and others that could not.
While it is common to use values for the acceleration coefficients in the range of \numrange{0}{2}, it may be necessary to optimise these values.\footnote{Which, of course, could lead to an infinite set of optimisations.}
However, it may be the case that the parameter space was too large to be optimised using the PSO alone; this is very likely considering that the dimensionality of the parameter space was 4500.\footnote{$500\times6+500\times3$, for 500 \ce{C_{10}TA+} and \ce{NO3-} molecules.}
A possible method that may enable the optimisation of such structures would be the inclusion of an energetic term.
For example, this could involve an energy optimisation performed alongside the structural optimisation against the scattering profile, regardless of the optimisation methodology.
The use of energetic considerations and a Markov state model optimisation has been shown to perform well for peptide self-assembly.\autocite{sengupta_automated_2019}
Additionally, the Empirical Potential Structure Refinement used by Hargreaves \emph{et al.},\autocite{hargreaves_atomistic_2011} and in a coarse-grained fashion by Soper and Edler,\autocite{soper_coarse-grained_2017} and the method used by Ivanovic \emph{et al.}\autocite{ivanovic_temperature-dependent_2018} all involve performing an energetic optimisation that is biased by the agreement with experimental data.
This use of an energetic consideration could be included in \texttt{fitoog} by performing an energy minimisation step following each step of the PSO; one possible form for such a combined objective is sketched below.
This would have the added benefit of creating a range of surfactant conformations, allowing a more realistic structure to form.
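As an illustrative sketch of this proposal (the weighting $\lambda$ and the energy model are assumptions, not features of the current implementation), the optimisation could minimise a combined figure of merit of the form
%
\begin{equation*}
\zeta' = \zeta + \lambda E,
\end{equation*}
%
where $\zeta$ is the existing agreement-based figure of merit, $E$ is the potential energy of the candidate configuration, and $\lambda$ controls the relative weighting of the two terms.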
{"hexsha": "121762a9df0d723d17e44d4ee1f04629b810fd3c", "size": 11522, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "reports/chapters/smallangle/discussion.tex", "max_stars_repo_name": "arm61/thesis", "max_stars_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-06-04T20:53:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-01T06:25:20.000Z", "max_issues_repo_path": "reports/chapters/smallangle/discussion.tex", "max_issues_repo_name": "arm61/thesis", "max_issues_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-04T17:11:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-04T17:11:33.000Z", "max_forks_repo_path": "reports/chapters/smallangle/discussion.tex", "max_forks_repo_name": "arm61/thesis", "max_forks_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 92.9193548387, "max_line_length": 501, "alphanum_fraction": 0.7846727999, "num_tokens": 2918}
export Mesh3, get_ngauss, get_volume

@doc raw"""
`Mesh3` holds data structures for an unstructured 3D (tetrahedral) mesh.

- `nodes`: a $n_v \times 3$ coordinates array
- `edges`: a $n_{\text{edge}} \times 2$ integer array for edges
- `faces`: a $n_{\text{face}} \times 3$ integer array for triangular faces
- `elems`: a $n_e \times 4$ connectivity matrix, 1-based.
- `nnode`, `nedge`, `nface`, `nelem`: number of nodes, edges, faces, and elements
- `ndof`: total number of degrees of freedoms
- `conn`: connectivity matrix, `nelems × 4` or `nelems × 10`, depending on whether a linear element or a quadratic element is used.
- `lorder`: order of quadrature rule for face integrals
- `elem_type`: type of the element (P1, P2 or BDM1)
"""
mutable struct Mesh3
    nodes::Array{Float64, 2}
    edges::Array{Int64, 2}
    faces::Array{Int64, 2}
    elems::Array{Int64, 2}
    nnode::Int64
    nedge::Int64
    nface::Int64
    nelem::Int64
    ndof::Int64
    conn::Array{Int64, 2}
    lorder::Int64
    elem_ndof::Int64
    elem_type::FiniteElementType
end

"""
    Mesh3(coords::Array{Float64, 2}, elems::Array{Int64, 2},
        order::Int64 = -1, degree::Union{FiniteElementType,Int64} = 1, lorder::Int64 = -1)

- `degree`: 1 for P1 element, 2 for P2 element
- `order`: Integration order for elements
- `lorder`: Integration order for faces
"""
function Mesh3(coords::Array{Float64, 2}, elems::Array{Int64, 2},
    order::Int64 = -1, degree::Union{FiniteElementType,Int64} = 1, lorder::Int64 = -1)
    @assert length(size(coords))==2 && size(coords,2)==3
    @assert length(size(elems))==2 && size(elems,2)==4
    if degree==P1
        degree = 1
    elseif degree==P2
        degree = 2
    end
    if order==-1
        if degree == 1
            order = 2
        elseif degree == 2
            order = 4
        end
    end
    nnode = size(coords, 1)
    nelem = size(elems,1)
    nedges = zeros(Int64, 1)
    c = coords'[:]
    e = Int32.(elems'[:].- 1)
    edges_ptr = @eval ccall((:init_nnfem_mesh3, $LIBMFEM3), Ptr{Clonglong},
        (Ptr{Cdouble}, Cint, Ptr{Cint}, Cint, Cint, Cint, Ptr{Clonglong}),
        $c, Int32(size($coords, 1)), $e, Int32(size($elems,1)), Int32($order), Int32($degree), $nedges)
    nedges = nedges[1]
    edges = unsafe_wrap(Array{Int64,1}, edges_ptr, (2nedges,), own=true)
    edges = reshape(edges, nedges, 2)
    elem_dof = Int64(@eval ccall((:mfem_get_elem_ndof3, $LIBMFEM3), Cint, ()))
    conn = zeros(Int64, elem_dof * size(elems, 1))
    @eval ccall((:mfem_get_connectivity3, $LIBMFEM3), Cvoid, (Ptr{Clonglong}, ), $conn)
    ndof = Int64(@eval ccall((:mfem_get_ndof3, $LIBMFEM3), Cint, ()))
    conn = reshape(conn, elem_dof, size(elems, 1))'|>Array
    @eval ccall((:mfem_get_element_to_vertices3, $LIBMFEM3), Cvoid, (Ptr{Clonglong}, ), $elems)
    elem_type = missing
    if degree==1
        elem_type = P1
    elseif degree==2
        elem_type = P2
    end

    # get faces: every tetrahedron contributes its four triangular faces,
    # stored once each via a set of sorted vertex triples
    fset = Set{Tuple{Int64, Int64, Int64}}([])
    for i = 1:nelem
        cc = elems[i,:]
        push!(fset, Tuple(sort(cc[[1;2;3]])))
        push!(fset, Tuple(sort(cc[[1;2;4]])))
        push!(fset, Tuple(sort(cc[[1;3;4]])))
        push!(fset, Tuple(sort(cc[[2;3;4]])))
    end
    faces = vcat([[x[1] x[2] x[3]] for x in fset]...)
    nfaces = size(faces, 1)
    Mesh3(coords, edges, faces, elems, nnode, nedges, nfaces, nelem, ndof, conn, lorder, elem_dof, elem_type)
end

# arguments follow the field order of the struct: ..., nnode, nedge, nface, nelem, ...
Base.:copy(mesh::Mesh3) = Mesh3(copy(mesh.nodes), copy(mesh.edges), copy(mesh.faces), copy(mesh.elems),
                copy(mesh.nnode), copy(mesh.nedge), copy(mesh.nface), copy(mesh.nelem),
                copy(mesh.ndof), copy(mesh.conn), copy(mesh.lorder), copy(mesh.elem_ndof), copy(mesh.elem_type))

@doc raw"""
    Mesh3(m::Int64, n::Int64, l::Int64, h::Float64; order::Int64 = -1,
        degree::Union{FiniteElementType, Int64} = 1, lorder::Int64 = -1)

Constructs a mesh of a box domain. The box is split into $m\times n\times l$ cells, and each cell is further split into five tetrahedra.
`order` specifies the quadrature rule order. `degree` determines the degree for finite element basis functions.
"""
function Mesh3(m::Int64, n::Int64, l::Int64, h::Float64; order::Int64 = -1,
    degree::Union{FiniteElementType, Int64} = 1, lorder::Int64 = -1)
    coords = zeros((m+1)*(n+1)*(l+1), 3)
    elems = zeros(Int64, 5*m*n*l, 4)
    function ID(i, j, k)
        (k-1)*(n+1)*(m+1) + (j-1)*(m+1) + i
    end
    # the two five-tetrahedra decompositions of a hexahedral cell; they are
    # alternated by cell parity so that faces of neighbouring cells conform
    TE1 = [
        [1; 2; 3; 5],
        [2; 3; 4; 8],
        [3; 5; 7; 8],
        [2; 3; 5; 8],
        [2; 5; 6; 8]
    ]
    TE2 = [
        [1; 2; 4; 6],
        [1; 5; 6; 7],
        [4; 6; 7; 8],
        [1; 4; 6; 7],
        [1; 3; 4; 7]
    ]
    # node coordinates are filled in the order implied by ID(i, j, k):
    # i runs along x (1:m+1, stride 1) and j along y (1:n+1, stride m+1)
    s = 1
    for k = 1:l+1
        for j = 1:n+1
            for i = 1:m+1
                x = (i-1)*h
                y = (j-1)*h
                z = (k-1)*h
                coords[s,:] = [x;y;z]
                s += 1
            end
        end
    end
    K = 0
    for i = 1:m
        for j = 1:n
            for k = 1:l
                IDX = [
                    ID(i, j, k)
                    ID(i+1, j, k)
                    ID(i, j+1, k)
                    ID(i+1, j+1, k)
                    ID(i, j, k+1)
                    ID(i+1, j, k+1)
                    ID(i, j+1, k+1)
                    ID(i+1, j+1, k+1)
                ]
                for s = 1:5
                    if (i+j+k)%2==0
                        elems[s + K,:] = IDX[TE1[s]]
                    else
                        elems[s + K,:] = IDX[TE2[s]]
                    end
                end
                K += 5
            end
        end
    end
    Mesh3(coords, elems, order, degree, lorder)
end

"""
    Mesh3(filename::String; file_format::Union{String, Missing} = missing,
        order::Int64 = 2, degree::Union{FiniteElementType, Int64} = 1, lorder::Int64 = 2)
"""
function Mesh3(filename::String; file_format::Union{String, Missing} = missing,
        order::Int64 = 2, degree::Union{FiniteElementType, Int64} = 1, lorder::Int64 = 2)
    if splitext(filename)[2] == ".mat"
        d = matread(filename)
        return Mesh3(Float64.(d["nodes"]), Int64.(d["elems"]), order, degree, lorder)
    end
    meshio = get_meshio()
    if !ismissing(file_format)
        mesh = meshio.read(filename, file_format = file_format)
    else
        mesh = meshio.read(filename)
    end
    elem = []
    for (mkr, dat) in mesh.cells
        if mkr == "tetra"
            push!(elem, dat)
        end
    end
    elem = vcat(elem...)
    if length(elem)==0
        error("No tetrahedra found in the mesh file.")
    end
    Mesh3(Float64.(mesh.points), Int64.(elem) .+ 1, order, degree, lorder)
end

# """
#     Mesh(; order::Int64 = -1,
#         degree::Union{FiniteElementType, Int64} = 1, lorder::Int64 = -1)

# Creates a mesh with a reference triangle.

# ![](https://raw.githubusercontent.com/ADCMEMarket/ADCMEImages/master/AdFem/mapping.png)
# """
# function Mesh(; order::Int64 = -1,
#     degree::Union{FiniteElementType, Int64} = 1, lorder::Int64 = -1)
#     coords = [
#         1.0 0.0
#         0.0 1.0
#         0.0 0.0
#     ]
#     elems = [
#         1 2 3
#     ]
#     Mesh(coords, elems, order, degree, lorder)
# end

"""
    get_ngauss(mesh::Mesh3)

Return the total number of Gauss points.
"""
function get_ngauss(mesh::Mesh3)
    return Int64(@eval ccall((:mfem_get_ngauss3, $LIBMFEM3), Cint, ()))
end

"""
    get_volume(mesh::Mesh3)

Return the volumes of the tetrahedra as an array.
"""
function get_volume(mesh::Mesh3)
    a = zeros(size(mesh.elems,1))
    @eval ccall((:mfem_get_area3, $LIBMFEM3), Cvoid, (Ptr{Cdouble}, ), $a)
    a
end

"""
    gauss_nodes(mesh::Mesh3)

Return the Gauss quadrature points as an `ngauss × 3` array.
"""
function gauss_nodes(mesh::Mesh3)
    ngauss = get_ngauss(mesh)
    x = zeros(ngauss)
    y = zeros(ngauss)
    z = zeros(ngauss)
    @eval ccall((:mfem_get_gauss3, $LIBMFEM3), Cvoid, (Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}), $x, $y, $z)
    [x y z]
end

"""
    fem_nodes(mesh::Mesh3)

Return the FEM nodes, i.e. the mesh vertices.
"""
function fem_nodes(mesh::Mesh3)
    mesh.nodes
end

"""
    fvm_nodes(mesh::Mesh3)

Return the element centroids as an `nelem × 3` array.
"""
function fvm_nodes(mesh::Mesh3)
    nelem = size(mesh.elems, 1)
    out = zeros(nelem, 3)
    for i = 1:nelem
        # centroid of each tetrahedron: mean of its four vertex coordinates
        idx = mesh.elems[i, :]
        out[i, 1] = mean(mesh.nodes[idx, 1])
        out[i, 2] = mean(mesh.nodes[idx, 2])
        out[i, 3] = mean(mesh.nodes[idx, 3])
    end
    return out
end

function _edge_dict(mesh::Mesh3)
    D = Dict{Tuple{Int64, Int64}, Int64}()
    for i = 1:mesh.nedge
        D[(mesh.edges[i,1], mesh.edges[i,2])] = i
        D[(mesh.edges[i,2], mesh.edges[i,1])] = i
    end
    D
end
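# Minimal usage sketch (illustrative only; it assumes the AdFem package and the
# compiled `LIBMFEM3` library are available, which is not verified here):
#
#     mesh = Mesh3(8, 8, 8, 0.1)    # box of side 0.8: 8×8×8 cells, five tets each
#     ngauss = get_ngauss(mesh)     # total number of Gauss points
#     vols = get_volume(mesh)       # per-tetrahedron volumes; sum(vols) ≈ 0.8^3
#     pts = gauss_nodes(mesh)       # ngauss × 3 quadrature point coordinates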
{"hexsha": "a2706195b27932b6aba4ad99efc1a9383dbf5ddc", "size": 8846, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MFEM3/MFEM.jl", "max_stars_repo_name": "kailaix/AdFem.jl", "max_stars_repo_head_hexsha": "77eabfeedb297570a42d1f26575c59f0712796d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2020-10-18T01:33:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T00:13:24.000Z", "max_issues_repo_path": "src/MFEM3/MFEM.jl", "max_issues_repo_name": "kailaix/AdFem.jl", "max_issues_repo_head_hexsha": "77eabfeedb297570a42d1f26575c59f0712796d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-10-19T03:51:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:38:46.000Z", "max_forks_repo_path": "src/MFEM3/MFEM.jl", "max_forks_repo_name": "kailaix/AdFem.jl", "max_forks_repo_head_hexsha": "77eabfeedb297570a42d1f26575c59f0712796d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-11-05T11:34:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T19:30:09.000Z", "avg_line_length": 29.2913907285, "max_line_length": 141, "alphanum_fraction": 0.5290526792, "num_tokens": 2885}
from ccdc.io import EntryReader
import pandas as pd

csd_reader = EntryReader('CSD')

df = pd.read_csv('PATH TO CSV')

data = []

for refcode in df['Ref Codes']:
    # look the CSD entry up once per refcode instead of once per field
    entry = csd_reader.entry(refcode)
    values_dict = {'Ref Code': entry.identifier,
                   'Chemical Name': entry.chemical_name,
                   'Chemical Formula': entry.formula,
                   'Solvent': entry.solvent,
                   'Pressure': entry.pressure,
                   'Temperature': entry.temperature,
                   'Melting Point': entry.melting_point,
                   'Density': entry.calculated_density,
                   'DOI': entry.publication.doi,
                   'Journal': entry.publication.journal,
                   'Publication': entry.publication,
                   'Molecule': entry.crystal.molecule
                   }
    data.append(values_dict)

data_df = pd.DataFrame(data).fillna('None')
data_df.to_csv('all_MOFs.csv')
{"hexsha": "06bd93c2332dc5df19fe91fac1c95e12b4c5ea23", "size": 1182, "ext": "py", "lang": "Python", "max_stars_repo_path": "chemdataextractor_MOFs/web-scrape/csd_data_to_csv.py", "max_stars_repo_name": "peymanzmoghadam/DigiMOF-database-master-main", "max_stars_repo_head_hexsha": "62f11c41ca68a5ef4662b905d8a71c4bb111543a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chemdataextractor_MOFs/web-scrape/csd_data_to_csv.py", "max_issues_repo_name": "peymanzmoghadam/DigiMOF-database-master-main", "max_issues_repo_head_hexsha": "62f11c41ca68a5ef4662b905d8a71c4bb111543a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chemdataextractor_MOFs/web-scrape/csd_data_to_csv.py", "max_forks_repo_name": "peymanzmoghadam/DigiMOF-database-master-main", "max_forks_repo_head_hexsha": "62f11c41ca68a5ef4662b905d8a71c4bb111543a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.7777777778, "max_line_length": 76, "alphanum_fraction": 0.6209813875, "include": true, "reason": "import numpy", "num_tokens": 266}
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Tools for all elements.
"""

__all__ = [
    'f_proj_a',
    'f_proj_a_shadow'
]

import numpy as np


def f_proj_a(f_mat, direction):
    """
    Compute the force traction, proportional to the area of the element,
    acting in an arbitrary direction.

    Parameters
    ----------
    f_mat : ndarray
        normal force vector of one element in matrix notation. The shape of
        `f_mat` is (no_of_nodes, dofs_per_node). It weights the participations
        of the nodes to the defined force, e.g. for a line element: a half for
        each node times the length of the element
    direction : ndarray
        normalized vector describing the direction, in which the force should act.

    Returns
    -------
    f : ndarray
        force vector of traction vector in voigt notation (1d-array)

    """
    n_nodes, dofs_per_node = f_mat.shape
    f_out = np.zeros(n_nodes * dofs_per_node)
    for i, f_vec in enumerate(f_mat):
        f_out[i*dofs_per_node:(i+1)*dofs_per_node] = direction * np.sqrt(f_vec @ f_vec)
    return f_out


def f_proj_a_shadow(f_mat, direction):
    """
    Compute the force projection in any direction proportional to the
    projected area, i.e. the shadow that the area casts in the given
    direction.

    Parameters
    ----------
    f_mat : ndarray
        normal force vector of one element in matrix notation. The shape of
        `f_mat` is (no_of_nodes, dofs_per_node)
    direction : ndarray
        normalized vector describing the direction, in which the force should act.

    Returns
    -------
    f : ndarray
        force vector of traction vector in voigt notation (1d-array)

    """
    n_nodes, dofs_per_node = f_mat.shape
    f_out = np.zeros(n_nodes * dofs_per_node)
    for i, f_vec in enumerate(f_mat):
        # by Johannes Rutzmoser:
        # f_out[i*dofs_per_node:(i+1)*dofs_per_node] = direction * (direction @ f_vec)
        # by Christian Meyer: I think this has to be divided by || direction || because of projection
        f_out[i * dofs_per_node:(i + 1) * dofs_per_node] = direction * ((direction @ f_vec) / np.linalg.norm(direction))
    return f_out
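# Illustrative usage (a sketch, not part of the original module): a two-node
# line element whose nodal weights are half the element length each, loaded in
# the x-direction. The numbers are invented for demonstration.
#
#     import numpy as np
#     f_mat = np.array([[0.5, 0.0], [0.5, 0.0]])   # (no_of_nodes, dofs_per_node)
#     direction = np.array([1.0, 0.0])             # unit vector along x
#     f_proj_a(f_mat, direction)
#     # -> array([0.5, 0. , 0.5, 0. ])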
{"hexsha": "15076c6f3d72e6934029db4f50578c6e3f89dc37", "size": 2405, "ext": "py", "lang": "Python", "max_stars_repo_path": "amfe/neumann/tools.py", "max_stars_repo_name": "ma-kast/AMfe", "max_stars_repo_head_hexsha": "99686cc313fb8904a093fb42e6cf0b38f8cfd791", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "amfe/neumann/tools.py", "max_issues_repo_name": "ma-kast/AMfe", "max_issues_repo_head_hexsha": "99686cc313fb8904a093fb42e6cf0b38f8cfd791", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "amfe/neumann/tools.py", "max_forks_repo_name": "ma-kast/AMfe", "max_forks_repo_head_hexsha": "99686cc313fb8904a093fb42e6cf0b38f8cfd791", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8333333333, "max_line_length": 120, "alphanum_fraction": 0.6694386694, "include": true, "reason": "import numpy", "num_tokens": 615}
[STATEMENT] lemma jumpF_poly_noroot: assumes "poly p a\<noteq>0" shows "jumpF_polyL q p a = 0" "jumpF_polyR q p a = 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. jumpF_polyL q p a = 0 &&& jumpF_polyR q p a = 0 [PROOF STEP] subgoal [PROOF STATE] proof (prove) goal (1 subgoal): 1. jumpF_polyL q p a = 0 [PROOF STEP] unfolding jumpF_polyL_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. jumpF (\<lambda>x. poly q x / poly p x) (at_left a) = 0 [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: poly p a \<noteq> 0 goal (1 subgoal): 1. jumpF (\<lambda>x. poly q x / poly p x) (at_left a) = 0 [PROOF STEP] apply (intro jumpF_not_infinity) [PROOF STATE] proof (prove) goal (2 subgoals): 1. poly p a \<noteq> 0 \<Longrightarrow> continuous (at_left a) (\<lambda>x. poly q x / poly p x) 2. poly p a \<noteq> 0 \<Longrightarrow> at_left a \<noteq> bot [PROOF STEP] by (auto intro!:continuous_intros) [PROOF STATE] proof (prove) goal (1 subgoal): 1. jumpF_polyR q p a = 0 [PROOF STEP] subgoal [PROOF STATE] proof (prove) goal (1 subgoal): 1. jumpF_polyR q p a = 0 [PROOF STEP] unfolding jumpF_polyR_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. jumpF (\<lambda>x. poly q x / poly p x) (at_right a) = 0 [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: poly p a \<noteq> 0 goal (1 subgoal): 1. jumpF (\<lambda>x. poly q x / poly p x) (at_right a) = 0 [PROOF STEP] apply (intro jumpF_not_infinity) [PROOF STATE] proof (prove) goal (2 subgoals): 1. poly p a \<noteq> 0 \<Longrightarrow> continuous (at_right a) (\<lambda>x. poly q x / poly p x) 2. poly p a \<noteq> 0 \<Longrightarrow> at_right a \<noteq> bot [PROOF STEP] by (auto intro!:continuous_intros) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 798, "file": "Count_Complex_Roots_Extended_Sturm", "length": 11}
import numpy as np
import meshio

load_width = 1
width = 16
height = 8

# These values are the pixel positions of the volume centres taken from an image.
# Since the image was of a FVM, the elements are being defined between the volumes.
volXs = np.array([36,44,59,74,88,103,117,132,147,163,183,205,227,251,278,308,339,374,410,456,520,590,623], dtype=np.float64)
volXs -= volXs[0]; volXs *= width/volXs[-1]   # shift to the origin and rescale to the domain width
mdPts = 0.5*(volXs[:-1]+volXs[1:])[1:-1]      # midpoints between neighbouring volume centres
x = np.array( [volXs[0]] + list(mdPts) + [volXs[-1]] )

# import matplotlib.pyplot as plt
# plt.scatter(volXs, np.ones(volXs.size), color='k', marker='.')
# plt.scatter(mdPts, np.ones(mdPts.size), color='r', marker='.')
# plt.show()

nx = len(x) - 1
ny = 80
# index of the volume centre closest to the edge of the load
nl = min(enumerate(abs((volXs-load_width))),key=lambda t:t[1])[0]

y = np.linspace(0,height,ny+1)
xv,yv = np.meshgrid(x,y)
xv.resize(xv.size); yv.resize(yv.size)
points = np.array(list(zip(xv,yv)))
points = np.concatenate((points.T,[np.zeros(len(points))])).T   # append a z = 0 column

# node index of grid position (i, j)
idx = lambda i,j: i+(nx+1)*j
# quadrilateral cells with counter-clockwise node ordering
quad_cells = np.array([[idx(i,j),idx(i+1,j),idx(i+1,j+1),idx(i,j+1)] for j in range(ny) for i in range(nx)])
# boundary edges: k = 0 builds the horizontal south and north rows,
# k = 1 builds the vertical west and east columns (the slice reverses (p, q))
line_cells = np.array([ [idx(*(p,q)[ ::(1,-1)[k] ]), idx(*(p+1,q)[ ::(1,-1)[k] ])] for k in range(2) for q in (0,(ny,nx)[k]) for p in range((nx,ny)[k]) ])

quad_data = np.ones(len(quad_cells), dtype=np.int32)
# physical tags: South (2), then the top row split into Load (3, the first nl
# edges) and North (4), then West (5) and East (6)
line_data = np.array(nx*[2]+nl*[3]+(nx-nl)*[4]+ny*[5]+ny*[6], dtype=np.int32)

cells = [("line",line_cells), ("quad",quad_cells),]
cell_data = {"gmsh:physical":[line_data, quad_data],"gmsh:geometrical":[line_data, quad_data]}
field_data = {"Body":[1,2], "South":[2,1], "Load":[3,1], "North":[4,1], "West":[5,1], "East":[6,1]}

mesh = meshio.Mesh(points, cells, cell_data=cell_data, field_data=field_data)
mesh.write("output.msh", file_format="gmsh22", binary=False)
{"hexsha": "c8cf1064af4bb29f812d7148acb0a485e4749842", "size": 1759, "ext": "py", "lang": "Python", "max_stars_repo_path": "meshes/scripts/uneq_strip_footing_x.py", "max_stars_repo_name": "Gustavo029/GridReader", "max_stars_repo_head_hexsha": "7edc950c469b06c3de0093e5fd8bf6cfd59af354", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-26T17:14:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T17:14:54.000Z", "max_issues_repo_path": "meshes/scripts/uneq_strip_footing_x.py", "max_issues_repo_name": "Gustavo029/GridReader", "max_issues_repo_head_hexsha": "7edc950c469b06c3de0093e5fd8bf6cfd59af354", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "meshes/scripts/uneq_strip_footing_x.py", "max_forks_repo_name": "Gustavo029/GridReader", "max_forks_repo_head_hexsha": "7edc950c469b06c3de0093e5fd8bf6cfd59af354", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-26T07:11:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T17:14:42.000Z", "avg_line_length": 37.4255319149, "max_line_length": 154, "alphanum_fraction": 0.6469584991, "include": true, "reason": "import numpy", "num_tokens": 644}
"""Test code for L2 norm""" import numpy as np import tvm import topi from topi.util import get_const_tuple def l2norm_instance_python(a_np, eps, axis=None): """L2 norm operator in NCHW layout. Parameters ---------- a_np : numpy.ndarray 4-D with shape [batch, in_channel, in_height, in_width] eps : float epsilon constant value axis : list of int axis over the normalization applied Returns ------- l2norm_out : np.ndarray 4-D with shape [batch, out_channel, out_height, out_width] """ batch, axis1, axis2, axis3 = a_np.shape sqr_sum = np.zeros(shape=(batch,)).astype(a_np.dtype) sqrt_sum = np.zeros(shape=(batch,)).astype(a_np.dtype) l2norm_out = np.zeros(shape=a_np.shape).astype(a_np.dtype) dot_value = np.power(a_np, 2.0) sqr_sum = np.sum(dot_value, axis, keepdims=True) sqrt_sum = np.sqrt(np.maximum(np.broadcast_to(sqr_sum, a_np.shape), eps)) return np.divide(a_np, sqrt_sum) def verify_l2norm(n, c, h, w, eps, axis=None): A = tvm.placeholder((n, c, h, w), name='A') B = topi.nn.l2norm_instance(A, eps, axis) dtype = A.dtype a_np = np.random.uniform(size=(n, c, h, w)).astype(dtype) b_np = l2norm_instance_python(a_np, eps, axis) def check_device(device): ctx = tvm.context(device, 0) if not ctx.exist: print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) with tvm.target.create(device): s = topi.generic.schedule_l2norm(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx) f = tvm.build(s, [A, B], device) f(a, b) np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) for device in ['llvm', 'cuda', 'opencl', 'metal', 'rocm', 'vulkan']: check_device(device) def test_l2norm(): verify_l2norm(1, 3, 20, 20, 0.001) verify_l2norm(1, 3, 20, 20, 0.001, 1) verify_l2norm(1, 3, 20, 20, 0.001, (1, 2)) verify_l2norm(1, 3, 20, 20, 0.001, (2, 3)) verify_l2norm(1, 3, 20, 20, 0.001, (0, 3)) verify_l2norm(1, 3, 20, 20, 0.001, (0, 2, 3)) if __name__ == "__main__": test_l2norm()
{"hexsha": "182099ff93674df4a6fd40bf3956ef5b41b7c494", "size": 2256, "ext": "py", "lang": "Python", "max_stars_repo_path": "topi/tests/python/test_topi_l2norm.py", "max_stars_repo_name": "TaoLv/tvm", "max_stars_repo_head_hexsha": "11318966571f654f4e8bc550bfd9a293303e3000", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-06-07T16:39:01.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-07T16:39:01.000Z", "max_issues_repo_path": "topi/tests/python/test_topi_l2norm.py", "max_issues_repo_name": "TaoLv/tvm", "max_issues_repo_head_hexsha": "11318966571f654f4e8bc550bfd9a293303e3000", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "topi/tests/python/test_topi_l2norm.py", "max_forks_repo_name": "TaoLv/tvm", "max_forks_repo_head_hexsha": "11318966571f654f4e8bc550bfd9a293303e3000", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-07T07:28:19.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-18T08:09:06.000Z", "avg_line_length": 31.7746478873, "max_line_length": 78, "alphanum_fraction": 0.6148049645, "include": true, "reason": "import numpy", "num_tokens": 725}
import os
import PIL
import ipdb
import math
import torch
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset

import torchvision
import torchvision.transforms as transforms

import torch.distributed as dist
import torch.multiprocessing as mp

from einops import rearrange
from tqdm import tqdm
import matplotlib.pyplot as plt  # pyplot is what show_img() below actually calls


def setup_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True


def show_img(vector):
    # debug helper; relies on a module-level `args` being set before it is called
    low = vector < 1e-8
    high = vector > 1 - 1e-8
    vector = vector * (~low & ~high)
    num_images = min(vector.shape[0], 10)
    # image grid of shape (H, num_images * W, 3)
    img_grid = vector[:num_images, :, :, :].permute(2, 0, 3, 1).numpy()
    img_grid = img_grid.reshape(args.image_size, num_images * args.image_size, 3)
    print(img_grid.shape)
    plt.figure(figsize=(20, 10))  # set the figure size (x, y)
    plt.imshow(img_grid)


# pretrained Discrete VAE from OpenAI; map_pixels / unmap_pixels / load_model
# are defined locally below, so nothing needs to be imported from dall_e
def make_contiguous(module):
    with torch.no_grad():
        for param in module.parameters():
            param.set_(param.contiguous())


def unmap_pixels(x, eps=0.1):
    return torch.clamp((x - eps) / (1 - 2 * eps), 0, 1)


def load_model(path):
    with open(path, 'rb') as f:
        return torch.load(f, map_location=torch.device('cpu'))


def map_pixels(x, eps=0.1):
    return (1 - 2 * eps) * x + eps


class OpenAIDiscreteVAE(nn.Module):
    def __init__(self, model_path):
        super().__init__()
        OPENAI_PATH = os.path.expanduser(os.path.join(model_path, 'OPENAI'))
        self.enc = load_model(os.path.join(OPENAI_PATH, 'encoder.pkl'))
        self.dec = load_model(os.path.join(OPENAI_PATH, 'decoder.pkl'))
        make_contiguous(self)

        self.num_layers = 3
        self.image_size = 256
        self.num_tokens = 8192

    @torch.no_grad()
    def get_codebook_indices(self, img):
        img = map_pixels(img)
        z_logits = self.enc.blocks(img)
        z = torch.argmax(z_logits, dim=1)
        # print(z.shape)
        return rearrange(z, 'b h w -> b (h w)')

    def decode(self, img_seq):
        b, n = img_seq.shape
        img_seq = rearrange(img_seq, 'b (h w) -> b h w', h=int(math.sqrt(n)))
        z = F.one_hot(img_seq, num_classes=self.num_tokens)
        z = rearrange(z, 'b h w c -> b c h w').float()
        x_stats = self.dec(z).float()
        x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
        return x_rec

    def forward(self, img):
        return self.decode(self.get_codebook_indices(img))


class WebvidFramesDataset(Dataset):
    def __init__(self, args, split='train'):
        super().__init__()
        self.image_size = args.image_size
        self.frame_path = args.data_dir
        self.transform = transforms.Compose([
            transforms.Resize((args.image_size, args.image_size), interpolation=PIL.Image.BILINEAR),
            transforms.ToTensor()
        ])
        if split == 'train':
            self.frame_path = os.path.join(self.frame_path, 'train')
            self.video_ids = os.listdir(self.frame_path)
        else:
            raise ValueError(f"No implementation for {split}")

    def __len__(self):
        return len(self.video_ids)

    def __getitem__(self, index):
        video_id = self.video_ids[index]
        imgs = []
        cur_path = os.path.join(self.frame_path, video_id)
        cur_imgs = os.listdir(cur_path)
        for i in range(10):
            img_path = os.path.join(cur_path, f"{video_id}_{i}.jpg")
            try:
                img = torchvision.datasets.folder.pil_loader(img_path)
                cur_img = self.transform(img).unsqueeze(0)
            except:
                raise RuntimeError('Fail to load ' + img_path)
            # ipdb.set_trace()
            imgs.append(cur_img)
        return torch.cat(imgs, dim=0), video_id


def generate(gpu, args):
    # multi process
    rank = args.nr * args.gpus + gpu
    dist.init_process_group(backend='nccl', init_method='env://', world_size=args.world_size, rank=rank)
    # save_path for npy files
    save_path = os.path.join(args.save_dir, 'train')
    if os.path.exists(save_path) is False:
        os.makedirs(save_path)
    # set random seed
    setup_seed(10)

    torch.cuda.set_device(gpu)
    # set model
    vae = OpenAIDiscreteVAE('/home/user/mzsun/codes/Video_VQVAE/pretrained/')
    model = vae.cuda(gpu)
    # wrap the model
    model = nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
    # dataloader
    trainset = WebvidFramesDataset(args)
    trainsampler = torch.utils.data.distributed.DistributedSampler(
        trainset,
        num_replicas=args.world_size,
        rank=rank
    )
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=args.batch_size, shuffle=False,
        num_workers=0, pin_memory=True, sampler=trainsampler)

    td = tqdm(range(len(trainloader)), desc='rank: '+str(rank))
    # build the iterator once; calling next(iter(trainloader)) inside the loop
    # would restart the loader every step and yield the same first batch forever
    loader_iter = iter(trainloader)
    for _ in td:
        try:
            image, ids = next(loader_iter)
        except StopIteration:
            break
        except Exception:
            # skip batches whose frames failed to load
            continue
        # print(image.shape)
        inputs = image.reshape(args.batch_size * 10, 3, args.image_size, args.image_size)
        indices = model.module.get_codebook_indices(inputs.cuda(gpu))
        outputs = rearrange(indices, '(b n) l -> b (n l)', b=args.batch_size, n=10).detach().cpu().numpy()
        for i, video_id in enumerate(ids):
            npy_file = os.path.join(save_path, '{}.npy'.format(video_id))
            if os.path.exists(npy_file) is True:
                continue
            np.save(npy_file, outputs[i])
            # if gpu == 0:
            #     print(video_id, 'is finished.')


def main():
    def get_arg_parser():
        parser = argparse.ArgumentParser(description="DiffVQVAE")
        parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')
        parser.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
        parser.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')
        parser.add_argument('--batch_size', type=int, default=128)
        parser.add_argument('--image_size', default=64, type=int)
        parser.add_argument('--dataset', type=str, default='webvid')
        parser.add_argument('--data_dir', type=str, default='/raid/datasets/video_datasets/webvid/webvid_frames_10/')
        parser.add_argument('--save_dir', type=str, default='/raid/datasets/video_datasets/webvid/webvid_tokens_im64_ds8')
        return parser.parse_args()
    args = get_arg_parser()
    # multi process
    args.world_size = args.nodes * args.gpus
    os.environ['MASTER_ADDR'] = '127.0.0.2'
    os.environ['MASTER_PORT'] = '29678'
    mp.spawn(generate, nprocs=args.gpus, args=(args,))
    # generate(0, args)


if __name__ == '__main__':
    main()
    # break
{"hexsha": "32f201f6a965bf118770de52befc307dc5a5c882", "size": 7137, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess/generate_tokens.py", "max_stars_repo_name": "Sunmingzhen/CogView", "max_stars_repo_head_hexsha": "6bc71b7cc07a209d258729674019f7d15a0ac4bb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocess/generate_tokens.py", "max_issues_repo_name": "Sunmingzhen/CogView", "max_issues_repo_head_hexsha": "6bc71b7cc07a209d258729674019f7d15a0ac4bb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess/generate_tokens.py", "max_forks_repo_name": "Sunmingzhen/CogView", "max_forks_repo_head_hexsha": "6bc71b7cc07a209d258729674019f7d15a0ac4bb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7385321101, "max_line_length": 117, "alphanum_fraction": 0.6162253047, "include": true, "reason": "import numpy", "num_tokens": 1746}
[STATEMENT] lemma z_eq_v1_solves: assumes "z = v1" shows "\<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] interpret Q': DisjointPaths G v0 v1 Q [PROOF STATE] proof (prove) goal (1 subgoal): 1. DisjointPaths G v0 v1 Q [PROOF STEP] using DisjointPaths_supergraph H_x_def Q.DisjointPaths_axioms [PROOF STATE] proof (prove) using this: DisjointPaths (remove_vertex ?v) v0 v1 ?paths \<Longrightarrow> DisjointPaths G v0 v1 ?paths H_x \<equiv> remove_vertex new_last DisjointPaths H_x v0 v1 Q goal (1 subgoal): 1. DisjointPaths G v0 v1 Q [PROOF STEP] by auto [PROOF STATE] proof (state) goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] have "v0 \<leadsto>(v0 # R')\<leadsto> v1" [PROOF STATE] proof (prove) goal (1 subgoal): 1. v0 \<leadsto>(v0 # R')\<leadsto> v1 [PROOF STEP] using assms v0_R'_path [PROOF STATE] proof (prove) using this: z = v1 v0 \<leadsto>(v0 # R')\<leadsto> z goal (1 subgoal): 1. v0 \<leadsto>(v0 # R')\<leadsto> v1 [PROOF STEP] by auto [PROOF STATE] proof (state) this: v0 \<leadsto>(v0 # R')\<leadsto> v1 goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] moreover [PROOF STATE] proof (state) this: v0 \<leadsto>(v0 # R')\<leadsto> v1 goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] { [PROOF STATE] proof (state) this: v0 \<leadsto>(v0 # R')\<leadsto> v1 goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] fix xs v [PROOF STATE] proof (state) goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] assume "xs \<in> Q" "xs \<noteq> v0 # R'" "v \<in> set xs" "v \<in> set (v0 # R')" [PROOF STATE] proof (state) this: xs \<in> Q xs \<noteq> v0 # R' v \<in> set xs v \<in> set (v0 # R') goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] then [PROOF STATE] proof (chain) picking this: xs \<in> Q xs \<noteq> v0 # R' v \<in> set xs v \<in> set (v0 # R') [PROOF STEP] have "v = v0 \<or> v = v1" [PROOF STATE] proof (prove) using this: xs \<in> Q xs \<noteq> v0 # R' v \<in> set xs v \<in> set (v0 # R') goal (1 subgoal): 1. v = v0 \<or> v = v1 [PROOF STEP] using R'_no_Q Q.hitting_paths_def \<open>z = v1\<close> [PROOF STATE] proof (prove) using this: xs \<in> Q xs \<noteq> v0 # R' v \<in> set xs v \<in> set (v0 # R') \<lbrakk>?v \<in> set R'; ?v \<noteq> z\<rbrakk> \<Longrightarrow> \<not> Q'.hitting_paths ?v Q'.hitting_paths \<equiv> \<lambda>x. x \<noteq> v0 \<and> ((\<exists>xs\<in>Q. x \<in> set xs) \<or> x = v1) z = v1 goal (1 subgoal): 1. v = v0 \<or> v = v1 [PROOF STEP] by auto [PROOF STATE] proof (state) this: v = v0 \<or> v = v1 goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] } [PROOF STATE] proof (state) this: \<lbrakk>?xs2 \<in> Q; ?xs2 \<noteq> v0 # R'; ?v2 \<in> set ?xs2; ?v2 \<in> set (v0 # R')\<rbrakk> \<Longrightarrow> ?v2 = v0 \<or> ?v2 = v1 goal (1 subgoal): 1. \<exists>paths. 
DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: v0 \<leadsto>(v0 # R')\<leadsto> v1 \<lbrakk>?xs2 \<in> Q; ?xs2 \<noteq> v0 # R'; ?v2 \<in> set ?xs2; ?v2 \<in> set (v0 # R')\<rbrakk> \<Longrightarrow> ?v2 = v0 \<or> ?v2 = v1 [PROOF STEP] have "DisjointPaths G v0 v1 (insert (v0 # R') Q)" [PROOF STATE] proof (prove) using this: v0 \<leadsto>(v0 # R')\<leadsto> v1 \<lbrakk>?xs2 \<in> Q; ?xs2 \<noteq> v0 # R'; ?v2 \<in> set ?xs2; ?v2 \<in> set (v0 # R')\<rbrakk> \<Longrightarrow> ?v2 = v0 \<or> ?v2 = v1 goal (1 subgoal): 1. DisjointPaths G v0 v1 (insert (v0 # R') Q) [PROOF STEP] using Q'.DisjointPaths_extend [PROOF STATE] proof (prove) using this: v0 \<leadsto>(v0 # R')\<leadsto> v1 \<lbrakk>?xs2 \<in> Q; ?xs2 \<noteq> v0 # R'; ?v2 \<in> set ?xs2; ?v2 \<in> set (v0 # R')\<rbrakk> \<Longrightarrow> ?v2 = v0 \<or> ?v2 = v1 \<lbrakk>v0 \<leadsto>?P\<leadsto> v1; \<And>xs v. \<lbrakk>xs \<in> Q; xs \<noteq> ?P; v \<in> set xs; v \<in> set ?P\<rbrakk> \<Longrightarrow> v = v0 \<or> v = v1\<rbrakk> \<Longrightarrow> DisjointPaths G v0 v1 (insert ?P Q) goal (1 subgoal): 1. DisjointPaths G v0 v1 (insert (v0 # R') Q) [PROOF STEP] by blast [PROOF STATE] proof (state) this: DisjointPaths G v0 v1 (insert (v0 # R') Q) goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] moreover [PROOF STATE] proof (state) this: DisjointPaths G v0 v1 (insert (v0 # R') Q) goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] have "card (insert (v0 # R') Q) = Suc sep_size" [PROOF STATE] proof (prove) goal (1 subgoal): 1. card (insert (v0 # R') Q) = Suc sep_size [PROOF STEP] by (simp add: P_k(2) Q(2) Q.finite_paths Q.second_vertices_new_path hd_R') [PROOF STATE] proof (state) this: card (insert (v0 # R') Q) = Suc sep_size goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: DisjointPaths G v0 v1 (insert (v0 # R') Q) card (insert (v0 # R') Q) = Suc sep_size [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: DisjointPaths G v0 v1 (insert (v0 # R') Q) card (insert (v0 # R') Q) = Suc sep_size goal (1 subgoal): 1. \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<exists>paths. DisjointPaths G v0 v1 paths \<and> card paths = Suc sep_size goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 2761, "file": "Menger_Y_eq_new_last", "length": 27}
-- ---------------------------------------------------------------- [ Effs.idr ] -- Module : Effs.idr -- Copyright : (c) Jan de Muijnck-Hughes -- License : see LICENSE -- --------------------------------------------------------------------- [ EOH ] module Sif.Effs import public Effects import public Effect.System import public Effect.State import public Effect.Exception import public Effect.File import public Effect.StdIO import public Effect.Logging.Default import public Effect.Perf import ArgParse import Sif.Types import Sif.DSL.State import Sif.Error import Sif.Pattern import Sif.Builder.AbsInterp import Sif.Builder.DirectRep import Sif.Library import Sif.Options -- -------------------------------------------------------------- [ Directives ] %access export -- -------------------------------------------------------------- [ State Defs ] public export record SifState where constructor MkSifState opts : SifOpts lib : SifLib bends : List SifBackend builder : SifBackend Default SifState where default = MkSifState defOpts defaultLib [backendAbsInterp,backendDirectRep] backendAbsInterp -- -------------------------------------------------------------------- [ Effs ] public export SifEffs : List EFFECT SifEffs = [ FILE_IO () , SYSTEM , STDIO , LOG , PERF , 'sif ::: EXCEPTION SifError , 'bstate ::: STATE BuildState , 'sstate ::: STATE SifState ] public export Sif : Type -> Type Sif rTy = Eff rTy SifEffs -- ----------------------------------------------------------------- [ Options ] getOptions : Eff SifOpts ['sstate ::: STATE SifState] getOptions = pure $ opts !('sstate :- get) putOptions : SifOpts -> Eff () ['sstate ::: STATE SifState] putOptions o = 'sstate :- update (\st => record {opts = o} st) updateOptions : (SifOpts -> SifOpts) -> Eff () ['sstate ::: STATE SifState] updateOptions f = 'sstate :- update (\st => record {opts = f (opts st)} st) -- ----------------------------------------------------------------- [ Library ] getLibrary : Eff SifLib ['sstate ::: STATE SifState] getLibrary = pure $ lib !('sstate :- get) putLibrary : SifLib -> Eff () ['sstate ::: STATE SifState] putLibrary l = 'sstate :- update (\st => record {lib = l} st) updateLibrary : (SifLib -> SifLib) -> Eff () ['sstate ::: STATE SifState] updateLibrary u = 'sstate :- update (\st => record {lib = u (lib st)} st) -- ---------------------------------------------------------------- [ Backends ] getSifBackend : Eff SifBackend ['sstate ::: STATE SifState] getSifBackend = pure $ (builder !('sstate :- get)) setSifBackend : Maybe String -> Eff () SifEffs setSifBackend Nothing = do putStrLn $ unwords ["Using Default Backend"] 'sstate :- update (\st => record {builder = backendAbsInterp} st) setSifBackend (Just n) = do st <- 'sstate :- get case find (\x => name x == n) (bends st) of Nothing => do printLn (NoSuchBackend n) setSifBackend Nothing Just x => do putStrLn $ unwords ["Using backend", show n] 'sstate :- update (\st => record {builder = x} st) addSifBackend : SifBackend -> Eff () ['sstate ::: STATE SifState] addSifBackend b = 'sstate :- update (\st => record {bends = (b::bends st)} st) -- ----------------------------------------------------------------- [ Options ] parseOptions : Sif SifOpts parseOptions = case parseArgs defOpts convOpts !getArgs of Left err => Sif.raise $ GeneralError (show err) Right o => pure o -- --------------------------------------------------------------------- [ EOF ]
{"hexsha": "31a678ce101fbc7f019668fca76304ddd3029765", "size": 3660, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Sif/Effs.idr", "max_stars_repo_name": "jfdm/sif-lang", "max_stars_repo_head_hexsha": "9554832d3de52a969f8866b4d6fd31fe44f93614", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-06-15T14:01:16.000Z", "max_stars_repo_stars_event_max_datetime": "2015-06-15T14:01:16.000Z", "max_issues_repo_path": "Sif/Effs.idr", "max_issues_repo_name": "jfdm/sif-lang", "max_issues_repo_head_hexsha": "9554832d3de52a969f8866b4d6fd31fe44f93614", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sif/Effs.idr", "max_forks_repo_name": "jfdm/sif-lang", "max_forks_repo_head_hexsha": "9554832d3de52a969f8866b4d6fd31fe44f93614", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0169491525, "max_line_length": 80, "alphanum_fraction": 0.5292349727, "num_tokens": 891}
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------

"""
    BallFolding(ball)

A method for creating folds from a spatial object in which each fold
is induced by a ball centered at one of the object's points.
"""
struct BallFolding{B} <: FoldingMethod
  ball::B
end

function folds(object, method::BallFolding)
  # retrieve parameters
  ball = method.ball

  # create search method
  searcher = NeighborhoodSearch(object, ball)

  n = nelms(object)

  function pair(i)
    # source and target indices: points inside the ball around element i
    # are held out of the source set, and element i itself is the target
    coords = coordinates(object, i)
    inside = search(coords, searcher)
    sinds = setdiff(1:n, inside)
    tinds = [i]

    sinds, tinds
  end

  (pair(i) for i in 1:n)
end
{"hexsha": "2a48b3917ef63acff1a03500c7666f43c254f94d", "size": 780, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/folding/ball.jl", "max_stars_repo_name": "mauro3/GeoStatsBase.jl", "max_stars_repo_head_hexsha": "98bd6c4c2f6ab4cbb228677329b95a6a3df3d95d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/folding/ball.jl", "max_issues_repo_name": "mauro3/GeoStatsBase.jl", "max_issues_repo_head_hexsha": "98bd6c4c2f6ab4cbb228677329b95a6a3df3d95d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/folding/ball.jl", "max_forks_repo_name": "mauro3/GeoStatsBase.jl", "max_forks_repo_head_hexsha": "98bd6c4c2f6ab4cbb228677329b95a6a3df3d95d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2857142857, "max_line_length": 68, "alphanum_fraction": 0.5666666667, "num_tokens": 178}
[STATEMENT] lemma inf_dense: "x \<noteq> bot \<Longrightarrow> y \<noteq> bot \<Longrightarrow> x \<sqinter> y \<noteq> bot" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>x \<noteq> bot; y \<noteq> bot\<rbrakk> \<Longrightarrow> x \<sqinter> y \<noteq> bot [PROOF STEP] by (metis inf_selective)
{"llama_tokens": 123, "file": "Stone_Algebras_Lattice_Basics", "length": 1}
""" Equal opportunity - Protected and unprotected False postives ratio""" import math import sys import numpy from metrics.utils import calc_fp_fn from metrics.Metric import Metric class EqOppo_fp_ratio(Metric): def __init__(self): Metric.__init__(self) self.name = "EqOppo_fp_ratio" def calc( self, actual, predicted, dict_of_sensitive_lists, single_sensitive_name, unprotected_vals, positive_pred, ): sensitive = dict_of_sensitive_lists[single_sensitive_name] fp_unprotected, fp_protected, fn_protected, fn_unprotected = calc_fp_fn( actual, predicted, sensitive, unprotected_vals, positive_pred ) fp_ratio = 0.0 if fp_unprotected > 0: fp_ratio = fp_protected / fp_unprotected if fp_unprotected == 0.0 and fp_protected == 0.0: fp_ratio = 1.0 return fp_ratio
{"hexsha": "6d1f5f1ffb9c744b77afbd7608ca368ce8346c8c", "size": 940, "ext": "py", "lang": "Python", "max_stars_repo_path": "metrics/EqOppo_fp_ratio.py", "max_stars_repo_name": "Khumayun/FairDeepLearning", "max_stars_repo_head_hexsha": "e19947c17c282ce1e89ad105cc241ffc07190628", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-06-20T07:57:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T05:45:08.000Z", "max_issues_repo_path": "metrics/EqOppo_fp_ratio.py", "max_issues_repo_name": "Khumayun/FairDeepLearning", "max_issues_repo_head_hexsha": "e19947c17c282ce1e89ad105cc241ffc07190628", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-24T14:29:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T14:29:37.000Z", "max_forks_repo_path": "metrics/EqOppo_fp_ratio.py", "max_forks_repo_name": "Khumayun/FairDeepLearning", "max_forks_repo_head_hexsha": "e19947c17c282ce1e89ad105cc241ffc07190628", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-06-20T08:01:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T14:31:41.000Z", "avg_line_length": 26.8571428571, "max_line_length": 80, "alphanum_fraction": 0.6563829787, "include": true, "reason": "import numpy", "num_tokens": 216}
import numpy as np

from .constants import WGS84
from .ellipsoid import Ellipsoid


def to_ecef(positions: np.ndarray, *, ellipsoid: Ellipsoid = WGS84) -> np.ndarray:
    """Convert positions to earth-centered, earth-fixed coordinates

    Ported from
    https://github.com/loicgasser/quantized-mesh-tile/blob/master/quantized_mesh_tile/llh_ecef.py
    under the MIT license. Originally from
    https://github.com/bistromath/gr-air-modes/blob/9e2515a56609658f168f0c833a14ca4d2332713e/python/mlat.py#L73-L86
    under the BSD-3 clause license.

    Args:
        - positions: expected to be an ndarray with shape (-1, 3) whose columns
          are longitude (degrees), latitude (degrees), and height (meters)

    Kwargs:
        - ellipsoid: (`Ellipsoid`): ellipsoid defined by its semi-major `a` and
          semi-minor `b` axes. Default: WGS84 ellipsoid.
    """
    msg = 'ellipsoid must be an instance of the Ellipsoid class'
    assert isinstance(ellipsoid, Ellipsoid), msg

    lon = positions[:, 0] * np.pi / 180
    lat = positions[:, 1] * np.pi / 180
    alt = positions[:, 2]

    # prime vertical radius of curvature at the given latitudes
    n = lambda arr: ellipsoid.a / np.sqrt(1 - ellipsoid.e2 * (np.square(np.sin(arr))))

    nlat = n(lat)

    x = (nlat + alt) * np.cos(lat) * np.cos(lon)
    y = (nlat + alt) * np.cos(lat) * np.sin(lon)
    z = (nlat * (1 - ellipsoid.e2) + alt) * np.sin(lat)

    # Do I need geoid correction?
    # https://github.com/bistromath/gr-air-modes/blob/9e2515a56609658f168f0c833a14ca4d2332713e/python/mlat.py#L88-L92
    return np.vstack([x, y, z]).T
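# A small self-check (illustrative; run with `python -m quantized_mesh_encoder.ecef`
# so the relative imports above resolve). On the WGS84 ellipsoid, a point on the
# equator at the prime meridian should map to approximately (a, 0, 0).
if __name__ == "__main__":
    demo = np.array([[0.0, 0.0, 0.0]])  # longitude (deg), latitude (deg), height (m)
    print(to_ecef(demo))  # expected: roughly [[6378137. 0. 0.]]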
{"hexsha": "8874ce808e880bb4d2aa912f4e3416e4367a2efd", "size": 1503, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantized_mesh_encoder/ecef.py", "max_stars_repo_name": "kylebarron/quantized-mesh-py", "max_stars_repo_head_hexsha": "98e9246ee14738a6665d7c87ce0883b6fe4b941e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2020-05-11T19:47:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:41:35.000Z", "max_issues_repo_path": "quantized_mesh_encoder/ecef.py", "max_issues_repo_name": "kylebarron/quantized-mesh-py", "max_issues_repo_head_hexsha": "98e9246ee14738a6665d7c87ce0883b6fe4b941e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2020-04-27T06:07:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-08T19:10:12.000Z", "max_forks_repo_path": "quantized_mesh_encoder/ecef.py", "max_forks_repo_name": "kylebarron/quantized-mesh-py", "max_forks_repo_head_hexsha": "98e9246ee14738a6665d7c87ce0883b6fe4b941e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-06-02T02:29:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-10T22:37:31.000Z", "avg_line_length": 34.9534883721, "max_line_length": 117, "alphanum_fraction": 0.6679973387, "include": true, "reason": "import numpy", "num_tokens": 467}
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import pytest import ray from ray.tests.conftest import _ray_start_cluster num_tasks_submitted = [10**n for n in range(0, 6)] num_tasks_ids = ["{}_tasks".format(i) for i in num_tasks_submitted] @ray.remote def dummy_task(val): return val def benchmark_task_submission(num_tasks): total_tasks = 100000 for _ in range(total_tasks // num_tasks): ray.get([dummy_task.remote(i) for i in range(num_tasks)]) def warmup(): x = np.zeros(10**6, dtype=np.uint8) for _ in range(5): for _ in range(5): ray.put(x) for _ in range(5): ray.get([dummy_task.remote(0) for _ in range(1000)]) @pytest.mark.benchmark @pytest.mark.parametrize("num_tasks", num_tasks_submitted, ids=num_tasks_ids) def test_task_submission(benchmark, num_tasks): num_cpus = 16 ray.init( num_cpus=num_cpus, object_store_memory=10**7, ignore_reinit_error=True) # warm up the plasma store warmup() benchmark(benchmark_task_submission, num_tasks) ray.shutdown() def benchmark_task_forward(f, num_tasks): ray.get([f.remote() for _ in range(num_tasks)]) @pytest.mark.benchmark @pytest.mark.parametrize( "num_tasks", [10**3, 10**4], ids=[str(num) + "_tasks" for num in [10**3, 10**4]]) def test_task_forward(benchmark, num_tasks): with _ray_start_cluster( do_init=True, num_nodes=1, num_cpus=16, object_store_memory=10**7, ) as cluster: cluster.add_node( num_cpus=16, object_store_memory=10**7, resources={"my_resource": 100}, ) @ray.remote(resources={"my_resource": 0.001}) def f(): return 1 # Warm up ray.get([f.remote() for _ in range(100)]) benchmark(benchmark_task_forward, f, num_tasks) def benchmark_transfer_object(actor, object_ids): ray.get(actor.f.remote(object_ids)) @pytest.mark.benchmark @pytest.mark.parametrize("object_number, data_size", [(10000, 500), (10000, 5000), (1000, 500), (1000, 5000)]) def test_transfer_performance(benchmark, ray_start_cluster_head, object_number, data_size): cluster = ray_start_cluster_head cluster.add_node(resources={"my_resource": 1}, object_store_memory=10**9) @ray.remote(resources={"my_resource": 1}) class ObjectActor: def f(self, object_ids): ray.get(object_ids) # setup remote actor actor = ObjectActor.remote() actor.f.remote([]) data = bytes(1) * data_size object_ids = [ray.put(data) for _ in range(object_number)] benchmark(benchmark_transfer_object, actor, object_ids)
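# How one might run these benchmarks locally (a sketch; assumes pytest and the
# pytest-benchmark plugin are installed, since the tests use the `benchmark`
# fixture and are tagged with the `benchmark` mark):
#
#   pytest python/ray/tests/perf_integration_tests/test_perf_integration.py \
#       -m benchmark -k test_task_submission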
{"hexsha": "b4225dec1a04a5e41ead263a42739a72c9f0c9d0", "size": 2851, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/ray/tests/perf_integration_tests/test_perf_integration.py", "max_stars_repo_name": "tonymackinnon/ray", "max_stars_repo_head_hexsha": "14a1419682bdba40d2c8bf226e1727cf44abcaa4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-27T08:48:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-27T08:48:11.000Z", "max_issues_repo_path": "python/ray/tests/perf_integration_tests/test_perf_integration.py", "max_issues_repo_name": "ashuein/ray", "max_issues_repo_head_hexsha": "bcc379556b135ee2e472b0e4b388c9e1f8274dc9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-03-04T13:03:24.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-06T11:25:07.000Z", "max_forks_repo_path": "python/ray/tests/perf_integration_tests/test_perf_integration.py", "max_forks_repo_name": "ashuein/ray", "max_forks_repo_head_hexsha": "bcc379556b135ee2e472b0e4b388c9e1f8274dc9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-04T13:27:51.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-17T04:20:38.000Z", "avg_line_length": 27.6796116505, "max_line_length": 79, "alphanum_fraction": 0.6517011575, "include": true, "reason": "import numpy", "num_tokens": 701}
import numpy as np

# print ten random integers drawn uniformly from [11, 20]
for _ in range(10):
    p = np.random.randint(11, 21)
    print(p)
{"hexsha": "0c334d6007790ec4e7d9377f3a4d60a308bf3b7c", "size": 101, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test.py", "max_stars_repo_name": "owuordickson/swarm_gp", "max_stars_repo_head_hexsha": "0a6c6bdd51bc63fbf7e514207d3c367cebe72827", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test.py", "max_issues_repo_name": "owuordickson/swarm_gp", "max_issues_repo_head_hexsha": "0a6c6bdd51bc63fbf7e514207d3c367cebe72827", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test.py", "max_forks_repo_name": "owuordickson/swarm_gp", "max_forks_repo_head_hexsha": "0a6c6bdd51bc63fbf7e514207d3c367cebe72827", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.4285714286, "max_line_length": 33, "alphanum_fraction": 0.6534653465, "include": true, "reason": "import numpy", "num_tokens": 31}
// // Copyright (c) 2019 Vinnie Falco (vinnie.falco@gmail.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // Official repository: https://github.com/CPPAlliance/http_proto // // Test that header file is self-contained. #include <boost/http_proto/rfc/token_rule.hpp> #include <boost/url/grammar/parse.hpp> #include "test_suite.hpp" namespace boost { namespace http_proto { class token_rule_test { public: void testParse() { auto const match = []( string_view s, string_view m) { auto it = s.data(); auto const end = it + s.size(); error_code ec; token t; if(! grammar::parse( it, end, ec, t)) BOOST_TEST(m.empty()); else BOOST_TEST(*t == m); }; match("", ""); match("x", "x"); match("=", ""); match("xy", "xy"); match("=x", ""); match("x=", "x"); match("==", ""); match("xy=", "xy"); match("===", ""); } void run() { testParse(); } }; TEST_SUITE( token_rule_test, "boost.http_proto.token"); } // http_proto } // boost
{"hexsha": "205b0566567aa3db850e6165a34f3a7f963aef9b", "size": 1330, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/unit/rfc/token_rule.cpp", "max_stars_repo_name": "alandefreitas/http_proto", "max_stars_repo_head_hexsha": "dc64cbdd44048a2c06671282b736f7edacb39a42", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2021-11-17T03:23:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T15:58:02.000Z", "max_issues_repo_path": "test/unit/rfc/token_rule.cpp", "max_issues_repo_name": "alandefreitas/http_proto", "max_issues_repo_head_hexsha": "dc64cbdd44048a2c06671282b736f7edacb39a42", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2021-11-17T16:13:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T04:17:47.000Z", "max_forks_repo_path": "test/unit/rfc/token_rule.cpp", "max_forks_repo_name": "samd2/http_proto", "max_forks_repo_head_hexsha": "486729f1a68b7611f143e18c7bae8df9b908e9aa", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2021-11-17T03:01:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-17T14:14:45.000Z", "avg_line_length": 20.4615384615, "max_line_length": 79, "alphanum_fraction": 0.5127819549, "num_tokens": 321}
# This is just used to see the webcam output
import time
import cv2
import zbar
import Image
import numpy as np
import pyqrcode

background_width = 1280
background_height = 720
padding = 10
qr_scale = 4
qr_unscaled_size = 21  # version 1
qr_size = qr_unscaled_size * qr_scale
x_qr_interval = background_width - (padding * 2) - qr_size
y_qr_interval = background_height - (padding * 2) - qr_size


def get_qr_code(data, scale):
    # render a version-1 QR code as a black-on-white uint8 image, scaled up
    qr = pyqrcode.create(data, error='H', version=1, mode='binary')
    qr_code = np.array(qr.code, dtype=np.uint8)
    qr_code[qr_code == 0] = 255
    qr_code[qr_code == 1] = 0
    qr_code_size = qr_code.shape[0] * scale
    qr_code = cv2.resize(qr_code, (qr_code_size, qr_code_size), interpolation=cv2.INTER_AREA)
    return qr_code


def display_background():
    # white canvas with one QR code in each corner encoding its grid position
    background = np.zeros((background_height, background_width), dtype=np.uint8)
    background = background + 255
    for x in range(0, 2):
        for y in range(0, 2):
            data = str(x) + ',' + str(y)
            qr_code = get_qr_code(data, qr_scale)
            qr_x = x * x_qr_interval + padding
            qr_y = y * y_qr_interval + padding
            background[qr_y:qr_y + qr_size, qr_x:qr_x + qr_size] = qr_code
    while True:
        cv2.imshow("background", background)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


def process_video():
    # cap = cv2.VideoCapture(0)  # use the webcam instead of a file
    cap = cv2.VideoCapture("/home/pkrush/find-parts-faster/0.mp4")
    # cap.set(3, 1920)
    # cap.set(4, 1080)
    scanner = zbar.ImageScanner()
    scanner.parse_config('enable')

    for x in range(0, 400000):
        # Capture frame-by-frame
        start_time = time.time()
        ret, frame = cap.read()
        if frame is None:  # `frame == None` is ambiguous for numpy arrays
            break
        output = frame.copy()

        # raw detection code
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY, dstCn=0)
        pil = Image.fromarray(gray)
        width, height = pil.size
        raw = pil.tostring()

        # create a reader
        image = zbar.Image(width, height, 'Y800', raw)
        scanner.scan(image)

        # extract results
        print "symbols found: " + str(len(image.symbols))
        if len(image.symbols) == 4:
            max_src = np.zeros((4, 2), dtype=np.float32)
            max_dst = np.zeros((4, 2), dtype=np.float32)
            for symbol in image:
                # do something useful with results
                if symbol.data in ["0,0", "1,0", "0,1", "1,1"]:
                    loc = symbol.location
                    print symbol.data, loc
                    cv2.line(output, loc[0], loc[1], (0, 0, 0))
                    cv2.line(output, loc[1], loc[2], (0, 0, 0))
                    cv2.line(output, loc[2], loc[3], (0, 0, 0))
                    cv2.line(output, loc[3], loc[0], (0, 0, 0))
                    src = np.array(loc, dtype=np.float32)
                    offset_x = 0
                    offset_y = 0
                    # each QR payload "x,y" names the corner it occupies
                    if symbol.data == "0,0":
                        max_src[0] = loc[0]
                        qr_code_x = 0
                        qr_code_y = 0
                    if symbol.data == "0,1":
                        max_src[1] = loc[1]
                        qr_code_x = 0
                        qr_code_y = 1
                    if symbol.data == "1,0":
                        max_src[3] = loc[3]
                        qr_code_x = 1
                        qr_code_y = 0
                    if symbol.data == "1,1":
                        max_src[2] = loc[2]
                        qr_code_x = 1
                        qr_code_y = 1
                    local_offset_x = offset_x + x_qr_interval * qr_code_x
                    local_offset_y = offset_y + y_qr_interval * qr_code_y
                    local_dst = np.zeros((4, 2), dtype=np.float32)
                    local_dst[0] = [local_offset_x, local_offset_y]
                    local_dst[1] = [local_offset_x, local_offset_y + qr_size]
                    local_dst[2] = [local_offset_x + qr_size, local_offset_y + qr_size]
                    local_dst[3] = [local_offset_x + qr_size, local_offset_y]

            # dst covers all 4 qr codes:
            max_offset_x = offset_x + qr_size * 1
            max_offset_y = offset_y + qr_size * 1
            max_dst[0] = [offset_x, offset_y]
            max_dst[1] = [offset_x, max_offset_y + qr_size]
            max_dst[2] = [max_offset_x + qr_size, max_offset_y + qr_size]
            max_dst[3] = [max_offset_x + qr_size, offset_y]
            M = cv2.getPerspectiveTransform(max_src, max_dst)
            warped = cv2.warpPerspective(output, M, (960, 540))
            cv2.imshow("warped", warped)

        display = cv2.resize(output, (960, 540), interpolation=cv2.INTER_AREA)
        cv2.imshow("Camera", display)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


# display_background()
process_video()
{"hexsha": "23b57fab2645b4e3a3338e11dfaa0fda1cef4e29", "size": 5830, "ext": "py", "lang": "Python", "max_stars_repo_path": "qr_code_temp.py", "max_stars_repo_name": "GemHunt/RealTimeCoinID", "max_stars_repo_head_hexsha": "26449a1cc79f0698f7d4fd5b8dbb000a6c25f7c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qr_code_temp.py", "max_issues_repo_name": "GemHunt/RealTimeCoinID", "max_issues_repo_head_hexsha": "26449a1cc79f0698f7d4fd5b8dbb000a6c25f7c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qr_code_temp.py", "max_forks_repo_name": "GemHunt/RealTimeCoinID", "max_forks_repo_head_hexsha": "26449a1cc79f0698f7d4fd5b8dbb000a6c25f7c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8987341772, "max_line_length": 116, "alphanum_fraction": 0.5319039451, "include": true, "reason": "import numpy", "num_tokens": 1536}
// // Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef BEAST_IMPL_STATIC_STREAMBUF_IPP #define BEAST_IMPL_STATIC_STREAMBUF_IPP #include <beast/core/detail/type_traits.hpp> #include <boost/asio/buffer.hpp> #include <algorithm> #include <cstring> #include <iterator> #include <stdexcept> namespace beast { class static_streambuf::const_buffers_type { std::size_t n_; std::uint8_t const* p_; public: using value_type = boost::asio::const_buffer; class const_iterator; const_buffers_type() = delete; const_buffers_type( const_buffers_type const&) = default; const_buffers_type& operator=( const_buffers_type const&) = default; const_iterator begin() const; const_iterator end() const; private: friend class static_streambuf; const_buffers_type( std::uint8_t const* p, std::size_t n) : n_(n) , p_(p) { } }; class static_streambuf::const_buffers_type::const_iterator { std::size_t n_ = 0; std::uint8_t const* p_ = nullptr; public: using value_type = boost::asio::const_buffer; using pointer = value_type const*; using reference = value_type; using difference_type = std::ptrdiff_t; using iterator_category = std::bidirectional_iterator_tag; const_iterator() = default; const_iterator(const_iterator&& other) = default; const_iterator(const_iterator const& other) = default; const_iterator& operator=(const_iterator&& other) = default; const_iterator& operator=(const_iterator const& other) = default; bool operator==(const_iterator const& other) const { return p_ == other.p_; } bool operator!=(const_iterator const& other) const { return !(*this == other); } reference operator*() const { return value_type{p_, n_}; } pointer operator->() const = delete; const_iterator& operator++() { p_ += n_; return *this; } const_iterator operator++(int) { auto temp = *this; ++(*this); return temp; } const_iterator& operator--() { p_ -= n_; return *this; } const_iterator operator--(int) { auto temp = *this; --(*this); return temp; } private: friend class const_buffers_type; const_iterator( std::uint8_t const* p, std::size_t n) : n_(n) , p_(p) { } }; inline auto static_streambuf::const_buffers_type::begin() const -> const_iterator { return const_iterator{p_, n_}; } inline auto static_streambuf::const_buffers_type::end() const -> const_iterator { return const_iterator{p_ + n_, n_}; } //------------------------------------------------------------------------------ class static_streambuf::mutable_buffers_type { std::size_t n_; std::uint8_t* p_; public: using value_type = boost::asio::mutable_buffer; class const_iterator; mutable_buffers_type() = delete; mutable_buffers_type( mutable_buffers_type const&) = default; mutable_buffers_type& operator=( mutable_buffers_type const&) = default; const_iterator begin() const; const_iterator end() const; private: friend class static_streambuf; mutable_buffers_type( std::uint8_t* p, std::size_t n) : n_(n) , p_(p) { } }; class static_streambuf::mutable_buffers_type::const_iterator { std::size_t n_ = 0; std::uint8_t* p_ = nullptr; public: using value_type = boost::asio::mutable_buffer; using pointer = value_type const*; using reference = value_type; using difference_type = std::ptrdiff_t; using iterator_category = std::bidirectional_iterator_tag; const_iterator() = default; const_iterator(const_iterator&& other) = default; const_iterator(const_iterator const& other) = default; const_iterator& 
operator=(const_iterator&& other) = default; const_iterator& operator=(const_iterator const& other) = default; bool operator==(const_iterator const& other) const { return p_ == other.p_; } bool operator!=(const_iterator const& other) const { return !(*this == other); } reference operator*() const { return value_type{p_, n_}; } pointer operator->() const = delete; const_iterator& operator++() { p_ += n_; return *this; } const_iterator operator++(int) { auto temp = *this; ++(*this); return temp; } const_iterator& operator--() { p_ -= n_; return *this; } const_iterator operator--(int) { auto temp = *this; --(*this); return temp; } private: friend class mutable_buffers_type; const_iterator(std::uint8_t* p, std::size_t n) : n_(n) , p_(p) { } }; inline auto static_streambuf::mutable_buffers_type::begin() const -> const_iterator { return const_iterator{p_, n_}; } inline auto static_streambuf::mutable_buffers_type::end() const -> const_iterator { return const_iterator{p_ + n_, n_}; } //------------------------------------------------------------------------------ inline auto static_streambuf::data() const -> const_buffers_type { return const_buffers_type{in_, static_cast<std::size_t>(out_ - in_)}; } inline auto static_streambuf::prepare(std::size_t n) -> mutable_buffers_type { if(n > static_cast<std::size_t>(end_ - out_)) throw detail::make_exception<std::length_error>( "no space in streambuf", __FILE__, __LINE__); last_ = out_ + n; return mutable_buffers_type{out_, n}; } } // beast #endif
{"hexsha": "90a4834625868a42e6b3ac01874326ba05f969c5", "size": 6021, "ext": "ipp", "lang": "C++", "max_stars_repo_path": "src/beast/include/beast/core/impl/static_streambuf.ipp", "max_stars_repo_name": "MassICTBV/casinocoind", "max_stars_repo_head_hexsha": "81d6a15a0578c086c1812dd2203c0973099b0061", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-05-13T19:20:48.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-13T19:20:48.000Z", "max_issues_repo_path": "src/beast/include/beast/core/impl/static_streambuf.ipp", "max_issues_repo_name": "MassICTBV/casinocoind", "max_issues_repo_head_hexsha": "81d6a15a0578c086c1812dd2203c0973099b0061", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 7.0, "max_issues_repo_issues_event_min_datetime": "2017-06-06T13:03:39.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-22T17:03:05.000Z", "max_forks_repo_path": "src/beast/include/beast/core/impl/static_streambuf.ipp", "max_forks_repo_name": "MassICTBV/casinocoind", "max_forks_repo_head_hexsha": "81d6a15a0578c086c1812dd2203c0973099b0061", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2017-06-06T12:49:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-01T07:44:49.000Z", "avg_line_length": 19.5487012987, "max_line_length": 80, "alphanum_fraction": 0.6098654709, "num_tokens": 1463}
# Orthogonal Random Forest: Use Cases and Examples

Orthogonal Random Forest (ORF) combines orthogonalization, a technique that effectively removes the confounding effect in two-stage estimation, with generalized random forests, a flexible method for estimating treatment effect heterogeneity. Because of this orthogonalization step, the ORF performs especially well in the presence of high-dimensional confounders. For more details, see [this paper](https://arxiv.org/abs/1806.03467).

The EconML SDK implements the following OrthoForest variants:

* ContinuousTreatmentOrthoForest: suitable for continuous treatments
* DiscreteTreatmentOrthoForest: suitable for discrete treatments

In this notebook, we show the performance of the ORF on synthetic and observational data.

**Notebook contents:**

1. Example usage with continuous treatment synthetic data
2. Example usage with binary treatment synthetic data
3. Example usage with multiple discrete treatment synthetic data
4. Example usage with real continuous treatment observational data


```python
import econml
```


```python
# Main imports
from econml.ortho_forest import ContinuousTreatmentOrthoForest, WeightedModelWrapper, DiscreteTreatmentOrthoForest

# Helper imports
import numpy as np
from itertools import product
from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV
import matplotlib.pyplot as plt
%matplotlib inline
```

## 1. Example Usage with Continuous Treatment Synthetic Data

### 1.1. DGP

We use the data generating process (DGP) from [here](https://arxiv.org/abs/1806.03467). The DGP is described by the following equations:

\begin{align}
T =& \langle W, \beta\rangle + \eta, & \;\eta \sim \text{Uniform}(-1, 1)\\
Y =& T\cdot \theta(X) + \langle W, \gamma\rangle + \epsilon, &\; \epsilon \sim \text{Uniform}(-1, 1)\\
W \sim& \text{Normal}(0,\, I_{n_w})\\
X \sim& \text{Uniform}(0,1)^{n_x}
\end{align}

where $W$ is a matrix of high-dimensional confounders and $\beta, \gamma$ have high sparsity.

For this DGP,
\begin{align}
\theta(x) = \exp(2\cdot x_1).
\end{align}


```python
# Treatment effect function
def exp_te(x):
    return np.exp(2*x[0])
```


```python
# DGP constants
np.random.seed(123)
n = 1000
n_w = 30
support_size = 5
n_x = 1
# Outcome support
support_Y = np.random.choice(range(n_w), size=support_size, replace=False)
coefs_Y = np.random.uniform(0, 1, size=support_size)
epsilon_sample = lambda n: np.random.uniform(-1, 1, size=n)
# Treatment support
support_T = support_Y
coefs_T = np.random.uniform(0, 1, size=support_size)
eta_sample = lambda n: np.random.uniform(-1, 1, size=n)

# Generate controls, covariates, treatments and outcomes
W = np.random.normal(0, 1, size=(n, n_w))
X = np.random.uniform(0, 1, size=(n, n_x))
# Heterogeneous treatment effects
TE = np.array([exp_te(x_i) for x_i in X])
T = np.dot(W[:, support_T], coefs_T) + eta_sample(n)
Y = TE * T + np.dot(W[:, support_Y], coefs_Y) + epsilon_sample(n)

# ORF parameters and test data
# The following parameters are set according to theory
subsample_power = 0.88
subsample_ratio = ((n/np.log(n_w))**(subsample_power)) / n
lambda_reg = np.sqrt(np.log(n_w) / (10 * subsample_ratio * n))
X_test = np.array(list(product(np.arange(0, 1, 0.01), repeat=n_x)))
```

### 1.2. Train Estimator

**Note:** The models in the final stage of the estimation (``model_T_final``, ``model_Y_final``) need to support sample weighting. If the models of choice do not support sample weights (e.g.
``sklearn.linear_model.LassoCV``), the ``econml`` package provides a convenient wrapper for these models, ``WeightedModelWrapper``, in order to allow sample weights. If the model of choice is a linear (regression) model such as Lasso, you should set ``sample_type="weighted"``. Otherwise, set ``sample_type="sampled"``.


```python
est = ContinuousTreatmentOrthoForest(
    n_trees=200, min_leaf_size=5, max_depth=50,
    subsample_ratio=2*subsample_ratio, bootstrap=False,
    model_T=Lasso(alpha=lambda_reg),
    model_Y=Lasso(alpha=lambda_reg),
    model_T_final=WeightedModelWrapper(Lasso(alpha=lambda_reg), sample_type="weighted"),
    model_Y_final=WeightedModelWrapper(Lasso(alpha=lambda_reg), sample_type="weighted"),
    random_state=123)
```


```python
est.fit(Y, T, X, W)
```

    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    2.6s
    [Parallel(n_jobs=-1)]: Done 200 out of 200 | elapsed:    3.8s finished
    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.2s
    [Parallel(n_jobs=-1)]: Done 200 out of 200 | elapsed:    1.3s finished

    <econml.ortho_forest.ContinuousTreatmentOrthoForest at 0x11df7eda0>


```python
treatment_effects = est.const_marginal_effect(X_test)
```

    [Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.3s
    [Parallel(n_jobs=-1)]: Done 100 out of 100 | elapsed:    1.2s finished

### 1.3. Performance Visualization


```python
y = treatment_effects[:, 0]
plt.plot(X_test, y, label='ORF estimate')
expected_te = np.array([exp_te(x_i) for x_i in X_test])
plt.plot(X_test[:, 0], expected_te, 'b--', label='True effect')
plt.ylabel("Treatment Effect")
plt.xlabel("x")
plt.legend()
plt.show()
```

## 2. Example Usage with Binary Treatment Synthetic Data

### 2.1. DGP

We use the following DGP:

\begin{align}
T \sim & \text{Bernoulli}\left(f(W)\right), &\; f(W)=\sigma(\langle W, \beta\rangle + \eta), \;\eta \sim \text{Uniform}(-1, 1)\\
Y = & T\cdot \theta(X) + \langle W, \gamma\rangle + \epsilon, & \; \epsilon \sim \text{Uniform}(-1, 1)\\
W \sim & \text{Normal}(0,\, I_{n_w}) & \\
X \sim & \text{Uniform}(0,\, 1)^{n_x}
\end{align}

where $W$ is a matrix of high-dimensional confounders, $\beta, \gamma$ have high sparsity and $\sigma$ is the sigmoid function.

For this DGP,
\begin{align}
\theta(x) = \exp(2\cdot x_1).
\end{align}


```python
# DGP constants
np.random.seed(1234)
n = 1000
n_w = 30
support_size = 5
n_x = 1
# Outcome support
support_Y = np.random.choice(range(n_w), size=support_size, replace=False)
coefs_Y = np.random.uniform(0, 1, size=support_size)
epsilon_sample = lambda n: np.random.uniform(-1, 1, size=n)
# Treatment support
support_T = support_Y
coefs_T = np.random.uniform(0, 1, size=support_size)
eta_sample = lambda n: np.random.uniform(-1, 1, size=n)

# Generate controls, covariates, treatments and outcomes
W = np.random.normal(0, 1, size=(n, n_w))
X = np.random.uniform(0, 1, size=(n, n_x))
# Heterogeneous treatment effects
TE = np.array([exp_te(x_i) for x_i in X])
# Define treatment
log_odds = np.dot(W[:, support_T], coefs_T) + eta_sample(n)
T_sigmoid = 1/(1 + np.exp(-log_odds))
T = np.array([np.random.binomial(1, p) for p in T_sigmoid])
# Define the outcome
Y = TE * T + np.dot(W[:, support_Y], coefs_Y) + epsilon_sample(n)

# ORF parameters and test data
# The following parameters are set according to theory
subsample_power = 0.88
subsample_ratio = ((n/np.log(n_w))**(subsample_power)) / n
lambda_reg = np.sqrt(np.log(n_w) / (10 * subsample_ratio * n))
X_test = np.array(list(product(np.arange(0, 1, 0.01), repeat=n_x)))
```

### 2.2. Train Estimator


```python
est = DiscreteTreatmentOrthoForest(
    n_trees=200, min_leaf_size=10, max_depth=30,
    subsample_ratio=2*subsample_ratio, bootstrap=False,
    propensity_model = LogisticRegression(C=1/(X.shape[0]*lambda_reg), penalty='l1', solver='saga'),
    model_Y = Lasso(alpha=lambda_reg),
    propensity_model_final=LogisticRegression(C=1/(X.shape[0]*lambda_reg), penalty='l1', solver='saga'),
    model_Y_final=WeightedModelWrapper(Lasso(alpha=lambda_reg), sample_type="weighted")
)
```


```python
est.fit(Y, T, X, W)
```

    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.2s
    [Parallel(n_jobs=-1)]: Done 200 out of 200 | elapsed:    1.4s finished
    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.2s
    [Parallel(n_jobs=-1)]: Done 118 out of 200 | elapsed:    1.0s remaining:    0.7s
    [Parallel(n_jobs=-1)]: Done 200 out of 200 | elapsed:    1.3s finished

    <econml.ortho_forest.DiscreteTreatmentOrthoForest at 0x11e7822e8>


```python
treatment_effects = est.const_marginal_effect(X_test)
```

    [Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.4s
    [Parallel(n_jobs=-1)]: Done 100 out of 100 | elapsed:    2.1s finished

### 2.3. Performance Visualization


```python
y = treatment_effects[:, 0]
plt.plot(X_test, y, label='ORF estimate')
expected_te = np.array([exp_te(x_i) for x_i in X_test])
plt.plot(X_test[:, 0], expected_te, 'b--', label='True effect')
plt.ylabel("Treatment Effect")
plt.xlabel("x")
plt.legend()
plt.show()
```

## 3. Example Usage with Multiple Treatment Synthetic Data

### 3.1. DGP

We use the following DGP:

\begin{align}
Y = & \sum_{t=1}^{n_{\text{treatments}}} 1\{T=t\}\cdot \theta_{t}(X) + \langle W, \gamma\rangle + \epsilon, & \epsilon \sim \text{Uniform}(-1, 1), \\
\text{Pr}[T=t \mid W] \propto & \exp\{\langle W, \beta_t \rangle\}, & \forall t\in \{0, 1, \ldots, n_{\text{treatments}}\}
\end{align}

where $W$ is a matrix of high-dimensional confounders and $\beta_t, \gamma$ are sparse.
For this particular example DGP we used $n_{\text{treatments}}=3$ non-baseline treatments (together with the baseline $T=0$; the code below passes `n_treatments=4` to count the baseline category as well) and

\begin{align}
\theta_1(x) = & \exp( 2 x_1 ),\\
\theta_2(x) = & 3 \cdot \sigma(100\cdot (x_1 - .5)) - 1,\\
\theta_3(x) = & -2 \cdot \sigma(100\cdot (x_1 - .25)),
\end{align}

where $\sigma$ is the sigmoid function.


```python
def get_test_train_data(n, n_w, support_size, n_x, te_func, n_treatments):
    # Outcome support
    support_Y = np.random.choice(range(n_w), size=support_size, replace=False)
    coefs_Y = np.random.uniform(0, 1, size=support_size)
    epsilon_sample = lambda n: np.random.uniform(-1, 1, size=n)
    # Treatment support
    support_T = support_Y
    coefs_T = np.random.uniform(0, 1, size=(support_size, n_treatments))
    eta_sample = lambda n: np.random.uniform(-1, 1, size=n)

    # Generate controls, covariates, treatments and outcomes
    W = np.random.normal(0, 1, size=(n, n_w))
    X = np.random.uniform(0, 1, size=(n, n_x))
    # Heterogeneous treatment effects
    TE = np.array([te_func(x_i, n_treatments) for x_i in X])
    log_odds = np.dot(W[:, support_T], coefs_T)
    T_sigmoid = np.exp(log_odds)
    T_sigmoid = T_sigmoid/np.sum(T_sigmoid, axis=1, keepdims=True)
    T = np.array([np.random.choice(n_treatments, p=p) for p in T_sigmoid])
    TE = np.concatenate((np.zeros((n,1)), TE), axis=1)
    Y = TE[np.arange(n), T] + np.dot(W[:, support_Y], coefs_Y) + epsilon_sample(n)
    X_test = np.array(list(product(np.arange(0, 1, 0.01), repeat=n_x)))

    return (Y, T, X, W), (X_test, np.array([te_func(x, n_treatments) for x in X_test]))
```


```python
import scipy.special
def te_func(x, n_treatments):
    return [np.exp(2*x[0]), 3*scipy.special.expit(100*(x[0] - .5)) - 1, -2*scipy.special.expit(100*(x[0] - .25))]

np.random.seed(123)
(Y, T, X, W), (X_test, te_test) = get_test_train_data(1000, 3, 3, 1, te_func, 4)
```

### 3.2. Train Estimator


```python
est = DiscreteTreatmentOrthoForest(n_trees=500, model_Y = WeightedModelWrapper(Lasso(alpha=lambda_reg)))
```


```python
est.fit(Y, T, X, W)
```

    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.5s
    [Parallel(n_jobs=-1)]: Done 208 tasks      | elapsed:    4.4s
    [Parallel(n_jobs=-1)]: Done 500 out of 500 | elapsed:   10.6s finished
    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.5s
    [Parallel(n_jobs=-1)]: Done 208 tasks      | elapsed:    4.7s
    [Parallel(n_jobs=-1)]: Done 500 out of 500 | elapsed:   11.1s finished

    <econml.ortho_forest.DiscreteTreatmentOrthoForest at 0x11e4f0978>


```python
treatment_effects = est.const_marginal_effect(X_test)
```

    [Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:    0.7s
    [Parallel(n_jobs=-1)]: Done 100 out of 100 | elapsed:    3.5s finished

### 3.3. Performance Visualization


```python
y = treatment_effects
for it in range(y.shape[1]):
    plt.plot(X_test, y[:, it], label='ORF estimate T={}'.format(it))
    plt.plot(X_test[:, 0], te_test[:, it], '--', label='True effect T={}'.format(it))
plt.ylabel("Treatment Effect")
plt.xlabel("x")
plt.legend()
plt.show()
```

## 4. Example usage with real continuous treatment observational data

We applied our technique to Dominick’s dataset, a popular historical dataset of store-level orange juice prices and sales provided by the University of Chicago Booth School of Business. The dataset comprises a large number of covariates $W$, but researchers might only be interested in learning the elasticity of demand as a function of a few variables $x$ such as income or education.
We applied the `ContinuousTreatmentOrthoForest` to estimate orange juice price elasticity as a function of income, and our results unveil the natural phenomenon that lower income consumers are more price-sensitive.

### 4.1. Data


```python
# A few more imports
import os
import pandas as pd
import urllib.request
from sklearn.preprocessing import StandardScaler
```


```python
# Import the data
file_name = "oj_large.csv"

if not os.path.isfile(file_name):
    print("Downloading file (this might take a few seconds)...")
    urllib.request.urlretrieve("https://msalicedatapublic.blob.core.windows.net/datasets/OrangeJuice/oj_large.csv", file_name)
oj_data = pd.read_csv(file_name)
oj_data.head()
```

|   | store | brand | week | logmove | feat | price | AGE60 | EDUC | ETHNIC | INCOME | HHLARGE | WORKWOM | HVAL150 | SSTRDIST | SSTRVOL | CPDIST5 | CPWVOL5 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 2 | tropicana | 40 | 9.018695 | 0 | 3.87 | 0.232865 | 0.248935 | 0.11428 | 10.553205 | 0.103953 | 0.303585 | 0.463887 | 2.110122 | 1.142857 | 1.92728 | 0.376927 |
| 1 | 2 | tropicana | 46 | 8.723231 | 0 | 3.87 | 0.232865 | 0.248935 | 0.11428 | 10.553205 | 0.103953 | 0.303585 | 0.463887 | 2.110122 | 1.142857 | 1.92728 | 0.376927 |
| 2 | 2 | tropicana | 47 | 8.253228 | 0 | 3.87 | 0.232865 | 0.248935 | 0.11428 | 10.553205 | 0.103953 | 0.303585 | 0.463887 | 2.110122 | 1.142857 | 1.92728 | 0.376927 |
| 3 | 2 | tropicana | 48 | 8.987197 | 0 | 3.87 | 0.232865 | 0.248935 | 0.11428 | 10.553205 | 0.103953 | 0.303585 | 0.463887 | 2.110122 | 1.142857 | 1.92728 | 0.376927 |
| 4 | 2 | tropicana | 50 | 9.093357 | 0 | 3.87 | 0.232865 | 0.248935 | 0.11428 | 10.553205 | 0.103953 | 0.303585 | 0.463887 | 2.110122 | 1.142857 | 1.92728 | 0.376927 |


```python
# Prepare data
Y = oj_data['logmove'].values
T = np.log(oj_data["price"]).values
scaler = StandardScaler()
W1 = scaler.fit_transform(oj_data[[c for c in oj_data.columns if c not in ['price', 'logmove', 'brand', 'week', 'store']]].values)
W2 = pd.get_dummies(oj_data[['brand']]).values
W = np.concatenate([W1, W2], axis=1)
X = oj_data[['INCOME']].values
```

### 4.2. Train Estimator
```python
# Define some parameters
n_trees = 2000
min_leaf_size = 50
max_depth = 20
subsample_ratio = 0.02
bootstrap = False
```


```python
est = ContinuousTreatmentOrthoForest(
        n_trees=n_trees, min_leaf_size=min_leaf_size, max_depth=max_depth,
        subsample_ratio=subsample_ratio, bootstrap=bootstrap,
        model_T=Lasso(alpha=0.1),
        model_Y=Lasso(alpha=0.1),
        model_T_final=WeightedModelWrapper(LassoCV(cv=3), sample_type="weighted"),
        model_Y_final=WeightedModelWrapper(LassoCV(cv=3), sample_type="weighted")
)
```


```python
est.fit(Y, T, X, W)
```

    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  852 tasks      | elapsed:    1.0s
    [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed:    1.9s finished
    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  940 tasks      | elapsed:    1.1s
    [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed:    2.0s finished

    <econml.ortho_forest.ContinuousTreatmentOrthoForest at 0x12d8bf908>


```python
min_income = 10.0
max_income = 11.1
delta = (max_income - min_income) / 100
X_test = np.arange(min_income, max_income + delta - 0.001, delta).reshape(-1, 1)
```


```python
import time
t0 = time.time()
te_pred = est.const_marginal_effect(X_test)
print(time.time() - t0)
```

    [Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:   30.0s

    198.3771231174469

    [Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed:  3.3min finished

### 4.3. Performance Visualization


```python
# Plot Orange Juice elasticity as a function of income
plt.plot(np.ndarray.flatten(X_test), te_pred[:, 0], label="OJ Elasticity")
plt.xlabel(r'$\log$(Income)')
plt.ylabel('Orange Juice Elasticity')
plt.legend()
plt.title("Orange Juice Elasticity vs Income")
plt.show()
```

### 4.4. Bootstrap Confidence Intervals

We can also use a bootstrap estimator to generate confidence intervals; in order to return results in a few minutes, we're limiting the number of trees to 100 and the number of bootstrap samples to 10 in the code below, but for better estimates these numbers can be increased at the cost of a longer runtime.


```python
from econml.bootstrap import BootstrapEstimator
boot_est = BootstrapEstimator(ContinuousTreatmentOrthoForest(
        n_trees=100, min_leaf_size=min_leaf_size, max_depth=max_depth,
        subsample_ratio=subsample_ratio, bootstrap=bootstrap,
        model_T=Lasso(alpha=0.1),
        model_Y=Lasso(alpha=0.1),
        model_T_final=WeightedModelWrapper(LassoCV(cv=3), sample_type="weighted"),
        model_Y_final=WeightedModelWrapper(LassoCV(cv=3), sample_type="weighted")
    ), n_bootstrap_samples=10, n_jobs=-1)
```


```python
boot_est.fit(Y, T, X, W)
te_pred_interval = boot_est.const_marginal_effect_interval(X_test, lower=1, upper=99)
```

    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  582 tasks      | elapsed:    0.8s
    [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed:    1.7s finished
    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done  800 tasks      | elapsed:    1.0s
    [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed:    1.7s finished
    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
    [Parallel(n_jobs=-1)]: Done 1142 tasks      | elapsed:    1.2s
    [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed:    1.7s finished
    [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 632 tasks | elapsed: 0.8s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.8s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 236 tasks | elapsed: 0.4s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.7s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 688 tasks | elapsed: 0.8s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.7s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 458 tasks | elapsed: 0.6s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.8s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 1376 tasks | elapsed: 1.6s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 2.0s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 196 tasks | elapsed: 0.4s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.7s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 224 tasks | elapsed: 0.4s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.8s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 534 tasks | elapsed: 0.8s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.7s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 276 tasks | elapsed: 0.5s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.7s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 112 tasks | elapsed: 0.2s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.8s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 438 tasks | elapsed: 0.6s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.7s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 880 tasks | elapsed: 1.0s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.9s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 436 tasks | elapsed: 0.7s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 2.0s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 64 tasks | elapsed: 0.3s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 2.1s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 752 tasks | elapsed: 0.9s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.8s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 672 tasks | elapsed: 0.8s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.8s finished [Parallel(n_jobs=-1)]: Using backend LokyBackend with 8 concurrent workers. [Parallel(n_jobs=-1)]: Done 960 tasks | elapsed: 1.1s [Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 1.7s finished [Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers. 
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 31.0s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.4min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 31.9s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.5min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 31.8s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.5min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 31.0s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.3min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 30.9s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.4min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 30.7s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.3min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 28.4s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.3min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 28.2s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.4min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 29.2s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.3min finished
[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 8 concurrent workers.
[Parallel(n_jobs=-1)]: Done 16 tasks | elapsed: 29.8s
[Parallel(n_jobs=-1)]: Done 101 out of 101 | elapsed: 3.3min finished


```python
plt.plot(np.ndarray.flatten(X_test), te_pred[:, 0], label="OJ Elasticity")
plt.fill_between(np.ndarray.flatten(X_test), te_pred_interval[0][:, 0],
                 te_pred_interval[1][:, 0], alpha=.5, label="1-99% CI")
plt.xlabel(r'$\log$(Income)')
plt.ylabel('Orange Juice Elasticity')
plt.title("Orange Juice Elasticity vs Income")
plt.legend()
plt.show()
```
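For reference, a higher-fidelity version of the same interval estimate simply raises those two knobs. The cell below is a sketch under that assumption, reusing the estimator settings already defined in this notebook; `n_trees=2000` mirrors the point-estimate forest and `n_bootstrap_samples=100` is an illustrative choice, not a prescribed one, so expect the runtime to grow roughly in proportion.


```python
# Higher-fidelity bootstrap intervals (illustrative settings; much longer runtime)
boot_est_hq = BootstrapEstimator(ContinuousTreatmentOrthoForest(
        n_trees=2000,  # match the point-estimate forest above
        min_leaf_size=min_leaf_size, max_depth=max_depth,
        subsample_ratio=subsample_ratio, bootstrap=bootstrap,
        model_T=Lasso(alpha=0.1),
        model_Y=Lasso(alpha=0.1),
        model_T_final=WeightedModelWrapper(LassoCV(cv=3), sample_type="weighted"),
        model_Y_final=WeightedModelWrapper(LassoCV(cv=3), sample_type="weighted")),
    n_bootstrap_samples=100,  # illustrative; 10 was used above for speed
    n_jobs=-1)
boot_est_hq.fit(Y, T, X, W)
te_hq_interval = boot_est_hq.const_marginal_effect_interval(X_test, lower=1, upper=99)
```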
{"hexsha": "135048af3b4c1d18f12907c443683ce35c1a575d", "size": 157843, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/Orthogonal Random Forest Examples.ipynb", "max_stars_repo_name": "bquistorff/EconML", "max_stars_repo_head_hexsha": "73a21bfe3470e7f0d1702a6db71efd0892cfee9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-03T13:11:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T20:05:53.000Z", "max_issues_repo_path": "notebooks/Orthogonal Random Forest Examples.ipynb", "max_issues_repo_name": "bquistorff/EconML", "max_issues_repo_head_hexsha": "73a21bfe3470e7f0d1702a6db71efd0892cfee9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/Orthogonal Random Forest Examples.ipynb", "max_forks_repo_name": "bquistorff/EconML", "max_forks_repo_head_hexsha": "73a21bfe3470e7f0d1702a6db71efd0892cfee9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 127.1901692184, "max_line_length": 36454, "alphanum_fraction": 0.8434076899, "converted": true, "num_tokens": 8747}
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import math import os.path as osp from collections import OrderedDict import numpy as np import paddle import paddle.nn.functional as F from paddle.static import InputSpec from paddlex.utils import logging, TrainingStats, DisablePrint from paddlex.cv.models.base import BaseModel from paddlex.cv.transforms import arrange_transforms from paddlex.cv.transforms.operators import Resize with DisablePrint(): from paddlex.ppcls.modeling import architectures from paddlex.ppcls.modeling.loss import CELoss __all__ = [ "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152", "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet50_vd_ssld", "ResNet101_vd", "ResNet101_vd_ssld", "ResNet152_vd", "ResNet200_vd", "AlexNet", "DarkNet53", "MobileNetV1", "MobileNetV2", "MobileNetV3_small", "MobileNetV3_small_ssld", "MobileNetV3_large", "MobileNetV3_large_ssld", "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201", "DenseNet264", "HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C", "HRNet_W44_C", "HRNet_W48_C", "HRNet_W64_C", "Xception41", "Xception65", "Xception71", "ShuffleNetV2", "ShuffleNetV2_swish" ] class BaseClassifier(BaseModel): """Parent class of all classification models. Args: model_name (str, optional): Name of classification model. Defaults to 'ResNet50'. num_classes (int, optional): The number of target classes. Defaults to 1000. 
""" def __init__(self, model_name='ResNet50', num_classes=1000, **params): self.init_params = locals() self.init_params.update(params) if 'lr_mult_list' in self.init_params: del self.init_params['lr_mult_list'] if 'with_net' in self.init_params: del self.init_params['with_net'] super(BaseClassifier, self).__init__('classifier') if not hasattr(architectures, model_name): raise Exception("ERROR: There's no model named {}.".format( model_name)) self.model_name = model_name self.labels = None self.num_classes = num_classes for k, v in params.items(): setattr(self, k, v) if params.get('with_net', True): params.pop('with_net', None) self.net = self.build_net(**params) def build_net(self, **params): with paddle.utils.unique_name.guard(): net = architectures.__dict__[self.model_name]( class_dim=self.num_classes, **params) return net def _fix_transforms_shape(self, image_shape): if hasattr(self, 'test_transforms'): if self.test_transforms is not None: self.test_transforms.transforms.append( Resize(target_size=image_shape)) def _get_test_inputs(self, image_shape): if image_shape is not None: if len(image_shape) == 2: image_shape = [1, 3] + image_shape self._fix_transforms_shape(image_shape[-2:]) else: image_shape = [None, 3, -1, -1] self.fixed_input_shape = image_shape input_spec = [ InputSpec( shape=image_shape, name='image', dtype='float32') ] return input_spec def run(self, net, inputs, mode): net_out = net(inputs[0]) softmax_out = net_out if self.status == 'Infer' else F.softmax(net_out) if mode == 'test': outputs = OrderedDict([('prediction', softmax_out)]) elif mode == 'eval': pred = softmax_out gt = inputs[1] labels = inputs[1].reshape([-1, 1]) acc1 = paddle.metric.accuracy(softmax_out, label=labels) k = min(5, self.num_classes) acck = paddle.metric.accuracy(softmax_out, label=labels, k=k) # multi cards eval if paddle.distributed.get_world_size() > 1: acc1 = paddle.distributed.all_reduce( acc1, op=paddle.distributed.ReduceOp. SUM) / paddle.distributed.get_world_size() acck = paddle.distributed.all_reduce( acck, op=paddle.distributed.ReduceOp. 
                    SUM) / paddle.distributed.get_world_size()
                pred = list()
                gt = list()
                paddle.distributed.all_gather(pred, softmax_out)
                paddle.distributed.all_gather(gt, inputs[1])
                pred = paddle.concat(pred, axis=0)
                gt = paddle.concat(gt, axis=0)

            outputs = OrderedDict([('acc1', acc1), ('acc{}'.format(k), acck),
                                   ('prediction', pred), ('labels', gt)])

        else:
            # mode == 'train'
            labels = inputs[1].reshape([-1, 1])
            loss = CELoss(class_dim=self.num_classes)
            loss = loss(net_out, inputs[1])
            acc1 = paddle.metric.accuracy(softmax_out, label=labels, k=1)
            k = min(5, self.num_classes)
            acck = paddle.metric.accuracy(softmax_out, label=labels, k=k)

            outputs = OrderedDict([('loss', loss), ('acc1', acc1),
                                   ('acc{}'.format(k), acck)])

        return outputs

    def default_optimizer(self, parameters, learning_rate, warmup_steps,
                          warmup_start_lr, lr_decay_epochs, lr_decay_gamma,
                          num_steps_each_epoch):
        boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]
        values = [
            learning_rate * (lr_decay_gamma**i)
            for i in range(len(lr_decay_epochs) + 1)
        ]
        scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries, values)
        if warmup_steps > 0:
            if warmup_steps > lr_decay_epochs[0] * num_steps_each_epoch:
                logging.error(
                    "In function train(), parameters must satisfy: "
                    "warmup_steps <= lr_decay_epochs[0] * num_steps_each_epoch",
                    exit=False)
                logging.error(
                    "See this doc for more information: "
                    "https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice",
                    exit=False)
                logging.error(
                    "warmup_steps should be less than {}, or lr_decay_epochs[0] should be greater than {}; "
                    "please modify 'lr_decay_epochs' or 'warmup_steps' in the train() function".
                    format(lr_decay_epochs[0] * num_steps_each_epoch,
                           warmup_steps // num_steps_each_epoch))
            scheduler = paddle.optimizer.lr.LinearWarmup(
                learning_rate=scheduler,
                warmup_steps=warmup_steps,
                start_lr=warmup_start_lr,
                end_lr=learning_rate)
        optimizer = paddle.optimizer.Momentum(
            scheduler,
            momentum=.9,
            weight_decay=paddle.regularizer.L2Decay(coeff=1e-04),
            parameters=parameters)
        return optimizer

    def train(self,
              num_epochs,
              train_dataset,
              train_batch_size=64,
              eval_dataset=None,
              optimizer=None,
              save_interval_epochs=1,
              log_interval_steps=10,
              save_dir='output',
              pretrain_weights='IMAGENET',
              learning_rate=.025,
              warmup_steps=0,
              warmup_start_lr=0.0,
              lr_decay_epochs=(30, 60, 90),
              lr_decay_gamma=0.1,
              early_stop=False,
              early_stop_patience=5,
              use_vdl=True,
              resume_checkpoint=None):
        """
        Train the model.

        Args:
            num_epochs(int): The number of epochs.
            train_dataset(paddlex.dataset): Training dataset.
            train_batch_size(int, optional): Total batch size among all cards used in
                training. Defaults to 64.
            eval_dataset(paddlex.dataset, optional): Evaluation dataset. If None, the
                model will not be evaluated during the training process. Defaults to None.
            optimizer(paddle.optimizer.Optimizer or None, optional): Optimizer used for
                training. If None, a default optimizer is used. Defaults to None.
            save_interval_epochs(int, optional): Epoch interval for saving the model.
                Defaults to 1.
            log_interval_steps(int, optional): Step interval for printing training
                information. Defaults to 10.
            save_dir(str, optional): Directory to save the model. Defaults to 'output'.
            pretrain_weights(str or None, optional): None or name/path of pretrained
                weights. If None, no pretrained weights will be loaded. At most one of
                `resume_checkpoint` and `pretrain_weights` can be set simultaneously.
                Defaults to 'IMAGENET'.
            learning_rate(float, optional): Learning rate for training. Defaults to .025.
            warmup_steps(int, optional): The number of steps of warm-up training.
                Defaults to 0.
            warmup_start_lr(float, optional): Start learning rate of warm-up training.
                Defaults to 0.0.
            lr_decay_epochs(List[int] or Tuple[int], optional): Epoch milestones for
                learning rate decay. Defaults to (30, 60, 90).
            lr_decay_gamma(float, optional): Gamma coefficient of learning rate decay.
                Defaults to 0.1.
            early_stop(bool, optional): Whether to adopt the early-stop strategy.
                Defaults to False.
            early_stop_patience(int, optional): Early stop patience. Defaults to 5.
            use_vdl(bool, optional): Whether to use VisualDL to monitor the training
                process. Defaults to True.
            resume_checkpoint(str or None, optional): The path of the checkpoint to
                resume training from. If None, no training checkpoint will be resumed.
                At most one of `resume_checkpoint` and `pretrain_weights` can be set
                simultaneously. Defaults to None.

        """
        if self.status == 'Infer':
            logging.error(
                "Exported inference model does not support training.",
                exit=True)
        if pretrain_weights is not None and resume_checkpoint is not None:
            logging.error(
                "pretrain_weights and resume_checkpoint cannot be set simultaneously.",
                exit=True)
        self.labels = train_dataset.labels

        # build optimizer if not defined
        if optimizer is None:
            num_steps_each_epoch = len(train_dataset) // train_batch_size
            self.optimizer = self.default_optimizer(
                parameters=self.net.parameters(),
                learning_rate=learning_rate,
                warmup_steps=warmup_steps,
                warmup_start_lr=warmup_start_lr,
                lr_decay_epochs=lr_decay_epochs,
                lr_decay_gamma=lr_decay_gamma,
                num_steps_each_epoch=num_steps_each_epoch)
        else:
            self.optimizer = optimizer

        # initialize weights
        if pretrain_weights is not None and not osp.exists(pretrain_weights):
            if pretrain_weights not in ['IMAGENET']:
                logging.warning(
                    "Path of pretrain_weights('{}') does not exist!".format(
                        pretrain_weights))
                logging.warning(
                    "Pretrain_weights is forcibly set to 'IMAGENET'. "
                    "If you don't want to use pretrained weights, "
                    "set pretrain_weights to None.")
                pretrain_weights = 'IMAGENET'
        elif pretrain_weights is not None and osp.exists(pretrain_weights):
            if osp.splitext(pretrain_weights)[-1] != '.pdparams':
                logging.error(
                    "Invalid pretrained weights. Please specify a '.pdparams' file.",
                    exit=True)
        pretrained_dir = osp.join(save_dir, 'pretrain')
        self.net_initialize(
            pretrain_weights=pretrain_weights,
            save_dir=pretrained_dir,
            resume_checkpoint=resume_checkpoint)

        # start train loop
        self.train_loop(
            num_epochs=num_epochs,
            train_dataset=train_dataset,
            train_batch_size=train_batch_size,
            eval_dataset=eval_dataset,
            save_interval_epochs=save_interval_epochs,
            log_interval_steps=log_interval_steps,
            save_dir=save_dir,
            early_stop=early_stop,
            early_stop_patience=early_stop_patience,
            use_vdl=use_vdl)

    def quant_aware_train(self,
                          num_epochs,
                          train_dataset,
                          train_batch_size=64,
                          eval_dataset=None,
                          optimizer=None,
                          save_interval_epochs=1,
                          log_interval_steps=10,
                          save_dir='output',
                          learning_rate=.000025,
                          warmup_steps=0,
                          warmup_start_lr=0.0,
                          lr_decay_epochs=(30, 60, 90),
                          lr_decay_gamma=0.1,
                          early_stop=False,
                          early_stop_patience=5,
                          use_vdl=True,
                          resume_checkpoint=None,
                          quant_config=None):
        """
        Quantization-aware training.

        Args:
            num_epochs(int): The number of epochs.
            train_dataset(paddlex.dataset): Training dataset.
            train_batch_size(int, optional): Total batch size among all cards used in
                training. Defaults to 64.
            eval_dataset(paddlex.dataset, optional): Evaluation dataset. If None, the
                model will not be evaluated during the training process. Defaults to None.
            optimizer(paddle.optimizer.Optimizer or None, optional): Optimizer used for
                training. If None, a default optimizer is used.
                Defaults to None.
            save_interval_epochs(int, optional): Epoch interval for saving the model.
                Defaults to 1.
            log_interval_steps(int, optional): Step interval for printing training
                information. Defaults to 10.
            save_dir(str, optional): Directory to save the model. Defaults to 'output'.
            learning_rate(float, optional): Learning rate for training.
                Defaults to .000025.
            warmup_steps(int, optional): The number of steps of warm-up training.
                Defaults to 0.
            warmup_start_lr(float, optional): Start learning rate of warm-up training.
                Defaults to 0.0.
            lr_decay_epochs(List[int] or Tuple[int], optional): Epoch milestones for
                learning rate decay. Defaults to (30, 60, 90).
            lr_decay_gamma(float, optional): Gamma coefficient of learning rate decay.
                Defaults to 0.1.
            early_stop(bool, optional): Whether to adopt the early-stop strategy.
                Defaults to False.
            early_stop_patience(int, optional): Early stop patience. Defaults to 5.
            use_vdl(bool, optional): Whether to use VisualDL to monitor the training
                process. Defaults to True.
            quant_config(dict or None, optional): Quantization configuration. If None,
                a default rule-of-thumb configuration will be used. Defaults to None.
            resume_checkpoint(str or None, optional): The path of the checkpoint to
                resume quantization-aware training from. If None, no training
                checkpoint will be resumed. Defaults to None.

        """
        self._prepare_qat(quant_config)
        self.train(
            num_epochs=num_epochs,
            train_dataset=train_dataset,
            train_batch_size=train_batch_size,
            eval_dataset=eval_dataset,
            optimizer=optimizer,
            save_interval_epochs=save_interval_epochs,
            log_interval_steps=log_interval_steps,
            save_dir=save_dir,
            pretrain_weights=None,
            learning_rate=learning_rate,
            warmup_steps=warmup_steps,
            warmup_start_lr=warmup_start_lr,
            lr_decay_epochs=lr_decay_epochs,
            lr_decay_gamma=lr_decay_gamma,
            early_stop=early_stop,
            early_stop_patience=early_stop_patience,
            use_vdl=use_vdl,
            resume_checkpoint=resume_checkpoint)

    def evaluate(self, eval_dataset, batch_size=1, return_details=False):
        """
        Evaluate the model.

        Args:
            eval_dataset(paddlex.dataset): Evaluation dataset.
            batch_size(int, optional): Total batch size among all cards used for
                evaluation. Defaults to 1.
            return_details(bool, optional): Whether to return evaluation details.
                Defaults to False.

        Returns:
            collections.OrderedDict with key-value pairs:
            {"acc1": `top-1 accuracy`, "acck": `top-k accuracy`},
            where k = min(5, number of classes).

        """
        # Add the arrange operation to the transforms
        arrange_transforms(
            model_type=self.model_type,
            transforms=eval_dataset.transforms,
            mode='eval')
        self.net.eval()
        nranks = paddle.distributed.get_world_size()
        local_rank = paddle.distributed.get_rank()
        if nranks > 1:
            # Initialize the parallel environment if not done.
            if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
            ):
                paddle.distributed.init_parallel_env()

        self.eval_data_loader = self.build_data_loader(
            eval_dataset, batch_size=batch_size, mode='eval')
        eval_metrics = TrainingStats()
        if return_details:
            true_labels = list()
            pred_scores = list()

        logging.info(
            "Start to evaluate (total_samples={}, total_steps={})...".format(
                eval_dataset.num_samples,
                math.ceil(eval_dataset.num_samples * 1.0 / batch_size)))
        with paddle.no_grad():
            for step, data in enumerate(self.eval_data_loader()):
                outputs = self.run(self.net, data, mode='eval')
                if return_details:
                    true_labels.extend(outputs['labels'].tolist())
                    pred_scores.extend(outputs['prediction'].tolist())
                outputs.pop('prediction')
                outputs.pop('labels')
                eval_metrics.update(outputs)
        if return_details:
            eval_details = {
                'true_labels': true_labels,
                'pred_scores': pred_scores
            }
            return eval_metrics.get(), eval_details
        else:
            return eval_metrics.get()

    def predict(self, img_file, transforms=None, topk=1):
        """
        Do inference.

        Args:
            img_file(List[np.ndarray or str], str or np.ndarray): Image path or decoded
                image data in BGR format. A list may also be passed, in which case all
                images in the list are predicted as a mini-batch.
            transforms(paddlex.transforms.Compose or None, optional): Transforms for
                inputs. If None, the transforms for the evaluation process will be used.
                Defaults to None.
            topk(int, optional): Keep the top-k results in the prediction. Defaults to 1.

        Returns:
            If img_file is a string or np.ndarray, the result is a dict with key-value
            pairs: {"category_id": `category_id`, "category": `category`, "score": `score`}.
            If img_file is a list, the result is a list composed of dicts with the
            corresponding fields:
            category_id(int): the predicted category ID
            category(str): category name
            score(float): confidence

        """
        if transforms is None and not hasattr(self, 'test_transforms'):
            raise Exception(
                "transforms need to be defined, but they are currently None.")
        if transforms is None:
            transforms = self.test_transforms
        true_topk = min(self.num_classes, topk)
        if isinstance(img_file, (str, np.ndarray)):
            images = [img_file]
        else:
            images = img_file
        im = self._preprocess(images, transforms)
        self.net.eval()
        with paddle.no_grad():
            outputs = self.run(self.net, im, mode='test')
        prediction = outputs['prediction'].numpy()
        prediction = self._postprocess(prediction, true_topk)
        if isinstance(img_file, (str, np.ndarray)):
            prediction = prediction[0]

        return prediction

    def _preprocess(self, images, transforms, to_tensor=True):
        arrange_transforms(
            model_type=self.model_type, transforms=transforms, mode='test')
        batch_im = list()
        for im in images:
            sample = {'image': im}
            batch_im.append(transforms(sample))
        if to_tensor:
            batch_im = paddle.to_tensor(batch_im)
        else:
            batch_im = np.asarray(batch_im)

        # Return a one-element tuple so that run() can index the batch as inputs[0].
        return batch_im,

    def _postprocess(self, results, true_topk):
        preds = list()
        for i, pred in enumerate(results):
            pred_label = np.argsort(pred)[::-1][:true_topk]
            preds.append([{
                'category_id': l,
                'category': self.labels[l],
                'score': results[i][l]
            } for l in pred_label])

        return preds


class ResNet18(BaseClassifier):
    def __init__(self, num_classes=1000, **params):
        super(ResNet18, self).__init__(
            model_name='ResNet18', num_classes=num_classes, **params)


class ResNet34(BaseClassifier):
    def __init__(self, num_classes=1000, **params):
        super(ResNet34, self).__init__(
            model_name='ResNet34', num_classes=num_classes, **params)


class ResNet50(BaseClassifier):
    def __init__(self, num_classes=1000, **params):
        super(ResNet50, self).__init__(
            model_name='ResNet50',
            num_classes=num_classes,
**params) class ResNet101(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet101, self).__init__( model_name='ResNet101', num_classes=num_classes, **params) class ResNet152(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet152, self).__init__( model_name='ResNet152', num_classes=num_classes, **params) class ResNet18_vd(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet18_vd, self).__init__( model_name='ResNet18_vd', num_classes=num_classes, **params) class ResNet34_vd(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet34_vd, self).__init__( model_name='ResNet34_vd', num_classes=num_classes, **params) class ResNet50_vd(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet50_vd, self).__init__( model_name='ResNet50_vd', num_classes=num_classes, **params) class ResNet50_vd_ssld(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet50_vd_ssld, self).__init__( model_name='ResNet50_vd', num_classes=num_classes, lr_mult_list=[.1, .1, .2, .2, .3], **params) self.model_name = 'ResNet50_vd_ssld' class ResNet101_vd(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet101_vd, self).__init__( model_name='ResNet101_vd', num_classes=num_classes, **params) class ResNet101_vd_ssld(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet101_vd_ssld, self).__init__( model_name='ResNet101_vd', num_classes=num_classes, lr_mult_list=[.1, .1, .2, .2, .3], **params) self.model_name = 'ResNet101_vd_ssld' class ResNet152_vd(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet152_vd, self).__init__( model_name='ResNet152_vd', num_classes=num_classes, **params) class ResNet200_vd(BaseClassifier): def __init__(self, num_classes=1000, **params): super(ResNet200_vd, self).__init__( model_name='ResNet200_vd', num_classes=num_classes, **params) class AlexNet(BaseClassifier): def __init__(self, num_classes=1000, **params): super(AlexNet, self).__init__( model_name='AlexNet', num_classes=num_classes, **params) def _get_test_inputs(self, image_shape): if image_shape is not None: if len(image_shape) == 2: image_shape = [None, 3] + image_shape else: image_shape = [None, 3, 224, 224] logging.warning( '[Important!!!] 
When exporting an inference model for {},'.format(
                    self.__class__.__name__) +
                ' if fixed_input_shape is not set, it will be forcibly set to [None, 3, 224, 224]. ' +
                'Please check that the image shape after transforms is [3, 224, 224]; if not, fixed_input_shape ' +
                'should be specified manually.')
        self._fix_transforms_shape(image_shape[-2:])
        self.fixed_input_shape = image_shape

        input_spec = [
            InputSpec(
                shape=image_shape, name='image', dtype='float32')
        ]
        return input_spec


class DarkNet53(BaseClassifier):
    def __init__(self, num_classes=1000, **params):
        super(DarkNet53, self).__init__(
            model_name='DarkNet53', num_classes=num_classes, **params)


class MobileNetV1(BaseClassifier):
    def __init__(self, num_classes=1000, scale=1.0, **params):
        supported_scale = [.25, .5, .75, 1.0]
        if scale not in supported_scale:
            logging.warning("scale={} is not supported by MobileNetV1, "
                            "scale is forcibly set to 1.0".format(scale))
            scale = 1.0
        if scale == 1:
            model_name = 'MobileNetV1'
        else:
            model_name = 'MobileNetV1_x' + str(scale).replace('.', '_')
        self.scale = scale
        super(MobileNetV1, self).__init__(
            model_name=model_name, num_classes=num_classes, **params)


class MobileNetV2(BaseClassifier):
    def __init__(self, num_classes=1000, scale=1.0, **params):
        supported_scale = [.25, .5, .75, 1.0, 1.5, 2.0]
        if scale not in supported_scale:
            logging.warning("scale={} is not supported by MobileNetV2, "
                            "scale is forcibly set to 1.0".format(scale))
            scale = 1.0
        if scale == 1:
            model_name = 'MobileNetV2'
        else:
            model_name = 'MobileNetV2_x' + str(scale).replace('.', '_')
        super(MobileNetV2, self).__init__(
            model_name=model_name, num_classes=num_classes, **params)


class MobileNetV3_small(BaseClassifier):
    def __init__(self, num_classes=1000, scale=1.0, **params):
        supported_scale = [.35, .5, .75, 1.0, 1.25]
        if scale not in supported_scale:
            logging.warning("scale={} is not supported by MobileNetV3_small, "
                            "scale is forcibly set to 1.0".format(scale))
            scale = 1.0
        model_name = 'MobileNetV3_small_x' + str(float(scale)).replace('.',
                                                                       '_')
        super(MobileNetV3_small, self).__init__(
            model_name=model_name, num_classes=num_classes, **params)


class MobileNetV3_small_ssld(BaseClassifier):
    def __init__(self, num_classes=1000, scale=1.0, **params):
        supported_scale = [.35, 1.0]
        if scale not in supported_scale:
            logging.warning(
                "scale={} is not supported by MobileNetV3_small_ssld, "
                "scale is forcibly set to 1.0".format(scale))
            scale = 1.0
        model_name = 'MobileNetV3_small_x' + str(float(scale)).replace('.',
                                                                       '_')
        super(MobileNetV3_small_ssld, self).__init__(
            model_name=model_name, num_classes=num_classes, **params)
        self.model_name = model_name + '_ssld'


class MobileNetV3_large(BaseClassifier):
    def __init__(self, num_classes=1000, scale=1.0, **params):
        supported_scale = [.35, .5, .75, 1.0, 1.25]
        if scale not in supported_scale:
            logging.warning("scale={} is not supported by MobileNetV3_large, "
                            "scale is forcibly set to 1.0".format(scale))
            scale = 1.0
        model_name = 'MobileNetV3_large_x' + str(float(scale)).replace('.',
                                                                       '_')
        super(MobileNetV3_large, self).__init__(
            model_name=model_name, num_classes=num_classes, **params)


class MobileNetV3_large_ssld(BaseClassifier):
    def __init__(self, num_classes=1000, **params):
        super(MobileNetV3_large_ssld, self).__init__(
            model_name='MobileNetV3_large_x1_0',
            num_classes=num_classes,
            **params)
        self.model_name = 'MobileNetV3_large_x1_0_ssld'


class DenseNet121(BaseClassifier):
    def __init__(self, num_classes=1000, **params):
        super(DenseNet121, self).__init__(
            model_name='DenseNet121', num_classes=num_classes, **params)


class 
DenseNet161(BaseClassifier): def __init__(self, num_classes=1000, **params): super(DenseNet161, self).__init__( model_name='DenseNet161', num_classes=num_classes, **params) class DenseNet169(BaseClassifier): def __init__(self, num_classes=1000, **params): super(DenseNet169, self).__init__( model_name='DenseNet169', num_classes=num_classes, **params) class DenseNet201(BaseClassifier): def __init__(self, num_classes=1000, **params): super(DenseNet201, self).__init__( model_name='DenseNet201', num_classes=num_classes, **params) class DenseNet264(BaseClassifier): def __init__(self, num_classes=1000, **params): super(DenseNet264, self).__init__( model_name='DenseNet264', num_classes=num_classes, **params) class HRNet_W18_C(BaseClassifier): def __init__(self, num_classes=1000, **params): super(HRNet_W18_C, self).__init__( model_name='HRNet_W18_C', num_classes=num_classes, **params) class HRNet_W30_C(BaseClassifier): def __init__(self, num_classes=1000, **params): super(HRNet_W30_C, self).__init__( model_name='HRNet_W30_C', num_classes=num_classes, **params) class HRNet_W32_C(BaseClassifier): def __init__(self, num_classes=1000, **params): super(HRNet_W32_C, self).__init__( model_name='HRNet_W32_C', num_classes=num_classes, **params) class HRNet_W40_C(BaseClassifier): def __init__(self, num_classes=1000, **params): super(HRNet_W40_C, self).__init__( model_name='HRNet_W40_C', num_classes=num_classes, **params) class HRNet_W44_C(BaseClassifier): def __init__(self, num_classes=1000, **params): super(HRNet_W44_C, self).__init__( model_name='HRNet_W44_C', num_classes=num_classes, **params) class HRNet_W48_C(BaseClassifier): def __init__(self, num_classes=1000, **params): super(HRNet_W48_C, self).__init__( model_name='HRNet_W48_C', num_classes=num_classes, **params) class HRNet_W64_C(BaseClassifier): def __init__(self, num_classes=1000, **params): super(HRNet_W64_C, self).__init__( model_name='HRNet_W64_C', num_classes=num_classes, **params) class Xception41(BaseClassifier): def __init__(self, num_classes=1000, **params): super(Xception41, self).__init__( model_name='Xception41', num_classes=num_classes, **params) class Xception65(BaseClassifier): def __init__(self, num_classes=1000, **params): super(Xception65, self).__init__( model_name='Xception65', num_classes=num_classes, **params) class Xception71(BaseClassifier): def __init__(self, num_classes=1000, **params): super(Xception71, self).__init__( model_name='Xception71', num_classes=num_classes, **params) class ShuffleNetV2(BaseClassifier): def __init__(self, num_classes=1000, scale=1.0, **params): supported_scale = [.25, .33, .5, 1.0, 1.5, 2.0] if scale not in supported_scale: logging.warning("scale={} is not supported by ShuffleNetV2, " "scale is forcibly set to 1.0".format(scale)) scale = 1.0 model_name = 'ShuffleNetV2_x' + str(float(scale)).replace('.', '_') super(ShuffleNetV2, self).__init__( model_name=model_name, num_classes=num_classes, **params) def _get_test_inputs(self, image_shape): if image_shape is not None: if len(image_shape) == 2: image_shape = [None, 3] + image_shape else: image_shape = [None, 3, 224, 224] logging.warning( '[Important!!!] 
When exporting an inference model for {},'.format(
                    self.__class__.__name__) +
                ' if fixed_input_shape is not set, it will be forcibly set to [None, 3, 224, 224]. ' +
                'Please check that the image shape after transforms is [3, 224, 224]; if not, fixed_input_shape ' +
                'should be specified manually.')
        self._fix_transforms_shape(image_shape[-2:])
        self.fixed_input_shape = image_shape

        input_spec = [
            InputSpec(
                shape=image_shape, name='image', dtype='float32')
        ]
        return input_spec


class ShuffleNetV2_swish(BaseClassifier):
    def __init__(self, num_classes=1000, **params):
        super(ShuffleNetV2_swish, self).__init__(
            model_name='ShuffleNetV2_x1_5',
            num_classes=num_classes,
            **params)

    def _get_test_inputs(self, image_shape):
        if image_shape is not None:
            if len(image_shape) == 2:
                image_shape = [None, 3] + image_shape
        else:
            image_shape = [None, 3, 224, 224]
            logging.warning(
                '[Important!!!] When exporting an inference model for {},'.format(
                    self.__class__.__name__) +
                ' if fixed_input_shape is not set, it will be forcibly set to [None, 3, 224, 224]. ' +
                'Please check that the image shape after transforms is [3, 224, 224]; if not, fixed_input_shape ' +
                'should be specified manually.')
        self._fix_transforms_shape(image_shape[-2:])
        self.fixed_input_shape = image_shape

        input_spec = [
            InputSpec(
                shape=image_shape, name='image', dtype='float32')
        ]
        return input_spec
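
# ---------------------------------------------------------------------------
# Usage sketch (editor's illustration, not part of the original module): the
# train()/predict() flow documented in the docstrings above, exercised end to
# end. The dataset construction, file paths and transforms below are
# hypothetical placeholders; consult the PaddleX documentation for the exact
# dataset API before running anything like this.
#
#   import paddlex as pdx
#   from paddlex import transforms as T
#
#   train_transforms = T.Compose(
#       [T.RandomCrop(crop_size=224), T.Normalize()])   # assumed transforms
#   train_dataset = pdx.datasets.ImageNet(              # hypothetical paths
#       data_dir='data', file_list='data/train_list.txt',
#       label_list='data/labels.txt', transforms=train_transforms)
#
#   model = ResNet50(num_classes=len(train_dataset.labels))
#   model.train(num_epochs=10, train_dataset=train_dataset,
#               train_batch_size=64, learning_rate=.025, save_dir='output')
#   print(model.predict('data/some_image.jpg', topk=5))
# ---------------------------------------------------------------------------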
{"hexsha": "8a623aa24c0c986ca583632606167e2c9964689f", "size": 35482, "ext": "py", "lang": "Python", "max_stars_repo_path": "paddlex/cv/models/classifier.py", "max_stars_repo_name": "fanweiya/PaddleX", "max_stars_repo_head_hexsha": "4258ec623d24db6c5a755357430cbb4455391731", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paddlex/cv/models/classifier.py", "max_issues_repo_name": "fanweiya/PaddleX", "max_issues_repo_head_hexsha": "4258ec623d24db6c5a755357430cbb4455391731", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paddlex/cv/models/classifier.py", "max_forks_repo_name": "fanweiya/PaddleX", "max_forks_repo_head_hexsha": "4258ec623d24db6c5a755357430cbb4455391731", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.8009650181, "max_line_length": 119, "alphanum_fraction": 0.6120004509, "include": true, "reason": "import numpy", "num_tokens": 7885}
import argparse import io import pathlib import sys import PIL.Image import numpy as np def load_image(image_filepath, image_size, scale, subtract_value, bgr): image = PIL.Image.open(image_filepath) image = image.resize((image_size, image_size)) image = np.array(image, dtype=np.float32) / 255 image *= scale image -= subtract_value image = image[:, :, (2, 1, 0)] if bgr else image image = image[np.newaxis, :] bytesio = io.BytesIO() np.save(bytesio, image) bytesio.seek(0) serialized = bytesio.read() sys.stdout.buffer.write(serialized) def main(): parser = argparse.ArgumentParser(description="Load a image and dump as numpy array") parser.add_argument('image_filepath', type=pathlib.Path) parser.add_argument('--size', type=int, default=224, help="input size") parser.add_argument('--scale', type=float, default=1) parser.add_argument('--subtract', type=float, default=0) parser.add_argument('--bgr', action='store_true') args = parser.parse_args() if not args.image_filepath.exists(): parser.error(f"{args.image_filepath} doesn't exist.") load_image(args.image_filepath, args.size, args.scale, args.subtract, args.bgr) if __name__ == '__main__': main()
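
# ---------------------------------------------------------------------------
# Usage sketch (editor's illustration, not part of the original script): the
# serialized array is written to stdout, so redirect it to a file and load it
# back with numpy. The image filename is a hypothetical example.
#
#   $ python image.py cat.jpg --size 224 --scale 2.0 --subtract 1.0 --bgr > cat.npy
#
#   >>> import numpy as np
#   >>> arr = np.load('cat.npy')
#   >>> arr.shape   # (1, 224, 224, 3): batch, height, width, channels
# ---------------------------------------------------------------------------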
{"hexsha": "c5ad88465e3881486ec75d64c8650788af476c88", "size": 1263, "ext": "py", "lang": "Python", "max_stars_repo_path": "modelutils/commands/image.py", "max_stars_repo_name": "shonohs/modelutils", "max_stars_repo_head_hexsha": "24df495ce5372c3f8a1f064f163b51150517e2de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-10T07:06:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-10T07:06:32.000Z", "max_issues_repo_path": "modelutils/commands/image.py", "max_issues_repo_name": "shonohs/modelutils", "max_issues_repo_head_hexsha": "24df495ce5372c3f8a1f064f163b51150517e2de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modelutils/commands/image.py", "max_forks_repo_name": "shonohs/modelutils", "max_forks_repo_head_hexsha": "24df495ce5372c3f8a1f064f163b51150517e2de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3720930233, "max_line_length": 88, "alphanum_fraction": 0.6927949327, "include": true, "reason": "import numpy", "num_tokens": 305}
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

import numpy as np


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def make_grid(nx=20, ny=20):
    # Build a (1, ny, nx, 2) grid of cell coordinates
    xv1, yv1 = np.meshgrid(np.arange(nx), np.arange(ny))
    z1 = np.stack((xv1, yv1), 2).reshape((1, ny, nx, 2)).astype(np.float32)
    return z1


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def softmax(x):
    # Numerically stable row-wise softmax; note that x is modified in place
    max_value = np.max(x, axis=-1)
    x -= max_value.reshape((x.shape[0], 1))
    x = np.exp(x)
    sum_value = np.sum(x, axis=-1)
    x /= sum_value.reshape((x.shape[0], 1))
    return x


def iou_of(boxes0, boxes1, eps=1e-5):
    """Return intersection-over-union (Jaccard index) of boxes.

    Args:
        boxes0 (N, 4): ground truth boxes.
        boxes1 (N or 1, 4): predicted boxes.
        eps: a small number to avoid 0 as denominator.
    Returns:
        iou (N): IoU values.
    """
    overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
    overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])

    overlap_area = area_of(overlap_left_top, overlap_right_bottom)
    area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
    area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
    return overlap_area / (area0 + area1 - overlap_area + eps)


def area_of(left_top, right_bottom):
    """Compute the areas of rectangles given two corners.

    Args:
        left_top (N, 2): left top corner.
        right_bottom (N, 2): right bottom corner.
    Returns:
        area (N): return the area.
    """
    hw = np.clip(right_bottom - left_top, 0.0, None)
    return hw[..., 0] * hw[..., 1]


def nms(boxes, scores, iou_threshold, top_k=-1, candidate_size=200):
    """Perform hard non-maximum suppression.

    Args:
        boxes (N, 4): boxes in corner form (x1, y1, x2, y2).
        scores (N,): confidence scores, one per box.
        iou_threshold: intersection-over-union threshold above which a box is suppressed.
        top_k: keep top_k results. If k <= 0, keep all the results.
        candidate_size: only consider the candidates with the highest scores.
    Returns:
        picked: a list of indexes of the kept boxes.
    """
    picked = []
    indexes = np.argsort(scores)
    indexes = indexes[-candidate_size:]
    while len(indexes) > 0:
        current = indexes[-1]
        picked.append(current)
        if 0 < top_k == len(picked) or len(indexes) == 1:
            break
        current_box = boxes[current, :]
        indexes = indexes[:-1]
        rest_boxes = boxes[indexes, :]
        iou = iou_of(
            rest_boxes,
            np.expand_dims(current_box, axis=0),
        )
        indexes = indexes[iou <= iou_threshold]

    return picked
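
# ---------------------------------------------------------------------------
# Minimal demo of the helpers above (editor's illustration, not part of the
# original module). The boxes and scores are synthetic values chosen only to
# show the expected shapes and the effect of nms().
if __name__ == "__main__":
    # Two heavily overlapping boxes and one separate box, in (x1, y1, x2, y2).
    demo_boxes = np.array(
        [[10, 10, 50, 50], [12, 12, 52, 52], [100, 100, 140, 140]],
        dtype=np.float32,
    )
    demo_scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)

    kept = nms(demo_boxes, demo_scores, iou_threshold=0.5)
    # Expect [0, 2]: box 1 is suppressed by the higher-scoring box 0.
    print("kept:", kept)

    # Round trip between the two box encodings recovers the original values.
    demo_xywh = np.array([[30., 30., 40., 40.]], dtype=np.float32)
    print(xyxy2xywh(xywh2xyxy(demo_xywh)))  # prints [[30. 30. 40. 40.]]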
{"hexsha": "5b886ebaf6ee7198b92f931b871e241f2114fd3f", "size": 3878, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/ncnn/utils/functional.py", "max_stars_repo_name": "fzyzcjy/ncnn", "max_stars_repo_head_hexsha": "42e71609508fde1bd54d9d9de6ca5522ee3bcf37", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 14886, "max_stars_repo_stars_event_min_datetime": "2017-07-24T02:58:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T18:17:04.000Z", "max_issues_repo_path": "python/ncnn/utils/functional.py", "max_issues_repo_name": "fzyzcjy/ncnn", "max_issues_repo_head_hexsha": "42e71609508fde1bd54d9d9de6ca5522ee3bcf37", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3361, "max_issues_repo_issues_event_min_datetime": "2017-07-24T05:56:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:26:35.000Z", "max_forks_repo_path": "python/ncnn/utils/functional.py", "max_forks_repo_name": "fzyzcjy/ncnn", "max_forks_repo_head_hexsha": "42e71609508fde1bd54d9d9de6ca5522ee3bcf37", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3786, "max_forks_repo_forks_event_min_datetime": "2017-07-24T03:09:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T16:56:40.000Z", "avg_line_length": 32.0495867769, "max_line_length": 98, "alphanum_fraction": 0.5972150593, "include": true, "reason": "import numpy", "num_tokens": 1187}
import argparse from torch.utils.data import DataLoader from resnet import get_pretrained_resnet import torch from resnet import ResNet import logging from enum import Enum import numpy as np import torchvision class CLI(Enum): DATA = 'path_data' CHECKPOINT = 'path_model_checkpoint' CHECKPOINT_FREQUENCY = 'checkpoint_frequency' EPOCHS = 'epochs' VALIDATION_FREQUENCY = 'validation_frequency' NUM_CLASSES = 'number_of_classes' DATA_SUBSET = 'data_subset' FREEZE_WEIGHTS = 'freeze_weights' IMAGES = 'path_images' LABELS = 'path_labels' MODEL = 'model' class Hyperparameters(Enum): LEARNING_RATE_SCHEDULER = 'learning_rate_scheduler' BATCH_SIZE = 'batch_size' NESTEROV = 'nesterov' WEIGHT_DECAY = 'weight_decay' MOMENTUM = 'momentum' LEARNING_RATE = 'learning_rate' SCEDULER_RATE = 'scheduler_rate' class AStanfordCarsCLI(object): IMAGE_TRANSFORMS = [torchvision.transforms.Resize(224), torchvision.transforms.RandomCrop(224), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])] def __init__(self): pass def arg_parse(self): """CLI interface""" parser = argparse.ArgumentParser(description='CLI for tuning ResNet for Stanford Cars dataset.') parser.add_argument("--" + CLI.DATA.value, dest=CLI.DATA.value, type=str, help="mat file with training and test data", required=True) parser.add_argument("--" + CLI.IMAGES.value, dest=CLI.IMAGES.value, type=str, help="path to directory with images", required=True) parser.add_argument("--" + CLI.LABELS.value, dest=CLI.LABELS.value, type=str, help="file with id and human readable label name", required=True) parser.add_argument("--" + CLI.CHECKPOINT.value, dest=CLI.CHECKPOINT.value, type=str, help="directory to save model checkpoints", required=False, default=None) parser.add_argument("--" + CLI.MODEL.value, dest=CLI.MODEL.value, type=str, help="model to use. 
options: ResNet18, ResNet50", required=True) parser.add_argument("--" + CLI.CHECKPOINT_FREQUENCY.value, dest=CLI.CHECKPOINT_FREQUENCY.value, type=int, help="frequency to save model", required=False, default=None) parser.add_argument("--" + CLI.NUM_CLASSES.value, dest=CLI.NUM_CLASSES.value, type=int, help='number of unique classes in labels', required=True) parser.add_argument("--" + CLI.EPOCHS.value, dest=CLI.EPOCHS.value, type=int, help="total number of training epochs", required=True) parser.add_argument("--" + CLI.VALIDATION_FREQUENCY.value, dest=CLI.VALIDATION_FREQUENCY.value, type=int, help="frequency to run validation", required=True) parser.add_argument("--" + CLI.DATA_SUBSET.value, dest=CLI.DATA_SUBSET.value, type=float, help="subset of training data to use", required=True, default=1.0) parser.add_argument("--" + CLI.FREEZE_WEIGHTS.value, dest=CLI.FREEZE_WEIGHTS.value, action='store_true', help="whether or not to freeze weights on pretrained model") parser.add_argument("--" + "no-" + CLI.FREEZE_WEIGHTS.value, dest=CLI.FREEZE_WEIGHTS.value, action='store_false', help="whether or not to freeze weights on pretrained model") return parser def load_datasets(self, *args): """Subclasses to implement dataset loading.""" pass def get_run_arguments(self, parsed_cli_dict): """Subclasses to implement getting run arguments""" pass def run(self, parameter_arguments, num_epochs, training_data, validation_data): logging.info("loading pretrained model and establishing model characteristics") # get pretrained model from PyTorch model zoo resnet_pretrained_model = get_pretrained_resnet(is_freeze_weights=parameter_arguments[CLI.FREEZE_WEIGHTS.value], number_of_labels=parameter_arguments[CLI.NUM_CLASSES.value], model_type=parameter_arguments[CLI.MODEL.value]) # define loss function cross_entropy_loss = torch.nn.CrossEntropyLoss() # define gradient descent strategy sgd_optimizer = torch.optim.SGD(resnet_pretrained_model.parameters(), lr=np.exp(parameter_arguments[Hyperparameters.LEARNING_RATE.value]), momentum=parameter_arguments[Hyperparameters.MOMENTUM.value], weight_decay=np.exp( parameter_arguments[Hyperparameters.WEIGHT_DECAY.value]), nesterov=parameter_arguments[Hyperparameters.NESTEROV.value]) # define learning rate annealing learning_rate_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(sgd_optimizer, mode='min', factor=parameter_arguments[ Hyperparameters.LEARNING_RATE_SCHEDULER.value], patience=parameter_arguments[ Hyperparameters.SCEDULER_RATE.value], verbose=True) logging.info("training model") resnet = ResNet(epochs=num_epochs, gd_optimizer=sgd_optimizer, model=resnet_pretrained_model, loss_function=cross_entropy_loss, learning_rate_scheduler=learning_rate_scheduler, validation_frequency=parameter_arguments[CLI.VALIDATION_FREQUENCY.value], torch_checkpoint_location=parameter_arguments[CLI.CHECKPOINT.value], model_checkpointing=parameter_arguments[CLI.CHECKPOINT_FREQUENCY.value]) # train model trained_model, validation_metric = resnet.train_model(training_data=DataLoader(training_data, batch_size=2 ** ( parameter_arguments[Hyperparameters.BATCH_SIZE.value]), shuffle=True), validation_data=DataLoader(validation_data, batch_size=2 ** ( parameter_arguments[ Hyperparameters.BATCH_SIZE.value]), shuffle=True), number_of_labels=parameter_arguments[ CLI.NUM_CLASSES.value]) return trained_model, validation_metric def run_all(self): """Subclasses to implement driver for running CLI""" pass
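
# ---------------------------------------------------------------------------
# Subclassing sketch (editor's illustration, not part of the original module):
# load_datasets(), get_run_arguments() and run_all() are the hooks a concrete
# CLI is expected to fill in; everything below is hypothetical scaffolding.
#
#   class StanfordCarsCLI(AStanfordCarsCLI):
#       def load_datasets(self, parsed_cli_dict):
#           # e.g. build torchvision datasets using self.IMAGE_TRANSFORMS
#           ...
#
#       def get_run_arguments(self, parsed_cli_dict):
#           # merge parsed CLI values with fixed hyperparameter choices
#           ...
#
#       def run_all(self):
#           parser = self.arg_parse()
#           parsed = vars(parser.parse_args())
#           ...  # load data, call self.run(...), log validation_metric
# ---------------------------------------------------------------------------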
{"hexsha": "70e0f76f6d31d3b06c8266b104f987ac67def29b", "size": 7766, "ext": "py", "lang": "Python", "max_stars_repo_path": "stanford-augmented-image-classification/a_resnet_training_common_cli.py", "max_stars_repo_name": "meghanaravikumar/sigopt-examples", "max_stars_repo_head_hexsha": "e2d938928384f340d77efb52b226f678b6008fb9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 213, "max_stars_repo_stars_event_min_datetime": "2015-02-24T22:26:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T14:17:52.000Z", "max_issues_repo_path": "stanford-augmented-image-classification/a_resnet_training_common_cli.py", "max_issues_repo_name": "meghanaravikumar/sigopt-examples", "max_issues_repo_head_hexsha": "e2d938928384f340d77efb52b226f678b6008fb9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 73, "max_issues_repo_issues_event_min_datetime": "2015-11-12T03:11:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:43:18.000Z", "max_forks_repo_path": "stanford-augmented-image-classification/a_resnet_training_common_cli.py", "max_forks_repo_name": "meghanaravikumar/sigopt-examples", "max_forks_repo_head_hexsha": "e2d938928384f340d77efb52b226f678b6008fb9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 66, "max_forks_repo_forks_event_min_datetime": "2016-01-01T18:25:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-27T03:07:07.000Z", "avg_line_length": 54.3076923077, "max_line_length": 132, "alphanum_fraction": 0.5551120268, "include": true, "reason": "import numpy", "num_tokens": 1319}
import cv2
import numpy

# The colorImage class wraps an image loaded with OpenCV; if the user does not
# provide a path, a default image is loaded instead.
class colorImage:
    def __init__(self, route=None):  # Initialization
        if route is None:
            # When the user does not provide a path, this default image is used
            self.route = 'C:/Users/aleja/OneDrive/Documentos/GitHub/colorImage.AAC/lena.png'  # The file is loaded
        else:
            self.route = route
        self.image = cv2.imread(self.route)  # Loaded image

    # Display the image properties
    def displayProperties(self):
        height, wide, comp = self.image.shape  # Image data: height, width and number of channels
        print('Height and width of the photo:', height, 'x', wide, '.')  # Print the image height and width

    # Display the image in grayscale
    def makeGray(self):
        grayscale = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)  # Convert the image to grayscale with the BGR2GRAY code
        cv2.imshow('Gray Image', grayscale)  # Show the grayscale image
        cv2.waitKey(0)

    # Get a colorized image showing a single RGB component
    def colorizeRGB(self, comp=None):
        # The red component is used by default when the user does not specify a color
        if comp is None:
            self.comp = 'red_color'
        else:
            self.comp = comp

        # To show the red component, copy the image and zero out the other two channels
        if self.comp == 'red_color':
            image_in_red = numpy.copy(self.image)
            image_in_red[:, :, 0] = 0
            image_in_red[:, :, 1] = 0
            cv2.imshow('Image: Red Component', image_in_red)
            cv2.waitKey(0)
        # To show the green component, copy the image and zero out the other two channels
        elif self.comp == 'green_color':
            image_in_green = numpy.copy(self.image)
            image_in_green[:, :, 0] = 0
            image_in_green[:, :, 2] = 0
            cv2.imshow('Image: Green Component', image_in_green)
            cv2.waitKey(0)
        # To show the blue component, copy the image and zero out the other two channels
        elif self.comp == 'blue_color':
            image_in_blue = numpy.copy(self.image)
            image_in_blue[:, :, 1] = 0
            image_in_blue[:, :, 2] = 0
            cv2.imshow('Image: Blue Component', image_in_blue)
            cv2.waitKey(0)
        else:
            print('None of the entered values is valid')

    # Highlight the tones in the image (hue)
    def makeHue(self):
        HSV_function = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)  # Convert the image from BGR to HSV using the BGR2HSV code
        HSV_function[:, :, 1] = 255  # Set the S component to 255
        HSV_function[:, :, 2] = 255  # Set the V component to 255
        hue_image = cv2.cvtColor(HSV_function, cv2.COLOR_HSV2BGR)
        cv2.imshow('HUE_image', hue_image)
        cv2.waitKey(0)
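
# ---------------------------------------------------------------------------
# Usage sketch (editor's illustration, not part of the original class). The
# image path is a hypothetical example; with no argument the default image
# hard-coded above is used.
if __name__ == '__main__':
    picture = colorImage()          # or colorImage('path/to/an/image.png')
    picture.displayProperties()
    picture.makeGray()
    picture.colorizeRGB('blue_color')
    picture.makeHue()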
{"hexsha": "d9cc136fd468e7119a9460ea3f852e3d1cc855c6", "size": 3197, "ext": "py", "lang": "Python", "max_stars_repo_path": "colorImage.py", "max_stars_repo_name": "alejandraavendano/colorImage.AAC", "max_stars_repo_head_hexsha": "23b3d176ea13e7c76fc97da94b515bb574335ef2", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "colorImage.py", "max_issues_repo_name": "alejandraavendano/colorImage.AAC", "max_issues_repo_head_hexsha": "23b3d176ea13e7c76fc97da94b515bb574335ef2", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "colorImage.py", "max_forks_repo_name": "alejandraavendano/colorImage.AAC", "max_forks_repo_head_hexsha": "23b3d176ea13e7c76fc97da94b515bb574335ef2", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4027777778, "max_line_length": 161, "alphanum_fraction": 0.6396621833, "include": true, "reason": "import numpy", "num_tokens": 791}
!##############################################################################
!# Tutorial 002a: Memory management, 1D arrays
!##############################################################################

module tutorial002a

  ! Include basic Feat-2 modules
  use fsystem
  use genoutput
  use storage

  implicit none
  private

  public :: start_tutorial002a

contains

  ! ***************************************************************************

  subroutine start_tutorial002a

    ! Declare some variables
    integer :: i
    integer :: ihandleInt, ihandleDouble
    integer, dimension(:), pointer :: p_Idata
    real(DP), dimension(:), pointer :: p_Ddata

    ! Print a message
    call output_lbrk()
    call output_separator (OU_SEP_STAR)
    call output_line ("This is FEAT-2. Tutorial 002a")
    call output_separator (OU_SEP_MINUS)

    ! ===============
    ! Allocate memory
    ! ===============

    ! 1D integer
    call storage_new ("start_tutorial002a", &
        "arrayI", 10, ST_INT, ihandleInt, ST_NEWBLOCK_ZERO)

    ! 1D double
    call storage_new ("start_tutorial002a", &
        "arrayD", 10, ST_DOUBLE, ihandleDouble, ST_NEWBLOCK_ZERO)

    ! =====================
    ! Get pointers
    ! =====================

    call storage_getbase_int (ihandleInt,p_Idata)
    call storage_getbase_double (ihandleDouble,p_Ddata)

    ! =====================
    ! Fill with data
    ! =====================

    do i=1,10
      p_Idata(i) = i**2
      p_Ddata(i) = real(i,DP) ** 2
    end do

    ! =====================
    ! Print the data
    ! =====================

    do i=1,10
      call output_line (trim(sys_siL(p_Idata(i),10)))
    end do

    do i=1,10
      call output_line (trim(sys_sdEL(p_Ddata(i),10)))
    end do

    ! =====================
    ! Release the data.
    ! Pointers become invalid.
    ! =====================

    call storage_free (ihandleInt)
    call storage_free (ihandleDouble)

    ! =====================
    ! Print information
    ! about the memory.
    ! =====================

    call output_lbrk()
    call storage_info()

  end subroutine

end module
{"hexsha": "0f025b0fd77c5c3a561f16b0d242fcadc2edfb0b", "size": 2145, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tutorials/tutorial01/src/tutorial002a.f90", "max_stars_repo_name": "trmcnealy/Featflow2", "max_stars_repo_head_hexsha": "4af17507bc2d80396bf8ea85c9e30e9e4d2383df", "max_stars_repo_licenses": ["Intel", "Unlicense"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-08-02T11:51:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-10T14:14:21.000Z", "max_issues_repo_path": "tutorials/tutorial01/src/tutorial002a.f90", "max_issues_repo_name": "tudo-math-ls3/FeatFlow2", "max_issues_repo_head_hexsha": "56159aff28f161aca513bc7c5e2014a2d11ff1b3", "max_issues_repo_licenses": ["Intel", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorials/tutorial01/src/tutorial002a.f90", "max_forks_repo_name": "tudo-math-ls3/FeatFlow2", "max_forks_repo_head_hexsha": "56159aff28f161aca513bc7c5e2014a2d11ff1b3", "max_forks_repo_licenses": ["Intel", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8333333333, "max_line_length": 79, "alphanum_fraction": 0.4853146853, "num_tokens": 512}
open import Data.Bool using ( Bool ; true ; false ; _∧_ ) open import Data.Product using ( _×_ ) open import Relation.Binary.PropositionalEquality using ( _≡_ ) open import Relation.Unary using ( _∈_ ) open import Web.Semantic.DL.Concept using ( Concept ; ⟨_⟩ ; ¬⟨_⟩ ; ⊤ ; ⊥ ; _⊓_ ; _⊔_ ; ∀[_]_ ; ∃⟨_⟩_ ; ≤1 ; >1 ) open import Web.Semantic.DL.Signature using ( Signature ) open import Web.Semantic.DL.TBox using ( TBox ; ε ; _,_ ;_⊑₁_ ; _⊑₂_ ; Dis ; Ref ; Irr ; Tra ) open import Web.Semantic.Util using ( Subset ; □ ; □-proj₁ ; □-proj₂ ) module Web.Semantic.DL.TBox.Minimizable {Σ : Signature} where data LHS : Subset (Concept Σ) where ⟨_⟩ : ∀ c → ⟨ c ⟩ ∈ LHS ⊤ : ⊤ ∈ LHS ⊥ : ⊥ ∈ LHS _⊓_ : ∀ {C D} → (C ∈ LHS) → (D ∈ LHS) → ((C ⊓ D) ∈ LHS) _⊔_ : ∀ {C D} → (C ∈ LHS) → (D ∈ LHS) → ((C ⊔ D) ∈ LHS) ∃⟨_⟩_ : ∀ R {C} → (C ∈ LHS) → ((∃⟨ R ⟩ C) ∈ LHS) data RHS : Subset (Concept Σ) where ⟨_⟩ : ∀ c → ⟨ c ⟩ ∈ RHS ⊤ : ⊤ ∈ RHS _⊓_ : ∀ {C D} → (C ∈ RHS) → (D ∈ RHS) → ((C ⊓ D) ∈ RHS) ∀[_]_ : ∀ R {C} → (C ∈ RHS) → ((∀[ R ] C) ∈ RHS) ≤1 : ∀ R → ((≤1 R) ∈ RHS) data μTBox : Subset (TBox Σ) where ε : μTBox ε _,_ : ∀ {T U} → (T ∈ μTBox) → (U ∈ μTBox) → ((T , U) ∈ μTBox) _⊑₁_ : ∀ {C D} → (C ∈ LHS) → (D ∈ RHS) → ((C ⊑₁ D) ∈ μTBox) _⊑₂_ : ∀ Q R → ((Q ⊑₂ R) ∈ μTBox) Ref : ∀ R → (Ref R ∈ μTBox) Tra : ∀ R → (Tra R ∈ μTBox) lhs? : Concept Σ → Bool lhs? ⟨ c ⟩ = true lhs? ¬⟨ c ⟩ = false lhs? ⊤ = true lhs? ⊥ = true lhs? (C ⊓ D) = lhs? C ∧ lhs? D lhs? (C ⊔ D) = lhs? C ∧ lhs? D lhs? (∀[ R ] C) = false lhs? (∃⟨ R ⟩ C) = lhs? C lhs? (≤1 R) = false lhs? (>1 R) = false lhs : ∀ C {C✓ : □(lhs? C)} → LHS C lhs ⟨ c ⟩ = ⟨ c ⟩ lhs ⊤ = ⊤ lhs ⊥ = ⊥ lhs (C ⊓ D) {C⊓D✓} = lhs C {□-proj₁ C⊓D✓} ⊓ lhs D {□-proj₂ {lhs? C} C⊓D✓} lhs (C ⊔ D) {C⊔D✓} = lhs C {□-proj₁ C⊔D✓} ⊔ lhs D {□-proj₂ {lhs? C} C⊔D✓} lhs (∃⟨ R ⟩ C) {C✓} = ∃⟨ R ⟩ (lhs C {C✓}) lhs ¬⟨ c ⟩ {} lhs (∀[ R ] C) {} lhs (≤1 R) {} lhs (>1 R) {} rhs? : Concept Σ → Bool rhs? ⟨ c ⟩ = true rhs? ¬⟨ c ⟩ = false rhs? ⊤ = true rhs? ⊥ = false rhs? (C ⊓ D) = rhs? C ∧ rhs? D rhs? (C ⊔ D) = false rhs? (∀[ R ] C) = rhs? C rhs? (∃⟨ R ⟩ C) = false rhs? (≤1 R) = true rhs? (>1 R) = false rhs : ∀ C {C✓ : □(rhs? C)} → RHS C rhs ⟨ c ⟩ = ⟨ c ⟩ rhs ⊤ = ⊤ rhs (C ⊓ D) {C⊓D✓} = rhs C {□-proj₁ C⊓D✓} ⊓ rhs D {□-proj₂ {rhs? C} C⊓D✓} rhs (∀[ R ] C) {C✓} = ∀[ R ] (rhs C {C✓}) rhs (≤1 R) = ≤1 R rhs ⊥ {} rhs ¬⟨ c ⟩ {} rhs (C ⊔ D) {} rhs (∃⟨ R ⟩ C) {} rhs (>1 R) {} μTBox? : TBox Σ → Bool μTBox? ε = true μTBox? (T , U) = μTBox? T ∧ μTBox? U μTBox? (C ⊑₁ D) = lhs? C ∧ rhs? D μTBox? (Q ⊑₂ R) = true μTBox? (Dis Q R) = false μTBox? (Ref R) = true μTBox? (Irr R) = false μTBox? (Tra R) = true μtBox : ∀ T {T✓ : □(μTBox? T)} → μTBox T μtBox ε = ε μtBox (T , U) {TU✓} = (μtBox T {□-proj₁ TU✓} , μtBox U {□-proj₂ {μTBox? T} TU✓}) μtBox (C ⊑₁ D) {C⊑D✓} = lhs C {□-proj₁ C⊑D✓} ⊑₁ rhs D {□-proj₂ {lhs? C} C⊑D✓} μtBox (Q ⊑₂ R) = Q ⊑₂ R μtBox (Ref R) = Ref R μtBox (Tra R) = Tra R μtBox (Dis Q R) {} μtBox (Irr R) {}
{"hexsha": "cbdc0a4377dd63906550e7b914280550364c5520", "size": 3227, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/Web/Semantic/DL/TBox/Minimizable.agda", "max_stars_repo_name": "agda/agda-web-semantic", "max_stars_repo_head_hexsha": "8ddbe83965a616bff6fc7a237191fa261fa78bab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2015-09-13T17:46:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-14T14:21:08.000Z", "max_issues_repo_path": "src/Web/Semantic/DL/TBox/Minimizable.agda", "max_issues_repo_name": "bblfish/agda-web-semantic", "max_issues_repo_head_hexsha": "38fbc3af7062ba5c3d7d289b2b4bcfb995d99057", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-11-14T02:32:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-04T20:57:19.000Z", "max_forks_repo_path": "src/Web/Semantic/DL/TBox/Minimizable.agda", "max_forks_repo_name": "bblfish/agda-web-semantic", "max_forks_repo_head_hexsha": "38fbc3af7062ba5c3d7d289b2b4bcfb995d99057", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-12-03T14:52:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:40:03.000Z", "avg_line_length": 31.0288461538, "max_line_length": 82, "alphanum_fraction": 0.431670282, "num_tokens": 1645}
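For readers who do not speak Agda, the deciders lhs? and rhs? above simply say which concept constructors may appear on the left and right of a ⊑₁ axiom in a minimizable TBox. The following Python sketch mirrors those two boolean checks over a tiny concept AST; the class names are invented for the example and are not part of the Agda development.

from dataclasses import dataclass

@dataclass
class Atom:    name: str             # ⟨ c ⟩
@dataclass
class Top:     pass                  # ⊤
@dataclass
class Bot:     pass                  # ⊥
@dataclass
class And:     l: object; r: object  # C ⊓ D
@dataclass
class Or:      l: object; r: object  # C ⊔ D
@dataclass
class Forall:  role: str; c: object  # ∀[ R ] C
@dataclass
class Exists:  role: str; c: object  # ∃⟨ R ⟩ C
@dataclass
class AtMost1: role: str             # ≤1 R

def lhs_ok(c):
    """Mirror of lhs?: concepts allowed on the left of ⊑₁."""
    if isinstance(c, (Atom, Top, Bot)): return True
    if isinstance(c, (And, Or)):        return lhs_ok(c.l) and lhs_ok(c.r)
    if isinstance(c, Exists):           return lhs_ok(c.c)
    return False  # negation, ∀, ≤1, >1 are rejected, as in the Agda

def rhs_ok(c):
    """Mirror of rhs?: concepts allowed on the right of ⊑₁."""
    if isinstance(c, (Atom, Top, AtMost1)): return True
    if isinstance(c, And):                  return rhs_ok(c.l) and rhs_ok(c.r)
    if isinstance(c, Forall):               return rhs_ok(c.c)
    return False  # ⊥, negation, ⊔, ∃, >1 are rejected

assert lhs_ok(Or(Atom("A"), Exists("R", Atom("B"))))
assert not rhs_ok(Or(Atom("A"), Atom("B")))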
%!TEX TS-program = XeLaTeX %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Hieu Do - Resume % 7/26/2016 % % Reference: % Debarghya Das (http://debarghyadas.com) \documentclass[]{hieudo-build} \usepackage{enumitem} \begin{document} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TITLE NAME % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \namesection{Pritthijit Nath} {\urlstyle{same} \faHome \href{https://nath.p3jit.com}{ nath.p3jit.com}\\ \faEnvelope \href{mailto:pritthijit.nath@ieee.org}{ pritthijit.nath@ieee.org}\\ \faGithub \href{https://github.p3jit.com}{ https://github.p3jit.com}\\ \faPhone \href{callto:+918334087432} { +91 83340 87432} % \faLinkedinSquare \href{https://linkedin.p3jit.com}{ linkedin.p3jit.com} } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Page ONE - COLUMN ONE % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{minipage}[t]{0.34\textwidth} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % EDUCATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Education} \subsection{Jadavpur University} \descript{Dept. of Computer Science} BE in Computer Science \& Engg.\\ Expected Grad. Jun 2022 \\ Abs. Wgtd. CGPA : 9.41 / 10 \\ Class Rank : 1/63 \\ \sectionsep \subsection{Delhi Public School, Ruby Park} Grad. May 2018 \\ AISSCE (XII) - 96.4 \% \\ \sectionsep \subsection{A.G. Church School, Park Street} Grad. May 2016 \\ ICSE (X) - 93 \% \\ \sectionsep %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % SKILLS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Skills} \subsection{Programming} \location{Languages:} C, Java, Python, JavaScript \\ SQL, PL/SQL\\ \location{Frameworks:} Django, Flask, ExpressJS \\ Sci-Kit Learn, Statsmodels \\ OpenCV, TensorFlow, PyTorch \\ NumPy, Pandas \location{Databases:} MySQL, MongoDB, Oracle \\ \location{Tools:} Git, Jupyter, Docker, \LaTeX \\ \section{Coursework} Artificial Intelligence \\ Database Management Systems\\ Data Structure and Algorithms \\ Object-Oriented Programming\\ Operating Systems\\ Systems Programming\\ \sectionsep \section{Activities} Placement Coordinator\\ Class Representative\\ Chairperson, IEEE CS Chapter\\ Lead Organizer, C-Thru 2019 \\ \sectionsep \end{minipage} \hfill \begin{minipage}[t]{0.65\textwidth} \section{Experience} \workplace{University of Waterloo \& UNICEF Mongolia}{Jun 2021 – Sep 2021} \\ \position{MITACS Globalink Research Intern}{Waterloo, Canada} \vspace{\topsep} % Hacky fix for awkward extra vertical space \begin{tightemize} \item Developed a data ecosystem capable of mining and hosting air quality data from consumer-level air quality monitoring sensors installed in Ulaanbaatar, Mongolia. \item Engineered the Azure cloud data infrastructure to store data and devised data mining scripts to onboard air quality sensors into the system. \end{tightemize} \sectionsep \workplace{Indian Statistical Institute}{Jun 2021 – Aug 2021}\\ \position{IASc-INSA-NASI Summer Research Fellow}{Kolkata, India} %\vspace{0.9em} % Hacky fix for awkward extra vertical space \begin{tightemize} \item Developed a machine learning based approach to predict the best neural network verifier for Acas-Xu like models having piecewise linear activation functions. \item Devised an end-to-end pipeline to run benchmarks on adversarial robustness properties and extract structural signatures of different Acas-Xu like models. 
\end{tightemize} \sectionsep \workplace{Jadavpur University}{Jun 2020 – Present} \\ \position{Undergraduate Research Assistant}{Kolkata, India} % \vspace{\topsep} % Hacky fix for awkward extra vertical space \begin{tightemize} \item Coordinated a detailed case study analysis of COVID-19 lockdown effects during first and second wave on air pollution of four major cities in India. \item Proposed novel hybrid learner models for spatio-temporal pollution forecasting using satellite AOD and a multi-site ensemble for spatial interpolation from temporal forecast data. \item Conducted a comparative study of the performance of different statistical and deep learning based methods in forecasting long term time-series pollution trends of Kolkata. \item Developed a novel method to improve predictive accuracy of time-series based prediction models using Matrix Profile and Motifs. \end{tightemize} \sectionsep \workplace{Xelpmoc Design and Tech Ltd.}{Jun 2019 – Jul 2019} \\ \position{Data Science Intern}{Kolkata, India} % \vspace{\topsep} % Hacky fix for awkward extra vertical space \begin{tightemize} \item Engineered a data extraction pipeline to store data from electoral rolls published by the ECI into a NoSQL database for future use in developing socio-demographic insights. \item Programmed and trained a deep learning based transliteration model that would romanize Bengali names into English. \end{tightemize} \sectionsep \end{minipage} \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Page ONE - COLUMN ONE % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{minipage}[t]{0.34\textwidth} \sectionsep \subsection{Languages} \location{Professional fluency} English\\ \location{Native fluency} Bengali \\ \location{Working fluency} Hindi \\ \sectionsep \sectionsep \DTMsetdatestyle{mylastupdate} Last Updated: \DTMdisplaydate{\the\day}{\the\month}{\the\year}{-1} \end{minipage} \hfill \begin{minipage}[t]{0.65\textwidth} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Page TWO - COLUMN TWO % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Publications} \begin{enumerate}[leftmargin=*] %\item P. Nath, B. Roy, A. I. Middya and S. Roy. Analysis of COVID-19 lockdown effects on city air pollution in India - A case study. \textit{Currently under peer review}. %\item P. Nath, B. Roy, P. Saha, A. I. Middya and S. Roy. Hybrid learning model for spatio-temporal %forecasting of PM$_{2.5}$ using aerosol optical depth. \textit{Currently under peer review}. %\item P. Nath, P. Saha, A. I. Middya and S. Roy. Spatio-temporal pollution forecasting using hybrid networks. \textit{Currently under peer review}. \item P. Nath, P. Saha, A. I. Middya and S. Roy (2021). Long-term time-series pollution forecast using statistical and deep learning methods. Neural Computing and Applications. \href{https://doi.org/10.1007/s00521-021-05901-2}{doi.org/10.1007/s00521-021-05901-2}. %\item P. Saha, P. Nath, A. I. Middya and S. Roy. Improving Temporal Predictions through Time-Series Labeling using Matrix Profile and Motifs. \textit{Currently under peer review}. \end{enumerate} \section{Projects} \runsubsection{EcoEden - The Green Social Media}\\ \descript{Smart India Hackathon, 2020} Developed a social media platform that aims to incentivize citizens who exercise responsible behavior in terms of trash collection and disposal. 
\sectionsep \runsubsection{MOXA - Real Time Mask Detection System}\\ \descript{Kolkata Police Special Task Force} Developed an easy deployable computer vision model capable of real-time face mask detection using YOLOv4, Docker and TensorRT. \sectionsep \section{Awards} \runsubsection{IEEE Computer Society Richard E. Merwin Scholarship, 2021} \descript{} Selected as a part of top 20 students all over the world in recognition of exceptional student leadership. \sectionsep \runsubsection{IASc-INSA-NASI Summer Research Fellowship, 2021} \descript{} Selected out of 5k applications to be awarded the Science Academies Summer Research Fellowship in Summer 2021. \sectionsep \runsubsection{MITACS Globalink Research Internship, 2021} \descript{} Selected for a competitive research internship organised for international undergraduates to work under a professor at a Canadian university. \sectionsep \runsubsection{First Rank in 2nd and 3rd Year Semester Examinations (2019-21)} \descript{} Secured the highest marks in the Department of Computer Science and Engineering for the 2nd as well as 3rd Year Semester Examinations. \sectionsep \runsubsection{Smart India Hackathon, 2020} \descript{} Winner in the Student Innovation Category under Sustainable Environment Track. \sectionsep \runsubsection{The Bengal Chamber Technology Quiz, 2020} \descript{} Achieved 1st position in the National Finals out of 50 teams. \sectionsep \runsubsection{CBSE Merit Certificate} \descript{} Top 0.1\% of successful candidates in Computer Science in AISSCE 2018. \sectionsep \end{minipage} \end{document}
{"hexsha": "b551fc48f1b28ec1a1ee204215791d2c65e79d1b", "size": 8300, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Resume/Pritthijit_Nath_Resume.tex", "max_stars_repo_name": "nathzi1505/CVandResume", "max_stars_repo_head_hexsha": "47d46d44040f98cd5d8a5c4eaa845bc5567383f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Resume/Pritthijit_Nath_Resume.tex", "max_issues_repo_name": "nathzi1505/CVandResume", "max_issues_repo_head_hexsha": "47d46d44040f98cd5d8a5c4eaa845bc5567383f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Resume/Pritthijit_Nath_Resume.tex", "max_forks_repo_name": "nathzi1505/CVandResume", "max_forks_repo_head_hexsha": "47d46d44040f98cd5d8a5c4eaa845bc5567383f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7280334728, "max_line_length": 185, "alphanum_fraction": 0.7237349398, "num_tokens": 2151}
import numpy as np
import h5py
from scipy.special import comb


class BullseyeData:
    def __init__(self, n, eps, copies=1, scale_4=False):
        eps_list = [0.025, 0.05, 0.075, 0.1, 0.125, -1, -2]
        assert eps in eps_list
        a, b = 0.25, 0.5
        c, d = 0.75, 1.0

        self.n = n
        self.copies = copies
        self.eps = eps
        if eps == 0.025:
            self.ground_truth = 2.4026 * copies
        elif eps == 0.05:
            self.ground_truth = 1.8095 * copies
        elif eps == 0.075:
            self.ground_truth = 1.5040 * copies
        elif eps == 0.1:
            self.ground_truth = 1.3163 * copies
        elif eps == 0.125:
            self.ground_truth = 1.1931 * copies
        elif eps == -1:
            self.ground_truth = (2.4026 + 1.8095 + 1.5040 + 1.3163 + 1.1931) * (1 / 5) * copies
        elif eps == -2:
            # W is 0-mean gaussian, eps is 0.025 if W<0, 0.125 if W>=0.
            # ground truth is I(X;Y|W)
            self.ground_truth = (2.4026 + 1.1931) * 0.5 * copies

        # scale by 4 (only meaningful for the positive eps settings)
        if scale_4:
            a = 4 * a
            b = 4 * b
            c = 4 * c
            d = 4 * d
            eps = 4 * eps

        # init
        idx = np.random.permutation(n)
        self.R = np.zeros((n, copies))
        self.T = np.zeros((n, copies))
        self.Y = np.zeros((n, copies))
        self.X = np.zeros((n, 2 * copies))
        for i in range(copies):
            self.R[:, i] = np.hstack((np.random.uniform(a, b, size=int(n / 2)),
                                      np.random.uniform(c, d, size=n - int(n / 2))))[idx]

        if eps == -1:
            # per-sample noise level, drawn from the first three eps values
            self.e = np.random.choice(3, n, replace=True).reshape(-1, 1)
            N = np.zeros(self.R.shape)
            for i in range(3):
                N[self.e == i] = np.random.uniform(-eps_list[i], eps_list[i], size=N[self.e == i].shape)
            self.Y = self.R + N
        elif eps == -2:
            self.W = np.random.normal(0, 1, size=n).reshape(-1, 1)
            N = np.zeros(self.R.shape)
            N[self.W < 0] = np.random.uniform(-0.025, 0.025, size=N[self.W < 0].shape)
            N[self.W >= 0] = np.random.uniform(-0.125, 0.125, size=N[self.W >= 0].shape)
            self.Y = self.R + N
            self.e = self.W >= 0
        else:
            N = np.random.uniform(-eps, eps, size=self.R.shape)
            self.Y = self.R + N

        for i in range(copies):
            self.T[:, i] = np.random.uniform(0, 2 * np.pi, size=n)
            self.X[:, i] = self.R[:, i] * np.cos(self.T[:, i])
            self.X[:, i + copies] = self.R[:, i] * np.sin(self.T[:, i])

    def make_X_data(self, dest, include_polar=False):
        with h5py.File(dest, "w") as f:
            f.create_dataset("X", data=np.expand_dims(self.X, 2))
            f.create_dataset("Y", data=self.Y)

    def make_R_data(self, dest):
        with h5py.File(dest, "w") as f:
            f.create_dataset("X", data=np.expand_dims(self.R, 2))
            f.create_dataset("Y", data=self.Y)

    def make_XR_data(self, dest, include_polar=False):
        R = np.concatenate((self.R, np.zeros(self.R.shape)), axis=1)
        Z = np.stack((self.X, R), axis=2)
        with h5py.File(dest, "w") as f:
            f.create_dataset("X", data=Z)
            f.create_dataset("Y", data=self.Y)

    def make_XRT_data(self, dest, include_polar=False):
        R = np.concatenate((self.R, np.zeros(self.R.shape)), axis=1)
        T = np.concatenate((self.T, np.zeros(self.T.shape)), axis=1)
        Z = np.stack((self.X, R, T), axis=2)
        with h5py.File(dest, "w") as f:
            f.create_dataset("X", data=Z)
            f.create_dataset("Y", data=self.Y)

    def make_XRTe_data(self, dest, include_polar=False):
        # requires eps == -2, since self.W only exists in that setting
        R = np.concatenate((self.R, np.zeros(self.R.shape)), axis=1)
        T = np.concatenate((self.T, np.zeros(self.T.shape)), axis=1)
        e = np.concatenate((self.e, np.zeros(self.e.shape)), axis=1)
        W = np.concatenate((self.W, np.zeros(self.W.shape)), axis=1)
        Z = np.stack((self.X, R, T, e, W), axis=2)
        with h5py.File(dest, "w") as f:
            f.create_dataset("X", data=Z)
            f.create_dataset("Y", data=self.Y)
{"hexsha": "c1bcfe4a41f7bf532ac1d50b12aa7346d7ca1cd1", "size": 4146, "ext": "py", "lang": "Python", "max_stars_repo_path": "bullseye/bullseye.py", "max_stars_repo_name": "syanga/model-augmented-mutual-information", "max_stars_repo_head_hexsha": "a7c0ccb3b32320e9c45c266d668a879e240d39e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-10T05:45:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-06T11:44:42.000Z", "max_issues_repo_path": "bullseye/bullseye.py", "max_issues_repo_name": "syanga/model-augmented-mutual-information", "max_issues_repo_head_hexsha": "a7c0ccb3b32320e9c45c266d668a879e240d39e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bullseye/bullseye.py", "max_forks_repo_name": "syanga/model-augmented-mutual-information", "max_forks_repo_head_hexsha": "a7c0ccb3b32320e9c45c266d668a879e240d39e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6902654867, "max_line_length": 120, "alphanum_fraction": 0.5147129764, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1270}
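A minimal usage sketch for the BullseyeData class above. The import path is an assumption based on the file's repository location (bullseye/bullseye.py); nothing below is part of the original module.

import numpy as np
from bullseye.bullseye import BullseyeData

np.random.seed(0)                        # reproducible draws
data = BullseyeData(n=10_000, eps=0.05)  # I(X;Y) ground truth = 1.8095
print("ground truth:", data.ground_truth)

data.make_X_data("bullseye_X.h5")        # Cartesian (X, Y) pairs
data.make_XRT_data("bullseye_XRT.h5")    # adds radius R and angle T channels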
[STATEMENT] lemma space_in_measure_of[simp]: "\<Omega> \<in> sets (measure_of \<Omega> M \<mu>)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<Omega> \<in> sets (measure_of \<Omega> M \<mu>) [PROOF STEP] by (subst sets_measure_of_conv) (auto simp: sigma_sets_top)
{"llama_tokens": 108, "file": null, "length": 1}
from copy import deepcopy from itertools import product import numpy as np with open("day17.txt", "r") as f: data = np.array([list(line) for line in f.read().splitlines()]) space = np.zeros(data.shape, dtype=int) space[np.where(data == "#")] = 1 space = np.expand_dims(space, axis=0) neighbours = np.array( [neighbour for neighbour in product([-1, 0, 1], repeat=3) if neighbour != (0, 0, 0)] ) def update_node(i, j, k): dims = space.shape neigh_values = [] for delta in neighbours: neigh_coords = np.array([i, j, k]) + delta if all(0 <= coord < dims[i] for i, coord in enumerate(neigh_coords)): neigh_values.append( space[neigh_coords[0], neigh_coords[1], neigh_coords[2]] ) node_state = space[i][j][k] n_neighs = sum(neigh_values) if (node_state == 1 and n_neighs in [2, 3]) or (node_state == 0 and n_neighs == 3): return 1 else: return 0 n_cycles = 6 for n in range(n_cycles): space = np.pad(space, 1) new_space = deepcopy(space) for i in range(new_space.shape[0]): for j in range(new_space.shape[1]): for k in range(new_space.shape[2]): # print(i, j, k) new_space[i, j, k] = update_node(i, j, k) space = new_space print(space.sum())
{"hexsha": "8e910fd03a1fbabd743a664e793fe7a7e9c0b657", "size": 1323, "ext": "py", "lang": "Python", "max_stars_repo_path": "2020/day17-1.py", "max_stars_repo_name": "alvaropp/AdventOfCode2017", "max_stars_repo_head_hexsha": "2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2020/day17-1.py", "max_issues_repo_name": "alvaropp/AdventOfCode2017", "max_issues_repo_head_hexsha": "2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2020/day17-1.py", "max_forks_repo_name": "alvaropp/AdventOfCode2017", "max_forks_repo_head_hexsha": "2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0, "max_line_length": 88, "alphanum_fraction": 0.5956160242, "include": true, "reason": "import numpy", "num_tokens": 379}
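The triple loop in update_node above recounts neighbours cell by cell. The same rule can be applied to every cell at once with a single 3-D convolution; a sketch follows, using scipy.ndimage.convolve (an assumption: SciPy is not used by the original script, so this is an alternative, not the author's method).

import numpy as np
from scipy.ndimage import convolve

def step(space):
    kernel = np.ones((3, 3, 3), dtype=int)
    kernel[1, 1, 1] = 0  # don't count the cell itself
    counts = convolve(space, kernel, mode="constant", cval=0)
    # same rule as update_node: survive on 2-3 neighbours, be born on exactly 3
    alive = (space == 1) & np.isin(counts, [2, 3])
    born = (space == 0) & (counts == 3)
    return (alive | born).astype(int)

# usage, mirroring the loop in the original:
# for _ in range(n_cycles):
#     space = step(np.pad(space, 1))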
export bfgs_bl
export bfgs_rc
export bfgsH

"""
Basic models...
bfgs_bl (line search)
bfgs_rc (trust region - Steihaug-Toint)

Options:
- atol: absolute tolerance for the first order condition (default: 1e-6)
- rtol: relative tolerance for the first order condition (default: 1e-6)
- max_eval: maximum number of [functions] evaluations, use ≤ 0 for unlimited (default: 1000)
- max_iter: maximum number of iterations, use ≤ 0 for unlimited (default: 0)
- max_time: maximum elapsed time in seconds, use ≤ 0 for unlimited (default: 10)

Disclaimers for the developer:
- nlp should be the only mandatory argument
- these five options are the current default for other JSO-compliant solvers
- always return a GenericExecutionStats
"""
function bfgs_bl(
  nlp::AbstractNLPModel;
  atol::Real = 1e-6,
  rtol::Real = 1e-6,
  max_eval::Int = 1000,
  max_iter::Int = 0,
  max_time::Float64 = 10.0
)

  if !unconstrained(nlp)
    error("Problem is not unconstrained")
  end

  x = copy(nlp.meta.x0)
  f(x) = obj(nlp, x)
  ∇f(x) = grad(nlp, x)

  fx = f(x)
  ∇fx = ∇f(x)

  n = length(x)
  Hx = Matrix(1.0I, n, n)

  ϵ = atol + rtol * norm(∇fx)
  t₀ = time()

  iter = 0
  Δt = time() - t₀
  solved = norm(∇fx) < ϵ # First order stationary
  tired = neval_obj(nlp) ≥ max_eval > 0 || iter ≥ max_iter > 0 || Δt ≥ max_time > 0 # Excess time, iteration, evaluations

  # status must be one of a few options found in SolverTools.show_statuses()
  # A good default value is :unknown.
  status = :unknown

  # log_header is up for some rewrite in the future. For now, it simply prints the column names with some spacing
  @info log_header(
    [:iter, :fx, :ngx, :nf, :Δt],
    [Int, Float64, Float64, Int, Float64],
    hdr_override=Dict(:fx => "f(x)", :ngx => "‖∇f(x)‖", :nf => "#f")
  )
  # log_row uses the type information of each value, thus we use `Any` here.
  @info log_row(
    Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
  )

  # The main loop starts here
  while !(solved || tired)
    α = 1.0
    η = 1e-2
    d = -Hx * ∇fx
    slope = dot(d, ∇fx)

    # Armijo backtracking line search
    x⁺ = x + α * d
    f⁺ = f(x⁺)
    while f⁺ ≥ fx + η * α * slope
      α = α / 2
      x⁺ = x + α * d
      f⁺ = f(x⁺)
      if α < 1e-8
        status = :small_step
        break
      end
    end

    if status != :unknown # small_step
      break
    end

    s = α * d
    y = ∇f(x⁺) - ∇fx
    if dot(s, y) <= 0
      @warn("sᵀy = $(dot(s,y))")
    else
      Hx = bfgsH(Hx, s, y)
    end

    x .= x⁺
    fx = f⁺
    ∇fx = ∇f(x)

    iter += 1
    Δt = time() - t₀
    solved = norm(∇fx) < ϵ # First order stationary
    tired = neval_obj(nlp) ≥ max_eval > 0 || iter ≥ max_iter > 0 || Δt ≥ max_time > 0 # Excess time, iteration, evaluations

    @info log_row(
      Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
    )
  end

  if solved
    status = :first_order
  elseif tired
    if neval_obj(nlp) ≥ max_eval > 0
      status = :max_eval
    elseif iter ≥ max_iter > 0
      status = :max_iter
    elseif Δt ≥ max_time > 0
      status = :max_time
    end
  end

  return GenericExecutionStats(
    status,
    nlp,
    solution=x,
    objective=f(x),
    dual_feas=norm(∇fx),
    elapsed_time=Δt,
    iter=iter
  )
end

# BFGS update of the inverse Hessian approximation H
function bfgsH(H, s, y)
  ρ = 1 / dot(s, y)
  H = (I - ρ * s * y') * H * (I - ρ * y * s') + ρ * s * s'
  return H
end

# TRUST REGION
function bfgs_rc(
  nlp::AbstractNLPModel;
  atol::Real = 1e-6,
  rtol::Real = 1e-6,
  max_eval::Int = 50000,
  max_iter::Int = 0,
  max_time::Float64 = 10.0
)

  if !unconstrained(nlp)
    error("Problem is not unconstrained")
  end

  # Given the starting point
  x = copy(nlp.meta.x0)
  f(x) = obj(nlp, x)
  ∇f(x) = grad(nlp, x)

  # initial Hessian approximation
  n = length(x)

  # trust region radius
  Δ = 1.0

  fx = f(x)
  ∇fx = ∇f(x)

  # convergence tolerance
  ϵ = atol + rtol * norm(∇fx)

  # parameters η and r
  η = 0.01  # ∈ (0,1e-3)
  r = 0.001 # ∈ (0,1)

  t₀ = time()

  iter = 0
  Δt = time() - t₀
  solved = norm(∇fx) < ϵ # First order stationary
  tired = neval_obj(nlp) ≥ max_eval > 0 || iter ≥ max_iter > 0 || Δt ≥ max_time > 0 # Excess time, iteration, evaluations

  # status must be one of a few options found in SolverTools.show_statuses()
  # A good default value is :unknown.
  status = :unknown

  # log_header is up for some rewrite in the future. For now, it simply prints the column names with some spacing
  @info log_header(
    [:iter, :fx, :ngx, :nf, :Δt],
    [Int, Float64, Float64, Int, Float64],
    hdr_override=Dict(:fx => "f(x)", :ngx => "‖∇f(x)‖", :nf => "#f")
  )
  # log_row uses the type information of each value, thus we use `Any` here.
  @info log_row(
    Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
  )

  # The main loop starts here
  # determine the first B
  # B⁰ₖ = δₖI, where δₖ = dot(y,y)/dot(s,y), Nocedal pg. 178 (eq. 7.20)
  B = Matrix(1.0I, n, n)
  s = Steighaug(∇fx, B, Δ)
  y = ∇f(x .+ s) - ∇fx
  γ = dot(y,y)/dot(s,y)
  if γ >= 1.0
    B = Matrix(γ*I, n, n)
  end

  while !(solved || tired)
    # compute sₖ by solving the subproblem
    # (6.27) this is where Steihaug comes in
    s = Steighaug(∇fx, B, Δ)
    y = ∇f(x .+ s) - ∇fx

    ared = fx - f(x .+ s)
    pred = -(dot(∇fx, s) + 1/2 * dot(s, B*s))
    ρ = ared/pred
    if ρ < η
      Δ = Δ/2
      if Δ < 10e-50
        @error("Δ too small")
        status = :small_step
      end
    else
      x = x + s
      fx = f(x)
      ∇fx = ∇f(x)
      if ρ > 0.75 && norm(s) > 0.8 * Δ
        Δ = 2*Δ
        if Δ > 10e50
          @error("Δ too large")
          status = :user
        end
      end
    end

    if status != :unknown # small_step
      break
    end

    yBs = y .- B*s
    syBs = dot(s, yBs)
    if abs(syBs) > r*norm(s,2)*norm(yBs,2) # was greater-or-equal, left it as strictly greater
      # in some cases y - B*s can be zero... and in that case the update
      # of B would be a division by zero.
      # (6.26) - avoids updates when the denominator is small.
      # Symmetric-rank-1 method: prevents breakdown; gives better
      # approximations to the true Hessian matrix.
      B = B + (yBs*yBs')/syBs
    end

    iter += 1
    Δt = time() - t₀
    solved = norm(∇fx) < ϵ # First order stationary
    tired = neval_obj(nlp) ≥ max_eval > 0 || iter ≥ max_iter > 0 || Δt ≥ max_time > 0 # Excess time, iteration, evaluations

    @info log_row(
      Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
    )
  end

  if solved
    status = :first_order
  elseif tired
    if neval_obj(nlp) ≥ max_eval > 0
      status = :max_eval
    elseif iter ≥ max_iter > 0
      status = :max_iter
    elseif Δt ≥ max_time > 0
      status = :max_time
    end
  end

  return GenericExecutionStats(
    status,
    nlp,
    solution=x,
    objective=f(x),
    dual_feas=norm(∇fx),
    elapsed_time=Δt,
    iter=iter
  )
end

function Steighaug(gx, B, Δ; ϵ = 1.0e-4)
  m = length(gx)
  zx = zeros(m)
  r = gx
  d = -r
  rx = r
  z = zx
  normr = norm(r)
  if normr < ϵ
    return z
  end

  # while still inside the trust region
  k = 0
  while normr > ϵ || k < m # CG takes at most m directions
    dotdBd = dot(d, B*d)
    @info("dotdBd: $dotdBd")
    if dotdBd ≤ 0
      # stop the method if direction dⱼ has non-positive curvature:
      # find τ ≥ 0 such that pk satisfies ||pk|| = Δk,
      # i.e., the intersection of the direction with the trust region
      m1, m2 = BhaskaraTop(z, d, Δ)
      return z + m2*d
    end
    dotrr = dot(r, r)
    α = dotrr/dotdBd
    @info("α: $α")
    zx = z + α*d
    if norm(zx) ≥ Δ
      # stop if zⱼ₊₁ violates the trust-region bound:
      # find τ ≥ 0 such that pk satisfies ||pk|| = Δk,
      # i.e., the intersection of the direction with the trust region
      m1, m2 = BhaskaraTop(z, d, Δ)
      return z + m1*d
    end
    rx = r + α*B*d # Conjugate Gradient

    # if α = 0 => rx = r, β = 1
    if norm(α) < ϵ^2
      #@error("α smaller than zero")
      return zeros(m)
    end
    if norm(rx) < ϵ
      return zx
    end
    β = dot(rx,rx)/dotrr
    d = -rx + β*d
    r = rx
    normr = norm(r)
    z = zx
    k += 1
  end
  if k >= m
    #@error("did not reach the boundary; the direction is a multiple of itself")
    return zeros(m)
  end
end
function BhaskaraTop(z, d, Δ)
  # intersection of the ray z + t*d with the trust-region boundary ||p|| = Δ,
  # via the quadratic formula
  a = dot(d, d)
  b = 2dot(z, d)
  c = dot(z, z) - Δ^2
  Delta = b^2 - 4*a*c
  if Delta < 0
    @warn("Δ<0")
  else
    t1 = (-b + sqrt(Delta))/2a
    t2 = (-b - sqrt(Delta))/2a
    m1 = max(t1, t2)
    m2 = min(t1, t2)
    return m1, m2
  end
end
{"hexsha": "0b2b62301222dcbd7c3ba279b3d37b0d788a0b16", "size": 9262, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/bfgs_basicos.jl", "max_stars_repo_name": "FKrukoski/Projeto2Solvers.jl", "max_stars_repo_head_hexsha": "8d48093e9bc1fca9470f568c2415df7bdbaa1672", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/bfgs_basicos.jl", "max_issues_repo_name": "FKrukoski/Projeto2Solvers.jl", "max_issues_repo_head_hexsha": "8d48093e9bc1fca9470f568c2415df7bdbaa1672", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/bfgs_basicos.jl", "max_forks_repo_name": "FKrukoski/Projeto2Solvers.jl", "max_forks_repo_head_hexsha": "8d48093e9bc1fca9470f568c2415df7bdbaa1672", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-21T15:00:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T15:00:49.000Z", "avg_line_length": 24.8978494624, "max_line_length": 124, "alphanum_fraction": 0.520837832, "num_tokens": 3221}
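The two building blocks of bfgs_bl above are the inverse-Hessian BFGS update (bfgsH) and Armijo backtracking. Here is a language-neutral NumPy sketch of both, kept deliberately close to the Julia code; it is illustrative only and not part of the package.

import numpy as np

def bfgs_update(H, s, y):
    """H+ = (I - ρ s yᵀ) H (I - ρ y sᵀ) + ρ s sᵀ with ρ = 1/sᵀy, as in bfgsH."""
    rho = 1.0 / np.dot(s, y)  # requires the curvature condition sᵀy > 0
    I = np.eye(len(s))
    V = I - rho * np.outer(s, y)
    return V @ H @ V.T + rho * np.outer(s, s)

def armijo(f, x, fx, g, d, eta=1e-2, alpha=1.0, alpha_min=1e-8):
    """Halve α until sufficient decrease holds, mirroring the loop in bfgs_bl."""
    slope = np.dot(g, d)
    while f(x + alpha * d) >= fx + eta * alpha * slope:
        alpha /= 2
        if alpha < alpha_min:
            break  # small-step failure, as in the Julia code
    return alpha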
// // TDF SDK // // Created by Sujan Reddy on 2019/03/28. // Copyright 2019 Virtru Corporation // #define BOOST_TEST_MODULE test_key_access_object_suite #include "asym_decryption.h" #include "asym_encryption.h" #include "crypto/bytes.h" #include "crypto/crypto_utils.h" #include "crypto/rsa_key_pair.h" #include "entity_object.h" #include "logger.h" #include "network/http_client_service.h" #include "mock_network_interface.h" #include "policy_object.h" #include "sdk_constants.h" #include "tdf.h" #include "tdf_exception.h" #include "tdf_logging_interface.h" #include "tdfbuilder.h" #include <boost/filesystem.hpp> #include <boost/test/included/unit_test.hpp> #include <iostream> #include <stdio.h> #include "nlohmann/json.hpp" #ifdef _WINDOWS #include <direct.h> #define GetCurrentDir _getcwd #else #include <unistd.h> #define GetCurrentDir getcwd #endif #define TEST_ENCRYPT_DECRYPT 1 #define ENABLE_TEST 0 //#define KAS_LOCALHOST // TODO: Temporary place holder should be moved to virtru sdk. // Update the appid and user for testing this code. constexpr auto user = "tdf-user@virtrucanary.com"; constexpr auto kasUrl = "https://api-develop01.develop.virtru.com/kas"; // const auto OIDCAccessToken = R"(eyJhbGciOiJSUzI1NiIsInR5cCIg // OiAiSldUIiwia2lkIiA6ICJGRjJKM0o5TjNGQWQ0dnpLVDd2aEloZE1DTEVudE1PejVtLWhGNm5ScFNZIn0. // eyJleHAiOjE2MTQxMTgzNzgsImlhdCI6MTYxNDExODA3OCwianRpIjoiNWQ4OTczYjYtYjg5Yy00OTBjLWIz // YTYtMTM0ZDMxOTYxZTM3IiwiaXNzIjoiaHR0cDovL2xvY2FsaG9zdDo4MDgwL2F1dGgvcmVhbG1zL2V4YW1w // bGUtcmVhbG0iLCJhdWQiOiJhY2NvdW50Iiwic3ViIjoiN2ZkZGJkYWQtNDlmYS00NWU4LTg4MzItMzI3ZGI4 // ZjU1MDE1IiwidHlwIjoiQmVhcmVyIiwiYXpwIjoiZXhhbXBsZS1yZWFsbS1jbGllbnQiLCJzZXNzaW9uX3N0 // YXRlIjoiOTA0MTc4NTAtNWEwNC00ZmU1LTgxZWMtOTkzZDY1MmVhYmY5IiwiYWNyIjoiMSIsInJlYWxtX2Fj // Y2VzcyI6eyJyb2xlcyI6WyJvZmZsaW5lX2FjY2VzcyIsInVtYV9hdXRob3JpemF0aW9uIl19LCJyZXNvdXJj // ZV9hY2Nlc3MiOnsiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50 // LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwic2NvcGUiOiJwcm9maWxlIGVtYWlsIiwic3VwaXJpIjoidG9r // ZW5fc3VwaXJpIiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJjbGFpbSI6eyJuYW1lIjp7InVzZXJuYW1lIjoi // Zm9vIn19LCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJqZWZmNS1leGFtcGxlIn0.NfM272HpLfyHACNJrXniyPF5 // klXjfB8QbhHBt_aTlZUF1-wO7W4-3qL02bMYe71dg_swR5WLFR0SL-zqa9zeKfsegL8E-lEeRSCcFwTvoSXP // XSZ06tafFmSNxuA88MogG_3ZBhi9sUL5uAXtCoC3Rkb6xpb-JdHp42n68s_Mm1teCU2wx2rS6O1k23YCK3lY // _xRsmV62sQ_tx973N5u7YHPxWsKVi-gHNlW3N0x23bRsEk-qcIq-3ug5cLOyADlNUeApTmug9lXGJxqxo3jl // ugnuf6VUtMwI1x8xSbePwC1pmGAfzZX2pS0kEUiGSHdH7flzibrMG70IXlutmS3e8Q)"; using namespace virtru::network; using namespace virtru::crypto; using namespace virtru; #if TEST_ENCRYPT_DECRYPT std::string getCurrentWorkingDir() { char buff[FILENAME_MAX]; GetCurrentDir(buff, FILENAME_MAX); std::string current_working_dir(buff); return current_working_dir; } HttpHeaders GetHeaders() { std::ostringstream authHeaderValue; HttpHeaders headers = {{kContentTypeKey, kContentTypeJsonValue}}; return headers; } std::string ReplaceAll(std::string str, const std::string &from, const std::string &to) { size_t start_pos = 0; while ((start_pos = str.find(from, start_pos)) != std::string::npos) { str.replace(start_pos, from.length(), to); start_pos += to.length(); // Handles case where 'to' is a substring of 'from' } return str; } typedef std::tuple<const std::string, const std::string> keyPair; //We do this a lot keyPair GetKeypair() { auto keyPairOf4096 = RsaKeyPair::Generate(4096); auto privateKey = keyPairOf4096->PrivateKeyInPEMFormat(); auto publicKey = 
keyPairOf4096->PublicKeyInPEMFormat(); return {publicKey, privateKey}; } void SourceAndSinkInterfaceEncrypt(TDF *tdf, std::string plainText, std::stringstream *ioStream, ByteArray<5> *buffer) { // ByteArray<5> buffer; std::istringstream inputStream(plainText); auto encryptSourceCB = [&inputStream, buffer](virtru::Status &status) -> BufferSpan { if (inputStream.read(toChar(buffer->data()), buffer->size())) { status = Status::Success; return {(const std::uint8_t *)buffer->data(), buffer->size()}; ; } else if (inputStream.eof()) { status = Status::Success; return {(const std::uint8_t *)buffer->data(), static_cast<std::size_t>(inputStream.gcount())}; } else { status = virtru::Status::Failure; return {nullptr, 0}; } }; auto encryptSinkCB = [ioStream](BufferSpan bufferSpan) { if (!(ioStream->write((const char *)(bufferSpan.data), bufferSpan.dataLength))) return Status::Failure; else return virtru::Status::Success; }; tdf->encryptData(encryptSourceCB, encryptSinkCB); } void SourceAndSinkInterfaceDecrypt(TDF *tdf, std::string plainText, std::stringstream *ioStream, ByteArray<5> *buffer) { auto decryptSourceCB = [&ioStream, buffer](virtru::Status &status) -> BufferSpan { if (ioStream->read(toChar(buffer->data()), buffer->size())) { status = Status::Success; return {(const std::uint8_t *)buffer->data(), buffer->size()}; ; } else if (ioStream->eof()) { status = Status::Success; return {(const std::uint8_t *)buffer->data(), static_cast<std::size_t>(ioStream->gcount())}; } else { status = virtru::Status::Failure; return {nullptr, 0}; } }; std::string decryptedText; auto decryptSinkCB = [&decryptedText](BufferSpan bufferSpan) { decryptedText.append((const char *)bufferSpan.data, bufferSpan.dataLength); return virtru::Status::Success; }; tdf->decryptData(decryptSourceCB, decryptSinkCB); BOOST_TEST(plainText == decryptedText); } std::unique_ptr<TDFBuilder> createTDFBuilder(LogLevel logLevel, KeyAccessType keyAccessType, Protocol protocol, keyPair kasKeys, keyPair clientKeys) { auto headers = GetHeaders(); std::string mimeType{"text/plain"}; std::unordered_map<std::string, std::string> metaData; metaData.insert({"displayName", "tdf-cpp-unit-tests"}); metaData.insert({"fileProvider", "tdf-cpp-sdk"}); auto tdfbuilderPtr = std::unique_ptr<TDFBuilder>(new TDFBuilder(user)); tdfbuilderPtr->setKasUrl(kasUrl) .setKasPublicKey(std::get<0>(kasKeys)) .setHttpHeaders(headers) .enableConsoleLogging(logLevel) .setDefaultSegmentSize(2 * 1024 * 1024) .setPrivateKey(std::get<1>(clientKeys)) .setPublicKey(std::get<0>(clientKeys)) .enableOIDC(true) .setProtocol(protocol) .setEncryptionObject(KeyType::split, CipherType::Aes256GCM) .setIntegrityAlgorithm(IntegrityAlgorithm::HS256, IntegrityAlgorithm::GMAC) .setPayloadMimeType(mimeType) .setMetaData(metaData); if (keyAccessType == KeyAccessType::Remote) { tdfbuilderPtr->setKeyAccessType(KeyAccessType::Remote); } else { tdfbuilderPtr->setKeyAccessType(KeyAccessType::Wrapped); } if (protocol == Protocol::Html) { std::string secureReaderUrl{"https://secure-develop01.develop.virtru.com/start?htmlProtocol=1"}; std::string currentDir = getCurrentWorkingDir(); std::string htmlTemplateFilepath{currentDir}; #ifdef _WINDOWS htmlTemplateFilepath.append("\\data\\tdf-html-template.html"); #else htmlTemplateFilepath.append("/data/tdf-html-template.html"); #endif // Copy the html template file data into the buffer. 
std::string htmlTemplateData; std::ifstream ifs(htmlTemplateFilepath.data(), std::ios::binary | std::ios::ate); if (!ifs) { std::string errorMsg{"Failed to open file for reading - "}; errorMsg.append(htmlTemplateData); ThrowException(std::move(errorMsg), VIRTRU_SYSTEM_ERROR); } std::ifstream::pos_type fileSize = ifs.tellg(); htmlTemplateData.reserve(fileSize); ifs.seekg(0, std::ios::beg); htmlTemplateData.assign(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>()); tdfbuilderPtr->setHtmlTemplateData(std::move(htmlTemplateData)); tdfbuilderPtr->setSecureReaderURL(secureReaderUrl); } return tdfbuilderPtr; } #endif //TEST_ENCRYPT_DECRYPT //This suite tests OIDC and KAS v2 endpoints specifically. BOOST_AUTO_TEST_SUITE(test_tdf_builder_v2_suite) using namespace virtru; //This simply simulates a KAS rewrap operation (decrypt wrapped key wiht Kas privkey, rewrap/encrypt with client pubkey) //for testing/mock purposes static std::string FauxKASKeyRewrap(const std::string clientWrappedKey, const std::string kasPrivKey, const std::string clientPubKey) { // std::cout << "Kas privkey for test: " << kasPrivKey << std::endl; auto kasDecoder = AsymDecryption::create(kasPrivKey); std::vector<gsl::byte> unwrapBuffer(kasDecoder->getOutBufferSize()); auto unwrapWriteBuf = toWriteableBytes(unwrapBuffer); kasDecoder->decrypt(toBytes(clientWrappedKey), unwrapWriteBuf); WrappedKey wrappedKey; std::copy(unwrapWriteBuf.begin(), unwrapWriteBuf.end(), wrappedKey.begin()); // std::cout << "Unwrapped key (b64)" << base64Encode(toBytes(wrappedKey)) << std::endl; //Now do rewrap auto encoder = AsymEncryption::create(clientPubKey); std::vector<gsl::byte> outBuffer(encoder->getOutBufferSize()); auto writeableBytes = toWriteableBytes(outBuffer); encoder->encrypt(toBytes(wrappedKey), writeableBytes); // std::cout << "Rewrapped key (b64)" << base64Encode(writeableBytes) << std::endl; return base64Encode(writeableBytes); } static std::string BuildFakedRewrapResponse( const std::string precedingUpsertRequest, std::tuple<std::string, std::string> kasKeypair, std::tuple<std::string, std::string> clientKeypair) { auto parsedBody = nlohmann::json::parse(precedingUpsertRequest); std::string wrappedKeyAsStr = parsedBody[kKeyAccess][kWrappedKey]; auto decodedWrappedKey = base64Decode(wrappedKeyAsStr); auto rewrappedKey = FauxKASKeyRewrap(decodedWrappedKey, std::get<1>(kasKeypair), std::get<0>(clientKeypair)); const auto fakedRewrapResponseJSON = R"({ "entityWrappedKey": "", "kasWrappedKey":"RFnrVDn9NpbbCOclNNWt1nsiz3Amu1px9l2OMPy85asiFEVCUkdD/DgVKavbsqg50Ku6Ldlf5WCx9tiiKFeMuVNI8/8NenHduPE6qf85/Jvc2Ix8TziCq6zJHU7eDyz2QprnD2bY03lpTT6K0qoSAiaU8qq2TFhLlYQMRiD0a/ORV4VkyCGMFjfnP7YyE/Gg6RsTfpsaCzva37Njcky6SNY6zb//e7f4rp9x/zy4lZRjd4RGSHNH8tTCS9z0S4w5mhXfsV66am3S8LHNAnSKCM4cjptFMu7gtr5z9eLgVdplX06pUswG+0zYaNhP/1Nu/t+ClOzzPTxW7/hcaTT+Zw==", "metadata":{ "acmContract":{ "accessCount":0, "accessPercent":"0.00", "accessedBy":[ ], "attributes":[ ], "authorizations":[ ], "authorizedUser":"tdf-user@virtrucanary.com", "displayName":"sdk-test.pdf", "forwardCount":0, "isInternal":true, "isManaged":false, "isOwner":true, 
"key":"4459eb5439fd3696db08e72534d5add67b22cf7026bb5a71f65d8e30fcbce5ab22144542524743fc381529abdbb2a839d0abba2dd95fe560b1f6d8a228578cb95348f3ff0d7a71ddb8f13aa9ff39fc9bdcd88c7c4f3882abacc91d4ede0f2cf6429ae70f66d8d379694d3e8ad2aa12022694f2aab64c584b95840c4620f46bf391578564c8218c1637e73fb63213f1a0e91b137e9b1a0b3bdadfb363724cba48d63acdbfff7bb7f8ae9f71ff3cb8959463778446487347f2d4c24bdcf44b8c399a15dfb15eba6a6dd2f0b1cd02748a08ce1c8e9b4532eee0b6be73f5e2e055da655f4ea952cc06fb4cd868d84fff536efedf8294ecf33d3c56eff85c6934fe67", "keyAccess":{ "details":{ "body":"RFnrVDn9NpbbCOclNNWt1nsiz3Amu1px9l2OMPy85asiFEVCUkdD/DgVKavbsqg50Ku6Ldlf5WCx9tiiKFeMuVNI8/8NenHduPE6qf85/Jvc2Ix8TziCq6zJHU7eDyz2QprnD2bY03lpTT6K0qoSAiaU8qq2TFhLlYQMRiD0a/ORV4VkyCGMFjfnP7YyE/Gg6RsTfpsaCzva37Njcky6SNY6zb//e7f4rp9x/zy4lZRjd4RGSHNH8tTCS9z0S4w5mhXfsV66am3S8LHNAnSKCM4cjptFMu7gtr5z9eLgVdplX06pUswG+0zYaNhP/1Nu/t+ClOzzPTxW7/hcaTT+Zw==", "encoding":"base64" }, "keyId":"ae61e3bd-304c-48df-ad53-809c1518ccf3", "type":"string", "version":"3.0.0" }, "leaseTime":60000, "policyId":"2f0ff593-55ac-4ee7-ba23-8e246a7524da", "recipientCount":0, "sentFrom":"tdf-user@virtrucanary.com", "state":"active", "type":"file" } } })"; auto rewrapResponse = nlohmann::json::parse(fakedRewrapResponseJSON); rewrapResponse[kEntityWrappedKey] = rewrappedKey; return rewrapResponse; } BOOST_AUTO_TEST_CASE(test_tdf_builder_basic) { #if ENABLE_TEST auto headers = GetHeaders(); // constexpr auto entityObjectJsonStr = "{\n" // " \"aliases\": [\"sreddy@trusteddataformat.org\", \"sreddy@trusteddataformat.net\"], \n" // " \"attributes\": [\n" // " {\n" // " \"jwt\": \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1cmwiOiJodHRwczovL2FhLnZpcnRydS5jb20vYXR0ci91bmlxdWUtaWRlbnRpZmllci92YWx1ZS9mZTEzZjBmYS0xNmU1LTQ3ZDYtODdjYy1hOTI1MzJhYzcxYzQiLCJuYW1lIjoiZmUxM2YwZmEtMTZlNS00N2Q2LTg3Y2MtYTkyNTMyYWM3MWM0IiwiaWF0IjoxNTUzNDg1MjQ3LCJleHAiOjE1NTM1NzE2NDd9.qg8BYLJ6ZKu6e641_NLfjlghDwWexEr_YUCadUyPX-B1tonWIJUjGddhx2cz5H8Ldxpj0AurilCz2xAIcRItwm9-0M3RlNUAZ7l5wYahRnSWijwV4lL7Yvm_HwMYgrrVNvcUwj5cqpMREHfCDScS-lSb89zhq76dypVmkgmhZe3t9lD1fTSJKCJylc7X9AzbWzLc0fDQH702yU__ZVOVkBwTO2jJ4ovBDPB0w9LgCEZ-9pzvdUiTdYuhZ2PzQBTNHlK1xxQQCu148uuiTw8Fk_bs7efuGgUU7zfrKR2Lvgw5QLDpavL11HnXIKZihxzJbcrjBdKQCK0V7v3i7F2CkA\"\n" // " }, \n" // " {\n" // " \"jwt\": \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1cmwiOiJodHRwczovL2FhLnZpcnRydS5jb20vYXR0ci9wcmltYXJ5LW9yZ2FuaXphdGlvbi92YWx1ZS9mZTEzZjBmYS0xNmU1LTQ3ZDYtODdjYy1hOTI1MzJhYzcxYzQiLCJuYW1lIjoiZmUxM2YwZmEtMTZlNS00N2Q2LTg3Y2MtYTkyNTMyYWM3MWM0IiwiaWF0IjoxNTUzNDg1MjQ3LCJleHAiOjE1NTM1NzE2NDd9.TBO2RbLIESO5h3n8Cop4DVYJNhI46nfaAIuzUuTJ73v5j0myplcj3amNyW_PPRxSauMhG5gwhkSrYHgnO-f423a7YnGW1SmfqmpEFd8s5j1yGRIytJsuaVD0B5nfrjSkS4Bu5lV8J2pYmnanZkr_Mo6oj2_IhITk0lVBTgri-PTUfGKNCFfCI3bpFH2UwbvNzJD6wniW5C9rOG7oBMSbDTOK2HJK_3mf1DifzoH0iQY2r5fyJzomtYDd2Z4BGtPnWpU6wAF3rcfOYqYDW1KA74PsPZm2kaqC7Icq1PvqFglX3QwpmvQqpEvzWSNS3nNFui5yjupkHSlXfU24CEn3EA\"\n" // " }, \n" // " {\n" // " \"jwt\": \"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1cmwiOiJodHRwczovL2FhLnZpcnRydS5jb20vYXR0ci9zdXBlci1hZG1pbi92YWx1ZS90cnVlIiwibmFtZSI6dHJ1ZSwiaWF0IjoxNTUzNDg1MjQ3LCJleHAiOjE1NTM1NzE2NDd9.pkvAvRxU3pcqTCvUCuJtCwEg8UnXkLGKUgdH7aBnHWqCix_CXt_OqJ5T-b58xlszelyvcdmvTQxyg1_aHXOKg5wDQQaA6Ur3NsbYr3oskrPI8dE8gIK326NPpqrjrpBGGXkPoJHkXwGO5GfqtoNpuFWd8Y5UDLmH1QKegsBJQAVoV6JpWGvPyP_apAL8cNPNiTHuAL2RyE17ArhziHu6Ujaq_faJaC8sghSejGjW6SpdWiSF9Kw0rV4dZWjRsRu9qbWf3grMIMqEoP-3mSlpxhpDPWTS0hRaCnSvpneQynFvhbKMA2XA0z29Z9i6JueQisrjKVJ1PiaYvZIWNzz3OA\"\n" // " }\n" // " ],\n" // " \n" // " \"cert\": 
\"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySWQiOiJzcmVkZHlAdmlydHJ1ZGVwbG95LnVzIiwiYXR0cmlidXRlcyI6W3siand0IjoiZXlKaGJHY2lPaUpTVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SjFjbXdpT2lKb2RIUndjem92TDJGaExuWnBjblJ5ZFM1amIyMHZZWFIwY2k5MWJtbHhkV1V0YVdSbGJuUnBabWxsY2k5MllXeDFaUzltWlRFelpqQm1ZUzB4Tm1VMUxUUTNaRFl0T0Rkall5MWhPVEkxTXpKaFl6Y3hZelFpTENKdVlXMWxJam9pWm1VeE0yWXdabUV0TVRabE5TMDBOMlEyTFRnM1kyTXRZVGt5TlRNeVlXTTNNV00wSWl3aWFXRjBJam94TlRVek5EZzFNalEzTENKbGVIQWlPakUxTlRNMU56RTJORGQ5LnFnOEJZTEo2Wkt1NmU2NDFfTkxmamxnaER3V2V4RXJfWVVDYWRVeVBYLUIxdG9uV0lKVWpHZGRoeDJjejVIOExkeHBqMEF1cmlsQ3oyeEFJY1JJdHdtOS0wTTNSbE5VQVo3bDV3WWFoUm5TV2lqd1Y0bEw3WXZtX0h3TVlncnJWTnZjVXdqNWNxcE1SRUhmQ0RTY1MtbFNiODl6aHE3NmR5cFZta2dtaFplM3Q5bEQxZlRTSktDSnlsYzdYOUF6Yld6TGMwZkRRSDcwMnlVX19aVk9Wa0J3VE8yako0b3ZCRFBCMHc5TGdDRVotOXB6dmRVaVRkWXVoWjJQelFCVE5IbEsxeHhRUUN1MTQ4dXVpVHc4RmtfYnM3ZWZ1R2dVVTd6ZnJLUjJMdmd3NVFMRHBhdkwxMUhuWElLWmloeHpKYmNyakJkS1FDSzBWN3YzaTdGMkNrQSJ9LHsiand0IjoiZXlKaGJHY2lPaUpTVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SjFjbXdpT2lKb2RIUndjem92TDJGaExuWnBjblJ5ZFM1amIyMHZZWFIwY2k5d2NtbHRZWEo1TFc5eVoyRnVhWHBoZEdsdmJpOTJZV3gxWlM5bVpURXpaakJtWVMweE5tVTFMVFEzWkRZdE9EZGpZeTFoT1RJMU16SmhZemN4WXpRaUxDSnVZVzFsSWpvaVptVXhNMll3Wm1FdE1UWmxOUzAwTjJRMkxUZzNZMk10WVRreU5UTXlZV00zTVdNMElpd2lhV0YwSWpveE5UVXpORGcxTWpRM0xDSmxlSEFpT2pFMU5UTTFOekUyTkRkOS5UQk8yUmJMSUVTTzVoM244Q29wNERWWUpOaEk0Nm5mYUFJdXpVdVRKNzN2NWowbXlwbGNqM2FtTnlXX1BQUnhTYXVNaEc1Z3doa1NyWUhnbk8tZjQyM2E3WW5HVzFTbWZxbXBFRmQ4czVqMXlHUkl5dEpzdWFWRDBCNW5mcmpTa1M0QnU1bFY4SjJwWW1uYW5aa3JfTW82b2oyX0loSVRrMGxWQlRncmktUFRVZkdLTkNGZkNJM2JwRkgyVXdidk56SkQ2d25pVzVDOXJPRzdvQk1TYkRUT0sySEpLXzNtZjFEaWZ6b0gwaVFZMnI1ZnlKem9tdFlEZDJaNEJHdFBuV3BVNndBRjNyY2ZPWXFZRFcxS0E3NFBzUFptMmthcUM3SWNxMVB2cUZnbFgzUXdwbXZRcXBFdnpXU05TM25ORnVpNXlqdXBrSFNsWGZVMjRDRW4zRUEifSx7Imp3dCI6ImV5SmhiR2NpT2lKU1V6STFOaUlzSW5SNWNDSTZJa3BYVkNKOS5leUoxY213aU9pSm9kSFJ3Y3pvdkwyRmhMblpwY25SeWRTNWpiMjB2WVhSMGNpOXpkWEJsY2kxaFpHMXBiaTkyWVd4MVpTOTBjblZsSWl3aWJtRnRaU0k2ZEhKMVpTd2lhV0YwSWpveE5UVXpORGcxTWpRM0xDSmxlSEFpT2pFMU5UTTFOekUyTkRkOS5wa3ZBdlJ4VTNwY3FUQ3ZVQ3VKdEN3RWc4VW5Ya0xHS1VnZEg3YUJuSFdxQ2l4X0NYdF9PcUo1VC1iNTh4bHN6ZWx5dmNkbXZUUXh5ZzFfYUhYT0tnNXdEUVFhQTZVcjNOc2JZcjNvc2tyUEk4ZEU4Z0lLMzI2TlBwcXJqcnBCR0dYa1BvSkhrWHdHTzVHZnF0b05wdUZXZDhZNVVETG1IMVFLZWdzQkpRQVZvVjZKcFdHdlB5UF9hcEFMOGNOUE5pVEh1QUwyUnlFMTdBcmh6aUh1NlVqYXFfZmFKYUM4c2doU2VqR2pXNlNwZFdpU0Y5S3cwclY0ZFpXalJzUnU5cWJXZjNnck1JTXFFb1AtM21TbHB4aHBEUFdUUzBoUmFDblN2cG5lUXluRnZoYktNQTJYQTB6MjlaOWk2SnVlUWlzcmpLVkoxUGlhWXZaSVdOenozT0EifV0sInB1YmxpY0tleSI6Ii0tLS0tQkVHSU4gUFVCTElDIEtFWS0tLS0tXG5NSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVPMG54aEdDeXVFYkhPcU5YaldJXG4yZGZ1TkM5ano2SjlaS2IzZHZvc1pMdGlybzMyK2pnZWV1ZGNPMC9sMVArUnpHa09SVUd1YnJrTi9vVVd0QzlsXG5ESmdxM1QwNXBRSlVjZy8rc2J5TDFHdlVuVTBpSmZWazl6ejV3M2NEQkUvSTk5ckNHc0lmRzJtK3VuS0tKbjIyXG53ZC9aT3FRRE93Wk42b0RrQjdaV1FKZTBRQlF1YjBsSmpUaG9nclBWaVhJSnFSZ1RvSCt0c2pVWCtodGtwOFFBXG52dmt3MDlYYzFIWjZraFpWZGZZZjdCbTBZSTBPVkNNYko3N0JWc01HMGNDc0QvOGgzLzI2RjdvcTl1aWFlVG54XG5zWkJzemZCWEpHcFVtNDBuYWFRSi80Q0lxMjBRVGFkclhMTXAxQ1JNblI1VGNlTHZ2L0twR2xRR1hiNFY0elJmXG5xUUlEQVFBQlxuLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tIiwiYWxpYXNlcyI6W10sImlhdCI6MTU1MzQ4NTI0NywiZXhwIjoxNTUzNTcxNjQ3fQ.mh1ub1kS9WryrLGj3ONFmk6EGl6rPE29VJU21O3IX4EslCkxmIMVVjpFf8s83uFkyy7c2w5Re6NJsln9FHgLTV7RKqdj71U6tE7onh6l8_ZBNPCdetrCcrMCFac65k67_Lz5XM4BVPyCvNuoe-gY8FkeqyimQzkL6Q52HNG2FpslDrgcx50HiCC_aX638UyyB3W4n7J4uF8LrLzsNqyXb2xQw9BVVBJ9-XmXUgOmFaMMG5wJyxFETpP9yR3YBCwiZw911tc5CC738ho4IufdX98HBPqECMIkoL4ZJmfVw4N7YlbaJDU5WZa2rqCpgmvn_B4Zlv1QnVf41fj4EOifyg\",\n" // "\n" // " 
\"publicKey\": \"-----BEGIN PUBLIC KEY-----\\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuO0nxhGCyuEbHOqNXjWI\\n2dfuNC9jz6J9ZKb3dvosZLtiro32+jgeeudcO0/l1P+RzGkORUGubrkN/oUWtC9l\\nDJgq3T05pQJUcg/+sbyL1GvUnU0iJfVk9zz5w3cDBE/I99rCGsIfG2m+unKKJn22\\nwd/ZOqQDOwZN6oDkB7ZWQJe0QBQub0lJjThogrPViXIJqRgToH+tsjUX+htkp8QA\\nvvkw09Xc1HZ6khZVdfYf7Bm0YI0OVCMbJ77BVsMG0cCsD/8h3/26F7oq9uiaeTnx\\nsZBszfBXJGpUm40naaQJ/4CIq20QTadrXLMp1CRMnR5TceLvv/KpGlQGXb4V4zRf\\nqQIDAQAB\\n-----END PUBLIC KEY-----\", \n" // " \n" // " \"signerPublicKey\": \"-----BEGIN PUBLIC KEY-----\\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuO0nxhGCyuEbHOqNXjWI\\n2dfuNC9jz6J9ZKb3dvosZLtiro32+jgeeudcO0/l1P+RzGkORUGubrkN/oUWtC9l\\nDJgq3T05pQJUcg/+sbyL1GvUnU0iJfVk9zz5w3cDBE/I99rCGsIfG2m+unKKJn22\\nwd/ZOqQDOwZN6oDkB7ZWQJe0QBQub0lJjThogrPViXIJqRgToH+tsjUX+htkp8QA\\nvvkw09Xc1HZ6khZVdfYf7Bm0YI0OVCMbJ77BVsMG0cCsD/8h3/26F7oq9uiaeTnx\\nsZBszfBXJGpUm40naaQJ/4CIq20QTadrXLMp1CRMnR5TceLvv/KpGlQGXb4V4zRf\\nqQIDAQAB\\n-----END PUBLIC KEY-----\", \n" // " \n" // " \"userId\": \"sreddy@virtrudeploy.us\"\n" // "}"; // auto entityObject = EntityObject::createEntityObjectFromJson(entityObjectJsonStr); try { std::string dummpyKey{"dummy"}; PolicyObject policyObject; policyObject.addDissem(user); auto tdfbuilderPtr = std::unique_ptr<TDFBuilder>(new TDFBuilder(user)); auto tdf = tdfbuilderPtr->setKasUrl(kasUrl) .enableOIDC(true) .setKasPublicKey(dummpyKey) .setMetaData({{"displayName", "sdk-test.pdf"}}) .setHttpHeaders(headers) .setPolicyObject(policyObject) .build(); BOOST_TEST_MESSAGE("TDF basic test passed."); } catch (const Exception &exception) { BOOST_FAIL(exception.what()); std::cout << "virtru exception " << exception.what() << std::endl; } catch (const std::exception &exception) { BOOST_FAIL(exception.what()); std::cout << exception.what() << std::endl; } catch (...) { BOOST_FAIL("Unknown exception..."); std::cout << "Unknown..." << std::endl; } #endif } BOOST_AUTO_TEST_CASE(test_tdf_builder_advanced) { #if ENABLE_TEST class ExternalLogger : public ILogger { public: /// A callback interface for log messages. void TDFSDKLog(LogMessage logMessage) override { std::ostringstream os; std::time_t timeInSeconds = (logMessage.timestamp / 1000); std::size_t fractionalSeconds = (logMessage.timestamp % 1000); os << "[" << std::put_time(std::localtime(&timeInSeconds), "%m-%d-%Y %X") << "." 
<< fractionalSeconds << "]"; std::string logTime = os.str(); switch (logMessage.level) { case LogLevel::Trace: std::clog << logTime << "[Trace] " << logMessage.message << std::endl; BOOST_FAIL("Testing external logger - Failed(log level is Info)"); break; case LogLevel::Debug: std::clog << logTime << "[Debug] " << logMessage.message << std::endl; BOOST_TEST(static_cast<int>(logMessage.level) == static_cast<int>(LogLevel::Debug)); break; case LogLevel::Info: std::clog << logTime << "[Info] " << logMessage.message << std::endl; BOOST_TEST(static_cast<int>(logMessage.level) == static_cast<int>(LogLevel::Info)); break; case LogLevel::Warn: std::clog << logTime << "[Warn] " << logMessage.message << std::endl; BOOST_TEST(static_cast<int>(logMessage.level) == static_cast<int>(LogLevel::Warn)); break; case LogLevel::Error: std::clog << logTime << "[Error] " << logMessage.message << std::endl; BOOST_TEST(static_cast<int>(logMessage.level) == static_cast<int>(LogLevel::Error)); break; case LogLevel::Fatal: std::clog << logTime << "[Fatal] " << logMessage.message << std::endl; BOOST_TEST(static_cast<int>(logMessage.level) == static_cast<int>(LogLevel::Fatal)); break; } } }; std::shared_ptr<ExternalLogger> externalLogger = std::make_shared<ExternalLogger>(); auto headers = GetHeaders(); auto policyObject = PolicyObject{}; policyObject.addDissem(user); // Store the uuid for later verification. auto policyUuid = policyObject.getUuid(); auto kasKeys = GetKeypair(); auto clientKeys = GetKeypair(); try { #if TEST_ENCRYPT_DECRYPT { // Remote std::unordered_map<std::string, std::string> metaData; metaData.insert({"displayName", "sdk-test.pdf"}); metaData.insert({"fileProvider", "tdf-cpp-sdk"}); std::string mimeType{"application/pdf"}; std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>(); auto tdfbuilderPtr = std::unique_ptr<TDFBuilder>(new TDFBuilder(user)); auto tdf = tdfbuilderPtr->setKasUrl(kasUrl) .setKasPublicKey(std::get<0>(kasKeys)) .setHttpHeaders(headers) .setExternalLogger(externalLogger, LogLevel::Debug) .setDefaultSegmentSize(2 * 1024 * 1024) .setMetaData(metaData) .setPrivateKey(std::get<1>(clientKeys)) .setPublicKey(std::get<0>(clientKeys)) .enableOIDC(true) .setEncryptionObject(KeyType::split, CipherType::Aes256GCM) .setIntegrityAlgorithm(IntegrityAlgorithm::HS256, IntegrityAlgorithm::GMAC) .setKeyAccessType(KeyAccessType::Remote) .setHTTPServiceProvider(mockNetwork) .setPayloadMimeType(mimeType) .setPolicyObject(policyObject) .build(); std::string currentDir = getCurrentWorkingDir(); // TODO: BUGBUG: We should use std::filesystem once all the compilers catch up. 
#ifdef _WINDOWS
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("\\data\\sample.pdf");

            std::string outPathEncrypt{currentDir};
            outPathEncrypt.append("\\data\\encrypt\\sample.pdf.tdf");

            std::string inPathDecrypt{currentDir};
            inPathDecrypt.append("\\data\\encrypt\\sample.pdf.tdf");

            std::string outPathDecrypt{currentDir};
            outPathDecrypt.append("\\data\\decrypt\\sample.pdf");
#else
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("/data/sample.pdf");

            std::string outPathEncrypt{currentDir};
            outPathEncrypt.append("/data/encrypt/sample.pdf.tdf");

            std::string inPathDecrypt{currentDir};
            inPathDecrypt.append("/data/encrypt/sample.pdf.tdf");

            std::string outPathDecrypt{currentDir};
            outPathDecrypt.append("/data/decrypt/sample.pdf");
#endif

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            tdf->encryptFile(inPathEncrypt, outPathEncrypt);

            //Since the client just encrypted, we expect the mock to have captured
            //an upsert request with the wrapped key - we'll take that out and have
            //the mock return a re-wrapped copy on the next call to `rewrap`
            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            auto tdfPolicyStr = tdf->getPolicy(outPathEncrypt);
            auto policy = nlohmann::json::parse(tdfPolicyStr);
            auto &attributes = policy[kBody][kDissem];
            auto tdfPolicyUuid = tdf->getPolicyUUID(outPathEncrypt);

            // kDissem is a JSON array, so check membership by value
            // (json::contains() only checks object keys).
            bool userInDissem = false;
            for (const auto &entry : attributes) {
                if (entry.get<std::string>() == user) {
                    userInDissem = true;
                    break;
                }
            }
            BOOST_TEST(userInDissem);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            std::cout << std::endl;

            tdf->decryptFile(inPathDecrypt, outPathDecrypt);
        }

        { // Wrapper with no meta data

            // TODO: These tests are using Virtru KAS instead of Core KAS. Virtru KAS expects the
            // metadata, so we need to pass the meta information.
            std::unordered_map<std::string, std::string> metaData;
            metaData.insert({"displayName", "sdk-test.pdf"});
            metaData.insert({"fileProvider", "tdf-cpp-sdk"});

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();

            auto tdfbuilderPtr = std::unique_ptr<TDFBuilder>(new TDFBuilder(user));
            auto tdf = tdfbuilderPtr->setKasUrl(kasUrl)
                           .setKasPublicKey(std::get<0>(kasKeys))
                           .setHttpHeaders(headers)
                           .setExternalLogger(externalLogger, LogLevel::Debug)
                           .setDefaultSegmentSize(2 * 1024 * 1024)
                           .setPrivateKey(std::get<1>(clientKeys))
                           .setPublicKey(std::get<0>(clientKeys))
                           .enableOIDC(true)
                           .setMetaData(metaData)
                           .setEncryptionObject(KeyType::split, CipherType::Aes256GCM)
                           .setIntegrityAlgorithm(IntegrityAlgorithm::HS256, IntegrityAlgorithm::GMAC)
                           .setKeyAccessType(KeyAccessType::Wrapped)
                           .setHTTPServiceProvider(mockNetwork)
                           .setPolicyObject(policyObject)
                           .build();

            std::string currentDir = getCurrentWorkingDir();

            // TODO: BUGBUG: We should use std::filesystem once all the compilers catch up.
#ifdef _WINDOWS
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("\\data\\sample.pdf");

            std::string outPathEncrypt{currentDir};
            outPathEncrypt.append("\\data\\encrypt\\sample-wrapped.pdf.tdf");

            std::string inPathDecrypt{currentDir};
            inPathDecrypt.append("\\data\\encrypt\\sample-wrapped.pdf.tdf");

            std::string outPathDecrypt{currentDir};
            outPathDecrypt.append("\\data\\decrypt\\sample-wrapped.pdf");
#else
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("/data/sample.pdf");

            std::string outPathEncrypt{currentDir};
            outPathEncrypt.append("/data/encrypt/sample-wrapped.pdf.tdf");

            std::string inPathDecrypt{currentDir};
            inPathDecrypt.append("/data/encrypt/sample-wrapped.pdf.tdf");

            std::string outPathDecrypt{currentDir};
            outPathDecrypt.append("/data/decrypt/sample-wrapped.pdf");
#endif

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            tdf->encryptFile(inPathEncrypt, outPathEncrypt);

            auto tdfPolicyUuid = tdf->getPolicyUUID(outPathEncrypt);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            // Lazy sync the policy.
            tdf->sync(outPathEncrypt);

            //Since the client just encrypted, we expect the mock to have captured
            //an upsert request with the wrapped key - we'll take that out and have
            //the mock return a re-wrapped copy on the next call to `rewrap`
            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation(
                "https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            std::cout << std::endl;

            tdf->decryptFile(inPathDecrypt, outPathDecrypt);
        }
#endif

        BOOST_TEST_MESSAGE("TDF basic test passed.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_CASE(test_tdf_builder_html_tdfs) {

#if ENABLE_TEST
    try {
        std::string currentDir = getCurrentWorkingDir();

        auto kasKeys = GetKeypair();
        auto clientKeys = GetKeypair();
        auto headers = GetHeaders();

#if TEST_ENCRYPT_DECRYPT
        {
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Remote, Protocol::Html, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
auto policyUuid = policyObject.getUuid(); std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>(); tdfBuilder->setHTTPServiceProvider(mockNetwork); tdfBuilder->setPolicyObject(policyObject); auto tdf = tdfBuilder->build(); #ifdef _WINDOWS std::string inPathEncrypt{currentDir}; inPathEncrypt.append("\\data\\sample.pdf"); std::string outPathEncrypt{currentDir}; outPathEncrypt.append("\\data\\encrypt\\sample.pdf.html"); std::string inPathDecrypt{currentDir}; inPathDecrypt.append("\\data\\encrypt\\sample.pdf.html"); std::string outPathDecrypt{currentDir}; outPathDecrypt.append("\\data\\decrypt\\sample.pdf"); #else std::string inPathEncrypt{currentDir}; inPathEncrypt.append("/data/sample.pdf"); std::string outPathEncrypt{currentDir}; outPathEncrypt.append("/data/encrypt/sample.pdf.html"); std::string inPathDecrypt{currentDir}; inPathDecrypt.append("/data/encrypt/sample.pdf.html"); std::string outPathDecrypt{currentDir}; outPathDecrypt.append("/data/decrypt/sample.pdf"); #endif //Set up a mocked upsert expectation //Response is discarded so doesn't matter std::string fakeResp = "NO RESPONSE"; mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200); tdf->encryptFile(inPathEncrypt, outPathEncrypt); auto capturedRequest = mockNetwork->RecordedPOSTCalls[0]; auto upsertReq = std::get<1>(capturedRequest); auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys); mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200); auto tdfPolicyUuid = tdf->getPolicyUUID(outPathEncrypt); BOOST_TEST(tdfPolicyUuid == policyUuid); std::cout << std::endl; tdf->decryptFile(inPathDecrypt, outPathDecrypt); } { // Wrapper with no meta data auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Wrapped, Protocol::Html, kasKeys, clientKeys); std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>(); tdfBuilder->setHTTPServiceProvider(mockNetwork); auto policyObject = PolicyObject{}; policyObject.addDissem(user); // Store the uuid for later verification. auto policyUuid = policyObject.getUuid(); tdfBuilder->setPolicyObject(policyObject); auto tdf = tdfBuilder->build(); // TODO: BUGBUG: We should use std::filesystem once all the compilers catch up. 
#ifdef _WINDOWS
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("\\data\\sample.pdf");
            std::string outPathEncrypt{currentDir};
            outPathEncrypt.append("\\data\\encrypt\\sample-wrapped.pdf.html");
            std::string inPathDecrypt{currentDir};
            inPathDecrypt.append("\\data\\encrypt\\sample-wrapped.pdf.html");
            std::string outPathDecrypt{currentDir};
            outPathDecrypt.append("\\data\\decrypt\\sample-wrapped.pdf");
#else
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("/data/sample.pdf");
            std::string outPathEncrypt{currentDir};
            outPathEncrypt.append("/data/encrypt/sample-wrapped.pdf.html");
            std::string inPathDecrypt{currentDir};
            inPathDecrypt.append("/data/encrypt/sample-wrapped.pdf.html");
            std::string outPathDecrypt{currentDir};
            outPathDecrypt.append("/data/decrypt/sample-wrapped.pdf");
#endif

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            tdf->encryptFile(inPathEncrypt, outPathEncrypt);

            auto tdfPolicyUuid = tdf->getPolicyUUID(outPathEncrypt);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            // Lazy sync the policy.
            tdf->sync(outPathEncrypt);

            //Since the client just encrypted, we expect the mock to have captured
            //an upsert request with the wrapped key - we'll take that out and have
            //the mock return a re-wrapped copy on the next call to `rewrap`
            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            std::cout << std::endl;
            tdf->decryptFile(inPathDecrypt, outPathDecrypt);
        }
#endif
        BOOST_TEST_MESSAGE("TDF basic test passed.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_CASE(test_tdf_builder_stream_tdf_type) {
#if ENABLE_TEST
    try {
        auto kasKeys = GetKeypair();
        auto clientKeys = GetKeypair();
        auto headers = GetHeaders();

#if TEST_ENCRYPT_DECRYPT
        { // Remote using tdf to encrypt stream
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Remote, Protocol::Zip, kasKeys, clientKeys);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);
            auto tdf = tdfBuilder->build();

            // Create simple string.
            std::string plainText{"HelloWorld!!"};
            std::istringstream inputStream(plainText);
            std::stringstream ioStream; // will be used as input and output stream

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            // encrypt the stream.
            tdf->encryptStream(inputStream, ioStream);

            auto tdfPolicyUuid = tdf->getPolicyUUID(ioStream);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            { // Write to a file and test decrypt

                // Write the .tdf stream to file.
std::string tdfFile{"simple.txt.tdf"}; std::ofstream outFileStream{tdfFile, std::ios_base::out | std::ios_base::binary}; if (!outFileStream) { BOOST_FAIL("Failed to open file for writing."); } outFileStream << ioStream.str(); outFileStream.close(); auto capturedRequest = mockNetwork->RecordedPOSTCalls[0]; auto upsertReq = std::get<1>(capturedRequest); auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys); mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200); // decrypt the file std::string outTxtFile{"simple.txt"}; tdf->decryptFile(tdfFile, outTxtFile); std::ifstream inputFileStream(outTxtFile); std::string decryptedText((std::istreambuf_iterator<char>(inputFileStream)), std::istreambuf_iterator<char>()); BOOST_TEST(plainText == decryptedText); } auto capturedRequest = mockNetwork->RecordedPOSTCalls[0]; auto upsertReq = std::get<1>(capturedRequest); auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys); mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200); std::ostringstream decryptedStream; tdf->decryptStream(ioStream, decryptedStream); std::string decryptedText = decryptedStream.str(); BOOST_TEST(plainText == decryptedText); } { // Wrapper with no meta data auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Wrapped, Protocol::Zip, kasKeys, clientKeys); auto policyObject = PolicyObject{}; policyObject.addDissem(user); // Store the uuid for later verification. auto policyUuid = policyObject.getUuid(); tdfBuilder->setPolicyObject(policyObject); std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>(); //For these tests there's no `upsert` call to capture for the wrappedKey, //so set a POSTTransformer lambda on the mock that will intercept the `rewrap` //request inline std::function<std::string(std::string &&)> lambda = [kasKeys, clientKeys](std::string &&inBody) { return test_tdf_builder_v2_suite::BuildFakedRewrapResponse(inBody, kasKeys, clientKeys); }; mockNetwork->POSTTransformer = lambda; //Set up a mocked upsert expectation //Response is discarded so doesn't matter std::string fakeResp = "NO RESPONSE"; mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200); mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, "", 200); tdfBuilder->setHTTPServiceProvider(mockNetwork); auto tdf = tdfBuilder->build(); // Create simple string. std::string plainText{"HelloWorld!!"}; std::istringstream inputStream(plainText); std::stringstream ioStream; // will be used as input and output stream // encrypt the stream. tdf->encryptStream(inputStream, ioStream); auto tdfPolicyUuid = tdf->getPolicyUUID(ioStream); BOOST_TEST(tdfPolicyUuid == policyUuid); { // Write the .tdf stream to file. 
                std::string tdfFile{"simple-wrapped.txt.tdf"};
                std::ofstream outFileStream{tdfFile, std::ios_base::out | std::ios_base::binary};
                if (!outFileStream) {
                    BOOST_FAIL("Failed to open file for writing.");
                }
                outFileStream << ioStream.str();
                outFileStream.close();

                // decrypt the file
                std::string outTxtFile{"simple-wrapped.txt"};
                tdf->decryptFile(tdfFile, outTxtFile);

                std::ifstream inputFileStream(outTxtFile);
                std::string decryptedText((std::istreambuf_iterator<char>(inputFileStream)), std::istreambuf_iterator<char>());
                BOOST_TEST(plainText == decryptedText);
            }

            std::ostringstream decryptedStream;
            tdf->decryptStream(ioStream, decryptedStream);
            std::string decryptedText = decryptedStream.str();
            BOOST_TEST(plainText == decryptedText);
        }
#endif
        BOOST_TEST_MESSAGE("TDF streaming test passed using zip format.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_CASE(test_tdf_builder_stream_html_type) {
#if ENABLE_TEST
    try {
        auto kasKeys = GetKeypair();
        auto clientKeys = GetKeypair();
        auto headers = GetHeaders();

#if TEST_ENCRYPT_DECRYPT
        { // Remote
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Remote, Protocol::Html, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            auto tdf = tdfBuilder->build();

            // Create simple string.
            std::string plainText{"HelloWorld!!"};
            std::istringstream inputStream(plainText);
            std::stringstream ioStream;

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            // encrypt the stream.
            tdf->encryptStream(inputStream, ioStream);

            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            auto tdfPolicyUuid = tdf->getPolicyUUID(ioStream);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            { // Write the .tdf stream to file.
                std::string tdfFile{"simple-html.txt.html"};
                std::ofstream outFileStream{tdfFile, std::ios_base::out | std::ios_base::binary};
                if (!outFileStream) {
                    BOOST_FAIL("Failed to open file for writing.");
                }
                outFileStream << ioStream.str();
                outFileStream.close();

                // decrypt the file
                std::string outTxtFile{"simple-html.txt"};
                tdf->decryptFile(tdfFile, outTxtFile);

                std::ifstream inputFileStream(outTxtFile);
                std::string decryptedText((std::istreambuf_iterator<char>(inputFileStream)), std::istreambuf_iterator<char>());
                BOOST_TEST(plainText == decryptedText);
            }

            std::ostringstream decryptedStream;
            tdf->decryptStream(ioStream, decryptedStream);
            std::string decryptedText = decryptedStream.str();
            BOOST_TEST(plainText == decryptedText);
        }

        { // Wrapper with no meta data
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Wrapped, Protocol::Html, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            //For 'wrapped' tests there's no `upsert` call to capture for the wrappedKey,
            //so set a POSTTransformer lambda on the mock that will intercept the `rewrap`
            //request inline
            std::function<std::string(std::string &&)> lambda = [kasKeys, clientKeys](std::string &&inBody) {
                return test_tdf_builder_v2_suite::BuildFakedRewrapResponse(inBody, kasKeys, clientKeys);
            };
            mockNetwork->POSTTransformer = lambda;

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, "", 200);

            auto tdf = tdfBuilder->build();

            // Create simple string.
            std::string plainText{"HelloWorld!!"};
            std::istringstream inputStream(plainText);
            std::stringstream ioStream;

            // encrypt the stream.
            tdf->encryptStream(inputStream, ioStream);

            auto tdfPolicyUuid = tdf->getPolicyUUID(ioStream);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            { // Write the .tdf stream to file.
                std::string tdfFile{"simple-html-wrapped.txt.html"};
                std::ofstream outFileStream{tdfFile, std::ios_base::out | std::ios_base::binary};
                if (!outFileStream) {
                    BOOST_FAIL("Failed to open file for writing.");
                }
                outFileStream << ioStream.str();
                outFileStream.close();

                // decrypt the file
                std::string outTxtFile{"simple-html-wrapped.txt"};
                tdf->decryptFile(tdfFile, outTxtFile);

                std::ifstream inputFileStream(outTxtFile);
                std::string decryptedText((std::istreambuf_iterator<char>(inputFileStream)), std::istreambuf_iterator<char>());
                BOOST_TEST(plainText == decryptedText);
            }

            std::ostringstream decryptedStream;
            tdf->decryptStream(ioStream, decryptedStream);
            std::string decryptedText = decryptedStream.str();
            BOOST_TEST(plainText == decryptedText);
        }
#endif
        BOOST_TEST_MESSAGE("TDF streaming test passed using html format.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_CASE(test_tdf_builder_16mb_stream_tdf_type) {
#if ENABLE_TEST
    try {
        auto kasKeys = GetKeypair();
        auto clientKeys = GetKeypair();
        auto headers = GetHeaders();

#if TEST_ENCRYPT_DECRYPT
        { // Remote using tdf to encrypt stream
            std::string currentDir = getCurrentWorkingDir();

            // TODO: BUGBUG: We should use std::filesystem once all the compilers catch up.
#ifdef _WINDOWS
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("\\data\\sample.pdf");
#else
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("/data/sample.pdf");
#endif

            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Remote, Protocol::Zip, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            auto tdf = tdfBuilder->build();

            // Create a stream for 16mb file.
            std::ifstream inputStream{inPathEncrypt, std::ios_base::in | std::ios_base::binary};
            std::stringstream ioStream; // will be used as input and output stream
            std::string fileContents{std::istreambuf_iterator<char>(inputStream), std::istreambuf_iterator<char>()};

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            // encrypt the stream.
            tdf->encryptStream(inputStream, ioStream);

            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            auto tdfPolicyUuid = tdf->getPolicyUUID(ioStream);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            std::ostringstream decryptedStream;
            tdf->decryptStream(ioStream, decryptedStream);
            std::string decryptedText = decryptedStream.str();
            BOOST_TEST(fileContents == decryptedText);
        }
#endif
        BOOST_TEST_MESSAGE("TDF streaming(16 mb) test passed using zip format.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_CASE(test_tdf_builder_16mb_stream_html_type) {
#if ENABLE_TEST
    try {
        auto kasKeys = GetKeypair();
        auto clientKeys = GetKeypair();
        auto headers = GetHeaders();

#if TEST_ENCRYPT_DECRYPT
        { // Remote using tdf to encrypt stream
            std::string currentDir = getCurrentWorkingDir();

            // TODO: BUGBUG: We should use std::filesystem once all the compilers catch up.
#ifdef _WINDOWS
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("\\data\\sample.pdf");
#else
            std::string inPathEncrypt{currentDir};
            inPathEncrypt.append("/data/sample.pdf");
#endif

            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Remote, Protocol::Html, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            auto tdf = tdfBuilder->build();

            // Create a stream for 16mb file.
            std::ifstream inputStream{inPathEncrypt, std::ios_base::in | std::ios_base::binary};
            std::stringstream ioStream; // will be used as input and output stream
            std::string fileContents{std::istreambuf_iterator<char>(inputStream), std::istreambuf_iterator<char>()};

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            // encrypt the stream.
            tdf->encryptStream(inputStream, ioStream);

            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            auto tdfPolicyUuid = tdf->getPolicyUUID(ioStream);
            BOOST_TEST(tdfPolicyUuid == policyUuid);

            std::ostringstream decryptedStream;
            tdf->decryptStream(ioStream, decryptedStream);
            std::string decryptedText = decryptedStream.str();
            BOOST_TEST(fileContents == decryptedText);
        }
#endif
        BOOST_TEST_MESSAGE("TDF streaming(16 mb) test passed using html format.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_CASE(test_tdf_builder_callback_tdf_type) {
#if ENABLE_TEST
    try {
        auto kasKeys = GetKeypair();
        auto clientKeys = GetKeypair();
        auto headers = GetHeaders();

#if TEST_ENCRYPT_DECRYPT
        { // Remote using tdf to encrypt stream
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Remote, Protocol::Zip, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            auto tdf = tdfBuilder->build();

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            std::string plainText{"Virtru offers data protection solutions for the most commonly used"
                                  " tools and applications across all industries"};
            std::stringstream ioStream;
            ByteArray<5> buffer;
            SourceAndSinkInterfaceEncrypt(tdf.get(), plainText, &ioStream, &buffer);

            //Since the client just encrypted, we expect the mock to have captured
            //an upsert request with the wrapped key - we'll take that out and have
            //the mock return a re-wrapped copy on the next call to `rewrap`
            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            SourceAndSinkInterfaceDecrypt(tdf.get(), plainText, &ioStream, &buffer);
        }

        { // Wrapper with no meta data
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Wrapped, Protocol::Zip, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();

            std::function<std::string(std::string &&)> lambda = [kasKeys, clientKeys](std::string &&inBody) {
                return test_tdf_builder_v2_suite::BuildFakedRewrapResponse(inBody, kasKeys, clientKeys);
            };
            mockNetwork->POSTTransformer = lambda;

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, "", 200);

            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            auto tdf = tdfBuilder->build();

            std::string plainText{"Virtru offers data protection solutions for the most commonly used"
                                  " tools and applications across all industries"};
            std::stringstream ioStream;
            ByteArray<5> buffer;
            SourceAndSinkInterfaceEncrypt(tdf.get(), plainText, &ioStream, &buffer);
            SourceAndSinkInterfaceDecrypt(tdf.get(), plainText, &ioStream, &buffer);
        }
#endif
        BOOST_TEST_MESSAGE("TDF streaming test passed using zip format.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_CASE(test_tdf_builder_callback_html_type) {
#if ENABLE_TEST
    try {
        auto kasKeys = GetKeypair();
        auto clientKeys = GetKeypair();
        auto headers = GetHeaders();

#if TEST_ENCRYPT_DECRYPT
        { // Remote
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Remote, Protocol::Html, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            auto tdf = tdfBuilder->build();

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);

            std::string plainText{"Virtru offers data protection solutions for the most commonly used"
                                  " tools and applications across all industries"};
            std::stringstream ioStream;
            ByteArray<5> buffer;
            SourceAndSinkInterfaceEncrypt(tdf.get(), plainText, &ioStream, &buffer);

            //Since the client just encrypted, we expect the mock to have captured
            //an upsert request with the wrapped key - we'll take that out and have
            //the mock return a re-wrapped copy on the next call to `rewrap`
            auto capturedRequest = mockNetwork->RecordedPOSTCalls[0];
            auto upsertReq = std::get<1>(capturedRequest);
            auto fakeRewrapResp = BuildFakedRewrapResponse(upsertReq, kasKeys, clientKeys);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, fakeRewrapResp, 200);

            SourceAndSinkInterfaceDecrypt(tdf.get(), plainText, &ioStream, &buffer);
        }

        { // Wrapper with no meta data
            auto tdfBuilder = createTDFBuilder(LogLevel::Info, KeyAccessType::Wrapped, Protocol::Html, kasKeys, clientKeys);

            auto policyObject = PolicyObject{};
            policyObject.addDissem(user);

            // Store the uuid for later verification.
            auto policyUuid = policyObject.getUuid();
            tdfBuilder->setPolicyObject(policyObject);

            std::shared_ptr<MockNetwork> mockNetwork = std::make_shared<MockNetwork>();
            tdfBuilder->setHTTPServiceProvider(mockNetwork);

            std::function<std::string(std::string &&)> lambda = [kasKeys, clientKeys](std::string &&inBody) {
                return test_tdf_builder_v2_suite::BuildFakedRewrapResponse(inBody, kasKeys, clientKeys);
            };
            mockNetwork->POSTTransformer = lambda;

            //Set up a mocked upsert expectation
            //Response is discarded so doesn't matter
            std::string fakeResp = "NO RESPONSE";
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/upsert", headers, fakeResp, 200);
            mockNetwork->addPOSTExpectation("https://api-develop01.develop.virtru.com/kas/v2/rewrap", headers, "", 200);

            auto tdf = tdfBuilder->build();

            std::string plainText{"Virtru offers data protection solutions for the most commonly used"
                                  " tools and applications across all industries"};
            std::stringstream ioStream;
            ByteArray<5> buffer;
            SourceAndSinkInterfaceEncrypt(tdf.get(), plainText, &ioStream, &buffer);
            SourceAndSinkInterfaceDecrypt(tdf.get(), plainText, &ioStream, &buffer);
        }
#endif
        BOOST_TEST_MESSAGE("TDF streaming test passed using html format.");
    } catch (const Exception &exception) {
        BOOST_FAIL(exception.what());
    } catch (const std::exception &exception) {
        BOOST_FAIL(exception.what());
        std::cout << exception.what() << std::endl;
    } catch (...) {
        BOOST_FAIL("Unknown exception...");
        std::cout << "Unknown..." << std::endl;
    }
#endif
}

BOOST_AUTO_TEST_SUITE_END()
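// Editor's sketch (not part of the original suite): the mock-network pattern the
// cases above repeat, reduced to its skeleton. MockNetwork and
// BuildFakedRewrapResponse are the helpers defined earlier in this file; the
// URL variables below are hypothetical placeholders for the hard-coded KAS endpoints.
//
//   mockNetwork->addPOSTExpectation(upsertUrl, headers, "NO RESPONSE", 200);
//   tdf->encryptFile(in, out);                          // fires the upsert POST
//   auto wrappedKeyReq = std::get<1>(mockNetwork->RecordedPOSTCalls[0]);
//   auto rewrapResp = BuildFakedRewrapResponse(wrappedKeyReq, kasKeys, clientKeys);
//   mockNetwork->addPOSTExpectation(rewrapUrl, headers, rewrapResp, 200);
//   tdf->decryptFile(out, roundTrip);                   // consumes the faked rewrap
//
// For KeyAccessType::Wrapped there is no upsert to capture, so the tests instead
// install a POSTTransformer lambda that fakes the rewrap response inline.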
{"hexsha": "decf18dfe616b46c841bff2ad5389017b26342aa", "size": 67562, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tests/test_tdfbuilder_v2.cpp", "max_stars_repo_name": "opentdf/client-cpp", "max_stars_repo_head_hexsha": "9c6dbc73a989733e30371555aa7a24ff496a62f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tests/test_tdfbuilder_v2.cpp", "max_issues_repo_name": "opentdf/client-cpp", "max_issues_repo_head_hexsha": "9c6dbc73a989733e30371555aa7a24ff496a62f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2022-01-31T14:42:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T22:44:54.000Z", "max_forks_repo_path": "src/tests/test_tdfbuilder_v2.cpp", "max_forks_repo_name": "opentdf/client-cpp", "max_forks_repo_head_hexsha": "9c6dbc73a989733e30371555aa7a24ff496a62f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-03-09T18:40:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T18:40:47.000Z", "avg_line_length": 45.8047457627, "max_line_length": 3593, "alphanum_fraction": 0.6493147035, "num_tokens": 18923}
function [Y, R, E] = IsomapII(D, n_fcn, n_size, options)
% ISOMAPII Computes Isomap embedding using an advanced version of
%          the algorithm in Tenenbaum, de Silva, and Langford (2000),
%          which can take advantage of sparsity in the graph and
%          redundancy in the distances.
%
% [Y, R, E] = isomapII(D, n_fcn, n_size, options);
%
% Input:
%   D = input-space distances between pairs of N points, which can
%       take 1 of 3 forms:
%       (1) a full N x N matrix (as in isomap.m)
%       (2) a sparse N x N matrix (missing entries are treated as INF)
%       (3) the name of a function (e.g. 'd_fun') that takes
%           one argument, i, and returns a row vector containing the
%           distances from all N points to point i.
%
%   n_fcn = neighborhood function ('epsilon' or 'k')
%   n_size = neighborhood size (value for epsilon or k)
%
%   options = optional structure of options:
%     options.dims = (row) vector of embedding dimensionalities to use
%         (1:10 = default)
%     options.comp = which connected component to embed, if more than one.
%         (1 = largest (default), 2 = second largest, ...)
%     options.display = plot residual variance and 2-D embedding?
%         (1 = yes (default), 0 = no)
%     options.overlay = overlay graph on 2-D embedding?
%         (1 = yes (default), 0 = no)
%     options.verbose = display progress reports?
%         (1 = yes (default), 0 = no)
%     options.dijkstra = use dijkstra's algorithm for shortest paths with
%         full N x N distance matrix.
%         (1 = yes (default), 0 = use Floyd; Floyd should
%         be used only if you are unable to MEX dijkstra.cpp)
%     options.Kmax = maximum number of neighbors (used for sparse versions
%         of epsilon; by default, estimated by random sample)
%     options.landmarks = (row) vector of landmark points to use in MDS.
%         (MDS finds the configuration that best approximates
%         the distances from all points to the landmark points.
%         The default landmark points are 1:N (i.e. all the points),
%         which is equivalent to classical MDS. Good
%         results may often be obtained using a number of
%         landmarks that is much smaller than N, but much
%         larger than the data's intrinsic dimensionality.
%         Note that this extension is experimental! For
%         discussion, see Steyvers, de Silva, and Tenenbaum
%         (in preparation).)
%
% Output:
%   Y = Y.coords is a cell array, with coordinates for d-dimensional embeddings
%       in Y.coords{d}. Y.index contains the indices of the points embedded.
%   R = residual variances for embeddings in Y
%   E = edge matrix for neighborhood graph
%
%    BEGIN COPYRIGHT NOTICE
%
%    Isomap II code -- (c) 1998-2000 Josh Tenenbaum
%
%    This code is provided as is, with no guarantees except that
%    bugs are almost surely present. Published reports of research
%    using this code (or a modified version) should cite the
%    article that describes the algorithm:
%
%      J. B. Tenenbaum, V. de Silva, J. C. Langford (2000). A global
%      geometric framework for nonlinear dimensionality reduction.
%      Science 290 (5500): 2319-2323, 22 December 2000.
%
%    Comments and bug reports are welcome. Email to jbt@psych.stanford.edu.
%    I would also appreciate hearing about how you used this code,
%    improvements that you have made to it, or translations into other
%    languages.
%
%    You are free to modify, extend or distribute this code, as long
%    as this copyright notice is included whole and unchanged.
%
%    END COPYRIGHT NOTICE
%
% Modified by Ramon Casero <rcasero@gmail.com>, University of Oxford.
%
% Version: 0.1.1
%
% This file is distributed as a derivative work of a third-party function
% with project Gerardus.
%
% http://code.google.com/p/gerardus/
%
% Original code downloaded from the Isomap Homepage
%
% http://isomap.stanford.edu/

%%%%% Step 0: Initialization and Parameters %%%%%

if nargin < 3
    error('Too few input arguments');
elseif nargin < 4
    options = struct('dims',1:10,'overlay',1,'comp',1,'display',1,'dijkstra',1,'verbose',1);
end

if isa(D, 'function_handle')
    mode = 3;
    d_func = D;
    N = length(d_func(1));
elseif issparse(D)
    mode = 2;
    N = size(D,1);
    if ~(N==size(D,2))
        error('D must be a square matrix');
    end;
else
    mode = 1;
    N = size(D,1);
    if ~(N==size(D,2))
        error('D must be a square matrix');
    end;
end

if strcmp(n_fcn, 'k')
    K = n_size;
    if ~(K==round(K))
        error('Number of neighbors for k method must be an integer');
    end
    if ((mode==2) && ~(min(sum(D'>0))>=K))
        error('Sparse D matrix must contain at least K nonzero entries in each row');
    end
elseif strcmp(n_fcn, 'epsilon')
    epsilon = n_size;
    if isfield(options,'Kmax')
        K = options.Kmax;
    elseif (mode==3)
        %% estimate maximum equivalent K %%
        tmp = zeros(10,N);
        for i=1:10
            tmp(i,:) = d_func(ceil(N*rand));
        end
        K = 2*max(sum(tmp'<epsilon));   % just to be safe
    end
else
    error('Neighborhood function must be either epsilon or k');
end

if (mode == 3)
    INF = inf;
else
    INF = 1000*max(max(D))*N;   %% effectively infinite distance
end

if ~isfield(options,'dims')
    options.dims = 1:10;
end
if ~isfield(options,'overlay')
    options.overlay = 1;
end
if ~isfield(options,'comp')
    options.comp = 1;
end
if ~isfield(options,'display')
    options.display = 1;
end
if ~isfield(options,'verbose')
    options.verbose = 1;
end
if ~isfield(options,'landmarks')
    options.landmarks = 1:N;
end
if ~isfield(options,'dijkstra')
    options.dijkstra = 1;
end

dims = options.dims;
comp = options.comp;
overlay = options.overlay;
displ = options.display;
verbose = options.verbose;
landmarks = options.landmarks;
use_dijk = options.dijkstra;

Y.coords = cell(length(dims),1);
R = zeros(1,length(dims));

%%%%% Step 1: Construct neighborhood graph %%%%%
disp('Constructing neighborhood graph...');

if ((mode == 1) && (use_dijk == 0))
    if strcmp(n_fcn, 'k')
        [tmp, ind] = sort(D);
        tic;
        for i=1:N
            D(i,ind((2+K):end,i)) = INF;
            if ((verbose == 1) && (rem(i,50) == 0))
                disp([' Iteration: ' num2str(i) ' Estimated time to completion: ' num2str((N-i)*toc/60/50) ' minutes']);
                tic;
            end
        end
    elseif strcmp(n_fcn, 'epsilon')
        warning off    %% Next line causes an unnecessary warning, so turn it off
        D = D./(D<=epsilon);
        D = min(D,INF);
        warning on
    end
    D = min(D,D');    %% Make sure distance matrix is symmetric
elseif ((mode == 1) && (use_dijk == 1))
    if strcmp(n_fcn, 'k')
        [tmp, ind] = sort(D);
        tic;
        for i=1:N
            D(i,ind((2+K):end,i)) = 0;
            if ((verbose == 1) && (rem(i,50) == 0))
                disp([' Iteration: ' num2str(i) ' Estimated time to completion: ' num2str((N-i)*toc/60/50) ' minutes']);
                tic;
            end
        end
    elseif strcmp(n_fcn, 'epsilon')
        D = D.*(D<=epsilon);
    end
    D = sparse(D);
    D = max(D,D');    %% Make sure distance matrix is symmetric
elseif (mode == 2)
    if strcmp(n_fcn, 'k')
        Di = zeros(N*K,1);
        Dj = zeros(N*K,1);
        Ds = zeros(N*K,1);
        counter = 0;
        [a,b,c] = find(D);
        tic;
        for i=1:N
            l = find(a==i);
            [g,f] = sort(c(l));
            Di(counter+(1:K)) = i;
            Dj(counter+(1:K)) = b(l(f(1:K)));
            Ds(counter+(1:K)) = g(1:K);
            counter = counter+K;
            if ((verbose == 1) && (rem(i,50) == 0))
                disp([' Iteration: ' num2str(i) ' Estimated time to completion: ' num2str((N-i)*toc/60/50) ' minutes']);
                tic;
            end
        end
        D = sparse(Di(1:counter), Dj(1:counter), Ds(1:counter));
        clear Di Dj Ds counter;
    elseif strcmp(n_fcn, 'epsilon')
        D = D.*(D<=epsilon);
    end
    D = max(D,D');    %% Make sure distance matrix is symmetric
elseif (mode == 3)
    Di = zeros(N*(K+1),1);
    Dj = zeros(N*(K+1),1);
    Ds = zeros(N*(K+1),1);
    counter = 0;
    tic;
    for i=1:N
        d = d_func(i);
        if strcmp(n_fcn, 'k')
            [c,b] = sort(d);
            Di(counter+(1:(K+1))) = i;
            Dj(counter+(1:(K+1))) = b(1:(K+1));
            Ds(counter+(1:(K+1))) = c(1:(K+1));
            counter = counter+(K+1);
        elseif strcmp(n_fcn, 'epsilon')
            [a,b,c] = find(d.*(d<=epsilon));
            l = length(a);
            Di(counter+(1:l)) = i;
            Dj(counter+(1:l)) = b;
            Ds(counter+(1:l)) = c;
            counter = counter+l;
        end
        if ((verbose == 1) && (rem(i,50) == 0))
            disp([' Iteration: ' num2str(i) ' Estimated time to completion: ' num2str((N-i)*toc/60/50) ' minutes']);
            tic;
        end
    end
    D = sparse(Di(1:counter), Dj(1:counter), Ds(1:counter));
    clear Di Dj Ds counter;
    D = max(D,D');    %% Make sure distance matrix is symmetric
end

if (overlay == 1)
    if ((mode == 1) && (use_dijk == 0))
        E = int8(1-(D==INF));   %% Edge information for subsequent graph overlay
    else
        [a,b,c] = find(D);
        E = sparse(a,b,ones(size(a)));
    end
end

%%%%% Step 2: Compute shortest paths %%%%%
disp('Computing shortest paths...');

if ((mode==1) && (use_dijk == 0))
    tic;
    for k=1:N
        D = min(D,repmat(D(:,k),[1 N])+repmat(D(k,:),[N 1]));
        if ((verbose == 1) && (rem(k,20) == 0))
            disp([' Iteration: ' num2str(k) ' Estimated time to completion: ' num2str((N-k)*toc/k/60) ' minutes']);
        end
    end
else
    D = dijkstra(D, landmarks);
end

%%%%% Step 3: Construct low-dimensional embeddings (Classical MDS) %%%%%
disp('Constructing low-dimensional embeddings (Classical MDS)...');

%%%%% Remove outliers from graph %%%%%
disp(' Checking for outliers...');
if ((mode == 1) && (use_dijk == 0))
    [tmp, firsts] = min(D==INF);   % first point each point connects to
else
    [tmp, firsts] = min(D==inf);   % first point each point connects to
end
[comps, I, J] = unique(firsts);    % first point in each connected component
n_comps = length(comps);           % number of connected components
size_comps = sum((repmat(firsts,n_comps,1)==((1:n_comps)'*ones(1,N)))');   % size of each connected component
[tmp, comp_order] = sort(size_comps);   %% sort connected components by size
comps = comps(comp_order(end:-1:1));
size_comps = size_comps(comp_order(end:-1:1));
if (comp>n_comps)
    comp=1;   %% default: use largest component
end

Y.index = find(firsts==comps(comp));   %% list of points in relevant component
Y.index = setdiff(Y.index,find(isinf(min(D))));   % prune points that don't connect
                                                  % to any landmarks
N = length(Y.index);
[tmp, landmarks, land_ind] = intersect(landmarks,Y.index);   % list of landmarks in component
nl = length(landmarks);
D = full(D(landmarks,Y.index))';

disp([' Number of connected components in graph: ' num2str(n_comps)]);
disp([' Embedding component ' num2str(comp) ' with ' num2str(length(Y.index)) ' points.']);

dims = unique(min(dims,nl-1));   % don't embed in more dimensions than landmarks-1

if (nl==N)
    opt.disp = 0;
    [vec, val] = eigs(-.5*(D.^2 - sum(D.^2)'*ones(1,N)/N - ones(N,1)*sum(D.^2)/N + sum(sum(D.^2))/(N^2)), max(dims), 'LR', opt);
else
    subB = -.5*(D.^2 - sum(D'.^2)'*ones(1,nl)/nl - ones(N,1)*sum(D.^2)/N+sum(sum(D.^2))/(N*nl));
    opt.disp = 0;
    [alpha,beta] = eigs(subB'*subB, max(dims), 'LR', opt);
    val = beta.^(1/2);
    vec = subB*alpha*inv(val);
    clear subB alpha beta;
end

h = real(diag(val));
[foo,sorth] = sort(h);
sorth = sorth(end:-1:1);
val = real(diag(val(sorth,sorth)));
vec = vec(:,sorth);

D = reshape(D,N*nl,1);
for di = 1:length(dims)
    Y.coords{di} = real(vec(:,1:dims(di)).*(ones(N,1)*sqrt(val(1:dims(di)))'))';
    r2 = 1-corrcoef(reshape(real(L2_distance(Y.coords{di}, Y.coords{di}(:,land_ind))),N*nl,1),D).^2;
    R(di) = r2(2,1);
    if (verbose == 1)
        disp([' Isomap on ' num2str(N) ' points with dimensionality ' num2str(dims(di)) ' --> residual variance = ' num2str(R(di))]);
    end
end

clear D;

%%%%%%%%%%%%%%%%%% Graphics %%%%%%%%%%%%%%%%%%

if (displ==1)
    %%%%% Plot fall-off of residual variance with dimensionality %%%%%
    figure;
    hold on
    plot(dims, R, 'bo');
    plot(dims, R, 'b-');
    hold off
    ylabel('Residual variance');
    xlabel('Isomap dimensionality');

    %%%%% Plot two-dimensional configuration %%%%%
    twod = find(dims==2);
    if ~isempty(twod)
        figure;
        hold on;
        plot(Y.coords{twod}(1,:), Y.coords{twod}(2,:), 'ro');
        if (overlay == 1)
            gplot(E(Y.index, Y.index), [Y.coords{twod}(1,:); Y.coords{twod}(2,:)]');
            title('Two-dimensional Isomap embedding (with neighborhood graph).');
        else
            title('Two-dimensional Isomap.');
        end
        hold off;
    end
end

return;
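% -------------------------------------------------------------------------
% Editor's usage sketch (not part of the original file). A minimal call with
% a full distance matrix on synthetic data; assumes L2_distance from the same
% Isomap toolbox is on the path:
%
%   X = [rand(2,500); 0.01*randn(1,500)];   % noisy 2-D manifold in 3-D
%   D = L2_distance(X, X);                  % full N x N input-space distances
%   opts.dims = 1:5;                        % embedding dimensionalities to try
%   opts.landmarks = 1:10:500;              % landmark MDS (experimental option)
%   opts.dijkstra = 0;                      % use Floyd if the dijkstra MEX is unavailable
%   [Y, R, E] = IsomapII(D, 'k', 7, opts);  % k = 7 nearest neighbours
%   Y.coords{2}                             % 2-D coordinates of the embedded points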
{"author": "vigente", "repo": "gerardus", "sha": "4d7c5195b826967781f1bb967872410e66b7cd3d", "save_path": "github-repos/MATLAB/vigente-gerardus", "path": "github-repos/MATLAB/vigente-gerardus/gerardus-4d7c5195b826967781f1bb967872410e66b7cd3d/matlab/ThirdPartyToolbox/IsomapII.m"}
from typing import Any, Protocol, Sized, Tuple, TypeVar, Union, Type import numpy as np Num = Union[float, int, complex] R = TypeVar('R', bound='MathRelation') R2 = TypeVar('R2', bound='MathRelation') SP = TypeVar('SP', bound='MathSpectrum') S = TypeVar('S', bound='MathSignal') SPRN = Union['MathSpectrum', 'MathRelation', Num] SSPR = Union['MathSpectrum', 'MathSignal', 'MathRelation'] SSPRN = Union['MathSpectrum', 'MathSignal', 'MathRelation', Num] class MathRelation(Sized, Protocol): _x: np.ndarray _y: np.ndarray def get_data(self) -> Tuple[np.ndarray, np.ndarray]: ... def max(self) -> Num: ... def min(self) -> Num: ... def get_norm(self) -> float: ... def select_data( self: R, x_start: Num = None, x_end: Num = None, **kwargs ) -> R: ... def exp(self: R, **kwargs) -> R: ... def diff(self: R, **kwargs) -> R: ... def integrate(self: R, **kwargs) -> R: ... def interpolate_extrapolate(self: R, new_x: np.ndarray, **kwargs) -> R: ... def shift(self: R, x_shift: Num = 0, **kwargs) -> R: ... def __add__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __radd__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __sub__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __rsub__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __mul__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __rmul__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __truediv__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __rtruediv__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __pow__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __rpow__(self: R, other: Union['MathRelation', Num], **kwargs) -> R:... def __iadd__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __isub__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __imul__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __idiv__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... def __ipow__(self: R, other: Union['MathRelation', Num], **kwargs) -> R: ... @staticmethod def equalize(r1: R, r2: R2) -> Tuple[R, R2]: ... @classmethod def correlate(cls: Type[R], r1: 'MathRelation', r2: 'MathRelation', **kwargs) -> R: ... @classmethod def convolve(cls: Type[R], r1: 'MathRelation', r2: 'MathRelation', **kwargs) -> R: ... class MathSpectrum(MathRelation, Protocol): spectrum: Any def get_signal(self, recalculate=False, start_time: float = None) -> 'MathSignal':... def get_amp_spectrum(self: R, **kwargs) -> 'MathRelation': ... def get_phase_spectrum(self: R, **kwargs) -> 'MathRelation': ... def get_reverse_filter(self: SP, percent: Union[float, int] = 5., subtrack_phase=True, f_start: float = None, f_end: float = None, **kwargs) -> SP: ... def add_phase(self: SP, other: SSPR, **kwargs) -> SP: ... def sub_phase(self: SP, other: SSPR, **kwargs) -> SP: ... @classmethod def get_spectrum_from_amp_phase(cls: Type[SP], s1: MathRelation, s2: MathRelation, **kwargs) -> SP: ... @classmethod def convolve(cls: Type[SP], r1: SSPR, r2: SSPR, **kwargs) -> SP: ... @classmethod def correlate(cls: Type[SP], r1: SSPR, r2: SSPR, **kwargs) -> SP: ... def __add__(self: SP, a: SSPRN, **kwargs) -> SP: ... def __sub__(self: SP, a: SSPRN, **kwargs) -> SP: ... def __mul__(self: SP, a: SSPRN, **kwargs) -> SP: ... def __truediv__(self: SP, a: SSPRN, **kwargs) -> SP: ... def __pow__(self: SP, a: SSPRN, **kwargs) -> SP: ... 
class MathSignal(MathRelation, Protocol): def get_spectrum(self, recalculate=False, is_start_zero=False) -> 'MathSpectrum': ... def get_reverse_signal(self: S, percent: Union[float, int] = 5., subtrack_phase: bool = True, f_start: float = None, f_end: float = None, **kwargs) -> S: ... def add_phase(self: S, other: SSPR, **kwargs) -> S: ... def sub_phase(self: S, other: SSPR, **kwargs) -> S: ... @classmethod def convolve(cls: Type[S], r1: SSPR, r2: SSPR, **kwargs) -> S: ... @classmethod def correlate(cls: Type[S], r1: SSPR, r2: SSPR, **kwargs) -> S: ... def __add__(self: S, a: SSPRN, **kwargs) -> S: ... def __sub__(self: S, a: SSPRN, **kwargs) -> S: ... def __mul__(self: S, a: SSPRN, **kwargs) -> S: ... def __truediv__(self: S, a: SSPRN, **kwargs) -> S: ... def __pow__(self: S, a: SSPRN, **kwargs) -> S: ... class MathSpectrogram(Protocol): t: np.ndarray f: np.ndarray S: np.ndarray class MathSweep(MathSignal, Protocol): f_t: MathRelation a_t: MathRelation spectrogram: MathSpectrogram aprior_signal: MathSignal
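# Editor's sketch (not part of the original module): these Protocols are meant
# to be satisfied structurally, so any class implementing the listed methods
# type-checks as a MathRelation without inheriting from it. A generic helper
# written against the Protocol (`normalize` below is hypothetical) stays
# agnostic of the concrete signal/spectrum classes defined elsewhere:


def normalize(r: R) -> R:
    """Scale a relation by its norm; relies only on Protocol methods."""
    return r / r.get_norm()  # dispatches to the implementation's __truediv__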
{"hexsha": "43b7bf97064d0bc75cca96b2ba3eb811f5640d27", "size": 5390, "ext": "py", "lang": "Python", "max_stars_repo_path": "compose_signals/math_protocols.py", "max_stars_repo_name": "Omnivanitate/sweep_design", "max_stars_repo_head_hexsha": "00c20066d83a2eebf8402294b413737f49a97564", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "compose_signals/math_protocols.py", "max_issues_repo_name": "Omnivanitate/sweep_design", "max_issues_repo_head_hexsha": "00c20066d83a2eebf8402294b413737f49a97564", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compose_signals/math_protocols.py", "max_forks_repo_name": "Omnivanitate/sweep_design", "max_forks_repo_head_hexsha": "00c20066d83a2eebf8402294b413737f49a97564", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1139240506, "max_line_length": 92, "alphanum_fraction": 0.5461966605, "include": true, "reason": "import numpy", "num_tokens": 1590}
C
       real*4 function ct_lin_inv_grey( rgb, ci, cimin, cimax )
C      --------------------------------------------------------
C
C Function defining a standard linear inverted grey-scale.
C
C ct_lin_inv_grey = (cimax-ci)/(cimax-cimin)
C
*-
       integer   rgb, ci, cimin, cimax

       ct_lin_inv_grey = (float(cimax-ci)/float(cimax-cimin))

       end
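C
C Editor's sketch (not part of the original file): a tiny driver showing the
C mapping for a hypothetical colour-index range 16..255. Note the rgb
C argument is accepted but unused by the function. Expected output:
C 1.0 (lowest index maps to white) then 0.0 (highest index maps to black).
C
       program demo_grey
       real*4 ct_lin_inv_grey
       write(*,*) ct_lin_inv_grey( 0, 16, 16, 255 )
       write(*,*) ct_lin_inv_grey( 0, 255, 16, 255 )
       end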
{"hexsha": "137592fb37fc80a80dc9a2d561c1e84bbbf1ea60", "size": 365, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "graphic_lib/ct_lin_inv_grey.f", "max_stars_repo_name": "CavendishAstrophysics/anmap", "max_stars_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-09-01T12:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2015-09-01T12:40:45.000Z", "max_issues_repo_path": "graphic_lib/ct_lin_inv_grey.f", "max_issues_repo_name": "CavendishAstrophysics/anmap", "max_issues_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphic_lib/ct_lin_inv_grey.f", "max_forks_repo_name": "CavendishAstrophysics/anmap", "max_forks_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0714285714, "max_line_length": 63, "alphanum_fraction": 0.5397260274, "num_tokens": 99}
staticserver(dir::AbstractString="."; cache::Int=0) = (r::Resource, req, id) -> begin filepath = joinpath(dir, req[:path]...) ext = splitext(filepath)[2][2:end] isfile(filepath) || return Response(404) mt = mtime(filepath) |> Dates.unix2datetime mt -= Dates.Millisecond(Dates.millisecond(mt)) if "If-Modified-Since" in keys(req[:headers]) try # ignore any error if mt <= DateTime(req[:headers]["If-Modified-Since"], Dates.RFC1123Format) return Response(304) end catch end end res = open(read, filepath) |> Response res.headers["Content-Type"] = get(HttpServer.mimetypes, ext, "application/octet-stream") # text/plain should be better? res.headers["Last-Modified"] = Dates.format(mt, Dates.RFC1123Format) res end
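# Editor's sketch (not part of the original file): `staticserver` returns a
# (resource, request, id) handler closure meant to be registered as a
# Restful.jl route; the registration call itself is framework-specific and
# omitted here. Building the closure is side-effect free:
handler = staticserver("./public"; cache=0)   # serve files under ./public
# The handler answers 404 for missing files and 304 when the client's
# If-Modified-Since header is not older than the file's mtime. Note that the
# `cache` keyword is accepted but unused in the body above.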
{"hexsha": "90c721e65ce417ebac8a5eeb4af1aad9431a7abb", "size": 900, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils/staticserver.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Restful.jl-a0979ab6-dee4-51c8-812d-69046455aaa6", "max_stars_repo_head_hexsha": "b84b54bac6987176d926abc1b319fd69648b59a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2016-04-19T01:36:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-22T05:44:40.000Z", "max_issues_repo_path": "src/utils/staticserver.jl", "max_issues_repo_name": "ylxdzsw/Rest.jl", "max_issues_repo_head_hexsha": "3fe724e61ca32dfe3f6f4be6549b6dff48ef4c65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2016-06-11T15:16:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:34:00.000Z", "max_forks_repo_path": "src/utils/staticserver.jl", "max_forks_repo_name": "ylxdzsw/Rest.jl", "max_forks_repo_head_hexsha": "3fe724e61ca32dfe3f6f4be6549b6dff48ef4c65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-07-12T02:16:32.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-26T18:19:46.000Z", "avg_line_length": 36.0, "max_line_length": 127, "alphanum_fraction": 0.5777777778, "num_tokens": 216}
// Copyright (c) 2013 The Dzcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

//
// Unit tests for alert system
//

#include "alert.h"
#include "chain.h"
#include "chainparams.h"
#include "clientversion.h"
#include "data/alertTests.raw.h"
#include "main.h"
#include "serialize.h"
#include "streams.h"
#include "util.h"
#include "utilstrencodings.h"

#include "test/test_dzcoin.h"

#include <fstream>

#include <boost/filesystem/operations.hpp>
#include <boost/foreach.hpp>
#include <boost/test/unit_test.hpp>

#if 0
//
// alertTests contains 7 alerts, generated with this code:
// (SignAndSave code not shown, alert signing key is secret)
//
{
    CAlert alert;
    alert.nRelayUntil   = 60;
    alert.nExpiration   = 24 * 60 * 60;
    alert.nID           = 1;
    alert.nCancel       = 0;   // cancels previous messages up to this ID number
    alert.nMinVer       = 0;   // These versions are protocol versions
    alert.nMaxVer       = 999001;
    alert.nPriority     = 1;
    alert.strComment    = "Alert comment";
    alert.strStatusBar  = "Alert 1";

    SignAndSave(alert, "test/alertTests");

    alert.setSubVer.insert(std::string("/Satoshi:0.1.0/"));
    alert.strStatusBar  = "Alert 1 for Satoshi 0.1.0";
    SignAndSave(alert, "test/alertTests");

    alert.setSubVer.insert(std::string("/Satoshi:0.2.0/"));
    alert.strStatusBar  = "Alert 1 for Satoshi 0.1.0, 0.2.0";
    SignAndSave(alert, "test/alertTests");

    alert.setSubVer.clear();
    ++alert.nID;
    alert.nCancel = 1;
    alert.nPriority = 100;
    alert.strStatusBar  = "Alert 2, cancels 1";
    SignAndSave(alert, "test/alertTests");

    alert.nExpiration += 60;
    ++alert.nID;
    SignAndSave(alert, "test/alertTests");

    ++alert.nID;
    alert.nMinVer = 11;
    alert.nMaxVer = 22;
    SignAndSave(alert, "test/alertTests");

    ++alert.nID;
    alert.strStatusBar  = "Alert 2 for Satoshi 0.1.0";
    alert.setSubVer.insert(std::string("/Satoshi:0.1.0/"));
    SignAndSave(alert, "test/alertTests");

    ++alert.nID;
    alert.nMinVer = 0;
    alert.nMaxVer = 999999;
    alert.strStatusBar  = "Evil Alert'; /bin/ls; echo '";
    alert.setSubVer.clear();
    SignAndSave(alert, "test/alertTests");
}
#endif

struct ReadAlerts : public TestingSetup
{
    ReadAlerts()
    {
        std::vector<unsigned char> vch(alert_tests::alertTests, alert_tests::alertTests + sizeof(alert_tests::alertTests));
        CDataStream stream(vch, SER_DISK, CLIENT_VERSION);
        try {
            while (!stream.eof())
            {
                CAlert alert;
                stream >> alert;
                alerts.push_back(alert);
            }
        }
        catch (const std::exception&) { }
    }
    ~ReadAlerts() { }

    static std::vector<std::string> read_lines(boost::filesystem::path filepath)
    {
        std::vector<std::string> result;

        std::ifstream f(filepath.string().c_str());
        std::string line;
        while (std::getline(f,line))
            result.push_back(line);

        return result;
    }

    std::vector<CAlert> alerts;
};

BOOST_FIXTURE_TEST_SUITE(Alert_tests, ReadAlerts)

BOOST_AUTO_TEST_CASE(AlertApplies)
{
    SetMockTime(11);
    const std::vector<unsigned char>& alertKey = Params(CBaseChainParams::MAIN).AlertKey();

    BOOST_FOREACH(const CAlert& alert, alerts)
    {
        BOOST_CHECK(alert.CheckSignature(alertKey));
    }

    BOOST_CHECK(alerts.size() >= 3);

    // Matches:
    BOOST_CHECK(alerts[0].AppliesTo(1, ""));
    BOOST_CHECK(alerts[0].AppliesTo(999001, ""));
    BOOST_CHECK(alerts[0].AppliesTo(1, "/Satoshi:11.11.11/"));

    BOOST_CHECK(alerts[1].AppliesTo(1, "/Satoshi:0.1.0/"));
    BOOST_CHECK(alerts[1].AppliesTo(999001, "/Satoshi:0.1.0/"));

    BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.1.0/"));
    BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.2.0/"));

    // Don't match:
    BOOST_CHECK(!alerts[0].AppliesTo(-1, ""));
    BOOST_CHECK(!alerts[0].AppliesTo(999002, ""));

    BOOST_CHECK(!alerts[1].AppliesTo(1, ""));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0"));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.1.0"));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0/"));
    BOOST_CHECK(!alerts[1].AppliesTo(-1, "/Satoshi:0.1.0/"));
    BOOST_CHECK(!alerts[1].AppliesTo(999002, "/Satoshi:0.1.0/"));
    BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.2.0/"));

    BOOST_CHECK(!alerts[2].AppliesTo(1, "/Satoshi:0.3.0/"));

    SetMockTime(0);
}

BOOST_AUTO_TEST_CASE(AlertNotify)
{
    SetMockTime(11);
    const std::vector<unsigned char>& alertKey = Params(CBaseChainParams::MAIN).AlertKey();

    boost::filesystem::path temp = GetTempPath() / "alertnotify.txt";
    boost::filesystem::remove(temp);

    mapArgs["-alertnotify"] = std::string("echo %s >> ") + temp.string();

    BOOST_FOREACH(CAlert alert, alerts)
        alert.ProcessAlert(alertKey, false);

    std::vector<std::string> r = read_lines(temp);
    BOOST_CHECK_EQUAL(r.size(), 4u);

// Windows built-in echo semantics are different than posixy shells. Quotes and
// whitespace are printed literally.
#ifndef WIN32
    BOOST_CHECK_EQUAL(r[0], "Alert 1");
    BOOST_CHECK_EQUAL(r[1], "Alert 2, cancels 1");
    BOOST_CHECK_EQUAL(r[2], "Alert 2, cancels 1");
    BOOST_CHECK_EQUAL(r[3], "Evil Alert; /bin/ls; echo "); // single-quotes should be removed
#else
    BOOST_CHECK_EQUAL(r[0], "'Alert 1' ");
    BOOST_CHECK_EQUAL(r[1], "'Alert 2, cancels 1' ");
    BOOST_CHECK_EQUAL(r[2], "'Alert 2, cancels 1' ");
    BOOST_CHECK_EQUAL(r[3], "'Evil Alert; /bin/ls; echo ' ");
#endif
    boost::filesystem::remove(temp);

    SetMockTime(0);
}

static bool falseFunc() { return false; }

BOOST_AUTO_TEST_CASE(PartitionAlert)
{
    // Test PartitionCheck
    CCriticalSection csDummy;
    CBlockIndex indexDummy[100];
    CChainParams& params = Params(CBaseChainParams::MAIN);
    int64_t nPowTargetSpacing = params.GetConsensus().nPowTargetSpacing;

    // Generate fake blockchain timestamps relative to
    // an arbitrary time:
    int64_t now = 1427379054;
    SetMockTime(now);
    for (int i = 0; i < 100; i++)
    {
        indexDummy[i].phashBlock = NULL;
        if (i == 0) indexDummy[i].pprev = NULL;
        else indexDummy[i].pprev = &indexDummy[i-1];
        indexDummy[i].nHeight = i;
        indexDummy[i].nTime = now - (100-i)*nPowTargetSpacing;
        // Other members don't matter, the partition check code doesn't
        // use them
    }

    // Test 1: chain with blocks every nPowTargetSpacing seconds,
    // as normal, no worries:
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK(strMiscWarning.empty());

    // Test 2: go 3.5 hours without a block, expect a warning:
    now += 3*60*60+30*60;
    SetMockTime(now);
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK(!strMiscWarning.empty());
    BOOST_TEST_MESSAGE(std::string("Got alert text: ")+strMiscWarning);
    strMiscWarning = "";

    // Test 3: test the "partition alerts only go off once per day"
    // code:
    now += 60*10;
    SetMockTime(now);
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK(strMiscWarning.empty());

    // Test 4: get 2.5 times as many blocks as expected:
    now += 60*60*24; // Pretend it is a day later
    SetMockTime(now);
    int64_t quickSpacing = nPowTargetSpacing*2/5;
    for (int i = 0; i < 100; i++) // Tweak chain timestamps:
        indexDummy[i].nTime = now - (100-i)*quickSpacing;
    PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
    BOOST_CHECK(!strMiscWarning.empty());
    BOOST_TEST_MESSAGE(std::string("Got alert text: ")+strMiscWarning);
    strMiscWarning = "";

    SetMockTime(0);
}

BOOST_AUTO_TEST_SUITE_END()
{"hexsha": "2d9cc1ebe5e8dcf9c61c7fc2033f352cd4dadafa", "size": 7888, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/test/alert_tests.cpp", "max_stars_repo_name": "dzcoin/DzCoinMiningAlgorithm", "max_stars_repo_head_hexsha": "b0294cf5ac893fe907b08105f1aa826c3da464cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test/alert_tests.cpp", "max_issues_repo_name": "dzcoin/DzCoinMiningAlgorithm", "max_issues_repo_head_hexsha": "b0294cf5ac893fe907b08105f1aa826c3da464cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/alert_tests.cpp", "max_forks_repo_name": "dzcoin/DzCoinMiningAlgorithm", "max_forks_repo_head_hexsha": "b0294cf5ac893fe907b08105f1aa826c3da464cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3384615385, "max_line_length": 123, "alphanum_fraction": 0.6488336714, "num_tokens": 2259}
/*
 *  Copyright 2014 Matthias Fuchs
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include "stromx/raspi/test/GpioTriggerTest.h"

#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <cppunit/TestAssert.h>
#include "stromx/raspi/GpioTrigger.h"

CPPUNIT_TEST_SUITE_REGISTRATION (stromx::raspi::GpioTriggerTest);

namespace stromx
{
    using namespace runtime;

    namespace raspi
    {
        void GpioTriggerTest::setUp()
        {
            m_operator = new OperatorTester(new GpioTrigger());
            m_operator->initialize();
            m_operator->setParameter(GpioTrigger::GPIO, Enum(4));
            m_operator->activate();
        }

        void GpioTriggerTest::testInterrupt()
        {
            // interrupt the execution in a separate thread
            boost::thread t(boost::bind(&GpioTriggerTest::interruptExecution, this));

            // wait for the interrupt
            CPPUNIT_ASSERT_THROW(m_operator->getOutputData(GpioTrigger::OUTPUT), Interrupt);

            t.join();
        }

        void GpioTriggerTest::tearDown()
        {
            delete m_operator;
        }

        void GpioTriggerTest::interruptExecution()
        {
            boost::this_thread::sleep_for(boost::chrono::seconds(1));
            m_operator->interrupt();
        }
    }
}
{"hexsha": "857e7ca2d58052c5bc2ab96d61550e39827c5c8c", "size": 1876, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "stromx/raspi/test/GpioTriggerTest.cpp", "max_stars_repo_name": "uboot/stromx-raspi", "max_stars_repo_head_hexsha": "38411b6a479c1e82adffea3e5bde2fdf246d920c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stromx/raspi/test/GpioTriggerTest.cpp", "max_issues_repo_name": "uboot/stromx-raspi", "max_issues_repo_head_hexsha": "38411b6a479c1e82adffea3e5bde2fdf246d920c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stromx/raspi/test/GpioTriggerTest.cpp", "max_forks_repo_name": "uboot/stromx-raspi", "max_forks_repo_head_hexsha": "38411b6a479c1e82adffea3e5bde2fdf246d920c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7777777778, "max_line_length": 92, "alphanum_fraction": 0.6481876333, "num_tokens": 414}
halve  <- function(a) floor(a / 2)
double <- function(a) a * 2
iseven <- function(a) (a %% 2) == 0

ethiopicmult <- function(x, y) {
  # seed with x if y starts odd, then fold in the doubled x at every odd halving
  res <- ifelse(iseven(y), 0, x)
  while (y != 1) {
    x <- double(x)
    y <- halve(y)
    if (!iseven(y)) res <- res + x
  }
  return(res)
}

print(ethiopicmult(17, 34))  # 578
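# Illustrative self-check (not part of the Rosetta Code task): the result must
# agree with ordinary multiplication, and the method is symmetric in which
# factor gets halved.
stopifnot(ethiopicmult(17, 34) == 17 * 34)  # 578
stopifnot(ethiopicmult(34, 17) == 578)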
{"hexsha": "d105b32b1d17f57101f52179769e2e14da8c0318", "size": 266, "ext": "r", "lang": "R", "max_stars_repo_path": "Task/Ethiopian-multiplication/R/ethiopian-multiplication-2.r", "max_stars_repo_name": "LaudateCorpus1/RosettaCodeData", "max_stars_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_stars_repo_licenses": ["Info-ZIP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "Task/Ethiopian-multiplication/R/ethiopian-multiplication-2.r", "max_issues_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_issues_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_issues_repo_licenses": ["Info-ZIP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Task/Ethiopian-multiplication/R/ethiopian-multiplication-2.r", "max_forks_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_forks_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_forks_repo_licenses": ["Info-ZIP"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 16.625, "max_line_length": 31, "alphanum_fraction": 0.6165413534, "num_tokens": 102}
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! ! EVB-QMDFF - RPMD molecular dynamics and rate constant calculations on ! black-box generated potential energy surfaces ! ! Copyright (c) 2021 by Julien Steffen (steffen@pctc.uni-kiel.de) ! Stefan Grimme (grimme@thch.uni-bonn.de) (QMDFF code) ! ! Permission is hereby granted, free of charge, to any person obtaining a ! copy of this software and associated documentation files (the "Software"), ! to deal in the Software without restriction, including without limitation ! the rights to use, copy, modify, merge, publish, distribute, sublicense, ! and/or sell copies of the Software, and to permit persons to whom the ! Software is furnished to do so, subject to the following conditions: ! ! The above copyright notice and this permission notice shall be included in ! all copies or substantial portions of the Software. ! ! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ! THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ! FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ! DEALINGS IN THE SOFTWARE. ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! ! function readaa: analyze output file line for Turbomole ! ! part of QMDFF ! function readaa(a,istart,iend,iend2) implicit real(kind=8) (a-h,o-z) real(kind=8)::readaa character(len=*)::a NINE=ICHAR('9') IZERO=ICHAR('0') MINUS=ICHAR('-') IDOT=ICHAR('.') ND=ICHAR('D') NE=ICHAR('E') IBL=ICHAR(' ') iend=0 iend2=0 idig=0 c1=0 c2=0 one=1.d0 x = 1.d0 nl=len(a) do j=istart,nl-1 n=ichar(a(j:j)) m=ichar(a(j+1:j+1)) if (n.le.nine.and.n.ge.izero .or.n.eq.idot) goto 20 if (n.eq.minus.and.(m.le.nine.and.m.ge.izero & & .or. m.eq.idot)) goto 20 end do readaa=0.d0 return 20 continue iend=j do i=j,nl n=ichar(a(i:i)) if (n.le.nine.and.n.ge.izero) then idig=idig+1 if (idig.gt.10) goto 60 c1=c1*10+n-izero else if (n.eq.minus.and.i.eq.j) then one=-1.d0 else if (n.eq.idot) then goto 40 else goto 60 end if end do 40 continue idig=0 do ii=i+1,nl n=ichar(a(ii:ii)) if(n.le.nine.and.n.ge.izero) then idig=idig+1 if (idig.gt.10) goto 60 c2=c2*10+n-izero x = x /10 else if (n.eq.minus.and.ii.eq.i) then x=-x else goto 60 end if end do ! ! put the pieces together ! 60 continue readaa= one * ( c1 + c2 * x) do 55 j=iend,nl n=ichar(a(j:j)) iend2=j if (n.eq.ibl) return 55 if(n.eq.nd .or. n.eq.ne) goto 57 return 57 c1=0.0d0 one=1.0d0 do 31 i=j+1,nl n=ichar(a(i:i)) iend2=i if(n.eq.ibl)goto 70 if(n.le.nine.and.n.ge.izero) c1=c1*10.0d0+n-izero if(n.eq.minus)one=-1.0d0 31 continue 61 continue 70 readaa=readaa*10**(one*c1) return end function readaa
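!
!     minimal usage sketch (illustration only, not part of QMDFF): compile
!     this program together with readaa.f90; the sample string is a
!     hypothetical Turbomole-style output line
!
program demo_readaa
   implicit none
   real(kind=8)::readaa,val
   integer::iend,iend2
   val = readaa('  energy = -1.234D+02  ',1,iend,iend2)
   write(*,*) val   ! prints -123.4; iend marks where the number begins
end program demo_readaa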
{"hexsha": "82873909abc44bef81b69304153c50c6ea430cb4", "size": 3267, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/readaa.f90", "max_stars_repo_name": "Trebonius91/EVB-QMDFF", "max_stars_repo_head_hexsha": "8d03e1ad073becb0161b0377b630d7b65fe3c290", "max_stars_repo_licenses": ["MIT", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-13T15:27:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T15:27:13.000Z", "max_issues_repo_path": "src/readaa.f90", "max_issues_repo_name": "chrinide/EVB-QMDFF", "max_issues_repo_head_hexsha": "8d03e1ad073becb0161b0377b630d7b65fe3c290", "max_issues_repo_licenses": ["MIT", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/readaa.f90", "max_forks_repo_name": "chrinide/EVB-QMDFF", "max_forks_repo_head_hexsha": "8d03e1ad073becb0161b0377b630d7b65fe3c290", "max_forks_repo_licenses": ["MIT", "Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-14T03:51:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T03:51:49.000Z", "avg_line_length": 27.0, "max_line_length": 80, "alphanum_fraction": 0.6054484236, "num_tokens": 1043}
import numpy as np from numpy.testing import (TestCase, assert_array_equal, assert_equal, assert_almost_equal, assert_array_almost_equal, assert_raises) from numpy.testing.decorators import knownfailureif import astropy.cosmology from astropy import units as u try: from astropy.tests.helper import assert_quantity_allclose except ImportError: # Monkey patching failing travis test for numpy-1.8 def assert_quantity_allclose(x, y): x = x.to(y.unit) np.testing.assert_allclose(x.value, y.value) from NFW import mass_concentration from NFW.nfw import NFW class TestMc(TestCase): @classmethod def setup_class(cls): cls._cosmo = astropy.cosmology.FlatLambdaCDM(70, 0.3, Tcmb0=0) astropy.cosmology.default_cosmology.set(cls._cosmo) def test_duffy_concentration(self): m200 = 1e13, 5e13, 1e14, 1e15 zl = 1, 0.5, 1, 0.3 result = (3.71065258, 3.71071859, 3.05809022, 3.08589409) c = mass_concentration.duffy_concentration(m200, zl, self._cosmo) assert_almost_equal(c, result) assert(isinstance(c, np.ndarray)) # Assure results stay the same m200 = u.Quantity(m200, u.solMass) c = mass_concentration.duffy_concentration(m200, zl, self._cosmo) assert_almost_equal(c, result) c = mass_concentration.duffy_concentration(m200[0], zl[0], self._cosmo) assert(isinstance(c, float)) def test_dolag_concentration(self): m200 = 1e13, 5e13, 1e14, 1e15 zl = 1, 0.5, 1, 0.3 result = (6.28910161, 7.11594213, 4.97265823, 6.04888398) c = mass_concentration.dolag_concentration(m200, zl, self._cosmo) assert_almost_equal(c, result) assert(isinstance(c, np.ndarray)) # Assure results stay the same m200 = u.Quantity(m200, u.solMass) c = mass_concentration.dolag_concentration(m200, zl, self._cosmo) assert_almost_equal(c, result) c = mass_concentration.dolag_concentration(m200[0], zl[0], self._cosmo) assert(isinstance(c, float)) def _mdelta_to_mdelta_via_m200(self, m_in, func, overdensity_in, overdensity_out, z): m200 = mass_concentration.mdelta_to_m200(m_in, func, overdensity_in, (z, self._cosmo)) nfw = NFW(m200, func(m200, z, self._cosmo), z) m_out = nfw.mass_Delta(overdensity_out) return m_out def test_mdelta_to_mdelta(self): func = mass_concentration.duffy_concentration # Consistency z = 0.3 m_in = u.Quantity(5e14, u.solMass) mdelta = mass_concentration.mdelta_to_mdelta(5e14, func, 500, 200, (z, self._cosmo)) c = func(mdelta, z, self._cosmo) nfw = NFW(mdelta, c, z) m_out = nfw.mass_Delta(500) assert_quantity_allclose(m_in, m_out) mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 200, 500, (z, self._cosmo)) nfw = NFW(m_in, func(m_in, z, self._cosmo), z) mdelta2 = nfw.mass_Delta(500) assert_quantity_allclose(mdelta1, mdelta2) # common cases: m_in = u.Quantity(1e14, u.solMass) z = 0 mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 2500, 500, (z, self._cosmo)) mdelta2 = self._mdelta_to_mdelta_via_m200(m_in, func, 2500, 500, z) assert_quantity_allclose(mdelta1, mdelta2) # Test some extreme cases # first almost equal input and output overdensities m_in = u.Quantity(1e14, u.solMass) z = 1 m200 = mass_concentration.mdelta_to_mdelta(m_in, func, 199, 200, (z, self._cosmo)) m_out = mass_concentration.mdelta_to_mdelta(m200, func, 200, 199, (z, self._cosmo)) assert_quantity_allclose(m_in, m_out) # identical input/output overdensity mdelta = mass_concentration.mdelta_to_mdelta(1e14, func, 200, 200, (1, self._cosmo)) assert_equal(mdelta.value, 1e14) # Large overdensity_in, small overdensity_out m_in = 1e15 z = 0 mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 2500, 50, (z, self._cosmo)) mdelta2 = 
self._mdelta_to_mdelta_via_m200(m_in, func, 2500, 50, z) assert_quantity_allclose(mdelta1, mdelta2) # Small overdensity_in, large overdensity_out, small halo mass m_in = 1e9 z = 1 mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 50, 2500, (z, self._cosmo)) mdelta2 = self._mdelta_to_mdelta_via_m200(m_in, func, 50, 2500, z) assert_quantity_allclose(mdelta1, mdelta2) def test_mdelta_to_m200(self): m_in = u.Quantity(2e14, u.solMass) z = 0.2 func = mass_concentration.duffy_concentration delta_in = 450 # consistency with mdelta_to_mdelta md1 = mass_concentration.mdelta_to_m200(m_in, func, delta_in, (z, self._cosmo)) md2 = mass_concentration.mdelta_to_mdelta(m_in, func, delta_in, 200, (z, self._cosmo)) assert_quantity_allclose(md1, md2) # consistency with mass_Delta in NFW nfw = NFW(md1, func(md1, z, self._cosmo), z) m_out = nfw.mass_Delta(450) assert_quantity_allclose(m_in, m_out) def test_m200_to_mdelta(self): m_in = u.Quantity(4e14, u.solMass) z = 0.45 func = mass_concentration.duffy_concentration mdelta = mass_concentration.m200_to_mdelta(m_in, func, 500, (z, self._cosmo)) nfw = NFW(m_in, func(m_in, z, self._cosmo), z) m500 = nfw.mass_Delta(500) assert_quantity_allclose(mdelta, m500)
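
# Background note (a standard definition, not taken from this test file): a
# halo mass at overdensity Delta is the mass inside the radius where the mean
# enclosed density equals Delta times a reference density, here the critical
# density rho_c(z),
#
#     M_Delta = (4/3) * pi * Delta * rho_c(z) * r_Delta**3,
#
# which is the relation the mdelta_to_mdelta conversions exercised above
# effectively invert when moving between overdensities.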
{"hexsha": "1c1a3f2deab05cd6f7d6c05a27e0486fe24e5796", "size": 6499, "ext": "py", "lang": "Python", "max_stars_repo_path": "NFW/tests/test_mass_concentration.py", "max_stars_repo_name": "joergdietrich/NFW", "max_stars_repo_head_hexsha": "58b0ff6b5382461e6053e12c75d35543dd3f8b13", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2016-01-22T18:39:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-01T09:43:00.000Z", "max_issues_repo_path": "NFW/tests/test_mass_concentration.py", "max_issues_repo_name": "joergdietrich/NFW", "max_issues_repo_head_hexsha": "58b0ff6b5382461e6053e12c75d35543dd3f8b13", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2015-01-13T01:04:53.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-01T11:25:19.000Z", "max_forks_repo_path": "NFW/tests/test_mass_concentration.py", "max_forks_repo_name": "joergdietrich/NFW", "max_forks_repo_head_hexsha": "58b0ff6b5382461e6053e12c75d35543dd3f8b13", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-03-15T22:21:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-24T17:59:11.000Z", "avg_line_length": 42.2012987013, "max_line_length": 76, "alphanum_fraction": 0.5697799661, "include": true, "reason": "import numpy,from numpy,import astropy,from astropy", "num_tokens": 1720}
import multiprocessing import os import re import _pickle as pickle import tensorflow as tf import tensorflow.contrib.slim.nets import numpy as np from models import spotify PATH_MAGNATAGATUNE = 'datasets/magnatagatune' INPUT_SHAPE = (628, 128) CLASSES = [ 'classical', 'instrumental', 'electronica', 'techno', 'male voice', 'rock', 'ambient', 'female voice', 'opera', 'indian', 'choir', 'pop', 'heavy metal', 'jazz', 'new age', 'dance', 'country', 'eastern', 'baroque', 'funk', 'hard rock', 'trance', 'folk', 'oriental', 'medieval', 'irish', 'blues', 'middle eastern', 'punk', 'celtic', 'arabic', 'rap', 'industrial', 'world', 'hip hop', 'disco', 'soft rock', 'jungle', 'reggae', 'happy', ] BATCH_SIZE = 32 EPOCHS = 100 LEARNING_RATE = 1e-3 NUM_WORKERS = multiprocessing.cpu_count() def _load_pickle(filename): with open(filename, 'rb') as f: return pickle.load(f) def check_accuracy(sess, correct_prediction, is_training, dataset_init_op): """ Check the accuracy of the model on either train or val (depending on dataset_init_op). """ # Initialize the correct dataset sess.run(dataset_init_op) num_correct, num_samples = 0, 0 while True: try: correct_pred = sess.run(correct_prediction, {is_training: False}) num_correct += correct_pred.sum() num_samples += correct_pred.shape[0] except tf.errors.OutOfRangeError: break # Return the fraction of datapoints that were correctly classified acc = float(num_correct) / num_samples return acc def _list_dataset(dataset_name): dataset_path = os.path.join(PATH_MAGNATAGATUNE, dataset_name) labels_file = os.path.join(dataset_path, 'labels.pickle') filenames = [os.path.join(dataset_path, f) for f in os.listdir(dataset_path) if re.match(r'\d+\.tfrecord', f)] print(filenames, dataset_path) labels = _load_pickle(labels_file) return filenames, np.asarray(labels) def _parse_function(example_proto): features = { 'X': tf.FixedLenFeature((), tf.string), 'y': tf.FixedLenFeature((), tf.string)} parsed_features = tf.parse_single_example(example_proto, features) spectogram = tf.decode_raw(parsed_features['X'], tf.float64) spectogram = tf.cast(spectogram, tf.float32) spectogram = tf.reshape(spectogram, INPUT_SHAPE) label = tf.decode_raw(parsed_features['y'], tf.uint8) label = tf.reshape(label, [len(CLASSES)]) return spectogram, label def _init_datasets(train_filenames, val_filenames): train_filenames = tf.constant(train_filenames) train_dataset = tf.contrib.data.TFRecordDataset(train_filenames) train_dataset = train_dataset.map(_parse_function) train_dataset = train_dataset.shuffle(buffer_size=10000) batched_train_dataset = train_dataset.batch(BATCH_SIZE) val_filenames = tf.constant(val_filenames) val_dataset = tf.contrib.data.TFRecordDataset(val_filenames) val_dataset = val_dataset.map(_parse_function) val_dataset = val_dataset.shuffle(buffer_size=10000) batched_val_dataset = val_dataset.batch(BATCH_SIZE) iterator = tf.contrib.data.Iterator.from_structure( batched_train_dataset.output_types, batched_train_dataset.output_shapes) spectograms, labels = iterator.get_next() train_init_op = iterator.make_initializer(batched_train_dataset) val_init_op = iterator.make_initializer(batched_val_dataset) return spectograms, labels, train_init_op, val_init_op def train(initial_learning_rate, learning_rate_decay=0.96): train_filenames, train_labels = _list_dataset('train') val_filenames, val_labels = _list_dataset('val') graph = tf.Graph() with graph.as_default(): spectograms, labels, train_init_op, val_init_op = _init_datasets( train_filenames, val_filenames) is_training = tf.placeholder(tf.bool) 
output_layer = spotify.get_tf(spectograms, len(CLASSES), activation='sigmoid')
        model_variables = tf.contrib.framework.get_variables('spotify_tf')
        model_init = tf.variables_initializer(model_variables)

        tf.losses.mean_squared_error(labels=labels, predictions=output_layer)
        loss = tf.losses.get_total_loss()

        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(
            initial_learning_rate, global_step, 100000, learning_rate_decay,
            staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        # pass global_step so the exponential decay schedule actually advances
        train_op = optimizer.minimize(loss, global_step=global_step)

        correct_prediction = tf.equal(
            tf.round(tf.nn.sigmoid(output_layer)),
            tf.round(tf.cast(labels, tf.float32)))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        tf.get_default_graph().finalize()

    with tf.Session(graph=graph) as sess:
        sess.run(model_init)

        for epoch in range(EPOCHS):
            print('Epoch %d / %d' % (epoch + 1, EPOCHS))
            sess.run(train_init_op)
            while True:
                try:
                    _ = sess.run(train_op, {is_training: True})
                except tf.errors.OutOfRangeError:
                    break

            # Check accuracy on the train and val sets every epoch
            train_acc = check_accuracy(sess, correct_prediction, is_training,
                                       train_init_op)
            val_acc = check_accuracy(sess, correct_prediction, is_training,
                                     val_init_op)
            print(' Train accuracy: %f' % train_acc)
            print(' Val accuracy: %f\n' % val_acc)


if __name__ == '__main__':
    train(LEARNING_RATE)
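

def _demo_multilabel_accuracy():
    # Illustration only (not called anywhere in this script): the element-wise
    # multi-label accuracy above, redone with plain numpy. Predictions are
    # thresholded and compared tag by tag, then averaged over all tags.
    probs = np.array([[0.9, 0.2], [0.4, 0.7]])
    labels = np.array([[1, 0], [1, 1]])
    acc = np.mean((probs > 0.5) == labels.astype(bool))  # 3 of 4 tags correct
    assert acc == 0.75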
{"hexsha": "4b4365f85b71ef96203cd313f0f8e1cef72f11b1", "size": 5689, "ext": "py", "lang": "Python", "max_stars_repo_path": "project/train_magnatagatune.py", "max_stars_repo_name": "miguelfrde/cs231n", "max_stars_repo_head_hexsha": "c0dc0a505d7a8a6af3439fad33068dfe1428d2e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project/train_magnatagatune.py", "max_issues_repo_name": "miguelfrde/cs231n", "max_issues_repo_head_hexsha": "c0dc0a505d7a8a6af3439fad33068dfe1428d2e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2019-12-16T20:19:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:11:53.000Z", "max_forks_repo_path": "project/train_magnatagatune.py", "max_forks_repo_name": "miguelfrde/cs231n", "max_forks_repo_head_hexsha": "c0dc0a505d7a8a6af3439fad33068dfe1428d2e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1830065359, "max_line_length": 92, "alphanum_fraction": 0.6897521533, "include": true, "reason": "import numpy", "num_tokens": 1310}
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________

## functions for adding the basic status variables

from pyomo.environ import *
import math
from .uc_utils import add_model_attr

component_name = 'status_vars'

def _is_relaxed(model):
    if hasattr(model, 'relax_binaries') and model.relax_binaries:
        return True
    else:
        return False

def _add_unit_on_vars(model, relaxed=False):
    # indicator variables for each generator, at each time period.
    if relaxed:
        model.UnitOn = Var(model.ThermalGenerators, model.TimePeriods, within=UnitInterval)
    else:
        model.UnitOn = Var(model.ThermalGenerators, model.TimePeriods, within=Binary)

def _add_unit_start_vars(model, relaxed=False):
    # unit start
    if relaxed:
        model.UnitStart=Var(model.ThermalGenerators,model.TimePeriods, within=UnitInterval)
    else:
        model.UnitStart=Var(model.ThermalGenerators,model.TimePeriods, within=Binary)

def _add_unit_stop_vars(model, relaxed=False):
    if relaxed:
        model.UnitStop=Var(model.ThermalGenerators,model.TimePeriods, within=UnitInterval)
    else:
        model.UnitStop=Var(model.ThermalGenerators,model.TimePeriods, within=Binary)

@add_model_attr(component_name, requires = {'data_loader': None} )
def CA_1bin_vars(model):
    '''
    This adds only a binary variable for unit-on, as in

    Carrion, M. and Arroyo, J. (2006) A Computationally Efficient Mixed-Integer
    Linear Formulation for the Thermal Unit Commitment Problem. IEEE
    Transactions on Power Systems, Vol. 21, No. 3, Aug 2006.
    '''
    if _is_relaxed(model):
        _add_unit_on_vars(model, True)
    else:
        _add_unit_on_vars(model)

@add_model_attr(component_name, requires = {'data_loader': None} )
def garver_3bin_vars(model):
    '''
    This adds the common 3-binary variables per generator per time period.
    One for start, one for stop, and one for on, as originally proposed in

    L. L. Garver. Power generation scheduling by integer
    programming-development of theory. Power Apparatus and Systems, Part III.
    Transactions of the American Institute of Electrical Engineers, 81(3):
    730–734, April 1962. ISSN 0097-2460.
    '''
    if _is_relaxed(model):
        _add_unit_on_vars(model, True)
        _add_unit_start_vars(model, True)
        _add_unit_stop_vars(model, True)
    else:
        _add_unit_on_vars(model)
        _add_unit_start_vars(model)
        _add_unit_stop_vars(model)
    return

@add_model_attr(component_name, requires = {'data_loader': None} )
def garver_2bin_vars(model):
    '''
    This adds the unit start and unit on variables, and causes the unit stop
    variable to be projected out (see the identity sketched after this file).
    '''
    if _is_relaxed(model):
        _add_unit_on_vars(model, True)
        _add_unit_start_vars(model, True)
    else:
        _add_unit_on_vars(model)
        _add_unit_start_vars(model)

    # unit stop
    def unit_stop_expr_rule(m, g, t):
        if t == value(m.InitialTime):
            return m.UnitOnT0[g] - m.UnitOn[g,t] + m.UnitStart[g,t]
        return m.UnitOn[g,t-1] - m.UnitOn[g,t] + m.UnitStart[g,t]
    model.UnitStop=Expression(model.ThermalGenerators,model.TimePeriods, rule=unit_stop_expr_rule)

    return

@add_model_attr(component_name, requires = {'data_loader': None} )
def garver_3bin_relaxed_stop_vars(model):
    '''
    This adds the 3-binary variables, but relaxes the integrality on the stop
    variable, like the "MILP-3R" formulation from

    Carrion, M. and Arroyo, J. (2006) A Computationally Efficient Mixed-Integer
    Linear Formulation for the Thermal Unit Commitment Problem. IEEE
    Transactions on Power Systems, Vol. 21, No. 3, Aug 2006.
    '''
    if _is_relaxed(model):
        _add_unit_on_vars(model, True)
        _add_unit_start_vars(model, True)
    else:
        _add_unit_on_vars(model)
        _add_unit_start_vars(model)
    _add_unit_stop_vars(model, True)
    return

@add_model_attr(component_name, requires = {'data_loader': None} )
def ALS_state_transition_vars(model):
    '''
    These are the state-transition variables proposed in

    Atakan, Semih, Guglielmo Lulli, and Suvrajeet Sen. "A State Transition MIP
    Formulation for the Unit Commitment Problem." IEEE Transactions on Power
    Systems 33.1 (2018): 736-748.
    '''
    if _is_relaxed(model):
        model.UnitStayOn = Var(model.ThermalGenerators, model.TimePeriods, within=UnitInterval)
        _add_unit_start_vars(model, True)
        _add_unit_stop_vars(model, True)
    else:
        model.UnitStayOn = Var(model.ThermalGenerators, model.TimePeriods, within=Binary)
        _add_unit_start_vars(model)
        _add_unit_stop_vars(model)

    def unit_on_expr_rule(m, g, t):
        return m.UnitStayOn[g,t] + m.UnitStart[g,t]
    model.UnitOn = Expression(model.ThermalGenerators, model.TimePeriods, rule=unit_on_expr_rule)
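

def _demo_start_stop_identity():
    # Plain-Python illustration (no Pyomo objects, hypothetical trajectory) of
    # the start/stop identity that garver_2bin_vars exploits to project out
    # UnitStop: w[t] = u[t-1] - u[t] + v[t], with u = on, v = start, w = stop.
    u = [0, 1, 1, 0, 0, 1]                                        # toy on/off path, T0 state = 0
    v = [max(0, u[t] - (u[t-1] if t else 0)) for t in range(6)]   # starts
    w = [(u[t-1] if t else 0) - u[t] + v[t] for t in range(6)]    # implied stops
    assert w == [0, 0, 0, 1, 0, 0]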
{"hexsha": "dbbff64d6ac2ba73c45eb72549961987483a8402", "size": 5313, "ext": "py", "lang": "Python", "max_stars_repo_path": "egret/model_library/unit_commitment/status_vars.py", "max_stars_repo_name": "bknueven/Egret", "max_stars_repo_head_hexsha": "37567c1ec3bc0072b61124ce46ceb28add9ad539", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2019-03-28T09:57:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T05:24:25.000Z", "max_issues_repo_path": "egret/model_library/unit_commitment/status_vars.py", "max_issues_repo_name": "bknueven/Egret", "max_issues_repo_head_hexsha": "37567c1ec3bc0072b61124ce46ceb28add9ad539", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 139, "max_issues_repo_issues_event_min_datetime": "2019-04-01T16:50:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:29:04.000Z", "max_forks_repo_path": "egret/model_library/unit_commitment/status_vars.py", "max_forks_repo_name": "bknueven/Egret", "max_forks_repo_head_hexsha": "37567c1ec3bc0072b61124ce46ceb28add9ad539", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 44, "max_forks_repo_forks_event_min_datetime": "2019-04-01T13:20:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T14:50:18.000Z", "avg_line_length": 34.5, "max_line_length": 98, "alphanum_fraction": 0.7174854131, "include": true, "reason": "from pyomo", "num_tokens": 1335}
[STATEMENT] lemma Der_conc [simp]: shows "Deriv c (A @@ B) = (Deriv c A) @@ B \<union> (if [] \<in> A then Deriv c B else {})" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Deriv c (A @@ B) = Deriv c A @@ B \<union> (if [] \<in> A then Deriv c B else {}) [PROOF STEP] unfolding Deriv_def conc_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. {xs. c # xs \<in> {xs @ ys |xs ys. xs \<in> A \<and> ys \<in> B}} = {xs @ ys |xs ys. xs \<in> {xs. c # xs \<in> A} \<and> ys \<in> B} \<union> (if [] \<in> A then {xs. c # xs \<in> B} else {}) [PROOF STEP] by (auto simp add: Cons_eq_append_conv)
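In conventional Brzozowski-derivative notation (a paraphrase of the lemma above, with $\partial_c$ for `Deriv c`, $\cdot$ for `@@`, and $\varepsilon$ for the empty word `[]`), this is the usual derivative rule for concatenation:

\[
  \partial_c(A \cdot B) \;=\; (\partial_c A)\cdot B \;\cup\;
  \begin{cases}
    \partial_c B & \text{if } \varepsilon \in A,\\
    \emptyset    & \text{otherwise.}
  \end{cases}
\]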
{"llama_tokens": 276, "file": "Regular-Sets_Regular_Set", "length": 2}
import sys
sys.path.insert(0, "./../")
sys.path.insert(0, "./")

import os
import subprocess
import json
import warnings
from netCDF4 import Dataset

import pytest
import pprint as pp
import numpy as np

import main as scampy
import common as cmn

# list of possible test cases
case_list = ['Bomex', 'life_cycle_Tan2018', 'Soares', 'Rico', 'TRMM_LBA', 'ARM_SGP', 'GATE_III', 'DYCOMS_RF01', 'GABLS', 'SP']

@pytest.fixture(scope="module")
def data(request):
    # dictionary where simulation results will be stored
    data = {}
    # loop over all test cases
    for case in case_list:
        # generate namelist and paramlist
        setup = cmn.simulation_setup(case)
        # run for 2 hours, output only at the end
        setup["namelist"]['time_stepping']['t_max'] = 2*60*60
        setup["namelist"]['stats_io']['frequency'] = setup["namelist"]['time_stepping']['t_max']
        # run scampy
        scampy.main1d(setup["namelist"], setup["paramlist"])
        # simulation results
        data[case] = Dataset(setup["outfile"], 'r')

    request.addfinalizer(cmn.removing_files)

    return data

@pytest.mark.parametrize("case", case_list)
def test_mean_qt_after_2hr(data, case):
    """
    Check if the mean qt is equal to updraft_area * updraft_qt + (1 - updraft_area) * env_qt
    """
    eps_dict = {'SP': 7e-3,\
                'DYCOMS_RF01': 5e-3,\
                'GABLS': 5e-3,\
                'TRMM_LBA': 4e-3,\
                'life_cycle_Tan2018': 2e-3,\
                'Soares': 2e-3,\
                'ARM_SGP': 7e-4,\
                'GATE_III': 6e-4,\
                'Bomex': 4e-4,\
                'Rico': 4e-4}

    # read in the data
    qt_mean  = np.array(data[str(case)]["profiles/qt_mean"][:,:])
    udr_area = np.array(data[str(case)]["profiles/updraft_area"][:,:])
    udr_qt   = np.array(data[str(case)]["profiles/updraft_qt"][:,:])
    env_qt   = np.array(data[str(case)]["profiles/env_qt"][:,:])

    # calculate the mean qt from updraft and environment means
    tmp_mean = udr_area * udr_qt + (1 - udr_area) * env_qt

    # absolute error
    abs_error = np.abs(qt_mean - tmp_mean)
    # relative error
    rel_error = np.zeros_like(abs_error)
    idx_not_zero = tmp_mean != 0
    np.place(rel_error, idx_not_zero, abs_error[idx_not_zero] / tmp_mean[idx_not_zero])

    # output for humans
    print("max_abs_error = ", np.max(abs_error), "max_rel_error = ", np.max(rel_error))

    # test
    assert np.allclose(qt_mean[-1,:], tmp_mean[-1,:], rtol = eps_dict[case], atol=0),\
        "qt_mean != updraft_area * updraft_qt_mean + (1-updraft_area) * env_qt_mean"
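

def _demo_decomposition():
    # Illustration only (hypothetical numbers, not called by the test): the
    # area-weighted identity checked above, for a single grid cell with
    # updraft area fraction a:  qt_mean = a*qt_updraft + (1 - a)*qt_env.
    a, qt_up, qt_env = 0.1, 17e-3, 5e-3
    assert np.isclose(a * qt_up + (1 - a) * qt_env, 6.2e-3)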
{"hexsha": "82ea71a48127710f6b91e1a5f4c3b836f8613ecb", "size": 2733, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/test_mean.py", "max_stars_repo_name": "jiahe23/SCAMPy", "max_stars_repo_head_hexsha": "0f8e9656b043e98c6bd316fda45bcf146bddbcbd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-24T04:28:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T07:11:01.000Z", "max_issues_repo_path": "tests/unit/test_mean.py", "max_issues_repo_name": "jiahe23/SCAMPy", "max_issues_repo_head_hexsha": "0f8e9656b043e98c6bd316fda45bcf146bddbcbd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2018-10-14T00:52:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-14T22:54:06.000Z", "max_forks_repo_path": "tests/unit/test_mean.py", "max_forks_repo_name": "jiahe23/SCAMPy", "max_forks_repo_head_hexsha": "0f8e9656b043e98c6bd316fda45bcf146bddbcbd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-09T00:53:29.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-21T21:58:11.000Z", "avg_line_length": 31.0568181818, "max_line_length": 96, "alphanum_fraction": 0.5894621295, "include": true, "reason": "import numpy", "num_tokens": 783}
import os import numpy as np from tqdm import tqdm def convert_transforms(root_path): file = os.path.join(root_path, 'transforms.npy') poses_path = os.path.join(root_path, 'poses') os.makedirs(poses_path, exist_ok=True) pose_file = os.path.join(poses_path, '{}.npy') poses_file = os.path.join(root_path, 'object_poses.npy') transforms = np.load(file) # [n,4,4] n = transforms.shape[0] poses = np.zeros((n, 4, 4)) # [n,4,4] for i, curr_transform in enumerate(tqdm(transforms)): p = np.linalg.inv(curr_transform) # calculate gt pose poses[i, :, :] = p np.save(pose_file.format(i), p) # save current gt pose np.save(poses_file, poses) # overwrite each step if __name__ == "__main__": root = os.path.join(os.getcwd(), 'LINEMOD', 'new_acquisition') convert_transforms(root)
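

def _demo_pose_is_inverse():
    # Hypothetical sanity check (not part of the conversion script): every
    # saved pose is defined as the inverse of its source transform, so their
    # product must recover the identity.
    T = np.eye(4)
    T[:3, 3] = [0.1, -0.2, 0.3]          # toy rigid transform (pure translation)
    pose = np.linalg.inv(T)
    assert np.allclose(T @ pose, np.eye(4))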
{"hexsha": "ee61f867e52f47f250053b7b7a242b470e088533", "size": 858, "ext": "py", "lang": "Python", "max_stars_repo_path": "convert_transforms_to_npy.py", "max_stars_repo_name": "federicocunico/ObjectDatasetTools", "max_stars_repo_head_hexsha": "c7418c588bfe2d1615bcd8aa96271394eb854b85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "convert_transforms_to_npy.py", "max_issues_repo_name": "federicocunico/ObjectDatasetTools", "max_issues_repo_head_hexsha": "c7418c588bfe2d1615bcd8aa96271394eb854b85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "convert_transforms_to_npy.py", "max_forks_repo_name": "federicocunico/ObjectDatasetTools", "max_forks_repo_head_hexsha": "c7418c588bfe2d1615bcd8aa96271394eb854b85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5862068966, "max_line_length": 66, "alphanum_fraction": 0.6526806527, "include": true, "reason": "import numpy", "num_tokens": 240}
""" See detailed analysis about maxout via links below: https://github.com/Duncanswilson/maxout-pytorch/blob/master/maxout_pytorch.ipynb https://cs231n.github.io/neural-networks-1/ Detailed descriptions about arch of MaxoutConv: https://github.com/paniabhisek/maxout/blob/master/maxout.json https://arxiv.org/abs/1412.6550 """ import numpy as np import torch import torch.nn as nn import torch.nn.functional as F class ListModule(object): """A list-like object which includes all candidate rectifiers/convs of a single hidden layer for maxout operation.""" def __init__(self, module, prefix, *args): """ Args: module(nn.module): parent nn.module prefix(str): name prefix of list items args(list): list of nn.modules """ self.module = module self.prefix = prefix self.num_module = 0 for new_module in args: self.append(new_module) def append(self, new_module): if not isinstance(new_module, nn.Module): raise ValueError("Not a Module") else: self.module.add_module(self.prefix + str(self.num_module), new_module) self.num_module += 1 def __len__(self): return self.num_module def __getitem__(self, i): if i < 0 or i >= self.num_module: raise IndexError("Out of bound") return getattr(self.module, self.prefix + str(i)) class MaxoutConvMNIST(nn.Module): """ Model of 3-layer-ConvNet with maxout as activition function.""" def __init__(self, output_size=10, num_units=2): super(MaxoutConvMNIST, self).__init__() self.conv1_list = ListModule(self, "conv1_") self.conv2_list = ListModule(self, "conv2_") self.conv3_list = ListModule(self, "conv3_") self.fc = nn.Linear(96, out_features=output_size) for _ in range(num_units): self.conv1_list.append(nn.Conv2d(1, 48, 7, 1, padding=3)) self.conv2_list.append(nn.Conv2d(48, 48, 7, 1, padding=2)) self.conv3_list.append(nn.Conv2d(48, 24, 5, 1, padding=2)) def forward(self, x): x = F.max_pool2d(self.maxout(x, self.conv1_list), 4, stride=2) x = F.max_pool2d(self.maxout(x, self.conv2_list), 4, stride=2) x = F.max_pool2d(self.maxout(x, self.conv3_list), 2, stride=2) # print(x.shape) x = x.view(-1, 96) x = self.fc(x) x = F.dropout(x, training=self.training) # return F.softmax(x, dim=1) return x def maxout(self, x, layer_list): max_output = layer_list[0](x) for _, layer in enumerate(layer_list, start=1): max_output = torch.max(max_output, layer(x)) return max_output class MaxoutConvCIFAR(nn.Module): """ Model of 3-layer-ConvNet with maxout as activition function.""" def __init__(self, output_size=10, conv_num_units=2, fc_num_units=5): super(MaxoutConvCIFAR, self).__init__() self.conv1_list = ListModule(self, "conv1_") self.conv2_list = ListModule(self, "conv2_") self.conv3_list = ListModule(self, "conv3_") self.conv1_bn = nn.BatchNorm2d(96) self.conv2_bn = nn.BatchNorm2d(192) self.conv3_bn = nn.BatchNorm2d(192) self.pool1 = nn.MaxPool2d(4, 2) self.pool2 = nn.MaxPool2d(4, 2) self.pool3 = nn.MaxPool2d(2, 2) self.fc1_list = ListModule(self, "fc1_") self.fc = nn.Linear(500, out_features=output_size) for _ in range(conv_num_units): self.conv1_list.append(nn.Conv2d(3, 96, 7, 1, padding=3)) self.conv2_list.append(nn.Conv2d(96, 192, 7, 1, padding=2)) self.conv3_list.append(nn.Conv2d(192, 192, 5, 1, padding=2)) for _ in range(fc_num_units): self.fc1_list.append(nn.Linear(768, 500)) self.activation = {} self.pool2.register_forward_hook(self.get_activation("hint_layer")) def forward(self, x): x = self.pool1(self.conv1_bn(self.maxout(x, self.conv1_list))) x = self.pool2(self.conv2_bn(self.maxout(x, self.conv2_list))) x = self.pool3(self.conv3_bn(self.maxout(x, self.conv3_list))) # print(x.shape) 
x = x.view(-1, 768) x = self.maxout(x, self.fc1_list) x = self.fc(x) # x = F.dropout(x, p=0.2, training=self.training) # return F.softmax(x, dim=1) return x def maxout(self, x, layer_list): max_output = layer_list[0](x) for _, layer in enumerate(layer_list, start=1): max_output = torch.max(max_output, layer(x)) return max_output def get_activation(self, name): def hook(model, input, output): self.activation[name] = output.detach() return hook if __name__ == "__main__": import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" # model = MaxoutConvMNIST().to("cuda") model = MaxoutConvCIFAR().to("cuda") # data = torch.arange(28*28*1, dtype=torch.float).view(1, 1, 28, 28).to("cuda") data = torch.arange(32 * 32 * 3, dtype=torch.float).view(1, 3, 32, 32).to("cuda") model(data)
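

def _demo_maxout():
    # Minimal sketch of the maxout idea in isolation (illustration only,
    # independent of the classes above): the activation is the element-wise
    # max over k affine candidates, here k = 2, which can learn e.g. |x|.
    x = torch.randn(4, 8)
    units = [nn.Linear(8, 16) for _ in range(2)]   # two candidate affine maps
    out = torch.max(units[0](x), units[1](x))      # per-element maximum
    assert out.shape == (4, 16)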
{"hexsha": "a6224ee9b7c9a0224c3bd8feceea062b032649bc", "size": 5181, "ext": "py", "lang": "Python", "max_stars_repo_path": "nets/maxout.py", "max_stars_repo_name": "zhuangzi926/KnowledgeDistillation-pytorch", "max_stars_repo_head_hexsha": "4785bd9afa5d79a744c127851e316caf8469a10e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-04T02:38:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-04T02:38:35.000Z", "max_issues_repo_path": "nets/maxout.py", "max_issues_repo_name": "zhuangzi926/KnowledgeDistillation-pytorch", "max_issues_repo_head_hexsha": "4785bd9afa5d79a744c127851e316caf8469a10e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nets/maxout.py", "max_forks_repo_name": "zhuangzi926/KnowledgeDistillation-pytorch", "max_forks_repo_head_hexsha": "4785bd9afa5d79a744c127851e316caf8469a10e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2307692308, "max_line_length": 121, "alphanum_fraction": 0.620343563, "include": true, "reason": "import numpy", "num_tokens": 1421}
#ifndef SM_TRANSFORMATION_HPP
#define SM_TRANSFORMATION_HPP

#include <sm/kinematics/quaternion_algebra.hpp>
#include <boost/serialization/nvp.hpp>
#include <sm/eigen/serialization.hpp>
#include "HomogeneousPoint.hpp"
#include <boost/serialization/split_member.hpp>
#include <boost/serialization/version.hpp>
#include <sm/kinematics/UncertainVector.hpp>

namespace sm {
  namespace kinematics {
    class UncertainTransformation;
    class UncertainHomogeneousPoint;

    ///
    /// @class Transformation
    /// @brief a class that represents a transformation.
    /// @todo describe how these transformations work
    ///
    class Transformation
    {
    public:
      // See: http://eigen.tuxfamily.org/dox-devel/TopicStructHavingEigenMembers.html
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW

      ///
      /// Default constructor. The transformation will
      /// be set to identity.
      ///
      Transformation();

      ///
      /// Constructor. The transformation will be set to T_a_b
      ///
      /// @param T_a_b the initializing transformation.
      ///
      Transformation(const Eigen::Matrix4d & T_a_b);

      ///
      /// Constructor.
      ///
      /// @param q_a_b   The quaternion that transforms vectors from b to a
      /// @param t_a_b_a the vector from the origin of frame a, to the origin of frame b, expressed in frame a.
      ///
      Transformation(const Eigen::Vector4d & q_a_b, const Eigen::Vector3d & t_a_b_a);

      virtual ~Transformation();

      ///
      /// @return The underlying transformation
      ///
      Eigen::Matrix4d T() const;

      /// @return the rotation matrix
      Eigen::Matrix3d C() const;

      /// @return the translation vector
      const Eigen::Vector3d & t() const;

      const Eigen::Vector4d & q() const;

      Eigen::Matrix<double, 3,4> T3x4() const;

      /**
       * Return a copy of the transformation inverted.
       *
       * @return The inverted transformation
       */
      Transformation inverse() const;

      /// \brief Set this to a random transformation.
      virtual void setRandom();

      virtual void set( const Eigen::Matrix4d & T_a_b );

      /// \brief Set this to a random transformation with bounded rotation and translation.
      virtual void setRandom( double translationMaxMeters, double rotationMaxRadians);

      /// \brief Set this transformation to identity
      void setIdentity();

      Transformation operator*(const Transformation & rhs) const;
      Eigen::Vector3d operator*(const Eigen::Vector3d & rhs) const;
      Eigen::Vector4d operator*(const Eigen::Vector4d & rhs) const;
      HomogeneousPoint operator*(const HomogeneousPoint & rhs) const;

      virtual UncertainTransformation operator*(const UncertainTransformation & rhs) const;
      virtual UncertainHomogeneousPoint operator*(const UncertainHomogeneousPoint & rhs) const;

      void checkTransformationIsValid( void ) const;

      /// \brief rotate a point (do not translate)
      Eigen::Vector3d rotate(const Eigen::Vector3d & p) const;

      /// \brief rotate a point (do not translate)
      Eigen::Vector4d rotate(const Eigen::Vector4d & p) const;

      /// \brief rotate a point (do not translate)
      UncertainVector3 rotate(const UncertainVector3 & p) const;

      double * qptr();
      double * tptr();

      enum {CLASS_SERIALIZATION_VERSION = 0};
      BOOST_SERIALIZATION_SPLIT_MEMBER()

      ///
      /// Serialize the Transformation to a boost::serialization archive.
      ///
      /// @param ar The archive
      /// @param version The archive file version number.
      ///
      template<class Archive>
      void save(Archive & ar, const unsigned int version) const;

      template<class Archive>
      void load(Archive & ar, const unsigned int version);

      bool isBinaryEqual(const Transformation & rhs) const;

      /// \brief The update step for this transformation from a minimal update.
      void oplus(const Eigen::Matrix<double,6,1> & dt);

      /// \brief Return the S matrix that puts the oplus operation in the form
      ///        of a small transformation.
      Eigen::Matrix<double,6,6> S() const;

    protected:
      /// The quaternion that will become a rotation matrix C_a_b that
      /// transforms vectors from b to a.
      Eigen::Vector4d _q_a_b;
      /// The vector from the origin of a to the origin of b, expressed in a
      Eigen::Vector3d _t_a_b_a;
    };

    template<class Archive>
    void Transformation::save(Archive & ar, const unsigned int /* version */) const
    {
      ar & BOOST_SERIALIZATION_NVP(_q_a_b);
      ar & BOOST_SERIALIZATION_NVP(_t_a_b_a);
    }

    template<class Archive>
    void Transformation::load(Archive & ar, const unsigned int version)
    {
      SM_ASSERT_LE(std::runtime_error, version, (unsigned int)CLASS_SERIALIZATION_VERSION, "Unsupported serialization version");
      ar >> BOOST_SERIALIZATION_NVP(_q_a_b);
      ar >> BOOST_SERIALIZATION_NVP(_t_a_b_a);
    }

    // Interpolate the transformation at time si between T0 (at time s0) and T1 (at time s1)
    Transformation interpolateTransformations(const Transformation & T0, double s0,
                                              const Transformation & T1, double s1,
                                              double si);

    /// \brief linearly interpolate between T0 and T1 as si moves from 0.0 to 1.0
    Transformation slerpTransformations(const Transformation & T0,
                                        const Transformation & T1,
                                        double si);

  } // namespace kinematics
} // namespace sm

BOOST_CLASS_VERSION(sm::kinematics::Transformation, sm::kinematics::Transformation::CLASS_SERIALIZATION_VERSION)

#endif /* SM_TRANSFORMATION_HPP */
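
// Hypothetical usage sketch (illustration only; assumes the sm_kinematics
// library is linked, nothing beyond the interface declared above is used).
// Compose two poses and map a point from frame c into frame a:
//
//   sm::kinematics::Transformation T_a_b, T_b_c;      // identity by default
//   T_a_b.setRandom(0.5, 0.1);                        // bounded random pose
//   sm::kinematics::Transformation T_a_c = T_a_b * T_b_c;
//   Eigen::Vector3d p_a = T_a_c * Eigen::Vector3d(1.0, 2.0, 3.0);
//   sm::kinematics::Transformation T_b_a = T_a_b.inverse();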
{"hexsha": "ad8d3b6ba03e716f918f7f9f32482a07e6682b12", "size": 5922, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Schweizer-Messer/sm_kinematics/include/sm/kinematics/Transformation.hpp", "max_stars_repo_name": "PushyamiKaveti/kalibr", "max_stars_repo_head_hexsha": "d8bdfc59ee666ef854012becc93571f96fe5d80c", "max_stars_repo_licenses": ["BSD-4-Clause"], "max_stars_count": 2690.0, "max_stars_repo_stars_event_min_datetime": "2015-01-07T03:50:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:27:01.000Z", "max_issues_repo_path": "Schweizer-Messer/sm_kinematics/include/sm/kinematics/Transformation.hpp", "max_issues_repo_name": "PushyamiKaveti/kalibr", "max_issues_repo_head_hexsha": "d8bdfc59ee666ef854012becc93571f96fe5d80c", "max_issues_repo_licenses": ["BSD-4-Clause"], "max_issues_count": 481.0, "max_issues_repo_issues_event_min_datetime": "2015-01-27T10:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:02:41.000Z", "max_forks_repo_path": "Schweizer-Messer/sm_kinematics/include/sm/kinematics/Transformation.hpp", "max_forks_repo_name": "PushyamiKaveti/kalibr", "max_forks_repo_head_hexsha": "d8bdfc59ee666ef854012becc93571f96fe5d80c", "max_forks_repo_licenses": ["BSD-4-Clause"], "max_forks_count": 1091.0, "max_forks_repo_forks_event_min_datetime": "2015-01-26T21:21:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T01:55:33.000Z", "avg_line_length": 31.6684491979, "max_line_length": 130, "alphanum_fraction": 0.6386355961, "num_tokens": 1293}
[STATEMENT] lemma token_time_finite_pair_rule: fixes A :: "(nat \<times> nat) set" fixes B :: "nat set" assumes unique: "\<And>x y z. P x y \<Longrightarrow> P x z \<Longrightarrow> y = z" and existsA: "\<And>x. x \<in> A \<Longrightarrow> (\<exists>y. P x y)" and existsB: "\<And>y. y \<in> B \<Longrightarrow> (\<exists>x. P x y)" and inA: "\<And>x y. P x y \<Longrightarrow> x \<in> A" and inB: "\<And>x y. P x y \<Longrightarrow> y \<in> B" and mono: "\<And>x y. P x y \<Longrightarrow> fst x \<le> y + c \<and> snd x \<le> y + d" shows "finite A \<longleftrightarrow> finite B" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite A = finite B [PROOF STEP] proof (rule finite_monotonic_image_pairs) [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>i. i \<in> A \<Longrightarrow> fst i \<le> ?f i + ?c 2. \<And>i. i \<in> A \<Longrightarrow> snd i \<le> ?f i + ?d 3. ?f ` A = B [PROOF STEP] let ?f = "(\<lambda>x. if x \<in> A then The (P x) else undefined)" [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>i. i \<in> A \<Longrightarrow> fst i \<le> ?f i + ?c 2. \<And>i. i \<in> A \<Longrightarrow> snd i \<le> ?f i + ?d 3. ?f ` A = B [PROOF STEP] { [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>i. i \<in> A \<Longrightarrow> fst i \<le> ?f i + ?c 2. \<And>i. i \<in> A \<Longrightarrow> snd i \<le> ?f i + ?d 3. ?f ` A = B [PROOF STEP] fix x [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>i. i \<in> A \<Longrightarrow> fst i \<le> ?f i + ?c 2. \<And>i. i \<in> A \<Longrightarrow> snd i \<le> ?f i + ?d 3. ?f ` A = B [PROOF STEP] assume "x \<in> A" [PROOF STATE] proof (state) this: x \<in> A goal (3 subgoals): 1. \<And>i. i \<in> A \<Longrightarrow> fst i \<le> ?f i + ?c 2. \<And>i. i \<in> A \<Longrightarrow> snd i \<le> ?f i + ?d 3. ?f ` A = B [PROOF STEP] then [PROOF STATE] proof (chain) picking this: x \<in> A [PROOF STEP] obtain y where "P x y" and "y = ?f x" [PROOF STATE] proof (prove) using this: x \<in> A goal (1 subgoal): 1. (\<And>y. \<lbrakk>P x y; y = (if x \<in> A then The (P x) else undefined)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] using existsA the_equality unique [PROOF STATE] proof (prove) using this: x \<in> A ?x \<in> A \<Longrightarrow> \<exists>y. P ?x y \<lbrakk>?P ?a; \<And>x. ?P x \<Longrightarrow> x = ?a\<rbrakk> \<Longrightarrow> (THE x. ?P x) = ?a \<lbrakk>P ?x ?y; P ?x ?z\<rbrakk> \<Longrightarrow> ?y = ?z goal (1 subgoal): 1. (\<And>y. \<lbrakk>P x y; y = (if x \<in> A then The (P x) else undefined)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by metis [PROOF STATE] proof (state) this: P x y y = (if x \<in> A then The (P x) else undefined) goal (3 subgoals): 1. \<And>i. i \<in> A \<Longrightarrow> fst i \<le> ?f i + ?c 2. \<And>i. i \<in> A \<Longrightarrow> snd i \<le> ?f i + ?d 3. ?f ` A = B [PROOF STEP] thus "fst x \<le> ?f x + c" and "snd x \<le> ?f x + d" [PROOF STATE] proof (prove) using this: P x y y = (if x \<in> A then The (P x) else undefined) goal (1 subgoal): 1. fst x \<le> (if x \<in> A then The (P x) else undefined) + c &&& snd x \<le> (if x \<in> A then The (P x) else undefined) + d [PROOF STEP] using mono [PROOF STATE] proof (prove) using this: P x y y = (if x \<in> A then The (P x) else undefined) P ?x ?y \<Longrightarrow> fst ?x \<le> ?y + c \<and> snd ?x \<le> ?y + d goal (1 subgoal): 1. 
fst x \<le> (if x \<in> A then The (P x) else undefined) + c &&& snd x \<le> (if x \<in> A then The (P x) else undefined) + d [PROOF STEP] by blast+ [PROOF STATE] proof (state) this: fst x \<le> (if x \<in> A then The (P x) else undefined) + c snd x \<le> (if x \<in> A then The (P x) else undefined) + d goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] } [PROOF STATE] proof (state) this: ?x2 \<in> A \<Longrightarrow> fst ?x2 \<le> (if ?x2 \<in> A then The (P ?x2) else undefined) + c ?x2 \<in> A \<Longrightarrow> snd ?x2 \<le> (if ?x2 \<in> A then The (P ?x2) else undefined) + d goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] { [PROOF STATE] proof (state) this: ?x2 \<in> A \<Longrightarrow> fst ?x2 \<le> (if ?x2 \<in> A then The (P ?x2) else undefined) + c ?x2 \<in> A \<Longrightarrow> snd ?x2 \<le> (if ?x2 \<in> A then The (P ?x2) else undefined) + d goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] fix y [PROOF STATE] proof (state) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] have "y \<in> ?f ` A \<longleftrightarrow> (\<exists>x. x \<in> A \<and> y = The (P x))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (y \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (\<exists>x. x \<in> A \<and> y = The (P x)) [PROOF STEP] unfolding image_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (y \<in> {y. \<exists>x\<in>A. y = (if x \<in> A then The (P x) else undefined)}) = (\<exists>x. x \<in> A \<and> y = The (P x)) [PROOF STEP] by force [PROOF STATE] proof (state) this: (y \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (\<exists>x. x \<in> A \<and> y = The (P x)) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] also [PROOF STATE] proof (state) this: (y \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (\<exists>x. x \<in> A \<and> y = The (P x)) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] have "\<dots> \<longleftrightarrow> (\<exists>x. P x y)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<exists>x. x \<in> A \<and> y = The (P x)) = (\<exists>x. P x y) [PROOF STEP] by (metis inA existsA unique the_equality) [PROOF STATE] proof (state) this: (\<exists>x. x \<in> A \<and> y = The (P x)) = (\<exists>x. P x y) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] also [PROOF STATE] proof (state) this: (\<exists>x. x \<in> A \<and> y = The (P x)) = (\<exists>x. P x y) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] have "\<dots> \<longleftrightarrow> y \<in> B" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<exists>x. P x y) = (y \<in> B) [PROOF STEP] using inB existsB [PROOF STATE] proof (prove) using this: P ?x ?y \<Longrightarrow> ?y \<in> B ?y \<in> B \<Longrightarrow> \<exists>x. P x ?y goal (1 subgoal): 1. (\<exists>x. P x y) = (y \<in> B) [PROOF STEP] by blast [PROOF STATE] proof (state) this: (\<exists>x. P x y) = (y \<in> B) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] finally [PROOF STATE] proof (chain) picking this: (y \<in> (\<lambda>x. 
if x \<in> A then The (P x) else undefined) ` A) = (y \<in> B) [PROOF STEP] have "y \<in> ?f ` A \<longleftrightarrow> y \<in> B" [PROOF STATE] proof (prove) using this: (y \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (y \<in> B) goal (1 subgoal): 1. (y \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (y \<in> B) [PROOF STEP] . [PROOF STATE] proof (state) this: (y \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (y \<in> B) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] } [PROOF STATE] proof (state) this: (?y2 \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (?y2 \<in> B) goal (1 subgoal): 1. (\<lambda>i. if i \<in> A then The (P i) else undefined) ` A = B [PROOF STEP] thus "?f ` A = B" [PROOF STATE] proof (prove) using this: (?y2 \<in> (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A) = (?y2 \<in> B) goal (1 subgoal): 1. (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A = B [PROOF STEP] by blast [PROOF STATE] proof (state) this: (\<lambda>x. if x \<in> A then The (P x) else undefined) ` A = B goal: No subgoals! [PROOF STEP] qed \<comment> \<open>Correspondence Between Token- and Time-Based Definitions\<close>
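Informally (a paraphrase, not part of the theory file): if $P$ assigns to every pair $x \in A \subseteq \mathbb{N}\times\mathbb{N}$ a unique $y \in B \subseteq \mathbb{N}$, hits every $y \in B$, and satisfies $\pi_1(x) \le y + c$ and $\pi_2(x) \le y + d$, then

\[
  \mathrm{finite}\,A \;\Longleftrightarrow\; \mathrm{finite}\,B,
\]

since a finite $B$ confines $A$ to the finite grid $[0, \max B + c] \times [0, \max B + d]$, while a finite $A$ can only cover $B$ through finitely many pairs.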
{"llama_tokens": 3574, "file": "LTL_to_DRA_Mojmir", "length": 32}
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Property-based testing for TFP distributions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from absl.testing import parameterized import hypothesis as hp from hypothesis import strategies as hps import numpy as np import six import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.distributions import hypothesis_testlib as dhps from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps from tensorflow_probability.python.internal import tensor_util from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_util TF2_FRIENDLY_DISTS = ( 'Bernoulli', 'Beta', 'BetaBinomial', 'Binomial', 'Chi', 'Chi2', 'CholeskyLKJ', 'Categorical', 'Cauchy', 'ContinuousBernoulli', 'Deterministic', 'Dirichlet', 'DirichletMultinomial', 'DoublesidedMaxwell', 'Empirical', 'Exponential', 'FiniteDiscrete', 'Gamma', 'GammaGamma', 'GeneralizedPareto', 'Geometric', 'Gumbel', 'HalfCauchy', 'HalfNormal', 'HalfStudentT', 'Horseshoe', 'InverseGamma', 'InverseGaussian', 'JohnsonSU', 'Kumaraswamy', 'Laplace', 'LKJ', 'LogNormal', 'Logistic', 'Normal', 'Moyal', 'Multinomial', 'NegativeBinomial', 'OneHotCategorical', 'OrderedLogistic', 'Pareto', 'PERT', 'PlackettLuce', 'Poisson', # 'PoissonLogNormalQuadratureCompound' TODO(b/137956955): Add support # for hypothesis testing 'ProbitBernoulli', 'RelaxedBernoulli', 'ExpRelaxedOneHotCategorical', # 'SinhArcsinh' TODO(b/137956955): Add support for hypothesis testing 'StudentT', 'Triangular', 'TruncatedNormal', 'Uniform', 'VonMises', 'VonMisesFisher', 'WishartTriL', 'Zipf', ) NO_SAMPLE_PARAM_GRADS = { 'Deterministic': ('atol', 'rtol'), } NO_LOG_PROB_PARAM_GRADS = ('Deterministic', 'Empirical') NO_KL_PARAM_GRADS = ('Deterministic',) # Batch slicing requires implementing `_params_event_ndims`. Generic # instantiation (per `instantiable_base_dists`, below) also requires # `_params_event_ndims`, but some special distributions can be instantiated # without that. Of those, this variable lists the ones that do not support # batch slicing. INSTANTIABLE_BUT_NOT_SLICABLE = ( 'BatchReshape', ) EXTRA_TENSOR_CONVERSION_DISTS = { 'RelaxedBernoulli': 1, 'WishartTriL': 3, # not concretizing linear operator scale 'Chi': 2, # subclasses `Chi2`, runs redundant checks on `df` parameter } # TODO(b/130815467) All distributions should be auto-vectorizeable. # The lists below contain distributions from INSTANTIABLE_BASE_DISTS that are # blacklisted by the autovectorization tests. Since not all distributions are # in INSTANTIABLE_BASE_DISTS, these should not be taken as exhaustive. 
SAMPLE_AUTOVECTORIZATION_IS_BROKEN = [
    'DirichletMultinomial',  # No converter for StatelessWhile
    'Gamma',  # "Incompatible shapes" error. (b/150712618).
    'Multinomial',  # No converter for StatelessWhile
    'PlackettLuce',  # No converter for TopKV2
    'TruncatedNormal',  # No converter for ParameterizedTruncatedNormal
]

LOGPROB_AUTOVECTORIZATION_IS_BROKEN = [
    'StudentT',  # Numerical problem: b/149785284
    'HalfStudentT',  # Numerical problem: b/149785284
    'TruncatedNormal',  # Numerical problem: b/150811273
    'VonMisesFisher',  # No converter for CheckNumerics
    'Wishart',  # Actually works, but disabled because log_prob of sample is
                # ill-conditioned for reasons unrelated to pfor.
    'WishartTriL',  # Same as Wishart.
]

EVENT_SPACE_BIJECTOR_IS_BROKEN = [
    'InverseGamma',  # TODO(b/143090143): Enable this when the bug is fixed.
                     # (Reciprocal(Softplus(x)) -> inf for small x)
]

# Vectorization can rewrite computations in ways that (apparently) lead to
# minor floating-point inconsistency.
# TODO(b/142827327): Bring tolerance down to 0 for all distributions.
VECTORIZED_LOGPROB_ATOL = collections.defaultdict(lambda: 1e-6)
VECTORIZED_LOGPROB_ATOL.update({
    'CholeskyLKJ': 1e-4,
    'LKJ': 1e-3,
    'BetaBinomial': 1e-5,
})

VECTORIZED_LOGPROB_RTOL = collections.defaultdict(lambda: 1e-6)
VECTORIZED_LOGPROB_RTOL.update({
    'NegativeBinomial': 1e-5,
})


def extra_tensor_conversions_allowed(dist):
  """Returns number of extra tensor conversions allowed for the input dist."""
  extra_conversions = EXTRA_TENSOR_CONVERSION_DISTS.get(type(dist).__name__)
  if extra_conversions:
    return extra_conversions
  if isinstance(dist, tfd.TransformedDistribution):
    return 1
  if isinstance(dist, tfd.BatchReshape):
    # One for the batch_shape_tensor needed by _call_reshape_input_output.
    # One to cover inability to turn off validate_args for the base
    # distribution (b/143297494).
    return 2
  return 0


@test_util.test_all_tf_execution_regimes
class DistributionParamsAreVarsTest(test_util.TestCase):

  @parameterized.named_parameters(
      {'testcase_name': dname, 'dist_name': dname}
      for dname in TF2_FRIENDLY_DISTS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testDistribution(self, dist_name, data):
    seed = test_util.test_seed()
    # Explicitly draw event_dim here to avoid relying on _params_event_ndims
    # later, so this test can support distributions that do not implement the
    # slicing protocol.
    event_dim = data.draw(hps.integers(min_value=2, max_value=6))
    dist = data.draw(dhps.distributions(
        dist_name=dist_name, event_dim=event_dim, enable_vars=True))
    batch_shape = dist.batch_shape
    batch_shape2 = data.draw(tfp_hps.broadcast_compatible_shape(batch_shape))
    dist2 = data.draw(
        dhps.distributions(
            dist_name=dist_name,
            batch_shape=batch_shape2,
            event_dim=event_dim,
            enable_vars=True))
    self.evaluate([var.initializer for var in dist.variables])

    # Check that the distribution passes Variables through to the accessor
    # properties (without converting them to Tensor or anything like that).
    for k, v in six.iteritems(dist.parameters):
      if not tensor_util.is_ref(v):
        continue
      self.assertIs(getattr(dist, k), v)

    # Check that standard statistics do not read distribution parameters more
    # than twice (once in the stat itself and up to once in any validation
    # assertions).
    max_permissible = 2 + extra_tensor_conversions_allowed(dist)
    for stat in sorted(data.draw(
        hps.sets(
            hps.one_of(
                map(hps.just, [
                    'covariance', 'entropy', 'mean', 'mode', 'stddev',
                    'variance'
                ])),
            min_size=3,
            max_size=3))):
      hp.note('Testing excessive var usage in {}.{}'.format(dist_name, stat))
      try:
        with tfp_hps.assert_no_excessive_var_usage(
            'statistic `{}` of `{}`'.format(stat, dist),
            max_permissible=max_permissible):
          getattr(dist, stat)()
      except NotImplementedError:
        pass

    # Check that `sample` doesn't read distribution parameters more than
    # twice, and that it produces non-None gradients (if the distribution is
    # fully reparameterized).
    with tf.GradientTape() as tape:
      # TDs do bijector assertions twice (once by distribution.sample, and
      # once by bijector.forward).
      max_permissible = 2 + extra_tensor_conversions_allowed(dist)
      with tfp_hps.assert_no_excessive_var_usage(
          'method `sample` of `{}`'.format(dist),
          max_permissible=max_permissible):
        sample = dist.sample(seed=seed)
    if dist.reparameterization_type == tfd.FULLY_REPARAMETERIZED:
      grads = tape.gradient(sample, dist.variables)
      for grad, var in zip(grads, dist.variables):
        var_name = var.name.rstrip('_0123456789:')
        if var_name in NO_SAMPLE_PARAM_GRADS.get(dist_name, ()):
          continue
        if grad is None:
          raise AssertionError(
              'Missing sample -> {} grad for distribution {}'.format(
                  var_name, dist_name))

    # Turn off validations, since TODO(b/129271256) log_prob can choke on
    # dist's own samples.  Also, to relax conversion counts for KL (might do
    # >2 w/ validate_args).
    dist = dist.copy(validate_args=False)
    dist2 = dist2.copy(validate_args=False)

    # Test that KL divergence reads distribution parameters at most once, and
    # that it produces non-None gradients.
    try:
      for d1, d2 in (dist, dist2), (dist2, dist):
        with tf.GradientTape() as tape:
          with tfp_hps.assert_no_excessive_var_usage(
              '`kl_divergence` of (`{}` (vars {}), `{}` (vars {}))'.format(
                  d1, d1.variables, d2, d2.variables),
              max_permissible=1):  # No validation => 1 convert per var.
            kl = d1.kl_divergence(d2)
        wrt_vars = list(d1.variables) + list(d2.variables)
        grads = tape.gradient(kl, wrt_vars)
        for grad, var in zip(grads, wrt_vars):
          if grad is None and dist_name not in NO_KL_PARAM_GRADS:
            raise AssertionError('Missing KL({} || {}) -> {} grad:\n'  # pylint: disable=duplicate-string-formatting-argument
                                 '{} vars: {}\n{} vars: {}'.format(
                                     d1, d2, var, d1, d1.variables, d2,
                                     d2.variables))
    except NotImplementedError:
      pass

    # Test that log_prob produces non-None gradients, except for distributions
    # on the NO_LOG_PROB_PARAM_GRADS blacklist.
    if dist_name not in NO_LOG_PROB_PARAM_GRADS:
      with tf.GradientTape() as tape:
        lp = dist.log_prob(tf.stop_gradient(sample))
      grads = tape.gradient(lp, dist.variables)
      for grad, var in zip(grads, dist.variables):
        if grad is None:
          raise AssertionError(
              'Missing log_prob -> {} grad for distribution {}'.format(
                  var, dist_name))

    # Test that all forms of probability evaluation avoid reading distribution
    # parameters more than once.
    for evaluative in sorted(data.draw(
        hps.sets(
            hps.one_of(
                map(hps.just, [
                    'log_prob', 'prob', 'log_cdf', 'cdf',
                    'log_survival_function', 'survival_function'
                ])),
            min_size=3,
            max_size=3))):
      hp.note('Testing excessive var usage in {}.{}'.format(
          dist_name, evaluative))
      try:
        # No validation => 1 convert.
        # But for TD we allow 2:
        # dist.log_prob(bijector.inverse(samp)) + bijector.ildj(samp)
        max_permissible = 2 + extra_tensor_conversions_allowed(dist)
        with tfp_hps.assert_no_excessive_var_usage(
            'evaluative `{}` of `{}`'.format(evaluative, dist),
            max_permissible=max_permissible):
          getattr(dist, evaluative)(sample)
      except NotImplementedError:
        pass


@test_util.test_all_tf_execution_regimes
class ReproducibilityTest(test_util.TestCase):

  @parameterized.named_parameters(
      {'testcase_name': dname, 'dist_name': dname}
      for dname in sorted(list(dhps.INSTANTIABLE_BASE_DISTS.keys()) +
                          list(dhps.INSTANTIABLE_META_DISTS)))
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testDistribution(self, dist_name, data):
    dist = data.draw(dhps.distributions(dist_name=dist_name,
                                        enable_vars=False))
    seed = test_util.test_seed()
    with tfp_hps.no_tf_rank_errors():
      s1 = self.evaluate(dist.sample(50, seed=seed))
    if tf.executing_eagerly():
      tf.random.set_seed(seed)
    with tfp_hps.no_tf_rank_errors():
      s2 = self.evaluate(dist.sample(50, seed=seed))
    self.assertAllEqual(s1, s2)


@test_util.test_all_tf_execution_regimes
class EventSpaceBijectorsTest(test_util.TestCase):

  def check_bad_loc_scale(self, dist):
    if hasattr(dist, 'loc') and hasattr(dist, 'scale'):
      try:
        loc_ = tf.convert_to_tensor(dist.loc)
        scale_ = tf.convert_to_tensor(dist.scale)
      except (ValueError, TypeError):
        # If they're not Tensor-convertible, don't try to check them.  This is
        # the case, for example, in multivariate normal, where the scale is a
        # `LinearOperator`.
        return
      loc, scale = self.evaluate([loc_, scale_])
      hp.assume(np.all(np.abs(loc / scale) < 1e7))

  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testDistribution(self, data):
    enable_vars = data.draw(hps.booleans())

    # TODO(b/146572907): Fix `enable_vars` for metadistributions.
    broken_dists = EVENT_SPACE_BIJECTOR_IS_BROKEN
    if enable_vars:
      broken_dists.extend(dhps.INSTANTIABLE_META_DISTS)

    dist = data.draw(
        dhps.distributions(
            enable_vars=enable_vars,
            eligibility_filter=(lambda name: name not in broken_dists)))
    self.evaluate([var.initializer for var in dist.variables])
    self.check_bad_loc_scale(dist)

    event_space_bijector = dist._experimental_default_event_space_bijector()
    if event_space_bijector is None:
      return

    total_sample_shape = tensorshape_util.concatenate(
        # Draw a sample shape
        data.draw(tfp_hps.shapes()),
        # Draw a shape that broadcasts with `[batch_shape, inverse_event_shape]`
        # where `inverse_event_shape` is the event shape in the bijector's
        # domain. This is the shape of `y` in R**n, such that
        # x = event_space_bijector(y) has the event shape of the distribution.
        data.draw(tfp_hps.broadcasting_shapes(
            tensorshape_util.concatenate(
                dist.batch_shape,
                event_space_bijector.inverse_event_shape(
                    dist.event_shape)), n=1))[0])

    y = data.draw(
        tfp_hps.constrained_tensors(
            tfp_hps.identity_fn, total_sample_shape.as_list()))
    x = event_space_bijector(y)
    with tf.control_dependencies(dist._sample_control_dependencies(x)):
      self.evaluate(tf.identity(x))


def _all_shapes(thing):
  if isinstance(thing, (tfd.Distribution, tfb.Bijector)):
    # pylint: disable=g-complex-comprehension
    answer = [s for _, param in thing.parameters.items()
              for s in _all_shapes(param)]
    if isinstance(thing, tfd.TransformedDistribution):
      answer = [thing.batch_shape + s for s in answer]
    if isinstance(thing, tfd.Distribution):
      answer += [thing.batch_shape + thing.event_shape]
    if isinstance(thing, tfd.MixtureSameFamily):
      num_components = thing.mixture_distribution.logits_parameter().shape[-1]
      answer += [thing.batch_shape + [num_components] + thing.event_shape]
    return answer
  elif tf.is_tensor(thing):
    return [thing.shape]
  else:
    # Assume the thing is some Python constant like a string or a boolean
    return []


def _all_ok(thing, one_ok):
  hp.note('Testing packetization of {}.'.format(thing))
  for s in _all_shapes(thing):
    if not one_ok(s):
      return False
  return True


def _all_packetized(thing):
  def one_ok(shape):
    ans = tf.TensorShape(shape).num_elements() > 1
    for dim in tf.TensorShape(shape):
      ans &= dim % 4 == 0
    if ans:
      hp.note('Presuming shape {} is packetized'.format(shape))
    else:
      hp.note('Not presuming shape {} is packetized'.format(shape))
    return ans
  return _all_ok(thing, one_ok)


def _all_non_packetized(thing):
  def one_ok(shape):
    ans = tf.TensorShape(shape).num_elements() < 4
    if ans:
      hp.note('Presuming shape {} is non-packetized'.format(shape))
    else:
      hp.note('Not presuming shape {} is non-packetized'.format(shape))
    return ans
  return _all_ok(thing, one_ok)


@test_util.test_all_tf_execution_regimes
class DistributionSlicingTest(test_util.TestCase):

  def _test_slicing(self, data, dist):
    strm = test_util.test_seed_stream()
    batch_shape = dist.batch_shape
    slices = data.draw(dhps.valid_slices(batch_shape))
    slice_str = 'dist[{}]'.format(', '.join(dhps.stringify_slices(slices)))
    # Make sure the slice string appears in Hypothesis' attempted example log
    hp.note('Using slice ' + slice_str)
    if not slices:  # Nothing further to check.
      return
    sliced_zeros = np.zeros(batch_shape)[slices]
    sliced_dist = dist[slices]
    hp.note('Using sliced distribution {}.'.format(sliced_dist))

    # Check that slicing modifies batch shape as expected.
    self.assertAllEqual(sliced_zeros.shape, sliced_dist.batch_shape)

    if not sliced_zeros.size:
      # TODO(b/128924708): Fix distributions that fail on degenerate empty
      #     shapes, e.g. Multinomial, DirichletMultinomial, ...
      return

    # Check that sampling of sliced distributions executes.
    with tfp_hps.no_tf_rank_errors():
      samples = self.evaluate(dist.sample(seed=strm()))
      sliced_dist_samples = self.evaluate(sliced_dist.sample(seed=strm()))

    # Come up with the slices for samples (which must also include event
    # dims).
    sample_slices = (
        tuple(slices) if isinstance(slices, collections.Sequence) else
        (slices,))
    if Ellipsis not in sample_slices:
      sample_slices += (Ellipsis,)
    sample_slices += tuple([slice(None)] *
                           tensorshape_util.rank(dist.event_shape))
    sliced_samples = samples[sample_slices]

    # Report sub-sliced samples (on which we compare log_prob) to hypothesis.
    hp.note('Sample(s) for testing log_prob ' + str(sliced_samples))

    # Check that sampling a sliced distribution produces the same shape as
    # slicing the samples from the original.
    self.assertAllEqual(sliced_samples.shape, sliced_dist_samples.shape)

    # Check that a sliced distribution can compute the log_prob of its own
    # samples (up to numerical validation errors).
    with tfp_hps.no_tf_rank_errors():
      try:
        lp = self.evaluate(dist.log_prob(samples))
      except tf.errors.InvalidArgumentError:
        # TODO(b/129271256): d.log_prob(d.sample()) should not fail
        #     validate_args checks.
        # We only tolerate this case for the non-sliced dist.
        return
      sliced_lp = self.evaluate(sliced_dist.log_prob(sliced_samples))

    # Check that the sliced dist's log_prob agrees with slicing the original's
    # log_prob.

    # This `hp.assume` is suppressing array sizes that cause the sliced and
    # non-sliced distribution to follow different Eigen code paths.  Those
    # different code paths lead to arbitrarily large variations in the results
    # at parameter settings that Hypothesis is all too good at finding.  Since
    # the purpose of this test is just to check that we got slicing right,
    # those discrepancies are a distraction.
    # TODO(b/140229057): Remove this `hp.assume`, if and when Eigen's numerics
    # become index-independent.
    all_packetized = (
        _all_packetized(dist) and _all_packetized(sliced_dist) and
        _all_packetized(samples) and _all_packetized(sliced_samples))
    hp.note('Packetization check {}'.format(all_packetized))
    all_non_packetized = (
        _all_non_packetized(dist) and _all_non_packetized(sliced_dist) and
        _all_non_packetized(samples) and _all_non_packetized(sliced_samples))
    hp.note('Non-packetization check {}'.format(all_non_packetized))
    hp.assume(all_packetized or all_non_packetized)

    self.assertAllClose(lp[slices], sliced_lp, atol=1e-5, rtol=1e-5)

  def _run_test(self, data):
    def ok(name):
      return name not in INSTANTIABLE_BUT_NOT_SLICABLE
    dist = data.draw(dhps.distributions(enable_vars=False,
                                        eligibility_filter=ok))

    # Check that all distributions still register as non-iterable despite
    # defining __getitem__.  (Because __getitem__ magically makes an object
    # iterable for some reason.)
    with self.assertRaisesRegexp(TypeError, 'not iterable'):
      iter(dist)

    # Test slicing
    self._test_slicing(data, dist)

    # TODO(bjp): Enable sampling and log_prob checks. Currently, too many
    #     errors from out-of-domain samples.
    # self.evaluate(dist.log_prob(dist.sample(seed=test_util.test_seed())))

  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testDistributions(self, data):
    self._run_test(data)

  def disabled_testFailureCase(self):  # pylint: disable=invalid-name
    # TODO(b/140229057): This test should pass.
    dist = tfd.Chi(df=np.float32(27.744131))
    dist = tfd.TransformedDistribution(
        bijector=tfb.NormalCDF(), distribution=dist, batch_shape=[4])
    dist = tfb.Expm1()(dist)
    samps = 1.7182817 + tf.zeros_like(dist.sample(seed=test_util.test_seed()))
    self.assertAllClose(dist.log_prob(samps)[0], dist[0].log_prob(samps[0]))


# TODO(b/150161911): reconcile graph- and eager-mode handling of denormal
# floats so that we can re-enable eager mode tests.
@test_util.test_graph_mode_only
class DistributionsWorkWithAutoVectorizationTest(test_util.TestCase):

  def _test_vectorization(self, dist_name, dist):
    seed = test_util.test_seed()

    num_samples = 3
    if dist_name in SAMPLE_AUTOVECTORIZATION_IS_BROKEN:
      sample = self.evaluate(dist.sample(num_samples, seed=seed))
    else:
      sample = self.evaluate(tf.vectorized_map(
          lambda i: dist.sample(seed=seed), tf.range(num_samples),
          fallback_to_while_loop=False))
    hp.note('Drew samples {}'.format(sample))

    if dist_name not in LOGPROB_AUTOVECTORIZATION_IS_BROKEN:
      pfor_lp = tf.vectorized_map(dist.log_prob, tf.convert_to_tensor(sample),
                                  fallback_to_while_loop=False)
      batch_lp = dist.log_prob(sample)
      pfor_lp_, batch_lp_ = self.evaluate((pfor_lp, batch_lp))
      self.assertAllClose(pfor_lp_, batch_lp_,
                          atol=VECTORIZED_LOGPROB_ATOL[dist_name],
                          rtol=VECTORIZED_LOGPROB_RTOL[dist_name])

  @parameterized.named_parameters(
      {'testcase_name': dname, 'dist_name': dname}
      for dname in sorted(list(dhps.INSTANTIABLE_BASE_DISTS.keys())))
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testVmap(self, dist_name, data):
    dist = data.draw(dhps.distributions(
        dist_name=dist_name, enable_vars=False,
        validate_args=False))  # TODO(b/142826246): Enable validate_args.
    self._test_vectorization(dist_name, dist)


if __name__ == '__main__':
  # Hypothesis often finds numerical near misses.  Debugging them is much
  # aided by seeing all the digits of every floating point number, instead of
  # the usual default of truncating the printed representation to 8 digits.
  np.set_printoptions(floatmode='unique', precision=None)
  tf.test.main()
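# ---------------------------------------------------------------------------
# Illustrative sketch added for exposition (not part of the original test
# file): the core gradient check that DistributionParamsAreVarsTest applies to
# every distribution, shown on one concrete example.  The helper name below is
# hypothetical.
#
#   import tensorflow.compat.v2 as tf
#   from tensorflow_probability.python import distributions as tfd
#
#   def _example_log_prob_grads():
#     loc = tf.Variable(0.5)
#     scale = tf.Variable(2.0)
#     dist = tfd.Normal(loc=loc, scale=scale)
#     with tf.GradientTape() as tape:
#       lp = dist.log_prob(tf.stop_gradient(dist.sample(seed=42)))
#     # As in the test above, every variable should receive a non-None
#     # gradient from log_prob.
#     return tape.gradient(lp, dist.variables)
# ---------------------------------------------------------------------------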
{"hexsha": "bc6786872099a99b4589b964d700193ecc959c9a", "size": 23626, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_probability/python/distributions/distribution_properties_test.py", "max_stars_repo_name": "awav/probability", "max_stars_repo_head_hexsha": "c833ee5cd9f60f3257366b25447b9e50210b0590", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow_probability/python/distributions/distribution_properties_test.py", "max_issues_repo_name": "awav/probability", "max_issues_repo_head_hexsha": "c833ee5cd9f60f3257366b25447b9e50210b0590", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_probability/python/distributions/distribution_properties_test.py", "max_forks_repo_name": "awav/probability", "max_forks_repo_head_hexsha": "c833ee5cd9f60f3257366b25447b9e50210b0590", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2297734628, "max_line_length": 125, "alphanum_fraction": 0.6864048083, "include": true, "reason": "import numpy", "num_tokens": 5760}
from __future__ import division

import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
import numpy as np
from .batch_utils import allocate_batch

"""
Data management for sequence-to-sequence models
Two basic classes:
- Batch stores the input / output sequences, grouped into tensors with the same length (by padding)
- Dataset stores all of the data and
"""


def merge_data(data, align_right=False, type='text', augmenter=None, upsampling=False, feature_size=40):
    """
    Assembling the individual sequences into one single tensor, including padding
    :param feature_size:
    :param upsampling:
    :param data: the list of sequences
    :param align_right: aligning the sequences w.r.t padding
    :param type: text or audio
    :param augmenter: for augmentation in audio models
    :return:
    """
    # initialize with batch_size * length
    # TODO: rewrite this function in Cython
    if type == "text":
        lengths = [x.size(0) for x in data]
        # positions = [torch.arange(length_) for length_ in lengths]
        max_length = max(lengths)

        tensor = data[0].new(len(data), max_length).fill_(onmt.constants.PAD)
        pos = None

        for i in range(len(data)):
            data_length = data[i].size(0)
            offset = max_length - data_length if align_right else 0
            tensor[i].narrow(0, offset, data_length).copy_(data[i])

        return tensor, pos, lengths

    elif type == "audio":
        # First step: on-the-fly processing for the samples
        # Reshaping: either downsampling or upsampling
        # On the fly augmentation
        samples = []

        for i in range(len(data)):
            sample = data[i]

            if augmenter is not None:
                sample = augmenter.augment(sample)

            if upsampling:
                sample = sample.view(-1, feature_size)

            samples.append(sample)

        # compute the lengths after on-the-fly processing
        lengths = [x.size(0) for x in samples]
        max_length = max(lengths)

        # allocate data for the batch speech
        feature_size = samples[0].size(1)
        batch_size = len(data)

        # feature size + 1 because the last dimension is created for padding
        tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(onmt.constants.PAD)

        for i in range(len(samples)):
            sample = samples[i]

            data_length = sample.size(0)
            offset = max_length - data_length if align_right else 0

            tensor[i].narrow(0, offset, data_length).narrow(1, 1, sample.size(1)).copy_(sample)
            # in padding dimension: 0 is not padded, 1 is padded
            tensor[i].narrow(0, offset, data_length).narrow(1, 0, 1).fill_(1)

        return tensor, None, lengths

    else:
        raise NotImplementedError


def collect_fn(src_data, tgt_data, src_lang_data, tgt_lang_data, src_align_right, tgt_align_right,
               src_type='text', augmenter=None, upsampling=False, bilingual=False, vocab_mask=None):
    tensors = dict()
    if src_data is not None:
        tensors['source'], tensors['source_pos'], src_lengths = merge_data(src_data, align_right=src_align_right,
                                                                           type=src_type, augmenter=augmenter,
                                                                           upsampling=upsampling, feature_size=40)
        tensors['src_type'] = src_type
        tensors['source'] = tensors['source'].transpose(0, 1).contiguous()
        if tensors['source_pos'] is not None:
            tensors['source_pos'] = tensors['source_pos'].transpose(0, 1)
        tensors['src_lengths'] = torch.LongTensor(src_lengths)
        tensors['src_size'] = sum(src_lengths)

    if tgt_data is not None:
        target_full, target_pos, tgt_lengths = merge_data(tgt_data, align_right=tgt_align_right)
        target_full = target_full.t().contiguous()  # transpose BxT to TxB
        tensors['target'] = target_full
        tensors['target_input'] = target_full[:-1]
        tensors['target_output'] = target_full[1:]
        if target_pos is not None:
            tensors['target_pos'] = target_pos.t().contiguous()[:-1]
        tgt_size = sum([len(x) - 1 for x in tgt_data])
        tensors['tgt_lengths'] = tgt_lengths
    else:
        tgt_size = 0
        tensors['tgt_lengths'] = None

    tensors['tgt_size'] = tgt_size
    tensors['size'] = len(src_data) if src_data is not None else len(tgt_data)

    if src_lang_data is not None:
        tensors['source_lang'] = torch.cat(src_lang_data).long()
    if tgt_lang_data is not None:
        tensors['target_lang'] = torch.cat(tgt_lang_data).long()

    tensors['vocab_mask'] = vocab_mask

    return LightBatch(tensors)


def rewrap(light_batch):
    """
    Currently this light batch is used in data collection to avoid pickling errors.
    After that it is converted to Batch
    :param light_batch:
    :return:
    """
    return Batch(light_batch.tensors)


class Batch(object):
    # An object to manage the data within a minibatch
    def __init__(self, tensors):
        self.tensors = defaultdict(lambda: None, tensors)
        self.src_size = tensors['src_size']
        self.tgt_size = tensors['tgt_size']
        self.size = tensors['size']
        self.src_lengths = tensors['src_lengths']
        self.tgt_lengths = tensors['tgt_lengths']
        self.has_target = True if self.tensors['target'] is not None else False
        self.vocab_mask = tensors['vocab_mask']

    def get(self, name):
        if name in self.tensors:
            return self.tensors[name]
        else:
            return None

    def cuda(self, fp16=False, device=None):
        """
        Send the minibatch data into GPU.
        Old-fashioned without the 'device' control
        :param device: default = None (default CUDA device)
        :param fp16:
        :return: None
        """
        for key, tensor in self.tensors.items():
            if isinstance(tensor, dict):
                for k in tensor:
                    v = tensor[k]
                    # note: the original checked isinstance on the dict key
                    # instead of the value; checking the value matches the
                    # analogous loop in LightBatch.pin_memory below.
                    if isinstance(v, torch.Tensor):
                        tensor[k] = v.cuda(device=device)
            elif tensor is not None:
                if isinstance(tensor, torch.Tensor):
                    if tensor.type() == "torch.FloatTensor" and fp16:
                        self.tensors[key] = tensor.half()
                    self.tensors[key] = self.tensors[key].cuda(device=device)
            else:
                continue

    def switchout(self, swrate, src_vocab_size, tgt_vocab_size):
        # Switch out function ...
        # currently works with only source text data
        # if self.src_type == 'text':
        if len(self.tensors['source'].shape) == 2:
            self.tensors['source'] = switchout(self.tensors['source'], src_vocab_size, swrate, transpose=True)

        if self.has_target:
            self.tensors['target'] = switchout(self.tensors['target'], tgt_vocab_size, swrate,
                                               transpose=True, offset=1)
            target_full = self.tensors['target']
            self.tensors['target_input'] = target_full[:-1]
            self.tensors['target_output'] = target_full[1:]
            self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)


class LightBatch:

    def __init__(self, tensors):
        self.tensors = tensors

    def pin_memory(self):
        """
        Enable memory pinning
        :return:
        """
        for key, tensor in self.tensors.items():
            if isinstance(tensor, dict):
                for k in tensor:
                    v = tensor[k]
                    if isinstance(v, torch.Tensor):
                        tensor[k] = v.pin_memory()
            elif tensor is not None:
                if isinstance(tensor, torch.Tensor):
                    self.tensors[key] = self.tensors[key].pin_memory()
            else:
                continue
        return self


class Dataset(torch.utils.data.Dataset):
    def __init__(self, src_data, tgt_data,
                 src_sizes=None, tgt_sizes=None,
                 src_langs=None, tgt_langs=None,
                 batch_size_words=16384,
                 data_type="text", batch_size_sents=128,
                 multiplier=1, sorting=False,
                 augment=False,
                 src_align_right=False, tgt_align_right=False,
                 verbose=False, cleaning=False, debug=False,
                 num_split=1, **kwargs):
        """
        :param src_data: List of tensors for the source side (1D for text, 2 or 3Ds for other modalities)
        :param tgt_data: List of tensors (1D text) for the target side (already padded with <s> and </s>)
        :param src_langs: Source languages (list of one-tensors)
        :param tgt_langs: Target Languages (list of one-tensors)
        :param batch_size_words: Maximum number of words in the minibatch (MB can't have more than this)
        :param data_type: Text or Audio
        :param batch_size_sents: Maximum number of sequences in the minibatch (MB can't have more than this)
        :param multiplier: The number of sequences must divide by this number (for fp16 when multiplier=8)
        :param reshape_speech: Put N frames together to reduce the length (this might be done already in preprocessing)
        :param augment: Speech Augmentation (currently only spec augmentation is implemented)
        """
        """
        For alignment, the right-aligned data looks like:
        P P P P D D D D
        P P D D D D D D
        P P P P P D D D
        P P P D D D D D
        This can affect positional encoding (whose implementation is not consistent w.r.t padding)
        For models with absolute positional encoding, src and tgt should be aligned left (this is the default)
        For models with relative positional encoding, src should be right- and tgt should be left-aligned
        """
        self.src = src_data
        self._type = data_type
        self.src_align_right = src_align_right
        if self.src_align_right and verbose:
            print("* Source sentences aligned to the right side.")
        self.tgt_align_right = tgt_align_right
        self.upsampling = kwargs.get('upsampling', False)
        self.max_src_len = kwargs.get('max_src_len', None)
        self.max_tgt_len = kwargs.get('max_tgt_len', 256)
        self.cleaning = cleaning
        self.debug = debug
        self.num_split = num_split
        self.vocab_mask = None

        if self.max_src_len is None:
            if self._type == 'text':
                self.max_src_len = 256
            else:
                self.max_src_len = 1024

        # self.reshape_speech = reshape_speech
        if tgt_data:
            self.tgt = tgt_data
        else:
            self.tgt = None

        self.order = np.arange(len(self.src))

        # Processing data sizes
        if self.src is not None:
            if src_sizes is not None:
                self.src_sizes = np.asarray(src_sizes)
            else:
                self.src_sizes = np.asarray([data.size(0) for data in self.src])
        else:
            self.src_sizes = None

        if self.tgt is not None:
            if tgt_sizes is not None:
                self.tgt_sizes = np.asarray(tgt_sizes)
            else:
                self.tgt_sizes = np.asarray([data.size(0) for data in self.tgt])
        else:
            self.tgt_sizes = None

        # sort data to have efficient mini-batching during training
        if sorting:
            if verbose:
                print("* Sorting data ...")
            if self._type == 'text':
                sorted_order = np.lexsort((self.src_sizes, self.tgt_sizes))
            elif self._type == 'audio':
                sorted_order = np.lexsort((self.tgt_sizes, self.src_sizes))
            self.order = sorted_order

        # store data length in numpy for fast query
        if self.tgt is not None and self.src is not None:
            stacked_sizes = np.stack((self.src_sizes, self.tgt_sizes - 1), axis=0)
            self.data_lengths = np.amax(stacked_sizes, axis=0)
        elif self.src is None:
            self.data_lengths = self.tgt_sizes
        else:
            self.data_lengths = self.src_sizes

        # Processing language ids
        self.src_langs = src_langs
        self.tgt_langs = tgt_langs

        if self.src_langs is not None and self.tgt_langs is not None:
            assert (len(src_langs) == len(tgt_langs))

        # In the "bilingual" case, src_langs only contains one single vector
        # which is broadcast to batch_size
        if len(src_langs) <= 1:
            self.bilingual = True
        else:
            self.bilingual = False

        self.full_size = len(self.src) if self.src is not None else len(self.tgt)

        # maximum number of tokens in a mb
        self.batch_size_words = batch_size_words

        # maximum sequences in a mb
        self.batch_size_sents = batch_size_sents

        # the actual batch size must divide by this multiplier (for fp16 it has to be 4 or 8)
        self.multiplier = multiplier

        # by default: count the amount of padding when we group mini-batches
        self.pad_count = True

        # group samples into mini-batches
        if verbose:
            print("* Allocating mini-batches ...")
        self.batches = allocate_batch(self.order, self.data_lengths,
                                      self.src_sizes, self.tgt_sizes,
                                      batch_size_words, batch_size_sents,
                                      self.multiplier,
                                      self.max_src_len, self.max_tgt_len,
                                      self.cleaning)

        # the second to last mini-batch is likely the largest
        # (the last one can be the remnant after grouping samples, which has less than max size)
        self.largest_batch_id = len(self.batches) - 2

        self.num_batches = len(self.batches)

        self.cur_index = 0
        self.batchOrder = None

        if augment:
            self.augmenter = Augmenter()
        else:
            self.augmenter = None

    def size(self):
        return self.full_size

    def switchout(self, batch):
        pass

    def set_epoch(self, epoch):
        pass

    def set_mask(self, vocab_mask):
        self.vocab_mask = vocab_mask

    def get_largest_batch(self):
        return self.get_batch(self.largest_batch_id)

    def __len__(self):
        return self.num_batches

    def __getitem__(self, index):
        src_lang, tgt_lang = None, None
        if self.bilingual:
            if self.src_langs is not None:
                src_lang = self.src_langs[0]  # should be a tensor [0]
            if self.tgt_langs is not None:
                tgt_lang = self.tgt_langs[0]  # should be a tensor [1]
        else:
            if self.src_langs is not None:
                src_lang = self.src_langs[index]
            if self.tgt_langs is not None:
                tgt_lang = self.tgt_langs[index]

        # move augmenter here?
        sample = {
            'src': self.src[index] if self.src is not None else None,
            'tgt': self.tgt[index] if self.tgt is not None else None,
            'src_lang': src_lang,
            'tgt_lang': tgt_lang
        }

        return sample

    def get_batch(self, index):
        """
        This function is only used when we need to access a batch directly from the dataset
        (without an external loader)
        :param index: the index of the mini-batch in the list
        :return: Batch
        """
        assert index < self.num_batches, "%d > %d" % (index, self.num_batches)
        batch_ids = self.batches[index]
        if self.src:
            src_data = [self.src[i] for i in batch_ids]
        else:
            src_data = None

        if self.tgt:
            tgt_data = [self.tgt[i] for i in batch_ids]
        else:
            tgt_data = None

        src_lang_data = None
        tgt_lang_data = None

        if self.bilingual:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[0]]  # should be a tensor [0]
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[0]]  # should be a tensor [1]
        else:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[i] for i in batch_ids]
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[i] for i in batch_ids]

        batch = rewrap(collect_fn(src_data, tgt_data=tgt_data,
                                  src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
                                  src_align_right=self.src_align_right,
                                  tgt_align_right=self.tgt_align_right,
                                  src_type=self._type,
                                  augmenter=self.augmenter, upsampling=self.upsampling,
                                  vocab_mask=self.vocab_mask))

        return batch

    def collater(self, collected_samples):
        """
        Merge a list of samples into a Batch
        :param collected_samples: list of dicts (the output of the __getitem__)
        :return: batch
        """
        split_size = math.ceil(len(collected_samples) / self.num_split)
        sample_list = [collected_samples[i:i + split_size]
                       for i in range(0, len(collected_samples), split_size)]

        batches = list()

        for samples in sample_list:
            src_data, tgt_data = None, None
            src_lang_data, tgt_lang_data = None, None

            if self.src:
                src_data = [sample['src'] for sample in samples]
            if self.tgt:
                tgt_data = [sample['tgt'] for sample in samples]

            if self.bilingual:
                if self.src_langs is not None:
                    src_lang_data = [self.src_langs[0]]  # should be a tensor [0]
                if self.tgt_langs is not None:
                    tgt_lang_data = [self.tgt_langs[0]]  # should be a tensor [1]
            else:
                if self.src_langs is not None:
                    src_lang_data = [sample['src_lang'] for sample in samples]
                if self.tgt_langs is not None:
                    tgt_lang_data = [sample['tgt_lang'] for sample in samples]

            batch = collect_fn(src_data, tgt_data=tgt_data,
                               src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
                               src_align_right=self.src_align_right,
                               tgt_align_right=self.tgt_align_right,
                               src_type=self._type,
                               augmenter=self.augmenter, upsampling=self.upsampling,
                               vocab_mask=self.vocab_mask)
            batches.append(batch)

        return batches

    def __len__(self):
        return self.full_size

    # generate a new batch - order (static)
    def create_order(self, random=True):
        if random:
            self.batchOrder = torch.randperm(self.num_batches)
        else:
            self.batchOrder = torch.arange(self.num_batches).long()

        self.cur_index = 0

        return self.batchOrder

    # return the next batch according to the iterator
    def next(self, curriculum=False, reset=True):
        # reset iterator if we reach the data size limit
        if self.cur_index >= self.num_batches:
            if reset:
                self.cur_index = 0
            else:
                return None

        if curriculum or self.batchOrder is None:
            batch_index = self.cur_index
        else:
            batch_index = self.batchOrder[self.cur_index]

        batch = self[batch_index]

        # move the iterator one step
        self.cur_index += 1

        return [batch]

    def shuffle(self):
        data = list(zip(self.src, self.tgt))
        self.src, self.tgt = zip(*[data[i]
                                   for i in torch.randperm(len(data))])

    def set_index(self, iteration):
        assert (0 <= iteration < self.num_batches)
        self.cur_index = iteration


#
#
# LANGUAGE MODEL DATASET AND DATAHOLDER
# class LMBatch(Batch):
#
#     def __init__(self, input, target=None):
#         self.tensors = defaultdict(lambda: None)
#
#         self.tensors['target_input'] = input  # T x B
#         self.tensors['target_output'] = target  # T x B or None
#
#         # batch size
#         self.size = input.size(1)
#         self.length = input.size(0)
#
#         self.tgt_size = self.size * self.length
#         self.src_size = 0
#
#     def collate(self, **kwargs):
#         raise NotImplementedError
#
#
# class LanguageModelDataset(Dataset):
#
#     def __init__(self, data, batch_size_sents=128, seq_length=128):
#
#         self.data = data
#
#         self.batch_size_sents = batch_size_sents
#
#         self.seq_length = seq_length
#
#         # group samples into mini batches
#         self.num_batches = 0
#         self.allocate_batch()
#
#         self.full_size = self.num_batches
#
#         self.cur_index = 0
#
#         self.batchOrder = None
#
#     def allocate_batch(self):
#
#         nsequence = self.data.size(0) // self.batch_size_sents
#
#         self.data = self.data.narrow(0, 0, nsequence * self.batch_size_sents)
#
#         # Evenly divide the data across the bsz batches.
#         self.data = self.data.view(self.batch_size_sents, -1).t().contiguous()
#
#         # self.num_steps = nbatch - 1
#
#         self.num_batches = math.ceil((self.data.size(0) - 1) / self.seq_length)
#
#     # generate a new batch - order (static)
#     def create_order(self, random=False):
#
#         # For language model order shouldn't be random
#         if random:
#             self.batchOrder = torch.randperm(self.num_batches)
#         else:
#             self.batchOrder = torch.arange(self.num_batches).long()
#
#         self.cur_index = 0
#
#         return self.batchOrder
#
#     # return the next batch according to the iterator
#     # for language model
#     def next(self, curriculum=True, reset=True, split_sizes=1):
#
#         # reset iterator if we reach the data size limit
#         if self.cur_index >= self.num_batches:
#             if reset:
#                 self.cur_index = 0
#             else:
#                 return None
#
#         batch_index = self.cur_index
#
#         seq_len = self.seq_length
#
#         top_index = min(batch_index + seq_len, self.data.size(0) - 1)
#
#         batch = LMBatch(self.data[batch_index:top_index],
#                         target=self.data[batch_index + 1:top_index + 1])
#
#         # move the iterator one step
#         self.cur_index += seq_len
#
#         return [batch]
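# ---------------------------------------------------------------------------
# Illustrative sketch added for exposition (not part of the original module):
# what merge_data(..., type='text') does, reduced to plain PyTorch with a
# hypothetical PAD id of 0 standing in for onmt.constants.PAD.
#
#   import torch
#   seqs = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
#   PAD = 0
#   max_len = max(s.size(0) for s in seqs)
#   padded = seqs[0].new_full((len(seqs), max_len), PAD)
#   for i, s in enumerate(seqs):
#       padded[i, :s.size(0)] = s  # left-aligned (align_right=False)
#   # padded == tensor([[5, 6, 7],
#   #                   [8, 9, 0]])
# ---------------------------------------------------------------------------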
{"hexsha": "54790773a680227ca2deabea66e5ae8e11295cdc", "size": 22892, "ext": "py", "lang": "Python", "max_stars_repo_path": "onmt/data/dataset.py", "max_stars_repo_name": "tuannamnguyen93/NMTGMinor", "max_stars_repo_head_hexsha": "acde3454343bda7060fae541c110d0ad1a8ac4f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "onmt/data/dataset.py", "max_issues_repo_name": "tuannamnguyen93/NMTGMinor", "max_issues_repo_head_hexsha": "acde3454343bda7060fae541c110d0ad1a8ac4f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "onmt/data/dataset.py", "max_forks_repo_name": "tuannamnguyen93/NMTGMinor", "max_forks_repo_head_hexsha": "acde3454343bda7060fae541c110d0ad1a8ac4f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2727272727, "max_line_length": 120, "alphanum_fraction": 0.5890267342, "include": true, "reason": "import numpy", "num_tokens": 5205}
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 15 16:42:48 2012

Show an animated sine function and measure frames per second (FPS)
"""
import sys
sys.ps1 = 'Ciao'
import time

import numpy as np
import matplotlib
matplotlib.use('qt4agg')
import matplotlib.pyplot as plt

x = np.random.randn(10)
print('ready to plot')
plt.plot(x)
plt.draw()
plt.show(block=False)

print('starting to sleep (or working hard)')
time.sleep(1)

plt.plot(x + 2)
plt.draw()
plt.show(block=False)

print('sleeping again (or more work)')
time.sleep(1)

print('now blocking until the figure is closed')
plt.show(block=True)
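# ---------------------------------------------------------------------------
# Note added for exposition (not part of the original snippet): with an
# interactive backend, matplotlib's own plt.pause() is usually preferable to
# time.sleep(), because it also runs the GUI event loop so the window keeps
# redrawing while the script "works":
#
#   plt.plot(x)
#   plt.pause(1)  # draws the figure and processes GUI events for ~1 second
# ---------------------------------------------------------------------------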
{"hexsha": "f87fa755342cad7b66639a1a4c449c4575103995", "size": 624, "ext": "py", "lang": "Python", "max_stars_repo_path": "dsp_fpga/00_py_examples/running_sine_1.py", "max_stars_repo_name": "chipmuenk/python_snippets", "max_stars_repo_head_hexsha": "20ea4ad1436cfaa7debcbc9c87cdef375cea996b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-04T06:53:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-04T06:53:32.000Z", "max_issues_repo_path": "dsp_fpga/00_py_examples/running_sine_1.py", "max_issues_repo_name": "chipmuenk/python_snippets", "max_issues_repo_head_hexsha": "20ea4ad1436cfaa7debcbc9c87cdef375cea996b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dsp_fpga/00_py_examples/running_sine_1.py", "max_forks_repo_name": "chipmuenk/python_snippets", "max_forks_repo_head_hexsha": "20ea4ad1436cfaa7debcbc9c87cdef375cea996b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-28T10:57:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-28T10:57:56.000Z", "avg_line_length": 20.8, "max_line_length": 67, "alphanum_fraction": 0.6875, "include": true, "reason": "import numpy", "num_tokens": 162}
""" Tests for the utility functions in has_traits_utils module. """ from unittest import skipUnless, TestCase import numpy as np from uuid import UUID try: from scimath.units.api import UnitArray, UnitScalar SCIMATH_AVAILABLE = True except ImportError: SCIMATH_AVAILABLE = False from traits.api import Array, Bool, cached_property, Dict, Float, HasTraits, \ Instance, Int, List, Property, Str if SCIMATH_AVAILABLE: from app_common.traits.has_traits_utils import is_has_traits_almost_equal,\ is_val_almost_equal, trait_dict EQUAL = (True, "") if SCIMATH_AVAILABLE: class A(HasTraits): """ Testing class """ a_list = List([1, 2, 3]) a_array = Array(value=np.arange(10)) name = Str("sdfsjhd") editable = Bool a_uarray = Instance(UnitArray) _unique_keys = Dict() a_int = Int(6) a_uscalar = Instance(UnitScalar) a_float = Float(1.2) uuid = Instance(UUID) class B(HasTraits): """ Testing class """ b_int = Int y = Float(4) z = Str("sdfgkjshd") class C(HasTraits): """ Testing class which contains another locally defined one. """ x = Int sub_attr = Instance(B, ()) y = Property(Int, depends_on="sub_attr") @cached_property def _get_y(self): return self.sub_attr.b_int class D(HasTraits): x = Float class E(HasTraits): a = Array @skipUnless(SCIMATH_AVAILABLE, "scimath not available") class TestTraitDict(TestCase): def test_simplest_class(self): self.assertEqual(trait_dict(D()), {"x": 0}) def test_simple_class_with_property(self): c_content = trait_dict(C(x=2)) expected_keys = {"x", "sub_attr"} self.assertEqual(set(c_content.keys()), expected_keys) self.assertEqual(c_content["x"], 2) def test_large_class(self): a_content = trait_dict(A(a_int=2)) expected_keys = {'a_list', 'a_array', 'name', 'editable', 'a_uarray', '_unique_keys', 'a_int', 'a_uscalar', 'a_float', 'uuid'} self.assertEqual(set(a_content.keys()), expected_keys) self.assertEqual(a_content["a_int"], 2) @skipUnless(SCIMATH_AVAILABLE, "scimath not available") class TestIsHasTraitsAlmostEqual(TestCase): def test_identical_objects(self): # Simplest HT class b = B() self.assertEqual(is_has_traits_almost_equal(b, b), EQUAL) # HT class with Parameters a = A() self.assertEqual(is_has_traits_almost_equal(a, a), EQUAL) # With nested objects a = A(a_chrom_obj=b) self.assertEqual(is_has_traits_almost_equal(a, a), EQUAL) def test_fail_from_type(self): b = B() a = A() self.assertEqual(is_has_traits_almost_equal(a, b), (False, 'Different types (A vs B)')) def test_fail_from_none(self): # a's a_chrom_obj is None since not initialized a1 = A() a2 = A(a_chrom_obj=B()) self.assertFalse(is_has_traits_almost_equal(a1, a2)[0]) # This will trigger a comparison of a.a_chrom_obj and b.a_chrom_obj self.assertFalse(is_has_traits_almost_equal(a2, a1)[0]) def test_fail_from_extra_attrs(self): c1 = C() c2 = C() c2.foo = 2 self.assertEqual(is_has_traits_almost_equal(c1, c2), (False, "Different trait content: foo")) def test_cloned_objects(self): a = A() a_clone = a.clone_traits() self.assertEqual(is_has_traits_almost_equal(a, a_clone), EQUAL) def test_list_in_objects(self): a = A(a_list=[1, 4.]) # Same list but int instead of float a2 = A(a_list=[1, 4]) self.assertEqual(is_has_traits_almost_equal(a, a2), EQUAL) a3 = A(a_list=[1, 1]) equal, msg = is_has_traits_almost_equal(a, a3) self.assertFalse(equal) self.assertTrue(msg.startswith('Different a_list[1]')) def test_list_of_objects_in_objects(self): a = A(a_list=[A(a_list=[1, 4.])]) # Same list but int instead of float a2 = A(a_list=[A(a_list=[1, 4])]) self.assertEqual(is_has_traits_almost_equal(a, a2), EQUAL) a3 = A(a_list=[A(a_list=[1, 
                                 1])])
        equal, msg = is_has_traits_almost_equal(a, a3)
        self.assertFalse(equal)
        self.assertTrue(msg.startswith('Different a_list[0].a_list[1]'))

    def test_differ_by_property_cache(self):
        c1 = C(x=1, sub_attr=B())
        # Trigger creation of cache attribute
        _ = c1.y
        # Copy the 2 objects manually: the cache attribute will become part of
        # the trait_names
        c2 = C()
        for attr, val in c1.__dict__.items():
            if not attr.startswith("__traits"):
                setattr(c2, attr, val)

        # Remove the cache attribute from c1 only and make sure the 2 objects
        # are still considered "almost" equal.
        delattr(c1, "_traits_cache_y")
        self.assertNotIn("_traits_cache_y", c1.trait_names())
        self.assertIn("_traits_cache_y", c2.trait_names())
        self.assertEqual(is_has_traits_almost_equal(c1, c2), EQUAL)

    def test_differ_but_ignore(self):
        c1 = C(x=1)
        c2 = C(x=2)
        self.assertNotEqual(is_has_traits_almost_equal(c1, c2), EQUAL)
        self.assertEqual(is_has_traits_almost_equal(c1, c2, ignore=['x']),
                         EQUAL)

        # Test ignoring nested attributes
        c1 = C(x=1, sub_attr=B(b_int=1))
        c2 = C(x=1, sub_attr=B(b_int=2))
        self.assertNotEqual(is_has_traits_almost_equal(c1, c2), EQUAL)
        self.assertEqual(is_has_traits_almost_equal(c1, c2, ignore=['b_int']),
                         EQUAL)

    def test_array_dtype(self):
        e1 = E(a=np.array([1, 2], dtype=int))
        e2 = E(a=np.array([1., 2.], dtype=float))
        self.assertEqual(is_has_traits_almost_equal(e1, e2), EQUAL)
        is_equal = is_has_traits_almost_equal(e1, e2, check_dtype=True)
        self.assertNotEqual(is_equal, EQUAL)


@skipUnless(SCIMATH_AVAILABLE, "scimath not available")
class TestIsValAlmostEqual(TestCase):
    def test_list(self):
        l1 = [D(x=2.0), D(x=1.0)]
        l2 = [D(x=1+1), D(x=1.0)]
        self.assertEqual(is_val_almost_equal(l1, l2), EQUAL)

        l3 = [D(x=2.0 + 1.e-10), D(x=1.0)]
        # By default differences that are smaller than 1e-9 are ignored
        self.assertEqual(is_val_almost_equal(l1, l3), EQUAL)
        # with higher precision, it should fail:
        equal, msg = is_val_almost_equal(l1, l3, eps=1e-11)
        self.assertFalse(equal)
        self.assertTrue(msg.startswith('Different [0].x'))

    def test_array(self):
        a1 = np.array([1, 2, 3, 4])
        a2 = np.array([1., 2., 3., 4.])
        self.assertEqual(is_val_almost_equal(a1, a2), EQUAL)

        a1 = np.array(list("abcde"))
        a2 = np.array(list("abcde"))
        self.assertEqual(is_val_almost_equal(a1, a2), EQUAL)

    def test_array_different_int_dtype(self):
        a1 = np.array([1, 2, 3, 4], dtype="int32")
        a2 = np.array([1, 2, 3, 4], dtype="int64")
        self.assertEqual(is_val_almost_equal(a1, a2), EQUAL)
        self.assertFalse(is_val_almost_equal(a1, a2, check_dtype=True)[0])

    def test_array_different_str_dtype(self):
        a1 = np.array(list("abcde"), dtype='|S1')
        a2 = np.array(list("abcde"), dtype='|S2')
        self.assertEqual(is_val_almost_equal(a1, a2), EQUAL)
        self.assertFalse(is_val_almost_equal(a1, a2, check_dtype=True)[0])
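# ---------------------------------------------------------------------------
# Usage note added for exposition (not part of the original test module): the
# helpers under test return an (is_equal, message) tuple rather than a plain
# bool, which is why the tests above compare against EQUAL = (True, ""):
#
#   equal, msg = is_has_traits_almost_equal(obj1, obj2, ignore=['x'])
#   if not equal:
#       print("objects differ:", msg)
# ---------------------------------------------------------------------------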
{"hexsha": "84651a11fdb5efc4a1b7501a7e9acda36e7d6663", "size": 7585, "ext": "py", "lang": "Python", "max_stars_repo_path": "app_common/traits/tests/test_has_traits_utils.py", "max_stars_repo_name": "KBIbiopharma/app_common", "max_stars_repo_head_hexsha": "bd913e24741fb070aad058a0f90cbb2c64d8b106", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-12T17:51:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-03T05:36:15.000Z", "max_issues_repo_path": "app_common/traits/tests/test_has_traits_utils.py", "max_issues_repo_name": "KBIbiopharma/app_common", "max_issues_repo_head_hexsha": "bd913e24741fb070aad058a0f90cbb2c64d8b106", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2020-02-04T21:38:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-25T20:55:01.000Z", "max_forks_repo_path": "app_common/traits/tests/test_has_traits_utils.py", "max_forks_repo_name": "KBIbiopharma/app_common", "max_forks_repo_head_hexsha": "bd913e24741fb070aad058a0f90cbb2c64d8b106", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8616071429, "max_line_length": 79, "alphanum_fraction": 0.6214897825, "include": true, "reason": "import numpy", "num_tokens": 1978}
# https://tel.archives-ouvertes.fr/tel-00641678/document
import pytest
from sympy import *
from sympy import symbols, conjugate
from sympy import sin, cos
from sympy.abc import a, b, c, d, x, y, z, w, theta
from sympy.algebras.quaternion import Quaternion
from context import DualQuaternion
from sympy import simplify
from sympy.abc import theta, alpha

print("test")

d, a = symbols('d a')

q1 = Quaternion(1, 0, 0, 0)
q0 = Quaternion(0, 0, 0, 0)

rdi = Quaternion(0, 0, 0, d)
qti = Quaternion(cos(theta * 0.5), 0, 0, sin(theta * 0.5))
rai = Quaternion(0, a, 0, 0)
qai = Quaternion(cos(alpha * 0.5), 0, 0, sin(alpha * 0.5))

# dqti = diff(qti, theta)
# print(dqti)

dq1 = DualQuaternion(q1, 0.5 * rdi)
dq2 = DualQuaternion(qti, q0)
dq3 = DualQuaternion(q1, 0.5 * rai)
dq4 = DualQuaternion(qai, q0)

dq12 = dq1 * dq2
dq123 = dq12 * dq3
dq1234 = dq123 * dq4

ddqr = diff(dq1234.real, theta)
ddqd = diff(dq1234.dual, theta)
ddq1234 = DualQuaternion(ddqr, ddqd)

pprint(simplify(2 * ddq1234 * dq1234.quaternion_conjugate()))
pprint(diff(sin(2 * theta) * cos(alpha), theta))
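# ---------------------------------------------------------------------------
# Note added for exposition (not part of the original script): the quantity
# printed above, 2 * (d/dtheta dq) * dq*, is the standard twist identity for a
# unit dual quaternion: the product of the derivative with the quaternion
# conjugate collects the angular velocity in the real part and the linear
# velocity in the dual part. A minimal non-dual analogue with plain sympy
# quaternions (a rotation about z by t):
#
#   from sympy import symbols, sin, cos, diff
#   from sympy.algebras.quaternion import Quaternion
#   t = symbols('t')
#   q = Quaternion(cos(t / 2), 0, 0, sin(t / 2))
#   omega = 2 * diff(q, t) * q.conjugate()
#   # expanding the components reduces omega to Quaternion(0, 0, 0, 1),
#   # i.e. unit angular velocity about the z axis.
# ---------------------------------------------------------------------------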
{"hexsha": "d607c9d9a517412bf73120fe57a6794f2f3b7a8a", "size": 1075, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/Proof_DH_diff_for_2_32.py", "max_stars_repo_name": "wdfalfred/SymDQ", "max_stars_repo_head_hexsha": "82d858f1df9057c100fc35adc8bea2793c34a0f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/Proof_DH_diff_for_2_32.py", "max_issues_repo_name": "wdfalfred/SymDQ", "max_issues_repo_head_hexsha": "82d858f1df9057c100fc35adc8bea2793c34a0f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/Proof_DH_diff_for_2_32.py", "max_forks_repo_name": "wdfalfred/SymDQ", "max_forks_repo_head_hexsha": "82d858f1df9057c100fc35adc8bea2793c34a0f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4318181818, "max_line_length": 61, "alphanum_fraction": 0.6920930233, "include": true, "reason": "from sympy", "num_tokens": 401}
# Combine vectors by rows and columns
v1 = c(1, 3, 5, 7, 9)
v2 = c(2, 4, 6, 8, 10)
print("Original vectors:")
print(v1)
print(v2)
print("Combine the two vectors by columns:")
result = cbind(v1, v2)
print(result)
print("Combine the two vectors by rows:")
result = rbind(v1, v2)
print(result)
{"hexsha": "c319ca650a90b07fa4a14206feb9f5ec137eedae", "size": 304, "ext": "r", "lang": "R", "max_stars_repo_path": "combine2vectorsbyrowandcol.r", "max_stars_repo_name": "maansisrivastava/Practice-code-R", "max_stars_repo_head_hexsha": "24f1469908195050472831db7b1ebe83744d422c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "combine2vectorsbyrowandcol.r", "max_issues_repo_name": "maansisrivastava/Practice-code-R", "max_issues_repo_head_hexsha": "24f1469908195050472831db7b1ebe83744d422c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "combine2vectorsbyrowandcol.r", "max_forks_repo_name": "maansisrivastava/Practice-code-R", "max_forks_repo_head_hexsha": "24f1469908195050472831db7b1ebe83744d422c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.3846153846, "max_line_length": 51, "alphanum_fraction": 0.6842105263, "num_tokens": 100}
import os
import sys
from os.path import join, basename, dirname, splitext
from pathlib import Path

import numpy as np
import scipy
from scipy import io
import scipy.sparse as sp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from pprint import pprint
import argparse
from collections import defaultdict, OrderedDict, deque
import gzip
import shutil

from clu import metric_writers

if __name__ == "__main__":
    # os.makedirs("tensorboard_debug_dir", exist_ok=True)
    # tensorboard_dir = "train_dir/imagenet2012_pretrain_2/2021-06-13-13:00:27/tensorboard"
    # if not os.path.isdir("tensorboard_debug_dir/tensorboard_old"):
    #     shutil.copytree(tensorboard_dir, "tensorboard_debug_dir/tensorboard_old")
    # new_tensorboard_dir = "tensorboard_debug_dir/tensorboard_new"
    # if os.path.isdir(new_tensorboard_dir):
    #     shutil.rmtree(new_tensorboard_dir)
    # shutil.copytree(tensorboard_dir, new_tensorboard_dir)
    # # os.rename("tensorboard_debug_dir/tensorboard", "tensorboard_debug_dir/tensorboard_new")

    # writer = metric_writers.create_default_writer(new_tensorboard_dir, asynchronous=False)
    # for step in range(30000, 44000, 10):
    #     writer.write_scalars(step, dict(train_loss=5.0))
    # writer.close()

    csv_data_dir = "tensorboard_debug_dir/csv_data"
    tb_from_csv_dir = "tensorboard_debug_dir/tb_from_csv"
    if os.path.isdir(tb_from_csv_dir):
        shutil.rmtree(tb_from_csv_dir)
    os.makedirs(tb_from_csv_dir, exist_ok=True)

    writer = metric_writers.create_default_writer(tb_from_csv_dir, asynchronous=False)
    for csv_fp in Path(csv_data_dir).glob("*.csv"):
        csv_id = csv_fp.stem
        scalar_name = csv_id.split('-')[-1]
        print(scalar_name)
        scalar_df = pd.read_csv(csv_fp)
        scalar_df["Step"] = scalar_df["Step"].astype(np.int64)
        for index, row in scalar_df.iterrows():
            writer.write_scalars(row["Step"], {scalar_name: row["Value"]})
    writer.close()
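# ---------------------------------------------------------------------------
# Note added for exposition (not part of the original script): the loop above
# assumes CSV files in the layout TensorBoard produces when you download a
# scalar series from its UI, i.e. columns "Wall time", "Step", "Value", with
# the scalar tag taken from the text after the last '-' in the file name.
# A hypothetical example input:
#
#   tensorboard_debug_dir/csv_data/run_1-train_loss.csv
#     Wall time,Step,Value
#     1623580800.0,30000,5.0
# ---------------------------------------------------------------------------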
{"hexsha": "baeaba41eabc0ed55207a20e1e38ecdbcb9c0c85", "size": 1977, "ext": "py", "lang": "Python", "max_stars_repo_path": "tmp_test_overwrite_tensorboard.py", "max_stars_repo_name": "lzx325/vision_transformer", "max_stars_repo_head_hexsha": "8397a05f7b234fa5e0ede347d9061527b901dc68", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tmp_test_overwrite_tensorboard.py", "max_issues_repo_name": "lzx325/vision_transformer", "max_issues_repo_head_hexsha": "8397a05f7b234fa5e0ede347d9061527b901dc68", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tmp_test_overwrite_tensorboard.py", "max_forks_repo_name": "lzx325/vision_transformer", "max_forks_repo_head_hexsha": "8397a05f7b234fa5e0ede347d9061527b901dc68", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0192307692, "max_line_length": 94, "alphanum_fraction": 0.7481031866, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 485}
""" nlcmap - a nonlinear cmap from specified levels Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu> Release under MIT license. Some hacks added 2012 noted in code (@MRR) """ from pylab import * from numpy import * from matplotlib.colors import LinearSegmentedColormap class nlcmap(LinearSegmentedColormap): """A nonlinear colormap""" name = 'nlcmap' def __init__(self, cmap, levels): self.cmap = cmap # @MRR: Need to add N for backend self.N = cmap.N self.monochrome = self.cmap.monochrome self.levels = asarray(levels, dtype='float64') self._x = self.levels / self.levels.max() self._y = linspace(0.0, 1.0, len(self.levels)) #@MRR Need to add **kw for 'bytes' def __call__(self, xi, alpha=1.0, **kw): """docstring for fname""" # @MRR: Appears broken? # It appears something's wrong with the # dimensionality of a calculation intermediate #yi = stineman_interp(xi, self._x, self._y) yi = interp(xi, self._x, self._y) return self.cmap(yi, alpha) if __name__ == '__main__': y, x = mgrid[0.0:3.0:100j, 0.0:5.0:100j] H = 50.0 * exp( -(x**2 + y**2) / 4.0 ) levels = [0, 1, 2, 3, 6, 9, 20, 50] cmap_lin = cm.jet cmap_nonlin = nlcmap(cmap_lin, levels) subplot(2,1,1) contourf(x, y, H, levels, cmap=cmap_nonlin) colorbar() subplot(2,1,2) contourf(x, y, H, levels, cmap=cmap_lin) colorbar() savefig('nlcmap_example.png')
{"hexsha": "6351f90a0f5f4f6e1afab2b65644d0853cbec8e0", "size": 1544, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/data-viz/nonlinear_colormap.py", "max_stars_repo_name": "TravisWheelerLab/MMOREseqs", "max_stars_repo_head_hexsha": "492eda6efa4fd95ac0a787405a40db5a860bf3dc", "max_stars_repo_licenses": ["BSD-3-Clause-Clear", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/data-viz/nonlinear_colormap.py", "max_issues_repo_name": "TravisWheelerLab/MMOREseqs", "max_issues_repo_head_hexsha": "492eda6efa4fd95ac0a787405a40db5a860bf3dc", "max_issues_repo_licenses": ["BSD-3-Clause-Clear", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/data-viz/nonlinear_colormap.py", "max_forks_repo_name": "TravisWheelerLab/MMOREseqs", "max_forks_repo_head_hexsha": "492eda6efa4fd95ac0a787405a40db5a860bf3dc", "max_forks_repo_licenses": ["BSD-3-Clause-Clear", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0727272727, "max_line_length": 58, "alphanum_fraction": 0.606865285, "include": true, "reason": "from numpy", "num_tokens": 487}
(* *********************************************************************)
(*                                                                     *)
(*             The Quantitative CompCert verified compiler             *)
(*                                                                     *)
(*                Tahina Ramananandro, Yale University                 *)
(*                                                                     *)
(*  This file is a modified version of the                             *)
(*  CompCert 1.13 verified compiler by Xavier Leroy, INRIA.            *)
(*  The CompCert verified compiler is                                  *)
(*  Copyright Institut National de Recherche en Informatique et en     *)
(*  Automatique. All rights reserved. The original file is             *)
(*  distributed                                                        *)
(*  under the terms of the INRIA Non-Commercial License Agreement.     *)
(*  According to this license, this modified version is distributed    *)
(*  under a similar license (see LICENSE for details).                 *)
(*                                                                     *)
(* *********************************************************************)

(** Corollaries of the main semantic preservation theorem. *)

Require Import Classical.
Require Import Coqlib.
Require Import AST.
Require Import Integers.
Require Import Values.
Require Import Events.
Require Import Globalenvs.
Require Import Smallstep.
Require Import Behaviors.
Require Import Csyntax.
Require Import Csem.
Require Import Cstrategy.
Require Import Clight.
Require Import Cminor.
Require Import RTL.
Require Import Asm.
Require Import Compiler.
Require Import Errors.
Require Import Prune.
Require Import Memdata.

(** We assume that the available stack size [bound] of the process in the
    target machine is machine-representable, and that the stack is strongly
    aligned. *)

Section WITHBOUNDS.

Context (bound : Integers.Int.int)
        (Hbound: (Stacklayout.strong_align | Integers.Int.unsigned bound + size_chunk Mint32))
        (external_event_needs: Events.event -> Z)
        (p: Csyntax.program)
        (tp: Asm.program)
        (H: transf_c_program p = OK tp)
.

(** * Preservation of whole-program behaviors *)

(** From the simulation diagrams proved in file [Compiler], it follows that
    whole-program observable behaviors are preserved in the following sense.
    First, every behavior of the generated assembly code is matched by a
    behavior of the source C code. *)

Section CSTRATEGY.

(** If we consider the C evaluation strategy implemented by the compiler, we
    get stronger preservation results: the behaviors are exactly preserved. *)

Lemma prune_atomic_behaves_intro:
  forall beh,
    program_behaves (prune_semantics (Cstrategy.semantics p)) beh ->
    program_behaves (prune_semantics (atomic (Cstrategy.semantics p))) beh.
Proof.
  intros.
  destruct (prune_program_behaves _ _ H0).
  destruct H1.
  apply atomic_behaviors in H2.
  eapply program_behaves_prune.
  eassumption.
  assumption.
  intros. eapply ssr_well_behaved. apply Cstrategy.semantics_strongly_receptive.
Qed.

Lemma prune_atomic_behaves_elim:
  forall beh,
    program_behaves (prune_semantics (atomic (Cstrategy.semantics p))) beh ->
    program_behaves (prune_semantics (Cstrategy.semantics p)) beh.
Proof.
  intros.
  destruct (prune_program_behaves _ _ H0).
  destruct H1.
  apply atomic_behaviors in H2.
  eapply program_behaves_prune.
  eassumption.
  assumption.
  intros. eapply ssr_well_behaved. apply Cstrategy.semantics_strongly_receptive.
Qed.

(** We assume that:
    - the [Cstrategy] source program is proved not to go wrong
    - the traces of the source program do not "stack overflow".

    Let us explain this in more detail. To compute the weights (cf. Section
    3.1 of our PLDI 2014 paper) of the traces of the source program, we
    instantiate the stack metric with the sizes of the stack frames obtained
    in [Mach] and adjusted by the [Mach]-to-[Mach2] pass
    ([Mach2Mach2.stacksizes]). Then, the condition [NO_OVERFLOW] below
    imposes that those weights must not exceed [bound].
*)

Variables
    (NOT_STUCK: not_stuck (prune_semantics (atomic (Cstrategy.semantics p))))
    (NO_OVERFLOW: no_overflow_with_mach bound external_event_needs (fun p' => atomic (Cstrategy.semantics p')) transf_c_to_mach p)
.

(** Under these conditions, the target [Asm] program is guaranteed to refine
    the [Cstrategy] source program -- and more precisely, the *pruned*
    behaviors (without call/return events) are exactly preserved. In
    particular, the [Asm] program is guaranteed to not go wrong at all, and
    in particular, is guaranteed to not stack overflow.

    As explained in Section 3.2, it is important that the source program be
    proved to not go wrong in the unbounded-stack setting (condition
    [NOT_STUCK]). Indeed, our transformation uses the
    [Compiler.transf_mach_program_correct_strong] theorem, which depends on
    *all* traces of the [Mach] program obtained during the compilation of a
    source. If the source program were to have a wrong behavior
    [Goes_wrong t], then the compiled [Mach] program might well have a
    behavior [behavior_app t b] whose weight could well exceed [bound], thus
    violating the [NO_OVERFLOW] condition of that theorem. As each pass is
    proved independently of the others, it is not possible to track those
    behaviors of the [Mach] program that correspond to [Goes_wrong]
    behaviors of the source. *)

Theorem transf_cstrategy_program_preservation:
  (forall beh,
     program_behaves (prune_semantics (Cstrategy.semantics p)) beh <->
     program_behaves (Asm.semantics bound tp) beh).
Proof.
  split.
  intros.
  eapply forward_simulation_same_safe_behavior.
  eapply transf_cstrategy_program_correct.
  assumption.
  eassumption.
  eassumption.
  assumption.
  apply prune_atomic_behaves_intro. assumption.
  apply NOT_STUCK.
  apply prune_atomic_behaves_intro. assumption.
  intro.
  apply prune_atomic_behaves_elim.
  eapply backward_simulation_same_safe_behavior.
  eapply transf_cstrategy_program_correct.
  eassumption.
  assumption.
  eassumption.
  eassumption.
  assumption.
  assumption.
Qed.

End CSTRATEGY.

(** Similarly, if we assume that:
    - the [Csem] source program is proved not to go wrong
    - the traces of the source program do not "stack overflow".

    Let us explain this in more detail. To compute the weights (cf. Section
    3.1 of our PLDI 2014 paper) of the traces of the source program, we
    instantiate the stack metric with the sizes of the stack frames obtained
    in [Mach] and adjusted by the [Mach]-to-[Mach2] pass
    ([Mach2Mach2.stacksizes]). Then, the condition [NO_OVERFLOW] below
    imposes that those weights must not exceed [bound].
*)

Variables
    (NOT_STUCK: not_stuck (prune_semantics (Csem.semantics p)))
    (NO_OVERFLOW: no_overflow_with_mach bound external_event_needs (Csem.semantics) transf_c_to_mach p)
.

(** Then, under these conditions, the target [Asm] program is guaranteed to
    refine the [Csem] source program. In particular, the [Asm] program is
    guaranteed to not go wrong at all, and in particular, is guaranteed to
    not stack overflow.

    However, the [Asm] program may well have lost some behaviors of the
    [Csem] program, because all internal non-determinism of [Csem]
    (e.g. argument evaluation order) has been resolved by [Cstrategy]. So we
    only have refinement, not exact behavior preservation. This is
    CompCert-specific, nothing to do with stack consumption. *)

Theorem transf_c_program_preservation:
  forall beh,
    program_behaves (Asm.semantics bound tp) beh ->
    program_behaves (prune_semantics (Csem.semantics p)) beh.
Proof.
  intros. eapply backward_simulation_same_safe_behavior; eauto.
  eapply transf_c_program_correct; eauto.
Qed.
(** * Satisfaction of specifications *)

(** The second additional result shows that if all executions of the source
    C program satisfy a given specification (a predicate on the observable
    behavior of the program), then all executions of the produced Asm
    program satisfy this specification as well. *)

Section SPECS_PRESERVED.

Variable spec: program_behavior -> Prop.

Theorem transf_c_program_preserves_spec:
  (forall beh, program_behaves (prune_semantics (Csem.semantics p)) beh -> spec beh) ->
  (forall beh, program_behaves (Asm.semantics bound tp) beh -> spec beh).
Proof.
  intros; eauto using transf_c_program_preservation.
Qed.

End SPECS_PRESERVED.

End WITHBOUNDS.
{"author": "academic-archive", "repo": "pldi14-veristack", "sha": "9edcd8752ae2e1e6377bfb33589a377cc39c04ca", "save_path": "github-repos/coq/academic-archive-pldi14-veristack", "path": "github-repos/coq/academic-archive-pldi14-veristack/pldi14-veristack-9edcd8752ae2e1e6377bfb33589a377cc39c04ca/qcompcert/driver/Complements.v"}
/* * Copyright 2016 C. Brett Witherspoon */ #include <algorithm> #include <chrono> #include <cmath> #include <complex> #include <iostream> #include <stdexcept> #include <random> #include <boost/preprocessor/stringize.hpp> #include <boost/program_options.hpp> #include <boost/compute/core.hpp> #include <signum/opencl/fft.hpp> namespace compute = boost::compute; namespace po = boost::program_options; namespace opencl = signum::opencl; int main(int argc, char *argv[]) { size_t length; po::options_description desc("Supported options"); desc.add_options() ("help,h", "print help message") ("length,l", po::value<size_t>(&length)->default_value(8), "set FFT length") ("verbose,v", "print verbose messages"); po::variables_map vm; po::store(po::parse_command_line(argc, argv, desc), vm); po::notify(vm); if (vm.count("help")) { std::cerr << desc << std::endl; return 1; } compute::device device = compute::system::default_device(); compute::context context(device); compute::command_queue queue(context, device, compute::command_queue::enable_profiling); // Print some device information std::cout << device.platform().name() << ": " << device.name() << std::endl; std::cout << "Global memory size: " << device.global_memory_size() << std::endl; std::cout << "Local memory size: " << device.local_memory_size() << std::endl; std::cout << "Compute units: " << device.compute_units() << std::endl; std::cout << "Preferred vector width: " <<device.preferred_vector_width<float>() << std::endl; // Create FFT object opencl::fft fft(queue, length); // Initialize input buffer auto input = fft.map(compute::command_queue::map_write); std::default_random_engine eng; std::normal_distribution<> dist{0, 1}; auto rand = std::bind(dist, eng); std::generate(input, input + length, rand); if (vm.count("verbose")) { std::cout << "Input: " << std::endl; for (size_t i = 0; i < length; ++i) std::cout << input[i] << std::endl; } fft.unmap().wait(); // Enqueue kernels auto events = fft(); events.wait(); // Print profiling information std::chrono::nanoseconds time{0}; for (const auto &event : events) { time += event.duration<std::chrono::nanoseconds>(); } std::cout << "Execute time: " << time.count() << " ns" << std::endl; // Print output buffer auto output = fft.map(compute::command_queue::map_read); if (vm.count("verbose")) { std::cout << "Output: " << std::endl; for (size_t i = 0; i < length; ++i) std::cout << output[i] << std::endl; } fft.unmap().wait(); return 0; }
{"hexsha": "5d54952cebc9def9dd192153bba9fd9ceb15c9d2", "size": 2745, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/fft_benchmark.cpp", "max_stars_repo_name": "spoonb/libcomm", "max_stars_repo_head_hexsha": "5638dac889bddb16420d8321067c783438a5deaf", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/fft_benchmark.cpp", "max_issues_repo_name": "spoonb/libcomm", "max_issues_repo_head_hexsha": "5638dac889bddb16420d8321067c783438a5deaf", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/fft_benchmark.cpp", "max_forks_repo_name": "spoonb/libcomm", "max_forks_repo_head_hexsha": "5638dac889bddb16420d8321067c783438a5deaf", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5161290323, "max_line_length": 98, "alphanum_fraction": 0.618579235, "num_tokens": 703}
// Copyright Louis Dionne 2013-2017 // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) #include <boost/hana/type.hpp> namespace hana = boost::hana; template <typename ...> struct F { struct type; }; struct M { template <typename ...> struct apply { struct type; }; }; struct T; int main() { // See http://llvm.org/bugs/show_bug.cgi?id=20046 [](auto) { return hana::trait<F>; }(1); [](auto) { return hana::type_c<T>; }(1); [](auto) { return hana::template_<F>; }(1); [](auto) { return hana::metafunction<F>; }(1); [](auto) { return hana::metafunction_class<M>; }(1); }
{"hexsha": "2d08bc9a2ba43ea99928d88f1b009d51208793e7", "size": 709, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/hana/test/issues/clang_20046.cpp", "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2019-02-27T06:57:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-29T10:56:19.000Z", "max_issues_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/hana/test/issues/clang_20046.cpp", "max_issues_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_issues_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-04-04T18:00:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-04T18:00:00.000Z", "max_forks_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/hana/test/issues/clang_20046.cpp", "max_forks_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_forks_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-08-20T13:45:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T18:23:49.000Z", "avg_line_length": 33.7619047619, "max_line_length": 82, "alphanum_fraction": 0.6205923836, "num_tokens": 196}
C C *$ 2) Routines Producing New Images * -------------------------------- C C *+ image_convolve subroutine image_convolve(nix,niy,in_data,ncx,ncy,icx,icy,array, * null,out_data,status ) C ---------------------------------------------------------------- C C Convolve the input image with the specified array C C Given: C size of input image integer nix, niy C input image real*4 in_data(nix,niy) C size of convolving array integer ncx, ncy C centre of convolving array integer icx, icy C convolving array data real*4 array(ncx,ncy) C null data value on input image real*4 null C C Returned: C output image real*4 out_data(nix,niy) C error status word integer status C C The input image is convolved with the supplied function. The output C image is of the same size as the input image. C C Values beyond the edge of the input image are assumed to be zero. *- C Local variables integer i, j, i1, i2, j1, j2, ii, jj C check status on entry if (status.ne.0) return C initialise output data do j=1,niy do i=1,nix out_data(i,j) = 0.0 end do end do C move through input image and convolve to output image do j=1,niy j1 = max(1,j+icy-ncy) j2 = min(niy,j+icy-1) do i=1,nix i1 = max(1,i-icx+1) i2 = min(nix,i+ncx-icx) if (in_data(i,j).ne.null) then do jj=j1,j2 do ii=i1,i2 out_data(ii,jj) = out_data(ii,jj) + * in_data(i,j)*array(ii-i+icx,jj-j+ncy-icy+1) end do end do end if end do end do C report any error call cmd_err(status,'image_convolve',' ') end
{"hexsha": "40916b82ab66d661619ac74b4a8f16584fc537ed", "size": 1872, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "image_lib/image_convolve.f", "max_stars_repo_name": "CavendishAstrophysics/anmap", "max_stars_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-09-01T12:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2015-09-01T12:40:45.000Z", "max_issues_repo_path": "image_lib/image_convolve.f", "max_issues_repo_name": "CavendishAstrophysics/anmap", "max_issues_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image_lib/image_convolve.f", "max_forks_repo_name": "CavendishAstrophysics/anmap", "max_forks_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2972972973, "max_line_length": 71, "alphanum_fraction": 0.531517094, "num_tokens": 544}
# Numerics.py # # Created: # Modified: Feb 2016, Andrew Wendorff # ---------------------------------------------------------------------- # Imports # ---------------------------------------------------------------------- from Conditions import Conditions from SUAVE.Methods.Utilities.Chebyshev import chebyshev_data import numpy as np # ---------------------------------------------------------------------- # Numerics # ---------------------------------------------------------------------- class Numerics(Conditions): def __defaults__(self): self.tag = 'numerics' self.number_control_points = 16 self.discretization_method = chebyshev_data self.solver_jacobian = "none" self.tolerance_solution = 1e-8 self.tolerance_boundary_conditions = 1e-8 self.converged = None self.dimensionless = Conditions() self.dimensionless.control_points = np.empty([0,0]) self.dimensionless.differentiate = np.empty([0,0]) self.dimensionless.integrate = np.empty([0,0]) self.time = Conditions() self.time.control_points = np.empty([0,0]) self.time.differentiate = np.empty([0,0]) self.time.integrate = np.empty([0,0])
{"hexsha": "c716dcfde36a746b4f49fe46f0af48bc51fba156", "size": 1383, "ext": "py", "lang": "Python", "max_stars_repo_path": "References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Mission/Segments/Conditions/Numerics.py", "max_stars_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_stars_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Mission/Segments/Conditions/Numerics.py", "max_issues_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_issues_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Mission/Segments/Conditions/Numerics.py", "max_forks_repo_name": "Vinicius-Tanigawa/Undergraduate-Research-Project", "max_forks_repo_head_hexsha": "e92372f07882484b127d7affe305eeec2238b8a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7333333333, "max_line_length": 72, "alphanum_fraction": 0.4555314534, "include": true, "reason": "import numpy", "num_tokens": 267}
import numpy as np
import cv2
import matplotlib.pyplot as plt

from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.neighbors import KDTree
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import PCA

from time import time

class visual_words:
    def __init__(self):
        pass

def plot_images(n_rows, n_cols, images):
    f = plt.figure()
    for i, image in enumerate(images):
        plt.subplot(n_rows, n_cols, i+1)
        plt.imshow(images[i], cmap = plt.cm.gray)
        plt.xticks([])
        plt.yticks([])
    plt.show()
    #f.savefig('./figures/knn_faces.png')

np.random.seed(0)

if __name__ == "__main__":

    #Overview:
    #Olivetti dataset
    #Split into test and training
    #extract keypoints and compute sift features on training images
    #cluster sift features into a visual dictionary of size V
    #represent each image as visual words histogram
    #apply tf-idf (need text data)
    #fit LDA topic model on bags of visual words
    #given test data transform test image into tf_idf vector
    #use cosine similarity for image retrieval
    #display top-K images

    # Load the faces datasets
    data = fetch_olivetti_faces(shuffle=True, random_state=0)
    targets = data.target

    data = data.images.reshape((len(data.images), -1))
    data_train = data[targets < 30]
    data_test = data[targets >= 30]
    num_train_images = data_train.shape[0]

    #show mean training image
    plt.figure()
    plt.imshow(np.mean(data_train,axis=0).reshape(64,64))
    plt.title('Olivetti Dataset (Mean Training Image)')
    plt.show()

    #show random selection of images
    rnd_idx = np.arange(num_train_images)
    np.random.shuffle(rnd_idx)
    images = data_train[rnd_idx[0:16],:].reshape(16,64,64)
    plot_images(4,4,images)

    #compute dense SIFT
    num_kps = np.zeros(num_train_images)
    sift = cv2.SIFT()
    #orb = cv2.ORB()
    for img_idx in range(num_train_images):
        gray_img = 255*data_train[img_idx,:]/np.max(data_train[img_idx,:]) #scale
        gray_img = gray_img.reshape(64,64).astype(np.uint8) #reshape and cast
        dense = cv2.FeatureDetector_create("Dense")
        kp = dense.detect(gray_img)
        kp, des = sift.compute(gray_img, kp)
        #kp, des = orb.compute(gray_img, kp)
        #img_kp = cv2.drawKeypoints(gray_img, kp, color=(0,255,0), flags=0)
        #cv2.imshow('ORB keypoints', img_kp)

        num_kps[img_idx] = len(kp)

        #stack descriptors for all training images
        if (img_idx == 0):
            des_tot = des
        else:
            des_tot = np.vstack((des_tot, des))
    #end for

    #cluster images into a dictionary
    dictionary_size = 100
    kmeans = MiniBatchKMeans(n_clusters = dictionary_size, init = 'k-means++', batch_size = 5000, random_state = 0, verbose=0)

    tic = time()
    kmeans.fit(des_tot)
    toc = time()
    kmeans.get_params()
    print "K-means objective: %.2f" %kmeans.inertia_
    print "elapsed time: %.4f sec" %(toc - tic)

    kmeans.cluster_centers_
    labels = kmeans.labels_

    #PCA plot of kmeans_cluster centers
    pca = PCA(n_components=2)
    visual_words = pca.fit_transform(kmeans.cluster_centers_)

    plt.figure()
    plt.scatter(visual_words[:,0], visual_words[:,1], color='b', marker='o', lw = 2.0, label='Olivetti visual words')
    plt.title("Visual Words (PCA of cluster centers)")
    plt.xlabel("PC1")
    plt.ylabel("PC2")
    plt.grid(True)
    plt.legend()
    plt.show()

    #histogram of labels for each image = term-document matrix
    A = np.zeros((dictionary_size,num_train_images))
    ii = 0
    jj = 0
    for img_idx in range(num_train_images):
        if img_idx == 0:
            A[:,img_idx], bins = np.histogram(labels[0:num_kps[img_idx]], bins=range(dictionary_size+1))
        else:
            ii = np.int(ii + num_kps[img_idx-1])
            jj = np.int(ii + num_kps[img_idx])
            A[:,img_idx], bins = np.histogram(labels[ii:jj] , bins=range(dictionary_size+1))
        #print str(ii) + ':' + str(jj)
    #end for

    plt.figure()
    plt.spy(A.T, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.title('visual-word term-document matrix (train)')
    plt.xlabel('dictionary')
    plt.ylabel('documents')
    plt.show()

    #fit LDA topic model based on the term-document matrix of counts
    num_features = dictionary_size
    num_topics = 8 #fixed for LDA

    #fit LDA model
    print "Fitting LDA model..."
    lda_vb = LatentDirichletAllocation(n_topics = num_topics, max_iter=10, learning_method='online', batch_size = 512, random_state=0, n_jobs=1)

    tic = time()
    lda_vb.fit(A.T)  #online VB
    toc = time()
    print "elapsed time: %.4f sec" %(toc - tic)

    print "LDA params"
    print lda_vb.get_params()

    print "number of EM iter: %d" % lda_vb.n_batch_iter_
    print "number of dataset sweeps: %d" % lda_vb.n_iter_

    #topic matrix W: K x V
    #components[i,j]: topic i, word j
    #note: here topics correspond to label clusters
    topics = lda_vb.components_

    f = plt.figure()
    plt.matshow(topics, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.title('learned topic matrix')
    plt.ylabel('topics')
    plt.xlabel('dictionary')
    plt.show()
    f.savefig('./figures/topic.png')

    #topic proportions matrix: D x K
    #note: np.sum(H, axis=1) is not 1
    H = lda_vb.transform(A.T)

    f = plt.figure()
    plt.matshow(H, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.show()
    plt.title('topic proportions')
    plt.xlabel('topics')
    plt.ylabel('documents')
    f.savefig('./figures/proportions.png')

    #given test data, transform each test image into a topic-proportions vector

    #show mean test image
    plt.figure()
    plt.imshow(np.mean(data_test,axis=0).reshape(64,64))
    plt.show()

    num_test_images = data_test.shape[0]
    num_test_kps = np.zeros(num_test_images)

    #compute dense SIFT
    sift = cv2.SIFT()
    #orb = cv2.ORB()
    for img_idx in range(num_test_images):
        gray_img = 255*data_test[img_idx,:]/np.max(data_test[img_idx,:]) #scale
        gray_img = gray_img.reshape(64,64).astype(np.uint8) #reshape and cast
        dense = cv2.FeatureDetector_create("Dense")
        kp = dense.detect(gray_img)
        kp, des = sift.compute(gray_img, kp)
        #kp, des = orb.compute(gray_img, kp)
        #img_kp = cv2.drawKeypoints(gray_img, kp, color=(0,255,0), flags=0)
        #cv2.imshow('ORB keypoints', img_kp)

        num_test_kps[img_idx] = len(kp)

        #stack descriptors for all test images
        if (img_idx == 0):
            des_test_tot = des
        else:
            des_test_tot = np.vstack((des_test_tot, des))
    #end for

    #assign des_test_tot to one of kmeans cluster centers
    #use 128-dimensional kd-tree to search for nearest neighbors
    kdt = KDTree(kmeans.cluster_centers_)
    Q = des_test_tot #query
    kdt_dist, kdt_idx = kdt.query(Q,k=1) #knn
    test_labels = kdt_idx #knn = 1 labels

    #form A_test matrix from test_labels
    #histogram of labels for each image: term-document matrix
    A_test = np.zeros((dictionary_size,num_test_images))
    ii = 0
    jj = 0
    for img_idx in range(num_test_images):
        if img_idx == 0:
            A_test[:,img_idx], bins = np.histogram(test_labels[0:num_test_kps[img_idx]], bins=range(dictionary_size+1))
        else:
            ii = np.int(ii + num_test_kps[img_idx-1])
            jj = np.int(ii + num_test_kps[img_idx])
            A_test[:,img_idx], bins = np.histogram(test_labels[ii:jj] , bins=range(dictionary_size+1))
        #print str(ii) + ':' + str(jj)
    #end for

    plt.figure()
    plt.spy(A_test.T, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.title('visual-word term-document matrix (test)')
    plt.xlabel('dictionary')
    plt.ylabel('documents')
    plt.show()

    #Use transform on A_test with the already trained LDA to get the H_test matrix
    #topic proportions matrix: D x K
    #note: np.sum(H, axis=1) is not 1
    H_test = lda_vb.transform(A_test.T)

    f = plt.figure()
    plt.matshow(H_test, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.show()
    plt.title('topic proportions')
    plt.xlabel('topics')
    plt.ylabel('documents')
    f.savefig('./figures/proportions_test.png')

    #retrieve the H_train document that is closest in cosine similarity to each H_test
    #use cosine similarity for image retrieval
    Kxy = cosine_similarity(H_test, H)
    knn_test = np.argmax(Kxy, axis=1)  #highest similarity = closest training document

    f = plt.figure()
    plt.matshow(Kxy, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.show()
    plt.title('Cosine Similarity')
    plt.xlabel('train data')
    plt.ylabel('test data')
    f.savefig('./figures/cosine_similarity.png')

    #display knn images (docId is an image)
    rnd_idx = np.arange(num_test_images)
    np.random.shuffle(rnd_idx)
    images = data_test[rnd_idx[0:16],:].reshape(16,64,64)
    images_knn = data_train[knn_test[rnd_idx[0:16]],:].reshape(16,64,64)

    plot_images(4,4,images)
    plot_images(4,4,images_knn)
{"hexsha": "d1f16660b2fcf04ea721e7d5f77ed498f481f9aa", "size": 9664, "ext": "py", "lang": "Python", "max_stars_repo_path": "visual_words/visual_words.py", "max_stars_repo_name": "vsmolyakov/cv", "max_stars_repo_head_hexsha": "dd4f5d02a82df5cd5342797d184ebf2722e6562e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2017-07-17T18:36:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T20:00:51.000Z", "max_issues_repo_path": "visual_words/visual_words.py", "max_issues_repo_name": "vsmolyakov/cv", "max_issues_repo_head_hexsha": "dd4f5d02a82df5cd5342797d184ebf2722e6562e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-11-10T15:32:09.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-15T14:55:33.000Z", "max_forks_repo_path": "visual_words/visual_words.py", "max_forks_repo_name": "vsmolyakov/cv", "max_forks_repo_head_hexsha": "dd4f5d02a82df5cd5342797d184ebf2722e6562e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2017-01-31T13:43:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T05:45:14.000Z", "avg_line_length": 34.8880866426, "max_line_length": 144, "alphanum_fraction": 0.6184809603, "include": true, "reason": "import numpy", "num_tokens": 2544}
# coding: utf-8
"""
This module for defining chemical reaction objects was originally sourced from
pymatgen and streamlined for the reaction-network code.
"""
import re
from functools import cached_property
from itertools import chain, combinations
from typing import Dict, List, Optional

import numpy as np
from monty.fractions import gcd_float
from pymatgen.core.composition import Composition, Element

from rxn_network.core import Reaction


class BasicReaction(Reaction):
    """
    An object representing a basic chemical reaction: compositions and their
    coefficients.
    """

    # Tolerance for determining if a particular component fraction is > 0.
    TOLERANCE = 1e-6

    def __init__(
        self,
        compositions: List[Composition],
        coefficients: List[float],
        balanced: Optional[bool] = None,
        data: Optional[Dict] = None,
        lowest_num_errors: Optional[int] = None,
    ):
        """
        A BasicReaction object is defined by a list of compositions and their
        corresponding coefficients, where a negative coefficient refers to a
        reactant, and a positive coefficient refers to a product.

        Args:
            compositions: List of composition objects (pymatgen).
            coefficients: List of coefficients, where a negative coefficient
                distinguishes a reactant.
            balanced: Whether the reaction is stoichiometrically balanced or not
                (see construction via the balance() method).
            data: Optional corresponding data in dictionary format; often used
                to store various calculated parameters.
            lowest_num_errors: the minimum number of errors reported by the
                reaction balancing algorithm (see the balance() method). A
                number of errors >= 1 means that the reaction may be different
                than intended (some phases may be shuffled or removed entirely).
        """
        self._compositions = compositions
        self._coefficients = np.array(coefficients)

        self.reactant_coeffs = {
            comp: coeff for comp, coeff in zip(compositions, coefficients) if coeff < 0
        }
        self.product_coeffs = {
            comp: coeff for comp, coeff in zip(compositions, coefficients) if coeff > 0
        }

        if balanced is not None:
            self.balanced = balanced
        else:
            sum_reactants = sum(
                [k * abs(v) for k, v in self.reactant_coeffs.items()], Composition({})
            )
            sum_products = sum(
                [k * abs(v) for k, v in self.product_coeffs.items()], Composition({})
            )

            if not sum_reactants.almost_equals(
                sum_products, rtol=0, atol=self.TOLERANCE
            ):
                self.balanced = False
            else:
                self.balanced = True

        self.data = data
        self.lowest_num_errors = lowest_num_errors

    @property
    def reactants(self) -> List[Composition]:
        "List of reactants for this reaction"
        return list(self.reactant_coeffs.keys())

    @property
    def products(self) -> List[Composition]:
        "List of products for this reaction"
        return list(self.product_coeffs.keys())

    @property
    def compositions(self) -> List[Composition]:
        "List of composition objects for this reaction"
        return self._compositions

    @property
    def coefficients(self) -> np.array:  # pylint: disable = W0236
        "Array of reaction coefficients"
        return self._coefficients

    @property
    def energy(self) -> float:
        "The energy of this reaction"
        raise ValueError("No energy for a basic reaction!")

    @property
    def energy_per_atom(self) -> float:
        "The energy per atom of this reaction"
        raise ValueError("No energy (per atom) for a basic reaction!")

    @property
    def is_identity(self):
        "Returns True if the reaction has identical reactants and products"
        if set(self.reactants) != set(self.products):
            return False
        if self.balanced is False:  # if not balanced, can not check coefficients
            return True
        return all(
            [
                np.isclose(self.reactant_coeffs[c] * -1, self.product_coeffs[c])
                for c in self.reactant_coeffs
            ]
        )

    @property
    def chemical_system(self):
        "The chemical system of this reaction, e.g. 'Li-O'"
        return "-".join(sorted([str(el) for el in self.elements]))

    def copy(self) -> "BasicReaction":
        "Returns a copy of the BasicReaction object"
        return BasicReaction(
            self.compositions,
            self.coefficients,
            self.balanced,
            self.data,
            self.lowest_num_errors,
        )

    def reverse(self) -> "BasicReaction":
        """
        Returns a copy of the original reaction object where original reactants
        are new products, and vice versa.
        """
        # Note: instantiate BasicReaction here (the Reaction base class is an
        # interface and cannot be constructed directly).
        return BasicReaction(
            self.compositions,
            -1 * self.coefficients,
            self.balanced,
            self.data,
            self.lowest_num_errors,
        )

    def normalize_to(self, comp: Composition, factor: float = 1) -> "BasicReaction":
        """
        Normalizes the reaction to one of the compositions via the provided factor.

        By default, normalizes such that the composition given has a coefficient of 1.

        Args:
            comp: Composition object to normalize to
            factor: factor to normalize to. Defaults to 1.
        """
        all_comp = self.compositions
        coeffs = self.coefficients

        scale_factor = abs(1 / coeffs[self.compositions.index(comp)] * factor)
        coeffs *= scale_factor

        return BasicReaction(all_comp, coeffs)

    def normalize_to_element(
        self, element: Element, factor: float = 1
    ) -> "BasicReaction":
        """
        Normalizes the reaction to one of the elements.
        By default, normalizes such that the amount of the element is 1.
        Another factor can be specified.

        Args:
            element (Element/Species): Element to normalize to.
            factor (float): Factor to normalize to. Defaults to 1.
        """
        all_comp = self.compositions
        coeffs = self.coefficients
        current_el_amount = (
            sum([all_comp[i][element] * abs(coeffs[i]) for i in range(len(all_comp))])
            / 2
        )
        scale_factor = factor / current_el_amount
        coeffs *= scale_factor

        return BasicReaction(all_comp, coeffs)

    def get_el_amount(self, element: Element) -> float:
        """
        Returns the amount of the element in the reaction.

        Args:
            element (Element/Species): Element in the reaction

        Returns:
            Amount of that element in the reaction.
        """
        return (
            sum(
                [
                    self.compositions[i][element] * abs(self.coefficients[i])
                    for i in range(len(self.compositions))
                ]
            )
            / 2
        )

    def get_coeff(self, comp: Composition):
        """
        Returns the coefficient for a particular composition
        """
        return self.coefficients[self.compositions.index(comp)]

    def normalized_repr_and_factor(self):
        """
        Normalized representation for a reaction.
        For example, ``4 Li + 2 O -> 2Li2O`` becomes ``2 Li + O -> Li2O``
        """
        return self._str_from_comp(self.coefficients, self.compositions, True)

    @property
    def normalized_repr(self):
        """
        A normalized representation of the reaction. All factors are
        converted to lowest common factors.
""" return self.normalized_repr_and_factor()[0] @staticmethod def _reduce(coeffs, compositions): r_coeffs = np.zeros(len(coeffs)) r_comps = [] for i, (amt, comp) in enumerate(zip(coeffs, compositions)): comp, factor = comp.get_reduced_composition_and_factor() r_coeffs[i] = amt * factor r_comps.append(comp) factor = 1 / gcd_float(np.abs(r_coeffs)) r_coeffs *= factor return r_coeffs, r_comps, factor @classmethod def _str_from_formulas(cls, coeffs, formulas) -> str: reactant_str = [] product_str = [] for amt, formula in zip(coeffs, formulas): if abs(amt + 1) < cls.TOLERANCE: reactant_str.append(formula) elif abs(amt - 1) < cls.TOLERANCE: product_str.append(formula) elif amt < -cls.TOLERANCE: reactant_str.append("{:.4g} {}".format(-amt, formula)) elif amt > cls.TOLERANCE: product_str.append("{:.4g} {}".format(amt, formula)) return " + ".join(reactant_str) + " -> " + " + ".join(product_str) @classmethod def _str_from_comp(cls, coeffs, compositions, reduce=False): r_coeffs = np.zeros(len(coeffs)) r_formulas = [] for i, (amt, comp) in enumerate(zip(coeffs, compositions)): formula, factor = comp.get_reduced_formula_and_factor() r_coeffs[i] = amt * factor r_formulas.append(formula) if reduce: factor = 1 / gcd_float(np.abs(r_coeffs)) r_coeffs *= factor else: factor = 1 return cls._str_from_formulas(r_coeffs, r_formulas), factor def __eq__(self, other): if self is other: return True elif str(self) == str(other): return True else: return (set(self.reactants) == set(other.reactants)) & ( set(self.products) == set(other.products) ) def __hash__(self): return hash( "-".join( [e.reduced_formula for e in sorted(self.reactants)] + [e.reduced_formula for e in sorted(self.products)] ) ) def __str__(self): return self._str_from_comp(self.coefficients, self.compositions)[0] __repr__ = __str__ @staticmethod def _from_coeff_dicts(reactant_coeffs, product_coeffs): reactant_comps, r_coefs = zip( *[(comp, -1 * coeff) for comp, coeff in reactant_coeffs.items()] ) product_comps, p_coefs = zip( *[(comp, coeff) for comp, coeff in product_coeffs.items()] ) return BasicReaction(reactant_comps + product_comps, r_coefs + p_coefs) @staticmethod def from_string(rxn_string) -> "BasicReaction": """ Generates a balanced reaction from a string. The reaction must already be balanced. Args: rxn_string: The reaction string. 
For example, "4 Li + O2-> 2Li2O" Returns: BalancedReaction """ rct_str, prod_str = rxn_string.split("->") def get_comp_amt(comp_str): return { Composition(m.group(2)): float(m.group(1) or 1) for m in re.finditer( r"([\d\.]*(?:[eE]-?[\d\.]+)?)\s*([A-Z][\w\.\(\)]*)", comp_str ) } reactant_coeffs = get_comp_amt(rct_str) product_coeffs = get_comp_amt(prod_str) return BasicReaction._from_coeff_dicts(reactant_coeffs, product_coeffs) @classmethod def _balance_coeffs( cls, reactants: List[Composition], products: List[Composition], robust=True ) -> np.array: """ Balances the reaction and returns the new coefficient matrix """ compositions = reactants + products num_comp = len(compositions) all_elems = sorted({elem for c in compositions for elem in c.elements}) num_elems = len(all_elems) comp_matrix = np.array([[c[el] for el in all_elems] for c in compositions]).T rank = np.linalg.matrix_rank(comp_matrix) diff = num_comp - rank num_constraints = diff if diff >= 2 else 1 # an error = a component changing sides or disappearing lowest_num_errors = np.inf first_product_idx = len(reactants) # start with simplest product constraints, work towards most complex reactant constraints product_constraints = chain.from_iterable( [ combinations(range(first_product_idx, num_comp), n_constr) for n_constr in range(num_constraints, 0, -1) ] ) reactant_constraints = chain.from_iterable( [ combinations(range(0, first_product_idx), n_constr) for n_constr in range(num_constraints, 0, -1) ] ) best_soln = None for constraints in chain(product_constraints, reactant_constraints): n_constr = len(constraints) comp_and_constraints = np.append( comp_matrix, np.zeros((n_constr, num_comp)), axis=0 ) b = np.zeros((num_elems + n_constr, 1)) b[-n_constr:] = 1 if min(constraints) >= first_product_idx else -1 for num, idx in enumerate(constraints): comp_and_constraints[num_elems + num, idx] = 1 # arbitrarily fix coeff to 1 coeffs = np.matmul(np.linalg.pinv(comp_and_constraints), b) if np.allclose(np.matmul(comp_matrix, coeffs), np.zeros((num_elems, 1))): expected_signs = np.array([-1] * len(reactants) + [+1] * len(products)) num_errors = np.sum( np.multiply(expected_signs, coeffs.T) < cls.TOLERANCE ) if num_errors == 0: lowest_num_errors = 0 best_soln = coeffs break if num_errors < lowest_num_errors: lowest_num_errors = num_errors best_soln = coeffs return np.squeeze(best_soln), lowest_num_errors @classmethod def balance( cls, reactants: List[Composition], products: List[Composition], data: Optional[Dict] = None, ) -> "BasicReaction": """ Reactants and products to be specified as list of pymatgen.core.Composition. e.g., [comp1, comp2] Args: reactants: List of reactants. products: List of products. data: Optional dictionary containing extra data about the reaction. """ compositions = reactants + products coeffs, lowest_num_errors = cls._balance_coeffs(reactants, products) return cls(compositions, coeffs, data=data, lowest_num_errors=lowest_num_errors)
{"hexsha": "1119bd60ce39b526ded56a1feea780984853a73d", "size": 14720, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/rxn_network/reactions/basic.py", "max_stars_repo_name": "bigboyabhisthi/reaction-network", "max_stars_repo_head_hexsha": "b84f16b7261ecd62d7aa8e2681907f6ea0c35565", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-22T23:09:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T23:09:47.000Z", "max_issues_repo_path": "src/rxn_network/reactions/basic.py", "max_issues_repo_name": "bigboyabhisthi/reaction-network", "max_issues_repo_head_hexsha": "b84f16b7261ecd62d7aa8e2681907f6ea0c35565", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rxn_network/reactions/basic.py", "max_forks_repo_name": "bigboyabhisthi/reaction-network", "max_forks_repo_head_hexsha": "b84f16b7261ecd62d7aa8e2681907f6ea0c35565", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3925233645, "max_line_length": 97, "alphanum_fraction": 0.5903532609, "include": true, "reason": "import numpy", "num_tokens": 3161}
import neural_net as nn import numpy as np LEARNING_RATE = 0.8 ACTIVATION = nn.Sigmoid RANDOM_WEIGHTS = True LOSS_FN = nn.CrossEntropyLoss LAYERS = (2, 2, 1) INPUTS = np.array([[0, 0, 1, 1], [0, 1, 0, 1]]) OUTPUTS = np.array([[0, 1, 1, 0]]) a = nn.StochasticNet(layers=LAYERS, activation=ACTIVATION, loss=LOSS_FN, lr=LEARNING_RATE, random=RANDOM_WEIGHTS) print("Initial Weights: ") for i in range(len(a.weights)): print("Layer {}:\n {}\n {}".format(i, a.weights[i], a.biases[i])) a.fit(INPUTS, OUTPUTS) result = a.predict(INPUTS) print("Forward pass results: ") for i in range(INPUTS.shape[1]): print("{} {} : {}".format(INPUTS.T[i][0], INPUTS.T[i][1], result[0].T[i])) print("Weights: ") for i in range(len(a.weights)): print("Layer {}:\n {}\n {}".format(i, a.weights[i], a.biases[i]))
{"hexsha": "70447262f79031e816f408c84db7b971b2e21936", "size": 807, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "Youyoun/numpy_neural_network", "max_stars_repo_head_hexsha": "3a13971f8877e72bb244fd0a9ba17ca6dd4ddaf5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-26T10:47:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-26T10:47:12.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "Youyoun/numpy_neural_network", "max_issues_repo_head_hexsha": "3a13971f8877e72bb244fd0a9ba17ca6dd4ddaf5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "Youyoun/numpy_neural_network", "max_forks_repo_head_hexsha": "3a13971f8877e72bb244fd0a9ba17ca6dd4ddaf5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9, "max_line_length": 113, "alphanum_fraction": 0.6468401487, "include": true, "reason": "import numpy", "num_tokens": 266}
import os
import time
import threading
import multiprocessing
import math
from pylab import *
import PIL.Image as im
import csv
import sys
from imutils import face_utils
import numpy as np
import argparse
import imutils
import dlib
import cv2
import numpy
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
from string import Template
import string

def distanza(x1, y1, x2, y2):
    # Euclidean distance between (x1, y1) and (x2, y2)
    x12 = (x2 - x1) * (x2 - x1)
    y12 = (y2 - y1) * (y2 - y1)
    xy = x12 + y12
    dist = math.sqrt(xy)
    return dist

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

intero = 0
voltostr = ''
# Change these to alter the configuration
cerchi = 4   # rings
fetteQ = 4   # slices per quadrant
fette = fetteQ * 4
s1 = cerchi * fette
#dizionario = np.zeros((2223, s1))
# change the range to alter the number of photos to process
dizionario = [ [ 0 for y in range(s1)] for x in range(1000)]
#x = width
#y = height
# change the range to alter the number of photos to process
dizionario_str = ['' for xx in range(1000)]
volto = np.zeros(s1)

def aggiungi(xcentro, ycentro, rax, xpunto, ypunto, distNaso, coeff, immm):
    # Assigns a landmark to one cell of the "spider web" (ring, quadrant,
    # slice) centred on the nose. Note: the quadrant tests below mix the
    # parameters with the loop globals x/y/xnose/ynose; they coincide when
    # this function is called from the loop further down.
    indice = 0
    settore = np.zeros(3) # ring, quadrant, slice
    # distNaso = distance from the nose
    a = 0 # a = start radius
    b8 = 4 * rax / 10 # b = stop radius
    i = 1 # which ring the point falls in. i = [1, cerchi]
    b4 = 7 * rax / 10
    b2 = 9 * rax / 10
    # rings
    if( distNaso > a and distNaso <= b8):
        settore[0] = 1
    elif(distNaso > b8 and distNaso <= b4):
        settore[0] = 2
    elif(distNaso > b4 and distNaso <= b2):
        settore[0] = 3
    else :
        settore[0] = 4
    # quadrant
    if (xpunto <= xcentro and y <= ycentro):
        # the point belongs to the top-left quadrant
        settore[1] = 2
    elif (x <= xnose and y >= ynose):
        # the point belongs to the bottom-left quadrant
        settore[1] = 3
    elif (x >= xnose and y <= ynose):
        # the point belongs to the top-right quadrant
        settore[1] = 1
    else:
        # the point belongs to the bottom-right quadrant
        settore[1] = 4
    a = 0 # start angle (degrees)
    b = 90 / fetteQ # stop angle (degrees)
    i = 1 # which slice the point falls in. i = [1, fette]
    radang_a = 0 # start radians
    radang_b = math.radians(b) # stop radians
    tng_a = math.tan(radang_a)
    tng_b = math.tan(radang_b)
    # slice
    while(settore[2] == 0 and b < 90):
        if(coeff > tng_a and coeff <= tng_b):
            settore[2] = i
        b = b + (90 / fetteQ)
        radang_b = math.radians(b) # stop radians
        tng_a = tng_b
        tng_b = math.tan(radang_b)
        i = i+1
    if(xpunto == xnose):
        settore[2] = 1
    if(settore[2] == 0):
        settore[2] = fetteQ
    # settore[0] = ring
    # settore[1] = quadrant
    # settore[2] = slice
    if(settore[1] == 1 or settore[1] == 3):
        indice = int(fette * (settore[0]-1) + fetteQ * (settore[1] -1) + abs(settore[2] - 4 ) -1)
    else:
        indice = int(fette * (settore[0] - 1) + fetteQ * (settore[1] - 1) + settore[2] - 1)
    try:
        if (xnose != xpunto or ynose != ypunto): # the nose itself has no sector
            volto[indice] = int(volto[indice] + 1)
    except:
    #else:
        print("ERROR ------")
        print("index ", indice)
        #print("xnose ", xnose, " xpunto ", xpunto, " ynose ", ynose , " ypunto " , ypunto)
    return indice

# Change the relative path to the image dataset
immagini = os.listdir('Dataset1000')
num_volto = 0
for img in immagini:
    if img.find(".jpg") > 0:
        tick_detector = time.time()
        # Change the relative path to the image dataset
        im2 = "Dataset1000/"+str(img)
        foto = cv2.imread(im2)
        volto = np.zeros(s1)
        xnose = 0
        ynose = 0
        raggio = 0
        xlont = 0
        ylont = 0
        foto = imutils.resize(foto, width=512)
        gray = cv2.cvtColor(foto, cv2.COLOR_BGR2GRAY)
        tick_detector = time.time()
        rects = detector(foto, 1)
        #print("tick detector " , time.time() - tick_detector)
        dista = 0
        raggio = 0
        m = 0
        d = 0
        n = 1
        imga = zeros([512, 512, 3])
        for (i, rect) in enumerate(rects):
            tick_predictor = time.time()
            shape = predictor(gray, rect)
            #print("tick predictor ", time.time() - tick_predictor)
            shape = face_utils.shape_to_np(shape)
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            xnose = shape[33][0]
            ynose = shape[33][1]
            for (x, y) in shape:
                tick_volto = time.time()
                dista = distanza(xnose, ynose, x, y)
                if(dista > raggio) :
                    raggio = dista
                    xlont = x # x coordinate of the point farthest from the nose
                    ylont = y # y coordinate of the point farthest from the nose
            for(x,y) in shape:
                settore = [0,0,0]
                if(y == ynose):
                    m = 0
                else:
                    m = (x - xnose)/(y-ynose)
                m = abs(m)
                d = distanza(xnose, ynose, x,y)
                tick_punto = time.time()
                nnn = aggiungi(xnose, ynose, raggio, x, y, d, m, imga )
                #print("tick punto " , time.time() - tick_punto)
                ## WHAT IF IT IS THE TIP OF THE NOSE?
                #try:
                #except:
                #print(num_volto)
                # string dictionary = image name
                #nstr = str(nnn)
                #if (x == xnose and y == ynose):
                #nstr = "0"
                #cv2.putText(imga, nstr, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            dizionario[num_volto] = volto
        #print("-----------------------------------------")
        #print("tick_volto = ", time.time() - tick_volto)
        nomeimagine = str(img)
        nomeimagine = nomeimagine[:15]
        nomeimagine = nomeimagine[6:]
        dizionario_str[num_volto] = nomeimagine
        num_volto = num_volto +1
        if((num_volto % 200) == 0):
            print(num_volto)

#print("dizionario = ", dizionario)
#print(len(dizionario))
#print("dizionario_str = ", dizionario_str)
#print(len(dizionario_str))
#print(type(dizionario))
#numpy.savetxt("foo.csv", dizionario, delimiter=",")
# Change the file name to match the chosen configuration
numpy.savetxt("DatasetCelebA/dataset4c4s1000.csv", dizionario, fmt='%i', delimiter=",")
#cv2.imwrite("gen_settore.jpg", imga)
#cv2.imshow('image',imga)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#f = open('test.txt', 'w')
#f.close()
{"hexsha": "d84489a8d8a5d6c9c15985bea5f95435c36b6a22", "size": 6976, "ext": "py", "lang": "Python", "max_stars_repo_path": "calcolo_ragnatela.py", "max_stars_repo_name": "s-corso-98/SpiderGenderProject", "max_stars_repo_head_hexsha": "cd08a1141654be9489b5a9668c06254ce2dfac22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-19T09:28:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-19T09:28:06.000Z", "max_issues_repo_path": "calcolo_ragnatela.py", "max_issues_repo_name": "s-corso-98/SpiderGenderProject", "max_issues_repo_head_hexsha": "cd08a1141654be9489b5a9668c06254ce2dfac22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "calcolo_ragnatela.py", "max_forks_repo_name": "s-corso-98/SpiderGenderProject", "max_forks_repo_head_hexsha": "cd08a1141654be9489b5a9668c06254ce2dfac22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-30T23:10:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-30T23:10:48.000Z", "avg_line_length": 24.5633802817, "max_line_length": 97, "alphanum_fraction": 0.5478784404, "include": true, "reason": "import numpy", "num_tokens": 2200}
[STATEMENT] lemma lt_list_encode: \<open>n [\<in>] ns \<Longrightarrow> n < list_encode ns\<close> [PROOF STATE] proof (prove) goal (1 subgoal): 1. n [\<in>] ns \<Longrightarrow> n < list_encode ns [PROOF STEP] proof (induct ns) [PROOF STATE] proof (state) goal (2 subgoals): 1. n [\<in>] [] \<Longrightarrow> n < list_encode [] 2. \<And>a ns. \<lbrakk>n [\<in>] ns \<Longrightarrow> n < list_encode ns; n [\<in>] a # ns\<rbrakk> \<Longrightarrow> n < list_encode (a # ns) [PROOF STEP] case (Cons m ns) [PROOF STATE] proof (state) this: n [\<in>] ns \<Longrightarrow> n < list_encode ns n [\<in>] m # ns goal (2 subgoals): 1. n [\<in>] [] \<Longrightarrow> n < list_encode [] 2. \<And>a ns. \<lbrakk>n [\<in>] ns \<Longrightarrow> n < list_encode ns; n [\<in>] a # ns\<rbrakk> \<Longrightarrow> n < list_encode (a # ns) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: n [\<in>] ns \<Longrightarrow> n < list_encode ns n [\<in>] m # ns [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: n [\<in>] ns \<Longrightarrow> n < list_encode ns n [\<in>] m # ns goal (1 subgoal): 1. n < list_encode (m # ns) [PROOF STEP] using le_prod_encode_1 le_prod_encode_2 [PROOF STATE] proof (prove) using this: n [\<in>] ns \<Longrightarrow> n < list_encode ns n [\<in>] m # ns ?a \<le> prod_encode (?a, ?b) ?b \<le> prod_encode (?a, ?b) goal (1 subgoal): 1. n < list_encode (m # ns) [PROOF STEP] by (metis dual_order.strict_trans1 le_imp_less_Suc less_SucI list_encode.simps(2) set_ConsD) [PROOF STATE] proof (state) this: n < list_encode (m # ns) goal (1 subgoal): 1. n [\<in>] [] \<Longrightarrow> n < list_encode [] [PROOF STEP] qed simp
{"llama_tokens": 701, "file": "FOL_Seq_Calc3_Encoding", "length": 7}
Require Import Coq.Reals.Rdefinitions. Require Import TLA.TLA. Require Import TLA.ProofRules. Require Import Examples.System. Open Scope HP_scope. Section SensorWithError. Variable err : R. Definition Sense : Formula := "Xmax" <= "Xmin" + err //\\ "Xmin" <= "x" <= "Xmax". Definition SenseSafe : Formula := "Xmin" <= "x" <= "Xmax". Definition I : Formula := SenseSafe. Variable d : R. Definition SpecR : SysRec := {| Init := I; Prog := Unchanged ("x"::"Xmax"::"Xmin"::nil)%list; world := fun _ => Sense; unch := (("x":Term)::("Xmax":Term)::("Xmin":Term)::nil)%list; maxTime := d |}. Definition Spec := SysD SpecR. Lemma SysSafe_sense : forall P, P |-- SysSafe SpecR. Proof. intros. apply SysSafe_rule; apply always_tauto. enable_ex_st. repeat eexists. solve_linear. Qed. Theorem sense_safe : Spec |-- []SenseSafe. Proof. eapply Sys_by_induction with (IndInv := SenseSafe) (A := ltrue). + tlaIntuition. + unfold Spec, SpecR. tlaAssume. + tlaIntuition. + apply SysSafe_sense. + tlaAssume. + eapply BasicProofRules.always_tauto. charge_tauto. + tlaAssume. + red. solve_linear. + unfold World. rewrite Continuous_st_formula with (F:=Sense). * solve_linear. * tlaIntuition. * tlaIntuition. * charge_tauto. + solve_linear. Qed. End SensorWithError. Close Scope HP_scope.
{"author": "dricketts", "repo": "quadcopter", "sha": "62bb21915612a141e1ffabc73df3dc2d931c54ce", "save_path": "github-repos/coq/dricketts-quadcopter", "path": "github-repos/coq/dricketts-quadcopter/quadcopter-62bb21915612a141e1ffabc73df3dc2d931c54ce/oldexamples/SensorWithError.v"}
# -*- coding: utf-8 -*-
"""
Created on Sun May 28 14:01:01 2017

@author: xin
https://gist.github.com/stewartpark/187895beb89f0a1b3a54
"""
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy as np

batch_size = 1
num_classes = 1
epochs = 1000

x_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_train = np.array([[0], [1], [1], [0]])
x_test = np.array([[0, 0], [1, 0]])
y_test = np.array([[0], [1]])
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

model = Sequential()  # define a Sequential model
# add a fully connected layer: the input has 2 features and the hidden
# layer that follows has 2 units
# the hidden layer uses the tanh activation
# biases are added automatically for every layer
model.add(Dense(2, activation='tanh', input_shape=(2,)))
# the output is also a fully connected layer, with a sigmoid activation
model.add(Dense(1, activation='sigmoid'))

# check the structure and parameters of this model
model.summary()

# optimizer
sgd = SGD(lr=0.8)  # learning rate

# the model must be compiled before training (fit)
model.compile(loss='binary_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# automatic differentiation, gradient descent and backpropagation
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
# evaluate the model on the test samples: computes the loss and the
# accuracy on the test set
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# use the model for prediction
print(model.predict_proba(x_train))

# https://github.com/tensorflow/tensorflow/issues/3388
K.clear_session()
{"hexsha": "9426d467369788e2acd7970daa0c9680eb3ffb71", "size": 1528, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras/keras_xor.py", "max_stars_repo_name": "OnlyBelter/MachineLearning_examples", "max_stars_repo_head_hexsha": "c2d766540aacb0aea1a4892c97c5dd509bf2a62f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-04-15T02:31:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T21:27:17.000Z", "max_issues_repo_path": "keras/keras_xor.py", "max_issues_repo_name": "2429160671/machine-learning-note", "max_issues_repo_head_hexsha": "caedc49d3726ddd4079f56bab90363e5284e090e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-23T08:08:30.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-23T08:08:30.000Z", "max_forks_repo_path": "keras/keras_xor.py", "max_forks_repo_name": "2429160671/machine-learning-note", "max_forks_repo_head_hexsha": "caedc49d3726ddd4079f56bab90363e5284e090e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2018-10-15T13:08:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T02:42:21.000Z", "avg_line_length": 24.6451612903, "max_line_length": 56, "alphanum_fraction": 0.6695026178, "include": true, "reason": "import numpy", "num_tokens": 542}
<h1>IBM Quantum Challenge Africa 2021</h1>

<p style="font-size:xx-large;">Introduction and the Crop-Yield Problem</p>

Quantum Computing has the potential to revolutionize computing, as it can solve problems that are not possible to solve on a classical computer. This extra ability that quantum computers have is called quantum advantage. To achieve this goal, the world needs passionate and competent end-users: those who know how to apply the technology to their field.

In this challenge you will be exposed, at a high level, to quantum computing through Qiskit. As current or future users of quantum computers, you need to know what problems are appropriate for quantum computation, how to structure the problem model/inputs so that they are compatible with your chosen algorithm, and how to execute a given algorithm and quantum solution to solve the problem.

This is the first notebook for the IBM Quantum Challenge Africa. Before starting here, ensure you have completed the Week 0 content in preparation for the following exercises.

## Initialization

To ensure the demonstrations and exercises have the required Python modules and libraries, run the following cell before continuing.

```python
# Import auxiliary libraries
import numpy as np

# Import Qiskit
from qiskit import IBMQ, Aer
from qiskit.algorithms import QAOA, VQE, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import COBYLA
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit.providers.aer.noise.noise_model import NoiseModel

from qiskit_optimization import QuadraticProgram
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization.converters import QuadraticProgramToQubo

import qiskit.test.mock as Fake
```

## Table of Contents

The notebook is structured as follows:

1. Initialization
2. Table of Contents
3. Qiskit and its parts
4. Setting up the Qiskit Environment
5. Quadratic Problems
6. Crop-Yield Problem as a Quadratic Problem
7. Solving the Crop-Yield Problem using Quantum Computing
8. Simulating a Real Quantum Computer for the Crop-Yield Problem

## Qiskit and its parts

Qiskit is divided into multiple modules for different purposes, with Terra at the core of the Qiskit ecosystem. It helps to be familiar with what each module can do and whether you will need to utilize the software contained within them for your specific problem.

There are four modules that deal with the application of quantum computing, with tools and algorithms built specifically for their fields. Have a quick look at the [Qiskit documentation](https://qiskit.org/overview) and this IBM Research [blog post](https://www.ibm.com/blogs/research/2021/04/qiskit-application-modules/) to see what these modules are called. Once you have done so, you can complete the first exercise of the challenge: replace the fictitious module names, in the Python cell below, with the correct module names.

Though it would be fun to have a Qiskit Gaming module, it has not yet been developed. If you are interested in contributing to the open-source Qiskit community, have a look at the [contribution guide](https://qiskit.org/documentation/contributing_to_qiskit.html).

### Exercise 1a: Qiskit Applications Modules

```python
# Definitely real Qiskit module names
qiskit_module_names = [
    "Qiskit Nature",
    "Qiskit Optimization",
    "Qiskit Finance",
    "Qiskit Machine Learning",
]
```

Run the following Python cell to check if you have the correct module names.
```python
from qc_grader import grade_ex1a

grade_ex1a(qiskit_module_names)
```

    Submitting your answer for ex1/partA. Please wait...
    Congratulations 🎉! Your answer is correct and has been submitted.

### Qiskit Applications Modules

In this notebook, we will use the `Qiskit Optimization` applications module for a specific problem to illustrate:

1. how classical and mathematical problem definitions are represented in Qiskit,
2. how some quantum algorithms are defined,
3. how to execute a quantum algorithm for a given problem definition, and
4. how algorithms and Qiskit problems are executed on real and simulated quantum computers.

We will also be using Qiskit Terra and Aer, as they provide the foundation of Qiskit and high-performance quantum simulators, respectively.

## Quadratic Problems

Some computational problems can be formulated as quadratic equations such that the minimum of the quadratic equation is the optimal solution, if any exists. These problems are encountered in finance, agriculture, operations and production management, and economics. Quadratic programming is used, for example, to identify an optimal financial portfolio with minimum risk and to optimize the layout of production components in a factory so as to minimize the travel distance of resources. This notebook focuses on agriculture, as it is a relevant application of quantum computing to problems facing the African continent. However, all of these applications share two common characteristics: the system can be modelled as a quadratic equation, and the system variables may be constrained, with their values limited to within a given range.

---

Quadratic problems take on the following structure. Given a vector of $n$ variables $x\in\mathbb{R}^n$, the quadratic function to minimize is as follows.

$$
\begin{align}
\text{minimize}\quad & f\left(x\right)=\frac{1}{2}x^\top{}\mathbf{Q}x + c^\top{}x &\\
\text{subject to}\quad & \mathbf{A}x\leq{}b&\\
& x^\top{}\mathbf{Q}_ix + c_{i}^\top{}x\leq{}r_i,\quad&\forall{}i\in[1,k_q]\\
& l_i\leq{}x_i\leq{}u_i,\quad&\forall{}i\in[1,k_l]\\
\end{align}
$$

$\mathbf{Q}$, $\mathbf{Q}_i$, and $\mathbf{A}$ are $n\times{}n$ symmetric matrices. $c$ and $c_i$ are $n\times{}1$ column vectors. $\mathbf{Q}_i$, $\mathbf{A}$, $c_i$, $l_i$, and $u_i$ define constraints on the variables in $x$. The quadratic equation at the core of the quadratic problem is found by multiplying out the matrices in the minimization function.

Though "$\leq{}$" is used in the constraint equations above, any of the relational operators may be used, for any number of constraints: i.e. "$<$", "$=$", "$>$", "$\geq$", or "$\leq$". A valid solution to the quadratic problem must satisfy all of its constraints. Examples of some constraints are given below. The first two are linear constraints, whereas the third example is a quadratic constraint.

$$ x_1 + x_4 \leq{} 10$$
$$ x_2 - 3x_6 = 10$$
$$x_1x_2 - 4x_3x_4 + x_5 \leq{} 15 $$

Qiskit has Python code that allows you to implement a quadratic problem as a `QuadraticProgram` instance. Though our definition above used matrices to define the coefficients, `QuadraticProgram` allows you to define the objective (the function to minimize) directly.
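As a preview of the interface (a sketch only; the details are explained below), the three example constraints above could be registered on a `QuadraticProgram` holding hypothetical variables `x_1` through `x_6` as follows:

```python
from qiskit_optimization import QuadraticProgram

# A hypothetical program with six integer variables, just to host the constraints;
# the bounds of +/-10 are arbitrary placeholders
qp = QuadraticProgram(name="constraint examples")
for i in range(1, 7):
    qp.integer_var(name=f"x_{i}", lowerbound=-10, upperbound=10)

# x_1 + x_4 <= 10
qp.linear_constraint(linear={"x_1": 1, "x_4": 1}, sense="<=", rhs=10)
# x_2 - 3*x_6 = 10
qp.linear_constraint(linear={"x_2": 1, "x_6": -3}, sense="=", rhs=10)
# x_1*x_2 - 4*x_3*x_4 + x_5 <= 15 (quadratic and linear terms mixed)
qp.quadratic_constraint(
    quadratic={("x_1", "x_2"): 1, ("x_3", "x_4"): -4},
    linear={"x_5": 1},
    sense="<=",
    rhs=15,
)
```

Each call registers one more constraint on the program, and the constraints accumulate until the problem is solved.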
To illustrate how to use `QuadraticProgram`, we will use the following quadratic problem definition, with three integer variables.

$$\begin{align}
\text{minimize}\quad{} & f(x)=(x_1)^2 + (x_2)^2 - x_1x_2 - 6x_3 \\
\text{subject to}\quad{} & x_1 + x_2 = 2 \\
& x_2x_3 \geq{} 1 \\
& -2 \leq{} x_2 \leq{} 2 \\
& -2 \leq{} x_3 \leq{} 4 \\
\end{align}$$

After some simplification (the equality constraint fixes $x_2 = 2 - x_1$), the remaining constraints define a region of valid values for $x_1$ and $x_3$, within which $f(x)$ must be minimized.

In the following code, the above quadratic problem is defined as a `QuadraticProgram` instance. Have a look at the [Qiskit documentation for `QuadraticProgram`](https://qiskit.org/documentation/stubs/qiskit.optimization.QuadraticProgram.html), as it can be very useful in helping your understanding of its interface.

The quadratic to minimize, called an objective, is implemented using dictionaries. This allows you, the developer, to explicitly define coefficients for specific variables and terms. The keys in the dictionaries are the variable names identifying a term in $f(x)$. For example, `("x_1","x_2")` is for $x_1x_2$. The values for each item are the coefficients for said terms. Terms that are subtracted in $f(x)$ must have a negative coefficient.

```python
quadprog = QuadraticProgram(name="example 1")
quadprog.integer_var(name="x_1", lowerbound=0, upperbound=4)
quadprog.integer_var(name="x_2", lowerbound=-2, upperbound=2)
quadprog.integer_var(name="x_3", lowerbound=-2, upperbound=4)
quadprog.minimize(
    linear={"x_3": -6},
    quadratic={("x_1", "x_1"): 1, ("x_2", "x_2"): 1, ("x_1", "x_2"): -1},
)
quadprog.linear_constraint(linear={"x_1": 1, "x_2": 1}, sense="=", rhs=2)
quadprog.quadratic_constraint(quadratic={("x_2", "x_3"): 1}, sense=">=", rhs=1)
```

    <qiskit_optimization.problems.quadratic_constraint.QuadraticConstraint at 0x7f2d2409d4f0>

A `QuadraticProgram` can have three types of variables: binary, integer, and continuous. The Qiskit implementation of the algorithms we are going to use currently supports only binary and integer variables. There are other algorithms that allow for the simulation of continuous variables, but they are not covered in this notebook. If you want to know more about them, have a look at this Qiskit tutorial on [algorithms to solve mixed-variable quadratic problems](https://qiskit.org/documentation/tutorials/optimization/5_admm_optimizer.html).

We can visualize the `QuadraticProgram` as an LP string, a portable text-based format used for representing the model as a **L**inear **P**rogramming problem.

```python
print(quadprog.export_as_lp_string())
```

    \ This file has been generated by DOcplex
    \ ENCODING=ISO-8859-1
    \Problem name: example 1

    Minimize
     obj: - 6 x_3 + [ 2 x_1^2 - 2 x_1*x_2 + 2 x_2^2 ]/2
    Subject To
     c0: x_1 + x_2 = 2
     q0: [ x_2*x_3 ] >= 1

    Bounds
     x_1 <= 4
     -2 <= x_2 <= 2
     -2 <= x_3 <= 4

    Generals
     x_1 x_2 x_3
    End

Any optimization problem that can be represented by a single second-order expression, meaning the degree of any term is at most 2, can be transformed into a quadratic program of the form given above. The above example is arbitrary and does not necessarily represent a given real-world problem. The main problem of this notebook focuses on optimizing the yield of a farm, though only the problem definition need be changed to apply this technique to other quadratic problem applications.
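Before moving on, it can help to sanity-check candidate solutions by hand. The sketch below (assuming the `evaluate` and `get_feasibility_info` helpers of `QuadraticProgram` behave as in recent qiskit-optimization releases) evaluates the objective at two points and reports whether each satisfies all constraints:

```python
import numpy as np

# Evaluate f(x) at two candidate points and check feasibility
for candidate in ([1, 1, 4], [2, 0, 4]):
    value = quadprog.objective.evaluate(np.array(candidate))
    feasible, _, _ = quadprog.get_feasibility_info(candidate)
    print(f"x = {candidate}: f(x) = {value}, feasible = {feasible}")
```

Here $x=(1,1,4)$ is feasible with $f(x)=-23$, while $x=(2,0,4)$ violates the quadratic constraint $x_2x_3\geq{}1$.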
## Crop-Yield Problem as a Quadratic Problem

To show how to solve a quadratic program using a quantum computer, we will use two algorithms to solve the crop-yield problem.

It is a common need to optimize the choice of crops and the management of a farm to reduce risk while increasing profits. One of the big challenges facing Africa, and the whole world, is how to produce enough food for everyone. The problem here focuses not on profits but on the tonnage of crops harvested.

Imagine you have a farm with three hectares of land suitable for farming. You need to choose which crops to plant from a selection of four. Furthermore, you also need to determine how many hectares of each you should plant. The four crops you can plant are wheat, soybeans, maize, and a push-pull crop. The fourth cannot be sold once harvested, but it can help increase the yield of the other crops.

_(Figure: our beautiful three-hectare farm.)_

_(Figure: the four crop symbols: Wheat, Soybeans, Maize, and Push-Pull.)_

There are three types of farming methods we can use: monocropping, intercropping, and push-pull farming. Monocropping is where only one crop is farmed. This can make the farm susceptible to disease and pests, as the entire yield would be affected. In some instances, growing two different plants near each other will increase the yield of both, though sometimes it can decrease the yield. Intercropping is the process where different plants are chosen to _increase_ the yield. Push-pull crops are pairs of plants that repel and attract pests, respectively. Integrating them into a larger farm increases the yield of harvested food, at the cost that the harvest of the push-pull crop itself may not count towards the total yield, because the push-pull crop may not be usable or even edible.

_(Figure: the three farming methods: monocropping, intercropping, and push-pull farming.)_

---

Only in certain cases can quadratic programming problems be solved easily using classical methods. In general, they are NP-hard: a class of problems that is difficult to solve using classical computational methods. In fact, the best classical methods for these problems rely on heuristics, techniques that find approximate solutions. Quantum computers have been shown to provide significant speed-up and better scaling for some heuristic problems. The crop-yield problem is a combinatorial problem, in that the solution is a specific combination of input parameters. Though the problem shown here is small enough to solve classically, larger problems become intractable on a classical computer owing to the number of combinations over which to optimize.

Solving the above problem using quantum computing involves three components:

1. Defining the problem
2. Defining the algorithm
3. Executing the algorithm on a backend

Many problems in Qiskit follow this structure, as the algorithm you use can typically be swapped for another without significantly redefining your problem. Switching between backends is the easiest change of all, as long as the device has sufficient resources. The first component is given below, with the second and third covered in the following sections.
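To get a feel for why these problems become intractable, consider a quick back-of-the-envelope count (a sketch): with four crops, each planted on $0$ to $h$ hectares, a brute-force search must consider $(h+1)^4$ candidate plantings.

```python
# Size of the brute-force search space for 4 crops, each on 0..h hectares
for h in (1, 10, 100):
    print(f"h = {h:>3}: {(h + 1) ** 4:,} candidate plantings")
```

This prints 16, 14,641, and 104,060,401 candidates respectively, and the count grows exponentially as more crops are added.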
### Define the Crop-Yield problem

The following problem is defined for you, but the `QuadraticProgram` is not implemented. Your task at the end of this section is to implement the `QuadraticProgram` for the given crop-yield model.

Your farm has three hectares available, $3~ha$, with each crop taking up $0~ha$ or $1~ha$. We define the yield of the farm as a quadratic function where the influence of the crops on each other is represented by the quadratic coefficients. The variables in this quadratic are the numbers of hectares of each crop to be planted, and the objective function to maximize is the yield of usable crops in tons.

Here is the mathematical model for the problem. In this scenario, all crops increase the yield of the other crops; the problem to solve is which crops to use to achieve the maximum yield. The farm yield, in tons, is modelled as the quadratic equation given below, with constraints on the hectares used by each crop and on the total hectares available. Each variable represents the number of hectares to be planted of the corresponding crop. Note that we can only plant up to 1 hectare of each crop and that our farm is constrained to 3 hectares.

----

#### Non-graphical notation

Here is a non-graphical representation of the above model.

$$
\begin{align}
\text{maximize} \quad & 2(\operatorname{Wheat}) + \operatorname{Soybeans} + 4(\operatorname{Maize}) \\
& + 2.4(\operatorname{Wheat}\times\operatorname{Soybeans}) \\
& + 4(\operatorname{Wheat}\times\operatorname{Maize})\\
& + 4(\operatorname{Wheat}\times\operatorname{PushPull}) \\
& + 2(\operatorname{Soybeans}\times\operatorname{Maize}) \\
& + (\operatorname{Soybeans}\times\operatorname{PushPull}) \\
& + 5(\operatorname{Maize}\times\operatorname{PushPull})
\end{align}
$$

$$
\begin{align}
\text{subject to} \quad & \operatorname{Wheat} + \operatorname{Soybeans} + \operatorname{Maize} + \operatorname{PushPull} \leq{} 3\\
& 0\leq{}\operatorname{Wheat}\leq{}1\\
& 0\leq{}\operatorname{Soybeans}\leq{}1\\
& 0\leq{}\operatorname{Maize}\leq{}1\\
& 0\leq{}\operatorname{PushPull}\leq{}1
\end{align}
$$

### Exercise 1b: Create Quadratic Program from crop-yield variables

Your next exercise is to create a `QuadraticProgram` that represents the above model. Write your implementation in the `cropyield_quadratic_program` function below. Remember to use the example as a guide, and to look at the [QuadraticProgram documentation](https://qiskit.org/documentation/tutorials/optimization/1_quadratic_program.html?highlight=quadraticprogram) and [Qiskit reference](https://qiskit.org/documentation/stubs/qiskit.optimization.QuadraticProgram.html?highlight=quadraticprogram#qiskit.optimization.QuadraticProgram).

**Note:** Ensure your variables are named `Wheat`, `Soybeans`, `Maize`, and `PushPull`. This is necessary for the grader to work.
```python
def cropyield_quadratic_program():
    cropyield = QuadraticProgram(name="Crop Yield")

    ##############################
    # Put your implementation here

    # One integer variable per crop: 0 or 1 hectare planted
    cropyield.integer_var(name="Wheat", lowerbound=0, upperbound=1)
    cropyield.integer_var(name="Soybeans", lowerbound=0, upperbound=1)
    cropyield.integer_var(name="Maize", lowerbound=0, upperbound=1)
    cropyield.integer_var(name="PushPull", lowerbound=0, upperbound=1)

    # Yield in tons: linear terms for single crops, quadratic terms for intercropping
    cropyield.maximize(
        linear={"Wheat": 2, "Soybeans": 1, "Maize": 4},
        quadratic={
            ("Wheat", "Soybeans"): 2.4,
            ("Wheat", "Maize"): 4,
            ("Wheat", "PushPull"): 4,
            ("Soybeans", "Maize"): 2,
            ("Soybeans", "PushPull"): 1,
            ("Maize", "PushPull"): 5,
        },
    )

    # The farm only has 3 hectares available in total
    cropyield.linear_constraint(
        linear={"Wheat": 1, "Soybeans": 1, "Maize": 1, "PushPull": 1},
        sense="<=",
        rhs=3,
    )

    ##############################

    return cropyield


cropyield = cropyield_quadratic_program()
print(cropyield.export_as_lp_string())
```

    \ This file has been generated by DOcplex
    \ ENCODING=ISO-8859-1
    \Problem name: Crop Yield

    Maximize
     obj: 2 Wheat + Soybeans + 4 Maize + [ 4.800000000000 Wheat*Soybeans
          + 8 Wheat*Maize + 8 Wheat*PushPull + 4 Soybeans*Maize
          + 2 Soybeans*PushPull + 10 Maize*PushPull ]/2
    Subject To
     c0: Wheat + Soybeans + Maize + PushPull <= 3

    Bounds
     Wheat <= 1
     Soybeans <= 1
     Maize <= 1
     PushPull <= 1

    Generals
     Wheat Soybeans Maize PushPull
    End

Once you feel your implementation is correct, you can grade your solution in the following cell.

```python
# Execute this cell to grade your solution
from qc_grader import grade_ex1b

cropyield = cropyield_quadratic_program()
grade_ex1b(cropyield)
```

    Submitting your answer for ex1/partB. Please wait...
    Congratulations 🎉! Your answer is correct and has been submitted.

### Converting QuadraticPrograms

If we want to estimate how many qubits this quadratic program requires, we can convert it to an Ising model and print the `num_qubits` parameter. An [Ising model](https://qiskit.org/documentation/apidoc/qiskit.optimization.applications.ising.html?highlight=ising) is a type of system model that is well suited to quantum computing. Though we will not be using an Ising model explicitly, the algorithms and Qiskit classes we are using do this conversion internally.

```python
# Estimate the number of qubits required
ising_operations, _ = (
    QuadraticProgramToQubo()
    .convert(
        cropyield,
    )
    .to_ising()
)
print(f"Number of qubits required is {ising_operations.num_qubits}")
```

    Number of qubits required is 6
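Where do those 6 qubits come from? A rough accounting (a sketch, assuming the standard binary encoding of integer variables and of the constraint's slack variable):

```python
import numpy as np

bits_per_crop = int(np.ceil(np.log2(1 + 1)))  # upper bound of 1 ha -> 1 bit per crop
slack_bits = int(np.ceil(np.log2(3 + 1)))     # slack for the "<= 3" constraint -> 2 bits
print(4 * bits_per_crop + slack_bits)         # 4 crops * 1 bit + 2 slack bits = 6
```

The same bookkeeping reappears later, when we look at how the qubit count scales with the size of the farm.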
Even though quadratic programs are widely used in Qiskit, the algorithms we are going to use require binary variables. Qiskit provides an automated method for converting integer variables into binary variables. The binary-only form is called a _Quadratic Unconstrained Binary Optimization_ problem, or `QUBO`. The conversion is done using `QuadraticProgramToQubo` from the Qiskit optimization module. Each integer variable, and its associated constraints, is transformed into binary variables.

Run the following code to see how the QUBO version of the crop-yield problem looks. Notice how the quadratic becomes longer and more variables are added. This is to account for the bits in each variable, including those of the constraints. When we run our quantum algorithm to solve this `QuadraticProgram`, it is converted to a QUBO instance implicitly, within the Qiskit algorithm implementation.

```python
QuadraticProgramToQubo().convert(cropyield)
```

    \ This file has been generated by DOcplex
    \ ENCODING=ISO-8859-1
    \Problem name: Crop Yield

    Minimize
     obj: - 160.400000000000 Wheat@0 - 159.400000000000 Soybeans@0
          - 162.400000000000 Maize@0 - 158.400000000000 PushPull@0
          - 158.400000000000 c0@int_slack@0 - 316.800000000000 c0@int_slack@1 + [
          52.800000000000 Wheat@0^2 + 100.800000000000 Wheat@0*Soybeans@0
          + 97.600000000000 Wheat@0*Maize@0 + 97.600000000000 Wheat@0*PushPull@0
          + 105.600000000000 Wheat@0*c0@int_slack@0
          + 211.200000000000 Wheat@0*c0@int_slack@1 + 52.800000000000 Soybeans@0^2
          + 101.600000000000 Soybeans@0*Maize@0
          + 103.600000000000 Soybeans@0*PushPull@0
          + 105.600000000000 Soybeans@0*c0@int_slack@0
          + 211.200000000000 Soybeans@0*c0@int_slack@1 + 52.800000000000 Maize@0^2
          + 95.600000000000 Maize@0*PushPull@0
          + 105.600000000000 Maize@0*c0@int_slack@0
          + 211.200000000000 Maize@0*c0@int_slack@1 + 52.800000000000 PushPull@0^2
          + 105.600000000000 PushPull@0*c0@int_slack@0
          + 211.200000000000 PushPull@0*c0@int_slack@1
          + 52.800000000000 c0@int_slack@0^2
          + 211.200000000000 c0@int_slack@0*c0@int_slack@1
          + 211.200000000000 c0@int_slack@1^2 ]/2 + 237.600000000000
    Subject To

    Bounds
     0 <= Wheat@0 <= 1
     0 <= Soybeans@0 <= 1
     0 <= Maize@0 <= 1
     0 <= PushPull@0 <= 1
     0 <= c0@int_slack@0 <= 1
     0 <= c0@int_slack@1 <= 1

    Binaries
     Wheat@0 Soybeans@0 Maize@0 PushPull@0 c0@int_slack@0 c0@int_slack@1
    End

## Solving the Crop-Yield Problem using Quantum Computing

There are three ways to _run_ a quantum algorithm using Qiskit:

1. on a simulator running locally on your own machine,
2. on a simulator hosted in the cloud by IBM, or
3. on an actual quantum computer accessible through IBM Quantum.

All of these are called backends. In all cases, the _backend_ can easily be swapped for another, as long as the simulator or device has appropriate resources (number of qubits etc.). In the code below, we show how to access different backends. We demonstrate this using the local Aer QASM simulator from Qiskit. The Aer QASM simulator models the physical properties of a real quantum computer so that you, and other researchers and developers, can test quantum computing code and algorithms before running them on real devices.

```python
# We will use the Aer-provided QASM simulator
backend = Aer.get_backend("qasm_simulator")

# Given we are using a simulator, we will fix the algorithm seed to ensure our results are reproducible
algorithm_globals.random_seed = 271828
```

We would like to compare our quantum solution to the one obtained classically. We also want to try different algorithms. The following three subsections show how these different methods for solving the crop-yield problem are implemented in Qiskit. The two quantum algorithms used are the [_Quantum Approximate Optimization Algorithm_](https://qiskit.org/documentation/stubs/qiskit.algorithms.QAOA.html?highlight=qaoa#qiskit.algorithms.QAOA) (`QAOA`) and the [_Variational Quantum Eigensolver_](https://qiskit.org/documentation/stubs/qiskit.algorithms.VQE.html?highlight=vqe#qiskit.algorithms.VQE) (`VQE`). Both of these algorithms are hybrid, in that they use a classical _optimizer_ to alter parameters that affect the quantum computation. The VQE algorithm is used to find the lowest eigenvalue of a matrix, which can describe a system to optimize. QAOA also finds the lowest eigenvalue, but achieves this in a different way to VQE. Both are very popular algorithms, with varying applications and strengths.
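As an aside, you can list the simulators that Aer provides locally, which is handy when swapping backends as described above. A minimal sketch:

```python
# List the locally available Aer simulator backends
for available_backend in Aer.backends():
    print(available_backend.name())
```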
### Classical Solution

The classical solution to the crop-yield problem can easily be found using NumPy and Qiskit. The QUBO problem can be solved by finding the minimum eigenvalue of its underlying matrix representation. Fortunately, we don't have to know what this matrix looks like. We only need to pass the problem to a `MinimumEigenOptimizer` wrapping a `MinimumEigensolver`. The optimizer translates the provided problem into a parameterized representation, which is then passed to the solver. By optimizing the parameters, the solver will eventually give the minimum eigenvalue for the parameterized representation, and thus the solution to the original problem.

Here we use a classical solver from NumPy, the `NumPyMinimumEigensolver`.

```python
def get_classical_solution_for(quadprog: QuadraticProgram):
    # Create solver
    solver = NumPyMinimumEigensolver()

    # Create optimizer for solver
    optimizer = MinimumEigenOptimizer(solver)

    # Return result from optimizer
    return optimizer.solve(quadprog)
```

If we execute the classical method for our crop-yield problem, we get a valid solution that maximizes the yield.

```python
# Get classical result
classical_result = get_classical_solution_for(cropyield)

# Format and print result
print("Solution found using the classical method:\n")
print(f"Maximum crop-yield is {classical_result.fval} tons")
print(f"Crops used are: ")

_crops = [v.name for v in cropyield.variables]
for cropIndex, cropHectares in enumerate(classical_result.x):
    print(f"\t{cropHectares} ha of {_crops[cropIndex]}")
```

    Solution found using the classical method:

    Maximum crop-yield is 19.0 tons
    Crops used are:
    	1.0 ha of Wheat
    	0.0 ha of Soybeans
    	1.0 ha of Maize
    	1.0 ha of PushPull
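As a small convenience (a sketch), the result object also exposes the solution as a name-to-value dictionary, which can replace the manual indexing above:

```python
# The same solution, keyed by variable name
print(classical_result.variables_dict)  # e.g. {'Wheat': 1.0, 'Soybeans': 0.0, ...}
```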
### QAOA Solution

To solve our problem using QAOA, we need only replace the classical solver with a `QAOA` class instance. Now that we are running a quantum algorithm, we need to tell the solver where to execute the quantum component. We use a `QuantumInstance` to store the backend information.

QAOA is an iterative algorithm, and thus is run multiple times with different internal parameters. The parameters are tuned classically during the optimization step of the computation by `optimizer`. If we leave `optimizer` as `None`, our algorithms will use the default optimization algorithm. To determine how many iterations there are, we define a callback function that runs for each iteration and stores the number of evaluations thus far. At the end of our algorithm execution, we return the result and the number of iterations.

```python
def get_QAOA_solution_for(
    quadprog: QuadraticProgram,
    quantumInstance: QuantumInstance,
    optimizer=None,
):
    _eval_count = 0

    def callback(eval_count, parameters, mean, std):
        nonlocal _eval_count
        _eval_count = eval_count

    # Create solver
    solver = QAOA(
        optimizer=optimizer,
        quantum_instance=quantumInstance,
        callback=callback,
    )

    # Create optimizer for solver
    optimizer = MinimumEigenOptimizer(solver)

    # Get result from optimizer
    result = optimizer.solve(quadprog)
    return result, _eval_count
```

If we execute the QAOA method for our crop-yield problem, we get the same result as the classical method, showing 1) that the quantum solution is correct, and 2) that you now know how to use a quantum algorithm! 🌟

```python
# Create a QuantumInstance
simulator_instance = QuantumInstance(
    backend=backend,
    seed_simulator=algorithm_globals.random_seed,
    seed_transpiler=algorithm_globals.random_seed,
)

# Get QAOA result
qaoa_result, qaoa_eval_count = get_QAOA_solution_for(cropyield, simulator_instance)

# Format and print result
print("Solution found using the QAOA method:\n")
print(f"Maximum crop-yield is {qaoa_result.fval} tons")
print(f"Crops used are: ")

for cropHectares, cropName in zip(qaoa_result.x, qaoa_result.variable_names):
    print(f"\t{cropHectares} ha of {cropName}")

print(f"\nThe solution was found within {qaoa_eval_count} evaluations of QAOA.")
```

    Solution found using the QAOA method:

    Maximum crop-yield is 19.0 tons
    Crops used are:
    	1.0 ha of Wheat
    	0.0 ha of Soybeans
    	1.0 ha of Maize
    	1.0 ha of PushPull

    The solution was found within 3 evaluations of QAOA.

### VQE Solution

The `VQE` algorithm works in a very similar way to `QAOA`: not only from a mathematical-modelling and algorithmic perspective, but also programmatically. There is a quantum solver and a classical optimizer. The `VQE` instance is also iterative, and so we can measure how many iterations are needed to find a solution to the crop-yield problem.

```python
def get_VQE_solution_for(
    quadprog: QuadraticProgram,
    quantumInstance: QuantumInstance,
    optimizer=None,
):
    _eval_count = 0

    def callback(eval_count, parameters, mean, std):
        nonlocal _eval_count
        _eval_count = eval_count

    # Create solver and optimizer
    solver = VQE(
        optimizer=optimizer, quantum_instance=quantumInstance, callback=callback
    )

    # Create optimizer for solver
    optimizer = MinimumEigenOptimizer(solver)

    # Get result from optimizer
    result = optimizer.solve(quadprog)
    return result, _eval_count
```

And we should get the exact same answer as before.

```python
# Create a QuantumInstance
simulator_instance = QuantumInstance(
    backend=backend,
    seed_simulator=algorithm_globals.random_seed,
    seed_transpiler=algorithm_globals.random_seed,
)

# Get VQE result
vqe_result, vqe_eval_count = get_VQE_solution_for(cropyield, simulator_instance)

# Format and print result
print("Solution found using the VQE method:\n")
print(f"Maximum crop-yield is {vqe_result.fval} tons")
print(f"Crops used are: ")

for cropHectares, cropName in zip(vqe_result.x, vqe_result.variable_names):
    print(f"\t{cropHectares} ha of {cropName}")

print(f"\nThe solution was found within {vqe_eval_count} evaluations of VQE")
```

    Solution found using the VQE method:

    Maximum crop-yield is 19.0 tons
    Crops used are:
    	1.0 ha of Wheat
    	0.0 ha of Soybeans
    	1.0 ha of Maize
    	1.0 ha of PushPull

    The solution was found within 25 evaluations of VQE

### Exercise 1c: Classical and Quantum Computational Results

From the above computations you obtained several results: the maximum crop-yield for each of the three methods, plus the evaluation counts for QAOA and VQE. The maximum yield values should all be the same. If your yield values aren't all the same, rerun the algorithms. Sometimes the optimization process can miss the correct answer because of the randomness used to initialize the algorithm parameters.

Run the code cell below to see if the maximum yields you computed are correct.

```python
from qc_grader import grade_ex1c

max_yield_qaoa = qaoa_result.fval
max_yield_vqe = vqe_result.fval
grade_ex1c(tonnage_qaoa=max_yield_qaoa, tonnage_vqe=max_yield_vqe)
```

    Submitting your answer for ex1/partC. Please wait...
    Congratulations 🎉! Your answer is correct and has been submitted.

_You could always verify your result with the classical method, though this is only possible here given the small size of the problem. Larger problems become more difficult to verify._
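If the variational algorithms keep missing the optimum, you can also pass an explicit classical optimizer with a larger iteration budget instead of relying on the default. A sketch using the `COBYLA` optimizer imported at the top of the notebook:

```python
# Retry VQE with an explicit classical optimizer and a larger iteration budget
vqe_result_retry, _ = get_VQE_solution_for(
    cropyield,
    simulator_instance,
    optimizer=COBYLA(maxiter=250),
)
print(f"Maximum crop-yield found on retry: {vqe_result_retry.fval} tons")
```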
## Simulating a Real Quantum Computer for the Crop-Yield Problem

Sometimes one wants to _simulate_ a real quantum computer to see how the actual hardware may impact the performance of the algorithm. All quantum computers have an underlying architecture, different noise characteristics, and different error rates. These three aspects impact how well an algorithm can perform on a given device. To test the impact a given quantum computer has on the QAOA instance, we can utilize a _fake_ instance of the device in Qiskit to tell our simulator which parameters to use. In this example we will be simulating `ibmq_johannesburg`, a device named after the city of Johannesburg in South Africa.

```python
fake_device = Fake.FakeJohannesburg()
```

We can inspect what this device _looks_ like using the Qiskit Jupyter tools, shown below. You do not need to know about this structure to execute quantum programs on a device, but it is useful to visualize the parameters.

```python
import qiskit.tools.jupyter

fake_device
```

    <FakeJohannesburg('fake_johannesburg')>

The three aforementioned components of a quantum computer are represented as a noise model, a coupling map, and a basis gate set. The noise model is a representation of how the noise and errors in the computer behave. The coupling map and basis gate set are core to the architecture of the device. The coupling map represents how the physical qubits can interact, whereas the basis gate set is analogous to the set of fundamental computational instructions we can use. When the device widget above is rendered in a notebook, the coupling map appears as the lines connecting the qubits in the architecture diagram.

To simulate `ibmq_johannesburg`, we must pass these three components to our Aer simulator.

```python
# Create the noise model, which contains the basis gate set
noise_model = NoiseModel.from_backend(fake_device)

# Get the coupling map
coupling_map = fake_device.configuration().coupling_map
```

Next we create a new `QuantumInstance` with these parameters.

```python
fake_instance = QuantumInstance(
    backend=backend,
    basis_gates=noise_model.basis_gates,
    coupling_map=coupling_map,
    noise_model=noise_model,
    seed_simulator=algorithm_globals.random_seed,
    seed_transpiler=algorithm_globals.random_seed,
)
```

We can then execute the `QAOA` from before on this new _fake_ quantum device.

```python
# Get QAOA result
qaoa_result, qaoa_eval_count = get_QAOA_solution_for(cropyield, fake_instance)

# Format and print result
print("Solution found using the QAOA method:\n")
print(f"Maximum crop-yield is {qaoa_result.fval} tons")
print(f"Crops used are: ")

for cropHectares, cropName in zip(qaoa_result.x, qaoa_result.variable_names):
    print(f"\t{cropHectares} ha of {cropName}")

print(f"\nThe solution was found within {qaoa_eval_count} evaluations of QAOA.")
```

    Solution found using the QAOA method:

    Maximum crop-yield is 19.0 tons
    Crops used are:
    	1.0 ha of Wheat
    	0.0 ha of Soybeans
    	1.0 ha of Maize
    	1.0 ha of PushPull

    The solution was found within 3 evaluations of QAOA.
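You can also query the simulated device's calibration data directly, for example its readout and gate error rates. A sketch using the standard `BackendProperties` accessors (the qubit and gate choices here are arbitrary):

```python
# Inspect a few error rates reported by the (fake) device
properties = fake_device.properties()
print(f"Readout error on qubit 0: {properties.readout_error(0):.4f}")
print(f"CNOT error on qubits (0, 1): {properties.gate_error('cx', [0, 1]):.4f}")
```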
### Scaling of the Quantum Solution vs Classical

When we created our quadratic program for the crop-yield problem, we saw that the Ising model required 6 qubits. We had constrained our problem such that we could only plant up to 1 hectare per crop. However, we could change the model so that we can plant up to 3 hectares per crop, up to our maximum available farm area of 3 hectares. How many qubits would this Ising model require?

---

Furthermore, what if we had more land to farm? We know that this problem is NP-hard, and thus classical solutions are mostly found using heuristics. This is the core reason why quantum computers are promising for solving these kinds of problems. But what are the resource requirements of the quantum solution, with a larger farm and crops that can be planted on more hectares?

To illustrate this, we've provided a function that returns the number of qubits required by the underlying Ising model for the crop-yield problem. We then see the estimated number of qubits needed for different problem parameters. Feel free to modify the variables being used to see how the qubit resource requirements change.

```python
# Function to estimate the number of qubits required
def estimate_number_of_qubits_required_for(max_hectares_per_crop, hectares_available):
    return int(
        4 * np.ceil(np.log2(max_hectares_per_crop + 1))
        + np.ceil(np.log2(hectares_available + 1))
    )


# Our new problem parameters
hectares_available = 10
max_hectares_per_crop = 10

# Retrieving the number of qubits required
number_of_qubits_required = estimate_number_of_qubits_required_for(
    max_hectares_per_crop=max_hectares_per_crop, hectares_available=hectares_available
)

print(
    f"Optimizing a {hectares_available} ha farm with each crop taking up to {max_hectares_per_crop} ha each,",
    f"the computation is estimated to require {number_of_qubits_required} qubits.",
)
```

    Optimizing a 10 ha farm with each crop taking up to 10 ha each, the computation is estimated to require 20 qubits.

The number of qubits required is related to the constraints in the quadratic program and to how the integer variables are converted to binary variables. In fact, owing to this conversion, the number of qubits scales logarithmically as a function of the hectares available.
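A quick sweep over farm sizes makes the logarithmic growth visible (a sketch, reusing the estimator defined above):

```python
# Qubit requirements as the farm grows; note how slowly the count increases
for hectares in (3, 10, 100, 1000):
    qubits = estimate_number_of_qubits_required_for(hectares, hectares)
    print(f"{hectares:>4} ha farm -> about {qubits} qubits")  # 10, 20, 35, 50
```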
## Running on real quantum hardware

Using the IBM Quantum platform is easy. First you need to load the account you enabled in the Week 0 content. If you didn't complete this, follow this [quick guide](https://quantum-computing.ibm.com/lab/docs/iql/manage/account/ibmq) on connecting your IBM Quantum account with Qiskit in Python and Jupyter.

```python
IBMQ.load_account()
```

    ibmqfactory.load_account:WARNING:2021-09-13 08:47:44,561: Credentials are already in use. The existing account in the session will be replaced.

    <AccountProvider for IBMQ(hub='ibm-q', group='open', project='main')>

IBM Quantum backends are accessed through a provider, which manages the devices to which you have access. For this challenge, you have access to the new `ibm_perth` quantum computer! Typically, you would find your provider details under your [IBM Quantum account details](https://quantum-computing.ibm.com/account). Under your account you can see the different hubs, groups, and projects you are a part of. Qiskit allows us to retrieve a provider using just the hub, group, and project, as follows:

```python
provider = IBMQ.get_provider(hub="ibm-q", group="open", project="main")
```

However, because we have given you special access for this challenge, we are going to retrieve the provider using a different method. Execute the code cell below to retrieve the correct provider.

```python
provider = None
for prov in IBMQ.providers():
    if (
        "iqc-africa-21" in prov.credentials.hub
        and "q-challenge" in prov.credentials.group
        and "ex1" in prov.credentials.project
    ):
        # Correct provider found
        provider = prov

if provider is None:
    print("ERROR: The expected provider was not found!")
else:
    print("Yay! The expected provider was found!")
```

    Yay! The expected provider was found!

If the above code cell returned an error, you may not yet have access to the real quantum computer. The list of participants is updated daily, so you may have to wait some time before the correct provider appears. If you need assistance, send a message to the challenge Slack channel [#challenge-africa-2021](https://qiskit.slack.com/archives/C02C8MKP153) and make sure to tag the admin team with [@africa_admin](#).

------

To retrieve a backend from the provider, one needs only request it by name. For example, we can request `ibm_perth` as follows.

```python
backend_real = provider.get_backend("ibm_perth")
```

We can also list all backends available through a given provider. In this example we use the _open_ provider, as it has access to all open devices and simulators, instead of the limited few for the challenge.

```python
for _backend in IBMQ.get_provider(hub='ibm-q', group='open', project='main').backends():
    print(_backend.name())
```

    ibmq_qasm_simulator
    ibmq_armonk
    ibmq_santiago
    ibmq_bogota
    ibmq_lima
    ibmq_belem
    ibmq_quito
    simulator_statevector
    simulator_mps
    simulator_extended_stabilizer
    simulator_stabilizer
    ibmq_manila

Qiskit provides visual tools to view backend information in a Jupyter notebook. To accomplish this, one needs to import the `jupyter` submodule and call the appropriate _magic command_. With `%qiskit_backend_overview` you can view all devices accessible by the current IBMQ account. Notice how it does not include simulators. Furthermore, you should see that all devices available through the _open_ group have at most 5 qubits. This is a problem for solving the crop-yield problem we created earlier, as we showed it requires 6 qubits. To demonstrate how a real quantum device is used, a smaller `QuadraticProgram` is provided, requiring a maximum of 4 qubits.

```python
%qiskit_backend_overview
```

<div class="alert alert-block alert-warning">
If you want access to larger and more sophisticated quantum computers through IBM, see if your university or company is part of the [IBM Quantum Network](https://www.ibm.com/quantum-computing/network/members/). Researchers at institutions that are part of the [African Research Universities Alliance (ARUA)](https://arua.org.za/) can also apply for access through the University of the Witwatersrand, in South Africa, which is a member of the IBM Quantum Network. If you are a researcher, you can also apply for access through the [IBM Quantum Researchers Program](https://www.ibm.com/quantum-computing/researchers-program/). If you're a student at a high school or university, you can ask your teachers or lecturers to apply for access through the [IBM Quantum Educators Program](https://www.ibm.com/quantum-computing/educators-program/).
</div>

Given that we have imported `qiskit.tools.jupyter`, Jupyter will now display a helpful widget when an IBMQ backend is displayed. We do not need a _magic command_ here, as the `jupyter` submodule defines how certain objects are displayed in a Jupyter notebook.
```python
backend_real
```

    <IBMQBackend('ibm_perth') from IBMQ(hub='iqc-africa-21-8', group='q-challenge-2', project='ex1-recnPEbPGZpUOAAaH')>

We can create a new `QuantumInstance` object to contain our real quantum computer backend, similar to how we created one to manage our simulator. For real devices, there is an extra parameter worth setting explicitly: `shots`. The output of a quantum computing algorithm is probabilistic. Therefore, we must execute the quantum computation multiple times, sampling the outputs to estimate their probabilities. The number of `shots` is the number of executions of the quantum computation. Here we set up our `QuantumInstance` to use the real backend with 2048 shots.

```python
quantum_instance_real = QuantumInstance(backend_real, shots=2048)
```

The VQE algorithm and QAOA are iterative, meaning that they incorporate a classical-quantum loop which repeats certain computations, _hopefully_ converging to a valid solution. In each iteration, or evaluation, the quantum backend will execute the quantum operations 2048 times. Each shot is quite fast, so we do not have to worry about a significant increase in processing time when using more shots.

----

We now define our small crop-yield problem, which only requires 4 qubits. In this example, only wheat and maize are used. The model is altered to illustrate the impact of growing too much of a single crop, with the yield decreasing as the number of hectares of a single crop is increased. However, utilizing both wheat and maize increases the yield, showing the benefits of intercropping.

**NB:** The maximum number of hectares available is 4, but given that the model would never exceed this limit, the linear constraint defining the maximum number of hectares is not included. This reduces the number of qubits required from 6 to 4.

```python
# Create a small crop-yield example quadratic program
cropyield_small = QuadraticProgram(name="Small Crop-Yield")

# Add two variables, indicating whether we grow 0, 1, or 2 hectares for two different crops
cropyield_small.integer_var(lowerbound=0, upperbound=2, name="Wheat")
cropyield_small.integer_var(lowerbound=0, upperbound=2, name="Maize")

# Add the objective function defining the yield in tons
cropyield_small.maximize(
    linear={"Wheat": 3, "Maize": 3},
    quadratic={("Maize", "Wheat"): 1, ("Maize", "Maize"): -2, ("Wheat", "Wheat"): -2},
)

# This linear constraint is not used, as the model never reaches this limit. This is because the
# sum of the upperbounds on both variables is 4 already. If this constraint were applied, the
# model would require 6 qubits instead of 4.
# cropyield_small.linear_constraint(linear={"Wheat": 1, "Maize": 1}, sense="<=", rhs=4)

print(cropyield_small)
```

    \ This file has been generated by DOcplex
    \ ENCODING=ISO-8859-1
    \Problem name: Small Crop-Yield

    Maximize
     obj: 3 Wheat + 3 Maize + [ - 4 Wheat^2 + 2 Wheat*Maize - 4 Maize^2 ]/2
    Subject To

    Bounds
     Wheat <= 2
     Maize <= 2

    Generals
     Wheat Maize
    End
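Before spending time on the real device, it is worth knowing the answer we expect. A quick classical check, reusing the helper defined earlier (a sketch):

```python
# Solve the small problem classically to know the target answer
small_classical_result = get_classical_solution_for(cropyield_small)
print(f"Expected maximum crop-yield: {small_classical_result.fval} tons")
print(f"Expected solution: {small_classical_result.variables_dict}")
```

For this model the optimum should be 1 ha of each crop, for a yield of 3 tons.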
Here we verify that our small crop-yield problem requires only 4 qubits.

```python
# Estimate the number of qubits required
ising_operations_small, _ = (
    QuadraticProgramToQubo()
    .convert(
        cropyield_small,
    )
    .to_ising()
)
print(f"Number of qubits required is {ising_operations_small.num_qubits}")
```

    Number of qubits required is 4

### Exercise 1d: Submitting a job to a real quantum computer

Now that we know the problem can be run on our chosen device, we can execute the VQE algorithm. In this case we will set the optimizer's maximum number of iterations to 1, so that we do not occupy the device for too long. Our answer will be incorrect, but we only want to see how to send a quantum program to a real quantum computer.

```python
# Create our optimizer
optimizer = COBYLA(maxiter=1)

## Get result from real device with VQE
vqe_result_real, vqe_eval_count_real = get_VQE_solution_for(
    cropyield_small, quantum_instance_real, optimizer=optimizer
)
```

    /opt/conda/lib/python3.8/site-packages/qiskit/utils/run_circuits.py:695: UserWarning: max_credits is not a recognized runtime option and may be ignored by the backend.
      return backend.run(circuits, **run_kwargs)

    FAILURE: Job id: 613f10ade3f915b111349621 is cancelled. Re-submit the circuits.
    FAILURE: Can not get job id, Resubmit the qobj to get job id.Error: Expecting value: line 1 column 1 (char 0)
    FAILURE: Job id: 613f307217e5886a6fba5ba3 is cancelled. Re-submit the circuits.

Qiskit uses `jobs` to track computations and their results on remote devices and simulators. We can query the backend object for the jobs it received, which would be those created by the VQE algorithm.

```python
# Retrieve the VQE job sent
job_real = backend_real.jobs()[0]
print(f"VQE job created at {job_real.creation_date()} and has a job id of {job_real.job_id()}")
```

Put the job id for the above job into the cell below and execute the code cell.

```python
from qc_grader import grade_ex1d

job_id = '613f31526feb7e17126f42a2'
grade_ex1d(job_id)
```

    /opt/conda/lib/python3.8/site-packages/pyscf/lib/misc.py:47: H5pyDeprecationWarning: Using default_file_mode other than 'r' is deprecated. Pass the mode to h5py.File() instead.
      h5py.get_config().default_file_mode = 'a'

    Submitting your answer for ex1/partD. Please wait...
    Congratulations 🎉! Your answer is correct and has been submitted.

You have now completed the first lab of the IBM Quantum Challenge Africa 2021! Make sure that you are on the [Qiskit Slack channel](https://ibm.co/Africa_Slack) so you can ask questions and talk to other participants. There are two more labs left in the challenge, which are more difficult than this introductory lab, covering quantum computing for finance and HIV.
```python
import qiskit.tools.jupyter

%qiskit_version_table
%qiskit_copyright
```

    /opt/conda/lib/python3.8/site-packages/qiskit/aqua/__init__.py:86: DeprecationWarning: The package qiskit.aqua is deprecated. It was moved/refactored to qiskit-terra For more information see <https://github.com/Qiskit/qiskit-aqua/blob/main/README.md#migration-guide>
      warn_package('aqua', 'qiskit-terra')

### Version Information

| Qiskit Software | Version |
|---|---|
| qiskit-terra | 0.18.1 |
| qiskit-aer | 0.8.2 |
| qiskit-ignis | 0.6.0 |
| qiskit-ibmq-provider | 0.16.0 |
| qiskit-aqua | 0.9.4 |
| qiskit | 0.29.0 |
| qiskit-nature | 0.1.5 |
| qiskit-finance | 0.2.0 |
| qiskit-optimization | 0.2.1 |
| qiskit-machine-learning | 0.2.0 |

| System information | |
|---|---|
| Python | 3.8.10, packaged by conda-forge (default, May 11 2021, 07:01:05) [GCC 9.3.0] |
| OS | Linux |
| CPUs | 8 |
| Memory (Gb) | 31.400043487548828 |

Mon Sep 13 15:04:27 2021 UTC

**This code is a part of Qiskit.** © Copyright IBM 2017, 2021. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license in the LICENSE.txt file in the root directory of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. Any modifications or derivative works of this code must retain this copyright notice, and modified files need to carry a notice indicating that they have been altered from the originals.
{"hexsha": "3fa0448cbfc2f2c779bc9a269bbb7d9f7974cc7e", "size": 425605, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "qiskit/challenges/IBMQuantumChallenge_Africa_2021/content/lab1/lab1.ipynb", "max_stars_repo_name": "mickahell/quantum_experiences", "max_stars_repo_head_hexsha": "4f94d9e536f4906e798a00439e6927fd0a55f593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-10T18:35:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-10T18:35:14.000Z", "max_issues_repo_path": "qiskit/challenges/IBMQuantumChallenge_Africa_2021/content/lab1/lab1.ipynb", "max_issues_repo_name": "mickahell/quantum_experiments", "max_issues_repo_head_hexsha": "4f94d9e536f4906e798a00439e6927fd0a55f593", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qiskit/challenges/IBMQuantumChallenge_Africa_2021/content/lab1/lab1.ipynb", "max_forks_repo_name": "mickahell/quantum_experiments", "max_forks_repo_head_hexsha": "4f94d9e536f4906e798a00439e6927fd0a55f593", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 61.3175334966, "max_line_length": 53860, "alphanum_fraction": 0.7352592192, "converted": true, "num_tokens": 13239}
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import argparse, json

import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from PIL import Image

from utils.experiments import load_data


def load_image(path):
    # Load an image from disk and downscale it to a 32x32 thumbnail
    img = Image.open(path)
    img = img.resize((32, 32))
    return np.array(img).squeeze()


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(description='tSNE Plot')
    arg_parser.add_argument('task', choices=['mnist', 'cifar', 'bam'], help='name of the task (mnist, cifar, bam)')
    arg_parser.add_argument('data_path', help='path to data (not required for original MNIST)')
    arg_parser.add_argument('data_split', choices=['train', 'test'], default='test', help='data split (train, test (default))')
    arg_parser.add_argument('latent_path', help='path to numpy latent vectors')
    arg_parser.add_argument('out_path', help='path to output')
    arg_parser.add_argument('--num_points', type=int, default=1000, help='number of data points to plot (default: 1000)')
    arg_parser.add_argument('--remove_outliers', type=float, default=0., help='removes outliers outside of n times the standard deviation (default: 0, disabled)')
    arg_parser.add_argument('--eval_task', default='', help='if path to eval JSON is provided, only data points from the task are plotted (default: None)')
    arg_parser.add_argument('--tsne_latents', default='', help='if path to tSNE latents is provided, repeated projection will be skipped (default: None)')
    args = arg_parser.parse_args()

    # load data
    images, labels, label_descs, num_labels = load_data(args.task, split=args.data_split, data_path=args.data_path)
    cls_idx_map = [i for i in range(num_labels)]
    latents = np.load(args.latent_path)
    print("Loaded %d latents with dimensionality %d." % (latents.shape[0], latents.shape[1]))

    # tSNE: either reuse a precomputed projection or fit a new one
    if len(args.tsne_latents) > 0:
        tsne_latents = np.load(args.tsne_latents)
        print("Loaded tSNE latents from '%s'." % args.tsne_latents)
    else:
        tsne_model = TSNE(n_components=2, verbose=True)
        tsne_latents = tsne_model.fit_transform(latents)
        # save transformation
        latents_name, latents_ext = os.path.splitext(os.path.basename(args.latent_path))
        tsne_path = os.path.join(args.out_path, '%s_tsne%s' % (latents_name, latents_ext))
        np.save(tsne_path, tsne_latents)
        print("Saved tSNE latents to '%s'." % tsne_path)

    # create subset (all eval-task points plus a random sample of the rest)
    if len(args.eval_task) > 0:
        eval_task = json.load(open(args.eval_task, 'r', encoding='utf8'))
        eval_idcs = eval_task['examples'] + [i for task in eval_task['tasks'] for i in task['options']]
        other_idcs = [i for i in range(tsne_latents.shape[0]) if i not in eval_idcs]
        other_idcs = np.random.choice(other_idcs, args.num_points - len(eval_idcs), replace=False)
        subset_idcs = np.concatenate((other_idcs, eval_idcs))
        print("Loaded %d data points from eval task '%s'." % (len(eval_idcs), args.eval_task))
    else:
        subset_idcs = np.random.choice(tsne_latents.shape[0], args.num_points, replace=False)
        print("Reduced data points to random subset of size %d." % args.num_points)
    tsne_latents = tsne_latents[subset_idcs]
    labels = labels[subset_idcs]

    # shorten class names for BAM
    if args.task == 'bam':
        dmap = {'emotion_gloomy': 'g', 'emotion_happy': 'h', 'emotion_peaceful': 'p', 'emotion_scary': 's', 'unspecified': 'u'}
        label_descs = [''.join([dmap[e] for e in d.split('+')]) for d in label_descs]
        eval_task['classes'] = [''.join([dmap[e] for e in d.split('+')]) for d in eval_task['classes']]

    # init alphas
    alphas = np.zeros(tsne_latents.shape[0])

    # calculate per-class means (plus the global mean/std used for outlier removal)
    mean_latent = np.mean(tsne_latents, axis=0)
    std_latent = np.std(tsne_latents, axis=0)
    mean_latents = np.zeros([len(label_descs), tsne_latents.shape[1]])
    for c in range(num_labels):
        lbl_idcs = np.where(labels == (cls_idx_map[c] * np.ones_like(labels)))
        mean_latents[c] = np.mean(tsne_latents[lbl_idcs], axis=0)
        # calculate alphas based on the distance to the class mean
        if len(lbl_idcs[0]) > 1:
            dists = np.abs(tsne_latents[lbl_idcs] - mean_latents[c])
            dists = -np.sum(dists, axis=1)
            max_dist = np.max(dists)
            alphas[lbl_idcs] = np.clip(dists * (1 / max_dist), .3, None)
        else:
            alphas[lbl_idcs] = 0.

    # remove outliers beyond n standard deviations from the global mean
    if args.remove_outliers > 0:
        inlier_idcs = []
        for i in range(tsne_latents.shape[0]):
            if np.any(np.abs(tsne_latents[i] - mean_latent) > (std_latent * args.remove_outliers)):
                continue
            inlier_idcs.append(i)
        tsne_latents = tsne_latents[inlier_idcs]
        labels = labels[inlier_idcs]
        alphas = alphas[inlier_idcs]
        subset_idcs = np.array(subset_idcs)[inlier_idcs]

    fig, ax = plt.subplots()
    ax.scatter(tsne_latents[:, 0], tsne_latents[:, 1], alpha=0., zorder=1)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # plot data points as image thumbnails
    for i in range(tsne_latents.shape[0]):
        # load image from path or directly
        if type(images[subset_idcs[i]]) is str:
            img = load_image(images[subset_idcs[i]])
        else:
            img = images[subset_idcs[i]].squeeze()
        # highlight points belonging to the eval task with a coral border
        if (len(args.eval_task) > 0) and (subset_idcs[i] in eval_idcs):
            bboxprops = dict(lw=2., ec='coral', alpha=.7)
            ab = AnnotationBbox(OffsetImage(img, zoom=.5, cmap='gray', alpha=.8), (tsne_latents[i][0], tsne_latents[i][1]), pad=0., bboxprops=bboxprops)
            # ax.text(tsne_latents[i][0], tsne_latents[i][1], str(subset_idcs[i]), ha='center', fontweight='bold', size=8, alpha=.75, zorder=5)
        else:
            ab = AnnotationBbox(OffsetImage(img, zoom=.5, cmap='gray', alpha=alphas[i]), (tsne_latents[i][0], tsne_latents[i][1]), pad=0., frameon=False)
        ax.add_artist(ab)

    # plot class means as labelled circles
    for c in range(len(label_descs)):
        color = 'white'
        if (len(args.eval_task) > 0) and (label_descs[c] in eval_task['classes']):
            color = 'coral'
        bboxprops = dict(boxstyle='circle,pad=0.5', fc='black', ec=color)
        ax.text(mean_latents[c][0], mean_latents[c][1], label_descs[c], ha='center', color=color, fontweight='bold', size=8, alpha=.75, zorder=5, bbox=bboxprops)

    # plot mean connections (for eval): a closed loop through the eval classes' means
    if len(args.eval_task) > 0:
        eval_mean_xs = [mean_latents[label_descs.index(eval_task['classes'][i%len(eval_task['classes'])])][0] for i in range(len(eval_task['classes'])+1)]
        eval_mean_ys = [mean_latents[label_descs.index(eval_task['classes'][i%len(eval_task['classes'])])][1] for i in range(len(eval_task['classes'])+1)]
        ax.plot(eval_mean_xs, eval_mean_ys, color='coral', alpha=.8, zorder=4)

    fig.tight_layout()
    fig.savefig(os.path.join(args.out_path, 'tsne.pdf'))
    plt.show()
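# Example invocation (a hypothetical sketch; the data, latent, and output paths are
# placeholders, and the flags correspond to the argparse definitions above):
#   python run/tsne_analysis.py mnist ./data test latents.npy ./out \
#       --num_points 500 --remove_outliers 3.0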
{"hexsha": "d2b92fd97ea319df14fac922323ee093c15cbdcd", "size": 7134, "ext": "py", "lang": "Python", "max_stars_repo_path": "run/tsne_analysis.py", "max_stars_repo_name": "yamad07/vjvae", "max_stars_repo_head_hexsha": "dd8d6607f5ec6c46df1794f903b42aee890d970b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2019-09-04T01:15:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T09:59:15.000Z", "max_issues_repo_path": "run/tsne_analysis.py", "max_issues_repo_name": "yamad07/vjvae", "max_issues_repo_head_hexsha": "dd8d6607f5ec6c46df1794f903b42aee890d970b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run/tsne_analysis.py", "max_forks_repo_name": "yamad07/vjvae", "max_forks_repo_head_hexsha": "dd8d6607f5ec6c46df1794f903b42aee890d970b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-09-04T05:22:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-16T13:27:51.000Z", "avg_line_length": 51.6956521739, "max_line_length": 162, "alphanum_fraction": 0.6439585086, "include": true, "reason": "import numpy", "num_tokens": 1960}
{"mathlib_filename": "Mathlib.Tactic.TryThis", "llama_tokens": 0}
// Boost.Geometry (aka GGL, Generic Geometry Library) // Copyright (c) 2007-2014 Barend Gehrels, Amsterdam, the Netherlands. // Copyright (c) 2008-2014 Bruno Lalande, Paris, France. // Copyright (c) 2009-2014 Mateusz Loskot, London, UK. // Copyright (c) 2013-2014 Adam Wulkiewicz, Lodz, Poland. // This file was modified by Oracle on 2013-2014. // Modifications copyright (c) 2013-2014, Oracle and/or its affiliates. // Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle // Contributed and/or modified by Menelaos Karavelas, on behalf of Oracle // Parts of Boost.Geometry are redesigned from Geodan's Geographic Library // (geolib/GGL), copyright (c) 1995-2010 Geodan, Amsterdam, the Netherlands. // Use, modification and distribution is subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_DISJOINT_SEGMENT_BOX_HPP #define BOOST_GEOMETRY_ALGORITHMS_DETAIL_DISJOINT_SEGMENT_BOX_HPP #include <cstddef> #include <utility> #include <boost/numeric/conversion/cast.hpp> #include <boost/geometry/util/math.hpp> #include <boost/geometry/util/calculation_type.hpp> #include <boost/geometry/core/access.hpp> #include <boost/geometry/core/tags.hpp> #include <boost/geometry/core/coordinate_dimension.hpp> #include <boost/geometry/core/point_type.hpp> #include <boost/geometry/algorithms/detail/assign_indexed_point.hpp> #include <boost/geometry/algorithms/dispatch/disjoint.hpp> namespace boost { namespace geometry { #ifndef DOXYGEN_NO_DETAIL namespace detail { namespace disjoint { template <std::size_t I> struct compute_tmin_tmax_per_dim { template <typename SegmentPoint, typename Box, typename RelativeDistance> static inline void apply(SegmentPoint const& p0, SegmentPoint const& p1, Box const& box, RelativeDistance& ti_min, RelativeDistance& ti_max, RelativeDistance& diff) { typedef typename coordinate_type<Box>::type box_coordinate_type; typedef typename coordinate_type < SegmentPoint >::type point_coordinate_type; RelativeDistance c_p0 = boost::numeric_cast < point_coordinate_type >( geometry::get<I>(p0) ); RelativeDistance c_p1 = boost::numeric_cast < point_coordinate_type >( geometry::get<I>(p1) ); RelativeDistance c_b_min = boost::numeric_cast < box_coordinate_type >( geometry::get<geometry::min_corner, I>(box) ); RelativeDistance c_b_max = boost::numeric_cast < box_coordinate_type >( geometry::get<geometry::max_corner, I>(box) ); if ( geometry::get<I>(p1) >= geometry::get<I>(p0) ) { diff = c_p1 - c_p0; ti_min = c_b_min - c_p0; ti_max = c_b_max - c_p0; } else { diff = c_p0 - c_p1; ti_min = c_p0 - c_b_max; ti_max = c_p0 - c_b_min; } } }; template < typename RelativeDistance, typename SegmentPoint, typename Box, std::size_t I, std::size_t Dimension > struct disjoint_segment_box_impl { template <typename RelativeDistancePair> static inline bool apply(SegmentPoint const& p0, SegmentPoint const& p1, Box const& box, RelativeDistancePair& t_min, RelativeDistancePair& t_max) { RelativeDistance ti_min, ti_max, diff; compute_tmin_tmax_per_dim<I>::apply(p0, p1, box, ti_min, ti_max, diff); if ( geometry::math::equals(diff, 0) ) { if ( (geometry::math::equals(t_min.second, 0) && t_min.first > ti_max) || (geometry::math::equals(t_max.second, 0) && t_max.first < ti_min) ) { return true; } } RelativeDistance t_min_x_diff = t_min.first * diff; RelativeDistance t_max_x_diff = t_max.first * diff; if ( t_min_x_diff > ti_max * t_min.second || t_max_x_diff < ti_min * t_max.second ) { 
return true; } if ( ti_min * t_min.second > t_min_x_diff ) { t_min.first = ti_min; t_min.second = diff; } if ( ti_max * t_max.second < t_max_x_diff ) { t_max.first = ti_max; t_max.second = diff; } if ( t_min.first > t_min.second || t_max.first < 0 ) { return true; } return disjoint_segment_box_impl < RelativeDistance, SegmentPoint, Box, I + 1, Dimension >::apply(p0, p1, box, t_min, t_max); } }; template < typename RelativeDistance, typename SegmentPoint, typename Box, std::size_t Dimension > struct disjoint_segment_box_impl < RelativeDistance, SegmentPoint, Box, 0, Dimension > { static inline bool apply(SegmentPoint const& p0, SegmentPoint const& p1, Box const& box) { std::pair<RelativeDistance, RelativeDistance> t_min, t_max; RelativeDistance diff; compute_tmin_tmax_per_dim<0>::apply(p0, p1, box, t_min.first, t_max.first, diff); if ( geometry::math::equals(diff, 0) ) { if ( geometry::math::equals(t_min.first, 0) ) { t_min.first = -1; } if ( geometry::math::equals(t_max.first, 0) ) { t_max.first = 1; } } if ( t_min.first > diff || t_max.first < 0 ) { return true; } t_min.second = t_max.second = diff; return disjoint_segment_box_impl < RelativeDistance, SegmentPoint, Box, 1, Dimension >::apply(p0, p1, box, t_min, t_max); } }; template < typename RelativeDistance, typename SegmentPoint, typename Box, std::size_t Dimension > struct disjoint_segment_box_impl < RelativeDistance, SegmentPoint, Box, Dimension, Dimension > { template <typename RelativeDistancePair> static inline bool apply(SegmentPoint const&, SegmentPoint const&, Box const&, RelativeDistancePair&, RelativeDistancePair&) { return false; } }; //========================================================================= template <typename Segment, typename Box> struct disjoint_segment_box { static inline bool apply(Segment const& segment, Box const& box) { assert_dimension_equal<Segment, Box>(); typedef typename util::calculation_type::geometric::binary < Segment, Box, void >::type relative_distance_type; typedef typename point_type<Segment>::type segment_point_type; segment_point_type p0, p1; geometry::detail::assign_point_from_index<0>(segment, p0); geometry::detail::assign_point_from_index<1>(segment, p1); return disjoint_segment_box_impl < relative_distance_type, segment_point_type, Box, 0, dimension<Box>::value >::apply(p0, p1, box); } }; }} // namespace detail::disjoint #endif // DOXYGEN_NO_DETAIL #ifndef DOXYGEN_NO_DISPATCH namespace dispatch { template <typename Segment, typename Box, std::size_t DimensionCount> struct disjoint<Segment, Box, DimensionCount, segment_tag, box_tag, false> : detail::disjoint::disjoint_segment_box<Segment, Box> {}; } // namespace dispatch #endif // DOXYGEN_NO_DISPATCH }} // namespace boost::geometry #endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_DISJOINT_SEGMENT_BOX_HPP
{"hexsha": "a86bcc56f703cf593ce06730f73cb6654defcbf4", "size": 8456, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/geometry/algorithms/detail/disjoint/segment_box.hpp", "max_stars_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_stars_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 85.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T20:36:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T20:38:31.000Z", "max_issues_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/geometry/algorithms/detail/disjoint/segment_box.hpp", "max_issues_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_issues_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2015-01-28T16:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-12T23:03:28.000Z", "max_forks_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/geometry/algorithms/detail/disjoint/segment_box.hpp", "max_forks_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_forks_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27.0, "max_forks_repo_forks_event_min_datetime": "2015-01-28T16:33:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T05:04:39.000Z", "avg_line_length": 28.9589041096, "max_line_length": 80, "alphanum_fraction": 0.5717833491, "num_tokens": 1859}
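The disjoint test above is a dimension-recursive slab test (a Liang-Barsky-style clip): per axis it intersects the segment's parameter interval with the box's slab and reports disjointness as soon as the interval becomes empty. The following is a minimal Python sketch of that same technique, for illustration only; it uses plain division, whereas the C++ above carries (numerator, denominator) pairs in t_min/t_max precisely to avoid dividing, and none of the names below belong to Boost.Geometry's API.

# Illustrative slab test for segment/box disjointness (not Boost.Geometry code).
def segment_box_disjoint(p0, p1, box_min, box_max):
    t_min, t_max = 0.0, 1.0  # parameter range of the segment p(t) = p0 + t*(p1-p0)
    for i in range(len(p0)):
        d = p1[i] - p0[i]
        if d == 0.0:
            # Segment parallel to this slab: disjoint iff it lies outside the slab.
            if p0[i] < box_min[i] or p0[i] > box_max[i]:
                return True
            continue
        t0 = (box_min[i] - p0[i]) / d
        t1 = (box_max[i] - p0[i]) / d
        if t0 > t1:
            t0, t1 = t1, t0
        t_min, t_max = max(t_min, t0), min(t_max, t1)
        if t_min > t_max:
            return True  # parameter intervals of the slabs do not overlap
    return False

assert segment_box_disjoint((2.0, 2.0), (3.0, 3.0), (0.0, 0.0), (1.0, 1.0))
assert not segment_box_disjoint((-1.0, 0.5), (2.0, 0.5), (0.0, 0.0), (1.0, 1.0))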
import scipy.linalg
import numpy as np


def make_lapack_inverse(size):
    # The identity matrix, allocated once and reused for every inversion
    iden = np.eye(size)

    # Speeds up repeated inversions of same-size matrices: the identity is built
    # only once, and calling the LAPACK dgesv solver directly bypasses the input
    # sanity checks that scipy.linalg.inv performs on every call.
    def lapack_inverse(A):
        # dgesv solves A x = I, so the solution x is the inverse of A
        results = scipy.linalg.lapack.dgesv(A, iden)
        if results[3] > 0:
            raise np.linalg.LinAlgError('Singular matrix')
        return results[2]

    return lapack_inverse
{"hexsha": "cff5c13b46232dd1b4f79e108189ddc5a6a16a31", "size": 449, "ext": "py", "lang": "Python", "max_stars_repo_path": "Initial Testing/utils.py", "max_stars_repo_name": "MrAttoAttoAtto/CircuitSimulatorC2", "max_stars_repo_head_hexsha": "4d821c86404fe3271363fd8c1438e4ca29c17a13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-25T14:46:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-25T07:58:33.000Z", "max_issues_repo_path": "Initial Testing/utils.py", "max_issues_repo_name": "MrAttoAttoAtto/CircuitSimulatorC2", "max_issues_repo_head_hexsha": "4d821c86404fe3271363fd8c1438e4ca29c17a13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Initial Testing/utils.py", "max_forks_repo_name": "MrAttoAttoAtto/CircuitSimulatorC2", "max_forks_repo_head_hexsha": "4d821c86404fe3271363fd8c1438e4ca29c17a13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4117647059, "max_line_length": 106, "alphanum_fraction": 0.6837416481, "include": true, "reason": "import numpy,import scipy", "num_tokens": 112}
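A hedged usage sketch of the factory above: build one inverter per matrix size, then reuse it. The module name `utils` is taken from this file's path in the metadata; the test matrix itself is illustrative.

import numpy as np
from utils import make_lapack_inverse  # module name assumed from the file path above

inv3 = make_lapack_inverse(3)               # one closure per matrix size
A = np.random.rand(3, 3) + 3 * np.eye(3)    # diagonally dominant, hence invertible
assert np.allclose(inv3(A) @ A, np.eye(3))  # dgesv solved A x = I, so x is A's inverse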
import numpy as np
import matplotlib.pyplot as plt
import utils as ut
import os
from test_funcs import eval_alignment_by_div_embed
from param import P


def read_all_file(dir_path):
    all_data = []
    all_file = os.listdir(dir_path)
    for path in all_file:
        with open(dir_path + "/" + path, "r") as f:
            count = 0
            for line in f.readlines():
                temp_data = line.strip("\n").split("\t")
                if len(all_data) <= count:
                    all_data.append([])
                if 5 >= len(temp_data) > 1:
                    all_data[count].append({path.split("-")[0]: [temp_data[1], temp_data[3]]})
                    count += 1
                elif len(temp_data) > 5:
                    all_data[count].append({path.split("-")[0]: [temp_data[1], temp_data[3], temp_data[5]]})
                    count += 1
    return all_data


def get_hit_result(dirpath):
    hit_result = dict()
    all_file = os.listdir(dirpath)
    if len(all_file) > 0:
        lan_type = all_file[0].split("-")[1]
        training_folder = "../DBP15K/" + lan_type + "/0_3/"
        for files in all_file:
            part_dir = files.replace("-", "/")
            output_folder = "../out/" + part_dir
            ent_embeds = np.load(output_folder + "/ent_embeds.npy")
            triples1, triples2, _, _, ref_ent1, ref_ent2, total_triples_num, total_ent_num, total_rel_num = ut.read_input(
                training_folder)
            embed1 = ent_embeds[ref_ent1,]
            embed2 = ent_embeds[ref_ent2,]
            _, _, result = eval_alignment_by_div_embed(embed1, embed2, P.ent_top_k, accurate=True, is_euclidean=True)
            hit_result[files.split("-")[0]] = result
    return hit_result


def draw_hit_result(dirpath):
    hit_result = get_hit_result(dirpath)
    plt.figure(figsize=(20, 46))
    ylim = (0, 100)
    ylabel = "hit acc"
    y1 = []
    y5 = []
    y10 = []
    x = []
    for key, value in hit_result.items():
        x.append(key)
        y1.append(value[0])
        y5.append(value[1])
        y10.append(value[2])
    plt.ylim(ylim)
    plt.ylabel(ylabel)
    L1, = plt.plot(x, y1, color="#D35400", marker="o")
    L2, = plt.plot(x, y5, color="#2ECC71", marker="o")
    L3, = plt.plot(x, y10, color="#3498DB", marker="o")
    plt.legend(handles=[L1, L2, L3], labels=["hit1", "hit5", "hit10"])


def draw_conicity_result(datapath):
    all_data = read_all_file(datapath)
    # ************** conicity and vs are stored at all_data[15] and all_data[16] respectively
    conicity = all_data[15]
    xlabel = []
    ylabel = []
    for i in range(len(conicity)):
        for key, values in conicity[i].items():
            xlabel.append(key)
            ylabel.append(values)
    plt.figure(figsize=(20, 46))
    plt.ylim((0, 0.5))
    plt.ylabel("conicity")
    print([x[0] for x in ylabel])
    L1, = plt.plot(xlabel, [float(x[0]) for x in ylabel], color="#D35400", marker="o", )
    L2, = plt.plot(xlabel, [float(x[1]) for x in ylabel], color="#2ECC71", marker="o", )
    L3, = plt.plot(xlabel, [float(x[2]) for x in ylabel], color="#3498DB", marker="*", )
    plt.legend(handles=[L1, L2, L3], labels=["ent1", "ent2", "all_ents"])


def draw_quartile_deviation_result(datapath):
    all_data = read_all_file(datapath)
    # ************** quartile deviation and mean are stored at all_data[17]
    quartile = all_data[17]
    xlabel = []
    ylabel = []
    for i in range(len(quartile)):
        for key, values in quartile[i].items():
            xlabel.append(key)
            ylabel.append(values)
    plt.figure(figsize=(20, 46))
    plt.ylim((0, 10))
    plt.ylabel("quartile deviation")
    print([x[0] for x in ylabel])
    L1, = plt.plot(xlabel, [float(x[0]) for x in ylabel], color="#D35400", marker="o", )
    L2, = plt.plot(xlabel, [float(x[1]) for x in ylabel], color="#2ECC71", marker="o", )
    plt.legend(handles=[L1, L2], labels=["quar_devi", "mean_value"])


def analyse_all(datapath):
    all_data = read_all_file(datapath)
    plt.figure(figsize=(20, 46))
    ylim_list = [(0, 0.3), (0.3, 1), (0.3, 1), (0, 0.3), (0, 0.3), (0.3, 1), (0.3, 1), (0, 0.3)]
    ylabel_list = ["all_aver_sum", "aver_most_sim", "aver_diff_sum", "aver_diff_var", "align_all_aver_sum",
                   "align_aver_most_sim", "align_aver_diff_sum", "align_aver_diff_var"]
    for i in range(0, 8):
        # for i in range(len(all_data)):
        temp_y = []
        temp_x = []
        aver_temp_y = []
        median_temp_y = []
        plt.subplot(4, 2, i + 1)
        plt.ylim(ylim_list[i])
        plt.ylabel(ylabel_list[i])
        for j in range(len(all_data[i])):
            for key, values in all_data[i][j].items():
                print(key)
                print(values)
                temp_x.append(key)
                aver_temp_y.append(float(values[0]))
                median_temp_y.append(float(values[1]))
        L1, = plt.plot(temp_x, aver_temp_y, color="orange", marker="o", )
        # for xy in zip(temp_x,aver_temp_y):
        #     plt.annotate("(%s,%.6f)"%xy,xy=xy,xytext=(-20,10),textcoords="offset points")
        L2, = plt.plot(temp_x, median_temp_y, color="#2980B9", linestyle="--", marker="o")
        # for xy in zip(temp_x,aver_temp_y):
        #     plt.annotate("(%s,%.6f)"%xy,xy=xy,xytext=(-20,-10),textcoords="offset points")
        # plt.xlabel("method")
        # plt.legend(handles=[l1,l2],labels=["average","median"],loc="best")
        plt.legend(handles=[L1, L2], labels=["average", "median"])
    # plt.show()


def analyse_near_sim(datapath):
    all_data = read_all_file(datapath)
    ylim_list = [(0, 11), (0, 1), (0, 1), (0, 0.3), (0, 0.3)]
    ylabel_list = ["aver_near_number", "aver_near_sim", "aver_near_same_sim", "aver_var_near", "aver_var_near_same"]
    x_label = []
    for i in range(len(all_data[0])):
        for key, _ in all_data[0][i].items():
            x_label.append(key)
    ent1_ann_data = dict()
    ent2_ann_data = dict()
    first_fig = [ent1_ann_data, ent2_ann_data]
    ent1_ans_data = dict()
    ent2_ans_data = dict()
    second_fig = [ent1_ans_data, ent2_ans_data]
    ent1_anss_data = dict()
    ent2_anss_data = dict()
    third_fig = [ent1_anss_data, ent2_anss_data]
    ent1_avn_data = dict()
    ent2_avn_data = dict()
    fourth_fig = [ent1_avn_data, ent2_avn_data]
    ent1_avns_data = dict()
    ent2_avns_data = dict()
    fifth_figure = [ent1_avns_data, ent2_avns_data]
    figure_data = [first_fig, second_fig, third_fig, fourth_fig, fifth_figure]
    for i in range(len(all_data[9])):
        for key, value in all_data[9][i].items():
            ent1_ann_data[key] = value
        for key, value in all_data[12][i].items():
            ent2_ann_data[key] = value
    for i in range(len(all_data[10])):
        for key, value in all_data[10][i].items():
            ent1_ans_data[key] = value[0:2]
            ent1_avn_data[key] = value[-1]
        for key, value in all_data[13][i].items():
            ent2_ans_data[key] = value[0:2]
            ent2_avn_data[key] = value[-1]
    for i in range(len(all_data[11])):
        for key, value in all_data[11][i].items():
            ent1_anss_data[key] = value[0:2]
            ent1_avns_data[key] = value[-1]
        for key, value in all_data[14][i].items():
            ent2_anss_data[key] = value[0:2]
            ent2_avns_data[key] = value[-1]
    plt.figure(figsize=(20, 46))
    for i in range(3):
        plt.subplot(3, 2, i + 1)
        plt.ylim(ylim_list[i])
        plt.ylabel(ylabel_list[i])
        draw_data = figure_data[i]
        x_label = []
        x_data = [[], []]
        median_data = [[], []]
        for key, value in draw_data[0].items():
            x_label.append(key)
            x_data[0].append(float(draw_data[0][key][0]))
            median_data[0].append(float(draw_data[0][key][1]))
            x_data[1].append(float(draw_data[1][key][0]))
            median_data[1].append(float(draw_data[1][key][1]))
        L1, = plt.plot(x_label, x_data[0], color="#D35400", marker="o", )
        L2, = plt.plot(x_label, x_data[1], color="#28B463", marker="o", )
        # L3, = plt.plot(x_label, median_data[0], color="#F39C12", marker="o", )
        # L4, = plt.plot(x_label, median_data[1], color="#2E86C1", marker="o", )
        # plt.legend(handles=[L1, L2, L3, L4], labels=["ent1", "ent2", "ent1_median", "ent2_median"])
        plt.legend(handles=[L1, L2], labels=["ent1", "ent2"])
    # The variances are too small to tell apart, so this block is commented out for now
    # for i in range(3,5):
    #     plt.subplot(3, 2, i + 1)
    #     plt.ylim((0,0.02))
    #     # plt.ylim(ylim_list[i])
    #     plt.ylabel(ylabel_list[i])
    #     draw_data = figure_data[i]
    #     x_label = []
    #     x_data = [[], []]
    #     for key, value in draw_data[0].items():
    #         x_label.append(key)
    #         x_data[0].append(float(draw_data[0][key]))
    #         x_data[1].append(float(draw_data[1][key]))
    #     L4, = plt.plot(x_label, x_data[0], color="#D35400", marker="o", )
    #     L5, = plt.plot(x_label, x_data[1], color="#2ECC71", marker="o", )
    #     plt.legend(handles=[L4, L5], labels=["ent1", "ent2",])
    # plt.show()


if __name__ == "__main__":
    dirpath = "../part_analyse/near"
    draw_quartile_deviation_result(dirpath)
    # analyse_near_sim(dirpath)
    draw_hit_result(dirpath)
    plt.show()
{"hexsha": "0c24a47c510026dbf8381a53f10342c388ee121c", "size": 9210, "ext": "py", "lang": "Python", "max_stars_repo_path": "6/master/src/openea/expriment/data_analyse.py", "max_stars_repo_name": "smurf-1119/knowledge-engeneering-experiment", "max_stars_repo_head_hexsha": "7fd3647bfc5b05e5fd6f93fea324e7ec0d55d4a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-27T06:05:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T08:03:41.000Z", "max_issues_repo_path": "6/master/src/openea/expriment/data_analyse.py", "max_issues_repo_name": "smurf-1119/knowledge-engeneering-experiment", "max_issues_repo_head_hexsha": "7fd3647bfc5b05e5fd6f93fea324e7ec0d55d4a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-06T03:03:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T14:32:14.000Z", "max_forks_repo_path": "6/master/src/openea/expriment/data_analyse.py", "max_forks_repo_name": "smurf-1119/knowledge-engeneering-experiment", "max_forks_repo_head_hexsha": "7fd3647bfc5b05e5fd6f93fea324e7ec0d55d4a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1914893617, "max_line_length": 118, "alphanum_fraction": 0.5754614549, "include": true, "reason": "import numpy", "num_tokens": 2791}
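For reference, read_all_file above splits each line on tabs, keys records by the file-name prefix before the first "-", and keeps columns 1 and 3 (plus 5 when present). A tiny synthetic fixture, written purely to illustrate that assumed layout; the file name and values are hypothetical:

import os

os.makedirs("demo_near", exist_ok=True)
with open("demo_near/gcn-zh_en", "w") as f:
    f.write("conicity\t0.12\tmedian\t0.10\n")         # 4 columns -> ["0.12", "0.10"]
    f.write("near\t0.33\tmedian\t0.31\tvar\t0.02\n")  # 6 columns -> ["0.33", "0.31", "0.02"]
# read_all_file("demo_near") would then yield rows keyed by "gcn" (prefix before "-").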
# -*- coding: utf-8 -*- """ Output file exporting system. """ import json import os import shutil from typing import Optional import cv2 import numpy as np import pandas as pd from perceptree.common.configuration import Config from perceptree.common.configuration import Configurable from perceptree.common.logger import CopyBar from perceptree.common.logger import Logger from perceptree.data.loader import DataLoader from perceptree.data.treeio import TreeFile from perceptree.data.treeio import TreeImage class DataExporter(Logger, Configurable): """ Output file exporting system. """ COMMAND_NAME = "Export" """ Name of this command, used for configuration. """ def __init__(self, config: Config): super().__init__(config=config) self._set_instance() self._data_loader = self.get_instance(DataLoader) self.__l.info("Initializing data exporting system...") @classmethod def register_options(cls, parser: Config.Parser): """ Register configuration options for this class. """ option_name = cls._add_config_parameter("export_dataset_path") parser.add_argument("--export-dataset-path", action="store", default="", type=str, dest=option_name, help="Export current data-set to given path. " "Requires a loaded data-set!") option_name = cls._add_config_parameter("export_dataset_resolution") parser.add_argument("--export-dataset-resolution", action="store", default=None, type=int, dest=option_name, help="Set resolution to resize views to, when " "exporting a data-set") option_name = cls._add_config_parameter("tree_score_sorted_path") parser.add_argument("--tree-score-sorted-path", action="store", default="", type=str, metavar=("PATH"), dest=option_name, help="Specify path to save tree score sorted views to.") option_name = cls._add_config_parameter("view_score_sorted_path") parser.add_argument("--view-score-sorted-path", action="store", default="", type=str, metavar=("PATH"), dest=option_name, help="Specify path to save view score sorted views to.") option_name = cls._add_config_parameter("score_sorted_filter") parser.add_argument("--score-sorted-filter", action="store", default="base", type=str, metavar=("PATH"), dest=option_name, help="Specify filtering tag for copied views. Use base " "for standard views and None for all views.") option_name = cls._add_config_parameter("feature_image_path") parser.add_argument("--feature-image-path", action="store", default="", type=str, metavar=("PATH"), dest=option_name, help="Specify path to save the feature images to.") def _copy_views(self, input_path: str, output_path: str, view_catalogue: pd.DataFrame, view_resolution: Optional[int]): """ Copy views from given view catalogue from input path to the output path. """ view_progress = CopyBar("", max=len(view_catalogue)) for index, view in view_catalogue.iterrows(): src_view_path = f"{input_path}/{view.path}" dst_view_path = f"{output_path}/{view.path}" view_directory = os.path.dirname(dst_view_path) os.makedirs(view_directory, exist_ok=True) if view_resolution: # Resizing requested -> Load and resize. view_image = cv2.imread( filename=src_view_path ) resized_view_image = cv2.resize( src=view_image, dsize=(view_resolution, view_resolution), interpolation=cv2.INTER_LINEAR ) cv2.imwrite( filename=dst_view_path, img=resized_view_image ) else: # No resize -> Copy the image file. 
                shutil.copyfile(
                    src=src_view_path,
                    dst=dst_view_path
                )

            if view.json_path:
                src_json_path = f"{input_path}/{view.json_path}"
                dst_json_path = f"{output_path}/{view.json_path}"
                shutil.copyfile(
                    src=src_json_path,
                    dst=dst_json_path
                )

            view_progress.next(1)
        view_progress.finish()

    def _copy_trees(self, input_path: str, output_path: str,
                    tree_catalogue: pd.DataFrame):
        """ Copy trees from the given tree catalogue, from the input path
        to the output path. """

        tree_progress = CopyBar("", max=len(tree_catalogue))
        for index, tree in tree_catalogue.iterrows():
            src_view_path = f"{input_path}/{tree.path}"
            dst_view_path = f"{output_path}/{tree.path}"
            view_directory = os.path.dirname(dst_view_path)
            os.makedirs(view_directory, exist_ok=True)
            shutil.copyfile(
                src=src_view_path,
                dst=dst_view_path
            )

            if tree.json_path:
                src_json_path = f"{input_path}/{tree.json_path}"
                dst_json_path = f"{output_path}/{tree.json_path}"
                shutil.copyfile(
                    src=src_json_path,
                    dst=dst_json_path
                )

            tree_progress.next(1)
        tree_progress.finish()

    def _export_dataset(self, output_path: str,
                        view_resolution: Optional[int]):
        """ Export the currently loaded data-set using provided settings.

        :param output_path: Output path to export the data to.
        :param view_resolution: Resize all views to this resolution. Set
            to None to disable resizing.
        """

        self.__l.info(f"Exporting data-set to \"{output_path}\"...")

        results = self._data_loader.full_results
        users = self._data_loader.users
        scores = self._data_loader.full_scores
        scores_indexed = self._data_loader.scores_indexed
        spherical_scores_indexed = self._data_loader.spherical_scores_indexed
        view_catalogue = self._data_loader.full_view_catalogue
        tree_catalogue = self._data_loader.full_tree_catalogue
        input_view_path = self._data_loader.view_base_path
        dataset_meta = self._data_loader.dataset_meta

        os.makedirs(output_path, exist_ok=True)
        results.to_csv(f"{output_path}/results.csv", sep=";", index=False)
        users.to_csv(f"{output_path}/users.csv", sep=";", index=True)
        scores.to_csv(f"{output_path}/scores.csv", sep=";", index=True)
        scores_indexed.to_csv(f"{output_path}/scores_indexed.csv", sep=";", index=True)
        spherical_scores_indexed.to_csv(f"{output_path}/spherical_scores_indexed.csv", sep=";", index=True)
        view_catalogue.drop(["data"], axis=1).to_csv(f"{output_path}/view_catalogue.csv", sep=";", index=True)
        tree_catalogue.drop(["data"], axis=1).to_csv(f"{output_path}/tree_catalogue.csv", sep=";", index=True)
        with open(f"{output_path}/dataset_meta.json", "w") as f:
            json.dump(dataset_meta, f)

        self._copy_views(
            input_path=input_view_path,
            output_path=output_path,
            view_catalogue=view_catalogue,
            view_resolution=view_resolution
        )
        self._copy_trees(
            input_path=input_view_path,
            output_path=output_path,
            tree_catalogue=tree_catalogue
        )

        self.__l.info(f"\tExporting completed!")

    @staticmethod
    def _generate_ordinal_prefix(current_idx: int, max_idx: int) -> str:
        """ Generate string prefix to keep sorted files in order. """
        return str(current_idx).zfill(len(str(max_idx)))

    def _export_score_sorted_views(self, scores: pd.DataFrame,
                                   output_path: str, filter: str):
        """ Export views sorted by provided scores to given directory.

        :param output_path: Directory to output the results to.
        :param filter: Tag filter for the views. Use "None" for all views.
""" self.__l.info(f"Exporting {len(scores)} scored views to \"{output_path}\"...") os.makedirs(output_path, exist_ok=True) view_catalogue = self._data_loader.full_view_catalogue view_base_path = self._data_loader.view_base_path self.__l.warning("TODO - Add support for view variants.") scored_views = view_catalogue.merge(scores, left_index=True, right_index=True) if filter != "None": scored_views = scored_views[scored_views.index.get_level_values(level="view_type") == filter] scored_views = scored_views.sort_values("jod").reset_index() view_progress = CopyBar("", max=len(scored_views)) for index, view in scored_views.iterrows(): src_view_path = f"{view_base_path}/{view.path}" view_name = os.path.basename(src_view_path) ordinal_prefix = self._generate_ordinal_prefix( current_idx=index, max_idx=len(scored_views) ) dst_view_path = f"{output_path}/{ordinal_prefix}_{view.jod}_{view_name}" shutil.copyfile( src=src_view_path, dst=dst_view_path ) view_progress.next(1) view_progress.finish() self.__l.info(f"\tExporting completed!") def _export_tree_score_sorted_views(self, output_path: str, filter: str): """ Export views sorted by tree score to given directory. :param output_path: Directory to output the results to. :param filter: Tag filter for the views. Use "None" for all views. """ scores = self._data_loader.full_scores_indexed scores = scores[scores.index.get_level_values(level="view_id") < 0] scores = scores.reset_index()\ .drop([ "tree_variant_id", "view_id", "view_variant_id" ], axis=1)\ .assign(tree_variant_id=0, view_id=0, view_variant_id=0)\ .set_index([ "tree_id", "tree_variant_id", "view_id", "view_variant_id" ]) self._export_score_sorted_views( scores=scores, output_path=output_path, filter=filter ) def _export_view_score_sorted_views(self, output_path: str, filter: str): """ Export views sorted by tree score to given directory. :param output_path: Directory to output the results to. :param filter: Tag filter for the views. Use "None" for all views. """ self._export_score_sorted_views( scores=self._data_loader.full_scores_indexed, output_path=output_path, filter=filter ) def _export_feature_images(self, output_path: str): """ Export feature images to given path. :param output_path: Directory to output the results to. """ tree_data = self._data_loader.tree_data self.__l.info(f"Exporting feature images of {len(tree_data)} trees to \"{output_path}\"...") os.makedirs(output_path, exist_ok=True) def transform_feature_image(image: np.array) -> np.array: target_dtype = np.uint16 src_limits = ( np.min(image), np.max(image) ) dst_limits = ( np.iinfo(target_dtype).min, np.iinfo(target_dtype).max ) image_0_1 = (image.astype(np.float) - src_limits[0]) / (src_limits[1] - src_limits[0]) dst_image = (image_0_1 * (dst_limits[1] - dst_limits[0])) + dst_limits[0] return np.flip(dst_image.astype(target_dtype), axis=0) view_progress = CopyBar("", max=len(tree_data)) for tree_id, tree_file in tree_data.items(): visual_meta_data = tree_file.dynamic_meta_data["stats"].get("visual", None) if visual_meta_data is not None: for image_name, image_item in visual_meta_data.items(): if not TreeImage.is_image_dict(image_item): continue tree_image = image_item["image"] dst_view_path = f"{output_path}/{tree_id}_{image_name}.png" tree_image.save_to( path=dst_view_path, transform=transform_feature_image ) view_progress.next(1) view_progress.finish() self.__l.info(f"\tExporting completed!") def process(self): """ Perform data export operations. 
""" self.__l.info("Starting data export operations...") if self.c.export_dataset_path: self._export_dataset( output_path=self.c.export_dataset_path, view_resolution=self.c.export_dataset_resolution ) if self.c.tree_score_sorted_path: self._export_tree_score_sorted_views( output_path=self.c.tree_score_sorted_path, filter=self.c.score_sorted_filter ) if self.c.view_score_sorted_path: self._export_view_score_sorted_views( output_path=self.c.view_score_sorted_path, filter=self.c.score_sorted_filter ) if self.c.feature_image_path: self._export_feature_images( output_path=self.c.feature_image_path ) self.__l.info("\tExporting operations finished!")
{"hexsha": "a48407a22375244d27ad921d785cf10db839d539", "size": 14309, "ext": "py", "lang": "Python", "max_stars_repo_path": "PerceptualMetric/psrc/perceptree/data/exporter.py", "max_stars_repo_name": "PolasekT/ICTree", "max_stars_repo_head_hexsha": "d13ad603101805bcc288411504ecffd6f2e1f365", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-09T22:37:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T13:40:44.000Z", "max_issues_repo_path": "PerceptualMetric/psrc/perceptree/data/exporter.py", "max_issues_repo_name": "PolasekT/ICTree", "max_issues_repo_head_hexsha": "d13ad603101805bcc288411504ecffd6f2e1f365", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PerceptualMetric/psrc/perceptree/data/exporter.py", "max_forks_repo_name": "PolasekT/ICTree", "max_forks_repo_head_hexsha": "d13ad603101805bcc288411504ecffd6f2e1f365", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-12-09T22:37:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T14:38:39.000Z", "avg_line_length": 38.4650537634, "max_line_length": 110, "alphanum_fraction": 0.5877419806, "include": true, "reason": "import numpy", "num_tokens": 2803}
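transform_feature_image above is a min-max rescale into the full uint16 range followed by a vertical flip. A standalone sketch of that mapping, with a toy 2x2 image so the resulting values are easy to verify by hand:

import numpy as np

img = np.array([[0.0, 0.5], [1.0, 2.0]])
lo, hi = img.min(), img.max()
u16 = np.iinfo(np.uint16)
# scale [lo, hi] onto [0, 65535], truncate to integers, then flip rows
scaled = ((img - lo) / (hi - lo) * (u16.max - u16.min) + u16.min).astype(np.uint16)
print(np.flip(scaled, axis=0))  # [[32767 65535], [0 16383]]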
import os
import json
import argparse

from sklearn.metrics import roc_auc_score
import xgboost as xgb

CLI = argparse.ArgumentParser()
CLI.add_argument("--trainFile", type=str, default="")
CLI.add_argument("--valFile", type=str, default="")
CLI.add_argument("--testFile", type=str, default="")
args = CLI.parse_args()

with open("config.json", 'r') as f:
    json_obj = json.load(f)

# pull the hyper-parameters proposed by the tuning service from config.json
learning_rate = float(json_obj["learning_rate"])
num_rounds = int(json_obj["num_rounds"])
max_depth = int(json_obj["max_depth"])
lam = float(json_obj["lambda"])
colsample_bytree = float(json_obj["colsample_bytree"])

# Set params
params = {
    'max_depth': max_depth,
    'eta': learning_rate,
    'tree_method': 'gpu_hist',
    'max_bin': 64,
    'objective': 'binary:logistic',
    'lambda': lam,
    'colsample_bytree': colsample_bytree,
}

# Load the train/validation/test files into XGBoost's DMatrix format
dtrain = xgb.DMatrix(args.trainFile)
ddev = xgb.DMatrix(args.valFile)
dtest = xgb.DMatrix(args.testFile)

y_train = dtrain.get_label()
y_dev = ddev.get_label()
y_test = dtest.get_label()

# Train
gbm = xgb.train(params, dtrain, num_rounds)

# Inference
p1_train = gbm.predict(dtrain)
p1_dev = gbm.predict(ddev)
p1_test = gbm.predict(dtest)

# Evaluate
auc_train = roc_auc_score(y_train, p1_train)
auc_dev = roc_auc_score(y_dev, p1_dev)
auc_test = roc_auc_score(y_test, p1_test)

dli_result_fs = os.environ['DLI_RESULT_FS']
user = os.environ['USER']
execid = os.environ['DLI_EXECID']
result_dir = "%s/%s/batchworkdir/%s" % (dli_result_fs, user, execid)

out = []
out.append({
    'auc_train': auc_train,
    'auc_dev': auc_dev,
    'auc_test': auc_test
})

with open('{}/val_dict_list.json'.format(result_dir), 'w') as f:
    json.dump(out, f)
{"hexsha": "a3f515c0dffba08fc76806800b71e17e06f75a10", "size": 1843, "ext": "py", "lang": "Python", "max_stars_repo_path": "Perform Hyper-Parameter Tuning of XGBoost models using Watson Machine Learning Accelerator/train_xgb_default.py", "max_stars_repo_name": "helena-k/wmla-assets", "max_stars_repo_head_hexsha": "4fd3cf04529c3237af595cdde016497c83e70c9f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-09-04T02:26:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T13:39:30.000Z", "max_issues_repo_path": "Perform Hyper-Parameter Tuning of XGBoost models using Watson Machine Learning Accelerator/train_xgb_default.py", "max_issues_repo_name": "helena-k/wmla-assets", "max_issues_repo_head_hexsha": "4fd3cf04529c3237af595cdde016497c83e70c9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-10-01T03:12:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T14:06:34.000Z", "max_forks_repo_path": "Perform Hyper-Parameter Tuning of XGBoost models using Watson Machine Learning Accelerator/train_xgb_default.py", "max_forks_repo_name": "helena-k/wmla-assets", "max_forks_repo_head_hexsha": "4fd3cf04529c3237af595cdde016497c83e70c9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2019-08-23T18:05:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T15:16:14.000Z", "avg_line_length": 24.9054054054, "max_line_length": 68, "alphanum_fraction": 0.7297883885, "include": true, "reason": "from numpy", "num_tokens": 517}
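The script above passes file paths straight to xgb.DMatrix, which accepts LIBSVM-format text files (a label followed by index:value pairs); this is presumably what --trainFile/--valFile/--testFile point at. A toy sketch of such a file follows; note that recent XGBoost releases may additionally want a "?format=libsvm" suffix on the path.

import xgboost as xgb

with open("toy.txt", "w") as f:
    f.write("1 0:0.5 3:1.2\n")   # label 1, sparse features at indices 0 and 3
    f.write("0 1:0.1 2:0.9\n")   # label 0

d = xgb.DMatrix("toy.txt")
print(d.num_row(), d.num_col())  # 2 rows; column count inferred from the max index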
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, force_fp32

from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy


@HEADS.register_module()
class SABLHead(BaseModule):
    """Side-Aware Boundary Localization (SABL) for RoI-Head.

    Side-Aware features are extracted by conv layers
    with an attention mechanism.
    Boundary Localization with Bucketing and Bucketing Guided Rescoring
    are implemented in BucketingBBoxCoder.

    Please refer to https://arxiv.org/abs/1912.04260 for more details.

    Args:
        cls_in_channels (int): Input channels of cls RoI feature. \
            Defaults to 256.
        reg_in_channels (int): Input channels of reg RoI feature. \
            Defaults to 256.
        roi_feat_size (int): Size of RoI features. Defaults to 7.
        reg_feat_up_ratio (int): Upsample ratio of reg features. \
            Defaults to 2.
        reg_pre_kernel (int): Kernel of 2D conv layers before \
            attention pooling. Defaults to 3.
        reg_post_kernel (int): Kernel of 1D conv layers after \
            attention pooling. Defaults to 3.
        reg_pre_num (int): Number of pre convs. Defaults to 2.
        reg_post_num (int): Number of post convs. Defaults to 1.
        num_classes (int): Number of classes in dataset. Defaults to 80.
        cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
        reg_offset_out_channels (int): Hidden and output channel \
            of reg offset branch. Defaults to 256.
        reg_cls_out_channels (int): Hidden and output channel \
            of reg cls branch. Defaults to 256.
        num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
        num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0.
        reg_class_agnostic (bool): Class agnostic regression or not. \
            Defaults to True.
        norm_cfg (dict): Config of norm layers. Defaults to None.
        bbox_coder (dict): Config of bbox coder. Defaults to
            'BucketingBBoxCoder'.
        loss_cls (dict): Config of classification loss.
        loss_bbox_cls (dict): Config of classification loss for bbox branch.
        loss_bbox_reg (dict): Config of regression loss for bbox branch.
        init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None """ def __init__(self, num_classes, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict( type='SmoothL1Loss', beta=0.1, loss_weight=1.0), init_cfg=None): super(SABLHead, self).__init__(init_cfg) self.cls_in_channels = cls_in_channels self.reg_in_channels = reg_in_channels self.roi_feat_size = roi_feat_size self.reg_feat_up_ratio = int(reg_feat_up_ratio) self.num_buckets = bbox_coder['num_buckets'] assert self.reg_feat_up_ratio // 2 >= 1 self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio assert self.up_reg_feat_size == bbox_coder['num_buckets'] self.reg_pre_kernel = reg_pre_kernel self.reg_post_kernel = reg_post_kernel self.reg_pre_num = reg_pre_num self.reg_post_num = reg_post_num self.num_classes = num_classes self.cls_out_channels = cls_out_channels self.reg_offset_out_channels = reg_offset_out_channels self.reg_cls_out_channels = reg_cls_out_channels self.num_cls_fcs = num_cls_fcs self.num_reg_fcs = num_reg_fcs self.reg_class_agnostic = reg_class_agnostic assert self.reg_class_agnostic self.norm_cfg = norm_cfg self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox_cls = build_loss(loss_bbox_cls) self.loss_bbox_reg = build_loss(loss_bbox_reg) self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, self.cls_in_channels, self.roi_feat_size, self.cls_out_channels) self.side_num = int(np.ceil(self.num_buckets / 2)) if self.reg_feat_up_ratio > 1: self.upsample_x = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.upsample_y = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.reg_pre_convs = nn.ModuleList() for i in range(self.reg_pre_num): reg_pre_conv = ConvModule( reg_in_channels, reg_in_channels, kernel_size=reg_pre_kernel, padding=reg_pre_kernel // 2, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_pre_convs.append(reg_pre_conv) self.reg_post_conv_xs = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_x = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(1, reg_post_kernel), padding=(0, reg_post_kernel // 2), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_xs.append(reg_post_conv_x) self.reg_post_conv_ys = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_y = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(reg_post_kernel, 1), padding=(reg_post_kernel // 2, 0), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_ys.append(reg_post_conv_y) self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1) self.relu = nn.ReLU(inplace=True) self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_cls_out_channels) self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_offset_out_channels) self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 
                                        1)
        self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)

        if init_cfg is None:
            self.init_cfg = [
                dict(
                    type='Xavier',
                    layer='Linear',
                    distribution='uniform',
                    override=[
                        dict(type='Normal', name='reg_conv_att_x', std=0.01),
                        dict(type='Normal', name='reg_conv_att_y', std=0.01),
                        dict(type='Normal', name='fc_reg_cls', std=0.01),
                        dict(type='Normal', name='fc_cls', std=0.01),
                        dict(type='Normal', name='fc_reg_offset', std=0.001)
                    ])
            ]
            if self.reg_feat_up_ratio > 1:
                self.init_cfg += [
                    dict(
                        type='Kaiming',
                        distribution='normal',
                        override=[
                            dict(name='upsample_x'),
                            dict(name='upsample_y')
                        ])
                ]

    def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
                       fc_out_channels):
        in_channels = in_channels * roi_feat_size * roi_feat_size
        branch_fcs = nn.ModuleList()
        for i in range(num_branch_fcs):
            fc_in_channels = (in_channels if i == 0 else fc_out_channels)
            branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
        return branch_fcs

    def cls_forward(self, cls_x):
        cls_x = cls_x.view(cls_x.size(0), -1)
        for fc in self.cls_fcs:
            cls_x = self.relu(fc(cls_x))
        cls_score = self.fc_cls(cls_x)
        return cls_score

    def attention_pool(self, reg_x):
        """Extract direction-specific features fx and fy with an attention
        mechanism."""
        reg_fx = reg_x
        reg_fy = reg_x
        reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
        reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
        reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
        reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
        reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
        reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
        return reg_fx, reg_fy

    def side_aware_feature_extractor(self, reg_x):
        """Refine and extract side-aware features without splitting them."""
        for reg_pre_conv in self.reg_pre_convs:
            reg_x = reg_pre_conv(reg_x)
        reg_fx, reg_fy = self.attention_pool(reg_x)

        if self.reg_post_num > 0:
            reg_fx = reg_fx.unsqueeze(2)
            reg_fy = reg_fy.unsqueeze(3)
            for i in range(self.reg_post_num):
                reg_fx = self.reg_post_conv_xs[i](reg_fx)
                reg_fy = self.reg_post_conv_ys[i](reg_fy)
            reg_fx = reg_fx.squeeze(2)
            reg_fy = reg_fy.squeeze(3)
        if self.reg_feat_up_ratio > 1:
            reg_fx = self.relu(self.upsample_x(reg_fx))
            reg_fy = self.relu(self.upsample_y(reg_fy))
        reg_fx = torch.transpose(reg_fx, 1, 2)
        reg_fy = torch.transpose(reg_fy, 1, 2)
        return reg_fx.contiguous(), reg_fy.contiguous()

    def reg_pred(self, x, offset_fcs, cls_fcs):
        """Predict bucketing estimation (cls_pred) and fine regression
        (offset pred) with side-aware features."""
        x_offset = x.view(-1, self.reg_in_channels)
        x_cls = x.view(-1, self.reg_in_channels)

        for fc in offset_fcs:
            x_offset = self.relu(fc(x_offset))
        for fc in cls_fcs:
            x_cls = self.relu(fc(x_cls))
        offset_pred = self.fc_reg_offset(x_offset)
        cls_pred = self.fc_reg_cls(x_cls)

        offset_pred = offset_pred.view(x.size(0), -1)
        cls_pred = cls_pred.view(x.size(0), -1)

        return offset_pred, cls_pred

    def side_aware_split(self, feat):
        """Split side-aware features aligned with orders of bucketing
        targets."""
        l_end = int(np.ceil(self.up_reg_feat_size / 2))
        r_start = int(np.floor(self.up_reg_feat_size / 2))
        feat_fl = feat[:, :l_end]
        feat_fr = feat[:, r_start:].flip(dims=(1, ))
        feat_fl = feat_fl.contiguous()
        feat_fr = feat_fr.contiguous()
        feat = torch.cat([feat_fl, feat_fr], dim=-1)
        return feat

    def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
        """Split batch bbox prediction back to each image."""
        bucket_cls_preds, bucket_offset_preds = bbox_pred
        bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
        bucket_offset_preds = bucket_offset_preds.split(
            num_proposals_per_img, 0)
        bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
        return bbox_pred

    def reg_forward(self, reg_x):
        outs = self.side_aware_feature_extractor(reg_x)
        edge_offset_preds = []
        edge_cls_preds = []
        reg_fx = outs[0]
        reg_fy = outs[1]
        offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
                                                  self.reg_cls_fcs)
        offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
                                                  self.reg_cls_fcs)
        offset_pred_x = self.side_aware_split(offset_pred_x)
        offset_pred_y = self.side_aware_split(offset_pred_y)
        cls_pred_x = self.side_aware_split(cls_pred_x)
        cls_pred_y = self.side_aware_split(cls_pred_y)
        edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
        edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)

        return (edge_cls_preds, edge_offset_preds)

    def forward(self, x):
        bbox_pred = self.reg_forward(x)
        cls_score = self.cls_forward(x)
        return cls_score, bbox_pred

    def get_targets(self, sampling_results, gt_bboxes, gt_labels,
                    rcnn_train_cfg):
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        neg_proposals = [res.neg_bboxes for res in sampling_results]
        pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
        pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
        cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
                                             pos_gt_bboxes, pos_gt_labels,
                                             rcnn_train_cfg)
        (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
         bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
        return (labels, label_weights, (bucket_cls_targets,
                                        bucket_offset_targets),
                (bucket_cls_weights, bucket_offset_weights))

    def bucket_target(self,
                      pos_proposals_list,
                      neg_proposals_list,
                      pos_gt_bboxes_list,
                      pos_gt_labels_list,
                      rcnn_train_cfg,
                      concat=True):
        (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
         bucket_offset_targets, bucket_offset_weights) = multi_apply(
             self._bucket_target_single,
             pos_proposals_list,
             neg_proposals_list,
             pos_gt_bboxes_list,
             pos_gt_labels_list,
             cfg=rcnn_train_cfg)

        if concat:
            labels = torch.cat(labels, 0)
            label_weights = torch.cat(label_weights, 0)
            bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
            bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
            bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
            bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
        return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
                bucket_offset_targets, bucket_offset_weights)

    def _bucket_target_single(self, pos_proposals, neg_proposals,
                              pos_gt_bboxes, pos_gt_labels, cfg):
        """Compute bucketing estimation targets and fine regression targets
        for a single image.

        Args:
            pos_proposals (Tensor): positive proposals of a single image,
                 Shape (n_pos, 4)
            neg_proposals (Tensor): negative proposals of a single image,
                 Shape (n_neg, 4).
            pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
                 of a single image, Shape (n_pos, 4).
            pos_gt_labels (Tensor): gt labels assigned to positive proposals
                 of a single image, Shape (n_pos, ).
            cfg (dict): Config of calculating targets

        Returns:
            tuple:

                - labels (Tensor): Labels in a single image. \
                    Shape (n,).
                - label_weights (Tensor): Label weights in a single image.\
                    Shape (n,)
                - bucket_cls_targets (Tensor): Bucket cls targets in \
                    a single image. Shape (n, num_buckets*2).
                - bucket_cls_weights (Tensor): Bucket cls weights in \
                    a single image. Shape (n, num_buckets*2).
                - bucket_offset_targets (Tensor): Bucket offset targets \
                    in a single image. Shape (n, num_buckets*2).
                - bucket_offset_weights (Tensor): Bucket offset weights \
                    in a single image. Shape (n, num_buckets*2).
""" num_pos = pos_proposals.size(0) num_neg = neg_proposals.size(0) num_samples = num_pos + num_neg labels = pos_gt_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_proposals.new_zeros(num_samples) bucket_cls_targets = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_cls_weights = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_offset_targets = pos_proposals.new_zeros( num_samples, 4 * self.side_num) bucket_offset_weights = pos_proposals.new_zeros( num_samples, 4 * self.side_num) if num_pos > 0: labels[:num_pos] = pos_gt_labels label_weights[:num_pos] = 1.0 (pos_bucket_offset_targets, pos_bucket_offset_weights, pos_bucket_cls_targets, pos_bucket_cls_weights) = self.bbox_coder.encode( pos_proposals, pos_gt_bboxes) bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights if num_neg > 0: label_weights[-num_neg:] = 1.0 return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def loss(self, cls_score, bbox_pred, rois, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_targets, bucket_offset_targets = bbox_targets bucket_cls_weights, bucket_offset_weights = bbox_weights # edge cls bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) losses['loss_bbox_cls'] = self.loss_bbox_cls( bucket_cls_preds, bucket_cls_targets, bucket_cls_weights, avg_factor=bucket_cls_targets.size(0), reduction_override=reduction_override) losses['loss_bbox_reg'] = self.loss_bbox_reg( bucket_offset_preds, bucket_offset_targets, bucket_offset_weights, avg_factor=bucket_offset_targets.size(0), reduction_override=reduction_override) return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): if isinstance(cls_score, list): cls_score = sum(cls_score) / float(len(cls_score)) scores = F.softmax(cls_score, dim=1) if cls_score is not None else None if bbox_pred is not None: bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred, img_shape) else: bboxes = rois[:, 1:].clone() confids = None if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) if rescale and bboxes.size(0) > 0: if isinstance(scale_factor, float): bboxes /= scale_factor else: bboxes /= torch.from_numpy(scale_factor).to(bboxes.device) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms( bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=confids) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. 
Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. labels (Tensor): Shape (n*bs, ). bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \ (n*bs, num_buckets*2)]. pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() == len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] edge_cls_preds, edge_offset_preds = bbox_preds edge_cls_preds_ = edge_cls_preds[inds] edge_offset_preds_ = edge_offset_preds[inds] bbox_pred_ = [edge_cls_preds_, edge_offset_preds_] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): shape (n, 4) or (n, 5) label (Tensor): shape (n, ) bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \ (n, num_buckets *2)] img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5 if rois.size(1) == 4: new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, img_meta['img_shape']) else: bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, img_meta['img_shape']) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois
{"hexsha": "07c542ef144ca664516f2a5b87913c6d92a75405", "size": 25025, "ext": "py", "lang": "Python", "max_stars_repo_path": "downstream/tinypersons/mmdet/models/roi_heads/bbox_heads/sabl_head.py", "max_stars_repo_name": "bwconrad/solo-learn", "max_stars_repo_head_hexsha": "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 326, "max_stars_repo_stars_event_min_datetime": "2021-05-06T01:15:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:52:13.000Z", "max_issues_repo_path": "downstream/tinypersons/mmdet/models/roi_heads/bbox_heads/sabl_head.py", "max_issues_repo_name": "bwconrad/solo-learn", "max_issues_repo_head_hexsha": "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 48, "max_issues_repo_issues_event_min_datetime": "2021-07-06T07:17:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T11:38:36.000Z", "max_forks_repo_path": "downstream/tinypersons/mmdet/models/roi_heads/bbox_heads/sabl_head.py", "max_forks_repo_name": "bwconrad/solo-learn", "max_forks_repo_head_hexsha": "ec510d803a4428d7d8803b90fa1484c42cb9cb52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 54, "max_forks_repo_forks_event_min_datetime": "2021-07-07T08:40:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T05:02:35.000Z", "avg_line_length": 42.8510273973, "max_line_length": 79, "alphanum_fraction": 0.5774625375, "include": true, "reason": "import numpy", "num_tokens": 5475}
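side_aware_split above reorders per-axis features so both halves run from the boundary inward: the first ceil(n/2) entries are kept as-is for the left/top side, and the last floor(n/2) entries are flipped for the right/bottom side. A toy trace of that reordering:

import torch

feat = torch.arange(8.).unsqueeze(0)      # one RoI, up_reg_feat_size = 8
left = feat[:, :4]                        # left/top half, boundary first
right = feat[:, 4:].flip(dims=(1,))       # right/bottom half, reversed outside-in
print(torch.cat([left, right], dim=-1))   # tensor([[0., 1., 2., 3., 7., 6., 5., 4.]])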
#redirect Sage Environmental
{"hexsha": "49f14cb1bddb9b3826c62bab904af9815c0942cd", "size": 29, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Fritsch_Environmental.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Fritsch_Environmental.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Fritsch_Environmental.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.5, "max_line_length": 28, "alphanum_fraction": 0.8620689655, "num_tokens": 5}
import pandas as pd import numpy as np import matplotlib.pyplot as plt from wordcloud import WordCloud, STOPWORDS replacements = {'éèêë':'e', 'àâä':'a', 'ùûü':'u', 'ôö':'o', 'îï':'i'} sep = {',', ';', '(', "'", ')', ';', '\\', ' ', '’'} unwanted = {'y', 'c', 'de', 'le', 'd', 'l', 'du', 'la', 'un', 'une', 'des', 'les', 'et', 'a', 'à', 'au', 'association', 'activités', 'associations'} folder = 'C:\\Users\\evill\\Documents\\D4GG_charities\\small_data_shipped\\' filename = 'clean_data_small.csv' test_mode = False verbose_mode = False def clean_word(word): cleaned_word = word.lower() for accents in replacements: for a in accents: cleaned_word = cleaned_word.replace(a, replacements[accents]) return cleaned_word def extract_cat_words(cat_w, count_words): mod_count_words = count_words.copy() cat_df = pd.DataFrame(cat_w) cat_df['count'] = np.nan length = len(cat_w) for i in range(length): w = cat_w[i] if w in count_words.keys(): cat_df.iloc[i, 1] = mod_count_words.get(w) del mod_count_words[w] return cat_df, mod_count_words def create_wordcloud_list(word_list, in_count_words, verbose=True): # Create and generate a word cloud image: cat_df, count_words = extract_cat_words(word_list, in_count_words) cat_freq_dict = dict() for w, c in cat_df.values: cat_freq_dict[w] = c / len(cat_df) # to get the frequencies if verbose: print(cat_df) out_wordcloud = WordCloud(width=1360, height=1200).generate_from_frequencies(cat_freq_dict) return out_wordcloud def plot_wordcloud(in_wordcloud): # Display the generated image plt.imshow(in_wordcloud, interpolation='bilinear') plt.axis("off") plt.show() return if test_mode: data = pd.read_csv(folder + filename, sep=',', nrows=100) else: data = pd.read_csv(folder + filename, sep=',') print(data.columns) # social desc = free text description print('objet social description -------------------') print(data['objet_social1_desc']) # What are their main objectives? desc = data['objet_social1_desc'].dropna() words = desc.str.split(pat=',', n=-1, expand=True) words = words.stack().reset_index(drop=True) desc_str = str(desc) print(words.head()) # separate meaningful words and count them count_words = dict() for obj in desc: w = '' for c in obj: if c in sep: if w != '' and w not in unwanted: detected_word = clean_word(w) if detected_word not in count_words.keys(): count_words[detected_word] = 0 count_words[detected_word] += 1 w = '' else: w += c print(count_words) # make categories cat_sport_w = ['sports', 'football', 'futsal', 'athletisme', 'pentathlon', 'footing', 'triathlon', 'jogging', 'petanque', 'velo', 'vtt', 'course', 'cyclisme', 'danse'] cat_help_w = ['humanitaires', 'caritatives', 'aide', 'benevolat', 'developpement', 'urgence', 'solidarites'] #cat_culture_w = ['theatre', 'spectacles', 'variete'] stopwords_1 = set(STOPWORDS) stopwords_1.update(unwanted) # # wordcloud_sport = WordCloud(stopwords=stopwords_1, background_color="black").generate(cat_sport_df[0]) # # Save picture # wordcloud.to_file('wordcloud.png') wordcloud_sport = create_wordcloud_list(cat_sport_w, count_words, verbose_mode) plot_wordcloud(wordcloud_sport) wordcloud_sport.to_file('wordcloud_sport.png') wordcloud_help = create_wordcloud_list(cat_help_w, count_words, verbose_mode) plot_wordcloud(wordcloud_help) wordcloud_help.to_file('wordcloud_help.png') # wordcloud_culture = create_wordcloud_list(cat_culture_w, count_words, verbose_mode) # plot_wordcloud(wordcloud_culture)
{"hexsha": "2aafdff05818a00391efe5d8369d12aebf7e5430", "size": 3881, "ext": "py", "lang": "Python", "max_stars_repo_path": "Emma/main_insight4.py", "max_stars_repo_name": "D4GGrenoble/finding_associations", "max_stars_repo_head_hexsha": "ccee4d9814365d3e71f963013fc412887f03491f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-05T18:28:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-05T18:28:23.000Z", "max_issues_repo_path": "Emma/main_insight4.py", "max_issues_repo_name": "D4GGrenoble/finding_associations", "max_issues_repo_head_hexsha": "ccee4d9814365d3e71f963013fc412887f03491f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Emma/main_insight4.py", "max_forks_repo_name": "D4GGrenoble/finding_associations", "max_forks_repo_head_hexsha": "ccee4d9814365d3e71f963013fc412887f03491f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-12-10T14:04:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-15T18:03:40.000Z", "avg_line_length": 31.8114754098, "max_line_length": 149, "alphanum_fraction": 0.6480288585, "include": true, "reason": "import numpy", "num_tokens": 1007}
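create_wordcloud_list above feeds a word-to-frequency dict into WordCloud.generate_from_frequencies rather than raw text. A minimal standalone sketch of that call; the words and frequencies are illustrative:

from wordcloud import WordCloud

freqs = {"football": 0.4, "velo": 0.35, "danse": 0.25}
wc = WordCloud(width=400, height=300).generate_from_frequencies(freqs)
wc.to_file("demo_cloud.png")  # font size in the image scales with frequency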
/*********************************************************************
* Software License Agreement (BSD License)
*
*  Copyright (c) 2008, Willow Garage, Inc.
*  All rights reserved.
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*   * Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*   * Redistributions in binary form must reproduce the above
*     copyright notice, this list of conditions and the following
*     disclaimer in the documentation and/or other materials provided
*     with the distribution.
*   * Neither the name of the Willow Garage nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
*  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
*  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
*  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
*  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
*  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
*  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
*  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*  POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/

/* Author: Wim Meeussen */

#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>

#include "common/Console.hh"
#include "sdf/interface/parser_deprecated.hh"
#include "sdf/interface/Param.hh"
#include "math/Pose.hh"
#include "math/Vector3.hh"
#include "math/Vector2d.hh"
#include "common/Color.hh"
#include "math/Quaternion.hh"

namespace deprecated_sdf
{
void copyChildren(xmlNodePtr _config, sdf::ElementPtr _sdf)
{
  // Iterate over all the child elements
  for (xmlNodePtr elemXml = xmlFirstElementChild(_config);
       elemXml != NULL; elemXml = xmlNextElementSibling(elemXml))
  {
    sdf::ElementPtr element(new sdf::Element);
    element->SetParent(_sdf);

    // copy the name (with prefix if it exists)
std::string prefix;
    if (elemXml->ns)
      prefix = (const char*)elemXml->ns->prefix;

    if (prefix.empty())
    {
      element->SetName((const char*)elemXml->name);

      // copy attributes
      for (xmlAttrPtr attrXml = elemXml->properties;
           attrXml && attrXml->name && attrXml->children;
           attrXml = attrXml->next)
      {
        element->AddAttribute((const char*)attrXml->name, "string",
                              "defaultvalue", false);
        initAttr(elemXml, (const char*)attrXml->name,
                 element->GetAttribute((const char*)attrXml->name));
      }

      // copy value
      std::string value = getValue(elemXml);
      if (!value.empty())
        element->AddValue("string", value, "1");

      _sdf->InsertElement(element);
    }
    else
    {
      gzdbg << "skipping prefixed element ["
            << prefix << ":" << (const char*)elemXml->name
            << "] when copying plugins\n";
    }
  }
}

bool getPlugins(xmlNodePtr _config, sdf::ElementPtr _sdf)
{
  // Get all plugins
  for (xmlNodePtr pluginXml = _config->xmlChildrenNode;
       pluginXml != NULL; pluginXml = pluginXml->next)
  {
    if (pluginXml->ns &&
        (const char*)pluginXml->ns->prefix == std::string("controller"))
    {
      sdf::ElementPtr sdfPlugin = _sdf->AddElement("plugin");

      initAttr(pluginXml, "name", sdfPlugin->GetAttribute("name"));
      if (xmlGetProp(pluginXml, reinterpret_cast<const xmlChar*>("plugin")))
        initAttr(pluginXml, "plugin", sdfPlugin->GetAttribute("filename"));
      else
        initAttr(pluginXml, "filename", sdfPlugin->GetAttribute("filename"));

      deprecated_sdf::copyChildren(pluginXml, sdfPlugin);
    }
    else if (std::string((const char*)pluginXml->name) == "plugin")
    {
      gzerr << "there is a <plugin> block in the deprecated xml\n";
      sdf::ElementPtr sdfPlugin = _sdf->AddElement("plugin");
      initAttr(pluginXml, "name", sdfPlugin->GetAttribute("name"));
      initAttr(pluginXml, "filename", sdfPlugin->GetAttribute("filename"));

      for (xmlNodePtr dataXml = pluginXml->xmlChildrenNode;
           dataXml != NULL; dataXml = dataXml->next)
      {
        // guard against children without a namespace (e.g. text nodes)
        // before dereferencing dataXml->ns
        if (!dataXml->ns ||
            std::string((const char*) dataXml->ns->prefix) != "interface")
        {
          sdf::ElementPtr sdfData = sdfPlugin->AddElement("data");
          sdfData->GetAttribute("value")->SetFromString(getValue(dataXml));
        }
        else
          gzerr << "<interface:...> are not copied\n";
      }
    }
  }

  return true;
}

// light parsing
bool initLight(xmlNodePtr _config, sdf::ElementPtr _sdf)
{
  initOrigin(_config, _sdf);

  xmlNodePtr lightNode = firstChildElement(_config, "light");
  if (!lightNode)
  {
    gzerr << "Light is missing the <light> child node\n";
    return false;
  }

  initAttr(_config, "name", _sdf->GetAttribute("name"));
  if (firstChildElement(lightNode, "castShadows"))
    initAttr(lightNode, "castShadows", _sdf->GetAttribute("cast_shadows"));
  initAttr(lightNode, "type", _sdf->GetAttribute("type"));

  sdf::ElementPtr sdfDiffuse = _sdf->AddElement("diffuse");
  initAttr(lightNode, "diffuse", sdfDiffuse->GetAttribute("rgba"));

  sdf::ElementPtr sdfSpecular = _sdf->AddElement("specular");
  initAttr(lightNode, "specular", sdfSpecular->GetAttribute("rgba"));

  sdf::ElementPtr sdfAttenuation = _sdf->AddElement("attenuation");
  initAttr(lightNode, "range", sdfAttenuation->GetAttribute("range"));
  sdfAttenuation->GetAttribute("constant")->SetFromString(
      getNodeTuple(lightNode, "attenuation", 0));
  sdfAttenuation->GetAttribute("linear")->SetFromString(
      getNodeTuple(lightNode, "attenuation", 1));
  sdfAttenuation->GetAttribute("quadratic")->SetFromString(
      getNodeTuple(lightNode, "attenuation", 2));

  sdf::ElementPtr sdfDirection = _sdf->AddElement("direction");
  initAttr(lightNode, "direction", sdfDirection->GetAttribute("xyz"));

  if 
(firstChildElement(lightNode, "spotCone"))
  {
    sdf::ElementPtr sdfSpot = _sdf->AddElement("spot");
    double innerAngle = boost::lexical_cast<double>(
        getNodeTuple(lightNode, "spotCone", 0));
    double outerAngle = boost::lexical_cast<double>(
        getNodeTuple(lightNode, "spotCone", 1));

    sdfSpot->GetAttribute("inner_angle")->SetFromString(
        boost::lexical_cast<std::string>(GZ_DTOR(innerAngle)));
    sdfSpot->GetAttribute("outer_angle")->SetFromString(
        boost::lexical_cast<std::string>(GZ_DTOR(outerAngle)));
    sdfSpot->GetAttribute("falloff")->SetFromString(
        getNodeTuple(lightNode, "spotCone", 2));
  }

  return true;
}

// Sensor parsing
bool initSensor(xmlNodePtr _config, sdf::ElementPtr _sdf)
{
  initAttr(_config, "name", _sdf->GetAttribute("name"));
  initAttr(_config, "alwaysOn", _sdf->GetAttribute("always_on"));
  initAttr(_config, "updateRate", _sdf->GetAttribute("update_rate"));

  initOrigin(_config, _sdf);

  if (std::string((const char*)_config->name) == "contact")
  {
    sdf::ElementPtr contact = _sdf->AddElement("contact");
    initContact(_config, contact);
    if (!_sdf->GetAttribute("type")->SetFromString("contact"))
    {
      gzerr << "Unable to set type to contact\n";
      return false;
    }
  }
  else if (std::string((const char*)_config->name) == "camera")
  {
    sdf::ElementPtr camera = _sdf->AddElement("camera");
    initCamera(_config, camera);
    // convert all camera to depth cameras so we can get point cloud if needed
    if (!_sdf->GetAttribute("type")->SetFromString("depth"))
    {
      gzerr << "Unable to set type to depth\n";
      return false;
    }
  }
  else if (std::string((const char*)_config->name) == "ray")
  {
    sdf::ElementPtr ray = _sdf->AddElement("ray");
    initRay(_config, ray);
    if (!_sdf->GetAttribute("type")->SetFromString("ray"))
    {
      gzerr << "Unable to set type to ray\n";
      return false;
    }
  }

  /// Get all the plugins
  getPlugins(_config, _sdf);

  return true;
}

bool initCamera(xmlNodePtr _config, sdf::ElementPtr _sdf)
{
  sdf::ElementPtr sdfHFOV = _sdf->AddElement("horizontal_fov");
  double hfov = boost::lexical_cast<double>(getNodeValue(_config, "hfov"));
  if (!sdfHFOV->GetAttribute("angle")->SetFromString(
        boost::lexical_cast<std::string>(GZ_DTOR(hfov))))
  {
    gzerr << "Unable to parse hfov angle\n";
    return false;
  }

  sdf::ElementPtr sdfImage = _sdf->AddElement("image");

  // parse imageSize
  std::string image_size_str = getNodeValue(_config, "imageSize");
  std::vector<unsigned int> sizes;
  std::vector<std::string> pieces;
  boost::split(pieces, image_size_str, boost::is_any_of(" "));
  for (unsigned int i = 0; i < pieces.size(); ++i)
  {
    if (pieces[i] != "")
    {
      try
      {
        sizes.push_back(boost::lexical_cast<unsigned int>(pieces[i].c_str()));
      }
      catch(boost::bad_lexical_cast &e)
      {
        gzerr << "<imageSize> value [" << pieces[i]
              << "] is not a valid unsigned int from a 2-tuple\n";
        return false;
      }
    }
  }

  if (sizes.size() != 2)
  {
    gzerr << "Vector contains [" << static_cast<int>(sizes.size())
          << "] elements instead of 2 elements\n";
    return false;
  }

  sdfImage->GetAttribute("width")->SetFromString(pieces[0]);
  sdfImage->GetAttribute("height")->SetFromString(pieces[1]);
  initAttr(_config, "imageFormat", sdfImage->GetAttribute("format"));

  sdf::ElementPtr sdfClip = _sdf->AddElement("clip");
  initAttr(_config, "nearClip", sdfClip->GetAttribute("near"));
  initAttr(_config, "farClip", sdfClip->GetAttribute("far"));

  // save
  if (firstChildElement(_config, "saveFrames"))
  {
    sdf::ElementPtr sdfSave = _sdf->AddElement("save");
    initAttr(_config, "saveFrames", sdfSave->GetAttribute("enabled"));
    initAttr(_config, "saveFramePath", sdfSave->GetAttribute("path"));
  }

  return true;
}

bool initRay(xmlNodePtr _config, 
sdf::ElementPtr _sdf) { sdf::ElementPtr sdfScan = _sdf->AddElement("scan"); /* FIXME: moved up to sensor/visualize attribute if (firstChildElement(_config, "displayRays")) { std::string display = getNodeValue(_config, "displayRays"); if (display != "false") sdfScan->GetAttribute("display")->SetFromString("false"); else sdfScan->GetAttribute("display")->SetFromString("true"); } */ sdf::ElementPtr sdfHoriz = sdfScan->AddElement("horizontal"); initAttr(_config, "rangeCount", sdfHoriz->GetAttribute("samples")); int rangeCount = boost::lexical_cast<int>(getNodeValue(_config, "rangeCount")); int rayCount = boost::lexical_cast<int>(getNodeValue(_config, "rayCount")); if (!sdfHoriz->GetAttribute("resolution")->SetFromString( boost::lexical_cast<std::string>(rangeCount / rayCount))) { gzerr << "Unable to parse ray sensor rayCount\n"; return false; } double minAngle = boost::lexical_cast<double>(getNodeValue(_config, "minAngle")); double maxAngle = boost::lexical_cast<double>(getNodeValue(_config, "maxAngle")); if (!sdfHoriz->GetAttribute("min_angle")->SetFromString( boost::lexical_cast<std::string>(GZ_DTOR(minAngle)))) { gzerr << "Unable to parse min_angle\n"; return false; } if (!sdfHoriz->GetAttribute("max_angle")->SetFromString( boost::lexical_cast<std::string>(GZ_DTOR(maxAngle)))) { gzerr << "Unable to parse max_angle\n"; return false; } sdf::ElementPtr sdfRange = _sdf->AddElement("range"); initAttr(_config, "minRange", sdfRange->GetAttribute("min")); initAttr(_config, "maxRange", sdfRange->GetAttribute("max")); initAttr(_config, "resRange", sdfRange->GetAttribute("resolution")); return true; } bool initContact(xmlNodePtr _config, sdf::ElementPtr _sdf) { sdf::ElementPtr sdfCollision = _sdf->AddElement("collision"); initAttr(_config, "geom", sdfCollision->GetAttribute("name")); return true; } // _config = <body> // _sdf = <inertial> bool initInertial(xmlNodePtr _config, sdf::ElementPtr _sdf) { // Origin (old gazebo xml supports only cx, cy, cz translations, no rotation // xyz and rpy under body:... 
is for the link frame

  xmlNodePtr cx_xml = firstChildElement(_config, "cx");
  xmlNodePtr cy_xml = firstChildElement(_config, "cy");
  xmlNodePtr cz_xml = firstChildElement(_config, "cz");

  std::string poseString;
  if (cx_xml)
    poseString += getValue(cx_xml) + " ";
  else
  {
    gzerr << "Missing cx\n";
    return false;
  }

  if (cy_xml)
    poseString += getValue(cy_xml) + " ";
  else
  {
    gzerr << "Missing cy\n";
    return false;
  }

  if (cz_xml)
    poseString += getValue(cz_xml) + " ";
  else
  {
    gzerr << "Missing cz\n";
    return false;
  }

  // Put in the rpy values
  poseString += "0 0 0";

  sdf::ElementPtr sdfOrigin = _sdf->AddElement("origin");
  sdfOrigin->GetAttribute("pose")->SetFromString(poseString);

  initAttr(_config, "mass", _sdf->GetAttribute("mass"));

  sdf::ElementPtr sdfInertia = _sdf->AddElement("inertia");

  xmlNodePtr ixx_xml = firstChildElement(_config, "ixx");
  xmlNodePtr ixy_xml = firstChildElement(_config, "ixy");
  xmlNodePtr ixz_xml = firstChildElement(_config, "ixz");
  xmlNodePtr iyy_xml = firstChildElement(_config, "iyy");
  xmlNodePtr iyz_xml = firstChildElement(_config, "iyz");
  xmlNodePtr izz_xml = firstChildElement(_config, "izz");
  if (!ixx_xml || !ixy_xml || !ixz_xml || !iyy_xml || !iyz_xml || !izz_xml)
  {
    gzerr << "Inertial: inertia element must have ixx,"
          << " ixy, ixz, iyy, iyz, izz attributes\n";
    return false;
  }
  if (!sdfInertia->GetAttribute("ixx")->SetFromString(getValue(ixx_xml)) ||
      !sdfInertia->GetAttribute("ixy")->SetFromString(getValue(ixy_xml)) ||
      !sdfInertia->GetAttribute("ixz")->SetFromString(getValue(ixz_xml)) ||
      !sdfInertia->GetAttribute("iyy")->SetFromString(getValue(iyy_xml)) ||
      !sdfInertia->GetAttribute("iyz")->SetFromString(getValue(iyz_xml)) ||
      !sdfInertia->GetAttribute("izz")->SetFromString(getValue(izz_xml)))
  {
    gzerr << "one of the inertia elements: "
          << "ixx (" << getValue(ixx_xml) << ") "
          << "ixy (" << getValue(ixy_xml) << ") "
          << "ixz (" << getValue(ixz_xml) << ") "
          << "iyy (" << getValue(iyy_xml) << ") "
          << "iyz (" << getValue(iyz_xml) << ") "
          << "izz (" << getValue(izz_xml) << ") "
          << "is not a valid double.\n";
    return false;
  }

  return true;
}

// _config = "geom"
// _sdf = "collision"
bool initCollision(xmlNodePtr _config, sdf::ElementPtr _sdf)
{
  initAttr(_config, "name", _sdf->GetAttribute("name"));

  // Origin
  initOrigin(_config, _sdf);

  sdf::ElementPtr sdfGeom = _sdf->AddElement("geometry");
  if (std::string((const char *)_config->name) == "plane")
  {
    sdf::ElementPtr sdfPlane = sdfGeom->AddElement("plane");
    sdfPlane->GetAttribute("normal")->SetFromString(
        getNodeValue(_config, "normal"));
  }
  else if (std::string((const char *)_config->name) == "box")
  {
    sdf::ElementPtr sdfBox = sdfGeom->AddElement("box");
    sdfBox->GetAttribute("size")->SetFromString(getNodeValue(_config, "size"));
  }
  else if (std::string((const char *)_config->name) == "sphere")
  {
    sdf::ElementPtr sdfSphere = sdfGeom->AddElement("sphere");
    sdfSphere->GetAttribute("radius")->SetFromString(
        getNodeValue(_config, "size"));
  }
  else if (std::string((const char *)_config->name) == "cylinder")
  {
    sdf::ElementPtr sdfCylinder = sdfGeom->AddElement("cylinder");
    if (firstChildElement(_config, "size"))
    {
      sdfCylinder->GetAttribute("radius")->SetFromString(
          getNodeTuple(_config, "size", 0));
      sdfCylinder->GetAttribute("length")->SetFromString(
          getNodeTuple(_config, "size", 1));
    }
  }
  else if (std::string((const char *)_config->name) == "trimesh")
  {
    sdf::ElementPtr sdfMesh = sdfGeom->AddElement("mesh");
    sdfMesh->GetAttribute("filename")->SetFromString(
        getNodeValue(_config, "mesh"));
    initAttr(_config, "scale", sdfMesh->GetAttribute("scale"));
  }

  //
  // TODO: 
parse surface properties // sdf::ElementPtr sdfSurface = _sdf->AddElement("surface"); // friction ode has mu, mu2, fdir1, slip1, slip2 attributes sdf::ElementPtr sdfSurfaceFriction = sdfSurface->AddElement("friction"); sdf::ElementPtr sdfSurfaceFrictionOde = sdfSurfaceFriction->AddElement("ode"); // mu1 --> mu initAttr(_config, "mu1", sdfSurfaceFrictionOde->GetAttribute("mu")); // mu2 --> mu2 initAttr(_config, "mu2", sdfSurfaceFrictionOde->GetAttribute("mu2")); // fdir1 --> fdir1 initAttr(_config, "fdir1", sdfSurfaceFrictionOde->GetAttribute("fdir1")); // slip1 --> slip1 initAttr(_config, "slip1", sdfSurfaceFrictionOde->GetAttribute("slip1")); // slip2 --> slip2 initAttr(_config, "slip2", sdfSurfaceFrictionOde->GetAttribute("slip2")); // bounce has restitution_coefficient and threshold attributes sdf::ElementPtr sdfSurfaceBounce = sdfSurface->AddElement("bounce"); // bounce --> restitution_coefficient initAttr(_config, "bounce", sdfSurfaceBounce->GetAttribute( "restitution_coefficient")); // bounceVel --> threshold initAttr(_config, "bounceVel", sdfSurfaceBounce->GetAttribute("threshold")); // contact ode has soft_cfm, kp, kd, max_vel, min_depth attributes sdf::ElementPtr sdfSurfaceContact = sdfSurface->AddElement("contact"); sdf::ElementPtr sdfSurfaceContactOde = sdfSurfaceContact->AddElement("ode"); // kp --> kp initAttr(_config, "kp", sdfSurfaceContactOde->GetAttribute("kp")); // kd --> kd initAttr(_config, "kd", sdfSurfaceContactOde->GetAttribute("kd")); // softCFM --> soft_cfm initAttr(_config, "softCFM", sdfSurfaceContactOde->GetAttribute("soft_cfm")); // maxVel --> max_vel initAttr(_config, "maxVel", sdfSurfaceContactOde->GetAttribute("max_vel")); // minDepth --> min_depth initAttr(_config, "minDepth", sdfSurfaceContactOde->GetAttribute("min_depth")); return true; } // _config = a node with an xyz and/or rpy children // _sdf = an sdf element that has an origin child element bool initOrigin(xmlNodePtr _config, sdf::ElementPtr _sdf) { // Origin xmlNodePtr xyz_xml = firstChildElement(_config, "xyz"); // parse xyz sdf::ElementPtr origin = _sdf->AddElement("origin"); std::string poseStr; if (xyz_xml) poseStr += getValue(xyz_xml) + " "; else poseStr += "0 0 0 "; // parse rpy xmlNodePtr rpy_xml = firstChildElement(_config, "rpy"); if (rpy_xml != NULL) { std::string rpy_str = getNodeValue(rpy_xml, "rpy"); std::vector<double> degrees; std::vector<std::string> pieces; boost::split(pieces, rpy_str, boost::is_any_of(" ")); for (unsigned int i = 0; i < pieces.size(); ++i) { if (pieces[i] != "") { try { degrees.push_back(boost::lexical_cast<double>(pieces[i].c_str())); } catch(boost::bad_lexical_cast &e) { gzerr << "rpy value [" << pieces[i] << "] is not a valid double from a 3-tuple\n"; return false; } } } if (degrees.empty()) { poseStr += "0 0 0"; } else { if (degrees.size() != 3) { gzerr << "Vector contains [" << static_cast<int>(degrees.size()) << "] elements instead of 3 elements\n"; return false; } // convert degrees to radian std::ostringstream rpy_stream; rpy_stream << GZ_DTOR(degrees[0]) << " " << GZ_DTOR(degrees[1]) << " " << GZ_DTOR(degrees[2]); if (rpy_stream.str().empty()) { gzerr << "rpy_stream is empty, something is wrong\n"; return false; } poseStr += rpy_stream.str(); } } else poseStr += "0 0 0"; origin->GetAttribute("pose")->SetFromString(poseStr); return true; } // _config = <body> // _sdf = <link> bool initLink(xmlNodePtr _config, sdf::ElementPtr _sdf) { initAttr(_config, "name", _sdf->GetAttribute("name")); initOrigin(_config, _sdf); // optional features (turnGravityOff, 
selfCollide)
  initAttr(_config, "selfCollide", _sdf->GetAttribute("self_collide"));

  // kind of tricky, new attribute gravity is opposite of turnGravityOff
  xmlNodePtr tgo = firstChildElement(_config, "turnGravityOff");
  if (tgo)
  {
    sdf::ParamT<bool> tgoP("turnGravity", "false", false);
    tgoP.SetFromString(getValue(tgo).c_str());
    _sdf->GetAttribute("gravity")->Set(!tgoP.GetValue());
  }

  // Inertial (optional)
  xmlNodePtr mm = firstChildElement(_config, "massMatrix");
  if (mm)
  {
    sdf::ParamT<bool> custom_mass_matrix("mass", "false", false);
    custom_mass_matrix.SetFromString(getValue(mm).c_str());
    if (custom_mass_matrix.GetValue())
    {
      sdf::ElementPtr sdfInertial = _sdf->AddElement("inertial");
      if (!initInertial(_config, sdfInertial))
      {
        gzerr << "Could not parse inertial element for Link '"
              << _sdf->GetAttribute("name")->GetAsString() << "'\n";
        return false;
      }
    }
  }

  // Multiple Collisions (optional)
  for (xmlNodePtr collision_xml = getChildByNSPrefix(_config, "geom");
       collision_xml;
       collision_xml = getNextByNSPrefix(collision_xml, "geom"))
  {
    sdf::ElementPtr sdfCollision = _sdf->AddElement("collision");
    if (!initCollision(collision_xml, sdfCollision))
    {
      gzerr << "Unable to parse geom\n";
      return false;
    }

    for (xmlNodePtr visual_xml = firstChildElement(collision_xml, "visual");
         visual_xml; visual_xml = nextSiblingElement(visual_xml, "visual"))
    {
      sdf::ElementPtr sdfVisual = _sdf->AddElement("visual");

      // set name to geom(collision) name append _visual
      sdfVisual->GetAttribute("name")->SetFromString(
          sdfCollision->GetAttribute("name")->GetAsString() + "_visual");

      if (!initVisual(visual_xml, sdfVisual))
      {
        gzerr << "Unable to parse visual\n";
        return false;
      }

      // In order to parse old gazebo xml (nested format)
      // to new sdf, we need to unwrap visual pose from within collision.
// take origin of visual, multiply it by collision's transform gazebo::math::Pose col_pose = sdfCollision->GetElement("origin")->GetValuePose("pose"); gazebo::math::Pose vis_pose = sdfVisual->GetElement("origin")->GetValuePose("pose"); vis_pose = col_pose*vis_pose; // update the sdf pose sdfVisual->GetElement("origin")->GetAttribute("pose")->Set(vis_pose); } // TODO: check for duplicate geoms } // Get all sensor elements // FIXME: instead of child elements, get namespace == sensor blocks for (xmlNodePtr sensor_xml = getChildByNSPrefix(_config, "sensor"); sensor_xml; sensor_xml = getNextByNSPrefix(sensor_xml, "sensor")) { sdf::ElementPtr sdfSensor = _sdf->AddElement("sensor"); if (!initSensor(sensor_xml, sdfSensor)) { gzerr << "Unable to parse sensor\n"; return false; } // TODO: check for duplicate sensors } return true; } /// _config = <visual> /// _sdf = visual bool initVisual(xmlNodePtr _config, sdf::ElementPtr _sdf) { _sdf->GetAttribute("cast_shadows")->SetFromString("true"); initOrigin(_config, _sdf); sdf::ElementPtr sdfGeom = _sdf->AddElement("geometry"); std::string mesh_attribute = getNodeValue(_config, "mesh"); // check each mesh type if (mesh_attribute == "unit_box") { sdf::ElementPtr sdfBox = sdfGeom->AddElement("box"); if (firstChildElement(_config, "scale")) { sdfBox->GetAttribute("size")->SetFromString( getNodeValue(_config, "scale")); } else if (firstChildElement(_config, "size")) { sdfBox->GetAttribute("size")->SetFromString( getNodeValue(_config, "size")); } else { sdfBox->GetAttribute("size")->SetFromString("1 1 1"); } } else if (mesh_attribute == "unit_sphere") { sdf::ElementPtr sdfSphere = sdfGeom->AddElement("sphere"); if (firstChildElement(_config, "scale")) { // FIXME: using first elem double sx = boost::lexical_cast<double>(getNodeTuple(_config, "scale", 0)); sdfSphere->GetAttribute("radius")->Set(0.5*sx); } else if (firstChildElement(_config, "size")) { // FIXME: using first elem double sx = boost::lexical_cast<double>(getNodeTuple(_config, "size", 0)); sdfSphere->GetAttribute("radius")->Set(0.5*sx); } else sdfSphere->GetAttribute("radius")->SetFromString("1.0"); } else if (mesh_attribute == "unit_cylinder") { sdf::ElementPtr sdfCylinder = sdfGeom->AddElement("cylinder"); if (firstChildElement(_config, "scale")) { double sx = boost::lexical_cast<double>(getNodeTuple(_config, "scale", 0)); sdfCylinder->GetAttribute("radius")->Set(0.5*sx); sdfCylinder->GetAttribute("length")->SetFromString( getNodeTuple(_config, "scale", 2)); } else if (firstChildElement(_config, "size")) { double sx = boost::lexical_cast<double>(getNodeTuple(_config, "size", 0)); sdfCylinder->GetAttribute("radius")->Set(0.5*sx); sdfCylinder->GetAttribute("length")->SetFromString( getNodeTuple(_config, "size", 2)); } else { sdfCylinder->GetAttribute("radius")->SetFromString("1"); sdfCylinder->GetAttribute("length")->SetFromString("1"); } } else if (!mesh_attribute.empty()) { sdf::ElementPtr sdfMesh = sdfGeom->AddElement("mesh"); sdfMesh->GetAttribute("filename")->SetFromString(mesh_attribute); if (firstChildElement(_config, "scale")) { sdfMesh->GetAttribute("scale")->SetFromString( getNodeValue(_config, "scale")); } } else { sdf::ElementPtr sdfPlane = sdfGeom->AddElement("plane"); sdfPlane->GetAttribute("normal")->SetFromString( getNodeValue(_config, "normal")); } // Material xmlNodePtr mat_xml = firstChildElement(_config, "material"); if (mat_xml) { sdf::ElementPtr sdfMat = _sdf->AddElement("material"); initAttr(_config, "material", sdfMat->GetAttribute("script")); } return true; } // _config = 
<joint>
// _sdf = joint
bool initJoint(xmlNodePtr _config, sdf::ElementPtr _sdf)
{
  initAttr(_config, "name", _sdf->GetAttribute("name"));

  // old <anchorOffset> translates to origin in the new sdf context
  xmlNodePtr anchor_offset_xml = firstChildElement(_config, "anchorOffset");
  std::string poseStr;
  if (anchor_offset_xml)
    poseStr += getValue(anchor_offset_xml) + " ";
  else
    poseStr += "0 0 0 ";
  // for rpy, which doesn't exist in old model xml
  poseStr += "0 0 0";
  sdf::ElementPtr origin = _sdf->AddElement("origin");
  origin->GetAttribute("pose")->SetFromString(poseStr);

  // setup parent / child links
  sdf::ElementPtr sdfParent = _sdf->AddElement("parent");
  sdf::ElementPtr sdfChild = _sdf->AddElement("child");

  // Get Parent Link
  // parent is specified by <anchor> element in old xml
  // once anchor is found, <body1> or <body2> are parsed
  // as child and parent
  if (!firstChildElement(_config, "anchor"))
  {
    gzerr << "No parent link specified for joint["
          << _sdf->GetAttribute("name")->GetAsString() << "]\n";
    return false;
  }
  initAttr(_config, "anchor", sdfChild->GetAttribute("link"));

  // Get Child Link
  xmlNodePtr body1Xml = firstChildElement(_config, "body1");
  xmlNodePtr body2Xml = firstChildElement(_config, "body2");
  if (body1Xml && body2Xml)
  {
    if (sdfChild->GetAttribute("link")->GetAsString() == getValue(body1Xml))
    {
      initAttr(_config, "body2", sdfParent->GetAttribute("link"));
    }
    else if (sdfChild->GetAttribute("link")->GetAsString() ==
             getValue(body2Xml))
    {
      initAttr(_config, "body1", sdfParent->GetAttribute("link"));
    }
    else
    {
      gzerr << "body1 and body2 do not match anchor, "
            << "not sure which one is the parent.\n";
      return false;
    }
  }
  else
  {
    gzerr << "No child link specified for joint["
          << _sdf->GetAttribute("name")->GetAsString() << "]\n";
    return false;
  }

  if (std::string((const char*)_config->name) == "hinge")
    _sdf->GetAttribute("type")->SetFromString("revolute");
  else if (std::string((const char*)_config->name) == "hinge2")
    _sdf->GetAttribute("type")->SetFromString("revolute2");
  else if (std::string((const char*)_config->name) == "slider")
    _sdf->GetAttribute("type")->SetFromString("prismatic");
  else if (std::string((const char*)_config->name) == "ball")
    _sdf->GetAttribute("type")->SetFromString("ball");
  else if (std::string((const char*)_config->name) == "universal")
    _sdf->GetAttribute("type")->SetFromString("universal");
  else if (std::string((const char*)_config->name) == "screw")
    _sdf->GetAttribute("type")->SetFromString("screw");
  else
    gzerr << "Unknown joint type[" << (const char*)_config->name << "]\n";

  // for screw joints, if threadPitch exists, translate to
  // "thread_pitch" in sdf
  xmlNodePtr threadPitchXml = firstChildElement(_config, "threadPitch");
  if (threadPitchXml)
    _sdf->GetOrCreateElement("thread_pitch")->GetValue()->SetFromString(
        getValue(threadPitchXml));

  if (firstChildElement(_config, "axis"))
  {
    sdf::ElementPtr sdfAxis = _sdf->AddElement("axis");
    initAttr(_config, "axis", sdfAxis->GetAttribute("xyz"));

    sdf::ElementPtr sdfDynamics = sdfAxis->AddElement("dynamics");
    if (firstChildElement(_config, "damping"))
    {
      initAttr(_config, "damping", sdfDynamics->GetAttribute("damping"));
    }

    sdf::ElementPtr sdfLimit = sdfAxis->AddElement("limit");

    // Get limit
    if (firstChildElement(_config, "lowStop"))
    {
      double stop_angle =
        boost::lexical_cast<double>(getNodeValue(_config, "lowStop"));
      if ((std::string((const char*)_config->name) == "slider") || 
(std::string((const char*)_config->name) == "screw")) sdfLimit->GetAttribute("lower")->Set(stop_angle); else sdfLimit->GetAttribute("lower")->Set(GZ_DTOR(stop_angle)); } if (firstChildElement(_config, "highStop")) { double stop_angle = boost::lexical_cast<double>(getNodeValue(_config, "highStop")); if ((std::string((const char*)_config->name) == "slider") || (std::string((const char*)_config->name) == "screw")) sdfLimit->GetAttribute("upper")->Set(stop_angle); else sdfLimit->GetAttribute("upper")->Set(GZ_DTOR(stop_angle)); } } if (firstChildElement(_config, "axis2")) { sdf::ElementPtr sdfAxis = _sdf->AddElement("axis2"); initAttr(_config, "axis", sdfAxis->GetAttribute("xyz")); sdf::ElementPtr sdfDynamics = sdfAxis->AddElement("dynamics"); if (firstChildElement(_config, "damping")) { initAttr(_config, "damping", sdfDynamics->GetAttribute("damping")); } sdf::ElementPtr sdfLimit = sdfAxis->AddElement("limit"); // Get limit if (firstChildElement(_config, "lowStop")) { double stop_angle = boost::lexical_cast<double>(getNodeValue(_config, "lowStop")); if ((std::string((const char*)_config->name) == "slider") || (std::string((const char*)_config->name) == "screw")) sdfLimit->GetAttribute("lower")->Set(stop_angle); else sdfLimit->GetAttribute("lower")->Set(GZ_DTOR(stop_angle)); } if (firstChildElement(_config, "highStop")) { double stop_angle = boost::lexical_cast<double>(getNodeValue(_config, "highStop")); if ((std::string((const char*)_config->name) == "slider") || (std::string((const char*)_config->name) == "screw")) sdfLimit->GetAttribute("upper")->Set(stop_angle); else sdfLimit->GetAttribute("upper")->Set(GZ_DTOR(stop_angle)); } } return true; } ////////////////////////////////////////////////// bool initModel(xmlNodePtr _config, sdf::ElementPtr _sdf) { initAttr(_config, "name", _sdf->GetAttribute("name")); initAttr(_config, "static", _sdf->GetAttribute("static")); initOrigin(_config, _sdf); // Get all Link elements for (xmlNodePtr linkXml = getChildByNSPrefix(_config, "body"); linkXml; linkXml = getNextByNSPrefix(linkXml, "body")) { sdf::ElementPtr sdfLink = _sdf->AddElement("link"); if (!initLink(linkXml, sdfLink)) { gzerr << "link xml is not initialized correctly\n"; return false; } } // Get all Joint elements for (xmlNodePtr jointXml = getChildByNSPrefix(_config, "joint"); jointXml; jointXml = getNextByNSPrefix(jointXml, "joint")) { sdf::ElementPtr sdfJoint = _sdf->AddElement("joint"); if (!initJoint(jointXml, sdfJoint)) { gzerr << "joint xml is not initialized correctly\n"; return false; } } /// Get all the plugins getPlugins(_config, _sdf); return true; } ////////////////////////////////////////////////// bool initWorld(xmlNodePtr _config, sdf::ElementPtr _sdf) { // Set world name if (!_sdf->GetAttribute("name")->SetFromString("default")) { gzerr << "Unable to set world name\n"; return false; } sdf::ElementPtr sdfScene = _sdf->AddElement("scene"); initScene(firstChildElement(_config, "ogre"), sdfScene); sdf::ElementPtr sdfPhysics = _sdf->AddElement("physics"); initPhysics(firstChildElement(_config, "ode"), sdfPhysics); // Get all model elements for (xmlNodePtr modelXml = getChildByNSPrefix(_config, "model"); modelXml; modelXml = getNextByNSPrefix(modelXml, "model")) { if (strcmp((const char*)modelXml->name, "renderable")== 0) { sdf::ElementPtr sdfLight = _sdf->AddElement("light"); if (!initLight(modelXml, sdfLight)) { gzerr << "light xml is not initialized correctly\n"; return false; } } else { sdf::ElementPtr sdfModel = _sdf->AddElement("model"); if (!initModel(modelXml, sdfModel)) { 
gzerr << "model xml is not initialized correctly\n"; return false; } } } /// Get all the plugins getPlugins(_config, _sdf); return true; } ////////////////////////////////////////////////// bool initScene(xmlNodePtr _config, sdf::ElementPtr _sdf) { sdf::ElementPtr sdfAmbient = _sdf->AddElement("ambient"); if (sdfAmbient) initAttr(_config, "ambient", sdfAmbient->GetAttribute("rgba")); sdf::ElementPtr sdfBackground = _sdf->AddElement("background"); if (sdfBackground) initAttr(_config, "background", sdfBackground->GetAttribute("rgba")); xmlNodePtr sky = firstChildElement(_config, "sky"); if (sky) { sdf::ElementPtr sdfSky = sdfBackground->AddElement("sky"); if (sdfSky) initAttr(sky, "material", sdfSky->GetAttribute("material")); } sdf::ElementPtr sdfShadow = _sdf->AddElement("shadows"); if (sdfShadow) initAttr(_config, "shadows", sdfShadow->GetAttribute("enabled")); // per pixel shading does not allow options // sdfShadow->GetAttribute("rgba")->SetFromString("0 0 0 0"); // initAttr(_config, "shadowTechnique", sdfShadow->GetAttribute("type")); return true; } // _config = physics:ode // _sdf = physics bool initPhysics(xmlNodePtr _config, sdf::ElementPtr _sdf) { _sdf->GetAttribute("type")->SetFromString("ode"); sdf::ElementPtr sdfGravity = _sdf->AddElement("gravity"); sdf::ElementPtr sdfODE = _sdf->AddElement("ode"); sdf::ElementPtr sdfODESolver = sdfODE->AddElement("solver"); sdf::ElementPtr sdfODEConstraints = sdfODE->AddElement("constraints"); if (sdfGravity) initAttr(_config, "gravity", sdfGravity->GetAttribute("xyz")); if (sdfODESolver) { initAttr(_config, "stepType", sdfODESolver->GetAttribute("type")); initAttr(_config, "stepTime", sdfODESolver->GetAttribute("dt")); if (sdfODESolver->GetAttribute("type")->GetAsString() == "quick") { initAttr(_config, "stepIters", sdfODESolver->GetAttribute("iters")); initAttr(_config, "stepW", sdfODESolver->GetAttribute("sor")); } if (sdfODESolver->GetAttribute("type")->GetAsString() == "pgs") { initAttr(_config, "stepIters", sdfODESolver->GetAttribute("iters")); initAttr(_config, "stepW", sdfODESolver->GetAttribute("sor")); } } // Contraints if (sdfODEConstraints) { initAttr(_config, "cfm", sdfODEConstraints->GetAttribute("cfm")); initAttr(_config, "erp", sdfODEConstraints->GetAttribute("erp")); initAttr(_config, "contactMaxCorrectingVel", sdfODEConstraints->GetAttribute("contact_max_correcting_vel")); initAttr(_config, "contactSurfaceLayer", sdfODEConstraints->GetAttribute("contact_surface_layer")); } return true; } bool initAttr(xmlNodePtr _node, const std::string &_key, sdf::ParamPtr _attr) { if (_node) { std::string value = getNodeValue(_node, _key); if (value.empty()) { gzdbg << "Node[" << _node->name << "] Has empty key value[" << _key << "]\n"; return false; } if (!_attr->SetFromString(value)) { gzerr << "Unable to set attribute from node[" << _node->name << "] and key[" << _key << "]\n"; return false; } } else { gzerr << "Unable to get attribute. 
Node is null\n"; return false; } return true; } ////////////////////////////////////////////////// bool initModelFile(const std::string &_filename, sdf::SDFPtr &_sdf) { std::ifstream fin; fin.open(_filename.c_str(), std::ios::in); if (!fin.is_open()) { gzerr << "The model file can not be opened, check path and permissions\n"; fin.close(); return false; } fin.close(); // Enable line numbering xmlLineNumbersDefault(1); std::string output; PreParser(_filename, output); return initModelString(output, _sdf); } ////////////////////////////////////////////////// bool initModelString(const std::string &_xmlString, sdf::SDFPtr &_sdf) { xmlDocPtr xmlDoc = xmlParseDoc(reinterpret_cast<const xmlChar*>(_xmlString.c_str())); return initModelDoc(xmlDoc, _sdf); } ////////////////////////////////////////////////// bool initModelDoc(xmlDocPtr _xmlDoc, sdf::SDFPtr &_sdf) { if (!_xmlDoc) { gzerr << "Could not parse the xml\n"; return false; } bool model_initialized = false; xmlNodePtr modelXml = firstChildElement(_xmlDoc, "physical"); while (modelXml) { sdf::ElementPtr model = _sdf->root->AddElement("model"); initModel(modelXml, model); modelXml = nextSiblingElement(modelXml, "physical"); // need at least one model, otherwise, return false and try as model model_initialized = true; } if (model_initialized) return true; else return false; } ////////////////////////////////////////////////// bool initWorldFile(const std::string &_filename, sdf::SDFPtr &_sdf) { std::ifstream fin; fin.open(_filename.c_str(), std::ios::in); if (!fin.is_open()) { gzerr << "The world file can not be opened, check path and permissions\n"; } fin.close(); // Enable line numbering xmlLineNumbersDefault(1); std::string output; PreParser(_filename, output); return initWorldString(output, _sdf); } ////////////////////////////////////////////////// bool initWorldString(const std::string &_xmlString, sdf::SDFPtr &_sdf) { xmlDocPtr xmlDoc = xmlParseDoc(reinterpret_cast<const xmlChar*>(_xmlString.c_str())); return initWorldDoc(xmlDoc, _sdf); } ////////////////////////////////////////////////// bool initWorldDoc(xmlDocPtr _xmlDoc, sdf::SDFPtr &_sdf) { if (!_xmlDoc) { gzerr << "Could not parse the xml\n"; return false; } // add or set version string if needed if (_sdf->root->GetAttribute("version")) _sdf->root->GetAttribute("version")->SetFromString("1.0"); else _sdf->root->AddAttribute("version", "string", "1.0", false); bool world_initialized = false; xmlNodePtr worldXml = firstChildElement(_xmlDoc, "world"); while (worldXml) { sdf::ElementPtr world = _sdf->root->AddElement("world"); world_initialized = initWorld(worldXml, world); worldXml = nextSiblingElement(worldXml, "world"); } // need all worlds successfully initialized, otherwise, // return false and try as model if (world_initialized) return true; else return false; } xmlNodePtr firstChildElement(xmlDocPtr node, const std::string &name) { xmlNodePtr tmp; for (tmp = node->xmlChildrenNode; tmp != NULL; tmp = tmp->next) if (tmp->name && name == (const char*)tmp->name) break; return tmp; } xmlNodePtr firstChildElement(xmlNodePtr node, const std::string &name) { xmlNodePtr tmp; for (tmp = xmlFirstElementChild(node); tmp != NULL; tmp = xmlNextElementSibling(tmp)) if (tmp->name && (name == (const char*)tmp->name)) break; return tmp; } xmlNodePtr nextSiblingElement(xmlNodePtr node, const std::string &name) { xmlNodePtr tmp; for (tmp = xmlNextElementSibling(node); tmp != NULL; tmp = xmlNextElementSibling(tmp)) if (tmp->name && (name == (const char*)tmp->name)) break; return tmp; } xmlNodePtr 
getNextByNSPrefix(xmlNodePtr node, const std::string &prefix)
{
  xmlNodePtr tmp;
  for (tmp = xmlNextElementSibling(node); tmp != NULL;
       tmp = xmlNextElementSibling(tmp))
    if (tmp->ns && prefix == (const char*)tmp->ns->prefix)
      break;
  return tmp;
}

xmlNodePtr getChildByNSPrefix(xmlNodePtr node, const std::string &prefix)
{
  xmlNodePtr tmp;
  for (tmp = node->xmlChildrenNode; tmp != NULL;
       tmp = xmlNextElementSibling(tmp))
    if (tmp->ns && prefix == (const char*)tmp->ns->prefix)
      break;
  return tmp;
}

std::string getNodeValue(xmlNodePtr node, const std::string &key)
{
  std::string result;
  xmlChar *value = NULL;

  // First check if the key is an attribute
  if (xmlHasProp(node, reinterpret_cast<const xmlChar*>(key.c_str())))
  {
    value = xmlGetProp(node, reinterpret_cast<const xmlChar*>(key.c_str()));
  }
  // If not an attribute, then it should be a child node
  else if (key == reinterpret_cast<const char*>(node->name))
  {
    value = xmlNodeListGetString(node->doc, node->xmlChildrenNode, 1);
  }
  else
  {
    xmlNodePtr currNode;

    currNode = node->xmlChildrenNode;

    // Loop through children
    while (currNode)
    {
      // If the name matches, then return its value
      if (key == reinterpret_cast<const char*>(currNode->name))
      {
        value = xmlNodeListGetString(node->doc, currNode->xmlChildrenNode, 1);
        break;
      }

      currNode = currNode->next;
    }
  }

  if (value)
  {
    result = reinterpret_cast<char*>(value);
    boost::trim(result);
    xmlFree(value);
  }

  return result;
}

std::string getNodeTuple(xmlNodePtr node, const std::string &key, int index)
{
  std::string value;
  std::string nvalue;
  int i, a, b, state, count;

  value = getNodeValue(node, key);
  if (value.empty())
    return std::string();

  state = 0;
  count = 0;
  a = b = 0;

  for (i = 0; i < static_cast<int>(value.size()); i++)
  {
    // Look for start of element
    if (state == 0)
    {
      if (!isspace(value[i]))
      {
        a = i;
        state = 1;
      }
    }
    // Look for end of element
    else if (state == 1)
    {
      if (isspace(value[i]))
      {
        state = 0;
        b = i - 1;
        count++;
        if (count > index)
          break;
      }
    }
  }

  if (state == 1)
  {
    b = i - 1;
    count++;
  }

  if (count == index + 1)
  {
    // extract the whitespace-separated element between positions a and b
    // (inclusive)
    nvalue = value.substr(a, b - a + 1);
  }

  return nvalue;
}

std::string getValue(xmlNodePtr node)
{
  xmlChar *v = xmlNodeListGetString(node->doc, node->xmlChildrenNode, 1);
  if (!v)
    return std::string();

  // copy into a std::string, then free the libxml2-allocated buffer
  std::string result(reinterpret_cast<const char*>(v));
  xmlFree(v);
  return result;
}

void PreParser(const std::string &fname, std::string &output)
{
  std::ifstream ifs(fname.c_str(), std::ios::in);
  std::string line;

  while (ifs.good())
  {
    std::getline(ifs, line);
    boost::trim(line);

    if (boost::find_first(line, "<include"))
    {
      int start = line.find("filename =");
      start += strlen("filename =") + 1;
      int end = line.find_first_of("'\"", start);
      std::string fname2 = line.substr(start, end-start);
      PreParser(fname2, output);
    }
    else
      output += line + "\n";
  }

  ifs.close();
}
}
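
/* usage sketch (illustrative only, not part of the original file):
   converting a deprecated gazebo world file into an SDF document via the
   entry points above. The sdf::init() call that loads the SDF spec into
   the document is an assumption about the surrounding sdf library.

   sdf::SDFPtr sdfDoc(new sdf::SDF);
   sdf::init(sdfDoc);
   if (!deprecated_sdf::initWorldFile("old_world.xml", sdfDoc))
     gzerr << "unable to convert deprecated world file\n";
*/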
{"hexsha": "d264f96cea17c6773ff2db6dd05b609d10a4a7c9", "size": 45320, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/sdf/interface/parser_deprecated.cc", "max_stars_repo_name": "nherment/gazebo", "max_stars_repo_head_hexsha": "fff0aa30b4b5748e43c2b0aa54ffcd366e9f042a", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2016-01-17T20:41:39.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-01T12:02:58.000Z", "max_issues_repo_path": "src/sdf/interface/parser_deprecated.cc", "max_issues_repo_name": "nherment/gazebo", "max_issues_repo_head_hexsha": "fff0aa30b4b5748e43c2b0aa54ffcd366e9f042a", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sdf/interface/parser_deprecated.cc", "max_forks_repo_name": "nherment/gazebo", "max_forks_repo_head_hexsha": "fff0aa30b4b5748e43c2b0aa54ffcd366e9f042a", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2015-09-29T02:30:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T12:11:22.000Z", "avg_line_length": 30.477471419, "max_line_length": 80, "alphanum_fraction": 0.6408870256, "num_tokens": 12255}
##########################################################
#              POSTPROCESSING FUNCTIONS                  #
##########################################################
# rcATT is a tool to predict tactics and techniques
# from the ATT&CK framework, using multilabel text
# classification and post processing.
# Version: 1.00
# Author: Valentine Legoy
# Date: 2019_10_22
# File for post-processing functions. Two types of post-
# processing methods are compared to the non-post-
# processing classification at each training of the model
# with new data: confidence propagation and hanging node.
# The results are saved in the configuration file and these
# functions are reused during prediction with the best
# post-processing method.

import joblib
import pandas as pd
import numpy as np

from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2, SelectPercentile
from sklearn.metrics import fbeta_score
from sklearn.model_selection import KFold

from nltk.corpus import stopwords

import classification_tools.preprocessing as prp
import classification_tools as clt


def print_progress_bar(iteration):
    """
    Print a progress bar for command-line interface training.
    """
    percent = ("{0:.1f}").format(100 * (iteration / float(50)))
    filledLength = int(iteration)
    bar = '█' * filledLength + '-' * (50 - filledLength)
    prefix = "Progress:"
    suffix = "Complete"
    printEnd = "\r"
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
    if iteration == 50:
        print()


def confidence_propagation_single(tactics_confidence_list, technique_name, technique_confidence_score):
    """
    Modify the prediction and confidence score of one technique using a
    boosting method depending on this technique's and its related tactics'
    confidence scores.
    """
    new_confidence_score = technique_confidence_score
    for tactic in clt.CODE_TACTICS:
        if not clt.TACTICS_TECHNIQUES_RELATIONSHIP_DF.loc[clt.TACTICS_TECHNIQUES_RELATIONSHIP_DF[tactic] == technique_name].empty:
            lambdaim = 1 / (np.exp(abs(technique_confidence_score - tactics_confidence_list[tactic])))
            new_confidence_score = new_confidence_score + lambdaim * tactics_confidence_list[tactic]
    return new_confidence_score


def confidence_propagation(predprob_tactics, pred_techniques, predprob_techniques):
    """
    Modify predictions and confidence scores of all techniques in the whole
    set using the confidence_propagation_single function.
    """
    pred_techniques_corrected = pred_techniques
    predprob_techniques_corrected = predprob_techniques
    tactics_confidence_df = pd.DataFrame(data = predprob_tactics, columns = clt.CODE_TACTICS)
    for j in range(len(predprob_techniques[0])):
        for i in range(len(predprob_techniques)):
            predprob_techniques_corrected[i][j] = confidence_propagation_single(tactics_confidence_df[i:(i+1)], clt.CODE_TECHNIQUES[j], predprob_techniques[i][j])
            if predprob_techniques_corrected[i][j] >= 0.0:
                pred_techniques_corrected[i][j] = int(1)
            else:
                pred_techniques_corrected[i][j] = int(0)
    return pred_techniques_corrected, predprob_techniques_corrected


def hanging_node(pred_tactics, predprob_tactics, pred_techniques, predprob_techniques, c, d):
    """
    Modify the technique predictions depending on the technique and related
    tactic confidence scores, on a threshold basis. 
""" predprob_techniques_corrected = pred_techniques for i in range(len(pred_techniques)): for j in range(len(pred_techniques[0])): for k in range(len(pred_tactics[0])): if not clt.TACTICS_TECHNIQUES_RELATIONSHIP_DF.loc[clt.TACTICS_TECHNIQUES_RELATIONSHIP_DF[clt.CODE_TACTICS[k]] == clt.CODE_TECHNIQUES[j]].empty: if predprob_techniques[i][j] < c and predprob_techniques[i][j] > 0 and predprob_tactics[i][k] < d: predprob_techniques_corrected[i][k] = 0 return predprob_techniques_corrected def combinations(c, d): """ Compute all combinations possible between c and d and their derived values. """ c_list = [c-0.1, c, c+0.1] d_list = [d-0.1, d, d+0.1] possibilities = [] for cl in c_list: for dl in d_list: possibilities.append([cl, dl]) return possibilities def hanging_node_threshold_comparison(pred_tactics, predprob_tactics, pred_techniques, predprob_techniques, known_pred_techniques, permutations): """ Using different combinations of thresholds retrieve all the F0.5 score macro-averaged between the post-processed predictions and the true labels. """ f05list = [] for pl in permutations: f05list_temp = [pl] new_pred_techniques = hanging_node(pred_tactics, predprob_tactics, pred_techniques, predprob_techniques, pl[0], pl[1]) f05list_temp.append(fbeta_score(known_pred_techniques, new_pred_techniques, beta=0.5, average='macro')) f05list.append(f05list_temp) return f05list def find_best_post_processing(cmd): """ Find best postprocessing approach to use with the new dataset based on the f0.5 score macro-averaged. """ # add stop words to the list found during the development of rcATT stop_words = stopwords.words('english') new_stop_words = ["'ll", "'re", "'ve", 'ha', 'wa',"'d", "'s", 'abov', 'ani', 'becaus', 'befor', 'could', 'doe', 'dure', 'might', 'must', "n't", 'need', 'onc', 'onli', 'ourselv', 'sha', 'themselv', 'veri', 'whi', 'wo', 'would', 'yourselv'] stop_words.extend(new_stop_words) # download both dataset: original to the tool and added by the user train_data_df = pd.read_csv('classification_tools/data/training_data_original.csv', encoding = "ISO-8859-1") train_data_added = pd.read_csv('classification_tools/data/training_data_added.csv', encoding = "ISO-8859-1") train_data_df.append(train_data_added, ignore_index = True) # preprocess the report train_data_df = prp.processing(train_data_df) # split the dataset in 5 fold to be able to give a more accurate F0.5 score kf = KFold(n_splits=5, shuffle = True, random_state=42) reports = train_data_df[clt.TEXT_FEATURES] overall_ttps = train_data_df[clt.ALL_TTPS] # get current configuration parameters for post-processing method hanging-node to define new thresholds parameters = joblib.load("classification_tools/data/configuration.joblib") c = parameters[1][0] d = parameters[1][1] permutations = combinations(c, d) f05_NO = [] #list of f0.5 score for all techniques predictions sets without post-processing f05_HN = [] #list of f0.5 score for all techniques predictions sets with hanging node post-processing f05_CP = [] #list of f0.5 score for all techniques predictions sets with confidence propagation post-processing # retrieve minimum and maximum probabilities to use in MinMaxScaler min_prob_tactics = 0.0 max_prob_tactics = 0.0 min_prob_techniques = 0.0 max_prob_techniques = 0.0 i = 6 # print progress bar counter for index1, index2 in kf.split(reports, overall_ttps): # splits the dataset according to the kfold split into training and testing sets, and data and labels reports_train, reports_test = reports.iloc[index1], reports.iloc[index2] overall_ttps_train, 
overall_ttps_test = overall_ttps.iloc[index1], overall_ttps.iloc[index2]
        train_reports = reports_train[clt.TEXT_FEATURES]
        test_reports = reports_test[clt.TEXT_FEATURES]
        train_tactics = overall_ttps_train[clt.CODE_TACTICS]
        train_techniques = overall_ttps_train[clt.CODE_TECHNIQUES]
        test_tactics = overall_ttps_test[clt.CODE_TACTICS]
        test_techniques = overall_ttps_test[clt.CODE_TECHNIQUES]

        # Define a pipeline combining a text feature extractor with a
        # multilabel classifier for the tactics predictions
        pipeline_tactics = Pipeline([
            ('columnselector', prp.TextSelector(key = 'processed')),
            ('tfidf', TfidfVectorizer(tokenizer = prp.LemmaTokenizer(), stop_words = stop_words, max_df = 0.90)),
            ('selection', SelectPercentile(chi2, percentile = 50)),
            ('classifier', OneVsRestClassifier(LinearSVC(penalty = 'l2', loss = 'squared_hinge', dual = True, class_weight = 'balanced'), n_jobs = 1))
        ])

        # train the model and predict the tactics
        pipeline_tactics.fit(train_reports, train_tactics)
        pred_tactics = pipeline_tactics.predict(test_reports)
        predprob_tactics = pipeline_tactics.decision_function(test_reports)
        if np.amin(predprob_tactics) < min_prob_tactics:
            min_prob_tactics = np.amin(predprob_tactics)
        if np.amax(predprob_tactics) > max_prob_tactics:
            max_prob_tactics = np.amax(predprob_tactics)

        if cmd:
            print_progress_bar(i)

        # Define a pipeline combining a text feature extractor with a
        # multilabel classifier for the techniques predictions
        pipeline_techniques = Pipeline([
            ('columnselector', prp.TextSelector(key = 'processed')),
            ('tfidf', TfidfVectorizer(tokenizer = prp.StemTokenizer(), stop_words = stop_words, min_df = 2, max_df = 0.99)),
            ('selection', SelectPercentile(chi2, percentile = 50)),
            ('classifier', OneVsRestClassifier(LinearSVC(penalty = 'l2', loss = 'squared_hinge', dual = False, max_iter = 1000, class_weight = 'balanced'), n_jobs = 1))
        ])

        # train the model and predict the techniques
        pipeline_techniques.fit(train_reports, train_techniques)
        pred_techniques = pipeline_techniques.predict(test_reports)
        predprob_techniques = pipeline_techniques.decision_function(test_reports)
        if np.amin(predprob_techniques) < min_prob_techniques:
            min_prob_techniques = np.amin(predprob_techniques)
        if np.amax(predprob_techniques) > max_prob_techniques:
            max_prob_techniques = np.amax(predprob_techniques)

        i += 2
        if cmd:
            print_progress_bar(i)

        # calculate the F0.5 score for each type of post-processing and
        # append it to the lists to keep track over the different folds
        f05_NO.append(fbeta_score(test_techniques, pred_techniques, beta = 0.5, average = 'macro'))
        f05_HN.extend(hanging_node_threshold_comparison(pred_tactics, predprob_tactics, pred_techniques, predprob_techniques, test_techniques, permutations))

        i += 2
        if cmd:
            print_progress_bar(i)

        CPres, _ = confidence_propagation(predprob_tactics, pred_techniques, predprob_techniques)

        i += 2
        if cmd:
            print_progress_bar(i)

        f05_CP.append(fbeta_score(test_techniques, CPres, beta = 0.5, average = 'macro'))
        i += 2

    save_post_processing_comparison = []

    # find the average F0.5 score for each post-processing method
    fb05_NO_avg = np.mean(f05_NO)
    fb05_CP_avg = np.mean(f05_CP)

    best_HN = []
    fb05_Max_HN_avg = 0
    if cmd:
        print_progress_bar(48)
    for ps in permutations:
        scores = []
        for prhn in f05_HN:
            if ps == prhn[0]:
                scores.append(prhn[1])
        avg_temp = np.mean(scores)
        if avg_temp >= fb05_Max_HN_avg:
            fb05_Max_HN_avg = avg_temp
            best_HN = ps

    # define the best post-processing method based on the average F0.5 score
    if fb05_NO_avg >= fb05_CP_avg and fb05_NO_avg >= fb05_Max_HN_avg:
        save_post_processing_comparison = ["N"]
    elif fb05_CP_avg >= fb05_Max_HN_avg and 
fb05_CP_avg >= fb05_NO_avg:
        save_post_processing_comparison = ["CP"]
    else:
        save_post_processing_comparison = ["HN"]

    save_post_processing_comparison.extend([best_HN, [min_prob_tactics, max_prob_tactics], [min_prob_techniques, max_prob_techniques]])

    # save the results
    joblib.dump(save_post_processing_comparison, "classification_tools/data/configuration.joblib")
    if cmd:
        print_progress_bar(50)
        print()
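
# Minimal usage sketch (illustrative only, not part of the original tool):
# enumerate the candidate (c, d) threshold pairs that the hanging-node
# comparison evaluates around a stored configuration. The 0.5/0.5 values
# below are placeholders for the thresholds normally loaded from
# classification_tools/data/configuration.joblib.
if __name__ == "__main__":
    for c_candidate, d_candidate in combinations(0.5, 0.5):
        print("candidate thresholds: c=%.1f, d=%.1f" % (c_candidate, d_candidate))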
{"hexsha": "bb9e7a2035814096bad5ea187d3e5876ee659cb2", "size": 11596, "ext": "py", "lang": "Python", "max_stars_repo_path": "unsupported/attack-predictor/1.0.0/src/classification_tools/postprocessing.py", "max_stars_repo_name": "sais7/python-apps", "max_stars_repo_head_hexsha": "3cef25e95216843ec461897c489d22d0a4cf4a19", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 68, "max_stars_repo_stars_event_min_datetime": "2020-04-10T13:55:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T10:12:50.000Z", "max_issues_repo_path": "unsupported/attack-predictor/1.0.0/src/classification_tools/postprocessing.py", "max_issues_repo_name": "sais7/python-apps", "max_issues_repo_head_hexsha": "3cef25e95216843ec461897c489d22d0a4cf4a19", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 179, "max_issues_repo_issues_event_min_datetime": "2020-05-22T08:11:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-22T15:48:27.000Z", "max_forks_repo_path": "unsupported/attack-predictor/1.0.0/src/classification_tools/postprocessing.py", "max_forks_repo_name": "sais7/python-apps", "max_forks_repo_head_hexsha": "3cef25e95216843ec461897c489d22d0a4cf4a19", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2020-07-07T10:38:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-21T20:43:04.000Z", "avg_line_length": 43.4307116105, "max_line_length": 240, "alphanum_fraction": 0.7390479476, "include": true, "reason": "import numpy", "num_tokens": 3078}
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities to allow for custom modelling in Crystalvalue."""

import os
import subprocess
from typing import Any, List, Mapping, Optional, Sequence, Union

from google.cloud import bigquery
from google.cloud import storage
from google.cloud import aiplatform
import joblib
import numpy as np
import pandas as pd
from sklearn import base
from sklearn import impute
from sklearn import pipeline
from sklearn import preprocessing
import tensorflow as tf
from typing_extensions import Protocol


_MODEL_DIR = 'crystalvalue/custom_model/'
_MODEL_FILENAME = os.path.join(_MODEL_DIR, 'model.joblib')
_EMBEDDING_MODEL_FILENAME = os.path.join(_MODEL_DIR, 'embedding_model.h5')
_CONTAINER_IMAGE = 'gcr.io/prem-data-science/custom_model:latest'
_DOCKER_BUILD_COMMAND = f'docker build -t custom_model {_MODEL_DIR}'
_DOCKER_TAG_COMMAND = f'docker tag custom_model {_CONTAINER_IMAGE}'
_DOCKER_PUSH_COMMAND = 'docker push gcr.io/prem-data-science/custom_model'


class Model(Protocol):
  """Class for structural subtyping of any sklearn regressor or classifier."""

  def __init__(self, *args: Any, **kwargs: Any) -> None:
    ...

  def fit(self,
          features: Union[pd.DataFrame, np.ndarray],
          target: Union[pd.Series, np.ndarray],
          **kwargs: Any) -> None:
    ...

  def predict(self, features: Union[pd.DataFrame, np.ndarray]) -> np.ndarray:
    ...


# ColumnTransformer gives some errors when pickling a FunctionTransformer
# that contains/uses a Keras model. That is why we need to use a custom made
# ColumnSelector.
class ColumnSelector(base.BaseEstimator, base.TransformerMixin):
  """Select only specified columns.

  Attributes:
    columns: List of columns to select from the raw data for the next step in
      the pipeline.
  """

  def __init__(self, columns: Sequence[str]) -> None:
    """Constructor of the ColumnSelector class.

    Args:
      columns: Columns to select from the raw data for the next pipeline
        step.
    """
    self.columns = columns

  def fit(self, data: pd.DataFrame, target: np.ndarray) -> 'ColumnSelector':
    """This fit method does not perform any operations.

    Args:
      data: Input data parameter.
      target: Target data parameter.

    Returns:
      The transformer class being fit.
    """
    return self

  def transform(self, data: pd.DataFrame) -> np.ndarray:
    """Transforms the input data based on the columns to select.

    Args:
      data: Data to transform.

    Returns:
      The selected data columns.
    """
    return data[self.columns]


class OrdinalEncoderPlusOne(preprocessing.OrdinalEncoder):
  """OrdinalEncoder class that adds 1 to the output when transforming.

  This is needed since currently Scikit-Learn does not allow to have multiple
  values (one per column) for 'unknown_value'. Therefore we need to use -1
  and then add +1 so it can fit properly into the tf.keras.layers.Embedding
  later.
  """

  def __init__(self,
               categories='auto',
               dtype=np.float64,
               handle_unknown='error',
               unknown_value=None) -> None:
    """Constructor for OrdinalEncoderPlusOne. 
Please visit the following documentation site for detailed information:
    https://github.com/scikit-learn/scikit-learn/blob/2beed5584/sklearn/preprocessing/_encoders.py#L649

    Args:
      categories: Sequence of categories or 'auto' for automatically
        determining the categories.
      dtype: Data type desired.
      handle_unknown: Either {'error', 'use_encoded_value'}. If
        use_encoded_value then 'unknown_value' also needs to be provided.
      unknown_value: Value to use for unknown categories.
    """
    super().__init__(
        categories=categories,
        dtype=dtype,
        handle_unknown=handle_unknown,
        unknown_value=unknown_value)

  def fit(self,
          data: pd.DataFrame,
          target: Optional[np.ndarray] = None) -> 'OrdinalEncoderPlusOne':
    """Fits the regular OrdinalEncoder based on the given data.

    Args:
      data: Input data parameter.
      target: Target data parameter.

    Returns:
      The transformer class being fit.
    """
    return super().fit(X=data, y=target)

  def transform(self, data: pd.DataFrame) -> np.ndarray:
    """Transforms the given data with OrdinalEncoder and then adds one.

    Args:
      data: Data to transform.

    Returns:
      The transformed data, offset by one.
    """
    return super().transform(X=data) + 1


def _save_model_locally_and_gcp(base_path: str,
                                model_name: str,
                                memory_object: Any,
                                bucket_name: str,
                                model_filename: str = 'model.joblib') -> str:
  """Saves a model locally and then uploads it to a GCP bucket.

  Args:
    base_path: Base path where model folders should go.
    model_name: Name of the model. This will create a folder with the model
      name.
    memory_object: Model to save.
    bucket_name: Name of the Google Storage bucket.
    model_filename: Name of the pickled file to be saved. By default it is
      'model.joblib' as required by VertexAI pre-built containers.

  Returns:
    The path of the folder where the model will be saved, but not the path to
    the model itself. If the model is saved in
    'folder1/folder2/model_name/model.joblib' then the returned path will be:
    'folder1/folder2/model_name/'.
  """
  model_dir = os.path.join(base_path, model_name)
  if not os.path.exists(model_dir):
    os.mkdir(model_dir)
  model_path = os.path.join(model_dir, model_filename)
  joblib.dump(value=memory_object, filename=model_path)
  storage_uri = f'gs://{bucket_name}/{model_path}'
  blob = storage.blob.Blob.from_string(storage_uri, client=storage.Client())
  blob.upload_from_filename(model_path)
  return f'gs://{bucket_name}/{model_dir}'


def _embedding_layer(n_categories: int) -> tf.keras.Sequential:
  """Builds a tf.keras.Sequential model with an embedding and a flatten layer.

  It is meant to be used as a helper function to re-create this embedding
  model for multiple categorical columns.

  Args:
    n_categories: Number of unique categories in the data column/array.

  Returns:
    The sequential model for creating embeddings.
  """
  embedding = tf.keras.Sequential([
      tf.keras.layers.Embedding(
          input_dim=n_categories + 1,
          output_dim=int(tf.math.ceil(n_categories**0.5)),
          input_length=1),
      tf.keras.layers.Flatten(),
  ])
  return embedding


def _train_categories_embedding(
    data: pd.DataFrame,
    categorical_features: Sequence[str],
    target_column: str,
    epochs: int,
    embedding_model_path: str = _EMBEDDING_MODEL_FILENAME,
    learning_rate: float = 0.1) -> str:
  """Trains the embedding model for categorical columns.

  It creates a model that will generate an embedding for each categorical
  column given. The returned embedding model is a sub-model of the trained
  model and is not compiled.

  Args:
    data: Data to use for training the embedding model.
    categorical_features: Sequence of columns containing the categories.
    target_column: Target column to use for fitting the model. 
    epochs: Number of epochs to train for.
    embedding_model_path: Path to save the embedding model.
    learning_rate: Learning rate to use in the Adam optimizer.

  Returns:
    The path to the saved embedding model.
  """
  ordinal_encoder = OrdinalEncoderPlusOne(
      handle_unknown='use_encoded_value', unknown_value=-1)
  categorical_data = pd.DataFrame(
      data=ordinal_encoder.fit_transform(data[categorical_features]),
      columns=categorical_features,
      index=data.index)
  embedding_inputs = [
      tf.keras.layers.Input(shape=(1,), dtype=np.int64)
      for feature in categorical_features
  ]
  embedding_outputs = []
  for feature, embedding_input in zip(categorical_features, embedding_inputs):
    # One +1 is always needed (embedding indices are zero-based) and the
    # second +1 accounts for the offset introduced by OrdinalEncoderPlusOne.
    n_categories = int(categorical_data[feature].max() + 2)
    embedding_output = _embedding_layer(n_categories=n_categories)(
        embedding_input)
    embedding_outputs.append(embedding_output)
  embeddings_concat = tf.keras.layers.concatenate(embedding_outputs)
  model_output = tf.keras.layers.Dense(1)(embeddings_concat)
  full_model = tf.keras.Model(inputs=embedding_inputs, outputs=model_output)
  full_model.compile(
      optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
      loss='mse')
  embedding_model = tf.keras.Model(
      inputs=embedding_inputs, outputs=embeddings_concat)
  full_model.fit(
      x=[categorical_data.iloc[:, i] for i in range(categorical_data.shape[1])],
      y=data[target_column],
      epochs=epochs)
  tf.keras.models.save_model(
      model=embedding_model, filepath=embedding_model_path)
  return embedding_model_path


def _get_data_embedding(data: np.ndarray,
                        embedding_model_path: str) -> np.ndarray:
  """Gets the embedding predictions for the given data using the given model.

  Args:
    data: Input data to generate the embeddings from.
    embedding_model_path: Path to the embedding model to use for generating
      the embeddings.

  Returns:
    The array containing the embeddings for the given data/categories.
  """
  embedding_model = tf.keras.models.load_model(embedding_model_path)
  data_inputs = [data[:, i] for i in range(data.shape[1])]
  return embedding_model.predict(data_inputs)


def _passthrough(data: np.ndarray) -> np.ndarray:
  return data


def _build_sklearn_pipeline(
    feature_types: Mapping[str, List[str]],
    custom_model: Model,
    embedding_model_path: Optional[str]) -> pipeline.Pipeline:
  """Builds a sklearn pipeline with preprocessing and a given model.

  This pipeline tries to provide a similar (but simpler) interface to what
  using AutoML in Crystalvalue looks like. For that reason it includes null
  imputing and preprocessing for numerical and categorical features.

  Args:
    feature_types: Mapping of feature types alongside the list of columns of
      that type.
    custom_model: Custom model to include in the pipeline.
    embedding_model_path: Path to the embedding model to use for categorical
      variables.

  Returns:
    A sklearn pipeline to use in Crystalvalue.
""" if embedding_model_path: embedding_transformer = preprocessing.FunctionTransformer( func=_get_data_embedding, validate=False, check_inverse=False, kw_args={'embedding_model_path': embedding_model_path}) else: embedding_transformer = preprocessing.FunctionTransformer( func=_passthrough) numeric_pipeline = pipeline.Pipeline( steps=[('selector', ColumnSelector(feature_types['numeric'])), ('numeric_imputer', impute.SimpleImputer(strategy='median')), ('scaler', preprocessing.StandardScaler())]) categorical_pipeline = pipeline.Pipeline( steps=[('selector', ColumnSelector(feature_types['string_or_categorical'])), ('category_imputer', impute.SimpleImputer(strategy='most_frequent')), ('ordinal_encoder', OrdinalEncoderPlusOne( handle_unknown='use_encoded_value', unknown_value=-1) ), ('embeddings', embedding_transformer)]) boolean_selector = ColumnSelector(feature_types['boolean']) preprocessor = pipeline.FeatureUnion([('categorical', categorical_pipeline), ('numeric', numeric_pipeline), ('boolean', boolean_selector)]) return pipeline.Pipeline( steps=[('preprocessor', preprocessor), ('regressor', custom_model)], verbose=True) def _build_and_push_docker_image() -> None: subprocess.run(_DOCKER_BUILD_COMMAND.split(), check=True, shell=False) subprocess.run(_DOCKER_TAG_COMMAND.split(), check=True, shell=False) subprocess.run(_DOCKER_PUSH_COMMAND.split(), check=True, shell=False) def train_custom_model(custom_model: Model, model_name: str, bigquery_client: bigquery.Client, feature_types: Mapping[str, List[str]], target_column: str, dataset_id: str, table_name: str, location: str) -> aiplatform.Model: """Trains a custom model and uploads it to VertexAI. Args: custom_model: Model object. Must follow a sklearn like approach. model_name: Name to use to save the model. bigquery_client: Bigquery client to fetch the training data. feature_types: Mapping of feature types along side with the list of columns of that type. target_column: Name of the target column to use. dataset_id: GCP dataset id. table_name: Name of the table containing the training data. location: Location of the resources, make sure they are all in the same region. Returns: Object of aiplatform.Model as uploaded in GCP. """ query_sql = f""" SELECT * FROM `{bigquery_client.project}.{dataset_id}.{table_name}` WHERE predefined_split_column = 'TRAIN' """ data = bigquery_client.query( query_sql, location=location).result().to_dataframe() embedding_model_path = None if feature_types['string_or_categorical']: embedding_model_path = _train_categories_embedding( data=data, categorical_features=feature_types['string_or_categorical'], target_column=target_column, epochs=5) # Train and save locally custom model custom_pipeline = _build_sklearn_pipeline( feature_types=feature_types, custom_model=custom_model, embedding_model_path=embedding_model_path) custom_pipeline.fit(data, data[target_column]) custom_pipeline.column_order = data.columns.to_list() joblib.dump(value=custom_pipeline, filename=_MODEL_FILENAME) # Build docker image containing trained model along side with prediction code. _build_and_push_docker_image() # Upload model to VertexAI aiplatform.init(project=bigquery_client.project, location=location) model = aiplatform.Model.upload( display_name=model_name, serving_container_image_uri=_CONTAINER_IMAGE, serving_container_predict_route='/predict', serving_container_health_route='/health_check', ) model.wait() return model
{"hexsha": "cefcf0e247de70fb3608c8a929165dd3b28ff421", "size": 14955, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/custom_model.py", "max_stars_repo_name": "google/crystalvalue", "max_stars_repo_head_hexsha": "719226fb302d414e94fcdb3ac4b468977f3529ec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-28T20:35:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T21:58:35.000Z", "max_issues_repo_path": "src/custom_model.py", "max_issues_repo_name": "google/crystalvalue", "max_issues_repo_head_hexsha": "719226fb302d414e94fcdb3ac4b468977f3529ec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/custom_model.py", "max_forks_repo_name": "google/crystalvalue", "max_forks_repo_head_hexsha": "719226fb302d414e94fcdb3ac4b468977f3529ec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8633093525, "max_line_length": 103, "alphanum_fraction": 0.7070544968, "include": true, "reason": "import numpy", "num_tokens": 3199}
import numpy as np from neuraxle.hyperparams.space import HyperparameterSamples from neuraxle.steps.flow import OptionalStep from neuraxle.steps.numpy import MultiplyByN def test_optional_should_disable_wrapped_step_when_disabled(): p = OptionalStep(MultiplyByN(2), nullified_return_value=[]).set_hyperparams(HyperparameterSamples({ 'enabled': False })) data_inputs = np.array(list(range(10))) outputs = p.transform(data_inputs) assert outputs == [] def test_optional_should_enable_wrapped_step_when_enabled(): p = OptionalStep(MultiplyByN(2), nullified_return_value=[]).set_hyperparams(HyperparameterSamples({ 'enabled': True })) data_inputs = np.array(list(range(10))) outputs = p.transform(data_inputs) assert np.array_equal(outputs, data_inputs * 2)
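
# A minimal standalone usage sketch (illustrative; not a test). It relies only
# on the OptionalStep/MultiplyByN/HyperparameterSamples APIs exercised above:
#
#   step = OptionalStep(MultiplyByN(3), nullified_return_value=[])
#   step.set_hyperparams(HyperparameterSamples({'enabled': True}))
#   step.transform(np.array([1, 2]))  # -> array([3, 6])
#   step.set_hyperparams(HyperparameterSamples({'enabled': False}))
#   step.transform(np.array([1, 2]))  # -> []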
{"hexsha": "ade912977db829f3a5af3d2a70be281213a66013", "size": 820, "ext": "py", "lang": "Python", "max_stars_repo_path": "testing/test_optional.py", "max_stars_repo_name": "Kimoby/Neuraxle", "max_stars_repo_head_hexsha": "af96f79d4f770f50174e2edf40da4147cdb8a5b5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "testing/test_optional.py", "max_issues_repo_name": "Kimoby/Neuraxle", "max_issues_repo_head_hexsha": "af96f79d4f770f50174e2edf40da4147cdb8a5b5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testing/test_optional.py", "max_forks_repo_name": "Kimoby/Neuraxle", "max_forks_repo_head_hexsha": "af96f79d4f770f50174e2edf40da4147cdb8a5b5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.275862069, "max_line_length": 103, "alphanum_fraction": 0.7512195122, "include": true, "reason": "import numpy", "num_tokens": 185}
/**
 * \file boost/numeric/ublasx/operation/empty.hpp
 *
 * \brief Check a given vector/matrix expression for emptiness.
 *
 * Copyright (c) 2010, Marco Guazzone
 *
 * Distributed under the Boost Software License, Version 1.0. (See
 * accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * \author Marco Guazzone, marco.guazzone@gmail.com
 */

#ifndef BOOST_NUMERIC_UBLASX_OPERATION_EMPTY_HPP
#define BOOST_NUMERIC_UBLASX_OPERATION_EMPTY_HPP


#include <boost/numeric/ublas/expression_types.hpp>
#include <boost/numeric/ublasx/operation/num_columns.hpp>
#include <boost/numeric/ublasx/operation/num_rows.hpp>
#include <boost/numeric/ublasx/operation/size.hpp>


namespace boost { namespace numeric { namespace ublasx {

using namespace ::boost::numeric::ublas;


/**
 * \brief Check the given vector expression for emptiness.
 *
 * \tparam VectorExprT The type of the input vector expression.
 * \param ve The input vector expression.
 * \return \c true if \a ve is a zero-sized vector; \c false otherwise.
 *
 * A zero-sized vector is a vector of zero length.
 */
template <typename VectorExprT>
bool empty(vector_expression<VectorExprT> const& ve)
{
    return size(ve) == 0;
}


/**
 * \brief Check the given matrix expression for emptiness.
 *
 * \tparam MatrixExprT The type of the input matrix expression.
 * \param me The input matrix expression.
 * \return \c true if \a me is a zero-sized matrix; \c false otherwise.
 *
 * A zero-sized matrix is a matrix with either zero rows or zero columns, or
 * both.
 */
template <typename MatrixExprT>
bool empty(matrix_expression<MatrixExprT> const& me)
{
    return num_rows(me) == 0 || num_columns(me) == 0;
}

}}} // Namespace boost::numeric::ublasx


#endif // BOOST_NUMERIC_UBLASX_OPERATION_EMPTY_HPP
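
// A minimal usage sketch (illustrative only; the concrete container types are
// plain uBLAS vectors/matrices, which model the expression concepts above):
//
//   #include <boost/numeric/ublas/matrix.hpp>
//   #include <boost/numeric/ublas/vector.hpp>
//
//   boost::numeric::ublas::vector<double> v;       // zero-length vector
//   boost::numeric::ublas::matrix<double> m(3, 0); // three rows, zero columns
//   boost::numeric::ublasx::empty(v); // true: size(v) == 0
//   boost::numeric::ublasx::empty(m); // true: num_columns(m) == 0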
{"hexsha": "9ddf3fa7ee2e70d8b62e11cc896bb95c5a53a1bf", "size": 1785, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/numeric/ublasx/operation/empty.hpp", "max_stars_repo_name": "comcon1/boost-ublasx", "max_stars_repo_head_hexsha": "290b92b643a944825df99bece3468a4f81518056", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "boost/numeric/ublasx/operation/empty.hpp", "max_issues_repo_name": "comcon1/boost-ublasx", "max_issues_repo_head_hexsha": "290b92b643a944825df99bece3468a4f81518056", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boost/numeric/ublasx/operation/empty.hpp", "max_forks_repo_name": "comcon1/boost-ublasx", "max_forks_repo_head_hexsha": "290b92b643a944825df99bece3468a4f81518056", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6417910448, "max_line_length": 75, "alphanum_fraction": 0.7422969188, "num_tokens": 456}
import torch
import utils
import random
import logging
import sys
import json
import numpy as np
from utils import parse_arguments
from hyperpyyaml import load_hyperpyyaml

logger = logging.getLogger(__name__)


class VoxCelebDataset(torch.utils.data.Dataset):
    def __init__(self, hparams, csv_data_file):
        super().__init__()
        self.hparams = hparams
        self.snt_len_sample = int(hparams["sample_rate"] * hparams["sentence_len"])

        # dictionary of id:str -> {duration:float, wav:str, start:int, stop:int, spk_id:str}
        self.csv_data = utils.load_data_csv(csv_data_file)

        self.data = []
        # collect the distinct labels (a dict preserves insertion order)
        self.labels = {}
        for key in self.csv_data:
            self.labels[self.csv_data[key]["spk_id"]] = None
            self.data.append(key)
        self.labels = list(self.labels.keys())

    def __getitem__(self, index):
        item = self.csv_data[self.data[index]]
        # read audio
        start = item["start"]
        stop = item["stop"]
        duration = item["duration"]
        # tensor with wav
        wav_tensor = self.load_audio(item["wav"], start, stop, duration)

        # return audio_sample, y
        # one-hot encoding for label
        # y = self.to_categorical(self.labels.index(item["spk_id"]), len(self.labels))
        # use indices instead - one-hot encode them in the loss fn; the label
        # count from here needs to match the number of final output neurons
        y = torch.tensor([self.labels.index(item["spk_id"])])

        res = wav_tensor, y, self.data[index]
        return res

    def __len__(self):
        return len(self.data)

    def get_label_count(self):
        return len(self.labels)

    def load_audio(self, wav, start, stop, duration):
        # instead of predefined chunks, pick a random chunk over the whole clip
        if self.hparams["random_chunk"]:
            duration_sample = int(duration * self.hparams["sample_rate"])
            start = random.randint(0, duration_sample - self.snt_len_sample - 1)
            stop = start + self.snt_len_sample

        num_frames = stop - start
        # resulting tensor and sample rate
        sig, fs = utils.load_audio(wav, num_frames=num_frames, frame_offset=start)
        # sig, fs = torchaudio.load(wav, num_frames=num_frames, frame_offset=start)
        sig = sig.transpose(0, 1).squeeze(1)
        return sig

    @staticmethod
    def to_categorical(class_num, num_classes):
        """One-hot encodes a class index."""
        return np.eye(num_classes, dtype='uint8')[class_num]


def main():
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    logger.info("Starting...")

    hparams_file, run_opts, overrides = parse_arguments(sys.argv[1:])

    with open(hparams_file) as fin:
        hparam_str = fin.read()

    if 'yaml' in run_opts:
        for yaml_file in run_opts['yaml']:
            logging.info(f"Loading additional yaml file: {yaml_file[0]}")
            with open(yaml_file[0]) as fin:
                hparam_str = hparam_str + "\n" + fin.read()

    hparams = load_hyperpyyaml(hparam_str, overrides)
    logging.info(f"Params: {json.dumps(hparams, indent=4)}")

    dataset = VoxCelebDataset(hparams, hparams["train_data"])
    for idx in range(10):
        x_wav, y, descr = dataset.__getitem__(idx)
        print(f"x_wav: {x_wav}")
        print(f"    y: {y}")
        print(f"descr: {descr}")


if __name__ == "__main__":
    main()
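
# An illustrative batching sketch (kept as a comment; the batch size is a
# hypothetical value and only VoxCelebDataset comes from this file):
#
#   dataset = VoxCelebDataset(hparams, hparams["train_data"])
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
#   for wav_batch, label_batch, keys in loader:
#       ...  # feed wav_batch into the speaker embedding model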
{"hexsha": "1357986f64bea0cdaba0f579918dff6570cbdf4e", "size": 3423, "ext": "py", "lang": "Python", "max_stars_repo_path": "VoxCelebDataset.py", "max_stars_repo_name": "akaver/speaker-embed-augm", "max_stars_repo_head_hexsha": "eef03926d1bc18a2463260b3bc4a7c12c624fee6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "VoxCelebDataset.py", "max_issues_repo_name": "akaver/speaker-embed-augm", "max_issues_repo_head_hexsha": "eef03926d1bc18a2463260b3bc4a7c12c624fee6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VoxCelebDataset.py", "max_forks_repo_name": "akaver/speaker-embed-augm", "max_forks_repo_head_hexsha": "eef03926d1bc18a2463260b3bc4a7c12c624fee6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8378378378, "max_line_length": 118, "alphanum_fraction": 0.6351153959, "include": true, "reason": "import numpy", "num_tokens": 818}
SUBROUTINE genmul(n,p,ncat,ix) C********************************************************************** C C SUBROUTINE GENMUL( N, P, NCAT, IX ) C GENerate an observation from the MULtinomial distribution C C C Arguments C C C N --> Number of events that will be classified into one of C the categories 1..NCAT C INTEGER N C C P --> Vector of probabilities. P(i) is the probability that C an event will be classified into category i. Thus, P(i) C must be [0,1]. Only the first NCAT-1 P(i) must be defined C since P(NCAT) is 1.0 minus the sum of the first C NCAT-1 P(i). C REAL P(NCAT-1) C C NCAT --> Number of categories. Length of P and IX. C INTEGER NCAT C C IX <-- Observation from multinomial distribution. All IX(i) C will be nonnegative and their sum will be N. C INTEGER IX(NCAT) C C C Method C C C Algorithm from page 559 of C C Devroye, Luc C C Non-Uniform Random Variate Generation. Springer-Verlag, C New York, 1986. C C********************************************************************** C .. Scalar Arguments .. INTEGER n,ncat C .. C .. Array Arguments .. REAL p(*) INTEGER ix(*) C .. C .. Local Scalars .. REAL prob,ptot,sum INTEGER i,icat,ntot C .. C .. External Functions .. INTEGER ignbin EXTERNAL ignbin C .. C .. Intrinsic Functions .. INTRINSIC abs C .. C .. Executable Statements .. C Check Arguments IF (n.LT.0) STOP 'N < 0 in GENMUL' IF (ncat.LE.1) STOP 'NCAT <= 1 in GENMUL' ptot = 0.0 DO 10,i = 1,ncat - 1 IF (p(i).LT.0.0) STOP 'Some P(i) < 0 in GENMUL' IF (p(i).GT.1.0) STOP 'Some P(i) > 1 in GENMUL' ptot = ptot + p(i) 10 CONTINUE IF (ptot.GT.0.99999) STOP 'Sum of P(i) > 1 in GENMUL' C Initialize variables ntot = n sum = 1.0 DO 20,i = 1,ncat ix(i) = 0 20 CONTINUE C Generate the observation DO 30,icat = 1,ncat - 1 prob = p(icat)/sum ix(icat) = ignbin(ntot,prob) ntot = ntot - ix(icat) IF (ntot.LE.0) RETURN sum = sum - p(icat) 30 CONTINUE ix(ncat) = ntot C Finished RETURN END
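C Illustrative driver (not part of RANLIB; N, P and NCAT below are
C arbitrary example values, and GENMUL still requires RANLIB's IGNBIN
C to be linked in):
C
C      PROGRAM DEMO
C      INTEGER IX(3)
C      REAL P(2)
C      P(1) = 0.2
C      P(2) = 0.5
C      CALL GENMUL(100,P,3,IX)
C      WRITE (*,*) IX
C      END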
{"hexsha": "75377beadf840d7cb995efdca2fb70650d164f01", "size": 2464, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Ions/Source/v_1.3/Sampler/Ranlib/src/genmul.f", "max_stars_repo_name": "ppernot/MC-ChemDB", "max_stars_repo_head_hexsha": "376c0b7e4596d8652833b5ff2ebe6316039587c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Ions/Source/v_1.3/Sampler/Ranlib/src/genmul.f", "max_issues_repo_name": "ppernot/MC-ChemDB", "max_issues_repo_head_hexsha": "376c0b7e4596d8652833b5ff2ebe6316039587c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Ions/Source/v_1.3/Sampler/Ranlib/src/genmul.f", "max_forks_repo_name": "ppernot/MC-ChemDB", "max_forks_repo_head_hexsha": "376c0b7e4596d8652833b5ff2ebe6316039587c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4946236559, "max_line_length": 71, "alphanum_fraction": 0.4959415584, "num_tokens": 731}
[STATEMENT] lemma iso_image: "mono ((`) f)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. mono ((`) f) [PROOF STEP] by (simp add: image_mono monoI)
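(* Informally: monotonicity of the image map says that A ⊆ B implies
   f ` A ⊆ f ` B. The library fact image_mono provides exactly this
   implication, and monoI packages it into the mono predicate. *)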
{"llama_tokens": 70, "file": "Order_Lattice_Props_Order_Duality", "length": 1}
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Author: Pengcheng He (penhe@microsoft.com)
# Date: 05/15/2019
#

import numpy as np
from torch.utils.data import Sampler

__all__ = ['BatchSampler', 'DistributedBatchSampler', 'RandomSampler', 'SequentialSampler']


class BatchSampler(Sampler):
  """Groups indices from a sampler into batches of a fixed size."""

  def __init__(self, sampler, batch_size):
    self.sampler = sampler
    self.batch_size = batch_size

  def __iter__(self):
    batch = []
    for idx in self.sampler:
      batch.append(idx)
      if len(batch) == self.batch_size:
        yield batch
        batch = []
    # yield the trailing, possibly smaller, batch
    if len(batch) > 0:
      yield batch

  def __len__(self):
    return (len(self.sampler) + self.batch_size - 1) // self.batch_size


class DistributedBatchSampler(Sampler):
  """Splits every batch into equal chunks, one chunk per distributed worker."""

  def __init__(self, sampler, rank=0, world_size=1, drop_last=False):
    self.sampler = sampler
    self.rank = rank
    self.world_size = world_size
    self.drop_last = drop_last

  def __iter__(self):
    for b in self.sampler:
      if len(b) % self.world_size != 0:
        if self.drop_last:
          break
        else:
          # pad the batch by repeating its first index so it divides evenly
          b.extend([b[0] for _ in range(self.world_size - len(b) % self.world_size)])
      chunk_size = len(b) // self.world_size
      yield b[self.rank * chunk_size:(self.rank + 1) * chunk_size]

  def __len__(self):
    return len(self.sampler)


class RandomSampler(Sampler):
  """Yields all indices in a random order, reshuffling on every epoch."""

  def __init__(self, total_samples: int, data_seed: int = 0):
    self.indices = np.array(np.arange(total_samples))
    self.rng = np.random.RandomState(data_seed)

  def __iter__(self):
    self.rng.shuffle(self.indices)
    for i in self.indices:
      yield i

  def __len__(self):
    return len(self.indices)


class SequentialSampler(Sampler):
  """Yields all indices in their natural order."""

  def __init__(self, total_samples: int):
    self.indices = np.array(np.arange(total_samples))

  def __iter__(self):
    for i in self.indices:
      yield i

  def __len__(self):
    return len(self.indices)
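
# An illustrative composition of the samplers above (kept as a comment; the
# sizes, seed and rank/world_size values are hypothetical):
#
#   sampler = RandomSampler(total_samples=1000, data_seed=42)
#   batches = BatchSampler(sampler, batch_size=32)
#   shard = DistributedBatchSampler(batches, rank=0, world_size=4)
#   for local_batch in shard:
#       ...  # each worker receives len(batch)//world_size indices per step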
{"hexsha": "1aec3c2b4298503556aef1b8d4f0b2abb934f5fa", "size": 2003, "ext": "py", "lang": "Python", "max_stars_repo_path": "DeBERTa/data/data_sampler.py", "max_stars_repo_name": "tirkarthi/DeBERTa", "max_stars_repo_head_hexsha": "c558ad99373dac695128c9ec45f39869aafd374e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-02-04T01:26:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T00:38:47.000Z", "max_issues_repo_path": "DeBERTa/data/data_sampler.py", "max_issues_repo_name": "tirkarthi/DeBERTa", "max_issues_repo_head_hexsha": "c558ad99373dac695128c9ec45f39869aafd374e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-18T00:23:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-05T15:36:48.000Z", "max_forks_repo_path": "src/LASER/data/data_sampler.py", "max_forks_repo_name": "BigBird01/LASER", "max_forks_repo_head_hexsha": "57143200814583410acdd0c5ac0a0f8bab8a1f7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.012987013, "max_line_length": 89, "alphanum_fraction": 0.6839740389, "include": true, "reason": "import numpy", "num_tokens": 515}
from __future__ import division
#Implements the K-means algorithm
import numpy as np
from random import randint
from copy import deepcopy
from time import time
from misc import inf

#@distanceDict holds the pairwise distances used in the computations
#@elementSet is the set of elements to cluster
#@k is the number of clusters required (it will likely be the number of clusters obtained by partitioning the set of elements by the values of metadata)
#@kClusters is a list of k elements that will represent the means of each cluster (these will likely be k elements whose metadata values are known)
#Returns the k clusters (ordered), the mean samples, the distance dictionary (key=(sample1,sample2),value=distance between sample1 and sample2) and the per-cluster distance lists

def shouldStop(kClusters,previouskClusters,k):
    endIt = True
    i = 0
    while i < k:
        endIt = endIt and (set(kClusters[i]) == set(previouskClusters[i]))
        i += 1
    return endIt

#The mean sample of a cluster is the one that minimizes the sum of distances to the other samples
def updateMean(cluster,distanceDict):
    distanceInCluster = []
    minDistance = inf
    currMean = None
    if not cluster:
        print "\n/!\ ERROR: Empty cluster."
        raise ValueError
    for samplei in cluster:
        distanceToThisSample = 0
        for samplej in set(cluster):
            if not (samplej == samplei):
                d = distanceDict.get((samplei,samplej))
                if d:
                    distanceToThisSample += d
                #else:
                    #print samplei,samplej
        distanceInCluster.append((samplei,distanceToThisSample))
        if distanceToThisSample < minDistance:
            minDistance = distanceToThisSample + 0
            currMean = samplei
    return currMean,distanceInCluster

def getClusters(currAssignments,k,totalElementSet):
    kClusters = [None] * k
    n = len(totalElementSet)
    for i in range(n):
        if not kClusters[currAssignments[i]]:
            kClusters[currAssignments[i]] = [totalElementSet[i]]
        else:
            kClusters[currAssignments[i]].append(totalElementSet[i])
    return kClusters

#@startSet solves the initialization problem of the K-means algorithm
def kMeans(elementSet,k,kClusters,startSet,distanceDict,dataArray,meanSamples=None):
    start = time()
    totalElementSet = elementSet + startSet
    n = len(totalElementSet)
    if (n > len(dataArray[3])):
        print "\n/!\ ERROR: Different lengths of sample sets:",n,len(dataArray[3]),"."
        raise ValueError
    #Initialization of the clusters and the means
    meanSamples = meanSamples or [x for x in startSet]
    endIt = False
    previouskClusters = deepcopy(kClusters)
    #currAssignments[i] is the index of the cluster where totalElementSet[i] currently is
    currAssignments = [None]*(n-k) + [i for i in range(k)]
    #distanceInClusters is a list of lists of (sample,sum of all distances from this sample to any other sample in the same cluster) pairs
    distanceInClusters = [None]*k
    print "/!\ Starting clustering..."
    while not endIt:
        print "/!\ Next iteration",currAssignments,"."
        for unassignedElement in range(n-k):
            minDist = inf
            minCluster = None
            for clusterIndex in range(k):
                distance = distanceDict.get((meanSamples[clusterIndex],totalElementSet[unassignedElement]))
                #In case of "infinite" distance for every cluster, the element would be assigned nowhere if the condition were distance < minDist
                if distance <= minDist:
                    minDist = distance
                    minCluster = clusterIndex
            #Deletes the element from its previous cluster, if it was assigned to one
            currAssign = currAssignments[unassignedElement]
            #Compare against None explicitly: cluster index 0 is a valid assignment and must not be treated as "unassigned"
            if (currAssign is not None) and not (currAssign == minCluster):
                #The element changes cluster: remove it from the old one...
                newCluster = []
                for x in kClusters[currAssign]:
                    if not (x == totalElementSet[unassignedElement]) and not (x in newCluster):
                        newCluster.append(x)
                kClusters[currAssign] = newCluster
                meanSamples[currAssign],distanceInCluster = updateMean(kClusters[currAssign],distanceDict)
                distanceInClusters[currAssign] = distanceInCluster
                #...then add it to the new one, keeping currAssignments in sync
                currAssignments[unassignedElement] = minCluster
                kClusters[minCluster].append(totalElementSet[unassignedElement])
                meanSamples[minCluster],distanceInCluster = updateMean(kClusters[minCluster],distanceDict)
                distanceInClusters[minCluster] = distanceInCluster
            #If the element is still unassigned (minCluster is never None here, so currAssign == None implies currAssign != minCluster)
            elif not (currAssign == minCluster):
                currAssignments[unassignedElement] = minCluster
                newCluster = []
                for x in kClusters[minCluster]:
                    if not (x in newCluster) and not (x == totalElementSet[unassignedElement]):
                        newCluster.append(x)
                    elif (x == totalElementSet[unassignedElement]):
                        print "/!\ ERROR:",x,"is in cluster #",minCluster,"whereas it should not be assigned to any cluster."
                kClusters[minCluster] = newCluster + [totalElementSet[unassignedElement]]
                meanSamples[minCluster],distanceInCluster = updateMean(kClusters[minCluster],distanceDict)
                distanceInClusters[minCluster] = distanceInCluster
            #else currAssign == minCluster: nothing to do
        endIt = shouldStop(kClusters,previouskClusters,k)
        previouskClusters = deepcopy(kClusters)
    print "-- End of clustering."
    end = time()
    print "TIME:",(end-start)
    kClusters = getClusters(currAssignments,k,totalElementSet)
    return kClusters,meanSamples,distanceDict,distanceInClusters
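
#Illustrative call sketch (commented out; every value below is a hypothetical
#placeholder, and dataArray[3] only needs to be at least as long as the total
#number of samples):
#elementSet = ["s1","s2","s3"] #unlabelled samples
#startSet = ["m1","m2"] #samples with known metadata, used as initial means
#kClusters = [["m1"],["m2"]]
#distanceDict = {("m1","s1"): 0.1, ("m2","s1"): 0.7, ...} #pairwise distances
#clusters,means,_,distances = kMeans(elementSet,2,kClusters,startSet,distanceDict,dataArray)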
{"hexsha": "84f54cc1db734d6d7cde28128124cf048c9508ed", "size": 5647, "ext": "py", "lang": "Python", "max_stars_repo_path": "kMeans.py", "max_stars_repo_name": "kuredatan/taxocluster", "max_stars_repo_head_hexsha": "acec6219ae5b7bd8e3831d71ee79ecfeebb53c8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kMeans.py", "max_issues_repo_name": "kuredatan/taxocluster", "max_issues_repo_head_hexsha": "acec6219ae5b7bd8e3831d71ee79ecfeebb53c8b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kMeans.py", "max_forks_repo_name": "kuredatan/taxocluster", "max_forks_repo_head_hexsha": "acec6219ae5b7bd8e3831d71ee79ecfeebb53c8b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.264957265, "max_line_length": 162, "alphanum_fraction": 0.6591110324, "include": true, "reason": "import numpy", "num_tokens": 1242}
import unittest import numpy as np import torch from qmctorch.utils import set_torch_double_precision from qmctorch.scf import Molecule from qmctorch.wavefunction import SlaterJastrow class TestGTO2STOFit(unittest.TestCase): def setUp(self): torch.manual_seed(101) np.random.seed(101) set_torch_double_precision() # molecule mol = Molecule( atom='C 0 0 0', unit='bohr', calculator='pyscf', basis='sto-3g', redo_scf=True) self.wf = SlaterJastrow(mol, kinetic='auto', configs='ground_state').gto2sto() self.pos = -0.25 + 0.5 * \ torch.as_tensor(np.random.rand(10, 18)) self.pos.requires_grad = True def test_forward(self): wfvals = self.wf(self.pos) ref = torch.as_tensor([[-8.4430e-06], [1.5092e-02], [3.3809e-03], [9.7981e-03], [-6.8513e-02], [-4.6836e-03], [-3.2847e-04], [2.3636e-02], [5.5934e-04], [1.3205e-02]]) assert torch.allclose(wfvals.data, ref, rtol=1E-4, atol=1E-4) if __name__ == "__main__": unittest.main()
{"hexsha": "8aea1c7851d1bb94a491c47a90579f299486bd68", "size": 1418, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/scf/test_gto2sto_fit.py", "max_stars_repo_name": "NLESC-JCER/QMCTorch", "max_stars_repo_head_hexsha": "c56472cd3e9cc59f2e01a880e674b7270d2cdc2b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-06-26T17:43:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T14:16:02.000Z", "max_issues_repo_path": "tests/scf/test_gto2sto_fit.py", "max_issues_repo_name": "NLESC-JCER/QMCTorch", "max_issues_repo_head_hexsha": "c56472cd3e9cc59f2e01a880e674b7270d2cdc2b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2020-05-01T07:13:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-13T19:51:55.000Z", "max_forks_repo_path": "tests/scf/test_gto2sto_fit.py", "max_forks_repo_name": "NLESC-JCER/QMCTorch", "max_forks_repo_head_hexsha": "c56472cd3e9cc59f2e01a880e674b7270d2cdc2b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-30T09:56:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T02:55:45.000Z", "avg_line_length": 26.7547169811, "max_line_length": 69, "alphanum_fraction": 0.4767277856, "include": true, "reason": "import numpy", "num_tokens": 358}
import torch.nn as nn
import numpy as np
import torch
import math
from config import *

class sp500_element: # the element of the SP500 variant dataset.
    def __init__(self, features, label):
        self.features, self.label = features, label

class dataset_element: # the element of the trivial dataset.
    def __init__(self, beta_1, beta_2):
        self.beta_1, self.beta_2 = beta_1, beta_2
        self.features, self.label = calc_features(beta_1, beta_2)

torch.random.manual_seed(179090813)
twistnet1 = nn.Sequential( # deprecated.
    nn.Linear(28, 28),
    nn.Sigmoid(),
    nn.Linear(28, 28),
    nn.Sigmoid(),
    nn.Linear(28, 28),
    nn.Sigmoid(),
    nn.Linear(28, 28),
    nn.Tanh(),
    nn.Linear(28, 1)
).double()
for param in twistnet1.parameters():
    param.requires_grad = False

def Synthesize_from_SP500_debug(train_dataset):
    trainset, validset, testset = [], [], []
    trainlen, validlen, testlen = 1400, 400, 200
    for i in range(trainlen):
        idx = train_dataset.sampler.indices[i]
        trainset.append(sp500_element(train_dataset.dataset[idx][0], torch.clamp(train_dataset.dataset[idx][2], min=-0.1, max=0.1) * 10 + 1))
    for i in range(trainlen, trainlen + validlen):
        idx = train_dataset.sampler.indices[i]
        validset.append(sp500_element(train_dataset.dataset[idx][0], torch.clamp(train_dataset.dataset[idx][2], min=-0.1, max=0.1) * 10 + 1))
    for i in range(trainlen + validlen, trainlen + validlen + testlen):
        idx = train_dataset.sampler.indices[i]
        testset.append(sp500_element(train_dataset.dataset[idx][0], torch.clamp(train_dataset.dataset[idx][2], min=-0.1, max=0.1) * 10 + 1))
    return trainset, len(trainset), validset, len(validset), testset, len(testset)

def Twist(A): # deprecated.
    n, m = A.shape[0], A.shape[1]
    twistmatrix = np.random.randint(1, 4, size=(n, n)) / 2
    for i in range(n):
        for j in range(n):
            if i != j:
                twistmatrix[i, j] = 0
    return np.matmul(twistmatrix, A)

def rounding(x, eps):
    # snap x to the nearest integer when it is within eps of one
    if math.fmod(x, 1) < eps:
        x = math.floor(x)
    elif math.fmod(x, 1) >= 1 - eps:
        x = math.ceil(x)
    return x

def calc_features(beta_1, beta_2): # beta_1 and beta_2 are 1-dimensional np arrays; deprecated.
    features = np.zeros((len(beta_1), 28))
    label = np.zeros((len(beta_1), 1))
    for i in range(29): # the last 28 days
        if i < 28:
            features[:, i] = (np.abs(np.sin(beta_1 + i)) * 3 + np.abs(np.cos(beta_2 + i)) * 5 + np.random.normal(size=1) * 0.05).squeeze()
        else:
            label = np.abs(np.sin(beta_1 + i)) * 3 + np.abs(np.cos(beta_2 + i)) * 5 + np.random.normal(size=1) * 0.05
    features, label = twistnet1(torch.from_numpy(features)), torch.from_numpy(label)
    return features, label

def getconstrlist(x, C, d):
    # Partitions the constraints Cx <= d by how the quadratic soft penalty treats
    # them: 'none' (satisfied with margin), 'quad' (inside the quadratic band
    # around the boundary) and 'linear' (violated beyond the band).
    # print(C.shape, x.shape, d.shape)
    constr = np.matmul(C, x.reshape(-1, 1)) - d.reshape(-1, 1)
    idx_none, idx_linear, idx_quad = [], [], []
    for i in range(C.shape[0]):
        if constr[i] < -1 / (4 * QUAD_SOFT_K):
            idx_none.append(i)
        if -1 / (4 * QUAD_SOFT_K) <= constr[i] <= 1 / (4 * QUAD_SOFT_K):
            idx_quad.append(i)
        if constr[i] > 1 / (4 * QUAD_SOFT_K):
            idx_linear.append(i)
    return idx_quad, idx_linear, idx_none

def merge_constraints(A0, b0, C0, d0, alpha0, theta):
    N, M1, M2 = A0.shape[1], A0.shape[0], C0.shape[0]
    # merged data
    C = np.concatenate((C0, A0, -np.identity(N)), axis=0)
    alpha = np.concatenate((alpha0, math.sqrt(N) * 5 * np.max(np.abs(theta)) * np.ones((M1, 1)), np.zeros((N, 1))), axis=0)
    d = np.concatenate((d0, b0, np.zeros((N, 1))), axis=0)
    # construct alpha
    """ mxratio = 0 # Deprecated; no need to calculate the angle actually.
for i in range(C.shape[0]): n = np.zeros((C.shape[1], 1)) A = np.zeros((C.shape[1] - 1, C.shape[1])) for j in range(C.shape[1] - 1): x1, x2 = np.random.random((C.shape[1], 1)), np.random.random((C.shape[1], 1)) k = 0 for k2 in range(C.shape[1]): k = k2 if C[i, k2] != 0: break # k is the non-zero part x1[k], x2[k] = x1[k] - (C[i].dot(x1) - d[i]) / C[i, k], x2[k] - (C[i].dot(x2) - d[i]) / C[i, k] A[j] = (x1 - x2).squeeze() j = 0 for j2 in range(C.shape[1]): j = j2 if C[i, j2] != 0: break # j is the non-zero part n[j] = 1 idx, b_solve = [i for i in range(j)] + [i for i in range(j + 1, A.shape[1])], np.zeros((A.shape[0])) for l in range(A.shape[0]): b_solve[l] = -A[l, j] # print(A[0], C[i].dot(x1) - d[i], C[i].dot(x2) - d[i]) if j == 0: A_solve = A[:, 1:] elif j == A.shape[1] - 1: A_solve = A[:, :A.shape[1] - 1] else: A_solve = np.concatenate((A[:, :j], A[:, j + 1:]), axis=1) # print(A) # print(np.linalg.matrix_rank(A_solve), A.shape, j, A.shape[0]-1) # print("C:",C,"d:",d,"A:", A) # print(A_solve) nn = np.matmul(np.linalg.inv(A_solve), b_solve) # print("nn:", nn) for l in range(A.shape[0]): n[idx[l]] = nn[l] # print("C:",C[i],"d:", d[i], "n:", n) ln_n = math.sqrt(n.T.dot(n)) # cos_n = np.min(n) / ln_n cos_n = 1 eps = 1e-8 for i in range(n.shape[0]): if n[i] > eps: cos_n = min(cos_n, n[i] / ln_n) # print(i,":", n[i] ,"/", ln_n) if cos_n < 1 - eps: mxratio = max(mxratio, 1 / cos_n) print("mxratio:", mxratio) exit(0) """ alpha[M1 + M2: M1 + M2 + N] = 20 * np.ones((theta.shape[0], 1)) # math.sqrt(mxratio ** 2 + 1) * np.max(np.abs(theta)) return C, d, alpha
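
# An illustrative sketch of the soft-constraint bookkeeping above (kept as a
# comment; the shapes are hypothetical and QUAD_SOFT_K comes from config):
#
#   N, M = 4, 6
#   C = np.random.random((M, N))
#   d = np.random.random((M, 1))
#   x = np.random.random((N,))
#   idx_quad, idx_linear, idx_none = getconstrlist(x, C, d)
#   # quad: within 1/(4*QUAD_SOFT_K) of the boundary Cx = d;
#   # linear: violated beyond the band; none: satisfied with margin.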
{"hexsha": "eb986c983c8769c01aaac17ac6a770f3a77f9e11", "size": 5722, "ext": "py", "lang": "Python", "max_stars_repo_path": "synthetic_linear_programming/util.py", "max_stars_repo_name": "PredOptwithSoftConstraint/PredOptwithSoftConstraint", "max_stars_repo_head_hexsha": "c0ec41a8c2c48034851cf04cd848013ceba1dd40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-12-12T15:23:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T23:24:02.000Z", "max_issues_repo_path": "synthetic_linear_programming/util.py", "max_issues_repo_name": "PredOptwithSoftConstraint/PredOptwithSoftConstraint", "max_issues_repo_head_hexsha": "c0ec41a8c2c48034851cf04cd848013ceba1dd40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synthetic_linear_programming/util.py", "max_forks_repo_name": "PredOptwithSoftConstraint/PredOptwithSoftConstraint", "max_forks_repo_head_hexsha": "c0ec41a8c2c48034851cf04cd848013ceba1dd40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-12-23T01:02:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T02:12:40.000Z", "avg_line_length": 42.0735294118, "max_line_length": 145, "alphanum_fraction": 0.5651869976, "include": true, "reason": "import numpy", "num_tokens": 1874}