struct CoNLL{S}
    filepaths::Vector{S}
    year::Int
    trainpath::String
    testpath::String
    devpath::String
end

function CoNLL(dirpath, year=2003)
    @assert(isdir(dirpath), dirpath)
    files = Dict()
    if year == 2003
        inner_files = readdir(dirpath)
        if "train.txt" ∈ inner_files
            files["train"] = "train.txt"
        end
        if "test.txt" ∈ inner_files
            files["test"] = "test.txt"
        end
        if "valid.txt" ∈ inner_files
            files["valid"] = "valid.txt"
        end
        for tuple in files
            files[tuple[1]] = joinpath(dirpath, tuple[2])
        end
    end
    return CoNLL(collect(values(files)), year,
                 files["train"], files["test"], files["valid"])
end

CoNLL() = CoNLL(datadep"CoNLL 2003")

MultiResolutionIterators.levelname_map(::Type{CoNLL}) = [
    :doc=>1, :document=>1, :article=>1,
    :sent=>2, :sentence=>2,
    :word=>3, :token=>3,
    :char=>4, :character=>4
]

function parse_conll2003_tagged_word(line::AbstractString)
    tokens_tags = split(line)
    length(tokens_tags) != 4 && throw("Error parsing line: \"$line\". Invalid Format.")
    return NERTaggedWord(tokens_tags[4], tokens_tags[3], tokens_tags[2], tokens_tags[1])
end

function parse_conll2003file(filename)
    local sent
    local doc
    docs = @NestedVector(NERTaggedWord,3)()
    context = Document(intern(basename(filename)), docs)

    # structure
    function new_document()
        doc = @NestedVector(NERTaggedWord,2)()
        push!(docs, doc)
    end
    function new_sentence()
        sent = @NestedVector(NERTaggedWord,1)()
        push!(doc, sent)
    end

    # words
    get_tagged(line) = push!(sent, parse_conll2003_tagged_word(line))

    # parse
    for line in eachline(filename)
        if length(line) == 0
            new_sentence()
        elseif startswith(strip(line), "-DOCSTART-")
            length(docs) > 0 && isempty(doc[end]) && deleteat!(doc, lastindex(doc))
            new_document()
        else
            get_tagged(line)
        end
    end
    isempty(doc[end]) && deleteat!(doc, lastindex(doc))
    return context
end

function load(corpus::CoNLL, file="train")
    if (corpus.year == 2003)
        file == "train" && return parse_conll2003file(corpus.trainpath)
        file == "test" && return parse_conll2003file(corpus.testpath)
        file == "dev" && return parse_conll2003file(corpus.devpath)
        throw("Invalid filename! Available datasets are `train`, `test` and `dev`")
    end
end
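# Usage sketch (hedged): `NERTaggedWord`, `Document`, and `@NestedVector` are
# assumed to come from the surrounding CorpusLoaders.jl package, and
# `flatten_levels`/`lvls` from MultiResolutionIterators.jl. The directory path
# below is illustrative only.
#
#   using CorpusLoaders, MultiResolutionIterators
#
#   corpus = CoNLL("/path/to/conll2003")   # expects train.txt / test.txt / valid.txt
#   train = load(corpus, "train")          # Document: docs -> sentences -> NERTaggedWords
#   sents = flatten_levels(train, lvls(CoNLL, :document))  # iterate sentences directly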
{"hexsha": "3e0cb29f0a3cb29dd1264253cbb48ff75205868f", "size": 2560, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/CoNLL.jl", "max_stars_repo_name": "AdarshKumar712/CorpusLoaders.jl", "max_stars_repo_head_hexsha": "379ff7bf902a1d8e48153f3da53eb811afda00ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-01T17:03:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-01T17:03:47.000Z", "max_issues_repo_path": "src/CoNLL.jl", "max_issues_repo_name": "AdarshKumar712/CorpusLoaders.jl", "max_issues_repo_head_hexsha": "379ff7bf902a1d8e48153f3da53eb811afda00ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/CoNLL.jl", "max_forks_repo_name": "AdarshKumar712/CorpusLoaders.jl", "max_forks_repo_head_hexsha": "379ff7bf902a1d8e48153f3da53eb811afda00ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5268817204, "max_line_length": 87, "alphanum_fraction": 0.6015625, "num_tokens": 702}
from multiprocessing import Process
import argparse, time, math
import numpy as np
import os
os.environ['OMP_NUM_THREADS'] = '16'
import mxnet as mx
from mxnet import gluon
import dgl
from dgl import DGLGraph
from dgl.data import register_data_args, load_data
from gcn_ns_sc import gcn_ns_train
from gcn_cv_sc import gcn_cv_train
from graphsage_cv import graphsage_cv_train

def main(args):
    g = dgl.contrib.graph_store.create_graph_from_store(args.graph_name, "shared_mem")
    # We need to set random seed here. Otherwise, all processes have the same mini-batches.
    mx.random.seed(g.worker_id)

    features = g.nodes[:].data['features']
    labels = g.nodes[:].data['labels']
    train_mask = g.nodes[:].data['train_mask']
    val_mask = g.nodes[:].data['val_mask']
    test_mask = g.nodes[:].data['test_mask']

    if args.num_gpus > 0:
        ctx = mx.gpu(g.worker_id % args.num_gpus)
    else:
        ctx = mx.cpu()

    train_nid = mx.nd.array(np.nonzero(train_mask.asnumpy())[0]).astype(np.int64)
    test_nid = mx.nd.array(np.nonzero(test_mask.asnumpy())[0]).astype(np.int64)

    n_classes = len(np.unique(labels.asnumpy()))
    n_train_samples = train_mask.sum().asscalar()
    n_val_samples = val_mask.sum().asscalar()
    n_test_samples = test_mask.sum().asscalar()

    if args.model == "gcn_ns":
        gcn_ns_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples)
    elif args.model == "gcn_cv":
        gcn_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, True)
    elif args.model == "graphsage_cv":
        graphsage_cv_train(g, ctx, args, n_classes, train_nid, test_nid, n_test_samples, True)
    else:
        print("unknown model. Please choose from gcn_ns, gcn_cv, graphsage_cv")
    print("parent ends")

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='GCN')
    register_data_args(parser)
    parser.add_argument("--model", type=str,
                        help="select a model. Valid models: gcn_ns, gcn_cv, graphsage_cv")
    parser.add_argument("--graph-name", type=str, default="",
                        help="graph name")
    parser.add_argument("--num-feats", type=int, default=100,
                        help="the number of features")
    parser.add_argument("--dropout", type=float, default=0.5,
                        help="dropout probability")
    parser.add_argument("--num-gpus", type=int, default=0,
                        help="the number of GPUs to train")
    parser.add_argument("--lr", type=float, default=3e-2,
                        help="learning rate")
    parser.add_argument("--n-epochs", type=int, default=200,
                        help="number of training epochs")
    parser.add_argument("--batch-size", type=int, default=1000,
                        help="batch size")
    parser.add_argument("--test-batch-size", type=int, default=1000,
                        help="test batch size")
    parser.add_argument("--num-neighbors", type=int, default=3,
                        help="number of neighbors to be sampled")
    parser.add_argument("--n-hidden", type=int, default=16,
                        help="number of hidden gcn units")
    parser.add_argument("--n-layers", type=int, default=1,
                        help="number of hidden gcn layers")
    parser.add_argument("--self-loop", action='store_true',
                        help="graph self-loop (default=False)")
    parser.add_argument("--weight-decay", type=float, default=5e-4,
                        help="Weight for L2 loss")
    args = parser.parse_args()

    print(args)
    main(args)
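# Hedged launch sketch: this trainer assumes a DGL graph store server is already
# serving `--graph-name` over shared memory; each worker then picks up its own
# `worker_id`. One illustrative way to fan out several trainer processes (the
# helper name and argument choices here are assumptions, not part of DGL's API):
import subprocess

def launch_trainers(num_trainers, graph_name, model="gcn_ns"):
    """Spawn `num_trainers` copies of multi_process_train.py and wait for all."""
    procs = [
        subprocess.Popen([
            "python", "multi_process_train.py",
            "--model", model,
            "--graph-name", graph_name,
        ])
        for _ in range(num_trainers)
    ]
    for p in procs:
        p.wait()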
{"hexsha": "872506f5a02937446fb4534f2432b8b7773cf336", "size": 3445, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/mxnet/_deprecated/sampling/multi_process_train.py", "max_stars_repo_name": "ketyi/dgl", "max_stars_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9516, "max_stars_repo_stars_event_min_datetime": "2018-12-08T22:11:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:04:33.000Z", "max_issues_repo_path": "examples/mxnet/_deprecated/sampling/multi_process_train.py", "max_issues_repo_name": "ketyi/dgl", "max_issues_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2494, "max_issues_repo_issues_event_min_datetime": "2018-12-08T22:43:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:16:27.000Z", "max_forks_repo_path": "examples/mxnet/_deprecated/sampling/multi_process_train.py", "max_forks_repo_name": "ketyi/dgl", "max_forks_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2529, "max_forks_repo_forks_event_min_datetime": "2018-12-08T22:56:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:07:41.000Z", "avg_line_length": 40.5294117647, "max_line_length": 94, "alphanum_fraction": 0.6682148041, "include": true, "reason": "import numpy", "num_tokens": 868}
# (C) Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

__author__ = "Takayuki Osogami"

import unittest
import numpy as np
from six.moves import xrange

import tests.simple
from tests.arraymath import NumpyTestMixin, CupyTestMixin
import pydybm.arraymath as amath
from pydybm.time_series.vector_regression import VectorRegressionWithVariance
from pydybm.time_series.vector_regression import VectorLogisticRegression
from pydybm.time_series.vector_regression import MultiTargetVectorRegression
from pydybm.base.sgd import AdaGrad


class VectorRegressionTestCase(object):
    """unit test for VectorRegression"""

    def setUp(self):
        self.max_repeat = 100000
        self.in_dim = 3    # dimension of input sequence
        self.out_dim = 2   # dimension of target sequence
        self.rate = 0.1    # learning rate

    def tearDown(self):
        pass

    def testGenerative(self):
        """testing minimal consistency in learning a sequence"""
        print("VectorRegressionTestCase.testGenerative")
        for order in [0, 2]:
            for SGD in [AdaGrad]:
                model = VectorRegressionWithVariance(
                    self.in_dim, self.in_dim, order, SGD())
                model.set_learning_rate(self.rate)
                i = tests.simple.test_real_model(model, self.max_repeat, True)
                self.assertLess(i, self.max_repeat)

    def testDiscriminative(self):
        """testing minimal consistency in learning a sequence to an output"""
        print("VectorRegressionTestCase.testDiscriminative")
        for order in [0, 2]:
            for SGD in [AdaGrad]:
                model = VectorRegressionWithVariance(self.in_dim, self.out_dim,
                                                     order, SGD())
                model.set_learning_rate(self.rate)
                i = tests.simple.test_real_model(model, self.max_repeat, True)
                self.assertLess(i, self.max_repeat)

    def testFifo(self):
        """testing fifo and _update_state method in VectorRegressionWithVariance"""
        print("\n * testing fifo and update_state method "
              "in VectorRegressionWithVariance \n")
        in_dim = 3
        order = 3
        len_ts = 10
        model = VectorRegressionWithVariance(in_dim, in_dim, order)
        random = amath.random.RandomState(0)
        in_patterns = amath.random.uniform(size=(len_ts, in_dim))
        fifo_test = amath.zeros((order, in_dim))
        for i in xrange(len_ts):
            self.assertTrue(amath.allclose(model.fifo.to_array(), fifo_test))
            model.learn_one_step(in_patterns[i])
            popped_in_pattern = model._update_state(in_patterns[i])
            if i < order:
                self.assertTrue(amath.allclose(popped_in_pattern,
                                               amath.zeros(in_dim)))
            else:
                self.assertTrue(amath.allclose(popped_in_pattern,
                                               in_patterns[i - order]))
            fifo_test[1:] = fifo_test[:-1]
            fifo_test[0] = in_patterns[i]


class VectorRegressionTestCaseNumpy(NumpyTestMixin,
                                    VectorRegressionTestCase,
                                    unittest.TestCase):
    pass


class VectorRegressionTestCaseCupy(CupyTestMixin,
                                   VectorRegressionTestCase,
                                   unittest.TestCase):
    pass


class VectorLogisticRegressionTestCase(object):
    """unit test for VectorLogisticRegression"""

    def setUp(self):
        self.rate = 0.1

    def tearDown(self):
        pass

    def testGenerative(self):
        """testing minimal consistency in learning a sequence"""
        print("VectorLogisticRegressionTestCase.testGenerative")
        in_dim = 3  # dimension of input sequence
        max_repeat = 10000
        for order in [1, 2]:
            for SGD in [AdaGrad]:
                model = VectorLogisticRegression(in_dim, in_dim, order, SGD())
                model.set_learning_rate(self.rate)
                i = tests.simple.test_binary_model(model, max_repeat)
                self.assertLess(i, max_repeat)

    def testDiscriminative(self):
        """testing minimal consistency in learning a sequence to an output"""
        print("VectorLogisticRegressionTestCase.testDiscriminative")
        in_dim = 3
        out_dim = 2  # dimension of output sequence
        max_repeat = 10000
        for order in [1, 2]:
            for SGD in [AdaGrad]:
                model = VectorLogisticRegression(in_dim, out_dim, order, SGD())
                model.set_learning_rate(self.rate)
                i = tests.simple.test_binary_model(model, max_repeat)
                self.assertLess(i, max_repeat)


class VectorLogisticRegressionTestCaseNumpy(NumpyTestMixin,
                                            VectorLogisticRegressionTestCase,
                                            unittest.TestCase):
    pass


class VectorLogisticRegressionTestCaseCupy(CupyTestMixin,
                                           VectorLogisticRegressionTestCase,
                                           unittest.TestCase):
    pass


class MultiTargetVectorRegressionTestCase(object):
    """unit test for MultiTargetVectorRegression"""

    def setUp(self):
        # learning rate
        self.rate = 0.01

    def testFifo(self):
        """testing fifo and _update_state method in MultiTargetVectorRegression"""
        print("\n * testing fifo and update_state method "
              "in MultiTargetVectorRegression \n")
        in_dim = 3
        out_dims = [2, 4]
        SGDs = [AdaGrad(), AdaGrad()]
        order = 3
        len_ts = 10
        model = MultiTargetVectorRegression(in_dim, out_dims, SGDs, order)
        random = amath.random.RandomState(0)
        in_patterns = amath.random.uniform(size=(len_ts, in_dim))
        out_pattern_0 = amath.random.uniform(size=(len_ts, out_dims[0]))
        out_pattern_1 = amath.random.uniform(size=(len_ts, out_dims[1]))
        fifo_test = amath.zeros((order, in_dim))
        for i in xrange(len_ts):
            self.assertTrue(amath.allclose(model.layers[0].fifo.to_array(),
                                           fifo_test))
            model.learn_one_step([out_pattern_0[i], out_pattern_1[i]])
            popped_in_pattern = model._update_state(in_patterns[i])
            if i < order:
                self.assertTrue(amath.allclose(popped_in_pattern,
                                               amath.zeros(in_dim)))
            else:
                self.assertTrue(amath.allclose(popped_in_pattern,
                                               in_patterns[i - order]))
            fifo_test[1:] = fifo_test[:-1]
            fifo_test[0] = in_patterns[i]


class MultiTargetVectorRegressionTestCaseNumpy(NumpyTestMixin,
                                               MultiTargetVectorRegressionTestCase,
                                               unittest.TestCase):
    pass


class MultiTargetVectorRegressionTestCaseCupy(CupyTestMixin,
                                              MultiTargetVectorRegressionTestCase,
                                              unittest.TestCase):
    pass


if __name__ == "__main__":
    unittest.main()
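# Aside (hedged): the fifo_test updates in the tests above implement a simple
# shift register -- the new pattern enters at row 0 and the oldest row falls off
# after `order` steps. A standalone NumPy sketch of that invariant, independent
# of pydybm:
import numpy as np

order, dim = 3, 2
fifo = np.zeros((order, dim))
popped_history = []
for t in range(6):
    x = np.full(dim, float(t))
    popped_history.append(fifo[-1].copy())  # oldest entry leaves the register
    fifo[1:] = fifo[:-1]                    # shift the register down one slot
    fifo[0] = x                             # newest pattern enters at the front
# once warmed up, the popped value at step t is the input from step t - order
assert np.allclose(popped_history[4], np.full(dim, 1.0))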
{"hexsha": "b8df261eb839b994ff309a3625128853647b1e73", "size": 7955, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tests/vector_regression_test.py", "max_stars_repo_name": "ibm-research-tokyo/dybm", "max_stars_repo_head_hexsha": "a6d308c896c2f66680ee9c5d05a3d7826cc27c64", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 126, "max_stars_repo_stars_event_min_datetime": "2017-09-04T11:53:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T12:47:56.000Z", "max_issues_repo_path": "src/tests/vector_regression_test.py", "max_issues_repo_name": "IBM-DSE/dybm", "max_issues_repo_head_hexsha": "3484e337954c017f0a20166403a6ddba4ce274c0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2017-10-14T03:26:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-22T22:22:42.000Z", "max_forks_repo_path": "src/tests/vector_regression_test.py", "max_forks_repo_name": "IBM-DSE/dybm", "max_forks_repo_head_hexsha": "3484e337954c017f0a20166403a6ddba4ce274c0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2017-09-04T13:40:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T14:02:53.000Z", "avg_line_length": 34.7379912664, "max_line_length": 86, "alphanum_fraction": 0.5940917662, "include": true, "reason": "import numpy", "num_tokens": 1603}
import numpy as np
import codecs
import json
import sys
import math
import scipy
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.linalg import eigh
from sklearn.cluster import KMeans


def load_json_files(file_path):
    '''
    Loads data from a json file

    Inputs:
        file_path   the path of the .json file that you want to read in

    Outputs:
        my_array    this is a numpy array if data is numeric, it's a list if it's a string
    '''
    # load data from json file
    with codecs.open(file_path, 'r', encoding='utf-8') as handle:
        json_data = json.loads(handle.read())

    # if a string, then returns list of strings
    if not isinstance(json_data[0], str):
        # otherwise, it's assumed to be numeric and returns numpy array
        json_data = np.array(json_data)

    return json_data


def gaussian_kernel(X, kernel_type="gaussian", sigma=3.0, k=5):
    """gaussian_kernel: Build an adjacency matrix for data using a Gaussian kernel

    Args:
        X (N x d np.ndarray): Input data
        kernel_type: "gaussian" or "adaptive". Controls bandwidth
        sigma (float): Scalar kernel bandwidth
        k (integer): nearest neighbor kernel bandwidth

    Returns:
        W (N x N np.ndarray): Weight/adjacency matrix induced from X
    """
    _g = "gaussian"
    _a = "adaptive"

    kernel_type = kernel_type.lower()
    D = squareform(pdist(X))

    if kernel_type == "gaussian":
        # gaussian bandwidth checking
        print("fixed bandwidth specified")
        if not all([type(sigma) is float, sigma > 0]):  # [float, positive]
            print("invalid gaussian bandwidth, using sigma = max(min(D)) as bandwidth")
            D_find = D + np.eye(np.size(D, 1)) * 1e15
            sigma = np.max(np.min(D_find, 1))
            del D_find
        sigma = np.ones(np.size(D, 1)) * sigma
    elif kernel_type == "adaptive":
        # adaptive bandwidth
        print("adaptive bandwidth specified")
        # [integer, positive, less than the total samples]
        if not all([type(k) is int, k > 0, k < np.size(D, 1)]):
            print("invalid adaptive bandwidth, using k=5 as bandwidth")
            k = 5
        knnDST = np.sort(D, axis=1)  # sorted neighbor distances
        sigma = knnDST[:, k]  # k-nn neighbor. 0 is self.
        del knnDST
    else:
        raise ValueError("kernel_type must be 'gaussian' or 'adaptive'")

    W = ((D**2) / sigma[:, np.newaxis]**2).T
    W = np.exp(-1 * (W))
    W = (W + W.T) / 2  # symmetrize
    W = W - np.eye(W.shape[0])  # remove the diagonal
    return W


def sbm(N, k, pij, pii, sigma):
    """sbm: Construct a stochastic block model

    Args:
        N (integer): Graph size
        k (integer): Number of clusters
        pij (float): Probability of intercluster edges
        pii (float): probability of intracluster edges

    Returns:
        A (numpy.array): Adjacency Matrix
        gt (numpy.array): Ground truth cluster labels
        coords (numpy.array): plotting coordinates for the sbm
    """
    p_matrix = np.random.uniform(0, 1, (k, k))
    gt = np.append(np.random.randint(1, k + 1, (N // k) * k),
                   np.array(range(1, (N % k) + 1)))
    A = np.zeros((N, N))
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            cluster_1 = gt[i]
            cluster_2 = gt[j]
            if cluster_1 == cluster_2:
                p = np.random.uniform(0, 1, 1)
                if p <= pii:
                    A[i][j] = 1
            else:
                p = np.random.uniform(0, 1, 1)
                if p <= pij:
                    A[i][j] = 1

    coords = np.zeros((N, 2))
    for idx, i in enumerate(gt):
        unit_circle_degree = 2 * np.pi / k * (i - 1)
        unit_circle_x = math.cos(unit_circle_degree)
        unit_circle_y = math.sin(unit_circle_degree)
        coords[idx, 0] = np.random.normal(unit_circle_x, sigma, 1)
        coords[idx, 1] = np.random.normal(unit_circle_y, sigma, 1)
    return A, gt, coords


def L(A, normalized=False):
    """L: compute a graph laplacian

    Args:
        A (N x N np.ndarray): Adjacency matrix of graph
        normalized (bool, optional): Normalized or combinatorial Laplacian

    Returns:
        L (N x N np.ndarray): graph Laplacian
    """
    n, m = A.shape
    D = np.diag(A.sum(axis=1).flatten())
    L = D - A
    if normalized == True:
        # D is diagonal, so the elementwise square root of its inverse is D^(-1/2)
        inverse_D = np.linalg.inv(D)**(1 / 2)
        L = inverse_D @ L @ inverse_D
    return L


def compute_fourier_basis(L):
    """compute_fourier_basis: Laplacian Diagonalization

    Args:
        L (N x N np.ndarray): graph Laplacian

    Returns:
        e (N x 1 np.ndarray): graph Laplacian eigenvalues
        psi (N x N np.ndarray): graph Laplacian eigenvectors
    """
    e, psi = eigh(L)
    return e, psi


def gft(s, psi):
    """gft: Graph Fourier Transform (GFT)

    Args:
        s (N x d np.ndarray): Matrix of graph signals. Each column is a signal.
        psi (N x N np.ndarray): graph Laplacian eigenvectors

    Returns:
        s_hat (N x d np.ndarray): GFT of the data
    """
    s_hat = psi.T @ s
    return s_hat


def igft(s_hat, psi):
    """igft: Inverse Graph Fourier Transform"""
    s = psi @ s_hat
    return s


def filterbank_matrix(psi, e, h):
    """filterbank_matrix: build a filter matrix using the input filter h

    Args:
        psi (N x N np.ndarray): graph Laplacian eigenvectors
        e (N x 1 np.ndarray): graph Laplacian eigenvalues
        h (function handle): A function that takes in eigenvalues
            and returns values in the interval (0,1)

    Returns:
        H (N x N np.ndarray): Filter matrix that can be used in the form
            filtered_s = H@s
    """
    H = psi @ np.diag(h(e)) @ psi.T
    return H


def kmeans(X, k, nrep=5, itermax=300):
    """kmeans: cluster data into k partitions

    Args:
        X (n x d np.ndarray): input data, rows = points, cols = dimensions
        k (int): Number of clusters to partition
        nrep (int): Number of repetitions to average for final clustering
        itermax (int): Number of iterations to perform before terminating

    Returns:
        labels (n x 1 np.ndarray): Cluster labels assigned by kmeans
    """
    dist_mat = np.zeros((k, X.shape[0]))
    n_points = X.shape[0]  # get the number of points in the data
    within_cluster_dist = np.zeros(nrep)
    nrep_labels = np.zeros((n_points, nrep))
    for rep in range(nrep):
        init = kmeans_plusplus(X, k)  # find your initial centroids
        old_labels = np.random.randint(0, k, n_points)  # randomly initialize old labels
        new_labels = old_labels.copy()
        num_same_assignment = 0  # number of same assignment
        # perform kmeans
        for iteration in range(itermax):
            if iteration % 100 == 0 and iteration > 0:
                print("iteration " + str(iteration))
            # calculate the euclidean distance between each data point and each centroid
            for c in range(0, k):
                for n in range(n_points):
                    dist_mat[c, n] = np.linalg.norm(X[n, :] - init[c, :]) ** 2
            # label each point with distance to closest centroid
            new_labels = np.argmin(dist_mat, axis=0)  # n dim
            # check if the assignment is the same as the last cycle
            if np.array_equal(new_labels, old_labels):
                num_same_assignment = num_same_assignment + 1
                # finish clustering if same assignment 3 times in a row
                if num_same_assignment == 3:
                    break
            else:
                num_same_assignment = 0  # continue clustering if not same assignment
            # calculate new centroids
            for c in range(0, k):
                init[c, :] = np.mean(X[new_labels == c, :], axis=0)
            # use new_labels as old_labels for the next cycle
            old_labels = new_labels
        nrep_labels[:, rep] = new_labels
        within_cluster_dist[rep] = np.sum(np.mean(dist_mat, axis=1))
    # choose the repetition with smallest within-cluster distance
    smallest_iteration = np.argmin(within_cluster_dist)
    labels = nrep_labels[:, smallest_iteration]
    return labels


def kmeans_plusplus(X, k):
    """kmeans_plusplus: initialization algorithm for kmeans

    Args:
        X (n x d np.ndarray): input data, rows = points, cols = dimensions
        k (int): Number of clusters to partition

    Returns:
        centroids (k x d np.ndarray): centroids for initializing k-means
    """
    centroids = []
    n_points = X.shape[0]
    centroids.append(X[np.random.randint(0, n_points, 1), :])

    # compute remaining k - 1 centroids
    for remaining_cluster in range(k - 1):
        # stores distances of points to the nearest chosen centroid
        dist = np.zeros(n_points)
        for i in range(n_points):
            d = sys.maxsize
            for j in range(len(centroids)):
                temp_dist = np.linalg.norm(X[i, :] - centroids[j])
                d = min(d, temp_dist)
            dist[i] = d
        next_centroid = X[np.random.choice(np.arange(len(dist)), size=1,
                                           p=dist / sum(dist)), :]
        centroids.append(next_centroid)
    return np.array(centroids).squeeze()


def SC(L, k, psi=None, nrep=5, itermax=300, sklearn=True):
    """SC: Perform spectral clustering via the Ng method

    Args:
        L (np.ndarray): Normalized graph Laplacian
        k (integer): number of clusters to compute
        nrep (int): Number of repetitions to average for final clustering
        itermax (int): Number of iterations to perform before terminating
        sklearn (boolean): Flag to use sklearn kmeans to test your algorithm

    Returns:
        labels (N x 1 np.array): Learned cluster labels
    """
    if psi is None:
        # compute the first k elements of the Fourier basis
        # use scipy.linalg.eigh
        e, psi = scipy.linalg.eigh(L)
        e_k = e[:k]
        psi_k = psi[:, :k]
    else:
        # just grab the first k eigenvectors
        psi_k = psi[:, :k]
    # normalize the rows of the truncated eigenvector matrix (Ng et al.)
    psi_k_row_sums = np.linalg.norm(psi_k, axis=1)
    psi_norm = psi_k / psi_k_row_sums[:, np.newaxis]
    if sklearn:
        labels = KMeans(n_clusters=k, n_init=nrep, max_iter=itermax).fit_predict(psi_norm)
    else:
        labels = kmeans(psi_norm, k, nrep=nrep, itermax=itermax)
    return labels
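# Hedged end-to-end sketch tying the helpers above together: sample a small SBM,
# build a Gaussian affinity from its plotting coordinates, form the normalized
# Laplacian, and spectrally cluster. All parameter values here are illustrative.
if __name__ == "__main__":
    from sklearn.metrics import adjusted_rand_score

    N, k = 120, 3
    A, gt, coords = sbm(N, k, pij=0.05, pii=0.7, sigma=0.2)
    W = gaussian_kernel(coords, kernel_type="adaptive", k=7)
    L_norm = L(W, normalized=True)
    labels = SC(L_norm, k, sklearn=True)
    print("ARI vs. ground truth:", adjusted_rand_score(gt, labels))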
{"hexsha": "7e923f6bf1d7878b67a6c498de747a8dda4251be", "size": 10449, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/test_functions2.py", "max_stars_repo_name": "flynn-chen/forest-fire-clustering", "max_stars_repo_head_hexsha": "98437a80d37e6ab842d1dae18feba1487e2eb0eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/test_functions2.py", "max_issues_repo_name": "flynn-chen/forest-fire-clustering", "max_issues_repo_head_hexsha": "98437a80d37e6ab842d1dae18feba1487e2eb0eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/test_functions2.py", "max_forks_repo_name": "flynn-chen/forest-fire-clustering", "max_forks_repo_head_hexsha": "98437a80d37e6ab842d1dae18feba1487e2eb0eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3498452012, "max_line_length": 105, "alphanum_fraction": 0.5940281367, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 2674}
# This is an anti-pattern to disable warnings
# I'm using it just for simplification
import warnings
warnings.filterwarnings("ignore")

import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pyLDAvis.sklearn
import re
import seaborn as sns
import spacy
import string

from collections import Counter
from io import StringIO
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from spacy.lang.en.stop_words import STOP_WORDS
from unicodedata import normalize
from wordcloud import WordCloud

# Local directory
ROOT_DIR = os.getcwd()

# NLP
nlp = spacy.load("en_core_web_sm")
stoplist = list(STOP_WORDS)
punctuations = string.punctuation


def load_text_file(filepath):
    f = open(filepath, "r")
    file = []
    for x in f:
        file.append(x)
    raw_text = " ".join([str(x) for x in file])
    df = pd.read_csv(StringIO(raw_text), delimiter="\n")
    df.columns = ["text"]
    return df


df = load_text_file(
    filepath="commission-white-paper-artificial-intelligence-feb2020_en.txt"
)

# Pre-Processing
special_by_space = re.compile('[/(){}\[\]"\|@,;]')


def clean_text(text):
    text = str(text)
    text = text.lower()
    text = text.replace("\n", " ")
    text = special_by_space.sub(" ", text)
    text = " ".join(word for word in text.split() if word not in stoplist)
    return text


def remove_punctuation(text):
    """
    This function removes the replacement_patterns from the input string.

    Parameters
    ----------
    text : String
        Input string to the function.

    Returns
    -------
    text : String
        Output string after replacement.
    """
    rem = string.punctuation
    pattern = r"[{}]".format(rem)
    text = re.sub(r"[-()\"#/@;:&<>{}`+=~|.!?,[\]©_*]", " ", text)
    text = text.replace(pattern, "")
    return text


def replace_ptbr_char_by_word(word):
    word = str(word)
    word = normalize("NFKD", word).encode("ASCII", "ignore").decode("ASCII")
    return word


def remove_pt_br_char_by_text(text):
    text = str(text)
    text = " ".join(
        replace_ptbr_char_by_word(word) for word in text.split() if word not in stoplist
    )
    return text


def get_word_frequency(df):
    # Word Frequency per Category
    def cleanup_text(docs, logging=False):
        texts = []
        counter = 1
        for doc in docs:
            if counter % 1000 == 0 and logging:
                print("Processed %d out of %d documents." % (counter, len(docs)))
            counter += 1
            doc = nlp(doc, disable=["parser", "ner"])
            tokens = [str(tok).lower().strip() for tok in doc if tok.lemma_ != "-PRON-"]
            tokens = [
                tok for tok in tokens if tok not in stoplist and tok not in punctuations
            ]
            tokens = " ".join(tokens)
            texts.append(tokens)
        return pd.Series(texts)

    df_text = [str(text) for text in df["text"]]
    df_text_clean = cleanup_text(df_text)
    df_text_clean = " ".join(df_text_clean).split()
    df_text_clean_counts = Counter(df_text_clean)
    df_common_words = [word[0] for word in df_text_clean_counts.most_common(41)]
    df_common_counts = [word[1] for word in df_text_clean_counts.most_common(41)]
    df_common_words.pop(0)
    df_common_counts.pop(0)

    fig = plt.figure(figsize=(18, 6))
    sns.barplot(x=df_common_words, y=df_common_counts)
    plt.title("Most Common Words used in European Commission White Paper on AI")
    plt.xticks(rotation=45)
    plt.show()


def show_wordcloud(text):
    # Create and generate a word cloud image:
    wordcloud = WordCloud(stopwords=stoplist, background_color="white").generate(text)

    # Display the generated image:
    fig = plt.figure(figsize=(25, 10))
    plt.imshow(wordcloud, interpolation="bilinear")
    plt.title("Word Cloud for European Commission White Paper on AI", fontsize=20)
    plt.axis("off")
    plt.show()


def get_wordcloud(df):
    # Get all texts and generate a cloud
    text = " ".join(str(review) for review in df.text)
    show_wordcloud(text)


def get_tfidf_df(df):
    # This one came from Analytics Vidhya
    # Ref: https://www.analyticsvidhya.com/blog/2018/04/a-comprehensive-guide-to-understand-and-implement-text-classification-in-python/
    # Generate the TF
    tf1 = (
        (df["text"][:])
        .apply(lambda x: pd.value_counts(x.split(" ")))
        .sum(axis=0)
        .reset_index()
    )
    tf1.columns = ["word", "tf"]

    # Remove some instances with NaN
    tf1 = tf1.dropna()
    df = df.dropna()

    # Calculate the log of the terms according to the TF
    for i, word in enumerate(tf1["word"]):
        tf1.loc[i, "idf"] = np.log(
            df.shape[0] / (len(df[df["text"].str.contains(word)]))
        )

    # Full calculation of TF-IDF
    tf1["tfidf"] = tf1["tf"] * tf1["idf"]
    return (
        tf1.head(300).sort_values(by=["tfidf"], ascending=False).reset_index(drop=True)
    )


def get_word_ngrams_list(df, word_ngram):
    def get_top_word_n_bigram(corpus, n=None):
        vec = CountVectorizer(ngram_range=(word_ngram, word_ngram)).fit(corpus)
        bag_of_words = vec.transform(corpus)
        sum_words = bag_of_words.sum(axis=0)
        words_freq = [
            (word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
        ]
        words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
        return words_freq[:n]

    common_words = get_top_word_n_bigram(df["text"], 20)
    df3 = pd.DataFrame(common_words, columns=["ngram", "qty"])
    return df3


def get_topics(df, n_components, number_words):
    # Convert to list
    data = df.text.values.tolist()

    # Remove special characters
    data = [re.sub("\S*@\S*\s?", "", sent) for sent in data]

    # Remove new line characters
    data = [re.sub("\s+", " ", sent) for sent in data]

    # Remove distracting single quotes
    data = [re.sub("'", "", sent) for sent in data]

    vectorizer = CountVectorizer(analyzer="word", stop_words=stoplist, lowercase=True)
    data_vectorized = vectorizer.fit_transform(data)

    # Materialize the sparse data
    data_dense = data_vectorized.todense()

    # Compute Sparsicity = Percentage of Non-Zero cells
    print("Sparsicity: ", ((data_dense > 0).sum() / data_dense.size) * 100, "%")

    # Build LDA Model
    lda_model = LatentDirichletAllocation(
        n_components=n_components,
        max_iter=10,
        learning_method="online",
        random_state=42,
        batch_size=10,
        evaluate_every=-1,
        n_jobs=-1,
    )
    lda_output = lda_model.fit_transform(data_vectorized)

    # Helper function
    def print_topics(model, count_vectorizer, n_top_words):
        words = count_vectorizer.get_feature_names()
        for topic_idx, topic in enumerate(model.components_):
            print("\nTopic #%d:" % topic_idx)
            print(
                " ".join([words[i] for i in topic.argsort()[: -n_top_words - 1 : -1]])
            )

    # Print the topics found by the LDA model
    print("Topics found via LDA:")
    lda_model.fit(data_vectorized)
    print_topics(lda_model, vectorizer, number_words)
    return lda_model, data_vectorized, data, lda_output, vectorizer


def get_lda_plot(lda_model, data_vectorized, vectorizer):
    pyLDAvis.enable_notebook()
    panel = pyLDAvis.sklearn.prepare(lda_model, data_vectorized, vectorizer, mds="tsne")
    return panel


df["text"] = df["text"].apply(
    lambda x: " ".join([word for word in x.split() if word not in (stoplist)])
)
df["text"] = df["text"].apply(remove_pt_br_char_by_text)
df["text"] = df["text"].apply(clean_text)
df["text"] = df["text"].str.replace("[^\w\s]", "")
df["text"] = df["text"].apply(remove_punctuation)
df["text"] = df["text"].str.strip()
df["text"] = df["text"].str.replace("\d+", "")

get_word_frequency(df)

get_wordcloud(df)

df_tfidf = get_tfidf_df(df)
df_tfidf.head(30)

get_word_ngrams_list(df, 2)
get_word_ngrams_list(df, 3)

lda_model, data_vectorized, data, lda_output, vectorizer = get_topics(
    df, n_components=7, number_words=7
)

get_lda_plot(lda_model, data_vectorized, vectorizer)
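# Aside (hedged): get_tfidf_df above computes the classic idf(t) = log(N / df(t))
# and tfidf = tf * idf. A tiny self-contained check of that arithmetic on a toy
# corpus (note: it uses exact token matching, slightly stricter than the
# str.contains substring match used above; values illustrative):
import numpy as np
import pandas as pd

toy = pd.DataFrame({"text": ["ai policy", "ai risk", "data policy"]})
tf_ai = sum(t.split().count("ai") for t in toy["text"])   # raw term frequency = 2
df_ai = sum("ai" in t.split() for t in toy["text"])       # document frequency = 2
idf_ai = np.log(len(toy) / df_ai)                         # log(3/2) ~ 0.405
print("tfidf(ai) =", tf_ai * idf_ai)                      # ~ 0.81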
{"hexsha": "a0335bcba503053fcfd2f770cafedc1561713e91", "size": 8172, "ext": "py", "lang": "Python", "max_stars_repo_path": "eu_ai.py", "max_stars_repo_name": "fclesio/european-union-ai", "max_stars_repo_head_hexsha": "efea836ac584d25d515d88ab96af0708aeaf9a87", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eu_ai.py", "max_issues_repo_name": "fclesio/european-union-ai", "max_issues_repo_head_hexsha": "efea836ac584d25d515d88ab96af0708aeaf9a87", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eu_ai.py", "max_forks_repo_name": "fclesio/european-union-ai", "max_forks_repo_head_hexsha": "efea836ac584d25d515d88ab96af0708aeaf9a87", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0818505338, "max_line_length": 136, "alphanum_fraction": 0.6518600098, "include": true, "reason": "import numpy", "num_tokens": 2108}
import copy
import os
import math
import numpy as np
import random


class Node(object):
    def __init__(self, idx, x, y, load, minTime, maxTime):
        super(Node, self).__init__()
        self.idx = idx
        self.x = x
        self.y = y
        self.load = load
        self.minTime = minTime
        self.maxTime = maxTime
        self.profit = 0


def getDist(location1, location2):
    x1 = allNodes[location1].x
    y1 = allNodes[location1].y
    x2 = allNodes[location2].x
    y2 = allNodes[location2].y
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5


def travelTime(startTime, startNode, endNode):
    # infeasible
    if startTime >= Tmax:
        return False
    arcCat = speedChooseMat[startNode][endNode]
    speed = speedMat[arcCat]
    distance = distMat[startNode][endNode]
    # determine start in which timezone
    for i in range(len(speed)):
        if startTime >= speed[i][0] and startTime < speed[i][1]:
            timezone = i
            break
    # calculate time taken
    timeTravelled = 0
    for i in range(timezone, len(speed)):
        maxDistance = (speed[i][1] - startTime) * speed[i][2]
        # if cannot reach destination in this timezone, check next timezone
        if distance > maxDistance:
            distance -= maxDistance
            timeTravelled += speed[i][1] - startTime
        # can reach in this timezone
        else:
            timeTravelled += distance / speed[i][2]
            distance = 0
            break
        startTime = speed[i][1]
    if distance == 0:
        return timeTravelled
    # cannot reach within the last timezone == infeasible
    else:
        return False


def checkFeasible(path, timing=False):  # timing=True to return time taken of the route
    if path == [0, 0]:
        return 0.0
    # check load capacity
    load = 0
    for i in range(len(path) - 1):
        nextNode = path[i + 1]
        load += allNodes[nextNode].load
        if load > maxCapacity:
            return False
    # check time
    startNode = path[0]
    firstNode = path[1]
    # get latest starting time to leave depot
    if allNodes[firstNode].minTime > travelTime(0, startNode, firstNode):
        startTime = allNodes[firstNode].minTime - travelTime(0, startNode, firstNode)
    else:
        startTime = 0
    # calculate time to reach back to the depot
    time = 0
    profit = 0
    for i in range(len(path) - 1):
        currNode = path[i]
        nextNode = path[i + 1]
        timeTaken = travelTime(time, currNode, nextNode)
        if timeTaken and time < allNodes[nextNode].maxTime:
            time = max(time + timeTaken, allNodes[nextNode].minTime)
        else:
            return False
        profit += allNodes[nextNode].profit
    # passed all checks, calculate profit
    objFunction = profit - time + startTime
    if timing:
        return time
    if objFunction > 0:
        return objFunction
    else:
        return False


def BFS():
    basicSolution = [[0, 0] for _ in range(noOfVehicles)]
    remainingPickups = copy.deepcopy(pickupNodes)
    currOptimalSolution = []
    for vehicle in basicSolution:
        if remainingPickups != {}:
            # initialise the next best pickup
            bestTiming = float('inf')
            pickupFlag = False
            # insert earliest pickup option to allow for more pickups later
            for item in remainingPickups:
                testPath = copy.deepcopy(vehicle)
                testPath.insert(-1, item)
                testPath.insert(-1, pickupDeliveryPair[item])
                timing = checkFeasible(testPath, timing=True)
                score = checkFeasible(testPath)
                if timing and score and timing < bestTiming:
                    bestTiming = timing
                    bestPath = testPath
                    pickupFlag = True
            if pickupFlag:  # check if there is an initial feasible pickup
                vehicle = bestPath
                remainingPickups.pop(vehicle[-3])  # remove last route taken
            # insert the rest based on objective function
            bestScore = checkFeasible(vehicle)
            availablePaths = True
            while availablePaths:
                availablePaths = False
                for item in remainingPickups:
                    testPath = copy.deepcopy(vehicle)
                    testPath.insert(-1, item)  # insert pickup node
                    testPath.insert(-1, pickupDeliveryPair[item])  # insert delivery node
                    score = checkFeasible(testPath)
                    if score and score > bestScore:
                        availablePaths = True
                        bestScore = score
                        bestPath = testPath
                if availablePaths:
                    vehicle = bestPath
                    remainingPickups.pop(vehicle[-3])  # remove last route taken
        currOptimalSolution.append(vehicle)
    return currOptimalSolution


def RandomBFS():
    basicSolution = [[0, 0] for _ in range(noOfVehicles)]
    remainingPickups = copy.deepcopy(pickupNodes)
    currOptimalSolution = []
    counter = 0
    while remainingPickups != {}:
        # initialise the next best pickup
        bestTiming = float('inf')
        pickupFlag = False
        # insert earliest pickup option to allow for more pickups later
        car = np.random.randint(len(basicSolution))
        vehicle = basicSolution[car]
        for item in remainingPickups:
            testPath = copy.deepcopy(vehicle)
            testPath.insert(-1, item)
            testPath.insert(-1, pickupDeliveryPair[item])
            timing = checkFeasible(testPath, timing=True)
            score = checkFeasible(testPath)
            counter += 1
            if timing and score and timing < bestTiming:
                bestTiming = timing
                bestPath = testPath
                pickupFlag = True
        if pickupFlag:  # check if there is an initial feasible pickup
            vehicle = bestPath
            remainingPickups.pop(vehicle[-3])  # remove last route taken
            basicSolution[car] = vehicle
        if counter > 10000:
            break
        currOptimalSolution.append(vehicle)
    return basicSolution


'''inputs'''
os.chdir('input/')
questionInput = open('Prob-30A-50.txt', 'r')
questionInput = questionInput.readlines()
noOfVehicles = int(questionInput[0])
maxCapacity = int(questionInput[1])
Tmax = int(questionInput[2])
depot = questionInput[5].replace(',', '.').split()
depot = Node(int(depot[0]), float(depot[1]), float(depot[2]), 0, 0, Tmax)

pickupNodes = {}
requests = 0
for i in range(9, 999):
    # additional logic to detect end of pick up nodes
    if len(questionInput[i]) < 3:
        break
    else:
        node = questionInput[i].replace(',', '.').split()
        if node == []:
            break
        pickupNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]),
                                         int(node[4]), float(node[6]), float(node[7]))
        # count number of requests
        requests += 1

deliveryNodes = {}
for i in range(9 + requests + 3, 9 + requests + 3 + requests):
    node = questionInput[i].replace(',', '.').split()
    deliveryNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]),
                                       int(node[4]), float(node[6]), float(node[7]))
    deliveryNodes[int(node[0])].profit = 80  # each node's profit upon delivery

allNodes = {0: depot, **pickupNodes, **deliveryNodes}

# build the pickup delivery matching dict
pickupDeliveryPair = {}
iter = 1
for item in deliveryNodes:
    pickupDeliveryPair[iter] = deliveryNodes[item].idx
    iter += 1

speedMat = []
# blockcount = 9+requests+3+requests+2 brings you to the first speed pattern in input file
blockcount = 9 + requests + 3 + requests + 2
for i in range(5):
    speed = []
    for j in range(i * 6 + blockcount, i * 6 + (blockcount + 4)):
        time = questionInput[j].replace(',', '.').split()
        speed.append([float(time[0]), float(time[1]), float(time[3])])
    speedMat.append(speed)

speedChooseMat = []
# use blockcount to read the speed choose matrix
for i in range(blockcount + 31, blockcount + 31 + 2 * requests + 1):
    speedChooseMat.append([int(i) for i in questionInput[i].replace(',', '.').split()])

''' processing input '''
# calculate distance matrix
# total lines required = 2*requests + 2 (but we minus 1 because range starts from 0)
distMat = [[0.0] * (2 * requests + 2 - 1) for i in range(2 * requests + 2 - 1)]
for i in range(2 * requests + 2 - 1):
    for j in range(i + 1, 2 * requests + 2 - 1):
        dist = getDist(i, j)
        distMat[i][j] = dist
        distMat[j][i] = dist


def Profit_Solution(s1):
    Totaltime = 0
    for k_ in range(len(s1)):
        s1[k_][1] = checkFeasible(s1[k_][0])
        # print(s1[k_][1])
        if s1[k_][1] is not False and s1[k_][1] >= 0:
            Totaltime += s1[k_][1]
        elif s1[k_][1] is False:
            Totaltime = 0
            break
    return Totaltime


''' generate BFS '''
currOptimalSolution = RandomBFS()

''' how to use checking function '''
# currOptimalSolution = [
#     [0,1,11,3,13,0],
#     [0,10,20,0],
#     [0,5,15,6,16,0]]  # just an example
# # iterate based on this
# print(currOptimalSolution)

s0 = []
for vehicle in currOptimalSolution:
    oneroute = [vehicle, checkFeasible(vehicle)]
    s0.append(oneroute)

nn = len(pickupNodes)
profit0 = Profit_Solution(s0)
print('******* initial solution')
print(profit0)
print(*s0, sep="\n")
# print(*s0)
print('******** initial solution')
# print('score:' + str(checkFeasible(vehicle)) + ' route:' + str(vehicle))


def Remove_node(s, tour, node_pos, data=None):
    # data defaulted as None because the data structure is not in use
    # print(s[tour])
    # print('Remove ' + str(s[tour][0][node_pos]) + ' at ' + str(node_pos) + ' from tour ' + str(tour))
    del s[tour][0][node_pos]


def Insert_node(s, node, tour, point):
    # node_1 = s[tour][point - 1]  # Original code uses s[tour][0]
    # arr.insert(point, node)
    # print('Insert ' + str(node) + ' at ' + str(point) + ' from tour ' + str(tour))
    s[tour][0].insert(point, node)


# s0 = [[0, 14, 29, 11, 26, 1, 16, 3, 18, 0], 138.45952332463185], [[0, 10, 25, 0], 18.873851271071658], [
#     [0, 8, 23, 6, 21, 13, 28, 5, 20, 15, 30, 4, 19, 2, 17, 0], 366.359325517866], [[0, 7, 22, 0], 29.4378156146646], [
#     [0, 0], False]


def NEIGHBOR_VRP(s, halfnumberofnode, data=None):
    # 1. randomly pick a tour 1
    # for i_ in range(4):
    n = halfnumberofnode
    tour_a = np.random.randint(len(s))
    while len(s[tour_a][0]) <= 4:
        tour_a = np.random.randint(len(s))
    # print(tour_a)
    pos_a = np.random.randint(len(s[tour_a][0]) - 2) + 1  # random int from [1, n-2]; n-1 is the last node - depot
    # 2. randomly pick a tour 2
    tour_b = np.random.randint(len(s))
    # print(tour_b)
    if tour_a == tour_b and len(s[tour_a][0]) >= 6:  # at least two pairs
        point_b = pos_a
        while point_b == pos_a:
            point_b = np.random.randint(len(s[tour_a][0]) - 1) + 1
        # remove node_a
        sNew = copy.deepcopy(s)
        temp_node = sNew[tour_a][0][pos_a]
        # 2.1 if sNew[tour_a][0][pos_a] is a delivery node: (temp_node > n)
        if sNew[tour_a][0][pos_a] > n:
            pickup_node = sNew[tour_a][0][pos_a] - n
            pos_c = sNew[tour_a][0].index(pickup_node)
            # print(sNew[tour_a][0][pos_a])
            # print(pickup_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(pickup_node)
            # Remove_node(sNew, tour_a, pos_a, data)
            # Remove_node(sNew, tour_a, pos_c, data)
            # make sure sNew is updated after deleting two nodes
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            while point_c > point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_a, point_b)
            Insert_node(sNew, pickup_node, tour_a, point_c)
            return sNew
        # 2.2 if sNew[tour_a][0][pos_a] is a pickup node: (temp_node <= n)
        elif sNew[tour_a][0][pos_a] <= n:
            delivery_node = sNew[tour_a][0][pos_a] + n
            pos_c = sNew[tour_a][0].index(delivery_node)
            # print(sNew[tour_a][0][pos_a])
            # print(delivery_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(delivery_node)
            # Remove_node(sNew, tour_a, pos_c, data)
            # Remove_node(sNew, tour_a, pos_a, data)
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_b, point_b)
            point_c = np.random.randint(len(sNew[tour_a][0]) - 1) + 1
            while point_c <= point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, delivery_node, tour_a, point_c)
            return sNew
    # 3. if tour 1 != tour 2:
    elif tour_a != tour_b:  # and len(s[tour_a][0]) >= 6:  # at least two pairs
        point_b = pos_a
        while point_b == pos_a:
            point_b = np.random.randint(len(s[tour_a][0]) - 1) + 1
        # remove node_a
        sNew = copy.deepcopy(s)
        temp_node = sNew[tour_a][0][pos_a]
        # 3.1 if sNew[tour_a][0][pos_a] is a delivery node: (temp_node > n)
        if sNew[tour_a][0][pos_a] > n:
            pickup_node = sNew[tour_a][0][pos_a] - n
            pos_c = sNew[tour_a][0].index(pickup_node)
            # print(sNew[tour_a][0][pos_a])
            # print(pickup_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(pickup_node)
            # Remove_node(sNew, tour_a, pos_a, data)
            # Remove_node(sNew, tour_a, pos_c, data)
            # make sure sNew is updated after deleting two nodes
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            while point_c > point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_b, point_b)
            Insert_node(sNew, pickup_node, tour_b, point_c)
            return sNew
        # 3.2 if sNew[tour_a][0][pos_a] is a pickup node: (temp_node <= n)
        elif sNew[tour_a][0][pos_a] <= n:
            delivery_node = sNew[tour_a][0][pos_a] + n
            pos_c = sNew[tour_a][0].index(delivery_node)
            # print(sNew[tour_a][0][pos_a])
            # print(delivery_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(delivery_node)
            # Remove_node(sNew, tour_a, pos_c, data)
            # Remove_node(sNew, tour_a, pos_a, data)
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_b, point_b)
            point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            while point_c <= point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, delivery_node, tour_b, point_c)
            return sNew


# nn = 15
# s1 = NEIGHBOR_VRP(s0, nn)
# print(s1)


def get_population(S0, halfnodes=nn):
    candidate = []
    for n_ in range(20):
        s1 = NEIGHBOR_VRP(S0, halfnodes)
        profit = Profit_Solution(s1)
        routes = []
        # for solu in s1:
        #     # solu[0].pop(0)
        #     # solu[0].pop()
        #     route = solu[0]
        #     # print(route)
        #     routes += route
        # routes.insert(0, 0)
        # routes.append(0)
        candidate.append((s1, profit))
    candidate.sort(key=lambda x: x[1], reverse=True)
    res = [x[0] for x in candidate]
    return res


# GA Part
def selection(candidate):
    # retain_rate = 0.3
    # graded = candidate.sort(key=lambda x: x[1], reverse=True)
    # graded = [x[0] for x in candidate]
    graded = [x for x in candidate]
    retain_length = int(len(graded) * retain_rate)
    parents = graded[:retain_length]
    for chromosome in graded[retain_length:]:
        if random.random() < random_select_rate:
            parents.append(chromosome)
    return parents


def rank_route(s):
    # routes = []
    for route in s:
        temp = []
        route = route.sort(key=lambda x: x[1], reverse=True)
        # route = [i[0] for i in route]
        # for i in route:
        #     temp.append(route[0])
        # routes.append(temp)
    return s


def remove_profit(s):
    S = []
    for i in s:
        J = []
        for j in i:
            J.append(j[0])
        S.append(J)
    return S


def Crossover(parents):
    count = 20
    target_count = count - len(parents)
    # list of children
    children = []
    while len(children) < target_count:
        male_index = random.randint(0, len(parents) - 1)
        female_index = random.randint(0, len(parents) - 1)
        if male_index != female_index:
            male = parents[male_index]
            female = parents[female_index]
            left = random.randint(0, len(male) - 2)
            right = random.randint(left + 1, len(male) - 1)
            # crossover segments
            gene1 = male[left:right]
            gene2 = female[left:right]
            child1_c = male[right:] + male[:right]
            child2_c = female[right:] + female[:right]
            child1 = child1_c.copy()
            child2 = child2_c.copy()
            for o in gene2:
                child1_c.remove(o)
            for o in gene1:
                child2_c.remove(o)
            child1[left:right] = gene2
            child2[left:right] = gene1
            child1[right:] = child1_c[0:len(child1) - right]
            child1[:left] = child1_c[len(child1) - right:]
            child2[right:] = child2_c[0:len(child1) - right]
            child2[:left] = child2_c[len(child1) - right:]
            children.append(child1)
            children.append(child2)
    return children


def crossover(parents):
    count = 20
    target_count = count - len(parents)
    # list of children
    children = []
    while len(children) < target_count:
        male_index = random.randint(0, len(parents) - 1)
        female_index = random.randint(0, len(parents) - 1)
        if male_index != female_index:
            male = parents[male_index]
            female = parents[female_index]
            # routeindex = random.sample(range(0, noOfVehicles), int(noOfVehicles/2))
            child = []
            used = set([0])
            # for index in routeindex:
            #     child.append(male[index])
            #     for node in male[index]:
            #         used.add(node)
            for i in range(len(male)):
                comb = male[i] + female[i]
                comb = list(dict.fromkeys(comb))
                comb = [x for x in comb if x not in used]
                set1 = set(comb)
                used.update(set1)
                child.append([0] + comb + [0])
            children.append(child)
    return children


def profit_children(Solution):
    res = []
    for currOptimalSolution in Solution:
        s = []
        for vehicle in currOptimalSolution:
            oneroute = [vehicle, checkFeasible(vehicle)]
            s.append(oneroute)
        res.append(s)
    return res


def mutation(children):
    for i in range(len(children)):
        if random.random() < mutation_rate:
            child = children[i]
            for j in range(random.randint(1, len(child) - 4)):
                child = NEIGHBOR_VRP(child, nn, data=None)
            children[i] = child
    return children


def find_max(pop):
    max = []
    for s1 in pop:
        profit = Profit_Solution(s1)
        max.append((s1, profit))
    max.sort(key=lambda x: x[1], reverse=True)
    return max[0]


pop = get_population(s0, nn)
retain_rate = 0.3
random_select_rate = 0.5
mutation_rate = 0.4

# selected = selection(pop)
# selected = rank_route(selected)
# grandpare = remove_profit(selected)
# crossed = crossover(grandpare)
# children = profit_children(crossed)
# children = mutation(children)
# pop = selected + children
# pop = get_population(newpop, nn)
# selected = selection(pop)

maxprofit = 0
register = []
i = 0
itter_time = 100
while i < itter_time:
    # select the breeding population
    selected = selection(pop)
    # selected = rank_route(selected)
    grandpare = remove_profit(selected)
    # crossover / breeding
    crossed = crossover(grandpare)
    children = profit_children(crossed)
    # mutation
    children = mutation(children)
    # update the population
    pop = selected + children
    [bestroute, bestprofit] = find_max(pop)
    if maxprofit < bestprofit:
        maxprofit = bestprofit
        # print('New best profit is ' + str(maxprofit))
        # print('New best route is ' + str(bestroute))
    i = i + 1

print('Current population is :')
print(*pop, sep='\n')
print('---------------------------------------------------------------------')
# print('The best profit is ' + str(maxprofit))
# print('The best route is ' + str(bestroute))
print(maxprofit)

# def decoding2(corssed):
#     decoded = []
#     for routes in crossed:
#         start = routes.index(0)
#         if routes[start + 1] == 0:
#             start += 1
#         newroutes = routes[start:] + routes[:start]
#         routes = newroutes
#         routes.append(0)
#         # print(routes)
#         l = []
#         for w in routes:
#             if start: start = l.append([])  # l.append() returns None, that is falsey...
#             if w != 0: l[-1].append(w)
#             if w == 0: start = 1
#         res = []
#         for m in l:
#             if m != []:
#                 res.append([0] + m + [0])
#         while len(res) < noOfVehicles:
#             res.append([0, 0])
#         # soluion0 = [res, checkFeasible(res)]
#         # soluion0.append(oneroute)
#         decoded.append(res)
#     return decoded

# s2 = []
# for n_ in crossed_res:
#     oneroute_ = [n_, checkFeasible(n_)]
#     s2.append(oneroute_)

# print(allNodes[25].load)
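# Aside (hedged): the neighborhood move above relies on the index convention that
# delivery node = pickup node + n (n = number of pickup requests). A tiny sanity
# check of that pairing, independent of the input file (the helper name is
# illustrative, not part of the script above):
def paired(node, n):
    """Return the partner of `node` under the pickup/delivery index convention."""
    return node - n if node > n else node + n

n_requests = 15
assert paired(3, n_requests) == 18   # pickup 3 <-> delivery 18
assert paired(18, n_requests) == 3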
{"hexsha": "bcfbd567ed08786b87b9a99550d434769f5476b2", "size": 22876, "ext": "py", "lang": "Python", "max_stars_repo_path": "Meta-heuristic project/Algorithm Codes/Project 3 - GA- V01.py", "max_stars_repo_name": "nusstu-dz/IE5600-Applied-Programming-for-Industrial-Systems", "max_stars_repo_head_hexsha": "2289d9a63f49d8d730a671de1b491acdbdca9650", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Meta-heuristic project/Algorithm Codes/Project 3 - GA- V01.py", "max_issues_repo_name": "nusstu-dz/IE5600-Applied-Programming-for-Industrial-Systems", "max_issues_repo_head_hexsha": "2289d9a63f49d8d730a671de1b491acdbdca9650", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Meta-heuristic project/Algorithm Codes/Project 3 - GA- V01.py", "max_forks_repo_name": "nusstu-dz/IE5600-Applied-Programming-for-Industrial-Systems", "max_forks_repo_head_hexsha": "2289d9a63f49d8d730a671de1b491acdbdca9650", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8164116829, "max_line_length": 121, "alphanum_fraction": 0.552587865, "include": true, "reason": "import numpy", "num_tokens": 6113}
struct Handler{P<:AbstractPath} path::P settings # Could be Vector or Pairs on 0.6 or 1.0 respectively end """ Handler(path::Union{String, AbstractPath}; kwargs...) Handler(bucket::String, prefix::String; kwargs...) Handles iteratively saving JLSO file to the specified path location. FilePath are used to abstract away differences between paths on S3 or locally. """ Handler(path::AbstractPath; kwargs...) = Handler(path, kwargs) Handler(path::String; kwargs...) = Handler(Path(path), kwargs) Handler(bucket::String, prefix::String; kwargs...) = Handler(S3Path("s3://$bucket/$prefix"), kwargs) """ path(handler, name) Determines the path to save to based on the handlers path prefix, name, and context. Tags are used to dynamically prefix the named file with the handler's path. Names with a '.' separators will be used to form subdirectories (e.g., "Foo.bar.x" will be saved to "\$prefix/Foo/bar/x.jlso"). """ function path(handler::Handler{P}, name::String) where P prefix = ["$key=$val" for (key,val) in CONTEXT_TAGS[]] # Split up the name by '.' and add the jlso extension parts = split(name, '.') parts[end] = string(parts[end], ".jlso") return join(handler.path, prefix..., parts...) end """ stage!(handler::Handler, jlso::JLSOFIle, data::Dict{Symbol}) Update the JLSOFile with the new data. """ function stage!(handler::Handler, jlso::JLSO.JLSOFile, data::Dict{Symbol}) for (k, v) in data jlso[k] = v end return jlso end """ commit!(handler, path, jlso) Write the JLSOFile to the path as bytes. """ function commit!(handler::Handler{P}, path::P, jlso::JLSO.JLSOFile) where P <: AbstractPath # NOTE: This is only necessary because FilePathsBase.FileBuffer needs to support # write(::FileBuffer, ::UInt8) # https://github.com/rofinn/FilePathsBase.jl/issues/45 io = IOBuffer() write(io, jlso) bytes = take!(io) mkdir(parent(path); recursive=true, exist_ok=true) write(path, bytes) end function checkpoint(handler::Handler, name::String, data::Dict{Symbol}; tags...) checkpoint_deprecation(tags...) with_checkpoint_tags(tags...) do debug(LOGGER, "Checkpoint $name triggered, with context: $(join(CONTEXT_TAGS[], ", ")).") jlso = JLSO.JLSOFile(Dict{Symbol, Vector{UInt8}}(); handler.settings...) p = path(handler, name) stage!(handler, jlso, data) commit!(handler, p, jlso) end end #= Define our no-op conditions just to be safe =# function checkpoint(handler::Nothing, name::String, data::Dict{Symbol}; tags...) checkpoint_deprecation(tags...) with_checkpoint_tags(tags...) do debug(LOGGER, "Checkpoint $name triggered, but no handler has been set.") nothing end end
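# Usage sketch (hedged, written as comments since CONTEXT_TAGS, LOGGER, and JLSO
# live elsewhere in Checkpoints.jl). The path and checkpoint name below are
# illustrative only.
#
#   using Checkpoints, FilePathsBase
#
#   handler = Handler("./checkpoints")   # String method above converts to a Path
#   # or, for S3: handler = Handler("my-bucket", "experiments/run-1")
#   checkpoint(handler, "Foo.bar.x", Dict(:weights => rand(3)))
#   # `path` expands context tags and '.' separators, so this would land at
#   # ./checkpoints/<key=val tags...>/Foo/bar/x.jlso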
{"hexsha": "f8da4cb2683edc8cd3b4c7eed31f00d5376d28b6", "size": 2771, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/handler.jl", "max_stars_repo_name": "aisopous/Checkpoints.jl", "max_stars_repo_head_hexsha": "708cde9c3e6bd4e3b25bad15a992363a54c4ae82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/handler.jl", "max_issues_repo_name": "aisopous/Checkpoints.jl", "max_issues_repo_head_hexsha": "708cde9c3e6bd4e3b25bad15a992363a54c4ae82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/handler.jl", "max_forks_repo_name": "aisopous/Checkpoints.jl", "max_forks_repo_head_hexsha": "708cde9c3e6bd4e3b25bad15a992363a54c4ae82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6, "max_line_length": 100, "alphanum_fraction": 0.6755683869, "num_tokens": 725}
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
This is the implementation of the CAB method.
Ref: Babakhani, Pedram, and Parham Zarei. "Automatic gamma correction based on
     average of brightness." Advances in Computer Science: an International
     Journal 4.6 (2015): 156-159.

Author: Yong Lee
E-Mail: yongli.cv@gmail.com
C-Data: 2019.04.10
"""
import cv2
import time
import numpy as np


def CAB(image, mask=None):
    """
    :param image: input image, color (3 channels) or gray (1 channel);
    :param mask: calc gamma value in the mask area, default is the whole image;
    :return: gamma, and output
    """
    # Step 1. Check the inputs: image
    if np.ndim(image) == 3 and image.shape[-1] == 3:  # color image
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        img = hsv[:, :, 2]
        color_flag = True
    elif np.ndim(image) == 2:  # gray image
        img = image
        color_flag = False
    else:
        print("ERROR: check the input image of CAB function...")
        return None

    # Step 3. Main steps of CAB
    # Step 3.1 image normalization to range (0,1)
    img_n = (img + 0.5) / 256

    # Step 3.2 calculate the gamma
    if mask is not None:
        mask = mask < 128
        img_n[mask] = np.NaN
    gamma = -np.log(2.0) / np.log(np.nanmean(img_n[:]))

    # Step 3.3 whether to optimize for the human visual system
    # Step 3.4 apply gamma transformation
    output = np.power(img_n, gamma)

    # Step 4.0 stretch back and post-process
    output = (output * 256 - 0.5).round().astype(np.uint8)
    if mask is not None:
        output[mask] = 0
    if color_flag:
        hsv[:, :, 2] = output
        output = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return gamma, output


# test function
def simple_example():
    image = cv2.imread(r"../../images/natural_image_sets/CBSD68/14037.png")
    visual = True
    start_time = time.time()
    gamma, output = CAB(image)
    end_time = time.time()
    print("Estimated gamma = " + str(gamma) + ", with time cost = " + str(end_time - start_time) + "s")

    # cv2.namedWindow("input", cv2.WINDOW_NORMAL)
    # cv2.namedWindow("output", cv2.WINDOW_NORMAL)
    # cv2.imshow("input", image)
    # cv2.imshow("output", output)
    # cv2.waitKey()
    import matplotlib.pyplot as plt
    plt.figure()
    plt.imshow(image[:, :, ::-1])
    plt.title("Before: CAB")
    plt.figure()
    plt.imshow(output[:, :, ::-1])
    plt.title("After: CAB")
    plt.show()


if __name__ == '__main__':
    simple_example()
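# Aside (hedged): the gamma formula above comes from forcing the corrected mean
# brightness toward 0.5: mean(I)^gamma = 0.5  =>  gamma = log(0.5)/log(mean(I))
# = -log(2)/log(mean(I)). A quick numeric check on a synthetic dark image
# (values illustrative; the corrected mean is only approximately 0.5 because
# mean(I^gamma) != mean(I)^gamma in general):
import numpy as np

img_n = np.clip(np.random.normal(0.25, 0.05, (64, 64)), 0.01, 0.99)  # dark image
gamma = -np.log(2.0) / np.log(img_n.mean())
print("gamma =", gamma)                                    # ~0.5; gamma < 1 brightens
print("corrected mean =", np.power(img_n, gamma).mean())   # close to 0.5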
{"hexsha": "b63b6f1255208b05bd474b8a8a03e9b32562f294", "size": 2645, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/methods/CAB.py", "max_stars_repo_name": "yongleex/GCME", "max_stars_repo_head_hexsha": "77227e70605069c4fbfec570621fd19efdce1da4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-12-08T05:31:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T20:37:55.000Z", "max_issues_repo_path": "code/methods/CAB.py", "max_issues_repo_name": "yongleex/GCME", "max_issues_repo_head_hexsha": "77227e70605069c4fbfec570621fd19efdce1da4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/methods/CAB.py", "max_forks_repo_name": "yongleex/GCME", "max_forks_repo_head_hexsha": "77227e70605069c4fbfec570621fd19efdce1da4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-17T04:42:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-17T04:42:48.000Z", "avg_line_length": 28.75, "max_line_length": 103, "alphanum_fraction": 0.5996219282, "include": true, "reason": "import numpy", "num_tokens": 730}
[STATEMENT] lemma irreducible\<^sub>d_def_0: fixes f :: "'a :: {comm_semiring_1,semiring_no_zero_divisors} poly" shows "irreducible\<^sub>d f = (degree f \<noteq> 0 \<and> (\<forall> g h. degree g \<noteq> 0 \<longrightarrow> degree h \<noteq> 0 \<longrightarrow> f \<noteq> g * h))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. irreducible\<^sub>d f = (degree f \<noteq> 0 \<and> (\<forall>g h. degree g \<noteq> 0 \<longrightarrow> degree h \<noteq> 0 \<longrightarrow> f \<noteq> g * h)) [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. irreducible\<^sub>d f = (degree f \<noteq> 0 \<and> (\<forall>g h. degree g \<noteq> 0 \<longrightarrow> degree h \<noteq> 0 \<longrightarrow> f \<noteq> g * h)) [PROOF STEP] have "degree g \<noteq> 0 \<Longrightarrow> g \<noteq> 0" for g :: "'a poly" [PROOF STATE] proof (prove) goal (1 subgoal): 1. degree g \<noteq> 0 \<Longrightarrow> g \<noteq> 0 [PROOF STEP] by auto [PROOF STATE] proof (state) this: degree ?g \<noteq> 0 \<Longrightarrow> ?g \<noteq> 0 goal (1 subgoal): 1. irreducible\<^sub>d f = (degree f \<noteq> 0 \<and> (\<forall>g h. degree g \<noteq> 0 \<longrightarrow> degree h \<noteq> 0 \<longrightarrow> f \<noteq> g * h)) [PROOF STEP] note 1 = degree_mult_eq[OF this this, simplified] [PROOF STATE] proof (state) this: \<lbrakk>0 < degree ?p; 0 < degree ?q\<rbrakk> \<Longrightarrow> degree (?p * ?q) = degree ?p + degree ?q goal (1 subgoal): 1. irreducible\<^sub>d f = (degree f \<noteq> 0 \<and> (\<forall>g h. degree g \<noteq> 0 \<longrightarrow> degree h \<noteq> 0 \<longrightarrow> f \<noteq> g * h)) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<lbrakk>0 < degree ?p; 0 < degree ?q\<rbrakk> \<Longrightarrow> degree (?p * ?q) = degree ?p + degree ?q [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: \<lbrakk>0 < degree ?p; 0 < degree ?q\<rbrakk> \<Longrightarrow> degree (?p * ?q) = degree ?p + degree ?q goal (1 subgoal): 1. irreducible\<^sub>d f = (degree f \<noteq> 0 \<and> (\<forall>g h. degree g \<noteq> 0 \<longrightarrow> degree h \<noteq> 0 \<longrightarrow> f \<noteq> g * h)) [PROOF STEP] by (force elim!: irreducible\<^sub>dE) [PROOF STATE] proof (state) this: irreducible\<^sub>d f = (degree f \<noteq> 0 \<and> (\<forall>g h. degree g \<noteq> 0 \<longrightarrow> degree h \<noteq> 0 \<longrightarrow> f \<noteq> g * h)) goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 961, "file": "Berlekamp_Zassenhaus_Poly_Mod_Finite_Field", "length": 8}
import numpy as np
import pandas as pd
from sklearn import utils
import matplotlib
import matplotlib.pyplot as plt
import seaborn as srn
from scipy.optimize import minimize
import tensorflow as tf
import tflearn
import tflearn.variables as va
from tflearn import DNN
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression, oneClassNN

dataPath = './data/'

colNames = ["sklearn-OCSVM-Linear-Train","sklearn-OCSVM-RBF-Train","sklearn-OCSVM-Linear-Test","sklearn-OCSVM-RBF-Test","sklearn-explicit-Linear-Train","sklearn-explicit-Sigmoid-Train","sklearn-explicit-Linear-Test","sklearn-explicit-Sigmoid-Test","tf-Linear-Train","tf-Sigmoid-Train","tf-Linear-Test","tf-Sigmoid-Test","tfLearn-Linear-Train","tfLearn-Sigmoid-Train","tfLearn-Linear-Test","tfLearn-Sigmoid-Test"]

# Create empty dicts to collect the decision scores per dataset.
df_usps_scores = {}
df_fake_news_scores = {}
df_spam_vs_ham_scores = {}
df_cifar_10_scores = {}

nu = 0.04
K = 4

# Hyper parameters for the one class Neural Network
v = 0.04

def tflearn_OneClass_NN_linear(data_train, data_test, labels_train):

    X = data_train
    Y = labels_train

    D = X.shape[1]

    No_of_inputNodes = X.shape[1]

    # Clear all the graph variables created in previous run and start fresh
    tf.reset_default_graph()

    # Define the network
    input_layer = input_data(shape=[None, No_of_inputNodes])  # input layer of size No_of_inputNodes

    np.random.seed(42)
    theta0 = np.random.normal(0, 1, K + K*D + 1) * 0.0001
    # theta0 = np.random.normal(0, 1, K + K*D + 1)  # For linear

    hidden_layer = fully_connected(input_layer, 4, bias=False, activation='linear',
                                   name="hiddenLayer_Weights",
                                   weights_init="normal")  # hidden layer of size 4

    output_layer = fully_connected(hidden_layer, 1, bias=False, activation='linear',
                                   name="outputLayer_Weights",
                                   weights_init="normal")  # output layer of size 1

    # Initialize rho
    value = 0.01
    init = tf.constant_initializer(value)
    rho = va.variable(name='rho', dtype=tf.float32, shape=[], initializer=init)
    rcomputed = []
    auc = []

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # print sess.run(tflearn.get_training_mode()) #False
    tflearn.is_training(True, session=sess)
    print sess.run(tflearn.get_training_mode())  # now True

    temp = theta0[-1]

    oneClassNN_Net = oneClassNN(output_layer, v, rho, hidden_layer, output_layer,
                                optimizer='sgd', loss='OneClassNN_Loss', learning_rate=1)

    model = DNN(oneClassNN_Net, tensorboard_verbose=3)

    model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
    model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K*D], (D, K)))

    iterStep = 0
    while (iterStep < 100):
        print "Running Iteration :", iterStep
        # Call the cost function
        y_pred = model.predict(data_train)  # Apply some ops
        tflearn.is_training(False, session=sess)
        y_pred_test = model.predict(data_test)  # Apply some ops
        tflearn.is_training(True, session=sess)
        value = np.percentile(y_pred, v * 100)
        tflearn.variables.set_value(rho, value, session=sess)
        rStar = rho
        model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
        iterStep = iterStep + 1
        rcomputed.append(rho)
        temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho", temp
    # print "y_pred", y_pred
    # print "y_predTest", y_pred_test

    # g = lambda x: x
    g = lambda x: 1 / (1 + tf.exp(-x))

    def nnScore(X, w, V, g):
        return tf.matmul(g((tf.matmul(X, w))), V)

    # Format the datatype to suit the computation of nnScore
    X = X.astype(np.float32)
    X_test = data_test
    X_test = X_test.astype(np.float32)

    # assign the learnt weights
    # wStar = hidden_layer.W
    # VStar = output_layer.W
    # Get weights values of fc2
    wStar = model.get_weights(hidden_layer.W)
    VStar = model.get_weights(output_layer.W)

    # print "Hidden", wStar
    # print VStar

    train = nnScore(X, wStar, VStar, g)
    test = nnScore(X_test, wStar, VStar, g)

    # Access the value inside the train and test for plotting
    # Create a new session and run the example
    # sess = tf.Session()
    # sess.run(tf.initialize_all_variables())
    arrayTrain = train.eval(session=sess)
    arrayTest = test.eval(session=sess)

    # print "Train Array:", arrayTrain
    # print "Test Array:", arrayTest

    # plt.hist(arrayTrain-temp, bins = 25, label='Normal');
    # plt.hist(arrayTest-temp, bins = 25, label='Anomalies');
    # plt.legend(loc='upper right')
    # plt.title('r = %1.6f- Sigmoid Activation ' % temp)
    # plt.show()

    pos_decisionScore = arrayTrain - temp
    neg_decisionScore = arrayTest - temp

    return [pos_decisionScore, neg_decisionScore]

def tflearn_OneClass_NN_Sigmoid(data_train, data_test, labels_train):

    X = data_train
    Y = labels_train

    D = X.shape[1]

    No_of_inputNodes = X.shape[1]

    # Clear all the graph variables created in previous run and start fresh
    tf.reset_default_graph()

    # Define the network
    input_layer = input_data(shape=[None, No_of_inputNodes])  # input layer of size No_of_inputNodes

    np.random.seed(42)
    theta0 = np.random.normal(0, 1, K + K*D + 1) * 0.0001
    # theta0 = np.random.normal(0, 1, K + K*D + 1)  # For linear

    hidden_layer = fully_connected(input_layer, 4, bias=False, activation='sigmoid',
                                   name="hiddenLayer_Weights",
                                   weights_init="normal")  # hidden layer of size 4

    output_layer = fully_connected(hidden_layer, 1, bias=False, activation='linear',
                                   name="outputLayer_Weights",
                                   weights_init="normal")  # output layer of size 1

    # Initialize rho
    value = 0.01
    init = tf.constant_initializer(value)
    rho = va.variable(name='rho', dtype=tf.float32, shape=[], initializer=init)
    rcomputed = []
    auc = []

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # print sess.run(tflearn.get_training_mode()) #False
    tflearn.is_training(True, session=sess)
    print sess.run(tflearn.get_training_mode())  # now True

    temp = theta0[-1]

    oneClassNN_net = oneClassNN(output_layer, v, rho, hidden_layer, output_layer,
                                optimizer='sgd', loss='OneClassNN_Loss', learning_rate=1)

    model = DNN(oneClassNN_net, tensorboard_verbose=3)

    model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
    model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K*D], (D, K)))

    iterStep = 0
    while (iterStep < 100):
        print "Running Iteration :", iterStep
        # Call the cost function
        y_pred = model.predict(data_train)  # Apply some ops
        tflearn.is_training(False, session=sess)
        y_pred_test = model.predict(data_test)  # Apply some ops
        tflearn.is_training(True, session=sess)
        value = np.percentile(y_pred, v * 100)
        tflearn.variables.set_value(rho, value, session=sess)
        rStar = rho
        model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
        iterStep = iterStep + 1
        rcomputed.append(rho)
        temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho", temp
    # print "y_pred", y_pred
    # print "y_predTest", y_pred_test

    # g = lambda x: x
    g = lambda x: 1 / (1 + tf.exp(-x))

    def nnScore(X, w, V, g):
        return tf.matmul(g((tf.matmul(X, w))), V)

    # Format the datatype to suit the computation of nnScore
    X = X.astype(np.float32)
    X_test = data_test
    X_test = X_test.astype(np.float32)

    # assign the learnt weights
    # wStar = hidden_layer.W
    # VStar = output_layer.W
    # Get weights values of fc2
    wStar = model.get_weights(hidden_layer.W)
    VStar = model.get_weights(output_layer.W)

    # print "Hidden", wStar
    # print VStar

    train = nnScore(X, wStar, VStar, g)
    test = nnScore(X_test, wStar, VStar, g)

    # Access the value inside the train and test for plotting
    # Create a new session and run the example
    # sess = tf.Session()
    # sess.run(tf.initialize_all_variables())
    arrayTrain = train.eval(session=sess)
    arrayTest = test.eval(session=sess)

    # print "Train Array:", arrayTrain
    # print "Test Array:", arrayTest

    # plt.hist(arrayTrain-temp, bins = 25, label='Normal');
    # plt.hist(arrayTest-temp, bins = 25, label='Anomalies');
    # plt.legend(loc='upper right')
    # plt.title('r = %1.6f- Sigmoid Activation ' % temp)
    # plt.show()

    pos_decisionScore = arrayTrain - temp
    neg_decisionScore = arrayTest - temp

    return [pos_decisionScore, neg_decisionScore]

def func_getDecision_Scores_tflearn_OneClass_NN(dataset, data_train, data_test, labels_train):

    # print "Decision_Scores_sklearn_OCSVM Using Linear and RBF Kernels....."

    if (dataset == "USPS"):
        Y = labels_train
        Y = Y.tolist()
        labels_train = [[i] for i in Y]
        result = tflearn_OneClass_NN_linear(data_train, data_test, labels_train)
        df_usps_scores["tflearn_OneClass_NN-Linear-Train"] = result[0]
        df_usps_scores["tflearn_OneClass_NN-Linear-Test"] = result[1]

        result = tflearn_OneClass_NN_Sigmoid(data_train, data_test, labels_train)
        df_usps_scores["tflearn_OneClass_NN-Sigmoid-Train"] = result[0]
        df_usps_scores["tflearn_OneClass_NN-Sigmoid-Test"] = result[1]

    # if(dataset=="FAKE_NEWS" ):
    #     Y = labels_train
    #     Y = Y.tolist()
    #     labels_train = [[i] for i in Y]
    #     result = tflearn_OneClass_NN_linear(data_train,data_test,labels_train)
    #     df_fake_news_scores["tflearn_OneClass_NN-Linear-Train"] = result[0]
    #     df_fake_news_scores["tflearn_OneClass_NN-Linear-Test"] = result[1]

    #     result = tflearn_OneClass_NN_Sigmoid(data_train,data_test,labels_train)
    #     df_fake_news_scores["tflearn_OneClass_NN-Sigmoid-Train"] = result[0]
    #     df_fake_news_scores["tflearn_OneClass_NN-Sigmoid-Test"] = result[1]

    # if(dataset=="SPAM_Vs_HAM" ):
    #     Y = labels_train
    #     Y = Y.tolist()
    #     labels_train = [[i] for i in Y]
    #     result = tflearn_OneClass_NN_linear(data_train,data_test)
    #     df_spam_vs_ham_scores["tflearn_OneClass_NN-Linear-Train"] = result[0]
    #     df_spam_vs_ham_scores["tflearn_OneClass_NN-Linear-Test"] = result[1]

    #     result = tflearn_OneClass_NN_Sigmoid(data_train,data_test)
    #     df_spam_vs_ham_scores["tflearn_OneClass_NN-Sigmoid-Train"] = result[0]
    #     df_spam_vs_ham_scores["tflearn_OneClass_NN-Sigmoid-Test"] = result[1]

    # if(dataset=="CIFAR-10" ):
    #     Y = labels_train
    #     Y = Y.tolist()
    #     labels_train = [[i] for i in Y]
    #     result = tflearn_OneClass_NN_linear(data_train,data_test,labels_train)
    #     df_cifar_10_scores["tflearn_OneClass_NN-Linear-Train"] = result[0]
    #     df_cifar_10_scores["tflearn_OneClass_NN-Linear-Test"] = result[1]

    #     result = tflearn_OneClass_NN_Sigmoid(data_train,data_test,labels_train)
    #     df_cifar_10_scores["tflearn_OneClass_NN_Sigmoid-Train"] = result[0]
    #     df_cifar_10_scores["tflearn_OneClass_NN_Sigmoid-Test"] = result[1]

    return [df_usps_scores, df_fake_news_scores, df_spam_vs_ham_scores, df_cifar_10_scores]
{"hexsha": "ddf2db9fe800bb24ccace6d5095a8f84c80d5f4d", "size": 11518, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/tflearn_OneClass_NN_model.py", "max_stars_repo_name": "chihyunsong/oc-nn", "max_stars_repo_head_hexsha": "f57130545f221fee67e9780d2a93ca48b9d10ba5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 203, "max_stars_repo_stars_event_min_datetime": "2018-02-26T09:52:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T12:51:38.000Z", "max_issues_repo_path": "models/tflearn_OneClass_NN_model.py", "max_issues_repo_name": "dherath/oc-nn", "max_issues_repo_head_hexsha": "f57130545f221fee67e9780d2a93ca48b9d10ba5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2018-05-07T19:36:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T04:00:25.000Z", "max_forks_repo_path": "models/tflearn_OneClass_NN_model.py", "max_forks_repo_name": "dherath/oc-nn", "max_forks_repo_head_hexsha": "f57130545f221fee67e9780d2a93ca48b9d10ba5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 81, "max_forks_repo_forks_event_min_datetime": "2018-02-22T21:17:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T04:53:46.000Z", "avg_line_length": 33.3855072464, "max_line_length": 412, "alphanum_fraction": 0.6661746831, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3162}
[STATEMENT] lemma WT_fv: "P,E \<turnstile> e :: T \<Longrightarrow> fv e \<subseteq> dom E" and "P,E \<turnstile> es [::] Ts \<Longrightarrow> fvs es \<subseteq> dom E" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (P,E \<turnstile> e :: T \<Longrightarrow> fv e \<subseteq> dom E) &&& (P,E \<turnstile> es [::] Ts \<Longrightarrow> fvs es \<subseteq> dom E) [PROOF STEP] apply(induct rule:WT_WTs.inducts) [PROOF STATE] proof (prove) goal (18 subgoals): 1. \<And>C E. is_class P C \<Longrightarrow> fv (new C) \<subseteq> dom E 2. \<And>E e D C. \<lbrakk>P,E \<turnstile> e :: Class D; fv e \<subseteq> dom E; is_class P C; P \<turnstile> Path D to C unique \<or> (\<forall>Cs. \<not> P \<turnstile> Path D to C via Cs )\<rbrakk> \<Longrightarrow> fv (Cast C e) \<subseteq> dom E 3. \<And>E e D C. \<lbrakk>P,E \<turnstile> e :: Class D; fv e \<subseteq> dom E; is_class P C; P \<turnstile> Path D to C unique \<or> P \<turnstile> C \<preceq>\<^sup>* D \<and> (\<forall>Cs. P \<turnstile> Path C to D via Cs \<longrightarrow> Subobjs\<^sub>R P C Cs)\<rbrakk> \<Longrightarrow> fv (\<lparr>C\<rparr>e) \<subseteq> dom E 4. \<And>v T E. typeof v = \<lfloor>T\<rfloor> \<Longrightarrow> fv (Val v) \<subseteq> dom E 5. \<And>E V T. E V = \<lfloor>T\<rfloor> \<Longrightarrow> fv (Var V) \<subseteq> dom E 6. \<And>E e\<^sub>1 T\<^sub>1 e\<^sub>2 T\<^sub>2 bop T. \<lbrakk>P,E \<turnstile> e\<^sub>1 :: T\<^sub>1; fv e\<^sub>1 \<subseteq> dom E; P,E \<turnstile> e\<^sub>2 :: T\<^sub>2; fv e\<^sub>2 \<subseteq> dom E; case bop of Eq \<Rightarrow> T\<^sub>1 = T\<^sub>2 \<and> T = Boolean | Add \<Rightarrow> T\<^sub>1 = Integer \<and> T\<^sub>2 = Integer \<and> T = Integer\<rbrakk> \<Longrightarrow> fv (e\<^sub>1 \<guillemotleft>bop\<guillemotright> e\<^sub>2) \<subseteq> dom E 7. \<And>E V T e T'. \<lbrakk>E V = \<lfloor>T\<rfloor>; P,E \<turnstile> e :: T'; fv e \<subseteq> dom E; P \<turnstile> T' \<le> T\<rbrakk> \<Longrightarrow> fv (V:=e) \<subseteq> dom E 8. \<And>E e C F T Cs. \<lbrakk>P,E \<turnstile> e :: Class C; fv e \<subseteq> dom E; P \<turnstile> C has least F:T via Cs\<rbrakk> \<Longrightarrow> fv (e\<bullet>F{Cs}) \<subseteq> dom E 9. \<And>E e\<^sub>1 C F T Cs e\<^sub>2 T'. \<lbrakk>P,E \<turnstile> e\<^sub>1 :: Class C; fv e\<^sub>1 \<subseteq> dom E; P \<turnstile> C has least F:T via Cs; P,E \<turnstile> e\<^sub>2 :: T'; fv e\<^sub>2 \<subseteq> dom E; P \<turnstile> T' \<le> T\<rbrakk> \<Longrightarrow> fv (e\<^sub>1\<bullet>F{Cs} := e\<^sub>2) \<subseteq> dom E 10. \<And>E e C' C M Ts T m Cs es Ts'. \<lbrakk>P,E \<turnstile> e :: Class C'; fv e \<subseteq> dom E; P \<turnstile> Path C' to C unique; P \<turnstile> C has least M = (Ts, T, m) via Cs; P,E \<turnstile> es [::] Ts'; fvs es \<subseteq> dom E; P \<turnstile> Ts' [\<le>] Ts\<rbrakk> \<Longrightarrow> fv (Call e \<lfloor>C\<rfloor> M es) \<subseteq> dom E A total of 18 subgoals... [PROOF STEP] apply(simp_all del: fun_upd_apply) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<And>E V T. E V = \<lfloor>T\<rfloor> \<Longrightarrow> V \<in> dom E 2. \<And>E V T e T'. \<lbrakk>E V = \<lfloor>T\<rfloor>; P,E \<turnstile> e :: T'; fv e \<subseteq> dom E; P \<turnstile> T' \<le> T\<rbrakk> \<Longrightarrow> V \<in> dom E 3. \<And>T E V e T'. \<lbrakk>is_type P T; P,E(V \<mapsto> T) \<turnstile> e :: T'; fv e \<subseteq> insert V (dom E)\<rbrakk> \<Longrightarrow> fv e - {V} \<subseteq> dom E [PROOF STEP] apply fast+ [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 1473, "file": "CoreC++_WellType", "length": 4}
In Nyx a stochastic force field can be applied.
To make sure this option is chosen correctly, we must always set \\

\noindent {\bf USE\_FORCING = TRUE} \\

\noindent in the GNUmakefile and \\

\noindent {\bf nyx.do\_forcing} = 1 \\

\noindent in the inputs file. \\

The external forcing term in the momentum equation~(\ref{eq:momt}) is then given by
\begin{equation}
{\bf S}_{\rho \Ub} = \rho_b \fb
\end{equation}
where the acceleration field $\mathbf{f}(\mathbf{x},t)$ is computed as the inverse Fourier transform
of the forcing spectrum $\widehat{\mathbf{f}}(\mathbf{k},t)$.
The time evolution of each wave mode is given by an Ornstein-Uhlenbeck process
(see \cite{SchmHille06,Schmidt14} for details). Since the real-space forcing acts on large
scales $L$, non-zero modes are confined to a narrow window of small wave numbers with a prescribed
shape (the forcing profile). The resulting flow reaches a statistically stationary and isotropic
state with a root-mean-square velocity of the order $V=L/T$, where the integral time scale $T$
(also known as the large-eddy turn-over time) is usually set equal to the autocorrelation time of
the forcing. The force field can be varied from purely solenoidal (divergence-free) for the weight
parameter $\zeta=1$ to purely dilatational (rotation-free) for $\zeta=0$.

To maintain a nearly constant root-mean-square Mach number, a simple model for radiative heating
and cooling around a given equilibrium temperature $T_0$ is applied in the energy
equation~(\ref{eq:energy}):
\begin{equation}
S_{\rho E} = S_{\rho e} + \Ub \cdot {\bf S}_{\rho \Ub} =
 -\frac{\alpha k_{\rm B}(T-T_0)}{\mu m_{\rm H}(\gamma-1)} + \rho_b\Ub\cdot\fb
\end{equation}
The parameters $T_0$ and $\alpha$ correspond to temp0 and alpha, respectively, in the probin file
(along with rho0 for the mean density, which is unity by default). While the gas is adiabatic for
$\alpha=0$, it becomes nearly isothermal if the cooling time scale given by $1/\alpha$ is chosen
sufficiently short compared to $T$. For performance reasons, a constant composition (corresponding
to a constant molecular weight $\mu$) is assumed.

\section{List of Parameters}

\begin{table*}[h]
\begin{scriptsize}
\begin{tabular}{|l|l|l|l|}
\hline
Parameter & Definition & Acceptable Values & Default \\
\hline
{\bf forcing.seed} & seed of the random number generator & Integer $>0$ & 27011974 \\
{\bf forcing.profile} & shape of forcing spectrum & 1 (plane), 2 (band), 3 (parabolic) & 3\\
{\bf forcing.alpha} & ratio of domain size $X$ to integral length $L=X/\alpha$ & Integer $>0$ & 2 2 2\\
{\bf forcing.band\_width} & band width of the forcing spectrum relative to alpha & Real $\ge 0$ and $\le 1$ & 1.0 1.0 1.0\\
{\bf forcing.intgr\_vel} & characteristic velocity $V$ & Real $> 0$ & must be set\\
{\bf forcing.auto\_corrl} & autocorrelation time in units of $T=L/V$ & Real $> 0$ & 1.0 1.0 1.0\\
{\bf forcing.soln\_weight} & weight $\zeta$ of solenoidal relative to dilatational modes & Real $\ge 0$ and $\le 1$ & 1.0\\
\hline
\end{tabular}
\label{Table:Forcing}
\end{scriptsize}
\end{table*}

Triples for forcing.alpha, forcing.band\_width, forcing.intgr\_vel, and forcing.auto\_corrl
correspond to the three spatial dimensions.
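For orientation, a hypothetical inputs-file fragment using the parameters above might look as
follows; the values shown are the defaults from the table (plus a placeholder for
forcing.intgr\_vel, which has no default) and are meant purely as an illustration, not as
recommended settings:

\begin{verbatim}
nyx.do_forcing = 1

forcing.seed        = 27011974
forcing.profile     = 3
forcing.alpha       = 2 2 2
forcing.band_width  = 1.0 1.0 1.0
forcing.intgr_vel   = 1.0 1.0 1.0
forcing.auto_corrl  = 1.0 1.0 1.0
forcing.soln_weight = 1.0
\end{verbatim}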
{"hexsha": "5cc803eaa098218b406c3a94f059cd0ae3a23510", "size": 3207, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "UsersGuide/Forcing/NyxForcing.tex", "max_stars_repo_name": "Gosenca/axionyx_1.0", "max_stars_repo_head_hexsha": "7e2a723e00e6287717d6d81b23db32bcf6c3521a", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-02-18T09:13:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T21:27:46.000Z", "max_issues_repo_path": "UsersGuide/Forcing/NyxForcing.tex", "max_issues_repo_name": "Gosenca/axionyx_1.0", "max_issues_repo_head_hexsha": "7e2a723e00e6287717d6d81b23db32bcf6c3521a", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-12T08:54:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-12T08:54:31.000Z", "max_forks_repo_path": "UsersGuide/Forcing/NyxForcing.tex", "max_forks_repo_name": "Gosenca/axionyx_1.0", "max_forks_repo_head_hexsha": "7e2a723e00e6287717d6d81b23db32bcf6c3521a", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-04T10:26:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T23:51:51.000Z", "avg_line_length": 66.8125, "max_line_length": 878, "alphanum_fraction": 0.7271593389, "num_tokens": 942}
/** \file gameengine/Expressions/InputParser.cpp
 *  \ingroup expressions
 */
// Parser.cpp: implementation of the CParser class.
/*
 * Copyright (c) 1996-2000 Erwin Coumans <coockie@acm.org>
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Erwin Coumans makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 *
 */

#include "EXP_InputParser.h"

#include <boost/algorithm/string.hpp>
#include <boost/format.hpp>

#include "CM_Message.h"
#include "EXP_BoolValue.h"
#include "EXP_ConstExpr.h"
#include "EXP_EmptyValue.h"
#include "EXP_ErrorValue.h"
#include "EXP_FloatValue.h"
#include "EXP_IdentifierExpr.h"
#include "EXP_IntValue.h"
#include "EXP_Operator1Expr.h"
#include "EXP_Operator2Expr.h"
#include "EXP_StringValue.h"

// This is disabled at the moment; I expected a memleak from it, but the error-cleanup was the
// real reason. Well, it looks like we don't need it anyway, until maybe the Curved Surfaces are
// integrated into CSG: cool things like (IF(LOD==1,CCurvedValue,IF(LOD==2,CCurvedValue2)) etc...
#include "EXP_IfExpr.h"

#if defined(WIN32) || defined(WIN64)
#  define strcasecmp _stricmp

#  ifndef strtoll
#    define strtoll _strtoi64
#  endif
#endif /* Def WIN32 or Def WIN64 */

#define NUM_PRIORITY 6

CParser::CParser() : m_identifierContext(nullptr)
{
}

CParser::~CParser()
{
  if (m_identifierContext) {
    m_identifierContext->Release();
  }
}

void CParser::ScanError(const std::string &str)
{
  /* Sets the global variable errmsg to an error message with
   * contents str, appending if it already exists. */
  if (errmsg) {
    errmsg = new COperator2Expr(VALUE_ADD_OPERATOR, errmsg, Error(str));
  }
  else {
    errmsg = Error(str);
  }

  sym = errorsym;
}

CExpression *CParser::Error(const std::string &str)
{
  // Makes and returns a new CConstExpr filled with a CErrorValue with string str.
  return new CConstExpr(new CErrorValue(str));
}

void CParser::NextCh()
{
  /* Sets the global variable ch to the next character, if it exists,
   * and increases the global variable chcount. */
  ++chcount;

  if (chcount < text.size()) {
    ch = text[chcount];
  }
  else {
    ch = 0x00;
  }
}

void CParser::TermChar(char c)
{
  /* Generates an error if the next char isn't the specified char c;
   * otherwise, skip the char. */
  if (ch == c) {
    NextCh();
  }
  else {
    CM_Warning(c << " expected. Continuing without it.");
  }
}

void CParser::DigRep()
{
  // Advances the current character past any run of decimal digits.
  while ((ch >= '0') && (ch <= '9')) {
    NextCh();
  }
}

void CParser::CharRep()
{
  // Advances the current character past any run of alphanumerics, '.' and '_'.
  while (((ch >= '0') && (ch <= '9')) || ((ch >= 'a') && (ch <= 'z')) ||
         ((ch >= 'A') && (ch <= 'Z')) || (ch == '.') || (ch == '_')) {
    NextCh();
  }
}

void CParser::GrabString(int start)
{
  /* Puts part of the input string into the global variable
   * const_as_string, from position start, to position chcount. */
  const_as_string = text.substr(start, chcount - start);
}

void CParser::GrabRealString(int start)
{
  /* Works like GrabString but converts \\n to \n.
   * Puts part of the input string into the global variable
   * const_as_string, from position start, to position chcount. */
  const_as_string = std::string();

  for (int i = start; i < chcount; i++) {
    char tmpch = text[i];
    if ((tmpch == '\\') && (text[i + 1] == 'n')) {
      tmpch = '\n';
      i++;
    }
    const_as_string += tmpch;
  }
}

void CParser::NextSym()
{
  /* Sets the global variable sym to the next symbol, and
   * if it is an operator
   *   sets the global variable opkind to the kind of operator
   * if it is a constant
   *   sets the global variable constkind to the kind of constant
   * if it is a reference to a cell
   *   sets the global variable cellcoord to the coordinates of the cell
   */

  errmsg = nullptr;
  while (ch == ' ' || ch == 0x9) {
    NextCh();
  }

  switch (ch) {
    case '(': {
      sym = lbracksym;
      NextCh();
      break;
    }
    case ')': {
      sym = rbracksym;
      NextCh();
      break;
    }
    case ',': {
      sym = commasym;
      NextCh();
      break;
    }
    case '%': {
      sym = opsym;
      opkind = OPmodulus;
      NextCh();
      break;
    }
    case '+': {
      sym = opsym;
      opkind = OPplus;
      NextCh();
      break;
    }
    case '-': {
      sym = opsym;
      opkind = OPminus;
      NextCh();
      break;
    }
    case '*': {
      sym = opsym;
      opkind = OPtimes;
      NextCh();
      break;
    }
    case '/': {
      sym = opsym;
      opkind = OPdivide;
      NextCh();
      break;
    }
    case '&': {
      sym = opsym;
      opkind = OPand;
      NextCh();
      TermChar('&');
      break;
    }
    case '|': {
      sym = opsym;
      opkind = OPor;
      NextCh();
      TermChar('|');
      break;
    }
    case '=': {
      sym = opsym;
      opkind = OPequal;
      NextCh();
      TermChar('=');
      break;
    }
    case '!': {
      sym = opsym;
      NextCh();
      if (ch == '=') {
        opkind = OPunequal;
        NextCh();
      }
      else {
        opkind = OPnot;
      }
      break;
    }
    case '>': {
      sym = opsym;
      NextCh();
      if (ch == '=') {
        opkind = OPgreaterequal;
        NextCh();
      }
      else {
        opkind = OPgreater;
      }
      break;
    }
    case '<': {
      sym = opsym;
      NextCh();
      if (ch == '=') {
        opkind = OPlessequal;
        NextCh();
      }
      else {
        opkind = OPless;
      }
      break;
    }
    case '\"': {
      sym = constsym;
      constkind = stringtype;
      NextCh();
      int start = chcount;
      while ((ch != '\"') && (ch != 0x0)) {
        NextCh();
      }
      GrabRealString(start);
      TermChar('\"');  // check for eol before '\"'
      break;
    }
    case 0x0: {
      sym = eolsym;
      break;
    }
    default: {
      int start = chcount;
      DigRep();
      if ((start != chcount) || (ch == '.')) {  // number
        sym = constsym;
        if (ch == '.') {
          constkind = floattype;
          NextCh();
          DigRep();
        }
        else {
          constkind = inttype;
        }
        if ((ch == 'e') || (ch == 'E')) {
          constkind = floattype;
          NextCh();
          if ((ch == '+') || (ch == '-')) {
            NextCh();
          }
          int mark = chcount;
          DigRep();
          if (mark == chcount) {
            ScanError("Number expected after 'E'");
            return;
          }
        }
        GrabString(start);
      }
      else if (((ch >= 'a') && (ch <= 'z')) || ((ch >= 'A') && (ch <= 'Z'))) {
        start = chcount;
        CharRep();
        GrabString(start);
        if (boost::iequals(const_as_string, "SUM")) {
          sym = sumsym;
        }
        else if (boost::iequals(const_as_string, "NOT")) {
          sym = opsym;
          opkind = OPnot;
        }
        else if (boost::iequals(const_as_string, "AND")) {
          sym = opsym;
          opkind = OPand;
        }
        else if (boost::iequals(const_as_string, "OR")) {
          sym = opsym;
          opkind = OPor;
        }
        else if (boost::iequals(const_as_string, "IF")) {
          sym = ifsym;
        }
        else if (boost::iequals(const_as_string, "WHOMADE")) {
          sym = whocodedsym;
        }
        else if (boost::iequals(const_as_string, "FALSE")) {
          sym = constsym;
          constkind = booltype;
          boolvalue = false;
        }
        else if (boost::iequals(const_as_string, "TRUE")) {
          sym = constsym;
          constkind = booltype;
          boolvalue = true;
        }
        else {
          sym = idsym;
        }
      }
      else {
        std::string str = (boost::format("Unexpected character '%c'") % ch).str();
        NextCh();
        ScanError(str);
        return;
      }
    }
  }
}

const std::string CParser::Symbol2Str(int s)
{
  // Returns a string representation of symbol s, for use in Term when generating an error.
  switch (s) {
    case errorsym: {
      return "error";
    }
    case lbracksym: {
      return "(";
    }
    case rbracksym: {
      return ")";
    }
    case commasym: {
      return ",";
    }
    case opsym: {
      return "operator";
    }
    case constsym: {
      return "constant";
    }
    case sumsym: {
      return "SUM";
    }
    case ifsym: {
      return "IF";
    }
    case whocodedsym: {
      return "WHOMADE";
    }
    case eolsym: {
      return "end of line";
    }
    case idsym: {
      return "identifier";
    }
  }
  return "unknown";  // should not happen
}

void CParser::Term(int s)
{
  /* Generates an error if the next symbol isn't the specified symbol s;
   * otherwise, skip the symbol. */
  if (s == sym) {
    NextSym();
  }
  else {
    CM_Warning(Symbol2Str(s) << " expected. Continuing without it.");
  }
}

int CParser::Priority(int optorkind)
{
  // Returns the priority of an operator; a higher number means a higher priority.
  switch (optorkind) {
    case OPor: {
      return 1;
    }
    case OPand: {
      return 2;
    }
    case OPgreater:
    case OPless:
    case OPgreaterequal:
    case OPlessequal:
    case OPequal:
    case OPunequal: {
      return 3;
    }
    case OPplus:
    case OPminus: {
      return 4;
    }
    case OPmodulus:
    case OPtimes:
    case OPdivide:
      return 5;
  }
  BLI_assert(false);
  return 0;  // should not happen
}

CExpression *CParser::Ex(int i)
{
  /* Parses an expression in the input, starting at priority i, and
   * returns a CExpression, containing the parsed input. */
  CExpression *e1 = nullptr, *e2 = nullptr;

  if (i < NUM_PRIORITY) {
    e1 = Ex(i + 1);
    while ((sym == opsym) && (Priority(opkind) == i)) {
      int opkind2 = opkind;
      NextSym();
      e2 = Ex(i + 1);
      switch (opkind2) {
        case OPmodulus: {
          e1 = new COperator2Expr(VALUE_MOD_OPERATOR, e1, e2);
        } break;
        case OPplus: {
          e1 = new COperator2Expr(VALUE_ADD_OPERATOR, e1, e2);
        } break;
        case OPminus: {
          e1 = new COperator2Expr(VALUE_SUB_OPERATOR, e1, e2);
        } break;
        case OPtimes: {
          e1 = new COperator2Expr(VALUE_MUL_OPERATOR, e1, e2);
        } break;
        case OPdivide: {
          e1 = new COperator2Expr(VALUE_DIV_OPERATOR, e1, e2);
        } break;
        case OPand: {
          e1 = new COperator2Expr(VALUE_AND_OPERATOR, e1, e2);
        } break;
        case OPor: {
          e1 = new COperator2Expr(VALUE_OR_OPERATOR, e1, e2);
        } break;
        case OPequal: {
          e1 = new COperator2Expr(VALUE_EQL_OPERATOR, e1, e2);
        } break;
        case OPunequal: {
          e1 = new COperator2Expr(VALUE_NEQ_OPERATOR, e1, e2);
        } break;
        case OPgreater: {
          e1 = new COperator2Expr(VALUE_GRE_OPERATOR, e1, e2);
        } break;
        case OPless: {
          e1 = new COperator2Expr(VALUE_LES_OPERATOR, e1, e2);
        } break;
        case OPgreaterequal: {
          e1 = new COperator2Expr(VALUE_GEQ_OPERATOR, e1, e2);
        } break;
        case OPlessequal: {
          e1 = new COperator2Expr(VALUE_LEQ_OPERATOR, e1, e2);
        } break;
        default: {
          BLI_assert(false);
        } break;  // should not happen
      }
    }
  }
  else if (i == NUM_PRIORITY) {
    if ((sym == opsym) && ((opkind == OPminus) || (opkind == OPnot) || (opkind == OPplus))) {
      NextSym();
      switch (opkind) {
        /* +1 is also a valid number! */
        case OPplus: {
          e1 = new COperator1Expr(VALUE_POS_OPERATOR, Ex(NUM_PRIORITY));
        } break;
        case OPminus: {
          e1 = new COperator1Expr(VALUE_NEG_OPERATOR, Ex(NUM_PRIORITY));
        } break;
        case OPnot: {
          e1 = new COperator1Expr(VALUE_NOT_OPERATOR, Ex(NUM_PRIORITY));
        } break;
        default: {
          // should not happen
          e1 = Error("operator +, - or ! expected");
        }
      }
    }
    else {
      switch (sym) {
        case constsym: {
          switch (constkind) {
            case booltype: {
              e1 = new CConstExpr(new CBoolValue(boolvalue));
              break;
            }
            case inttype: {
              cInt temp;
              temp = std::stol(const_as_string, nullptr, 10); /* atoi is for int only */
              e1 = new CConstExpr(new CIntValue(temp));
              break;
            }
            case floattype: {
              double temp;
              temp = std::stof(const_as_string);
              e1 = new CConstExpr(new CFloatValue(temp));
              break;
            }
            case stringtype: {
              e1 = new CConstExpr(new CStringValue(const_as_string, ""));
              break;
            }
            default: {
              BLI_assert(false);
              break;
            }
          }
          NextSym();
          break;
        }
        case lbracksym: {
          NextSym();
          e1 = Ex(1);
          Term(rbracksym);
          break;
        }
        case ifsym: {
          CExpression *e3;
          NextSym();
          Term(lbracksym);
          e1 = Ex(1);
          Term(commasym);
          e2 = Ex(1);
          if (sym == commasym) {
            NextSym();
            e3 = Ex(1);
          }
          else {
            e3 = new CConstExpr(new CEmptyValue());
          }
          Term(rbracksym);
          e1 = new CIfExpr(e1, e2, e3);
          break;
        }
        case idsym: {
          e1 = new CIdentifierExpr(const_as_string, m_identifierContext);
          NextSym();
          break;
        }
        case errorsym: {
          BLI_assert(!e1);
          std::string errtext = "[no info]";
          if (errmsg) {
            CValue *errmsgval = errmsg->Calculate();
            errtext = errmsgval->GetText();
            errmsgval->Release();

            // e1 = Error(errmsg->Calculate()->GetText());//new CConstExpr(errmsg->Calculate());

            if (!(errmsg->Release())) {
              errmsg = nullptr;
            }
            else {
              // does this happen ?
              BLI_assert("does this happen");
            }
          }
          e1 = Error(errtext);

          break;
        }
        default:
          NextSym();
          // return Error("Expression expected");
          BLI_assert(!e1);
          e1 = Error("Expression expected");
      }
    }
  }

  return e1;
}

CExpression *CParser::Expr()
{
  /* Parses an expression in the input, and
   * returns a CExpression, containing the parsed input. */
  return Ex(1);
}

CExpression *CParser::ProcessText(const std::string &intext)
{
  // Parses the string in intext and returns the parsed expression.
  CExpression *expr;
  text = intext;

  chcount = 0;
  if (text.size() == 0) {
    return nullptr;
  }

  ch = text[0];
  /* if (ch != '=') {
   *   expr = new CConstExpr(new CStringValue(text));
   *   *dependent = deplist;
   *   return expr;
   * } else
   */
  //  NextCh();
  NextSym();
  expr = Expr();
  if (sym != eolsym) {
    CExpression *oldexpr = expr;
    expr = new COperator2Expr(VALUE_ADD_OPERATOR,
                              oldexpr,
                              Error("Extra characters after expression"));
    // new CConstExpr(new CErrorValue("Extra characters after expression")));
  }
  if (errmsg) {
    errmsg->Release();
  }

  return expr;
}

void CParser::SetContext(CValue *context)
{
  if (m_identifierContext) {
    m_identifierContext->Release();
  }
  m_identifierContext = context;
}
{"hexsha": "d710a904bfccea803621a7dfb94104da50436eeb", "size": 15962, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/gameengine/Expressions/intern/InputParser.cpp", "max_stars_repo_name": "aseer95/upbge", "max_stars_repo_head_hexsha": "f99d5f781f3c2cded0c7fc8ef387908fd35af505", "max_stars_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/gameengine/Expressions/intern/InputParser.cpp", "max_issues_repo_name": "aseer95/upbge", "max_issues_repo_head_hexsha": "f99d5f781f3c2cded0c7fc8ef387908fd35af505", "max_issues_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/gameengine/Expressions/intern/InputParser.cpp", "max_forks_repo_name": "aseer95/upbge", "max_forks_repo_head_hexsha": "f99d5f781f3c2cded0c7fc8ef387908fd35af505", "max_forks_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5427728614, "max_line_length": 98, "alphanum_fraction": 0.5262498434, "num_tokens": 4226}
"""Stanford Question Answering Dataset (SQuAD). Includes MLM and QA tasks. Author: Jeffrey Shen """ import torch import torch.utils.data as data import numpy as np import random class MLM(data.IterableDataset): """ Each item in the dataset is a tuple with the following entries (in order): - x: Masked blocks of text, starting with [CLS], separated by [SEP] - y: Target blocks of text, starting with [CLS], separated by [SEP] Args: data_path (str): Path to .npz file containing pre-processed dataset. max_tokens (int): Range of indices to generate for the random tokens. """ def __init__( self, data_path, max_tokens, epoch_size, mask_prob=0.15, unmask_prob=0.1, randomize_prob=0.1, block_size=512, ignore_idx=-1, padding_idx=0, cls_idx=1, sep_idx=2, mask_idx=3, ): super(MLM, self).__init__() self.epoch_size = epoch_size self.max_tokens = max_tokens self.mask_prob = mask_prob self.unmask_prob = unmask_prob self.randomize_prob = randomize_prob self.block_size = block_size self.ignore_idx = ignore_idx self.padding_idx = padding_idx self.cls_idx = cls_idx self.sep_idx = sep_idx self.mask_idx = mask_idx self.random_weights = [1] * self.max_tokens self.random_weights[self.padding_idx] = 0 self.random_weights[self.cls_idx] = 0 self.random_weights[self.sep_idx] = 0 self.random_weights[self.mask_idx] = 0 # Don't need to do ignore_idx, since it should always be outside the range dataset = np.load(data_path) self.context_idxs = torch.from_numpy(dataset["context_idxs"]).long() self.question_idxs = torch.from_numpy(dataset["ques_idxs"]).long() def mask(self, x, y): size = x.size(0) num_mask = int(self.mask_prob * size + random.random()) masks = torch.tensor(random.sample(range(size), num_mask), dtype=torch.long) change_masks = torch.rand(num_mask) unmask = change_masks < self.unmask_prob random_mask = change_masks < (self.randomize_prob + self.unmask_prob) random_mask = random_mask & (~unmask) random_content = torch.tensor( random.choices( range(self.max_tokens), weights=self.random_weights, k=random_mask.sum().item(), ), dtype=torch.long, ) masked = torch.tensor([False] * size, dtype=torch.bool) masked[masks] = True x[masks[~unmask]] = self.mask_idx x[masks[random_mask]] = random_content y[~masked] = self.ignore_idx return x, y def __len__(self): return self.epoch_size def __iter__(self): worker_info = torch.utils.data.get_worker_info() worker_id = 0 num_workers = 1 if worker_info is not None: worker_id = worker_info.id num_workers = worker_info.num_workers epoch_size = self.epoch_size // num_workers next = torch.full((self.block_size,), self.padding_idx, dtype=torch.long) next[0] = self.cls_idx next_index = 1 n_samples = 0 while True: dataset_size = self.context_idxs.size(0) ids = list(range(worker_id, dataset_size, num_workers)) random.shuffle(ids) for i in ids: for j in range(2): if j == 0: sample = self.context_idxs[i] else: sample = self.question_idxs[i] sample_length = (sample != self.padding_idx).sum().item() sample_index = 0 while sample_index < sample_length: fill = min( sample_length - sample_index, next.size(0) - next_index ) next[next_index : next_index + fill] = sample[ sample_index : sample_index + fill ] next_index += fill sample_index += fill if next_index >= next.size(0): x = next.clone().detach() y = next.clone().detach() yield self.mask(x, y) next = torch.full( (self.block_size,), self.padding_idx, dtype=torch.long ) next[0] = self.cls_idx next_index = 1 n_samples += 1 if n_samples >= epoch_size: return else: next[next_index] = self.sep_idx next_index += 1 def collate_fn(examples): # Group 
by tensor type x, y = zip(*examples) return torch.stack(x, dim=0), torch.stack(y, dim=0) class SQuAD(data.Dataset): """Stanford Question Answering Dataset (SQuAD). Each item in the dataset is a tuple with the following entries (in order): - x: [CLS] context window [SEP] question - y: start and end indices, adjusted to the context window - c_padding_mask: mask out [SEP] question (True) or keep [CLS] context window (False) - ids: ids for each entry Args: data_path (str): Path to .npz file containing pre-processed dataset. """ def __init__( self, data_path, block_size=512, ignore_idx=-1, padding_idx=0, cls_idx=1, sep_idx=2, mask_idx=3, use_v2=True, ): super(SQuAD, self).__init__() self.block_size = block_size self.ignore_idx = ignore_idx self.padding_idx = padding_idx self.cls_idx = cls_idx self.sep_idx = sep_idx self.mask_idx = mask_idx dataset = np.load(data_path) self.context_idxs = torch.from_numpy(dataset["context_idxs"]).long() self.question_idxs = torch.from_numpy(dataset["ques_idxs"]).long() self.y1s = torch.from_numpy(dataset["y1s"]).long() self.y2s = torch.from_numpy(dataset["y2s"]).long() self.ids = torch.from_numpy(dataset["ids"]).long() self.valid_idxs = [ idx for idx in range(len(self.ids)) if use_v2 or self.y1s[idx].item() >= 0 ] def __getitem__(self, idx): idx = self.valid_idxs[idx] example = ( self.context_idxs[idx], self.question_idxs[idx], self.y1s[idx], self.y2s[idx], self.ids[idx], ) return example def __len__(self): return len(self.valid_idxs) def get_sliding_window_collate(self, stride, randomize): """ Gets a collate function which creates inputs at most the block size. If randomize is True, we get a single random sliding window (for training/dev). Otherwise, we keep all the sliding windows (for evaluation). """ def sliding_window_collate(examples): windows = [] for example in examples: c, q, y1, y2, id = example c_len = (c != self.padding_idx).sum() q_len = (q != self.padding_idx).sum() # We want to keep going so long as c_end = c_start + (block_size - q_len - 2) # has not been at least c_len for the first time, i.e. c_end < c_len + stride. # We also want to take at least one step. c_range = range( 0, max(1, c_len + q_len + 2 - self.block_size + stride), stride ) if randomize: c_start = random.sample(c_range, k=1)[0] c_range = range(c_start, c_start + 1) for c_start in c_range: c_end = min(self.block_size - q_len - 2 + c_start, c_len) if y1 < c_start or y2 < c_start or y1 >= c_end or y2 >= c_end: y1 = -1 y2 = -1 else: y1 -= c_start y2 -= c_start windows.append((c[c_start:c_end], q[:q_len], y1, y2, c_start, id)) # Collate windows max_len = max(len(window[0]) + len(window[1]) + 2 for window in windows) assert max_len <= self.block_size x = torch.full((len(windows), max_len), self.padding_idx, dtype=torch.long) y = torch.zeros(len(windows), 2, dtype=torch.long) c_padding_mask = torch.ones(len(windows), max_len, dtype=torch.bool) c_starts = torch.zeros(len(windows), dtype=torch.long) ids = torch.zeros(len(windows), dtype=torch.long) for i, window in enumerate(windows): c, q, y1, y2, c_start, id = window x[i, 0] = self.cls_idx x[i, 1 : 1 + len(c)] = c x[i, 1 + len(c)] = self.sep_idx x[i, 2 + len(c) : 2 + len(c) + len(q)] = q c_padding_mask[i][0 : 1 + len(c)] = False y[i, 0] = y1 + 1 y[i, 1] = y2 + 1 c_starts[i] = c_start ids[i] = id return x, y, c_padding_mask, c_starts, ids return sliding_window_collate class QuestionsMLM(data.Dataset): """ Args: data_path (str): Path to .npz file containing pre-processed dataset. 
max_tokens (int): Range of indices to generate for the random tokens. """ def __init__( self, data_path, max_tokens, mask_prob=0.15, unmask_prob=0.1, randomize_prob=0.1, ignore_idx=-1, padding_idx=0, cls_idx=1, sep_idx=2, mask_idx=3, use_v2=True, ): super().__init__() self.max_tokens = max_tokens self.mask_prob = mask_prob self.unmask_prob = unmask_prob self.randomize_prob = randomize_prob self.ignore_idx = ignore_idx self.padding_idx = padding_idx self.cls_idx = cls_idx self.sep_idx = sep_idx self.mask_idx = mask_idx self.random_weights = [1] * self.max_tokens self.random_weights[self.padding_idx] = 0 self.random_weights[self.cls_idx] = 0 self.random_weights[self.sep_idx] = 0 self.random_weights[self.mask_idx] = 0 # Don't need to do ignore_idx, since it should always be outside the range dataset = np.load(data_path) self.context_idxs = torch.from_numpy(dataset["context_idxs"]).long() self.question_idxs = torch.from_numpy(dataset["ques_idxs"]).long() self.y1s = torch.from_numpy(dataset["y1s"]).long() self.y2s = torch.from_numpy(dataset["y2s"]).long() self.ids = torch.from_numpy(dataset["ids"]).long() self.valid_idxs = [ idx for idx in range(len(self.ids)) if use_v2 or self.y1s[idx].item() >= 0 ] self.max_id = torch.max(self.ids) + 1 def mask(self, x, y): size = (x != self.padding_idx).sum().item() num_mask = int(self.mask_prob * size + random.random()) masks = torch.tensor(random.sample(range(size), num_mask), dtype=torch.long) change_masks = torch.rand(num_mask) unmask = change_masks < self.unmask_prob random_mask = change_masks < (self.randomize_prob + self.unmask_prob) random_mask = random_mask & (~unmask) random_content = torch.tensor( random.choices( range(self.max_tokens), weights=self.random_weights, k=random_mask.sum().item(), ), dtype=torch.long, ) masked = torch.tensor([False] * x.size(0), dtype=torch.bool) masked[masks] = True x[masks[~unmask]] = self.mask_idx x[masks[random_mask]] = random_content y[~masked] = self.ignore_idx return x, y def __getitem__(self, idx): idx = self.valid_idxs[idx] x = torch.full((self.question_idxs.size(-1) + 1,), self.padding_idx, dtype=torch.long) x[0] = self.cls_idx x[1:] = self.question_idxs[idx] x = x.clone().detach() y = x.clone().detach() x, y = self.mask(x, y) return x, y, self.context_idxs[idx], self.ids[idx] def __len__(self): return len(self.valid_idxs) @staticmethod def get_collate_fn(): def mlm_collate_fn(examples): # Group by tensor type x, y, c, ids = zip(*examples) return torch.stack(x, dim=0), torch.stack(y, dim=0), torch.stack(c, dim=0), torch.stack(ids, dim=0) return mlm_collate_fn
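# ------------------------------------------------------------------
# Minimal usage sketch (added note, kept as comments; the .npz path and
# vocabulary size below are placeholders, not files shipped with this
# repo). It shows how the datasets above pair with their collate
# functions in a torch DataLoader.
#
# from torch.utils.data import DataLoader
#
# mlm = MLM("data/train.npz", max_tokens=30000, epoch_size=1000)
# mlm_loader = DataLoader(mlm, batch_size=8, collate_fn=collate_fn)
#
# squad = SQuAD("data/train.npz")
# squad_loader = DataLoader(
#     squad,
#     batch_size=8,
#     collate_fn=squad.get_sliding_window_collate(stride=128, randomize=True),
# )
# ------------------------------------------------------------------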
{"hexsha": "e5e24f4df3a53974dd8e6ebe3b0c32217edbd205", "size": 13014, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/bpe_squad.py", "max_stars_repo_name": "jeffdshen/squad", "max_stars_repo_head_hexsha": "61ed2120fc06f5e33204200ac0f8d86d1da6f361", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/bpe_squad.py", "max_issues_repo_name": "jeffdshen/squad", "max_issues_repo_head_hexsha": "61ed2120fc06f5e33204200ac0f8d86d1da6f361", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/bpe_squad.py", "max_forks_repo_name": "jeffdshen/squad", "max_forks_repo_head_hexsha": "61ed2120fc06f5e33204200ac0f8d86d1da6f361", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6547945205, "max_line_length": 111, "alphanum_fraction": 0.5471031197, "include": true, "reason": "import numpy", "num_tokens": 3025}
-- Interseccion_con_su_union.lean
-- Intersection with its union
-- José A. Alonso Jiménez
-- Seville, April 26, 2022
-- ---------------------------------------------------------------------

-- ---------------------------------------------------------------------
-- Prove that
--    s ∩ (s ∪ t) = s
-- ----------------------------------------------------------------------

import data.set.basic
import tactic

open set

variable  {α : Type}
variables s t : set α

-- 1st proof
-- =========

example : s ∩ (s ∪ t) = s :=
begin
  ext,
  split,
  { intro h,
    dsimp at h,
    exact h.left, },
  { intro xs,
    split,
    { exact xs, },
    { left,
      exact xs, }},
end

-- 2nd proof
-- =========

example : s ∩ (s ∪ t) = s :=
begin
  ext,
  split,
  { intro h,
    exact h.left, },
  { intro xs,
    split,
    { exact xs, },
    { exact or.inl xs, }},
end

-- 3rd proof
-- =========

example : s ∩ (s ∪ t) = s :=
begin
  ext,
  exact ⟨λ h, h.left, λ xs, ⟨xs, or.inl xs⟩,⟩,
end

-- 4th proof
-- =========

example : s ∩ (s ∪ t) = s :=
begin
  ext,
  exact ⟨and.left, λ xs, ⟨xs, or.inl xs⟩,⟩,
end

-- 5th proof
-- =========

example : s ∩ (s ∪ t) = s :=
begin
  ext,
  split,
  { rintros ⟨xs, -⟩,
    exact xs, },
  { intro xs,
    use xs,
    left,
    exact xs, },
end

-- 6th proof
-- =========

example : s ∩ (s ∪ t) = s :=
begin
  apply subset_antisymm,
  { rintros x ⟨xs, -⟩,
    exact xs, },
  { intros x xs,
    exact ⟨xs, or.inl xs⟩, },
end

-- 7th proof
-- =========

example : s ∩ (s ∪ t) = s :=
inf_sup_self

-- 8th proof
-- =========

example : s ∩ (s ∪ t) = s :=
by finish
{"author": "jaalonso", "repo": "Razonando-con-Lean", "sha": "d6e3fe9e384bdb6d8cc6ce4383d86c72bbcc154c", "save_path": "github-repos/lean/jaalonso-Razonando-con-Lean", "path": "github-repos/lean/jaalonso-Razonando-con-Lean/Razonando-con-Lean-d6e3fe9e384bdb6d8cc6ce4383d86c72bbcc154c/src/Interseccion_con_su_union.lean"}
[STATEMENT] lemma top_finfun_apply [simp]: "($) top = (\<lambda>_. top)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ($) top = (\<lambda>_. top) [PROOF STEP] by(auto simp add: top_finfun_def)
{"llama_tokens": 82, "file": "FinFun_FinFunPred", "length": 1}
[STATEMENT] theorem TBtheorem4a_notP2: assumes "\<not> ine Q E" and "subcomponents PQ = {P,Q}" and "correctCompositionIn PQ" and "ine_exprChannelSet P ChSet E" and "\<forall> (x ::chanID). ((x \<in> ChSet) \<longrightarrow> (x \<in> (loc PQ)))" shows "\<not> ine PQ E" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<not> ine PQ E [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: \<not> ine Q E subcomponents PQ = {P, Q} correctCompositionIn PQ ine_exprChannelSet P ChSet E \<forall>x. x \<in> ChSet \<longrightarrow> x \<in> loc PQ goal (1 subgoal): 1. \<not> ine PQ E [PROOF STEP] by (simp add: ine_def correctCompositionIn_def ine_exprChannelSet_def, auto)
{"llama_tokens": 293, "file": "CryptoBasedCompositionalProperties_Secrecy", "length": 2}
# Authors: Hugo Richard, Pierre Ablin
# License: BSD 3 clause

import numpy as np
import warnings
from scipy.linalg import expm
from .reduce_data import reduce_data
from ._permica import permica
from ._groupica import groupica
from time import time


def multiviewica(
    X,
    n_components=None,
    dimension_reduction="pca",
    noise=1.0,
    max_iter=1000,
    init="permica",
    random_state=None,
    tol=1e-3,
    verbose=False,
):
    """
    Performs MultiViewICA.
    It optimizes:
    :math:`l(W) = mean_t [sum_k log(cosh(Y_{avg}(t)[k])) + sum_i l_i(X_i(t))]`
    where
    :math:`l_i(X_i(t)) = - log(|W_i|) + 1/(2 noise) ||W_iX_i(t) - Y_{avg}(t)||^2`
    :math:`X_i` is the data of group i (ex: subject i)
    :math:`W_i` is the mixing matrix of subject i
    and
    :math:`Y_avg = mean_i W_i X_i`

    Parameters
    ----------
    X : np array of shape (n_groups, n_features, n_samples)
        Training vector, where n_groups is the number of groups,
        n_features is the number of features and n_samples is the
        number of samples.
    n_components : int, optional
        Number of components to extract.
        If None, no dimension reduction is performed
    dimension_reduction: str, optional
        if srm: use srm to reduce the data
        if pca: use group specific pca to reduce the data
    noise : float, optional
        Gaussian noise level
    max_iter : int, optional
        Maximum number of iterations to perform
    init : str or np array of shape (n_groups, n_components, n_components)
        If permica: initialize with perm ICA, if groupica, initialize with
        group ica. Else, use the provided array to initialize.
    random_state : int, RandomState instance or None, optional (default=None)
        Used to perform a random initialization. If int, random_state is
        the seed used by the random number generator; If RandomState
        instance, random_state is the random number generator; If
        None, the random number generator is the RandomState instance
        used by np.random.
    tol : float, optional
        A positive scalar giving the tolerance at which
        the un-mixing matrices are considered to have converged.
    verbose : bool, optional
        Print information

    Returns
    -------
    P : np array of shape (n_groups, n_components, n_features)
        P is the projection matrix that projects data in reduced space
    W : np array of shape (n_groups, n_components, n_components)
        Estimated un-mixing matrices
    S : np array of shape (n_components, n_samples)
        Estimated source

    See also
    --------
    groupica
    permica
    """
    P, X = reduce_data(
        X, n_components=n_components, dimension_reduction=dimension_reduction
    )
    # Initialization
    if type(init) is str:
        if init not in ["permica", "groupica"]:
            raise ValueError("init should either be permica or groupica")
        if init == "permica":
            _, W, S = permica(
                X, max_iter=max_iter, random_state=random_state, tol=tol
            )
        else:
            _, W, S = groupica(
                X, max_iter=max_iter, random_state=random_state, tol=tol
            )
    else:
        if type(init) is not np.ndarray:
            raise TypeError("init should be a numpy array")
        W = init
    # Performs multiview ica
    W, S = _multiview_ica_main(
        X,
        noise=noise,
        n_iter=max_iter,
        tol=tol,
        init=W,
        verbose=verbose,
    )
    return P, W, S


def _logcosh(X):
    Y = np.abs(X)
    return Y + np.log1p(np.exp(-2 * Y))


def _multiview_ica_main(
    X_list,
    noise=1.0,
    n_iter=1000,
    tol=1e-6,
    verbose=False,
    init=None,
    ortho=False,
    return_gradients=False,
    timing=False,
):
    tol_init = None
    if tol > 0 and tol_init is None:
        tol_init = tol

    if tol == 0 and tol_init is None:
        tol_init = 1e-6

    # Turn list into an array to make it compatible with the rest of the code
    if type(X_list) == list:
        X_list = np.array(X_list)

    # Init
    n_pb, p, n = X_list.shape
    basis_list = init.copy()
    Y_avg = np.mean([np.dot(W, X) for W, X in zip(basis_list, X_list)], axis=0)

    # Start scaling
    g_norms = 0
    g_list = []
    for i in range(n_iter):
        g_norms = 0
        # Start inner loop: decrease the loss w.r.t each W_j
        convergence = False
        for j in range(n_pb):
            X = X_list[j]
            W_old = basis_list[j].copy()
            # Y_denoise is the estimate of the sources without Y_j
            Y_denoise = Y_avg - W_old.dot(X) / n_pb
            # Perform one ICA quasi-Newton step
            converged, basis_list[j], g_norm = _noisy_ica_step(
                W_old, X, Y_denoise, noise, n_pb, ortho, scale=True
            )
            convergence = convergence or converged
            # Update the average vector (estimate of the sources)
            Y_avg += np.dot(basis_list[j] - W_old, X) / n_pb
            g_norms = max(g_norm, g_norms)

        # If the line search does not converge for any subject we stop there
        if convergence is False:
            break

        if verbose:
            print(
                "it %d, loss = %.4e, g=%.4e"
                % (
                    i + 1,
                    _loss_total(basis_list, X_list, Y_avg, noise),
                    g_norms,
                )
            )
        if g_norms < tol_init:
            break

    # Start outer loop
    if timing:
        t0 = time()
        timings = []
    g_norms = 0
    for i in range(n_iter):
        g_norms = 0
        convergence = False
        # Start inner loop: decrease the loss w.r.t each W_j
        for j in range(n_pb):
            X = X_list[j]
            W_old = basis_list[j].copy()
            # Y_denoise is the estimate of the sources without Y_j
            Y_denoise = Y_avg - W_old.dot(X) / n_pb
            # Perform one ICA quasi-Newton step
            converged, basis_list[j], g_norm = _noisy_ica_step(
                W_old, X, Y_denoise, noise, n_pb, ortho
            )
            # Update the average vector (estimate of the sources)
            Y_avg += np.dot(basis_list[j] - W_old, X) / n_pb
            g_norms = max(g_norm, g_norms)
            convergence = converged or convergence
        if convergence is False:
            break
        g_list.append(g_norms)
        if timing:
            timings.append(
                (
                    i,
                    time() - t0,
                    _loss_total(basis_list, X_list, Y_avg, noise),
                    g_norms,
                )
            )
        if verbose:
            print(
                "it %d, loss = %.4e, g=%.4e"
                % (
                    i + 1,
                    _loss_total(basis_list, X_list, Y_avg, noise),
                    g_norms,
                )
            )

        if g_norms < tol:
            break

    else:
        warnings.warn(
            "Multiview ICA has not converged - gradient norm: %e " % g_norms
        )

    if return_gradients:
        return basis_list, Y_avg, g_list

    if timing:
        return basis_list, Y_avg, timings

    return basis_list, Y_avg


def _loss_total(basis_list, X_list, Y_avg, noise):
    n_pb, p, _ = basis_list.shape
    loss = np.mean(_logcosh(Y_avg)) * p
    for i, (W, X) in enumerate(zip(basis_list, X_list)):
        Y = W.dot(X)
        loss -= np.linalg.slogdet(W)[1]
        loss += 1 / (2 * noise) * np.mean((Y - Y_avg) ** 2) * p
    return loss


def _loss_partial(W, X, Y_denoise, noise, n_pb):
    p, _ = W.shape
    Y = np.dot(W, X)
    loss = -np.linalg.slogdet(W)[1]
    loss += np.mean(_logcosh(Y / n_pb + Y_denoise)) * p
    fact = (1 - 1 / n_pb) / (2 * noise)
    loss += fact * np.mean((Y - n_pb * Y_denoise / (n_pb - 1)) ** 2) * p
    return loss


def _noisy_ica_step(
    W,
    X,
    Y_denoise,
    noise,
    n_pb,
    ortho,
    lambda_min=0.001,
    n_ls_tries=50,
    scale=False,
):
    """
    ICA minimization using a quasi-Newton method. Used in the inner loop.

    Returns
    -------
    converged: bool
        True if the line search has converged
    new_W: np array of shape (p, p)
        New values for the basis
    g_norm: float
    """
    p, n = X.shape
    loss0 = _loss_partial(W, X, Y_denoise, noise, n_pb)
    Y = W.dot(X)
    Y_avg = Y / n_pb + Y_denoise

    # Compute relative gradient and Hessian
    thM = np.tanh(Y_avg)
    G = np.dot(thM, Y.T) / n / n_pb
    # print(G)
    const = 1 - 1 / n_pb
    res = Y - Y_denoise / const
    G += np.dot(res, Y.T) * const / noise / n
    G -= np.eye(p)
    if scale:
        G = np.diag(np.diag(G))
    # print(G)
    if ortho:
        G = 0.5 * (G - G.T)
    g_norm = np.max(np.abs(G))

    # These are the terms H_{ijij} of the approximated hessian
    # (approximation H2 in Pierre's thesis)
    h = np.dot((1 - thM ** 2) / n_pb ** 2 + const / noise, (Y ** 2).T) / n

    # Regularize
    discr = np.sqrt((h - h.T) ** 2 + 4.0)
    eigenvalues = 0.5 * (h + h.T - discr)
    problematic_locs = eigenvalues < lambda_min
    np.fill_diagonal(problematic_locs, False)
    i_pb, j_pb = np.where(problematic_locs)
    h[i_pb, j_pb] += lambda_min - eigenvalues[i_pb, j_pb]
    # Compute Newton's direction
    det = h * h.T - 1
    direction = (h.T * G - G.T) / det
    if ortho:
        direction = 0.5 * (direction - direction.T)
    # print(direction)
    # Line search
    step = 1
    for j in range(n_ls_tries):
        if ortho:
            new_W = expm(-step * direction).dot(W)
        else:
            new_W = W - step * direction.dot(W)
        new_loss = _loss_partial(new_W, X, Y_denoise, noise, n_pb)
        if new_loss < loss0:
            return True, new_W, g_norm
        else:
            step /= 2.0
    return False, W, g_norm
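# ------------------------------------------------------------------
# Minimal usage sketch (added note, kept as comments because this
# module uses relative imports): random data stands in for real
# recordings, and the shapes follow the docstring of `multiviewica`
# above, i.e. (n_groups, n_features, n_samples).
#
# import numpy as np
# rng = np.random.RandomState(0)
# X = rng.randn(3, 5, 1000)                    # 3 groups, 5 features
# P, W, S = multiviewica(X, n_components=3, random_state=0)
# # P: (3, 3, 5) projections, W: (3, 3, 3) unmixings, S: (3, 1000) sources
# ------------------------------------------------------------------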
{"hexsha": "f6df7a7bc02234555917b783e359e868a1657c8e", "size": 9841, "ext": "py", "lang": "Python", "max_stars_repo_path": "multiviewica/_multiviewica.py", "max_stars_repo_name": "hugorichard/multiviewica", "max_stars_repo_head_hexsha": "54405b6adf66c9aec1f40dda2ef9c355aadec8f9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2020-06-15T13:47:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T21:28:26.000Z", "max_issues_repo_path": "multiviewica/_multiviewica.py", "max_issues_repo_name": "ApuAmp/multiviewica", "max_issues_repo_head_hexsha": "54405b6adf66c9aec1f40dda2ef9c355aadec8f9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-16T09:25:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-16T22:15:12.000Z", "max_forks_repo_path": "multiviewica/_multiviewica.py", "max_forks_repo_name": "ApuAmp/multiviewica", "max_forks_repo_head_hexsha": "54405b6adf66c9aec1f40dda2ef9c355aadec8f9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-16T09:15:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T23:11:39.000Z", "avg_line_length": 30.3734567901, "max_line_length": 81, "alphanum_fraction": 0.5671171629, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2724}
from PIL import Image from torchvision import transforms from torchvision.datasets import CIFAR10, Omniglot # + import cv2 import numpy as np from torchvision.datasets.utils import check_integrity, list_dir, list_files from os.path import join # - # np.random.seed(0) class GaussianBlur(object): # Implements Gaussian blur as described in the SimCLR paper def __init__(self, kernel_size, min=0.1, max=2.0): self.min = min self.max = max # kernel size is set to be 10% of the image height/width self.kernel_size = kernel_size def __call__(self, sample): sample = np.array(sample) # blur the image with a 50% chance prob = np.random.random_sample() if prob < 0.5: sigma = (self.max - self.min) * np.random.random_sample() + self.min sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma) return sample class CIFAR10Pair(CIFAR10): """CIFAR10 Dataset. """ def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = Image.fromarray(img) if self.transform is not None: pos_1 = self.transform(img) pos_2 = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return pos_1, pos_2, target class Our_Omniglot(Omniglot): ''' The code is adapted from https://github.com/pytorch/vision/blob/master/torchvision/datasets/omniglot.py [Usage] contrastive_training_data = Our_Omniglot(root='data', background=True, transform=None, character_target_transform=None, alphabet_target_transform=None, download=True, contrast_training=True) classifier_train_data = Our_Omniglot(root='data', background=False, transform=None, character_target_transform=None, alphabet_target_transform=None, download=True, eval_split_train=True, out_character=False, contrast_training=False) classifier_test_data = Our_Omniglot(root='data', background=False, transform=None, character_target_transform=None, alphabet_target_transform=None, download=True, eval_split_train=False, out_character=False, contrast_training=False) ''' def __init__(self, root, background=True, transform=None, character_target_transform=None, alphabet_target_transform=None, download=False, eval_split_train=True, out_character=False, contrast_training=True): super(Omniglot, self).__init__(join(root, self.folder), transform=transform, target_transform=character_target_transform) self.background = background if download: self.download() if not self._check_integrity(): raise RuntimeError('Dataset not found or corrupted.' 
+ ' You can use download=True to download it') self.character_target_transform = character_target_transform self.alphabet_target_transform = alphabet_target_transform self.target_folder = join(self.root, self._get_target_folder()) self._alphabets = list_dir(self.target_folder) self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets], []) self._character_images = [[(image, idx, self._alphabets.index(character.split('/')[0])) for image in list_files(join(self.target_folder, character), '.png')] for idx, character in enumerate(self._characters)] self._flat_character_images = sum(self._character_images, []) self.contrast_training = contrast_training # we adopt contrastive training in the background split if self.contrast_training: # 20 samples per character self._flat_character_images = np.array(self._flat_character_images).reshape(-1,20,3) self.out_character = out_character # we adopt standard classification training in the evaluation split else: # 20 samples per character self._flat_character_images = np.array(self._flat_character_images).reshape(-1,20,3) if eval_split_train: self._flat_character_images = self._flat_character_images[:,:5,:] else: self._flat_character_images = self._flat_character_images[:,5:,:] self._flat_character_images = self._flat_character_images.reshape(-1,3) self.out_character = out_character if self.out_character: self.targets = self._flat_character_images[:,1].astype(np.int64) else: self.targets = self._flat_character_images[:,2].astype(np.int64) def __getitem__(self, index): """ Args: index (int): Index Returns: when contrastive training: tuple: (image0, image1) image0 and image1 are belong to the same class when not contrastive training: tuple: (image, character_target, alphabet_target) where character_target is index of the target character class and alphabet_target is index of the target alphabet class. 
""" if self.contrast_training: random_idx = np.random.randint(20, size=2) image_name_0, character_class_0, alphabet_class_0 = self._flat_character_images[index,random_idx[0]] character_class_0, alphabet_class_0 = int(character_class_0), int(alphabet_class_0) image_name_1, character_class_1, alphabet_class_1 = self._flat_character_images[index,random_idx[1]] character_class_1, alphabet_class_1 = int(character_class_1), int(alphabet_class_1) image_path_0 = join(self.target_folder, self._characters[character_class_0], image_name_0) image_0 = Image.open(image_path_0, mode='r').convert('L') image_path_1 = join(self.target_folder, self._characters[character_class_1], image_name_1) image_1 = Image.open(image_path_1, mode='r').convert('L') if self.transform: image_0 = self.transform(image_0) image_1 = self.transform(image_1) if self.character_target_transform: character_class_0 = self.character_target_transform(character_class_0) # character_class_1 = self.character_target_transform(character_class_1) if self.alphabet_target_transform: alphabet_class_0 = self.alphabet_target_transform(alphabet_class_0) # alphabet_class_1 = self.alphabet_target_transform(alphabet_class_1) if self.out_character: return image_0, image_1, character_class_0#, character_class_1, alphabet_class_0, alphabet_class_1 else: return image_0, image_1, alphabet_class_0#, character_class_1, alphabet_class_0, alphabet_class_1 else: image_name, character_class, alphabet_class = self._flat_character_images[index] character_class, alphabet_class = int(character_class), int(alphabet_class) image_path = join(self.target_folder, self._characters[character_class], image_name) image = Image.open(image_path, mode='r').convert('L') if self.transform: image = self.transform(image) if self.character_target_transform: character_class = self.character_target_transform(character_class) if self.alphabet_target_transform: alphabet_class = self.alphabet_target_transform(alphabet_class) if self.out_character: return image, character_class else: return image, alphabet_class class Our_Omniglot_v2(Omniglot): ''' The code is adapted from https://github.com/pytorch/vision/blob/master/torchvision/datasets/omniglot.py [Usage] contrastive_training_data = Our_Omniglot(root='data', background=True, transform=None, character_target_transform=None, alphabet_target_transform=None, download=True, contrast_training=True) classifier_train_data = Our_Omniglot(root='data', background=False, transform=None, character_target_transform=None, alphabet_target_transform=None, download=True, eval_split_train=True, out_character=False, contrast_training=False) classifier_test_data = Our_Omniglot(root='data', background=False, transform=None, character_target_transform=None, alphabet_target_transform=None, download=True, eval_split_train=False, out_character=False, contrast_training=False) ''' def __init__(self, root, background=True, transform=None, character_target_transform=None, alphabet_target_transform=None, download=False, eval_split_train=True, out_character=True, contrast_training=True): super(Omniglot, self).__init__(join(root, self.folder), transform=transform, target_transform=character_target_transform) self.background = background if download: self.download() if not self._check_integrity(): raise RuntimeError('Dataset not found or corrupted.' 
+ ' You can use download=True to download it') self.character_target_transform = character_target_transform self.alphabet_target_transform = alphabet_target_transform self.target_folder = join(self.root, self._get_target_folder()) self._alphabets = list_dir(self.target_folder) self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets], []) self._character_images = [[(image, idx, self._alphabets.index(character.split('/')[0])) for image in list_files(join(self.target_folder, character), '.png')] for idx, character in enumerate(self._characters)] self._flat_character_images = sum(self._character_images, []) self.contrast_training = contrast_training # 20 samples per character self._flat_character_images = np.array(self._flat_character_images).reshape(-1,20,3) if eval_split_train: self._flat_character_images = self._flat_character_images[:,:5,:] else: self._flat_character_images = self._flat_character_images[:,5:,:] self._flat_character_images = self._flat_character_images.reshape(-1,3) self.out_character = out_character if self.out_character: self.targets = self._flat_character_images[:,1].astype(np.int64) else: self.targets = self._flat_character_images[:,2].astype(np.int64) def __getitem__(self, index): """ Args: index (int): Index Returns: when contrastive training: tuple: (image0, image1) image0 and image1 are the same image with different image augmentations when not contrastive training: tuple: (image, character_target, alphabet_target) where character_target is index of the target character class and alphabet_target is index of the target alphabet class. """ image_name, character_class, alphabet_class = self._flat_character_images[index] character_class, alphabet_class = int(character_class), int(alphabet_class) image_path = join(self.target_folder, self._characters[character_class], image_name) image = Image.open(image_path, mode='r').convert('L') if self.character_target_transform: character_class = self.character_target_transform(character_class) if self.alphabet_target_transform: alphabet_class = self.alphabet_target_transform(alphabet_class) if self.contrast_training: if self.transform: image_0 = self.transform(image) image_1 = self.transform(image) if self.out_character: return image_0, image_1, character_class else: return image_0, image_1, alphabet_class else: if self.transform: image = self.transform(image) if self.out_character: return image, character_class else: return image, alphabet_class # GausssianBlur is False for CIFAR10 train_transform = transforms.Compose([ transforms.RandomResizedCrop(32), transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), #GaussianBlur(kernel_size=int(0.1 * self.input_shape[0])), transforms.ToTensor(), #transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]) ]) omniglot_train_transform = transforms.Compose([ transforms.RandomAffine(degrees=10.0, translate=(0.1, 0.1)), #transforms.RandomResizedCrop(105, scale=(0.85, 1.0), ratio=(0.8, 1.25)), #transforms.RandomResizedCrop(56, scale=(0.85, 1.0), ratio=(0.8, 1.25)), transforms.RandomResizedCrop(28, scale=(0.85, 1.0), ratio=(0.8, 1.25)), transforms.ToTensor(), lambda x: 1. 
    - x,
])

test_transform = transforms.Compose([
    transforms.ToTensor(),
    #transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])

omniglot_test_transform = transforms.Compose([
    #transforms.Resize(105),
    #transforms.Resize(56),
    transforms.Resize(28),
    transforms.ToTensor(),
    lambda x: 1. - x,
])

{"hexsha": "22db97ea2b82d2f49183e4569931a0da0db0aff6", "size": 14551, "ext": "py", "lang": "Python", "max_stars_repo_path": "Omniglot/utils.py", "max_stars_repo_name": "yaohungt/Demystifying_Self_Supervised_Learning", "max_stars_repo_head_hexsha": "e3de6b90b22215742c6515ef676e193c95234b04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2020-06-11T18:43:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-17T07:24:12.000Z", "max_issues_repo_path": "Omniglot/utils.py", "max_issues_repo_name": "yaohungt/Self_Supervised_Learning_Multiview", "max_issues_repo_head_hexsha": "e3de6b90b22215742c6515ef676e193c95234b04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-20T12:13:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-17T14:23:55.000Z", "max_forks_repo_path": "Omniglot/utils.py", "max_forks_repo_name": "yaohungt/Self_Supervised_Learning_Multiview", "max_forks_repo_head_hexsha": "e3de6b90b22215742c6515ef676e193c95234b04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-05T06:19:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T08:47:34.000Z", "avg_line_length": 47.3973941368, "max_line_length": 138, "alphanum_fraction": 0.6279293519, "include": true, "reason": "import numpy", "num_tokens": 2985}
from styx_msgs.msg import TrafficLight
import rospy
import numpy as np


class TLClassifier(object):
    def __init__(self):
        #TODO load classifier
        pass

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        #TODO implement light color prediction
        area_threshold = 79

        # red channel (cv::Mat images are BGR, so index 2 is red)
        red_img = image[:, :, 2]
        # green channel
        green_img = image[:, :, 1]

        # count pixels sitting at each channel's maximum as the "lit" area
        red_area = np.sum(red_img == red_img.max())
        green_area = np.sum(green_img == green_img.max())

        prediction = TrafficLight.UNKNOWN
        if red_area >= area_threshold and green_area <= area_threshold:
            prediction = TrafficLight.RED
        elif red_area >= area_threshold and green_area >= area_threshold:
            # float() guards against integer division under Python 2
            ratio = float(red_area) / green_area
            prediction = TrafficLight.YELLOW if 0.8 <= ratio <= 1.2 else TrafficLight.RED
        elif green_area >= area_threshold:
            prediction = TrafficLight.GREEN
        else:
            prediction = TrafficLight.UNKNOWN

        if prediction == TrafficLight.RED:
            rospy.logwarn("RED!")

        return prediction
{"hexsha": "0368d260bb9437957bbd27e9dc9fb79f72e02346", "size": 1395, "ext": "py", "lang": "Python", "max_stars_repo_path": "ros/src/tl_detector/light_classification/tl_classifier.py", "max_stars_repo_name": "OctopusNO1/CarND-Capstone-master", "max_stars_repo_head_hexsha": "39f153cb0bf09bfda0a455864bd5a61c6a45501a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-12T07:25:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-12T07:25:55.000Z", "max_issues_repo_path": "ros/src/tl_detector/light_classification/tl_classifier.py", "max_issues_repo_name": "OctopusNO1/CarND-Capstone-master", "max_issues_repo_head_hexsha": "39f153cb0bf09bfda0a455864bd5a61c6a45501a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ros/src/tl_detector/light_classification/tl_classifier.py", "max_forks_repo_name": "OctopusNO1/CarND-Capstone-master", "max_forks_repo_head_hexsha": "39f153cb0bf09bfda0a455864bd5a61c6a45501a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6808510638, "max_line_length": 105, "alphanum_fraction": 0.6207885305, "include": true, "reason": "import numpy", "num_tokens": 292}
#!/usr/bin/env python2 # Copyright (c) 2011 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Client to control DUT hardware connected to servo debug board """ import collections import logging import optparse import pkg_resources import sys import time from socket import error as SocketError import numpy import multiservo import client class ControlError(Exception): pass VERSION = pkg_resources.require('servo')[0].version # used to aid sorting of dict keys KEY_PREFIX = '__' STATS_PREFIX = '@@' GNUPLOT_PREFIX = '##' # dict key for tracking sampling time TIME_KEY = KEY_PREFIX + 'sample_msecs' def _parse_args(): """Parse commandline arguments. Note, reads sys.argv directly Returns: tuple (options, args) as described by optparse.OptionParser.parse_args() method """ description = ( "%prog allows users to set and get various controls on a DUT system via" " the servo debug & control board. This client communicates to the board" " via a socket connection to the servo server." ) examples = ( "\nExamples:\n" " %prog\n\tgets value for all controls\n" " %prog -v\n\tgets value for all controls verbosely\n" " %prog i2c_mux\n\tgets value for 'i2c_mux' control\n" "\tif the exact control name is not found, " "a list of similar controls is printed\n" " %prog -r 100 i2c_mux\n\tgets value for 'i2c_mux' control 100 times\n" " %prog -t 2 loc_0x40_mv\n\tgets value for 'loc_0x40_mv' control for 2 " "seconds\n" " %prog -y -t 2 loc_0x40_mv\n\tgets value for 'loc_0x40_mv' control for " "2 seconds and prepends time in seconds to results\n" " %prog -g -y -t 2 loc_0x40_mv loc_0x41_mv\n" "\tgets value for 'loc_0x4[0|1]_mv' control for 2 seconds with gnuplot " "style" " %prog -z 100 -t 2 loc_0x40_mv\n\tgets value for 'loc_0x40_mv' control " "for 2 seconds sampling every 100ms\n" " %prog -v i2c_mux\n\tgets value for 'i2c_mux' control verbosely\n" " %prog i2c_mux:remote_adcs\n\tsets 'i2c_mux' to value 'remote_adcs'\n" ) parser = optparse.OptionParser(version="%prog "+VERSION) parser.description = description parser.add_option("-s", "--server", help="host where servod is running", default=client.DEFAULT_HOST) parser.add_option("-p", "--port", help="port where servod is listening", default=None) parser.add_option("-v", "--verbose", help="show verbose info about controls", action="store_true", default=False) parser.add_option("-i", "--info", help="show info about controls", action="store_true", default=False) parser.add_option("-r", "--repeat", type=int, help="repeat requested command multiple times", default=1) parser.add_option("-t", "--time_in_secs", help="repeat requested command for " + "this many seconds", type='float', default=0.0) parser.add_option("-z", "--sleep_msecs", help="sleep for this many " + "milliseconds between queries", type='float', default=0.0) parser.add_option("-y", "--print_time", help="print time in seconds with " + "queries to stdout", action="store_true", default=False) parser.add_option("-g", "--gnuplot", help="gnuplot style to stdout. 
Implies " "print_time", action="store_true", default=False) parser.add_option("--hwinit", help="Initialize controls to their POR/safe " "state", action="store_true", default=False) parser.add_option("-d", "--debug", help="enable debug messages", action="store_true", default=False) multiservo.add_multiservo_parser_options(parser) parser.set_usage(parser.get_usage() + examples) return parser.parse_args() def display_table(table, prefix): """Display a two-dimensional array ( list-of-lists ) as a table. The table will be spaced out. >>> table = [['aaa', 'bbb'], ['1', '2222']] >>> display_table(table) @@ aaa bbb @@ 1 2222 >>> display_table(table, prefix='%') % aaa bbb % 1 2222 >>> table = [['a']] >>> display_table(table) @@ a >>> table = [] >>> display_table(table) >>> table = [[]] >>> display_table(table) >>> table = [['a'], ['1', '2']] >>> display_table(table) Traceback (most recent call last): ... IndexError: list index out of range >>> table = [['a', 'b'], ['1']] >>> display_table(table) Traceback (most recent call last): ... IndexError: list index out of range >>> table = [['aaa', 'bbb', 'c'], ['1', '2222', '0']] >>> display_table(table) @@ aaa bbb c @@ 1 2222 0 Args: table: A two-dimensional array (list of lists) to show. prefix: All lines will be prefixed with this and a space. """ if len(table) == 0 or len(table[0]) == 0: return max_col_width = [] for col_idx in xrange(len(table[0])): col_item_widths = [len(row[col_idx]) for row in table] max_col_width.append(max(col_item_widths)) for row in table: out_str = '' for i in xrange(len(row)): out_str += row[i].rjust(max_col_width[i] + 2) print prefix, out_str def display_stats(stats, prefix=STATS_PREFIX): """Display various statistics for data captured in a table. >>> stats = {} >>> stats[TIME_KEY] = [50.0, 25.0, 40.0, 10.0] >>> stats['frobnicate'] = [11.5, 9.0] >>> stats['foobar'] = [11111.0, 22222.0] >>> display_stats(stats) @@ NAME COUNT AVERAGE STDDEV MAX MIN @@ sample_msecs 4 31.25 15.16 50.00 10.00 @@ foobar 2 16666.50 5555.50 22222.00 11111.00 @@ frobnicate 2 10.25 1.25 11.50 9.00 Args: stats: A dictionary of stats to show. Key is name of result and value is a list of floating point values to show stats for. See doctest. Any key starting with '__' will be sorted first and have its prefix stripped. prefix: All lines will be prefixed with this and a space. """ table = [['NAME', 'COUNT', 'AVERAGE', 'STDDEV', 'MAX', 'MIN']] for key in sorted(stats.keys()): if stats[key]: stats_np = numpy.array(stats[key]) disp_key = key.lstrip(KEY_PREFIX) row = [disp_key, str(len(stats_np))] row.append("%.2f" % stats_np.mean()) row.append("%.2f" % stats_np.std()) row.append("%.2f" % stats_np.max()) row.append("%.2f" % stats_np.min()) table.append(row) display_table(table, prefix) def timed_loop(time_in_secs): """Pause for time_in_secs.""" start_time = time.time() secs_so_far = 0.0 while secs_so_far <= time_in_secs: yield secs_so_far secs_so_far = time.time() - start_time def _print_gnuplot_header(control_args): """Prints gnuplot header. Args: control_args: list of controls to get or set Note, calls sys.exit() """ hdr = [] # Don't put setting of controls into gnuplot output hdr.extend(arg for arg in control_args if ':' not in arg) if not hdr: logging.critical("Can't use --gnuplot without supplying controls to read " "on command line") sys.exit(-1) print GNUPLOT_PREFIX + ' seconds ' + ' seconds '.join(hdr) def do_iteration(requests, options, sclient, stats): """Perform one iteration across the controls. 
Args: requests: list of strings to make requests to servo about Example = ['dev_mode', 'dev_mode:on', 'dev_mode'] options: optparse object options sclient: ServoRequest object stats: dict of key=control name, value=control value for stats calcs Returns: out_str: results string from iteration based on formats in options """ results = [] out_list = [] time_str = '' sample_start = time.time() if options.info: for request_str in requests: control = request_str if ':' in request_str: logging.warn("Ignoring %s, can't perform set with --info", request_str) continue results.append(sclient.doc(control)) else: results = sclient.set_get_all(requests) if options.print_time: time_str = "%.4f " % (time.time() - _start_time) for i, result in enumerate(results): control = requests[i] if options.info: request_type = 'doc' elif ':' in control: request_type = 'set' else: request_type = 'get' try: stats[control].append(float(result)) except ValueError: pass if options.verbose: out_list.append("%s%s %s -> %s" % (time_str, request_type.upper(), control, result)) elif request_type is not 'set': if options.gnuplot: out_list.append("%s%s" % (time_str, result)) else: out_list.append("%s%s:%s" % (time_str, control, result)) # format of gnuplot is <seconds_val1> <val1> <seconds_val2> <val2> ... such # that plotting can then be done with time on x-axis, value on y-axis. For # example, this # command would plot two values across time # plot "file.out" using 1:2 with linespoint # replot "file.out" using 3:4 with linespoint if options.gnuplot: out_str = " ".join(out_list) else: out_str = "\n".join(out_list) iter_time_msecs = (time.time() - sample_start) * 1000 stats[TIME_KEY].append(iter_time_msecs) if options.sleep_msecs: if iter_time_msecs < options.sleep_msecs: time.sleep((options.sleep_msecs - iter_time_msecs) / 1000) return out_str def iterate(controls, options, sclient): """Perform iterations on various controls. 
Args: controls: list of controls to iterate over options: optparse object options sclient: ServoRequest object """ if options.gnuplot: options.print_time = True _print_gnuplot_header(controls) stats = collections.defaultdict(list) if options.time_in_secs > 0: iterate_over = timed_loop(options.time_in_secs) else: iterate_over = xrange(options.repeat) for _ in iterate_over: iter_output = do_iteration(controls, options, sclient, stats) if iter_output: # Avoid printing empty lines print iter_output if (options.repeat != 1) or (options.time_in_secs > 0): prefix = STATS_PREFIX if options.gnuplot: prefix = GNUPLOT_PREFIX display_stats(stats, prefix=prefix) def real_main(): (options, args) = _parse_args() loglevel = logging.INFO if options.debug: loglevel = logging.DEBUG logging.basicConfig(level=loglevel, format="%(asctime)s - %(name)s - " + "%(levelname)s - %(message)s") logger = logging.getLogger() multiservo.get_env_options(logger, options) rc = multiservo.parse_rc(logger, options.rcfile) if not options.port: if options.name: if options.name not in rc: raise ControlError('%s not in the config file' % options.name) options.port = int(rc.get(options.name)['port']) if not options.port: raise ControlError('unknown port for %s' % options.name) else: options.port = client.DEFAULT_PORT if options.verbose and options.gnuplot: logging.critical("Can't use --verbose with --gnuplot") sys.exit(-1) if options.info and options.hwinit: logging.critical("Can't use --hwinit with --info") sys.exit(-1) sclient = client.ServoClient(host=options.server, port=options.port, verbose=options.verbose) global _start_time _start_time = time.time() # Perform 1st in order to allow user to then override below if options.hwinit: sclient.hwinit() # all done, don't read all controls if not len(args): return if not len(args) and options.info: # print all the doc info for the controls print sclient.doc_all() elif not len(args): print sclient.get_all() else: if not ':' in ' '.join(args): # Sort args only if none of them sets values - otherwise the order is # important. args = sorted(args) iterate(args, options, sclient) def main(): try: real_main() except KeyboardInterrupt: sys.exit(0) except (client.ServoClientError, ControlError) as e: sys.stderr.write(e.message + '\n') sys.exit(1) except SocketError as e: sys.stderr.write(e.strerror + '\n') sys.exit(1) # global start time for script _start_time = 0 if __name__ == '__main__': main()
{"hexsha": "9f2311a2ea0cfb59dce51213e7f39aea8869b020", "size": 12436, "ext": "py", "lang": "Python", "max_stars_repo_path": "servo/dut_control.py", "max_stars_repo_name": "mmind/servo-hdctools", "max_stars_repo_head_hexsha": "c7d50190837497dafc45f6efe18bf01d6e70cfd2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-25T22:44:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-26T22:29:20.000Z", "max_issues_repo_path": "servo/dut_control.py", "max_issues_repo_name": "mmind/servo-hdctools", "max_issues_repo_head_hexsha": "c7d50190837497dafc45f6efe18bf01d6e70cfd2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "servo/dut_control.py", "max_forks_repo_name": "mmind/servo-hdctools", "max_forks_repo_head_hexsha": "c7d50190837497dafc45f6efe18bf01d6e70cfd2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4699738903, "max_line_length": 80, "alphanum_fraction": 0.6437761338, "include": true, "reason": "import numpy", "num_tokens": 3330}
import random import threading import numpy as np import sqlite3 import pickle from contextlib import closing from blist import sortedlist import time from rl import AsyncMethodExecutor class DataPacket(object): def __init__(self): self.data = None class ExperienceReplay(object): def __init__( self, max_size=100, sample_size=32, should_pop_oldest=True, database_file='memory.db', table_name='memory', reuse_db=True, verbose=False, ): self.max_size = max_size self.sample_size = sample_size self._time = 0 self._should_pop_oldest = should_pop_oldest self._size = 0 self._verbose = verbose self._table_name = table_name self._ids = sortedlist() self._ids_idx = [] self._last_query_data = None self._last_query_time = 0. self._db_thread = AsyncMethodExecutor() self._db_thread.start() self._db_lock = threading.Event() self._db_lock.clear() self.log("Database initialization started.") self._db_thread.run_on_thread( self._init_db, database_file, reuse_db ) self._db_lock.wait() self.log("Database initialization complete.") def log(self, *args, **kwargs): if self._verbose: print(*args, **kwargs) def _init_db(self, database_file, reuse_db): self._database_connection = sqlite3.connect(database_file, isolation_level=None) with closing(self._database_connection.cursor()) as cursor: if not reuse_db: cursor.execute('DROP TABLE IF EXISTS %s' % self._table_name) cursor.execute( 'CREATE TABLE IF NOT EXISTS %s (' 'id INTEGER PRIMARY KEY AUTOINCREMENT,' 'state blob,' 'reward blob,' 'chosen_action blob,' 'next_state blob,' 'is_final_state blob)' % self._table_name ) data = cursor.execute('SELECT id FROM %s' % self._table_name).fetchall() ids = [row[0] for row in data] self._ids = sortedlist(ids) self._ids_idx = list(range(len(self._ids))) self.log("Initial memory size:", len(self._ids)) self._db_lock.set() def is_ready(self): return len(self._ids) >= self.sample_size def size(self): return len(self._ids) def is_full(self): return len(self._ids) == self.max_size @staticmethod def _to_sqlite_blob(obj): return sqlite3.Binary(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)) @staticmethod def _to_python_obj(blob): return pickle.loads(blob) def _add(self, state, reward, action, next_state, is_final_state, scores): start_time = time.process_time() with closing(self._database_connection.cursor()) as cursor: if self.is_full(): id_to_delete = self._ids[0] if not self._should_pop_oldest: id_to_delete = self._ids[random.randint(0, len(self._ids) - 1)] cursor.execute('DELETE FROM %s WHERE id = ?;' % self._table_name, (id_to_delete, )) self._ids.remove(id_to_delete) self._ids_idx.pop() else: self._size += 1 pickled_data = tuple(map(ExperienceReplay._to_sqlite_blob, (state, reward, action, next_state, is_final_state))) cursor.execute( 'INSERT INTO %s' '(state, reward, chosen_action, next_state, is_final_state)' 'VALUES (?, ?, ?, ?, ?)' % self._table_name, pickled_data ) self._ids.add(cursor.lastrowid) self._ids_idx.append(len(self._ids_idx)) end_time = time.process_time() self.log("Insert query time:", end_time - start_time) def add(self, state, reward, action, next_state, is_final_state, scores): self._db_thread.run_on_thread( self._add, state, reward, action, next_state, is_final_state, scores ) def _fetch_sample_from_db(self): start_time = time.process_time() sample_idx = random.sample(self._ids_idx, self.sample_size) ids = [self._ids[idx] for idx in sample_idx] id_list = ",".join(map(str, ids)) with closing(self._database_connection.cursor()) as cursor: sqlite_data = cursor.execute( 'SELECT * FROM %s WHERE id IN (%s)' % 
(self._table_name, id_list) ).fetchall() samples = [] for row in sqlite_data: memory_row = list(map( ExperienceReplay._to_python_obj, row[1:] )) samples.append(memory_row) self._last_query_data = samples end_time = time.process_time() self._last_query_time = end_time - start_time self._db_lock.set() def _fetch_data_async(self): self.log("Async fetching sample.") self._db_lock.clear() self._db_thread.run_on_thread( self._fetch_sample_from_db ) def get_sample_data(self): if not self.is_ready(): return start_time = time.process_time() self._db_lock.wait() end_time = time.process_time() self.log("Waiting for async fetch:", end_time - start_time) if self._last_query_data is None: self.log("Query not fetched. Started async fetch.") self._fetch_data_async() self._db_lock.wait() self.log("Query time:", self._last_query_time) self._fetch_data_async() self._time += 1 return self._last_query_data
{"hexsha": "4c5c8bea2c5fb1defc81544842801b61a2687f8b", "size": 5956, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl/ExperienceReplay.py", "max_stars_repo_name": "valiro21/MarioLearningCompany", "max_stars_repo_head_hexsha": "a8ffdafa70d735e609296b13b0aa9950f73cfb07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rl/ExperienceReplay.py", "max_issues_repo_name": "valiro21/MarioLearningCompany", "max_issues_repo_head_hexsha": "a8ffdafa70d735e609296b13b0aa9950f73cfb07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl/ExperienceReplay.py", "max_forks_repo_name": "valiro21/MarioLearningCompany", "max_forks_repo_head_hexsha": "a8ffdafa70d735e609296b13b0aa9950f73cfb07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0208333333, "max_line_length": 124, "alphanum_fraction": 0.5775688381, "include": true, "reason": "import numpy", "num_tokens": 1301}
[STATEMENT]
lemma sees_fields_fun:
  "(Cs,T) \<in> FieldDecls P C F \<Longrightarrow> (Cs,T') \<in> FieldDecls P C F \<Longrightarrow> T = T'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>(Cs, T) \<in> FieldDecls P C F; (Cs, T') \<in> FieldDecls P C F\<rbrakk> \<Longrightarrow> T = T'
[PROOF STEP]
by(fastforce simp:FieldDecls_def)
{"llama_tokens": 146, "file": "CoreC++_SubObj", "length": 1}
''' A Shallow Constrained RGB Autoencoder Some utility methods... ''' import numpy as N import tensorflow as tf ''' Force matplotlib to not use any Xwindows backend. see: http://stackoverflow.com/questions/29217543/why-does-this-solve-the-no-display-environment-issue-with-matplotlib ''' import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt def get_random_crop( image, patch_dim=(16,16), image_max_extent=(1024,768)#(288,288)#(756,1008)#(384,288) ): border = max(patch_dim[1], patch_dim[0]) bordermax_x = image_max_extent[1]-border bordermax_y = image_max_extent[0]-border r = tf.Variable( tf.random_uniform([1], border, bordermax_x, dtype=tf.int32)) c = tf.Variable( tf.random_uniform([1], border, bordermax_y, dtype=tf.int32)) return tf.image.crop_to_bounding_box( image, r[0], c[0], patch_dim[1], patch_dim[0]) def read_a_file( fname_queue, subtract_mean=False, normalize=True, patch_dim=(16,16), chance_to_sawp_axes=50 ): reader = tf.WholeFileReader() key, value = reader.read(fname_queue) rimg = tf.image.decode_png(value) # preprocess crop = get_random_crop(rimg, patch_dim) # if N.random.randint(0, 100) > 100-chance_to_sawp_axes: # crop = tf.image.rot90(crop) # crop = tf.image.flip_left_right(crop) crop = tf.to_float(crop) if normalize: crop = crop / tf.abs(tf.reduce_max(crop)) # crop = crop / 255.0 if subtract_mean: crop = crop - tf.reduce_mean(crop) # create flat tensor flat = tf.reshape(crop, [patch_dim[0]*patch_dim[1],3]) flat = tf.reshape(tf.transpose(flat,[1,0]), [-1]) return flat def input_pipeline_multiple( fnames, batch_size, num_epochs, read_threads, subtract_mean=False, normalize=True, patch_dim=(16,16) ): fname_queue = tf.train.string_input_producer( fnames, num_epochs=num_epochs, shuffle=True, seed=None) min_after_dequeue = 10000 capacity = min_after_dequeue + 3 * batch_size patch_list = [read_a_file(fname_queue, subtract_mean=subtract_mean, normalize=normalize, patch_dim=patch_dim) for _ in range(read_threads)] batch = tf.train.shuffle_batch_join( [patch_list], batch_size=batch_size, capacity=capacity, min_after_dequeue=min_after_dequeue) return batch # below move! # to scae.util def pad_sequence_num( num, padlen=8, rev=False ): strn = str(num) n_to_pad = padlen - len(strn) if rev: return strn+'0'*n_to_pad else: return '0'*n_to_pad+strn # TODO clean up!!! def normalize_color2( a ): amax = N.max(a) return a / amax def transpose_color( a ): if type(a) is list: a = N.array(a) amax = N.max(a) amin = N.min(a) print '_------' print a print (a + N.abs(amin))# / amax return (a + N.abs(amin)) # '''transpose color from [-1, 1] to [0, 1]''' # return (a + 1.)/2. def normalize_color( a ): highS = N.max(a) lowS = N.min(a) shiftS = lowS if shiftS > 0.0: lowS = 0.0 a = (a - shiftS) / (highS - shiftS) return N.copy(a) def transpose_color_zero_to_one( a ): if type(a) is list: a = N.array(a) '''transpose color from [-1, 1] to [0, 1]''' return (a + 1.)/2. def write_matrix_as_png( filename, outw, visible, hidden, infostr ): '''reshape weight matrix; RGB = (h,w,3)''' m = N.zeros((hidden**2, visible**2, 3)) for row in xrange(outw.shape[0]): channel_magn = outw[row].shape[0]/3 r = outw[row][0:channel_magn:1] g = outw[row][channel_magn:2*channel_magn:1] b = outw[row][2*channel_magn::1] for c in xrange(0, channel_magn): m[row, c] = (r[c], g[c], b[c]) '''write square tile map''' frame = 1 margin = 2*hidden out = N.zeros( ( frame+hidden*(frame+visible)+margin, frame+hidden*(frame+visible), 3) ) + N.max(outw)/2. 
tile_num = 0 for xx in range(hidden): for yy in range(hidden): start_h, start_w = \ frame+xx*(frame+visible),\ frame+yy*(frame+visible) tile = N.array([ N.reshape(m[tile_num].T[0],(visible, visible)), N.reshape(m[tile_num].T[1],(visible, visible)), N.reshape(m[tile_num].T[2],(visible, visible)) ]).T out[start_h : start_h+visible, start_w : start_w+visible] = tile tile_num += 1 out = normalize_color(out) '''create and write matplotlib figure''' fig = plt.figure() fig.set_size_inches(1, 1) ax = plt.Axes(fig, [0., 0., 1., 1.]) ax.set_axis_off() fig.add_axes(ax) ax.imshow(out, interpolation='nearest') ax.set_axis_bgcolor = 'black' ax.text(.0, .02, infostr, transform=ax.transAxes, fontsize=2, color=(.9,.9,.9), bbox=dict(boxstyle='square', fc='black', ec='none')) plt.savefig(filename, dpi=1024, facecolor='black') plt.close(fig)
{"hexsha": "8ede7f04703f48ddace8e7bf9a59e9b6fbe26202", "size": 4558, "ext": "py", "lang": "Python", "max_stars_repo_path": "scae/__init__.py", "max_stars_repo_name": "dvpc/rgb-autoenc-tf", "max_stars_repo_head_hexsha": "ec06d89d8d5e69bc51fc51a6c43161ecff7c4e65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scae/__init__.py", "max_issues_repo_name": "dvpc/rgb-autoenc-tf", "max_issues_repo_head_hexsha": "ec06d89d8d5e69bc51fc51a6c43161ecff7c4e65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scae/__init__.py", "max_forks_repo_name": "dvpc/rgb-autoenc-tf", "max_forks_repo_head_hexsha": "ec06d89d8d5e69bc51fc51a6c43161ecff7c4e65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-17T08:38:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-17T08:38:25.000Z", "avg_line_length": 22.9045226131, "max_line_length": 117, "alphanum_fraction": 0.6772707328, "include": true, "reason": "import numpy", "num_tokens": 1473}
(* Specific program we care about *) Require Import dumb_oeuf. (* Oeuf program in cminor *) Require Import dumb_cm. (* Linked program in cminor *) Require Import Dumb. (* Original Oeuf program *) Require Import dumb_axioms. (* necessary axioms for proof *) Require Import compcert.common.Globalenvs. Require Import compcert.common.Memory. Require Import compcert.common.Values. Require Import compcert.lib.Coqlib. Require Import compcert.common.Events. Require Import compcert.common.Smallstep. Require Import Semantics. Require Import compcert.backend.Cminor. (* prog is the whole program *) Require Import StructTact.StructTactics. Require Import StructTact.Util. Require Import NewCont. Require Import EricTact. Require Import StuartTact. Require Import OeufProofIntern. Require Cmajor. Require Import CminorLib. Require Import Monads. Section SIM. Definition prog := dumb_cm.prog. (* make sure we get correct prog *) Definition oprog := dumb_oeuf.prog. Definition ge := Genv.globalenv prog. Variable st : Cminor.state. Hypothesis init_state : initial_state prog st. Lemma Plt_one_succ : forall x, Plt 1 (Pos.succ x). Proof. induction x; intros. simpl. econstructor; eauto. simpl. econstructor; eauto. eapply Plt_succ. Qed. Lemma Plt_Ple_succ : forall x y, Ple x y -> Plt x (Pos.succ y). Proof. intros. unfold Ple in *. unfold Plt in *. unfold Pos.le in *. unfold Pos.lt. destruct ((x ?= y)%positive) eqn:?; try congruence. eapply Pos.compare_eq in Heqc. subst x. replace ((y ?= Pos.succ y)%positive = Lt) with (Plt y (Pos.succ y)) by (unfold Plt; unfold Pos.lt; reflexivity). eapply Plt_succ. replace ((x ?= Pos.succ y)%positive = Lt) with (Plt x (Pos.succ y)) by (unfold Plt; unfold Pos.lt; reflexivity). replace ((x ?= y)%positive = Lt) with (Plt x (y)) in * by (unfold Plt; unfold Pos.lt; reflexivity). assert (Plt y (Pos.succ y)) by (eapply Plt_succ; eauto). eapply Plt_trans; eauto. Qed. Lemma steps : exists st1, Smallstep.star step ge st E0 st1. Proof. (* first step *) inv init_state. unfold prog in H0. simpl in *. assert (ge = ge0). unfold ge. subst ge0. reflexivity. subst. unfold prog in H0. unfold Genv.globalenv in H0. simpl in H0. unfold Genv.find_symbol in *. simpl in H0. inv H0. unfold Genv.find_funct_ptr in H1. unfold prog in H1. simpl in H1. inv H1. (* more steps *) alloc. take_step. take_step. take_step. take_step. take_step. alloc. store_step. take_step. take_step. take_step. take_step. take_step. store_step. take_step. take_step. take_step. take_step. alloc. store_step. take_step. take_step. take_step. take_step. take_step. assert (Genv.find_symbol ge _id = Some 1%positive). { unfold Genv.find_symbol. simpl. reflexivity. } idtac. store_step. Focus 4. simpl. eauto. usable_chain. store_auto. store_auto. take_step. take_step. take_step. load_step. (* TODO: wrap following stuff in load_step *) unfold Genv.find_funct. unfold Genv.find_funct_ptr. simpl. unfold Integers.Int.zero. break_match; try congruence. reflexivity. reflexivity. (* end load_step stuff *) (* HERE is where we call into Oeuf *) (* This is the complicated continuation we've built up *) (* We'll need this later, after we're back from oeuf *) match goal with | [ |- exists _, Smallstep.star _ _ (Callstate _ _ ?K _) _ _ ] => let KK := fresh "K" in remember K as KK end. (* give nice names to the oeuf and linked states *) remember (Callstate (AST.Internal f_id) (Vptr b1 (Integers.Int.repr 0) :: Vptr b0 (Integers.Int.repr 0) :: nil) K x2) as LST. 
remember (Callstate (AST.Internal f_id) (Vptr b1 (Integers.Int.repr 0) :: Vptr b0 (Integers.Int.repr 0) :: nil) Kstop x2) as OST. (* make sure it's a callstate *) assert (Cmajor.cminor_is_callstate oprog (HighValues.Close _id nil) (HighValues.Constr Integers.Int.zero nil) OST). { assert (b1 <> b0). { copy Heqp0. eapply Mem.nextblock_alloc in Heqp0. eapply Mem.alloc_result in H5. copy Heqp1. eapply Mem.nextblock_alloc in Heqp1. eapply Mem.alloc_result in H6. eapply Mem.nextblock_store in e. eapply Mem.nextblock_store in e0. subst. rewrite e0. rewrite e. rewrite Heqp0. assert (Plt (Mem.nextblock m) (Pos.succ (Mem.nextblock m))) by (eapply Plt_succ). eapply Plt_ne in H5. congruence. } idtac. subst. econstructor. econstructor. Focus 3. unfold Genv.find_symbol. simpl. reflexivity. Focus 2. unfold Genv.find_funct_ptr. simpl. reflexivity. eapply loadable_load. simpl_int_add. loadable_chain. simpl. reflexivity. intros. simpl in *. invp False. econstructor. eapply loadable_load. simpl_int_add. loadable_chain. simpl. reflexivity. intros. simpl in *. invp False. eapply loadable_load. simpl_int_add. loadable_chain. unfold Genv.find_funct_ptr. simpl. reflexivity. unfold Genv.find_symbol. simpl. reflexivity. simpl. reflexivity. eapply init_mem_global_blocks_almost_valid in H. eapply Mem.nextblock_alloc in Heqp. eapply Mem.nextblock_alloc in Heqp0. eapply Mem.nextblock_store in e. eapply Mem.nextblock_store in e0. eapply Mem.nextblock_alloc in Heqp1. eapply Mem.nextblock_store in e1. eapply Mem.nextblock_store in e2. unfold HighValues.global_blocks_valid. assert (Ple (Genv.genv_next (Genv.globalenv oprog)) (Genv.genv_next ge)). { copy LINKED. unfold oprog. unfold ge. unfold prog. eapply Linker.genv_next_Ple; eauto. } idtac. unfold ge in H6. rewrite H in H6. rewrite e2. rewrite e1. rewrite Heqp1. rewrite e0. rewrite e. rewrite Heqp0. rewrite Heqp. do 2 (eapply Plt_trans_succ; eauto). eapply Plt_Ple_succ; eauto. eapply no_future_pointers_store; try eapply e2. eapply no_future_pointers_store; try eapply e1. eapply no_future_pointers_alloc; try eapply Heqp1. eapply no_future_pointers_store; try eapply e0. eapply no_future_pointers_store; try eapply e. eapply no_future_pointers_alloc; try eapply Heqp0. eapply no_future_pointers_alloc; try eapply Heqp. eapply init_mem_no_future_pointers; eauto. simpl. eauto. simpl. eauto. simpl. eauto. simpl. eapply Mem.nextblock_alloc in Heqp1. eapply Mem.nextblock_store in e1. rewrite e1. rewrite Heqp1. eapply Plt_one_succ. econstructor; eauto. simpl. left. reflexivity. econstructor. eauto. } idtac. remember (@SourceLifted.VConstr id_G _ _ _ SourceLifted.CTO HList.hnil) as SZero. remember (@SourceLifted.VClose id_G _ _ _ HList.Here HList.hnil) as SID. (* establish matching callstates *) copy H5. eapply (OeufProofIntern.oeuf_match_callstate Dumb.oeuf_prog _ Dumb.idM dumb_axioms.TRANSF) in H5. Focus 2. instantiate (1 := SID). unfold match_values. do 4 eexists. split. subst SID. simpl. reflexivity. split. econstructor; eauto. split. econstructor; eauto. unfold EFP2. destruct EFTRANSF. simpl. break_and. unfold dumb_oeuf.prog in H7. unfold oeuf_prog in H7. unfold ut in H7. simpl in H7. unfold Oeuf.transf_untyped_to_elim_func in H7. simpl in H7. inversion H7. inversion H10. destruct x3. simpl in H11. subst p. simpl in H8. inversion H8. simpl. reflexivity. simpl. omega. econstructor; eauto. split. econstructor; eauto. econstructor; eauto. Focus 2. instantiate (1 := SZero). unfold match_values. do 4 eexists. split. subst SZero. simpl. reflexivity. split. econstructor; eauto. split. 
econstructor; eauto. split. econstructor; eauto. instantiate (1 := Integers.Int.zero). instantiate (1 := O). simpl. rewrite Integers.Int.unsigned_zero. reflexivity. econstructor; eauto. repeat break_exists. repeat break_and. (* use matching states to step *) eapply OeufProofIntern.oeuf_star_simulation in H5. Focus 2. subst SID. subst SZero. clear -H7. inversion H7. subst ret_ty. eapply existT_eq in H13. subst free_tys. eapply existT_eq in H13. eapply existT_eq in H13. subst mb0. eapply existT_eq in H14. subst free0. eapply existT_eq in H17. subst x3. eapply existT_eq in H15. subst av. all: try solve [try eapply list_eq_dec; eapply SourceLifted.type_eq_dec]. eapply star_left. econstructor; eauto. eapply star_left. econstructor; eauto. eapply star_refl. assert (SourceLifted.final_state (SourceLifted.run_cont SourceLifted.KStop (HList.hget (HList.hcons SZero HList.hnil) HList.Here)) SZero). { econstructor; eauto. } idtac. repeat progress (try break_exists; try break_and). eapply OeufProofIntern.oeuf_match_final_states in H9; try eassumption. break_exists; break_and. eapply subst_in_cont in H5; try eassumption. instantiate (1 := K) in H5. unfold NewCont.ge in H5. repeat break_exists. repeat break_and. Focus 2. subst K. econstructor; eauto. assert (x8 = LST). { subst LST OST. inv H5. f_equal. invp match_cont. reflexivity. } subst x8. eapply star_to_star in H11. assert (Linker.match_states LST LST). { subst. econstructor. repeat econstructor. econstructor. econstructor. econstructor. econstructor. econstructor. simpl. exact I. simpl. split; try split; exact I. simpl. exact I. econstructor. unfold Linker.env_lessdef. intros. eexists; split. eassumption. econstructor. simpl. repeat (try split; try exact I). eapply Mem.extends_refl. simpl. exact I. simpl. split; exact I. } idtac. eapply Linker.star_step_sim in H11; try eapply H13; try eapply dumb_axioms.LINKED. unfold Linker.link_ge in H11. unfold ge. break_exists. break_and. eapply estar_left_app; nil_trace. split. eassumption. (* Now we have to pick apart all of these final_state and matching state definitions *) inversion H8. eapply existT_eq in H18. Focus 2. eapply SourceLifted.type_eq_dec. subst v. clear H17. subst ty. inversion H10. repeat break_exists. repeat break_and. rewrite HeqSZero in H15. simpl in H15. subst x10. inversion H16. subst x11. subst aargs. subst ctor. subst tag. inversion H23. subst bargs. inversion H17. subst x12. subst tag. subst aargs. inversion H22. subst bargs. inversion H18. subst atag. subst aargs. subst x13. inversion H25. subst bargs. inversion H19. subst x7. subst aargs. subst tag. clear H22. clear H23. clear H25. inversion H26. subst bargs. clear H26. inversion H9. subst x6. subst v. inversion H12. subst v. subst orig. subst m4. subst x9. inversion H14. subst v. subst k. subst m4. subst x8. inversion H26. subst new. clear H26. clear -H HeqK H25 H29 H15 H28 H Heqp Heqp0 e e0 Heqp1 e1 e2 H21. rewrite HeqK in H28. clear HeqK. remember (Maps.PTree.set _id_closure (Vptr b1 (Integers.Int.repr 0)) (set_optvar (Some 128%positive) (Vptr b1 (Integers.Int.repr 0)) (Maps.PTree.set _zero_value (Vptr b0 (Integers.Int.repr 0)) (set_optvar (Some 127%positive) (Vptr b0 (Integers.Int.repr 0)) (set_locals (fn_vars f_main) (set_params nil (fn_params f_main))))))) as e_main. inversion H28. subst k'. clear H28. subst oid f v e3 k. clear H9. inversion H6. subst k s k'0. clear H4. clear H6. inversion H2. subst k k' s. clear H2. clear H5. inversion H3. subst k'0 k s. clear H3. clear H5. inversion H2. subst k'. clear H2. inversion H7. 
subst v v'1. clear H7. inversion H15. subst v' n values. simpl in H3. inversion H3. subst l'. clear H3. clear H5. inversion H25. subst v'0 v. clear H25. eapply HighValues.value_inject_mem_extends in H15; eauto. simpl in H2. eapply Mem.load_extends in H2; eauto. break_exists. break_and. inversion H1. clear H1. subst x3 v. take_step. take_step. take_step. unfold set_optvar. rewrite Maps.PTree.gss. reflexivity. take_step. take_step. take_step. unfold set_optvar. rewrite Maps.PTree.gss. reflexivity. simpl. eassumption. unfold Val.cmp. unfold Val.cmp_bool. unfold Integers.Int.cmp. unfold Val.of_optbool. unfold Integers.Int.eq. rewrite <- H21. unfold Z.of_nat. replace (Integers.Int.repr 0) with (Integers.Int.zero) by auto. rewrite Integers.Int.unsigned_zero. rewrite zeq_true. unfold Vtrue. econstructor; eauto. rewrite Integers.Int.eq_false. Focus 2. eapply Integers.Int.one_not_zero. unfold negb. take_step. eexists. eapply Smallstep.star_refl. Qed. End SIM.
{"author": "uwplse", "repo": "oeuf", "sha": "f3e4d236465ba872d1f1b8229548fa0edf8f7a3f", "save_path": "github-repos/coq/uwplse-oeuf", "path": "github-repos/coq/uwplse-oeuf/oeuf-f3e4d236465ba872d1f1b8229548fa0edf8f7a3f/shim_verif/dumb_proof.v"}
import os

import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors as _colors
from scipy import interpolate

import figlatex
import hist2d
import colormap

command = '-m 100000 -L 1 -t -l 500 darksidehd/nuvhd_lf_3x_tile57_77K_64V_6VoV_1.wav'

###########################

def naivelinear(colors=['black', '#f55', 'white'], N=256, return_pos=False):
    # piecewise-linear interpolation in raw RGB space (not perceptually uniform)
    rgb0 = np.array([_colors.to_rgb(color) for color in colors])
    t0 = np.linspace(0, 1, len(rgb0))
    t = np.linspace(0, 1, N)
    rgb = interpolate.interp1d(t0, rgb0, axis=0)(t)
    rt = _colors.ListedColormap(rgb)
    if return_pos:
        rt = (rt, t0)
    return rt

cmap1 = naivelinear()
cmap2 = colormap.uniform()

figs = []

for ifile, cmap in enumerate([cmap1, cmap2]):

    figname = f'fighist2dtile57-cmap-{ifile}'
    fig = plt.figure(num=figname, clear=True, figsize=[9, 4])

    save = f'figthesis/{figname}.npz'
    if not os.path.exists(save):
        hist = hist2d.Hist2D(command.split())
        print(f'save {save}...')
        hist.save(save, compress=True)
    print(f'load {save}...')
    hist = hist2d.Hist2D.load(save)

    hist.hist2d(fig, cmap=cmap)

    figs.append(fig)

for fig in figs:
    fig.show()
{"hexsha": "da2822117d0ae55d114714cf4a436a71e53ef5c7", "size": 1223, "ext": "py", "lang": "Python", "max_stars_repo_path": "figthesis/fighist2dtile57-cmap.py", "max_stars_repo_name": "Gattocrucco/sipmfilter", "max_stars_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figthesis/fighist2dtile57-cmap.py", "max_issues_repo_name": "Gattocrucco/sipmfilter", "max_issues_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figthesis/fighist2dtile57-cmap.py", "max_forks_repo_name": "Gattocrucco/sipmfilter", "max_forks_repo_head_hexsha": "74215d6c53b998808fc6c677b46030234d996bdf", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.46, "max_line_length": 85, "alphanum_fraction": 0.6434995912, "include": true, "reason": "import numpy,from scipy", "num_tokens": 384}
import h5py
import numpy as np
from PIL import Image


def rotate_image(image):
    return image.rotate(-90, expand=True)


class LabeledDataset:
    """Python interface for the labeled subset of the NYU dataset.

    To save memory, call the `close()` method of this class to close
    the dataset file once you're done using it.
    """

    def __init__(self, path):
        """Opens the labeled dataset file at the given path."""
        self.file = h5py.File(path, mode='r')
        self.color_maps = self.file['images']
        self.depth_maps = self.file['depths']
        self.label_maps = self.file['labels']
        self.instances_maps = self.file['instances']
        self.names = self.file['names'][0]
        self.names = [''.join(chr(i) for i in self.file[obj][:]) for obj in self.names]
        self.names = ['unlabeled'] + self.names

    def close(self):
        """Closes the HDF5 file from which the dataset is read."""
        self.file.close()

    def __len__(self):
        return len(self.color_maps)

    def _get_bounding_box_(self, instances_map, labels_map):
        # Accumulate per-(instance id, label) extents; keying on the
        # (id, type) pair keeps instances of different classes apart
        # even when instance ids collide.
        boxes = {}
        w, h = instances_map.shape
        for i in range(w):
            for j in range(h):
                id = instances_map[i][j]
                tp = labels_map[i][j]
                key = str((id, tp))
                if key in boxes:
                    if i < boxes[key][0]:
                        boxes[key][0] = i
                    if j < boxes[key][1]:
                        boxes[key][1] = j
                    if i > boxes[key][2]:
                        boxes[key][2] = i
                    if j > boxes[key][3]:
                        boxes[key][3] = j
                else:
                    boxes[key] = np.zeros(5)
                    boxes[key][0] = i
                    boxes[key][1] = j
                    boxes[key][2] = i
                    boxes[key][3] = j
                    boxes[key][4] = labels_map[i][j]
        boxes = np.array(list(boxes.values()), dtype=int)
        # drop the 'unlabeled' class (label 0)
        return boxes[boxes.T[4] != 0]

    def __getitem__(self, idx):
        color_map = self.color_maps[idx]
        color_map = np.moveaxis(color_map, 0, -1)
        color_image = Image.fromarray(color_map, mode='RGB')
        color_image = rotate_image(color_image)

        depth_map = self.depth_maps[idx]
        depth_image = Image.fromarray(depth_map, mode='F')
        depth_image = rotate_image(depth_image)

        labels_map = self.label_maps[idx]
        labels_image = Image.fromarray(labels_map)
        labels_image = rotate_image(labels_image)

        instances_map = self.instances_maps[idx]
        instances_image = Image.fromarray(instances_map)
        instances_image = rotate_image(instances_image)

        labels_map = self.label_maps[idx]
        instances_map = self.instances_maps[idx]
        val_bbox = self._get_bounding_box_(instances_map, labels_map)
        label_dict = [{"bbox": list(bbox[:4]), "class": self.names[bbox[4]]}
                      for bbox in val_bbox]

        return color_image, depth_image, labels_image, instances_image, label_dict
{"hexsha": "5d1383696ca987e78b0fbcecb5289e6edc71955b", "size": 3190, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/nyuv2/toolbox/labeled.py", "max_stars_repo_name": "SJTU-CV-2021/Single-Image-3D-Reconstruction-Based-On-ShapeNet", "max_stars_repo_head_hexsha": "7c0f4d478c99bf51176ba1d1b883984f41aa93d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/nyuv2/toolbox/labeled.py", "max_issues_repo_name": "SJTU-CV-2021/Single-Image-3D-Reconstruction-Based-On-ShapeNet", "max_issues_repo_head_hexsha": "7c0f4d478c99bf51176ba1d1b883984f41aa93d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/nyuv2/toolbox/labeled.py", "max_forks_repo_name": "SJTU-CV-2021/Single-Image-3D-Reconstruction-Based-On-ShapeNet", "max_forks_repo_head_hexsha": "7c0f4d478c99bf51176ba1d1b883984f41aa93d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8426966292, "max_line_length": 98, "alphanum_fraction": 0.5620689655, "include": true, "reason": "import numpy", "num_tokens": 729}
[STATEMENT] lemma rel_star_contl: "X ; Y^* = (\<Union>i. X ; rel_d.power Y i)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. X ; Y\<^sup>* = (\<Union>i. X ; rel_d.power Y i) [PROOF STEP] by (simp add: rel_star_def relcomp_UNION_distrib)
{"llama_tokens": 109, "file": "Algebraic_VCs_AVC_KAT_VC_KAT_scratch", "length": 1}
import numpy as np
import spacy
import os


class MyInputGenerator(object):

    def __init__(self, dirname, vocab, seq_length, sequences_step, num_epochs, batch_size=1):
        self.dirname = dirname
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.seq_length = seq_length
        self.sequences_step = sequences_step
        self.nlp = spacy.load("fr_core_news_sm")

    def __iter__(self):
        for i in range(self.num_epochs):
            for fname in os.listdir(self.dirname)[1:]:
                yield self.__data_generation(fname)

    def __data_generation(self, file):
        'Generates data containing batch_size samples'
        # X : (n_samples, *dim, n_channels)
        # Initialization
        wordlist = self.__wl(file)
        #seq_length, sequences_step = self.__pre_pro(wordlist)
        sequences, next_words = self.__make_arg(self.seq_length, self.sequences_step, wordlist)
        len_s = len(sequences)
        X, y = self.__init_X_y(len_s, self.seq_length, self.vocab_size)
        X, y = self.__fill_X_y(X, y, sequences, self.vocab, next_words)
        return X, y

    def __create_wordlist(self, doc):
        wl = []
        nw = ['\n\xa0', '\n\xa0\n', '\n\xa0\n\xa0\n', '\n\xa0\n\xa0\n\xa0\n\xa0\n', '\n\xa0 ', '\n\xa0 \xa0', '\n\xa0\xa0 ', '\n\xa0\xa0\xa0 ', '\n\xa0\xa0\xa0\xa0 ', '\n\xa0\xa0\xa0\xa0\xa0 ', '\x1c', "\n", "\n\n", '\u2009', '\xa0', '\xa0 ']
        for word in doc:
            if word.text not in nw:
                wl.append(word.text.lower())
        return wl

    def __wl(self, file):
        # Generate data
        with open(self.dirname + '/' + file, 'r') as f:
            text = f.read()
        doc = self.nlp(text)
        wordlist = self.__create_wordlist(doc)
        return wordlist

    def __make_arg(self, seq_length, sequences_step, wordlist):
        sequences = []
        next_words = []
        for i in range(0, len(wordlist) - seq_length, sequences_step):
            sequences.append(wordlist[i: i + seq_length])
            next_words.append(wordlist[i + seq_length])
        return sequences, next_words

    def __init_X_y(self, len_s, seq_length, vocab_size):
        # np.bool was removed from NumPy; the built-in bool is the supported dtype
        X = np.zeros((len_s, seq_length, vocab_size), dtype=bool)
        y = np.zeros((len_s, vocab_size), dtype=bool)
        return X, y

    def __fill_X_y(self, X, y, sequences, vocab, next_words):
        for i, sentence in enumerate(sequences):
            for t, word in enumerate(sentence):
                X[i, t, vocab[word]] = 1
            y[i, vocab[next_words[i]]] = 1
        return X, y
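
# ------------------------------------------------------------------
# Hypothetical usage sketch (editor's addition): the corpus directory and
# hyper-parameters are placeholders, and the naive whitespace vocabulary
# below may not match the spaCy tokenisation used inside the class -- a
# real vocabulary should be built with the same tokeniser.
if __name__ == '__main__':
    corpus_dir = 'corpus'  # directory of plain-text files (assumed to exist)
    words = set()
    for fname in os.listdir(corpus_dir):
        with open(os.path.join(corpus_dir, fname), 'r') as f:
            words.update(w.lower() for w in f.read().split())
    vocab = {w: i for i, w in enumerate(sorted(words))}
    gen = MyInputGenerator(corpus_dir, vocab, seq_length=30,
                           sequences_step=3, num_epochs=1)
    for X, y in gen:
        print(X.shape, y.shape)  # (n_sequences, 30, len(vocab)), (n_sequences, len(vocab))
        break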
{"hexsha": "60f2da8ab77903e1a4709d1f4ee06b53748d4de8", "size": 2545, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/Generator.py", "max_stars_repo_name": "saadmoumad/DiscoursDeRoi", "max_stars_repo_head_hexsha": "5c6f0d4b48fdc347c0f6766ad7d1dc2c3b104b49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-15T23:00:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T01:19:47.000Z", "max_issues_repo_path": "Scripts/Generator.py", "max_issues_repo_name": "younesslanda/DiscoursDeRoi", "max_issues_repo_head_hexsha": "5c6f0d4b48fdc347c0f6766ad7d1dc2c3b104b49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/Generator.py", "max_forks_repo_name": "younesslanda/DiscoursDeRoi", "max_forks_repo_head_hexsha": "5c6f0d4b48fdc347c0f6766ad7d1dc2c3b104b49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-09T23:21:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-09T23:21:24.000Z", "avg_line_length": 23.5648148148, "max_line_length": 91, "alphanum_fraction": 0.6345776031, "include": true, "reason": "import numpy", "num_tokens": 745}
import statsmodels.stats.multitest as smm
import pickle
import matplotlib.pyplot as plt
import seaborn
import numpy as np

alphas = [0.01,0.0001,0.000001]
sizes = [128,256,512,1024,2048,4096]
aggregations = ['mean','median']
GTEx_directory = '/hps/nobackup/research/stegle/users/willj/GTEx'

[most_expressed_transcript_idx, most_varying_feature_idx, retrained_results] = pickle.load(open(GTEx_directory + '/small_data/retrained_pvalues.py','rb'))
[most_expressed_transcript_idx, most_varying_feature_idx, raw_results] = pickle.load(open(GTEx_directory + '/small_data/raw_pvalues.py','rb'))

# print ("Plotting p-values across patch-size")
# # Significant p-value across patch-size
# plt.figure(figsize=(10,7))
# plt.title("Number of significant p-values (Bonf), varying patch size and FDR", size=17)
# plt.xticks(range(len(sizes)),sizes,size=15)
# plt.xlabel('Patch size', size=15)
# plt.ylabel('Number of significant pvalues (Bonf)', size=15)
# colours = ['blue','red','green']
# for a in aggregations[0:1]:
#     for (k, alph) in enumerate(alphas):
#         points = [sum(smm.multipletests(retrained_results['{}_{}_{}'.format(a,s,'pvalues')].flatten(),method='bonferroni',alpha=alph)[0]) for s in sizes]
#         plt.plot(points, c=colours[k],label=alph)
# plt.legend()
# plt.savefig(GTEx_directory + '/figures/associations/sign_pvalues_vary_patchsize.eps',format='eps', dpi=600)
#
# print ("Plotting p-values raw vs retrained")
# # Significant p-values comparing Raw vs Retrained Inceptionet
# plt.figure()
# sizes = [128,256,512,1024,2048,4096]
# plt.title("Number of significant p-values (Bonf 1e-6), varying patch size. Raw vs Retrained Inceptionet")
# plt.plot([sum(smm.multipletests(retrained_results['{}_{}_{}'.format('mean',s,'pvalues')].flatten(),method='bonferroni',alpha=1e-6)[0]) for s in sizes],c='blue',alpha=1,label='retrained')
# plt.plot([sum(smm.multipletests(raw_results['{}_{}_{}'.format('mean',s,'pvalues')].flatten(),method='bonferroni',alpha=1e-6)[0]) for s in sizes],c='red', alpha=1,label='raw')
# plt.xlabel('Patch size',size=15)
# plt.ylabel('Number of significant pvalues (Bonf)',size=15)
# plt.xticks(range(len(sizes)),sizes,size=15)
# plt.legend(fontsize=15)
# plt.tight_layout()
# plt.savefig(GTEx_directory + '/figures/associations/sign_pvalues_raw_vs_retrained.eps',format='eps', dpi=600)
#
# print ("Plotting p-values mean vs median")
# # Significant p-values comparing mean / median aggregation
# plt.figure(figsize=(10,10))
# plt.title("Comparing Aggregation methods. Median: red, Mean: blue",size=20)
# plt.xticks(range(len(sizes)),alphas,size=15)
# plt.xlabel('FDR',size=20)
# plt.ylabel('Number of BH significant pvalues',size=20)
# for a in aggregations:
#     for (k,s) in enumerate([256]):
#         if a == 'mean':
#             c = 'red'
#         else:
#             c = 'blue'
#
#         assoc_key = '{}_{}_{}'.format(a,s,'pvalues')
#         print (assoc_key)
#         associations = retrained_results[assoc_key]
#
#         points = [sum(smm.multipletests(associations.flatten(),method='bonferroni',alpha=a)[0]) for a in alphas]
#         plt.plot(points, c=c,label=s,alpha=1)
# plt.tight_layout()
# plt.savefig(GTEx_directory + '/figures/associations/sign_pvalues_mean_vs_median.eps',format='eps', dpi=600)
#
#
# print ("Plotting number of features with significant transcripts")
#
# # Number of features with significant transcripts
# # plt.figure(figsize=(10,10))
# # plt.title("Number of features with significant pvalues (Bonf), varying patch size and FDR", size=20)
# # plt.xticks(range(len(sizes)),sizes,size=15)
# # plt.xlabel('Patch size',size=15)
# # plt.ylabel('Number of features with significant pvalues (Bonf)',size=15)
# # colours = ['blue','red','green']
# # for a in aggregations[0:1]:
# #     for (k, alph) in enumerate(alphas):
# #         points = [sum(np.sum(smm.multipletests(retrained_results['{}_{}_{}'.format(a,s,'pvalues')].flatten(),method='bonferroni',alpha=alph)[0].reshape(retrained_results['{}_{}_{}'.format(a,s,'R')].shape),axis=1) > 0) for s in sizes]
# #         plt.plot(points, c=colours[k],label=alph)
# # plt.legend()
# # plt.tight_layout()
# # plt.savefig(GTEx_directory + '/figures/associations/features_with_sign_transcripts.eps',format='eps', dpi=600)
#
# print ("Plotting number of transcripts with significant features")
# # Number of transcripts with significant features
# plt.figure(figsize=(10,10))
# plt.title("Number of transcripts significant to at least 1 feature (Bonf), varying patch size and FDR", size=20)
# plt.xticks(range(len(sizes)),sizes,size=15)
# plt.xlabel('Patch size',size=15)
# plt.ylabel('Number of transcripts significant to at least 1 feature (Bonf)',size=15)
# colours = ['blue','red','green']
# for a in aggregations[0:1]:
#     for (k, alph) in enumerate(alphas):
#         points = [sum(np.sum(smm.multipletests(retrained_results['{}_{}_{}'.format(a,s,'pvalues')].flatten(),method='bonferroni',alpha=alph)[0].reshape(retrained_results['{}_{}_{}'.format(a,s,'R')].shape),axis=0) > 0) for s in sizes]
#         plt.plot(points, c=colours[k],label=alph)
# plt.legend()
# plt.tight_layout()
# plt.savefig(GTEx_directory + '/figures/associations/transcripts_with_sign_features.eps',format='eps', dpi=600)
{"hexsha": "3388ef653dcd33c0764d4a30e6b9f5964a99ae76", "size": 5230, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/figures/associations/association_graphs.py", "max_stars_repo_name": "willgdjones/GTEx", "max_stars_repo_head_hexsha": "c56a5d548978545ab8a98e74236d52343113e9e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-02-21T13:05:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-02T14:37:29.000Z", "max_issues_repo_path": "src/figures/associations/association_graphs.py", "max_issues_repo_name": "willgdjones/GTEx", "max_issues_repo_head_hexsha": "c56a5d548978545ab8a98e74236d52343113e9e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/figures/associations/association_graphs.py", "max_forks_repo_name": "willgdjones/GTEx", "max_forks_repo_head_hexsha": "c56a5d548978545ab8a98e74236d52343113e9e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.7821782178, "max_line_length": 237, "alphanum_fraction": 0.6996175908, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 1489}
//================================================================================================== /*! @file @copyright 2016 NumScale SAS @copyright 2016 J.T. Lapreste Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) */ //================================================================================================== #ifndef BOOST_SIMD_FUNCTION_SIGNNZ_HPP_INCLUDED #define BOOST_SIMD_FUNCTION_SIGNNZ_HPP_INCLUDED #if defined(DOXYGEN_ONLY) namespace boost { namespace simd { /*! @ingroup group-ieee Function object implementing signnz capabilities Returns the sign of x. I.e. -1 or 1, according x is negative or positive. This function never returns @ref Zero (zero is considered positive for integers). For floating point numbers the bit of sign is taken into account and so we always have signnz(-z) == -signnz(z)). The result for @ref Nan entry is undefined @par Semantic: @code T r = signnz(x); @endcode is similar to: @code T r = is_nan(x) ? Nan<T>() : (is_negative(x) ? T(-1) : T(1)); @endcode @see Mzero, sign, is_negative, is_positive **/ const boost::dispatch::functor<tag::signnz_> signnz = {}; } } #endif #include <boost/simd/function/scalar/signnz.hpp> #include <boost/simd/function/simd/signnz.hpp> #endif
{"hexsha": "4681258b9ec890a56185b962664e09129e2a381b", "size": 1414, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/signnz.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/function/signnz.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/signnz.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8070175439, "max_line_length": 100, "alphanum_fraction": 0.5933521924, "num_tokens": 331}
function disp(f) %DISP Display a BALLFUNV to the command line. % Copyright 2019 by The University of Oxford and The Chebfun Developers. % See http://www.chebfun.org/ for Chebfun information. loose = strcmp(get(0,'FormatSpacing'),'loose'); % Compact version: if ( isempty(f) ) fprintf('empty ballfunv\n\n') return end disp(' ballfunv object containing') if ( loose ) fprintf('\n'); end F = f.comp; for j = 1:3 disp(F{j}); end end
{"author": "chebfun", "repo": "chebfun", "sha": "8c49396a55e46ddd57a1d108c6a8f32e37536d54", "save_path": "github-repos/MATLAB/chebfun-chebfun", "path": "github-repos/MATLAB/chebfun-chebfun/chebfun-8c49396a55e46ddd57a1d108c6a8f32e37536d54/@ballfunv/disp.m"}
// Copyright Marek Dalewski 2017 // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.md or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <commander/detail__type_traits/always_false.hpp> #include <boost/test/unit_test.hpp> BOOST_AUTO_TEST_CASE( always_false ) { namespace comd = commander::detail; static_assert(!comd::always_false<>::value); static_assert(!comd::always_false<int>::value); static_assert(!comd::always_false<volatile int>::value); static_assert(!comd::always_false<const int>::value); static_assert(!comd::always_false<const volatile int>::value); static_assert(!comd::always_false<int&>::value); static_assert(!comd::always_false<volatile int&>::value); static_assert(!comd::always_false<const int&>::value); static_assert(!comd::always_false<const volatile int&>::value); static_assert(!comd::always_false<int&&>::value); static_assert(!comd::always_false<volatile int&&>::value); static_assert(!comd::always_false<const int&&>::value); static_assert(!comd::always_false<const volatile int&&>::value); static_assert(!comd::always_false<float, double, long double>::value); } BOOST_AUTO_TEST_CASE( always_false_v ) { namespace comd = commander::detail; static_assert(!comd::always_false_v<>); static_assert(!comd::always_false_v<int>); static_assert(!comd::always_false_v<volatile int>); static_assert(!comd::always_false_v<const int>); static_assert(!comd::always_false_v<const volatile int>); static_assert(!comd::always_false_v<int&>); static_assert(!comd::always_false_v<volatile int&>); static_assert(!comd::always_false_v<const int&>); static_assert(!comd::always_false_v<const volatile int&>); static_assert(!comd::always_false_v<int&&>); static_assert(!comd::always_false_v<volatile int&&>); static_assert(!comd::always_false_v<const int&&>); static_assert(!comd::always_false_v<const volatile int&&>); static_assert(!comd::always_false_v<float, double, long double>); }
{"hexsha": "ecf396cfa66b1225536499e92b59f76807fc69de", "size": 2113, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "project/test/type_traits/always_false.cpp", "max_stars_repo_name": "daishe/commander", "max_stars_repo_head_hexsha": "0a23abcbe406e234a4242e0d508bb89d72b28e25", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project/test/type_traits/always_false.cpp", "max_issues_repo_name": "daishe/commander", "max_issues_repo_head_hexsha": "0a23abcbe406e234a4242e0d508bb89d72b28e25", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/test/type_traits/always_false.cpp", "max_forks_repo_name": "daishe/commander", "max_forks_repo_head_hexsha": "0a23abcbe406e234a4242e0d508bb89d72b28e25", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8679245283, "max_line_length": 74, "alphanum_fraction": 0.7188831046, "num_tokens": 519}
Ronny Restrepo

Lidar Birds Eye Views
March 26, 2017, 11 p.m.

Summary
Today I started working on creating birds eye view images of the LIDAR data.

Quirks of the Lidar Coordinates
One thing to keep in mind about the LIDAR data is that the axes represent different things to what a camera photo would represent, and they point in different directions too. The following image illustrates how they differ. Notice how the x axis is actually the depth, and the horizontal axis is the y axis.

[Image: comparison of photo coordinates vs lidar coordinates]

Limiting to a Rectangular Region
Instead of creating a birds eye view of every single point captured by the Lidar, it is useful to focus on a rectangular region of the data when looked at from the top, as illustrated in the image below. Also notice how the x and y axes will need to be swapped around, and made to point in the opposite direction, when converting to image coordinates.

[Image: example crop region]

We will want to create a filter that only keeps points within the desired rectangle. The following code creates a 30x30m region, such that it captures 15m on either side of the car, and 30m in front of it.

# LIMIT VIEWING RANGE - To within a desired rectangle
side_range = [-15, 15]  # 15 metres on either side
fwd_range = [0, 30]     # 30 metres in front

# INDICES FILTER - of values within the desired rectangle
# Note left side is positive y axis in LIDAR coordinates
ff = np.logical_and((x_lidar > fwd_range[0]), (x_lidar < fwd_range[1]))
ss = np.logical_and((y_lidar > -side_range[1]), (y_lidar < -side_range[0]))
indices = np.argwhere(np.logical_and(ff, ss)).flatten()

# POINTS TO USE FOR IMAGE
x_img = -y_lidar[indices]        # x axis is -y in LIDAR
y_img = x_lidar[indices]         # y axis is x in LIDAR
pixel_values = z_lidar[indices]  # Height values used for pixel intensity

# Shift values so (0,0) is the minimum value
x_img -= side_range[0]
y_img -= fwd_range[0]

Simple Implementation in Matplotlib
The following piece of code creates a 2D image of the points in the region by plotting them out with matplotlib, and color coding the points based on their height value.

# PLOT THE IMAGE
cmap = "jet"    # Color map to use
dpi = 100       # Image resolution
x_max = side_range[1] - side_range[0]
y_max = fwd_range[1] - fwd_range[0]
fig, ax = plt.subplots(figsize=(600/dpi, 600/dpi), dpi=dpi)
ax.scatter(x_img, y_img, s=1, c=pixel_values, linewidths=0, alpha=1, cmap=cmap)
ax.set_facecolor((0, 0, 0))   # Set regions with no points to black
                              # (set_axis_bgcolor in Matplotlib < 2.0)
ax.axis('scaled')             # {equal, scaled}
ax.xaxis.set_visible(False)   # Do not draw axis tick marks
ax.yaxis.set_visible(False)   # Do not draw axis tick marks
plt.xlim([0, x_max])   # prevent drawing empty space outside of horizontal FOV
plt.ylim([0, y_max])   # prevent drawing empty space outside of vertical FOV
fig.savefig("/tmp/simple_top.jpg", dpi=dpi, bbox_inches='tight', pad_inches=0.0)

Which creates an image like the following:

[Image: simple birds eye view of lidar points]

Better solution using numpy and PIL
Creating the images in Matplotlib has the advantage that we can choose pretty spectral colormappings to make it easier for us humans to distinguish ranges of values. But matplotlib is horribly slow, and will therefore be impractical if we want to create huge batches of these images as a form of data preprocessing to pass on to a machine learning algorithm. To that extent, I created a much more efficient version of processing the images that uses numpy and PIL.
from PIL import Image import numpy as np # ============================================================================== # SCALE_TO_255 # ============================================================================== def scale_to_255(a, min, max, dtype=np.uint8): """ Scales an array of values from specified min, max range to 0-255 Optionally specify the data type of the output (default is uint8) """ return (((a - min) / float(max - min)) * 255).astype(dtype) # ============================================================================== # BIRDS_EYE_POINT_CLOUD # ============================================================================== def birds_eye_point_cloud(points, side_range=(-10, 10), fwd_range=(-10,10), res=0.1, min_height = -2.73, max_height = 1.27, saveto=None): """ Creates an 2D birds eye view representation of the point cloud data. You can optionally save the image to specified filename. Args: points: (numpy array) N rows of points data Each point should be specified by at least 3 elements x,y,z side_range: (tuple of two floats) (-left, right) in metres left and right limits of rectangle to look at. fwd_range: (tuple of two floats) (-behind, front) in metres back and front limits of rectangle to look at. res: (float) desired resolution in metres to use Each output pixel will represent an square region res x res in size. min_height: (float)(default=-2.73) Used to truncate height values to this minumum height relative to the sensor (in metres). The default is set to -2.73, which is 1 metre below a flat road surface given the configuration in the kitti dataset. max_height: (float)(default=1.27) Used to truncate height values to this maximum height relative to the sensor (in metres). The default is set to 1.27, which is 3m above a flat road surface given the configuration in the kitti dataset. saveto: (str or None)(default=None) Filename to save the image as. If None, then it just displays the image. """ x_lidar = points[:, 0] y_lidar = points[:, 1] z_lidar = points[:, 2] # r_lidar = points[:, 3] # Reflectance # INDICES FILTER - of values within the desired rectangle # Note left side is positive y axis in LIDAR coordinates ff = np.logical_and((x_lidar > fwd_range[0]), (x_lidar < fwd_range[1])) ss = np.logical_and((y_lidar > -side_range[1]), (y_lidar < -side_range[0])) indices = np.argwhere(np.logical_and(ff,ss)).flatten() # CONVERT TO PIXEL POSITION VALUES - Based on resolution x_img = (-y_lidar[indices]/res).astype(np.int32) # x axis is -y in LIDAR y_img = (x_lidar[indices]/res).astype(np.int32) # y axis is -x in LIDAR # will be inverted later # SHIFT PIXELS TO HAVE MINIMUM BE (0,0) # floor used to prevent issues with -ve vals rounding upwards x_img -= int(np.floor(side_range[0]/res)) y_img -= int(np.floor(fwd_range[0]/res)) # CLIP HEIGHT VALUES - to between min and max heights pixel_values = np.clip(a = z_lidar[indices], a_min=min_height, a_max=max_height) # RESCALE THE HEIGHT VALUES - to be between the range 0-255 pixel_values = scale_to_255(pixel_values, min=min_height, max=max_height) # FILL PIXEL VALUES IN IMAGE ARRAY x_max = int((side_range[1] - side_range[0])/res) y_max = int((fwd_range[1] - fwd_range[0])/res) im = np.zeros([y_max, x_max], dtype=np.uint8) im[-y_img, x_img] = pixel_values # -y because images start from top left # Convert from numpy array to a PIL image im = Image.fromarray(im) # SAVE THE IMAGE if saveto is not None: im.save(saveto) else: im.show()
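
To try the function out on a KITTI scan, the points can be loaded straight from one of the velodyne .bin files, which store each point as four float32 values (x, y, z, reflectance). The filename below is just a placeholder:

# "000000.bin" is a placeholder for an actual KITTI velodyne file
points = np.fromfile("000000.bin", dtype=np.float32).reshape(-1, 4)
birds_eye_point_cloud(points,
                      side_range=(-15, 15),
                      fwd_range=(0, 30),
                      res=0.1,
                      saveto="/tmp/birds_eye.png")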
{"hexsha": "b8d2313dc376969d2e0a51fa02295691068e4dea", "size": 8003, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/read_lidar.py", "max_stars_repo_name": "PeiliangLi/avod", "max_stars_repo_head_hexsha": "655b333d36710d665de63fa67355d973364625b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demos/read_lidar.py", "max_issues_repo_name": "PeiliangLi/avod", "max_issues_repo_head_hexsha": "655b333d36710d665de63fa67355d973364625b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demos/read_lidar.py", "max_forks_repo_name": "PeiliangLi/avod", "max_forks_repo_head_hexsha": "655b333d36710d665de63fa67355d973364625b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.0981595092, "max_line_length": 372, "alphanum_fraction": 0.628389354, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1885}
#%% import pandas as pd import numpy as np from io import StringIO # our simple csv file file_path = "./3.Pandas/simple_data.csv" #%% # create dataframe, read_csv data dt = pd.read_csv(file_path) dt #%% choose columns dt = pd.read_csv(file_path, usecols=['Imie', 'wiek']) print(dt.head()) #%% parse and cast data to special format dt = pd.read_csv(file_path, usecols=['Imie', 'wiek'], dtype={'wiek': np.int16}) print(dt.head()) #%% def imie_converter(imie): return imie*5 file_path = "./3.Pandas/simple_data.csv" dt = pd.read_csv(file_path, usecols=['Imie', 'wiek'], dtype={'Imie': str, 'wiek': np.int8}, converters={"Imie":imie_converter} )
{"hexsha": "a2d078dbca8bbb60f52a9a8e26d7face80889531", "size": 753, "ext": "py", "lang": "Python", "max_stars_repo_path": "3.Pandas/3.reading_files.py", "max_stars_repo_name": "ksopyla/data-visualization-intro", "max_stars_repo_head_hexsha": "d512b03c820f49108611d3076c4f2cb2cf4de94e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-07T12:10:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-07T12:10:04.000Z", "max_issues_repo_path": "3.Pandas/3.reading_files.py", "max_issues_repo_name": "ksopyla/data-visualization-intro", "max_issues_repo_head_hexsha": "d512b03c820f49108611d3076c4f2cb2cf4de94e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "3.Pandas/3.reading_files.py", "max_forks_repo_name": "ksopyla/data-visualization-intro", "max_forks_repo_head_hexsha": "d512b03c820f49108611d3076c4f2cb2cf4de94e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-08T05:17:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-08T05:17:56.000Z", "avg_line_length": 20.3513513514, "max_line_length": 53, "alphanum_fraction": 0.5962815405, "include": true, "reason": "import numpy", "num_tokens": 196}
import numpy as np
from keras.datasets import mnist


def create_gallery_probe(x, digit_indices, num_classes):
    probe = []
    probe_l = []
    gallery = []
    gallery_l = []
    n = min([len(digit_indices[d]) for d in range(num_classes)])
    numProbe = max(int(n*0.25), 1)
    for d in range(num_classes):
        for i in range(n):
            z1 = digit_indices[d][i]
            if i < numProbe:
                probe += [[x[z1]]]
                probe_l.append(d)
            else:
                gallery += [[x[z1]]]
                gallery_l.append(d)
    return np.array(probe), np.array(probe_l), np.array(gallery), np.array(gallery_l)


def get_num_classes():
    return 10


def get_train_data():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.astype('float32')
    x_train /= 255
    x_train -= 0.5
    x_train *= 2

    num_classes = get_num_classes()
    locs = dict()
    minNum = 0
    for i in range(0, num_classes):
        locs[i] = np.where(y_train == i)[0]
        if i == 0:
            minNum = locs[i].shape[0]
        elif (minNum > locs[i].shape[0]):
            minNum = locs[i].shape[0]

    # return data such that every set of 10 data points has a sample from each class
    # (np.float was removed from NumPy; the built-in float is the supported dtype)
    x_train2 = np.zeros((minNum//num_classes * num_classes * num_classes, x_train.shape[1], x_train.shape[2]), float)
    y_train2 = np.zeros((minNum//num_classes * num_classes * num_classes), np.uint8)
    idx = -1
    idx2 = -1
    for i in range(0, minNum//num_classes):
        for j in range(0, num_classes):
            idx2 = idx2 + 1
            for cls in range(0, num_classes):
                idx = idx + 1
                oriIdx = locs[cls][idx2]
                x_train2[idx, :, :] = x_train[oriIdx, :, :]
                y_train2[idx] = y_train[oriIdx]

    return x_train2, y_train2


def get_reid_test_data():
    num_classes = get_num_classes()
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_test = x_test.astype('float32')
    x_test /= 255
    x_test -= 0.5
    x_test *= 2

    digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
    [probe, pLabel, gallery, gLabel] = create_gallery_probe(x_test, digit_indices, num_classes)
    probe = probe[:, 0]
    gallery = gallery[:, 0]
    return probe, pLabel, gallery, gLabel
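
# ------------------------------------------------------------------
# Hypothetical usage sketch (editor's addition): Keras downloads MNIST on
# first use, so this only assumes network access and a working Keras install.
if __name__ == '__main__':
    x_train, y_train = get_train_data()
    probe, p_label, gallery, g_label = get_reid_test_data()
    print('train:', x_train.shape, y_train.shape)            # class-interleaved training set
    print('probe:', probe.shape, 'gallery:', gallery.shape)  # re-identification split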
{"hexsha": "5cff93e743aa2a4f1149cae57819f938dae43fcd", "size": 2305, "ext": "py", "lang": "Python", "max_stars_repo_path": "MNISTHelpers.py", "max_stars_repo_name": "psiva7/MNISTTriplet", "max_stars_repo_head_hexsha": "695897b5229387a092b69b5de17dbd996ca2d899", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MNISTHelpers.py", "max_issues_repo_name": "psiva7/MNISTTriplet", "max_issues_repo_head_hexsha": "695897b5229387a092b69b5de17dbd996ca2d899", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MNISTHelpers.py", "max_forks_repo_name": "psiva7/MNISTTriplet", "max_forks_repo_head_hexsha": "695897b5229387a092b69b5de17dbd996ca2d899", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4057971014, "max_line_length": 120, "alphanum_fraction": 0.5874186551, "include": true, "reason": "import numpy", "num_tokens": 655}
import tensorflow as tf

# Define a 2D array (list of lists)
list_of_list = [[10, 20], [30, 40]]

# Convert to a tensor - pass the 2D array to the constant function
mat1 = tf.constant(list_of_list)

# Check the rank
print("rank:", tf.rank(mat1))
# Print the tensor
print("mat1:", mat1)

# Define 1D vectors
vec1 = tf.constant([1, 0])
vec2 = tf.constant([-1, 2])

# Convert to a tensor - stack the 1D arrays on top of each other
mat2 = tf.stack([vec1, vec2])

# Check the rank
print("rank:", tf.rank(mat2))

# Print the tensor
print("mat2:", mat2)

# Element-by-element multiplication
element_mul = tf.math.multiply(mat1, mat2)
print("result:", element_mul)
print("rank:", tf.rank(element_mul))

# Broadcasting
element_bc = tf.math.multiply(mat1, 3)
print("result:", element_bc)
print("rank:", tf.rank(element_bc))

# Matrix multiplication
mat_mul = tf.matmul(mat1, mat2)
print("result:", mat_mul)
print("rank:", tf.rank(mat_mul))

# Addition
add1 = tf.math.add(mat1, mat2)
print("result:", add1)
print("rank:", tf.rank(add1))

# Addition (Python operator)
add2 = mat1 + mat2
print("result:", add2)
print("rank:", tf.rank(add2))

# Convert the tensor to a NumPy array
np_arr = mat_mul.numpy()
print(type(np_arr))
print(np_arr)
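
# ------------------------------------------------------------------
# Editor's addition: a small NumPy cross-check of the two products above.
# The expected values follow directly from mat1 = [[10, 20], [30, 40]] and
# mat2 = [[1, 0], [-1, 2]].
import numpy as np
a = np.array([[10, 20], [30, 40]])
b = np.array([[1, 0], [-1, 2]])
print(a * b)  # element-wise:    [[ 10   0] [-30  80]]
print(a @ b)  # matrix product:  [[-10  40] [-10  80]]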
{"hexsha": "b400fd372ef4a504ce034bf9a1333d949f9e9301", "size": 1068, "ext": "py", "lang": "Python", "max_stars_repo_path": "practice/3_basic_tensorflow/Example_Matrix.py", "max_stars_repo_name": "rabbitsun2/toy_python", "max_stars_repo_head_hexsha": "32f84b4d15b13c4daa4fa212a40e685abc0d2a5d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "practice/3_basic_tensorflow/Example_Matrix.py", "max_issues_repo_name": "rabbitsun2/toy_python", "max_issues_repo_head_hexsha": "32f84b4d15b13c4daa4fa212a40e685abc0d2a5d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "practice/3_basic_tensorflow/Example_Matrix.py", "max_forks_repo_name": "rabbitsun2/toy_python", "max_forks_repo_head_hexsha": "32f84b4d15b13c4daa4fa212a40e685abc0d2a5d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.8, "max_line_length": 42, "alphanum_fraction": 0.6666666667, "include": true, "reason": "from numpy", "num_tokens": 430}
from __future__ import print_function, division, absolute_import import torch import torch.nn as nn import torch.nn.functional as F # HACK TODO DEBUG import numpy as np from torchsummary import summary try: # relative import: when executing as a package: python -m ... from .base_models import BaseModelAutoEncoder, ConvSN2d, ConvTransposeSN2d, LinearSN, UNet from ..losses.losses import autoEncoderLoss, AEboundLoss from .base_trainer import BaseTrainer from .gan import GeneratorUnet, EncoderUnet except: # absolute import: when executing directly: python train.py ... from models.base_models import BaseModelAutoEncoder, ConvSN2d, ConvTransposeSN2d, LinearSN, UNet from losses.losses import autoEncoderLoss, AEboundLoss from models.base_trainer import BaseTrainer from models.gan import GeneratorUnet, EncoderUnet class LinearAutoEncoder(BaseModelAutoEncoder): """ :param state_dim: (int) :param img_shape: (tuple) """ def __init__(self, state_dim, img_shape): super(LinearAutoEncoder, self).__init__(state_dim, img_shape) self.img_shape = img_shape self.encoder = nn.Sequential( nn.Linear(np.prod(self.img_shape), state_dim), ) self.decoder = nn.Sequential( nn.Linear(state_dim, np.prod(self.img_shape)), nn.Tanh() ) def encode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ # Flatten input x = x.view(x.size(0), -1) return self.encoder(x) def decode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ x = self.decoder(x) x = x.view(x.size(0), *self.img_shape) return x class DenseAutoEncoder(BaseModelAutoEncoder): """ Dense autoencoder network Known issue: it reconstructs the image but omits the robot arm :param state_dim: (int) :param img_shape: (tuple) """ def __init__(self, state_dim, img_shape): super(DenseAutoEncoder, self).__init__(state_dim, img_shape) self.img_shape = img_shape self.encoder = nn.Sequential( nn.Linear(np.prod(self.img_shape), 50), nn.Tanh(), nn.Linear(50, 50), nn.Tanh(), nn.Linear(50, state_dim), ) self.decoder = nn.Sequential( nn.Linear(state_dim, 50), nn.Tanh(), nn.Linear(50, 50), nn.Tanh(), nn.Linear(50, np.prod(self.img_shape)), nn.Tanh() ) def encode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ # Flatten input x = x.view(x.size(0), -1) return self.encoder(x) def decode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ x = self.decoder(x) x = x.view(x.size(0), *self.img_shape) return x class CNNAutoEncoder(BaseModelAutoEncoder): """ Custom convolutional autoencoder network Input dim (same as ResNet): 3x224x224 :param state_dim: (int) """ def __init__(self, state_dim=3, img_shape=(3, 224, 224)): # state_dim=state_dim, img_shape=img_shape super(CNNAutoEncoder, self).__init__(state_dim=state_dim, img_shape=img_shape) self.state_dim = state_dim self.img_shape = img_shape outshape = summary(self.encoder_conv, img_shape, show=False) # [-1, channels, high, width] self.img_height, self.img_width = outshape[-2:] self.encoder_fc = nn.Sequential( nn.Linear(self.img_height * self.img_width * 64, state_dim) ) self.decoder_fc = nn.Sequential( nn.Linear(state_dim, self.img_height * self.img_width * 64) ) def encode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ encoded = self.encoder_conv(x) encoded = encoded.view(encoded.size(0), -1) return self.encoder_fc(encoded) def decode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ decoded = self.decoder_fc(x) decoded = decoded.view(x.size(0), 64, self.img_height, self.img_width) return self.decoder_conv(decoded) class UNetAutoEncoder(BaseModelAutoEncoder): """ Custom UNet 
autoencoder network :param state_dim: (int) """ def __init__(self, state_dim=3, img_shape=(3, 224, 224)): super(UNetAutoEncoder, self).__init__(state_dim=state_dim, img_shape=img_shape) self.decoder = GeneratorUnet(state_dim, img_shape, unet_bn=True) self.encoder = EncoderUnet(state_dim, img_shape, unet_bn=True) def encode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ return self.encoder(x) def decode(self, x): """ :param x: (th.Tensor) :return: (th.Tensor) """ return self.decoder(x) class AutoEncoderTrainer(BaseTrainer): def __init__(self, state_dim=2, img_shape=(3, 224, 224)): super().__init__() # BaseTrainer.__init__(self) self.state_dim = state_dim self.img_shape = img_shape def build_model(self, model_type='custom_cnn'): assert model_type in ['custom_cnn', 'linear', 'mlp', 'unet'] if model_type == 'custom_cnn': self.model = CNNAutoEncoder(self.state_dim, self.img_shape) elif model_type == 'mlp': self.model = DenseAutoEncoder(self.state_dim, self.img_shape) elif model_type == 'linear': self.model = LinearAutoEncoder(self.state_dim, self.img_shape) elif model_type == 'unet': self.model = UNetAutoEncoder(self.state_dim, self.img_shape) else: raise NotImplementedError("model type: ({}) not supported yet.".format(model_type)) def train_on_batch(self, obs, next_obs, optimizer, loss_manager, valid_mode=False, device=torch.device('cpu')): state_pred = self.encode(obs) decoded_obs = self.decode(state_pred) decoded_next_obs = self.reconstruct(next_obs) autoEncoderLoss(obs, decoded_obs, next_obs, decoded_next_obs, weight=1.0, loss_manager=loss_manager) AEboundLoss(state_pred, weight=1.0, loss_manager=loss_manager, name='bonud_state_loss', max_val=50) loss = self.update_nn_weights(optimizer, loss_manager, valid_mode=valid_mode) return loss def reconstruct(self, x): return self.model.decode(self.model.encode(x)) def encode(self, x): return self.model.encode(x) def decode(self, x): return self.model.decode(x) def forward(self, x): return self.model.encode(x) # or self.model(x) if __name__ == "__main__": print("Start") from torchsummary import summary img_shape = (3, 128, 128) model = CNNAutoEncoder(state_dim=2, img_shape=img_shape) A = summary(model, img_shape) # import ipdb; ipdb.set_trace()
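
# ------------------------------------------------------------------
# Hypothetical usage sketch (editor's addition): exercises the trainer API
# defined above, assuming the conv stacks inherited from BaseModelAutoEncoder
# (defined in base_models, not shown here) accept 3x128x128 inputs, as the
# __main__ block above suggests.
def _demo_trainer():
    trainer = AutoEncoderTrainer(state_dim=2, img_shape=(3, 128, 128))
    trainer.build_model(model_type='custom_cnn')
    batch = torch.randn(4, 3, 128, 128)
    print(trainer.encode(batch).shape)       # expected: torch.Size([4, 2])
    print(trainer.reconstruct(batch).shape)  # expected: torch.Size([4, 3, 128, 128])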
{"hexsha": "22dc9878071968d70f3405762f7305146ed4b36e", "size": 7079, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/autoencoders.py", "max_stars_repo_name": "ncble/srl-zoo", "max_stars_repo_head_hexsha": "cc209a292ec19718e749e5585488c06f5650e69b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/autoencoders.py", "max_issues_repo_name": "ncble/srl-zoo", "max_issues_repo_head_hexsha": "cc209a292ec19718e749e5585488c06f5650e69b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/autoencoders.py", "max_forks_repo_name": "ncble/srl-zoo", "max_forks_repo_head_hexsha": "cc209a292ec19718e749e5585488c06f5650e69b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-01T09:13:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-01T09:13:00.000Z", "avg_line_length": 31.6026785714, "max_line_length": 115, "alphanum_fraction": 0.6183076706, "include": true, "reason": "import numpy", "num_tokens": 1735}
import numpy as np from scipy import signal from scipy import interpolate import cv2 import wave import math import sys #cvt angle to color def val2color(radangle): M_PI = math.pi pi_sixtydig = M_PI / 3 angle = ((radangle / (M_PI*2))- (int)(radangle / (M_PI * 2)))*(M_PI * 2) rgb = [0,0,0] if (angle >= 0 and angle < pi_sixtydig) : val = (angle - pi_sixtydig*0)/ pi_sixtydig rgb[0] = 255 rgb[1] = 255*val rgb[2] = 0 elif(angle >= pi_sixtydig*1 and angle < pi_sixtydig*2) : val = (angle - pi_sixtydig * 1) / pi_sixtydig rgb[0] = 255 *(1 - val) rgb[1] = 255 rgb[2] = 0 elif (angle >= pi_sixtydig * 2 and angle < pi_sixtydig * 3): val = (angle - pi_sixtydig * 2) / pi_sixtydig rgb[0] = 0 rgb[1] = 255 rgb[2] = 255 * ( val) elif (angle >= pi_sixtydig * 3 and angle < pi_sixtydig * 4) : val = (angle - pi_sixtydig * 3) / pi_sixtydig rgb[0] = 0 rgb[1] = 255 * (1 - val) rgb[2] = 255 elif (angle >= pi_sixtydig * 4 and angle < pi_sixtydig * 5) : val = (angle - pi_sixtydig * 4) / pi_sixtydig rgb[0] = 255 * ( val) rgb[1] = 0 rgb[2] = 255 elif (angle >= pi_sixtydig * 5 and angle < pi_sixtydig * 6) : val = (angle - pi_sixtydig * 5) / pi_sixtydig rgb[0] = 255 rgb[1] = 0 rgb[2] = 255 * (1 - val) return (rgb[0],rgb[1],rgb[2]) global circle_num global idx_range_sum global temporal_cnt global col_change_speed col_change_speed = 0.01 circle_num = 5 def drawCircles(points_array,color, alpha=1.0): global circle_num global temporal_cnt for i in range(int(len(points_array))): for j in range(int(len(points_array[i]))): if j==int(len(points_array[i]))-1: cv2.line(base,points_array[i][j],points_array[i][0],np.multiply(alpha,val2color(color[i]+temporal_cnt*col_change_speed)),2) else: cv2.line(base,points_array[i][j],points_array[i][j+1],np.multiply(alpha,val2color(color[i]+temporal_cnt*col_change_speed)),2) def cnt_where(idx,idx_range): cnt = 1 val = idx_range_sum[cnt] while idx >= val: cnt += 1 val =idx_range_sum[cnt] return cnt-1 def min_max(x, axis=None): min = x.min(axis=axis, keepdims=True) max = x.max(axis=axis, keepdims=True) result = 8*(x-min)/(max-min) return result def base_update(mean,cur): add_v = ((mean+8)/2 - cur) * 0.05 cur = cur + add_v if cur > 8: cur = 8 return cur def peak_update(max,cur): add_v = ((max)/2 - cur) * 0.05 cur = cur + add_v if cur > 2: cur = 2 if cur < 0: cur = 0 return cur #spline fitting def spline3(pointarray,point_num,deg): x = [] y = [] for i in range(len(pointarray)): x.append(pointarray[i][0]) y.append(pointarray[i][1]) tck,u = interpolate.splprep([x,y],k=deg,s=0) u = np.linspace(0,1,num=point_num,endpoint=True) spline = interpolate.splev(u,tck) spline_points = [] for i in range(len(spline[0])): spline_points.append((int(spline[0][i]),int(spline[1][i]))) return spline_points # number of afterimage num = 10 # input wav file and output mp4 file wavf = sys.argv[1] outmp4 = sys.argv[2] #output length (second) length_video = int(sys.argv[3]) #read wav data wr = wave.open(wavf, 'r') ch = wr.getnchannels() width = wr.getsampwidth() fr = wr.getframerate() fn = wr.getnframes() data = wr.readframes(wr.getnframes()) wr.close() X = np.frombuffer(data, dtype=np.int16) if ch == 2: l_channel = (1.0/255)* X[::ch] r_channel = (1.0/255)* X[1::ch] print(l_channel) print(len(r_channel)) print("Channel: ", ch) print("Sample width: ", width) print("Frame Rate: ", fr) print("Frame num: ", fn) print("Params: ", wr.getparams()) print("Total time: ", 1.0 * fn / fr) # set output mp4 frame_rate = 30.0 fmt = cv2.VideoWriter_fourcc('m','p','4','v') writer = cv2.VideoWriter(sys.argv[2],fmt,frame_rate, (640,480)) 
length_video = int(length_video * frame_rate) # fft parameter (window size) window_size = 8092 # array for drawing points points_v = [] # visualization parameters (maximum and minimum db, border index of frequency) base_peak = [2,2,2,2,2] base_val=[8,8,8,8,8] idx_range_sum = [2,40,80,200,400,520] temporal_cnt = 0 for j in range(length_video): print("\r"+str(int(j/30))+ "s rendered", end="") init_pos =int( j * 44100/ 30) # spectrum freq , pw = signal.welch(l_channel[init_pos:init_pos+window_size],fr, nperseg=4096) # update max and min of db pw_db = -np.log10(pw) for i in range(circle_num): base_peak[i]=peak_update(np.min(pw_db[idx_range_sum[i]+1:idx_range_sum[i+1]]),base_peak[i]) base_val[i]=base_update(np.mean(pw_db[idx_range_sum[i]+1:idx_range_sum[i+1]]),base_val[i]) base = np.zeros((480,640,3), np.uint8) v = len(points_v) prange = min(len(points_v),num) # draw afterimage for k in range(prange): drawCircles(points_v[v-(prange-k)-1], base_val,(k)/num ) points_a = [] points = [] x_s = [] y_s = [] prev = -1 # spectrum 2 drawing points ((frec,power)-->(r,theta)) for i in range(0,idx_range_sum[circle_num]+1): if i <= idx_range_sum[0]: continue idx = cnt_where(i-1,idx_range_sum) if not prev == -1 and not prev == idx: points_a.append(spline3(points,5*len(points),3)) points = [] prev = idx theta = -(freq[i]- freq[idx_range_sum[idx]]) / (freq[idx_range_sum[idx + 1]] - freq[idx_range_sum[idx]]) * math.pi *2 v1 = pw_db[i] if v1 < base_peak[idx] : v1 = base_peak[idx] if v1 > base_val[idx] : v1 = base_val[idx] r1 = 40 + 40* idx + 30 * (base_val[idx]- v1) / (base_val[idx] - base_peak[idx]) p1=(320 + int(r1 * math.cos(theta)),240 + int(r1 * math.sin(theta))) x_s.append(320 + int(r1 * math.cos(theta))) y_s.append(240 + int(r1 * math.sin(theta))) points.append(p1) # spline fitting points_a.append(spline3(points,5*len(points),3)) # draw current frame drawCircles(points_a, base_val) points_v.append(points_a) # render frame writer.write(base) temporal_cnt +=1 print("\nfinished!")
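
# ------------------------------------------------------------------
# Example invocation (editor's addition; file names are placeholders):
#
#   python main.py input.wav output.mp4 30
#
# i.e. a WAV file in, an mp4v-encoded video out, and the clip length in
# seconds as the third argument. Note that the script reads two channels
# and indexes samples at 44100 Hz, so it assumes a 16-bit stereo 44.1 kHz
# input file.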
{"hexsha": "24eeec1f85b0a13acf30d8de0136a3d999bd711a", "size": 6635, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "cln515/AudioVisualizer", "max_stars_repo_head_hexsha": "343ea7f4150b27b1bf832fade3575af34b8a5dcf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "cln515/AudioVisualizer", "max_issues_repo_head_hexsha": "343ea7f4150b27b1bf832fade3575af34b8a5dcf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "cln515/AudioVisualizer", "max_forks_repo_head_hexsha": "343ea7f4150b27b1bf832fade3575af34b8a5dcf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9737991266, "max_line_length": 143, "alphanum_fraction": 0.5760361718, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2085}
import torch import numpy as np from DDPG import DDPG from utils import ReplayBuffer device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class HAC: def __init__(self, k_level, H, state_dim, action_dim, render, endgoal_thresholds, action_bounds, action_offset, state_bounds, state_offset, lr, goal_dim, subgoal_bounds, subgoal_offset, subgoal_thresholds): # adding lowest level print("LOWEST LEVEL") self.HAC = [DDPG(state_dim, state_dim, action_dim, action_bounds, action_offset, lr, H)] print("FINISH BUILDING HAC for lowest level") self.replay_buffer = [ReplayBuffer()] # adding remaining levels for i in range(k_level - 1): print("building HAC for level-{}".format(i + 1)) # self.HAC.append(DDPG(state_dim, action_dim, state_bounds, state_offset, lr, H)) self.HAC.append(DDPG(state_dim, goal_dim, state_dim, subgoal_bounds, subgoal_offset, lr, H)) print("finished HAC for level-{}".format(i + 1)) self.replay_buffer.append(ReplayBuffer()) # set some parameters self.k_level = k_level self.H = H self.action_dim = action_dim self.state_dim = state_dim self.goal_dim = goal_dim self.endgoal_thresholds = endgoal_thresholds self.subgoal_thresholds = subgoal_thresholds self.render = render # logging parameters self.goals = [None] * self.k_level self.reward = 0 self.timestep = 0 def set_parameters(self, lamda, gamma, action_clip_low, action_clip_high, state_clip_low, state_clip_high, exploration_action_noise, exploration_state_noise, subgoal_clip_low, subgoal_clip_high, exploration_subgoal_noise): self.lamda = lamda self.gamma = gamma self.action_clip_low = action_clip_low self.action_clip_high = action_clip_high self.state_clip_low = state_clip_low self.state_clip_high = state_clip_high self.exploration_action_noise = exploration_action_noise self.exploration_subgoal_noise = exploration_state_noise self.subgoal_clip_low = subgoal_clip_low self.subgoal_clip_high = subgoal_clip_high # self.exploration_subgoal_noise = exploration_subgoal_noise """ """ def check_goal(self, state, goal, threshold): # print("state shape {}".format(state.shape)) # print("action shape {}".format(self.action_dim)) # print("goal shape {}".format(goal.shape)) # print("threshold shape {}".format(threshold.shape)) # state here is only checking the achieved goal, in PickAndPlace is indexed [3:6] obj_state = state[3:6] for i in range(self.goal_dim): if abs(obj_state[i] - goal[i]) > threshold[i]: return False return True """ for level>0 """ def check_goal_up(self, state, goal, threshold): # state here is only checking the subgoal and goal # print("==================== LEVEL-{} ===========================".format(self.k_level-1)) # print("UPPER LVL state shape {}".format(state.shape)) # print("UPPER LVL action shape {}".format(self.action_dim)) # print("UPPER LVL goal shape {}".format(goal.shape)) # print("UPPER LVL threshold shape {}".format(threshold.shape)) for i in range(self.goal_dim): if abs(state[i] - goal[i]) > threshold[i]: return False return True """ """ def run_HAC(self, env, i_level, state, goal, subgoal_test): next_state = None done = None goal_transitions = [] # logging updates self.goals[i_level] = goal print("<<<<<<<<<<<<<<<<<<<<<<<<<< GOALS in run_HAC {}".format(self.goals)) # H attempts for _ in range(self.H): next_subgoal_test = subgoal_test action = self.HAC[i_level].select_action(state, goal) # print("ACTION in run HAC for level-{} >>>>> {}".format(i_level, action)) # # <================ high level policy ================> if i_level > 0: # print("action from nn for level-1 >>>>> {}".format(action)) 
# action should be subgoal # add noise or take random action if not subgoal testing if not subgoal_test: if np.random.random_sample() > 0.2: action = action + np.random.normal(0, self.exploration_subgoal_noise) action = action.clip(self.subgoal_clip_low, self.subgoal_clip_high) else: action = np.random.uniform(self.subgoal_clip_low, self.subgoal_clip_high) # Determine whether to test subgoal (action) if np.random.random_sample() < self.lamda: next_subgoal_test = True # Pass subgoal to lower level # subgoal is action next_state, done = self.run_HAC(env, i_level - 1, state, action, next_subgoal_test) # only take achieved goal achieved_goal = next_state # if subgoal was tested but not achieved, add subgoal testing transition if next_subgoal_test and not self.check_goal_up(action, achieved_goal, self.endgoal_thresholds): # print("ACTION inserted to replay buffer @ 129 {} at level-{}".format(action.shape, i_level)) self.replay_buffer[i_level].add((state, action, -self.H, next_state, goal, 0.0, float(done))) # for hindsight action transition # TODO: action = achieved_goal # print("HINDSIGHT ACTION action shape {}".format(action.shape)) # <================ low level policy ================> else: # add noise or take random action if not subgoal testing if not subgoal_test: if np.random.random_sample() > 0.2: action = action + np.random.normal(0, self.exploration_action_noise) action = action.clip(self.action_clip_low, self.action_clip_high) else: print("======================== SUBGOAL TESTING ============================") action = np.random.uniform(self.action_clip_low, self.action_clip_high) # take primitive action next_state, rew, done, _ = env.step(action) # print("=============================================") # print("Observation {}".format(np.around(next_state, decimals=3))) if self.render: env.render() if self.k_level == 2: env.render_goal(self.goals[0], self.goals[1]) elif self.k_level == 3: env.render_goal_2(self.goals[0], self.goals[1], self.goals[2]) for _ in range(1000000): continue # this is for logging self.reward += rew self.timestep += 1 # check if goal is achieved goal_achieved = self.check_goal(next_state, goal, self.endgoal_thresholds) # hindsight action transition if goal_achieved: # print("ACTION inserted to replay buffer @ 172 {} at level-{}".format(action.shape, i_level)) self.replay_buffer[i_level].add((state, action, 0.0, next_state, goal, 0.0, float(done))) else: # print("ACTION inserted to replay buffer @ 175 {} at level-{}".format(action.shape, i_level)) self.replay_buffer[i_level].add((state, action, -1.0, next_state, goal, self.gamma, float(done))) # copy for goal transition goal_transitions.append([state, action, -1.0, next_state, None, self.gamma, float(done)]) state = next_state if done or goal_achieved: break # hindsight goal transition # last transition reward and discount is 0 goal_transitions[-1][2] = 0.0 goal_transitions[-1][5] = 0.0 for transition in goal_transitions: # last state is goal for all transitions if i_level > 0: transition[4] = next_state[3:6] else: transition[4] = next_state # print("ACTION inserted to replay buffer @ 196 {} at level-{}".format(transition[1].shape, i_level)) self.replay_buffer[i_level].add(tuple(transition)) return next_state, done """ """ def update(self, n_iter, batch_size): for i in range(self.k_level): self.HAC[i].update(self.replay_buffer[i], n_iter, batch_size) """ """ def save(self, directory, name): for i in range(self.k_level): self.HAC[i].save(directory, name + '_level_{}'.format(i)) """ """ def load(self, 
directory, name): for i in range(self.k_level): self.HAC[i].load(directory, name + '_level_{}'.format(i))
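
# ------------------------------------------------------------------
# Hypothetical construction sketch (editor's addition): every dimension,
# bound and threshold below is a placeholder -- real values depend on the
# environment being trained.
def _demo_agent():
    k_level, H = 2, 20
    state_dim, action_dim, goal_dim = 10, 4, 3
    agent = HAC(k_level, H, state_dim, action_dim, render=False,
                endgoal_thresholds=np.full(goal_dim, 0.05),
                action_bounds=np.ones(action_dim), action_offset=np.zeros(action_dim),
                state_bounds=np.ones(state_dim), state_offset=np.zeros(state_dim),
                lr=1e-3, goal_dim=goal_dim,
                subgoal_bounds=np.ones(state_dim), subgoal_offset=np.zeros(state_dim),
                subgoal_thresholds=np.full(goal_dim, 0.05))
    agent.set_parameters(lamda=0.3, gamma=0.95,
                         action_clip_low=-np.ones(action_dim),
                         action_clip_high=np.ones(action_dim),
                         state_clip_low=-np.ones(state_dim),
                         state_clip_high=np.ones(state_dim),
                         exploration_action_noise=np.full(action_dim, 0.1),
                         exploration_state_noise=np.full(state_dim, 0.1),
                         subgoal_clip_low=-np.ones(state_dim),
                         subgoal_clip_high=np.ones(state_dim),
                         exploration_subgoal_noise=np.full(state_dim, 0.1))
    return agent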
{"hexsha": "6e0b3df556c37fcb458c145588a76ec5e45ca7ce", "size": 9350, "ext": "py", "lang": "Python", "max_stars_repo_path": "HAC.py", "max_stars_repo_name": "auliyafitri/Hierarchical-Actor-Critic-HAC-PyTorch", "max_stars_repo_head_hexsha": "470e7c392b6436375157c4dc6adc1edd3e9be63a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HAC.py", "max_issues_repo_name": "auliyafitri/Hierarchical-Actor-Critic-HAC-PyTorch", "max_issues_repo_head_hexsha": "470e7c392b6436375157c4dc6adc1edd3e9be63a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HAC.py", "max_forks_repo_name": "auliyafitri/Hierarchical-Actor-Critic-HAC-PyTorch", "max_forks_repo_head_hexsha": "470e7c392b6436375157c4dc6adc1edd3e9be63a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1894273128, "max_line_length": 114, "alphanum_fraction": 0.5649197861, "include": true, "reason": "import numpy", "num_tokens": 2056}
import numpy as np
import torch
import torchvision
import os

from .camvid import CamVid

c10_classes = np.array([[0, 1, 2, 8, 9], [3, 4, 5, 6, 7]], dtype=np.int32)


def camvid_loaders(path, batch_size, num_workers, transform_train, transform_test,
                   use_validation, val_size, shuffle_train=True,
                   joint_transform=None, ft_joint_transform=None, ft_batch_size=1,
                   **kwargs):

    # load training and finetuning datasets
    print(path)
    train_set = CamVid(root=path, split='train', joint_transform=joint_transform,
                       transform=transform_train, **kwargs)
    ft_train_set = CamVid(root=path, split='train', joint_transform=ft_joint_transform,
                          transform=transform_train, **kwargs)

    val_set = CamVid(root=path, split='val', joint_transform=None,
                     transform=transform_test, **kwargs)
    test_set = CamVid(root=path, split='test', joint_transform=None,
                      transform=transform_test, **kwargs)

    num_classes = 11  # hard-coded number of CamVid labels here

    return {
        'train': torch.utils.data.DataLoader(
            train_set, batch_size=batch_size, shuffle=shuffle_train,
            num_workers=num_workers, pin_memory=True),
        'fine_tune': torch.utils.data.DataLoader(
            ft_train_set, batch_size=ft_batch_size, shuffle=shuffle_train,
            num_workers=num_workers, pin_memory=True),
        'val': torch.utils.data.DataLoader(
            val_set, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True),
        'test': torch.utils.data.DataLoader(
            test_set, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True),
    }, num_classes


def svhn_loaders(path, batch_size, num_workers, transform_train, transform_test,
                 use_validation, val_size, shuffle_train=True):
    train_set = torchvision.datasets.SVHN(root=path, split='train', download=True,
                                          transform=transform_train)

    if use_validation:
        test_set = torchvision.datasets.SVHN(root=path, split='train', download=True,
                                             transform=transform_test)
        train_set.data = train_set.data[:-val_size]
        train_set.labels = train_set.labels[:-val_size]

        test_set.data = test_set.data[-val_size:]
        test_set.labels = test_set.labels[-val_size:]
    else:
        print('You are going to run models on the test set. Are you sure?')
        test_set = torchvision.datasets.SVHN(root=path, split='test', download=True,
                                             transform=transform_test)

    num_classes = 10

    return {
        'train': torch.utils.data.DataLoader(
            train_set, batch_size=batch_size, shuffle=shuffle_train,
            num_workers=num_workers, pin_memory=True),
        'test': torch.utils.data.DataLoader(
            test_set, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True),
    }, num_classes


def loaders(dataset, path, batch_size, num_workers, transform_train, transform_test,
            use_validation=True, val_size=5000, split_classes=None, shuffle_train=True,
            **kwargs):

    if dataset == 'CamVid':
        return camvid_loaders(path, batch_size=batch_size, num_workers=num_workers,
                              transform_train=transform_train, transform_test=transform_test,
                              use_validation=use_validation, val_size=val_size, **kwargs)

    path = os.path.join(path, dataset.lower())

    if dataset == 'SVHN':
        return svhn_loaders(path, batch_size, num_workers, transform_train,
                            transform_test, use_validation, val_size)

    ds = getattr(torchvision.datasets, dataset)

    if dataset == 'STL10':
        train_set = ds(root=path, split='train', download=True, transform=transform_train)
        num_classes = 10
        cls_mapping = np.array([0, 2, 1, 3, 4, 5, 7, 6, 8, 9])
        train_set.labels = cls_mapping[train_set.labels]
    else:
        train_set = ds(root=path, train=True, download=True, transform=transform_train)
        num_classes = max(train_set.targets) + 1

    if use_validation:
        print("Using train (" + str(len(train_set.train_data) - val_size) +
              ") + validation (" + str(val_size) + ")")
        train_set.train_data = train_set.train_data[:-val_size]
        train_set.targets = train_set.targets[:-val_size]

        test_set = ds(root=path, train=True, download=True, transform=transform_test)
        test_set.train = False
        test_set.test_data = test_set.train_data[-val_size:]
        test_set.test_labels = test_set.targets[-val_size:]
        delattr(test_set, 'train_data')
        delattr(test_set, 'train_labels')
    else:
        print('You are going to run models on the test set. Are you sure?')
        if dataset == 'STL10':
            test_set = ds(root=path, split='test', download=True, transform=transform_test)
            test_set.labels = cls_mapping[test_set.labels]
        else:
            test_set = ds(root=path, train=False, download=True, transform=transform_test)

    if split_classes is not None:
        assert dataset == 'CIFAR10'
        assert split_classes in {0, 1}

        print('Using classes:', end='')
        print(c10_classes[split_classes])
        train_mask = np.isin(train_set.targets, c10_classes[split_classes])
        train_set.train_data = train_set.train_data[train_mask, :]
        train_set.targets = np.array(train_set.targets)[train_mask]
        train_set.targets = np.where(
            train_set.targets[:, None] == c10_classes[split_classes][None, :])[1].tolist()
        print('Train: %d/%d' % (train_set.train_data.shape[0], train_mask.size))

        test_mask = np.isin(test_set.test_labels, c10_classes[split_classes])
        test_set.test_data = test_set.test_data[test_mask, :]
        test_set.test_labels = np.array(test_set.test_labels)[test_mask]
        test_set.test_labels = np.where(
            test_set.test_labels[:, None] == c10_classes[split_classes][None, :])[1].tolist()
        print('Test: %d/%d' % (test_set.test_data.shape[0], test_mask.size))

    return {
        'train': torch.utils.data.DataLoader(
            train_set, batch_size=batch_size, shuffle=shuffle_train,
            num_workers=num_workers, pin_memory=True),
        'test': torch.utils.data.DataLoader(
            test_set, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True),
    }, num_classes
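# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): fetch CIFAR-10
# loaders through `loaders` above. The plain ToTensor transform and the path
# are generic example values, not the ones used elsewhere in this repository.
# ---------------------------------------------------------------------------
def _example_cifar10_loaders(path='./data'):
    import torchvision.transforms as transforms
    transform = transforms.ToTensor()
    dataloaders, num_classes = loaders(
        'CIFAR10', path, batch_size=128, num_workers=4,
        transform_train=transform, transform_test=transform,
        use_validation=True, val_size=5000)
    return dataloaders, num_classes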
{"hexsha": "ec18a8771ab3b4c232d78283091e92065181ff19", "size": 7222, "ext": "py", "lang": "Python", "max_stars_repo_path": "swag/data.py", "max_stars_repo_name": "probabilisticdeeplearning/swa_gaussian", "max_stars_repo_head_hexsha": "033f2b956e98f7050793a0d8a4155feb98931a3d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "swag/data.py", "max_issues_repo_name": "probabilisticdeeplearning/swa_gaussian", "max_issues_repo_head_hexsha": "033f2b956e98f7050793a0d8a4155feb98931a3d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "swag/data.py", "max_forks_repo_name": "probabilisticdeeplearning/swa_gaussian", "max_forks_repo_head_hexsha": "033f2b956e98f7050793a0d8a4155feb98931a3d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.802259887, "max_line_length": 127, "alphanum_fraction": 0.601218499, "include": true, "reason": "import numpy", "num_tokens": 1548}
[STATEMENT] lemma ListReds2: "P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>es',s',b'\<rangle> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # es',s',b'\<rangle>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>es',s',b'\<rangle> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # es',s',b'\<rangle> [PROOF STEP] (*<*) [PROOF STATE] proof (prove) goal (1 subgoal): 1. P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>es',s',b'\<rangle> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # es',s',b'\<rangle> [PROOF STEP] proof(induct rule: rtrancl_induct3) [PROOF STATE] proof (state) goal (2 subgoals): 1. P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # es,s,b\<rangle> 2. \<And>a aa ba ab ac baa. \<lbrakk>P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>a,aa,ba\<rangle>; P \<turnstile> \<langle>a,aa,ba\<rangle> [\<rightarrow>] \<langle>ab,ac,baa\<rangle>; P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # a,aa,ba\<rangle>\<rbrakk> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # ab,ac,baa\<rangle> [PROOF STEP] case refl [PROOF STATE] proof (state) this: goal (2 subgoals): 1. P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # es,s,b\<rangle> 2. \<And>a aa ba ab ac baa. \<lbrakk>P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>a,aa,ba\<rangle>; P \<turnstile> \<langle>a,aa,ba\<rangle> [\<rightarrow>] \<langle>ab,ac,baa\<rangle>; P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # a,aa,ba\<rangle>\<rbrakk> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # ab,ac,baa\<rangle> [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # es,s,b\<rangle> [PROOF STEP] by blast [PROOF STATE] proof (state) this: P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # es,s,b\<rangle> goal (1 subgoal): 1. \<And>a aa ba ab ac baa. \<lbrakk>P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>a,aa,ba\<rangle>; P \<turnstile> \<langle>a,aa,ba\<rangle> [\<rightarrow>] \<langle>ab,ac,baa\<rangle>; P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # a,aa,ba\<rangle>\<rbrakk> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # ab,ac,baa\<rangle> [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>a aa ba ab ac baa. 
\<lbrakk>P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>a,aa,ba\<rangle>; P \<turnstile> \<langle>a,aa,ba\<rangle> [\<rightarrow>] \<langle>ab,ac,baa\<rangle>; P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # a,aa,ba\<rangle>\<rbrakk> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # ab,ac,baa\<rangle> [PROOF STEP] case step [PROOF STATE] proof (state) this: P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>a___,aa___,ba___\<rangle> P \<turnstile> \<langle>a___,aa___,ba___\<rangle> [\<rightarrow>] \<langle>ab___,a_,b_\<rangle> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # a___,aa___,ba___\<rangle> goal (1 subgoal): 1. \<And>a aa ba ab ac baa. \<lbrakk>P \<turnstile> \<langle>es,s,b\<rangle> [\<rightarrow>]* \<langle>a,aa,ba\<rangle>; P \<turnstile> \<langle>a,aa,ba\<rangle> [\<rightarrow>] \<langle>ab,ac,baa\<rangle>; P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # a,aa,ba\<rangle>\<rbrakk> \<Longrightarrow> P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # ab,ac,baa\<rangle> [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # ab___,a_,b_\<rangle> [PROOF STEP] by(rule rtrancl_into_rtrancl[OF step(3) ListRed2[OF step(2)]]) [PROOF STATE] proof (state) this: P \<turnstile> \<langle>Val v # es,s,b\<rangle> [\<rightarrow>]* \<langle>Val v # ab___,a_,b_\<rangle> goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1866, "file": "JinjaDCI_J_Equivalence", "length": 10}
[STATEMENT] lemma usubst_ulambda [usubst]: "\<sigma> \<dagger> (\<lambda> x \<bullet> P(x)) = (\<lambda> x \<bullet> \<sigma> \<dagger> P(x))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<sigma> \<dagger> ulambda P = (\<lambda> x \<bullet> \<sigma> \<dagger> P x) [PROOF STEP] by (transfer, simp)
{"llama_tokens": 119, "file": "UTP_utp_utp_subst", "length": 1}
from builtins import ord

import numpy as np
import cv2

# Create an instance of the VideoCapture() class. It takes a single argument:
# either a path to a file (relative or absolute) or an integer (the index
# of a connected camera).
cap = cv2.VideoCapture(0)

while (True):
    # The cap.read() method of the VideoCapture() class returns two objects:
    # 1) a boolean value (True or False); it is True if the current frame was
    #    loaded without errors. We store it in the variable ret.
    # 2) the current frame read from the video. We store it in the variable
    #    frame.
    ret, frame = cap.read()
    #frame = cv2.flip(frame, -1) # Flip camera vertically

    # The cvtColor() function converts an image to the desired color
    # representation. Its arguments are the image object itself and the name
    # of the representation; here we use grayscale to reduce the resources
    # needed to display the captured video on screen.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # The cv2.imshow() function displays a single image (an OpenCV object)
    # on screen in a separate window.
    cv2.imshow('frame', frame)
    cv2.imshow('gray', gray)

    # To end the WHILE loop when the "q" key is pressed, the body of the IF
    # condition calls break, which exits the WHILE loop so the script can
    # continue running (the condition triggers if and only if "q" is pressed,
    # not "Q" or "й" or "Й").
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the resources held by the cap variable
cap.release()
# Close all windows opened by the script
cv2.destroyAllWindows()
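# ---------------------------------------------------------------------------
# Optional extension (illustrative, not part of the original script): the same
# loop can record the grayscale stream to disk with cv2.VideoWriter. The
# filename, codec, frame rate and frame size below are arbitrary example
# values and must match the camera's actual resolution.
# ---------------------------------------------------------------------------
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('gray.avi', fourcc, 20.0, (640, 480), isColor=False)
# ...inside the loop, after computing `gray`:
#     out.write(gray)
# ...and after the loop, next to cap.release():
# out.release()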
{"hexsha": "891f871c548c859878152e69a35fa787f2c005b6", "size": 1605, "ext": "py", "lang": "Python", "max_stars_repo_path": "FaceRecognition/VisionDiscovery/VisionDicover.py", "max_stars_repo_name": "Harout8/FuzzyNeuralNetwork", "max_stars_repo_head_hexsha": "7b62a2289b3b1dc83e66acb90acdee0a9037b55d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FaceRecognition/VisionDiscovery/VisionDicover.py", "max_issues_repo_name": "Harout8/FuzzyNeuralNetwork", "max_issues_repo_head_hexsha": "7b62a2289b3b1dc83e66acb90acdee0a9037b55d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FaceRecognition/VisionDiscovery/VisionDicover.py", "max_forks_repo_name": "Harout8/FuzzyNeuralNetwork", "max_forks_repo_head_hexsha": "7b62a2289b3b1dc83e66acb90acdee0a9037b55d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2142857143, "max_line_length": 79, "alphanum_fraction": 0.7214953271, "include": true, "reason": "import numpy", "num_tokens": 581}
# SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2022 Scipp contributors (https://github.com/scipp) # @author Simon Heybrock import numpy as np import pytest import scipp as sc def make_dataarray(dim1='x', dim2='y', seed=None): if seed is not None: np.random.seed(seed) return sc.DataArray(data=sc.Variable(dims=[dim1, dim2], values=np.random.rand(2, 3)), coords={ dim1: sc.Variable(dims=[dim1], values=np.arange(2.0), unit=sc.units.m), dim2: sc.Variable(dims=[dim2], values=np.arange(3.0), unit=sc.units.m), 'aux': sc.Variable(dims=[dim2], values=np.random.rand(3)) }, attrs={'meta': sc.Variable(dims=[dim2], values=np.arange(3))}) def test_slice_init(): orig = sc.DataArray(data=sc.Variable(dims=['x'], values=np.arange(2.0)), coords={'x': sc.Variable(dims=['x'], values=np.arange(3.0))}) a = orig['x', :].copy() assert sc.identical(a, orig) b = orig['x', 1:].copy() assert b.data.values[0] == orig.data.values[1:] def test_no_default_init(): with pytest.raises(TypeError): sc.DataArray() def test_init(): d = sc.DataArray( data=sc.Variable(dims=['x'], values=np.arange(3)), coords={ 'x': sc.Variable(dims=['x'], values=np.arange(3), unit=sc.units.m), 'lib1': sc.Variable(dims=['x'], values=np.random.rand(3)) }, attrs={'met1': sc.Variable(dims=['x'], values=np.arange(3))}, masks={'mask1': sc.Variable(dims=['x'], values=np.ones(3, dtype=bool))}) assert len(d.meta) == 3 assert len(d.coords) == 2 assert len(d.attrs) == 1 assert len(d.masks) == 1 assert d.ndim == 1 def test_init_with_name(): a = sc.DataArray(data=1.0 * sc.units.m, name='abc') assert a.name == 'abc' def test_init_from_variable_views(): var = sc.Variable(dims=['x'], values=np.arange(5)) a = sc.DataArray(data=var, coords={'x': var}, attrs={'meta': var}, masks={'mask1': sc.less(var, sc.scalar(3))}) b = sc.DataArray(data=a.data, coords={'x': a.coords['x']}, attrs={'meta': a.attrs['meta']}, masks={'mask1': a.masks['mask1']}) assert sc.identical(a, b) # Ensure mix of Variables and Variable views work c = sc.DataArray(data=a.data, coords={'x': var}, attrs={'meta': a.attrs['meta']}, masks={'mask1': a.masks['mask1']}) assert sc.identical(a, c) @pytest.mark.parametrize("make", [lambda x: x, sc.DataArray]) def test_builtin_len(make): var = sc.empty(dims=['x', 'y'], shape=[3, 2]) obj = make(var) assert obj.ndim == 2 assert len(obj) == 3 assert len(obj['x', 0]) == 2 assert len(obj['y', 0]) == 3 with pytest.raises(TypeError): len(obj['x', 0]['y', 0]) def test_coords(): da = make_dataarray() assert len(dict(da.meta)) == 4 assert len(dict(da.coords)) == 3 assert len(dict(da.attrs)) == 1 assert 'x' in da.coords assert 'y' in da.coords assert 'aux' in da.coords assert 'meta' in da.meta assert 'meta' in da.attrs def test_masks(): da = make_dataarray() mask = sc.Variable(dims=['x'], values=np.array([False, True], dtype=bool)) da.masks['mask1'] = mask assert len(dict(da.masks)) == 1 assert 'mask1' in da.masks assert sc.identical(da.masks.pop('mask1'), mask) assert (len(dict(da.masks))) == 0 def test_ipython_key_completion(): da = make_dataarray() mask = sc.Variable(dims=['x'], values=np.array([False, True], dtype=bool)) da.masks['mask1'] = mask assert set(da.coords._ipython_key_completions_()) == set(da.coords.keys()) assert set(da.attrs._ipython_key_completions_()) == set(da.attrs.keys()) assert set(da.meta._ipython_key_completions_()) == set(da.meta.keys()) assert set(da.masks._ipython_key_completions_()) == set(da.masks.keys()) def test_name(): a = sc.DataArray(data=1.0 * sc.units.m) assert a.name == '' a.name = 'abc' assert a.name == 'abc' def test_eq(): 
da = make_dataarray() assert sc.identical(da['x', :], da) assert sc.identical(da['y', :], da) assert sc.identical(da['y', :]['x', :], da) assert not sc.identical(da['y', 1:], da) assert not sc.identical(da['x', 1:], da) assert not sc.identical(da['y', 1:]['x', :], da) assert not sc.identical(da['y', :]['x', 1:], da) def _is_copy_of(orig, copy): assert sc.identical(orig, copy) assert not id(orig) == id(copy) orig += 1.0 assert sc.identical(orig, copy) def _is_deep_copy_of(orig, copy): assert sc.identical(orig, copy) assert not id(orig) == id(copy) orig += 1.0 assert not sc.identical(orig, copy) def test_copy(): import copy da = make_dataarray() _is_copy_of(da, da.copy(deep=False)) _is_deep_copy_of(da, da.copy()) _is_copy_of(da, copy.copy(da)) _is_deep_copy_of(da, copy.deepcopy(da)) def test_in_place_binary_with_variable(): a = sc.DataArray(data=sc.Variable(dims=['x'], values=np.arange(10.0)), coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))}) copy = a.copy() a += 2.0 * sc.units.dimensionless a *= 2.0 * sc.units.m a -= 4.0 * sc.units.m a /= 2.0 * sc.units.m assert sc.identical(a, copy) def test_in_place_binary_with_dataarray(): da = sc.DataArray( data=sc.Variable(dims=['x'], values=np.arange(1.0, 10.0)), coords={'x': sc.Variable(dims=['x'], values=np.arange(1.0, 10.0))}) orig = da.copy() da += orig da -= orig da *= orig da /= orig assert sc.identical(da, orig) def test_in_place_binary_with_scalar(): a = sc.DataArray(data=sc.Variable(dims=['x'], values=[10.0]), coords={'x': sc.Variable(dims=['x'], values=[10])}) copy = a.copy() a += 2 a *= 2 a -= 4 a /= 2 assert sc.identical(a, copy) def test_binary_with_broadcast(): da = sc.DataArray(data=sc.Variable(dims=['x', 'y'], values=np.arange(20).reshape(5, 4)), coords={ 'x': sc.Variable(dims=['x'], values=np.arange(0.0, 0.6, 0.1)), 'y': sc.Variable(dims=['y'], values=np.arange(0.0, 0.5, 0.1)) }) d2 = da - da['x', 0] da -= da['x', 0] assert sc.identical(da, d2) def test_view_in_place_binary_with_scalar(): d = sc.Dataset(data={'data': sc.Variable(dims=['x'], values=[10.0])}, coords={'x': sc.Variable(dims=['x'], values=[10])}) copy = d.copy() d['x', :] += 2 d['x', :] *= 2 d['x', :] -= 4 d['x', :] /= 2 assert sc.identical(d, copy) def test_rename_dims(): d = make_dataarray('x', 'y', seed=0) original = d.copy() renamed = d.rename_dims({'y': 'z'}) assert sc.identical(d, original) renamed.coords['z'] = renamed.coords['y'] del renamed.coords['y'] assert sc.identical(renamed, make_dataarray('x', 'z', seed=0)) renamed = renamed.rename_dims({'x': 'y', 'z': 'x'}) renamed.coords['y'] = renamed.coords['x'] renamed.coords['x'] = renamed.coords['z'] del renamed.coords['z'] assert sc.identical(renamed, make_dataarray('y', 'x', seed=0)) def test_rename(): d = make_dataarray('x', 'y', seed=0) original = d.copy() renamed = d.rename({'y': 'z'}) assert sc.identical(d, original) assert sc.identical(renamed, make_dataarray('x', 'z', seed=0)) renamed = renamed.rename({'x': 'y', 'z': 'x'}) assert sc.identical(renamed, make_dataarray('y', 'x', seed=0)) def test_rename_kwargs(): d = make_dataarray('x', 'y', seed=0) renamed = d.rename(y='z') assert sc.identical(renamed, make_dataarray('x', 'z', seed=0)) renamed = renamed.rename(x='y', z='x') assert sc.identical(renamed, make_dataarray('y', 'x', seed=0)) def test_rename_with_attr(): d = make_dataarray('x', 'y', seed=0) d.attrs['y'] = d.coords.pop('y') renamed = d.rename({'y': 'z'}) expected = make_dataarray('x', 'z', seed=0) expected.attrs['z'] = expected.coords.pop('z') assert sc.identical(renamed, expected) def 
test_rename_fails_when_coord_already_exists(): d = make_dataarray('x', 'y', seed=0) d.coords['z'] = d.coords['x'].copy() with pytest.raises(sc.CoordError): d.rename({'x': 'z'}) def test_rename_fails_when_attr_already_exists(): d = make_dataarray('x', 'y', seed=0) d.attrs['y'] = d.coords.pop('y') d.attrs['z'] = d.attrs['y'].copy() with pytest.raises(sc.CoordError): d.rename({'y': 'z'}) def test_rename_fails_when_attr_with_same_name_already_exists(): d = make_dataarray('x', 'y', seed=0) with pytest.raises(sc.CoordError): d.rename({'x': 'meta'}) def test_rename_fails_when_coord_with_same_name_already_exists(): d = make_dataarray('x', 'y', seed=0) d.attrs['y'] = d.coords.pop('y') with pytest.raises(sc.CoordError): d.rename({'y': 'aux'}) def test_coord_setitem_can_change_dtype(): a = np.arange(3) v1 = sc.array(dims=['x'], values=a) v2 = v1.astype(sc.DType.int32) data = sc.DataArray(data=v1, coords={'x': v1}) data.coords['x'] = v2 def test_setitem_works_for_view_and_array(): a = make_dataarray('x', 'y', seed=0) a['x', :]['x', 0] = a['x', 1] a['x', 0] = a['x', 1] def test_astype(): a = sc.DataArray(data=sc.Variable(dims=['x'], values=np.arange(10.0, dtype=np.int64)), coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))}) assert a.dtype == sc.DType.int64 a_as_float = a.astype(sc.DType.float32) assert a_as_float.dtype == sc.DType.float32 def test_astype_bad_conversion(): a = sc.DataArray(data=sc.Variable(dims=['x'], values=np.arange(10.0, dtype=np.int64)), coords={'x': sc.Variable(dims=['x'], values=np.arange(10.0))}) assert a.dtype == sc.DType.int64 with pytest.raises(sc.DTypeError): a.astype(sc.DType.string) def test_reciprocal(): a = sc.DataArray(data=sc.Variable(dims=['x'], values=np.array([5.0]))) r = sc.reciprocal(a) assert r.values[0] == 1.0 / 5.0 def test_sizes(): a = sc.DataArray(data=sc.scalar(value=1)) assert a.sizes == {} a = sc.DataArray(data=sc.Variable(dims=['x'], values=np.ones(2))) assert a.sizes == {'x': 2} a = sc.DataArray(data=sc.Variable(dims=['x', 'z'], values=np.ones((2, 4)))) assert a.sizes == {'x': 2, 'z': 4} def test_to(): da = sc.DataArray(data=sc.scalar(value=1, dtype="int32", unit="m")) assert sc.identical( da.to(unit="mm", dtype="int64"), sc.DataArray(data=sc.scalar(value=1000, dtype="int64", unit="mm"))) def test_zeros_like(): a = make_dataarray() a.masks['m'] = sc.array(dims=['x'], values=[True, False]) b = sc.zeros_like(a) a.data *= 0. assert sc.identical(a, b) def test_ones_like(): a = make_dataarray() a.masks['m'] = sc.array(dims=['x'], values=[True, False]) b = sc.ones_like(a) a.data *= 0. a.data += 1. assert sc.identical(a, b) def test_empty_like(): a = make_dataarray() a.masks['m'] = sc.array(dims=['x'], values=[True, False]) b = sc.empty_like(a) assert a.dims == b.dims assert a.shape == b.shape assert a.unit == b.unit assert a.dtype == b.dtype assert (a.variances is None) == (b.variances is None) def test_full_like(): a = make_dataarray() a.masks['m'] = sc.array(dims=['x'], values=[True, False]) b = sc.full_like(a, 2.) a.data *= 0. a.data += 2. assert sc.identical(a, b) def test_zeros_like_deep_copy_masks(): a = make_dataarray() a.masks['m'] = sc.array(dims=['x'], values=[True, False]) c = sc.scalar(33., unit='m') b = sc.zeros_like(a) a.coords['x'][0] = c a.masks['m'][0] = False assert sc.identical(b.coords['x'][0], c) assert sc.identical(b.masks['m'][0], sc.scalar(True))
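# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the slicing
# pattern the tests above rely on, condensed. A point slice and an equivalent
# range-then-point slice select the same underlying data.
# ---------------------------------------------------------------------------
def example_dataarray_slicing():
    da = make_dataarray()
    assert sc.identical(da['x', 1].data, da['x', 1:2]['x', 0].data)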
{"hexsha": "e026a6ea56234f4e53f5d28c07a6184c6c803f21", "size": 12583, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/data_array_test.py", "max_stars_repo_name": "mlund/scipp", "max_stars_repo_head_hexsha": "26648fdcda49b21a7aacdafd58625fab7ee3403b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/data_array_test.py", "max_issues_repo_name": "mlund/scipp", "max_issues_repo_head_hexsha": "26648fdcda49b21a7aacdafd58625fab7ee3403b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/data_array_test.py", "max_forks_repo_name": "mlund/scipp", "max_forks_repo_head_hexsha": "26648fdcda49b21a7aacdafd58625fab7ee3403b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.146039604, "max_line_length": 88, "alphanum_fraction": 0.5568624334, "include": true, "reason": "import numpy", "num_tokens": 3553}
# # Flock model

# ```@raw html
# <video width="auto" controls autoplay loop>
# <source src="../flocking.mp4" type="video/mp4">
# </video>
# ```

# The flock model illustrates how flocking behavior can emerge when each bird follows three simple rules:
#
# * maintain a minimum distance from other birds to avoid collision
# * fly towards the average position of neighbors
# * fly in the average direction of neighbors

# It is also available from the `Models` module as [`Models.flocking`](@ref).

# ## Defining the core structures

# We begin by calling the required packages and defining an agent type representing a bird.

using Agents, LinearAlgebra

using Random # hide

mutable struct Bird <: AbstractAgent
    id::Int
    pos::NTuple{2,Float64}
    vel::NTuple{2,Float64}
    speed::Float64
    cohere_factor::Float64
    separation::Float64
    separate_factor::Float64
    match_factor::Float64
    visual_distance::Float64
end

# The fields `id` and `pos` are required for every agent.
# The field `vel` is required for using [`move_agent!`](@ref) in `ContinuousSpace`.
# `speed` defines how far the bird travels in the direction defined by `vel` per `step`.
# `separation` defines the minimum distance a bird must maintain from its neighbors.
# `visual_distance` refers to the distance a bird can see and defines a radius of neighboring birds.
# The contribution of each rule defined above receives an importance weight: `cohere_factor`
# is the importance of maintaining the average position of neighbors,
# `match_factor` is the importance of matching the average trajectory of neighboring birds,
# and `separate_factor` is the importance of maintaining the minimum
# distance from neighboring birds.

# The function `initialize_model` generates birds and returns a model object using default values.
function initialize_model(;
    n_birds = 100,
    speed = 1.0,
    cohere_factor = 0.25,
    separation = 4.0,
    separate_factor = 0.25,
    match_factor = 0.01,
    visual_distance = 5.0,
    extent = (100, 100),
    spacing = visual_distance / 1.5,
)
    space2d = ContinuousSpace(extent, spacing)
    model = ABM(Bird, space2d, scheduler = Schedulers.randomly)
    for _ in 1:n_birds
        vel = Tuple(rand(model.rng, 2) * 2 .- 1)
        add_agent!(
            model,
            vel,
            speed,
            cohere_factor,
            separation,
            separate_factor,
            match_factor,
            visual_distance,
        )
    end
    return model
end
nothing # hide

# ## Defining the agent_step!
# `agent_step!` is the primary function called for each step and computes velocity
# according to the three rules defined above.
function agent_step!(bird, model) ## Obtain the ids of neighbors within the bird's visual distance neighbor_ids = nearby_ids(bird, model, bird.visual_distance) N = 0 match = separate = cohere = (0.0, 0.0) ## Calculate behaviour properties based on neighbors for id in neighbor_ids N += 1 neighbor = model[id].pos heading = neighbor .- bird.pos ## `cohere` computes the average position of neighboring birds cohere = cohere .+ heading if edistance(bird.pos, neighbor, model) < bird.separation ## `separate` repels the bird away from neighboring birds separate = separate .- heading end ## `match` computes the average trajectory of neighboring birds match = match .+ model[id].vel end N = max(N, 1) ## Normalise results based on model input and neighbor count cohere = cohere ./ N .* bird.cohere_factor separate = separate ./ N .* bird.separate_factor match = match ./ N .* bird.match_factor ## Compute velocity based on rules defined above bird.vel = (bird.vel .+ cohere .+ separate .+ match) ./ 2 bird.vel = bird.vel ./ norm(bird.vel) ## Move bird according to new velocity and speed move_agent!(bird, model, bird.speed) end # ## Plotting the flock using InteractiveDynamics using CairoMakie CairoMakie.activate!() # hide # The great thing about [`abm_plot`](@ref) is its flexibility. We can incorporate the # direction of the birds when plotting them, by making the "marker" function `am` # create a `Polygon`: a triangle with same orientation as the bird's velocity. # It is as simple as defining the following function: const bird_polygon = Polygon(Point2f0[(-0.5, -0.5), (1, 0), (-0.5, 0.5)]) function bird_marker(b::Bird) φ = atan(b.vel[2], b.vel[1]) #+ π/2 + π scale(rotate2D(bird_polygon, φ), 2) end # Where we have used the utility functions `scale` and `rotate2D` to act on a # predefined polygon. We now give `bird_marker` to `abm_plot`, and notice how # the `as` keyword is meaningless when using polygons as markers. model = initialize_model() figure, = abm_plot(model; am = bird_marker) figure # And let's also do a nice little video for it: abm_video( "flocking.mp4", model, agent_step!; am = bird_marker, framerate = 20, frames = 100, title = "Flocking" ) # ```@raw html # <video width="auto" controls autoplay loop> # <source src="../flocking.mp4" type="video/mp4"> # </video> # ```
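# As an illustrative (non-original) extension, data can also be collected
# while the model runs via `run!` from Agents.jl; here we record each bird's
# `speed` field for 50 steps into a DataFrame. The step count and the
# collected fields are arbitrary example choices.

model = initialize_model()
adata = [:speed]
adf, _ = run!(model, agent_step!, 50; adata)
first(adf, 5)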
{"hexsha": "fa1388c1dcac2e2f8c18c262b2ad8e9c4468e643", "size": 5168, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/flock.jl", "max_stars_repo_name": "Alim-faraji/Agents.jl", "max_stars_repo_head_hexsha": "139095939bcc4efbaa84a4e58a50c04792ee6b0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 395, "max_stars_repo_stars_event_min_datetime": "2019-10-19T01:22:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T00:56:36.000Z", "max_issues_repo_path": "examples/flock.jl", "max_issues_repo_name": "Alim-faraji/Agents.jl", "max_issues_repo_head_hexsha": "139095939bcc4efbaa84a4e58a50c04792ee6b0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 506, "max_issues_repo_issues_event_min_datetime": "2019-10-09T09:53:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:14:17.000Z", "max_forks_repo_path": "examples/flock.jl", "max_forks_repo_name": "Alim-faraji/Agents.jl", "max_forks_repo_head_hexsha": "139095939bcc4efbaa84a4e58a50c04792ee6b0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 85, "max_forks_repo_forks_event_min_datetime": "2019-10-26T19:44:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T09:34:42.000Z", "avg_line_length": 34.4533333333, "max_line_length": 105, "alphanum_fraction": 0.6857585139, "num_tokens": 1325}
import numpy as np import matplotlib.pyplot as plt import argparse def fractal_dimension(array, max_box_size=None, min_box_size=1, n_samples=20, n_offsets=0, plot=False): """Calculates the fractal dimension of a 3D numpy array. Args: array (np.ndarray): The array to calculate the fractal dimension of. max_box_size (int): The largest box size, given as the power of 2 so that 2**max_box_size gives the sidelength of the largest box. min_box_size (int): The smallest box size, given as the power of 2 so that 2**min_box_size gives the sidelength of the smallest box. Default value 1. n_samples (int): number of scales to measure over. n_offsets (int): number of offsets to search over to find the smallest set N(s) to cover all voxels>0. plot (bool): set to true to see the analytical plot of a calculation. """ # determine the scales to measure on if max_box_size == None: # default max size is the largest power of 2 that fits in the smallest dimension of the array: max_box_size = int(np.floor(np.log2(np.min(array.shape)))) scales = np.floor(np.logspace(max_box_size, min_box_size, num=n_samples, base=2)) scales = np.unique(scales) # remove duplicates that could occur as a result of the floor # get the locations of all non-zero pixels locs = np.where(array > 0) voxels = np.array([(x, y, z) for x, y, z in zip(*locs)]) # count the minimum amount of boxes touched Ns = [] # loop over all scales for scale in scales: touched = [] if n_offsets == 0: offsets = [0] else: offsets = np.linspace(0, scale, n_offsets) # search over all offsets for offset in offsets: bin_edges = [np.arange(0, i, scale) for i in array.shape] bin_edges = [np.hstack([0 - offset, x + offset]) for x in bin_edges] H1, e = np.histogramdd(voxels, bins=bin_edges) touched.append(np.sum(H1 > 0)) Ns.append(touched) Ns = np.array(Ns) # From all sets N found, keep the smallest one at each scale Ns = Ns.min(axis=1) # Only keep scales at which Ns changed scales = np.array([np.min(scales[Ns == x]) for x in np.unique(Ns)]) Ns = np.unique(Ns) Ns = Ns[Ns > 0] scales = scales[:len(Ns)] # perform fit coeffs = np.polyfit(np.log(1 / scales), np.log(Ns), 1) # make plot if plot: fig, ax = plt.subplots(figsize=(8, 6)) ax.scatter(np.log(1 / scales), np.log(np.unique(Ns)), c="teal", label="Measured ratios") ax.set_ylabel("$\log N(\epsilon)$") ax.set_xlabel("$\log 1/ \epsilon$") fitted_y_vals = np.polyval(coeffs, np.log(1 / scales)) ax.plot(np.log(1 / scales), fitted_y_vals, "k--", label=f"Fit: {np.round(coeffs[0],3)}X+{coeffs[1]}") ax.legend(); return (coeffs[0]) import os import SimpleITK as sitk def main(): parser = argparse.ArgumentParser() parser.add_argument("-o", "--outputfile", help="output file's name", type=str, default='../HFD3D.txt') parser.add_argument("-m", "--inputmask", help="input mask root", type=str, default='/mnt/data9/cam/mask/') parser.add_argument("-r", "--inputimgs", help="input data root", type=str, default='/mnt/data9/cam/pre/') args = parser.parse_args() filename = args.inputmask raw=args.inputimgs f=open(args.outputfile,'w') f.writelines('name' + ',' + '3d fractals' + '\n') for item in os.listdir(raw): data=sitk.ReadImage(os.path.join(filename,item)) raw_img=sitk.ReadImage(os.path.join(raw,item)) img=sitk.GetArrayFromImage(raw_img) mask = sitk.GetArrayFromImage(data) img=img*mask arr = np.array(img, dtype=np.uint8) map=np.zeros((arr.shape[0],arr.shape[1],256)) for i in range(arr.shape[0]): for j in range(arr.shape[1]): map[i,j,arr[i,j]]=1 fd = fractal_dimension(map, n_samples=20, n_offsets=10, plot=False) print(item,fd) 
            f.writelines(item + ',' + str(fd) + '\n')
    f.close()


if __name__ == '__main__':
    main()
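# ---------------------------------------------------------------------------
# Sanity-check sketch (illustrative, not part of the original file): a solid
# cube should yield a box-counting dimension close to 3, which is a quick way
# to validate fractal_dimension() after changes. The cube size is arbitrary.
# ---------------------------------------------------------------------------
def _sanity_check(n=32):
    cube = np.ones((n, n, n), dtype=np.uint8)  # every voxel occupied
    fd = fractal_dimension(cube, n_samples=10, n_offsets=0)
    print('solid cube dimension ~', fd)  # expect a value near 3.0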
{"hexsha": "dffcf95c2da53de7566da99cb08f2977da79ed88", "size": 4295, "ext": "py", "lang": "Python", "max_stars_repo_path": "fractal-dimension/fractal.py", "max_stars_repo_name": "bemu/diagnosis_covid19", "max_stars_repo_head_hexsha": "625954beb136caa3348edfc75de16cc4db21ee43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-03-15T16:53:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T15:44:22.000Z", "max_issues_repo_path": "fractal-dimension/fractal.py", "max_issues_repo_name": "xzwthu/diagnosis_covid19", "max_issues_repo_head_hexsha": "84abe2fd1cc46e4f16d3f59be18ff3c8b5fa08c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-03-29T01:27:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-29T13:33:23.000Z", "max_forks_repo_path": "fractal-dimension/fractal.py", "max_forks_repo_name": "xzwthu/diagnosis_covid19", "max_forks_repo_head_hexsha": "84abe2fd1cc46e4f16d3f59be18ff3c8b5fa08c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-03-29T00:39:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-19T16:17:36.000Z", "avg_line_length": 39.7685185185, "max_line_length": 109, "alphanum_fraction": 0.6013969732, "include": true, "reason": "import numpy", "num_tokens": 1125}
// This file is part of libigl, a simple c++ geometry processing library. // // Copyright (C) 2013 Alec Jacobson <alecjacobson@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla Public License // v. 2.0. If a copy of the MPL was not distributed with this file, You can // obtain one at http://mozilla.org/MPL/2.0/. #include "grad.h" #include <Eigen/Geometry> #include <vector> template <typename DerivedV, typename DerivedF> IGL_INLINE void igl::grad(const Eigen::PlainObjectBase<DerivedV>&V, const Eigen::PlainObjectBase<DerivedF>&F, Eigen::SparseMatrix<typename DerivedV::Scalar> &G) { Eigen::PlainObjectBase<DerivedV > eperp21, eperp13; eperp21.resize(F.rows(),3); eperp13.resize(F.rows(),3); for (int i=0;i<F.rows();++i) { // renaming indices of vertices of triangles for convenience int i1 = F(i,0); int i2 = F(i,1); int i3 = F(i,2); // #F x 3 matrices of triangle edge vectors, named after opposite vertices Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v32 = V.row(i3) - V.row(i2); Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v13 = V.row(i1) - V.row(i3); Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v21 = V.row(i2) - V.row(i1); // area of parallelogram is twice area of triangle // area of parallelogram is || v1 x v2 || Eigen::Matrix<typename DerivedV::Scalar, 1, 3> n = v32.cross(v13); // This does correct l2 norm of rows, so that it contains #F list of twice // triangle areas double dblA = std::sqrt(n.dot(n)); // now normalize normals to get unit normals Eigen::Matrix<typename DerivedV::Scalar, 1, 3> u = n / dblA; // rotate each vector 90 degrees around normal double norm21 = std::sqrt(v21.dot(v21)); double norm13 = std::sqrt(v13.dot(v13)); eperp21.row(i) = u.cross(v21); eperp21.row(i) = eperp21.row(i) / std::sqrt(eperp21.row(i).dot(eperp21.row(i))); eperp21.row(i) *= norm21 / dblA; eperp13.row(i) = u.cross(v13); eperp13.row(i) = eperp13.row(i) / std::sqrt(eperp13.row(i).dot(eperp13.row(i))); eperp13.row(i) *= norm13 / dblA; } std::vector<int> rs; rs.reserve(F.rows()*4*3); std::vector<int> cs; cs.reserve(F.rows()*4*3); std::vector<double> vs; vs.reserve(F.rows()*4*3); // row indices for(int r=0;r<3;r++) { for(int j=0;j<4;j++) { for(int i=r*F.rows();i<(r+1)*F.rows();i++) rs.push_back(i); } } // column indices for(int r=0;r<3;r++) { for(int i=0;i<F.rows();i++) cs.push_back(F(i,1)); for(int i=0;i<F.rows();i++) cs.push_back(F(i,0)); for(int i=0;i<F.rows();i++) cs.push_back(F(i,2)); for(int i=0;i<F.rows();i++) cs.push_back(F(i,0)); } // values for(int i=0;i<F.rows();i++) vs.push_back(eperp13(i,0)); for(int i=0;i<F.rows();i++) vs.push_back(-eperp13(i,0)); for(int i=0;i<F.rows();i++) vs.push_back(eperp21(i,0)); for(int i=0;i<F.rows();i++) vs.push_back(-eperp21(i,0)); for(int i=0;i<F.rows();i++) vs.push_back(eperp13(i,1)); for(int i=0;i<F.rows();i++) vs.push_back(-eperp13(i,1)); for(int i=0;i<F.rows();i++) vs.push_back(eperp21(i,1)); for(int i=0;i<F.rows();i++) vs.push_back(-eperp21(i,1)); for(int i=0;i<F.rows();i++) vs.push_back(eperp13(i,2)); for(int i=0;i<F.rows();i++) vs.push_back(-eperp13(i,2)); for(int i=0;i<F.rows();i++) vs.push_back(eperp21(i,2)); for(int i=0;i<F.rows();i++) vs.push_back(-eperp21(i,2)); // create sparse gradient operator matrix G.resize(3*F.rows(),V.rows()); std::vector<Eigen::Triplet<typename DerivedV::Scalar> > triplets; for (int i=0;i<(int)vs.size();++i) { triplets.push_back(Eigen::Triplet<typename DerivedV::Scalar>(rs[i],cs[i],vs[i])); } G.setFromTriplets(triplets.begin(), triplets.end()); } #ifdef IGL_STATIC_LIBRARY // Explicit template 
specialization // template void igl::grad<double, int>(Eigen::Matrix<double, -1, -1, 0, -1,-1> const&, Eigen::Matrix<int, -1, -1, 0, -1, -1> const&,Eigen::SparseMatrix<double, 0, int>&); template void igl::grad<Eigen::Matrix<double, -1, 3, 0, -1, 3>, Eigen::Matrix<int, -1, 3, 0, -1, 3> >(Eigen::PlainObjectBase<Eigen::Matrix<double, -1, 3, 0, -1, 3> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, 3, 0, -1, 3> > const&, Eigen::SparseMatrix<Eigen::Matrix<double, -1, 3, 0, -1, 3>::Scalar, 0, int>&); //template void igl::grad<Eigen::Matrix<double, -1, 3, 0, -1, 3>, Eigen::Matrix<int, -1, 3, 0, -1, 3> >(Eigen::PlainObjectBase<Eigen::Matrix<double, -1, 3, 0, -1, 3> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, 3, 0, -1, 3> > const&, Eigen::SparseMatrix<Eigen::Matrix<double, -1, 3, 0, -1, 3>::Scalar, 0, int>&); template void igl::grad<Eigen::Matrix<double, -1, -1, 0, -1, -1>, Eigen::Matrix<int, -1, -1, 0, -1, -1> >(Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, -1, 0, -1, -1> > const&, Eigen::SparseMatrix<Eigen::Matrix<double, -1, -1, 0, -1, -1>::Scalar, 0, int>&); #endif
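// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file): given a mesh
// (V,F) and a per-vertex scalar field U, G*U stacks the x, y and z components
// of the per-face gradient, each block of length #F. igl::readOFF and the
// file path are assumptions for the example.
// ---------------------------------------------------------------------------
// #include <igl/readOFF.h>
// #include <igl/grad.h>
//
// Eigen::MatrixXd V; Eigen::MatrixXi F;
// igl::readOFF("mesh.off", V, F);
// Eigen::VectorXd U = V.col(2);                      // example field: z-coordinate
// Eigen::SparseMatrix<double> G;
// igl::grad(V, F, G);                                // G is 3#F x #V
// Eigen::VectorXd GU_flat = G * U;                   // stacked [Gx; Gy; Gz]
// Eigen::MatrixXd GU =
//     Eigen::Map<Eigen::MatrixXd>(GU_flat.data(), F.rows(), 3);  // per-face gradients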
{"hexsha": "d414e90a113dbab52ffda6d16de9a30cbf21e089", "size": 4991, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Code/include/igl/grad.cpp", "max_stars_repo_name": "FabianRepository/SinusProject", "max_stars_repo_head_hexsha": "48d68902ccd83f08c4d208ba8e0739a8a1252338", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Code/include/igl/grad.cpp", "max_issues_repo_name": "FabianRepository/SinusProject", "max_issues_repo_head_hexsha": "48d68902ccd83f08c4d208ba8e0739a8a1252338", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/include/igl/grad.cpp", "max_forks_repo_name": "FabianRepository/SinusProject", "max_forks_repo_head_hexsha": "48d68902ccd83f08c4d208ba8e0739a8a1252338", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.963963964, "max_line_length": 331, "alphanum_fraction": 0.6231216189, "num_tokens": 1756}
#include <boost/asio/ip/tcp.hpp> #include <boost/asio/spawn.hpp> #include <boost/asio/connect.hpp> #include <boost/asio/signal_set.hpp> #include <boost/beast/core.hpp> #include <boost/beast/http.hpp> #include <boost/beast/version.hpp> #include <boost/date_time/posix_time/posix_time.hpp> #include <boost/format.hpp> #include <boost/asio/ssl.hpp> #include <boost/asio/ssl/stream.hpp> #include <boost/optional/optional_io.hpp> #include <boost/range/adaptor/indirected.hpp> #include <boost/range/adaptor/filtered.hpp> #include <boost/range/adaptor/transformed.hpp> #include <boost/range/adaptor/indexed.hpp> #include <boost/regex.hpp> #include <iterator> #include <iostream> #include <cstdlib> // for atexit() #include "cache/client.h" #include "namespaces.h" #include "origin_pools.h" #include "doh.h" #include "http_util.h" #include "client_front_end.h" #include "connect_to_host.h" #include "generic_stream.h" #include "util.h" #include "async_sleep.h" #include "or_throw.h" #include "request_routing.h" #include "full_duplex_forward.h" #include "client_config.h" #include "client.h" #include "authenticate.h" #include "defer.h" #include "default_timeout.h" #include "constants.h" #include "util/async_queue_reader.h" #include "util/queue_reader.h" #include "session.h" #include "create_udp_multiplexer.h" #include "ssl/ca_certificate.h" #include "ssl/dummy_certificate.h" #include "ssl/util.h" #include "bittorrent/dht.h" #include "bittorrent/mutable_data.h" #ifndef __ANDROID__ # include "force_exit_on_signal.h" #endif // ifndef __ANDROID__ #include "ouiservice.h" #include "ouiservice/i2p.h" #include "ouiservice/lampshade.h" #include "ouiservice/pt-obfs2.h" #include "ouiservice/pt-obfs3.h" #include "ouiservice/pt-obfs4.h" #include "ouiservice/tcp.h" #include "ouiservice/utp.h" #include "ouiservice/tls.h" #include "ouiservice/weak_client.h" #include "ouiservice/bep5/client.h" #include "ouiservice/multi_utp_server.h" #include "parse/number.h" #include "util/signal.h" #include "util/crypto.h" #include "util/lru_cache.h" #include "util/scheduler.h" #include "util/reachability.h" #include "util/async_job.h" #include "upnp.h" #include "util/handler_tracker.h" #include "logger.h" #define _YDEBUG(y, ...) do { if (logger.get_threshold() <= DEBUG) y.log(DEBUG, __VA_ARGS__); } while (false) #define _YWARN(y, ...) do { if (logger.get_threshold() <= WARN) y.log(WARN, __VA_ARGS__); } while (false) #define _YERROR(y, ...) do { if (logger.get_threshold() <= ERROR) y.log(ERROR, __VA_ARGS__); } while (false) using namespace std; using namespace ouinet; namespace posix_time = boost::posix_time; namespace bt = ouinet::bittorrent; using tcp = asio::ip::tcp; using Request = http::request<http::string_body>; using Response = http::response<http::dynamic_body>; using TcpLookup = tcp::resolver::results_type; using UdpEndpoints = std::set<asio::ip::udp::endpoint>; static const fs::path OUINET_CA_CERT_FILE = "ssl-ca-cert.pem"; static const fs::path OUINET_CA_KEY_FILE = "ssl-ca-key.pem"; static const fs::path OUINET_CA_DH_FILE = "ssl-ca-dh.pem"; // Flags for normal, case-insensitive regular expression. 
static const auto rx_icase = boost::regex::normal | boost::regex::icase; //------------------------------------------------------------------------------ struct UserAgentMetaData { boost::optional<bool> is_private; boost::optional<std::string> dht_group; static UserAgentMetaData extract(Request& rq) { UserAgentMetaData ret; { auto i = rq.find(http_::request_group_hdr); if (i != rq.end()) { ret.dht_group = i->value().to_string(); rq.erase(i); } } { auto i = rq.find(http_::request_private_hdr); if (i != rq.end()) { ret.is_private = boost::iequals(i->value(), http_::request_private_true); rq.erase(i); } } return ret; } // Apply the metadata to the given request. template<class Req> void apply_to(Req& rq) const { if (is_private && *is_private) rq.set(http_::request_private_hdr, http_::request_private_true); if (dht_group) rq.set(http_::request_group_hdr, *dht_group); } }; //------------------------------------------------------------------------------ class Client::State : public enable_shared_from_this<Client::State> { friend class Client; enum class InternalState { Created, Failed, Started, Stopped }; public: State(asio::io_context& ctx, ClientConfig cfg) : _ctx(ctx) , _config(move(cfg)) // A certificate chain with OUINET_CA + SUBJECT_CERT // can be around 2 KiB, so this would be around 2 MiB. // TODO: Fine tune if necessary. , _ssl_certificate_cache(1000) , _injector_starting{get_executor()} , _cache_starting{get_executor()} , ssl_ctx{asio::ssl::context::tls_client} , inj_ctx{asio::ssl::context::tls_client} , _bt_dht_wc(_ctx) , _multi_utp_server_wc(_ctx) { ssl_ctx.set_default_verify_paths(); ssl_ctx.set_verify_mode(asio::ssl::verify_peer); // We do *not* want to do this since // we will not be checking certificate names, // thus any certificate signed by a recognized CA // would be accepted if presented by an injector. //inj_ctx.set_default_verify_paths(); inj_ctx.set_verify_mode(asio::ssl::verify_peer); } void start(); void stop() { if (_internal_state == InternalState::Created) _internal_state = InternalState::Stopped; if (_internal_state != InternalState::Started) return; _internal_state = InternalState::Stopped; // Requests waiting for these after stop may get "operation aborted" // when these are destroyed. // If the cancellation signal in `wait_for_*` was not called, // `return_or_throw_on_error` would catch this and trigger an assertion error. // Since requests waiting for these after stop should not happen, // these are not reset here, as we do want that crash when debugging. if (_injector_starting) _injector_starting->notify(asio::error::shut_down); if (_cache_starting) _cache_starting->notify(asio::error::shut_down); _cache = nullptr; _upnps.clear(); _shutdown_signal(); if (_injector) _injector->stop(); if (_bt_dht) { _bt_dht->stop(); _bt_dht = nullptr; } if (_udp_reachability) { _udp_reachability->stop(); _udp_reachability = nullptr; } } Client::RunningState get_state() const noexcept { switch (_internal_state) { case InternalState::Created: return Client::RunningState::Created; case InternalState::Failed: return Client::RunningState::Failed; case InternalState::Started: break; // handled below case InternalState::Stopped: // TODO: Gather stopped state from members // instead of checking that all tasks in the context // (even those which are not part of the client object) are finished. return _ctx.stopped() ? 
                Client::RunningState::Stopped : Client::RunningState::Stopping;
        }
        assert(_internal_state == InternalState::Started);

        if (was_stopped()) return Client::RunningState::Stopping;  // `stop()` did not run yet

        // TODO: check proxy acceptor
        // TODO: check front-end acceptor

        bool use_injector(_config.injector_endpoint());
        bool use_cache(_config.cache_type() != ClientConfig::CacheType::None);

        if (use_injector && _injector_starting) return Client::RunningState::Starting;
        if (use_cache && _cache_starting) return Client::RunningState::Starting;
        if (use_injector && _injector_start_ec) return Client::RunningState::Degraded;
        if (use_cache && _cache_start_ec) return Client::RunningState::Degraded;
        return Client::RunningState::Started;
    }

    void setup_cache(asio::yield_context);

    const asio_utp::udp_multiplexer& common_udp_multiplexer()
    {
        if (_udp_multiplexer) return *_udp_multiplexer;

        _udp_multiplexer = create_udp_multiplexer( _ctx
                                                 , _config.repo_root() / "last_used_udp_port");

        _udp_reachability = make_unique<util::UdpServerReachabilityAnalysis>();

        _udp_reachability->start(get_executor(), *_udp_multiplexer);

        return *_udp_multiplexer;
    }

    std::shared_ptr<bt::MainlineDht> bittorrent_dht(asio::yield_context yield)
    {
        if (_bt_dht) return _bt_dht;

        // Ensure that only one coroutine is modifying the instance at a time.
        sys::error_code ec;
        _bt_dht_wc.wait(_shutdown_signal, yield[ec]);
        return_or_throw_on_error(yield, _shutdown_signal, ec, _bt_dht);
        if (_bt_dht) return _bt_dht;
        auto lock = _bt_dht_wc.lock();

        auto bt_dht = make_shared<bt::MainlineDht>( _ctx.get_executor()
                                                  , _config.repo_root() / "dht");

        // Port allocation works like this:
        //
        // 1. The client tries to bind to the internal UDP port last used
        //    (a default one on first run), or a random one if it is busy.
        // 2. The BT DHT is set up to use that internal endpoint, then bootstrapped,
        //    yielding the public endpoint seen by the DHT node used to bootstrap.
        // 3. The port of that endpoint is configured as external UPnP port.
        //
        // Note that this approach still has some issues:
        //
        // - A NAT box may use different external ports depending on various factors like
        //   the remote endpoint and the presence of other devices in the LAN
        //   using the same internal port number (esp. other Ouinet clients),
        //   i.e. different bootstrap nodes may see the same or different source port numbers.
        // - If there is an extra NAT box in the middle (e.g. with CGNAT),
        //   the public port number may differ from that (or rather those) used by the "closest" NAT box,
        //   which would create a useless UPnP mapping.
        //
        // But, for the majority of cases, this may still be a reasonable bet.

        auto& mpl = common_udp_multiplexer();

        asio_utp::udp_multiplexer m(_ctx);
        m.bind(mpl, ec);
        if (ec) return or_throw(yield, ec, _bt_dht);

        auto cc = _shutdown_signal.connect([&] { bt_dht.reset(); });

        auto ext_ep = bt_dht->add_endpoint(move(m), yield[ec]);
        if (ec) return or_throw(yield, ec, _bt_dht);

        setup_upnp(ext_ep.port(), mpl.local_endpoint());

        _bt_dht = move(bt_dht);
        return _bt_dht;
    }

    http::response<http::string_body> retrieval_failure_response(const Request&);

private:
    GenericStream ssl_mitm_handshake( GenericStream&&
                                    , const Request&
                                    , asio::yield_context);

    void serve_request(GenericStream&& con, asio::yield_context yield);

    // All `fetch_*` functions below take care of keeping or dropping
    // Ouinet-specific internal HTTP headers as expected by upper layers.
CacheEntry fetch_stored_in_dcache( const Request& request , const request_route::Config& request_config , const std::string& dht_group , Cancel& cancel , Yield yield); template<class Rq> Session fetch_via_self( Rq, const UserAgentMetaData& , Cancel&, Yield); Response fetch_fresh_from_front_end(const Request&, Yield); Session fetch_fresh_from_origin( Request , const UserAgentMetaData& , Cancel, Yield); Session fetch_fresh_through_connect_proxy(const Request&, Cancel&, Yield); Session fetch_fresh_through_simple_proxy( Request , bool can_inject , Cancel& cancel , Yield); template<class Resp> void maybe_add_proto_version_warning(Resp& res) const { auto newest = newest_proto_seen; // Check if cache client knows about a newer protocol version too. auto c = get_cache(); if (c && c->get_newest_proto_version() > newest) newest = c->get_newest_proto_version(); if (newest > http_::protocol_version_current) res.set( http_::response_warning_hdr , "Newer Ouinet protocol found in network, " "please consider upgrading."); }; CacheControl build_cache_control(request_route::Config& request_config); tcp::acceptor make_acceptor( const tcp::endpoint& , const char* service) const; void listen_tcp( asio::yield_context , tcp::acceptor , function<void(GenericStream, asio::yield_context)>); void setup_injector(asio::yield_context); bool was_stopped() const { return _shutdown_signal.call_count() != 0; } #define DEF_WAIT_FOR(WHAT) \ void wait_for_##WHAT(Cancel& cancel, Yield yield) { \ if (!_##WHAT##_starting) \ return or_throw(yield, _##WHAT##_start_ec); \ \ sys::error_code ec; \ yield[ec].tag("wait_for_" #WHAT).run([&] (auto y) { \ _##WHAT##_starting->wait(cancel, y); \ }); \ if (cancel) ec = asio::error::operation_aborted; \ if (ec && ec != asio::error::operation_aborted) \ LOG_ERROR("Error while waiting for " #WHAT " setup; ec=", ec); \ return or_throw(yield, ec); \ } DEF_WAIT_FOR(injector) DEF_WAIT_FOR(cache) #undef DEF_WAIT_FOR fs::path ca_cert_path() const { return _config.repo_root() / OUINET_CA_CERT_FILE; } fs::path ca_key_path() const { return _config.repo_root() / OUINET_CA_KEY_FILE; } fs::path ca_dh_path() const { return _config.repo_root() / OUINET_CA_DH_FILE; } asio::io_context& get_io_context() { return _ctx; } asio::executor get_executor() { return _ctx.get_executor(); } Signal<void()>& get_shutdown_signal() { return _shutdown_signal; } bool maybe_handle_websocket_upgrade( GenericStream& , beast::string_view connect_host_port , Request& , Yield); // Resolve host and port strings. 
TcpLookup resolve_tcp_dns( const std::string&, const std::string& , Cancel&, Yield); TcpLookup resolve_tcp_doh( const std::string&, const std::string& , const UserAgentMetaData& , const doh::Endpoint& , Cancel&, Yield); GenericStream connect_to_origin( const Request& , const UserAgentMetaData& , Cancel&, Yield); unique_ptr<OuiServiceImplementationClient> maybe_wrap_tls(unique_ptr<OuiServiceImplementationClient>); cache::Client* get_cache() const { return _cache.get(); } void serve_utp_request(GenericStream, Yield); void setup_upnp(uint16_t ext_port, asio::ip::udp::endpoint local_ep) { if (_shutdown_signal) return; if (!local_ep.address().is_v4()) { LOG_WARN("Not setting up UPnP redirection because endpoint is not ipv4"); return; } auto& p = _upnps[local_ep]; if (p) { LOG_WARN("UPnP redirection for ", local_ep, " is already set"); return; } p = make_unique<UPnPUpdater>(_ctx.get_executor(), ext_port, local_ep.port()); } void idempotent_start_accepting_on_utp(asio::yield_context yield) { if (_multi_utp_server) return; // Ensure that only one coroutine is modifying the instance at a time. sys::error_code ec; _multi_utp_server_wc.wait(_shutdown_signal, yield[ec]); return_or_throw_on_error(yield, _shutdown_signal, ec); if (_multi_utp_server) return; auto lock = _multi_utp_server_wc.lock(); _multi_utp_server = make_unique<ouiservice::MultiUtpServer>( _ctx.get_executor() , UdpEndpoints{common_udp_multiplexer().local_endpoint()}, nullptr); TRACK_SPAWN(_ctx, ([&, c = _shutdown_signal] (asio::yield_context yield) mutable { auto slot = c.connect([&] () mutable { _multi_utp_server = nullptr; }); sys::error_code ec; _multi_utp_server->start_listen(yield[ec]); if (ec) { LOG_ERROR("Failed to start accepting on multi uTP service; ec=", ec); return; } while (!c) { sys::error_code ec; auto con = _multi_utp_server->accept(yield[ec]); if (c) return; if (ec == asio::error::operation_aborted) return; if (ec) { LOG_WARN("Bep5Http: Failure to accept; ec=", ec); async_sleep(_ctx, 200ms, c, yield); continue; } TRACK_SPAWN(_ctx, ([this, con = move(con)] (asio::yield_context yield) mutable { sys::error_code ec; // Do not log other users' addresses unless debugging. Yield y( _ctx, yield , (logger.get_threshold() <= DEBUG) ? "uTPAccept(" + con.remote_endpoint() + ")" : "uTPAccept"); serve_utp_request(move(con), y[ec].tag("serve_utp_req")); _YDEBUG(y, "Done; ec=", ec); })); } })); } private: // The newest protocol version number seen in a trusted exchange // (i.e. from an injector exchange or injector-signed cached content). unsigned newest_proto_seen = http_::protocol_version_current; // This reflects which operations have been called on the object. 
    InternalState _internal_state = InternalState::Created;

    asio::io_context& _ctx;
    ClientConfig _config;
    std::unique_ptr<CACertificate> _ca_certificate;
    util::LruCache<string, string> _ssl_certificate_cache;
    std::unique_ptr<OuiServiceClient> _injector;
    std::unique_ptr<cache::Client> _cache;

    boost::optional<ConditionVariable> _injector_starting, _cache_starting;
    sys::error_code _injector_start_ec, _cache_start_ec;

    ClientFrontEnd _front_end;
    Signal<void()> _shutdown_signal;

    // For debugging
    uint64_t _next_connection_id = 0;
    ConnectionPool<Endpoint> _injector_connections;
    ConnectionPool<bool> _self_connections;  // stored value is unused
    OriginPools _origin_pools;

    asio::ssl::context ssl_ctx;
    asio::ssl::context inj_ctx;

    boost::optional<asio::ip::udp::endpoint> _local_utp_endpoint;
    boost::optional<asio_utp::udp_multiplexer> _udp_multiplexer;
    unique_ptr<util::UdpServerReachabilityAnalysis> _udp_reachability;
    shared_ptr<bt::MainlineDht> _bt_dht;
    WaitCondition _bt_dht_wc;

    unique_ptr<ouiservice::MultiUtpServer> _multi_utp_server;
    WaitCondition _multi_utp_server_wc;
    shared_ptr<ouiservice::Bep5Client> _bep5_client;

    std::map<asio::ip::udp::endpoint, unique_ptr<UPnPUpdater>> _upnps;
};

//------------------------------------------------------------------------------
template<class Resp>
static
void handle_http_error( GenericStream& con
                      , Resp& res
                      , Yield yield)
{
    _YDEBUG(yield, "=== Sending back response ===");
    _YDEBUG(yield, res);

    util::http_reply(con, res, static_cast<asio::yield_context>(yield));
}

template<class ReqBody>
static
void handle_bad_request( GenericStream& con
                       , const http::request<ReqBody>& req
                       , const string& message
                       , Yield yield)
{
    auto res = util::http_error( req, http::status::bad_request
                               , OUINET_CLIENT_SERVER_STRING
                               , "", message);
    return handle_http_error(con, res, yield);
}

//------------------------------------------------------------------------------
void Client::State::serve_utp_request(GenericStream con, Yield yield)
{
    assert(_cache);
    if (!_cache) {
        LOG_WARN("Received uTP request, but cache is not initialized");
        return;
    }

    Cancel cancel = _shutdown_signal;

    auto cancel_slot = cancel.connect([&] {
        con.close();
    });

    // We expect the first request right away. Consecutive requests may arrive with
    // various delays.
    bool is_first_request = true;
    beast::flat_buffer con_rbuf;  // accumulate reads across iterations here

    while (true) {
        sys::error_code ec;

        http::request<http::empty_body> req;

        {
            auto rq_read_timeout = default_timeout::http_recv_simple();

            if (is_first_request) {
                is_first_request = false;
                rq_read_timeout = default_timeout::http_recv_simple_first();
            }

            auto wd = watch_dog(_ctx, rq_read_timeout, [&] { con.close(); });

            yield[ec].tag("read_req").run([&] (auto y) {
                http::async_read(con, con_rbuf, req, y);
            });

            if (!wd.is_running()) {
                return or_throw(yield, asio::error::timed_out);
            }

            if (cancel) ec = asio::error::operation_aborted;
            if (ec) return or_throw(yield, ec);
        }

        if (req.method() != http::verb::connect) {
            auto keep_alive = _cache->serve_local(req, con, cancel, yield[ec].tag("serve_local"));
            if (keep_alive) {
                continue;  // possible error is recoverable
            }
            return or_throw(yield, ec);  // done or unrecoverable error
        }

        _YDEBUG(yield, "Client: Received uTP/CONNECT request");

        // Connect to the injector and tunnel the transaction through it

        if (!_bep5_client) {
            return handle_bad_request( con, req, "No known injectors"
                                     , yield.tag("handle_no_injectors_error"));
        }

        auto inj = yield[ec].tag("connect_to_injector").run([&] (auto y) {
            return _bep5_client->connect( y, cancel
                                        , false, ouiservice::Bep5Client::injectors);
        });

        if (cancel) ec = asio::error::operation_aborted;
        if (ec == asio::error::operation_aborted) return or_throw(yield, ec);
        if (ec) {
            return handle_bad_request( con, req, "Failed to connect to injector"
                                     , yield.tag("handle_injector_unreachable"));
        }

        // Send the client an OK message indicating that the tunnel
        // has been established.
        http::response<http::empty_body> res{http::status::ok, req.version()};
        // No ``res.prepare_payload()`` since no payload is allowed for CONNECT:
        // <https://tools.ietf.org/html/rfc7231#section-6.3.1>.

        yield[ec].tag("write_res").run([&] (auto y) {
            util::http_reply(con, res, y);
        });

        if (cancel) ec = asio::error::operation_aborted;
        if (ec) return or_throw(yield, ec);

        // First queue unused but already read data back into the other client connection.
        if (con_rbuf.size() > 0) con.put_back(con_rbuf.data(), ec);
        assert(!ec);

        // Forward the rest of data in both directions.
        yield[ec].tag("full_duplex").run([&] (auto y) {
            full_duplex(move(con), move(inj), cancel, y);
        });
        return or_throw(yield, ec);
    }
}

//------------------------------------------------------------------------------
CacheEntry
Client::State::fetch_stored_in_dcache( const Request& request
                                     , const request_route::Config& request_config
                                     , const std::string& dht_group
                                     , Cancel& cancel
                                     , Yield yield)
{
    sys::error_code ec;

    wait_for_cache(cancel, yield[ec]);
    return_or_throw_on_error(yield, cancel, ec, CacheEntry{});

    auto c = get_cache();

    const bool cache_is_disabled
        = !c
       || !_config.is_cache_access_enabled();

    if (cache_is_disabled) {
        return or_throw<CacheEntry>( yield
                                   , asio::error::operation_not_supported);
    }

    auto key = key_from_http_req(request);
    if (!key) return or_throw<CacheEntry>(yield, asio::error::invalid_argument);

    auto s = c->load(move(*key), dht_group, request.method() == http::verb::head, cancel, yield[ec]);
    return_or_throw_on_error(yield, cancel, ec, CacheEntry{});

    s.debug();
    s.debug_prefix(yield.tag());

    auto& hdr = s.response_header();

    if (!util::http_proto_version_check_trusted(hdr, newest_proto_seen))
        // The cached resource cannot be used, treat it like
        // not being found.
return or_throw<CacheEntry>(yield, asio::error::not_found); auto tsh = util::http_injection_ts(hdr); auto ts = parse::number<time_t>(tsh); auto date = ( ts ? boost::posix_time::from_time_t(*ts) : boost::posix_time::not_a_date_time); maybe_add_proto_version_warning(hdr); assert(!hdr[http_::response_source_hdr].empty()); // for agent, set by cache return CacheEntry{date, move(s)}; } //------------------------------------------------------------------------------ template<class Rq> Session Client::State::fetch_via_self( Rq request, const UserAgentMetaData& meta , Cancel& cancel, Yield yield) { sys::error_code ec; // Connect to the client proxy port. // TODO: Maybe refactor with `fetch_fresh_through_simple_proxy`. ConnectionPool<bool>::Connection con; if (_self_connections.empty()) { _YDEBUG(yield, "Connecting to self"); // TODO: Keep lookup object or allow connecting to endpoint. auto epl = TcpLookup::create(_config.local_endpoint(), "dummy", "dummy"); auto c = connect_to_host( epl, _ctx.get_executor() , cancel, static_cast<asio::yield_context>(yield[ec])); assert(!cancel || ec == asio::error::operation_aborted); if (ec) { if (ec != asio::error::operation_aborted) { _YERROR(yield, "Failed to connect to self; ec=", ec); } return or_throw<Session>(yield, ec); } con = _self_connections.wrap(std::move(c)); } else { _YDEBUG(yield, "Reusing existing self connection"); con = _self_connections.pop_front(); } auto cancel_slot = cancel.connect([&] { con.close(); }); // Build the actual request to send to self. if (!_config.client_credentials().empty()) request = authorize(request, _config.client_credentials()); request.keep_alive(true); meta.apply_to(request); _YDEBUG(yield, "Sending a request to self"); // Send request yield[ec].tag("write_self_req").run([&] (auto y) { http::async_write(con, request, y); }); if (cancel_slot) { ec = asio::error::operation_aborted; } if (ec) { _YERROR(yield, "Failed to send request to self; ec=", ec); } if (ec) return or_throw<Session>(yield, ec); return yield.tag("read_hdr").run([&] (auto y) { return Session::create( move(con), request.method() == http::verb::head , cancel, y); }); } // Transforms addresses to endpoints with the given port. 
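// It adapts a container of plain addresses (e.g. `doh::Answers`) into a lazy
// input range of endpoints, so that `TcpLookup::create(first, last, host, port)`
// can consume DoH answers without materializing an intermediate endpoint vector.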
template<class Addrs, class Endpoint> class AddrsAsEndpoints { public: using value_type = Endpoint; using addrs_iterator = typename Addrs::const_iterator; AddrsAsEndpoints(const Addrs& addrs, unsigned short port) : _addrs(addrs), _port(port) {} class const_iterator : public std::iterator<std::input_iterator_tag, value_type> { public: const_iterator(const addrs_iterator& it, unsigned short port) : _it(it), _port(port) {} value_type operator*() const { return {*_it, _port}; } const_iterator& operator++() { ++_it; return *this; } bool operator==(const const_iterator& other) const { return _it == other._it; } bool operator!=(const const_iterator& other) const { return _it != other._it; } private: addrs_iterator _it; unsigned short _port; }; const_iterator begin() const { return {_addrs.begin(), _port}; }; const_iterator end() const { return {_addrs.end(), _port}; }; private: const Addrs& _addrs; unsigned short _port; }; TcpLookup Client::State::resolve_tcp_doh( const std::string& host , const std::string& port , const UserAgentMetaData& meta , const doh::Endpoint& ep , Cancel& cancel , Yield yield) { using TcpEndpoint = typename TcpLookup::endpoint_type; boost::string_view portsv(port); auto portn_o = parse::number<unsigned short>(portsv); if (!portn_o) return or_throw<TcpLookup>(yield, asio::error::invalid_argument); // Build and return lookup if `host` is already a network address. { sys::error_code e; auto addr = asio::ip::make_address(host, e); if (!e) return TcpLookup::create(TcpEndpoint{move(addr), *portn_o}, host, port); } // TODO: When to disable queries for IPv4 or IPv6 addresses? auto rq4_o = doh::build_request_ipv4(host, ep); auto rq6_o = doh::build_request_ipv6(host, ep); if (!rq4_o || !rq6_o) return or_throw<TcpLookup>(yield, asio::error::invalid_argument); sys::error_code ec4, ec6; doh::Response rs4, rs6; WaitCondition wc(_ctx); // By passing user agent metadata as is, // we ensure that the DoH request is done with the same browsing mode // as the content request that triggered it, // and is announced under the same group. // TODO: Handle redirects. 
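    // The `SPAWN_QUERY(VER)` macro below stamps out one coroutine per IP
    // version: roughly, `SPAWN_QUERY(4)` moves `*rq4_o` through
    // `fetch_via_self`, slurps the DoH response body into `rs4` and records
    // any failure in `ec4`.  Both coroutines hold a lock on `wc`, so the
    // `wc.wait` below resumes only once the IPv4 and IPv6 queries finish.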
#define SPAWN_QUERY(VER) \
    TRACK_SPAWN(_ctx, ([ \
        this, \
        rq = move(*rq##VER##_o), &meta, &ec##VER, &rs##VER, \
        &cancel, &yield, lock = wc.lock() \
    ] (asio::yield_context y_) { \
        sys::error_code ec; \
        auto y = yield.detach(y_); \
        auto s = fetch_via_self(move(rq), meta, cancel, y[ec].tag("fetch" #VER)); \
        if (ec) { ec##VER = ec; return; } \
        rs##VER = y[ec].tag("slurp" #VER).run([&] (auto yy) { \
            return http_response::slurp_response<doh::Response::body_type> \
                (s, doh::payload_size, cancel, yy); \
        }); \
        if (ec) { ec##VER = ec; return; } \
    }));

    SPAWN_QUERY(4);
    SPAWN_QUERY(6);

    yield.tag("wait").run([&] (auto y) { wc.wait(y); });

    _YDEBUG(yield, "DoH query; ip4_ec=", ec4, " ip6_ec=", ec6);
    if (ec4 && ec6) return or_throw<TcpLookup>(yield, ec4 /* arbitrary */);

    doh::Answers answers4, answers6;
    if (!ec4) answers4 = doh::parse_response(rs4, host, ec4);
    if (!ec6) answers6 = doh::parse_response(rs6, host, ec6);

    _YDEBUG(yield, "DoH parse; ip4_ec=", ec4, " ip6_ec=", ec6);
    if (ec4 && ec6) return or_throw<TcpLookup>(yield, ec4 /* arbitrary */);

    answers4.insert( answers4.end()
                   , std::make_move_iterator(answers6.begin())
                   , std::make_move_iterator(answers6.end()));

    AddrsAsEndpoints<doh::Answers, TcpEndpoint> eps{answers4, *portn_o};
    return TcpLookup::create(eps.begin(), eps.end(), host, port);
}

TcpLookup
Client::State::resolve_tcp_dns( const std::string& host
                              , const std::string& port
                              , Cancel& cancel
                              , Yield yield)
{
    return util::tcp_async_resolve( host, port
                                  , _ctx.get_executor()
                                  , cancel
                                  , static_cast<asio::yield_context>(yield));
}

GenericStream
Client::State::connect_to_origin( const Request& rq
                                , const UserAgentMetaData& meta
                                , Cancel& cancel
                                , Yield yield)
{
    std::string host, port;
    std::tie(host, port) = util::get_host_port(rq);

    sys::error_code ec;

    // Resolve using DoH if configured and not resolving the resolver's address itself.
    auto doh_ep_o = _config.origin_doh_endpoint();
    bool do_doh = doh_ep_o && !rq.target().starts_with(*doh_ep_o);

    auto lookup = do_doh
        ? resolve_tcp_doh(host, port, meta, *doh_ep_o, cancel, yield[ec].tag("resolve_doh"))
        : resolve_tcp_dns(host, port, cancel, yield[ec].tag("resolve_dns"));
    _YDEBUG( yield, do_doh ?
"DoH name resolution: " : "DNS name resolution: " , host, "; naddrs=", lookup.size(), " ec=", ec); return_or_throw_on_error(yield, cancel, ec, GenericStream()); auto sock = connect_to_host( lookup, _ctx.get_executor() , cancel, static_cast<asio::yield_context>(yield[ec])); return_or_throw_on_error(yield, cancel, ec, GenericStream()); GenericStream stream; if (rq.target().starts_with("https:") || rq.target().starts_with("wss:")) { stream = ssl::util::client_handshake( move(sock) , ssl_ctx , host , cancel , static_cast<asio::yield_context>(yield[ec])); return_or_throw_on_error(yield, cancel, ec, GenericStream()); } else { stream = move(sock); } return stream; } //------------------------------------------------------------------------------ Response Client::State::fetch_fresh_from_front_end(const Request& rq, Yield yield) { Cancel cancel = _shutdown_signal; boost::optional<ClientFrontEnd::UdpEndpoint> local_ep; if (_udp_multiplexer) { local_ep = _udp_multiplexer->local_endpoint(); } sys::error_code ec; auto res = _front_end.serve( _config , rq , get_state() , _cache.get() , *_ca_certificate , local_ep , _upnps , _bt_dht.get() , _udp_reachability.get() , yield[ec].tag("serve_frontend")); if (cancel) ec = asio::error::operation_aborted; if (ec) return or_throw<Response>(yield, ec); res.set( http_::response_source_hdr // for agent , http_::response_source_hdr_front_end); res.keep_alive(rq.keep_alive()); return res; } //------------------------------------------------------------------------------ Session Client::State::fetch_fresh_from_origin( Request rq , const UserAgentMetaData& meta , Cancel cancel, Yield yield) { auto watch_dog = ouinet::watch_dog( _ctx , default_timeout::fetch_http() , [&] { cancel(); }); assert(!rq[http::field::host].empty()); // origin pools require host util::remove_ouinet_fields_ref(rq); // avoid leaking to non-injectors sys::error_code ec; auto maybe_con = _origin_pools.get_connection(rq); OriginPools::Connection con; if (maybe_con) { con = std::move(*maybe_con); } else { auto stream = connect_to_origin(rq, meta, cancel, yield[ec]); if (cancel) { assert(ec == asio::error::operation_aborted); ec = watch_dog.is_running() ? ec = asio::error::operation_aborted : ec = asio::error::timed_out; } if (ec) return or_throw<Session>(yield, ec); con = _origin_pools.wrap(rq, std::move(stream)); } // Transform request from absolute-form to origin-form // https://tools.ietf.org/html/rfc7230#section-5.3 auto rq_ = util::req_form_from_absolute_to_origin(rq); // Send request yield[ec].tag("write_origin_req").run([&] (auto y) { auto con_close = cancel.connect([&] { con.close(); }); http::async_write(con, rq_, y); }); if (cancel) { ec = watch_dog.is_running() ? ec = asio::error::operation_aborted : ec = asio::error::timed_out; } if (ec) return or_throw<Session>(yield, ec); auto ret = yield[ec].tag("read_hdr").run([&] (auto y) { return Session::create( std::move(con), rq.method() == http::verb::head , cancel, y); }); return_or_throw_on_error(yield, cancel, ec, Session()); // Prevent others from inserting ouinet headers. util::remove_ouinet_fields_ref(ret.response_header()); ret.response_header().set( http_::response_source_hdr // for agent , http_::response_source_hdr_origin); return ret; } //------------------------------------------------------------------------------ Session Client::State::fetch_fresh_through_connect_proxy( const Request& rq , Cancel& cancel_ , Yield yield) { // TODO: We're not re-using connections here. 
    // ConnectionPool as it is right now can only work with http requests
    // and responses and thus can't be used for full-duplex forwarding.

    Cancel cancel(cancel_);

    auto watch_dog = ouinet::watch_dog( _ctx
                                      , default_timeout::fetch_http()
                                      , [&]{ cancel(); });

    // Parse the URL to tell HTTP/HTTPS, host, port.
    util::url_match url;

    if (!match_http_url(rq.target(), url)) {
        // unsupported URL
        return or_throw<Session>(yield, asio::error::operation_not_supported);
    }

    // Connect to the injector/proxy.
    sys::error_code ec;

    wait_for_injector(cancel, yield[ec]);
    return_or_throw_on_error(yield, cancel, ec, Session{});

    assert(_injector);
    auto inj = yield[ec].tag("connect_to_injector").run([&] (auto y) {
        return _injector->connect(y, cancel);
    });
    return_or_throw_on_error(yield, cancel, ec, Session());

    // Build the actual request to send to the proxy.
    Request connreq = { http::verb::connect
                      , url.host + ":" + (url.port.empty() ? "443" : url.port)
                      , 11 /* HTTP/1.1 */};

    // HTTP/1.1 requires a ``Host:`` header in all requests:
    // <https://tools.ietf.org/html/rfc7230#section-5.4>.
    connreq.set(http::field::host, connreq.target());

    if (auto credentials = _config.credentials_for(inj.remote_endpoint))
        connreq = authorize(connreq, *credentials);

    // Open a tunnel to the origin
    // (to later perform the SSL handshake and send the request).
    yield[ec].tag("connreq").run([&] (auto y) {
        util::http_request(inj.connection, connreq, cancel, y);
    });
    return_or_throw_on_error(yield, cancel, ec, Session());

    // Only get the head of the CONNECT response
    // (otherwise we would get stuck waiting to read
    // a body whose length we do not know
    // since a successful response should have no content length as per RFC7231#4.3.6).
    {
        auto r = std::make_unique<http_response::Reader>(std::move(inj.connection));
        auto part = yield[ec].tag("read_hdr").run([&] (auto y) {
            return r->async_read_part(cancel, y);
        });
        return_or_throw_on_error(yield, cancel, ec, Session());
        assert(part && part->is_head());
        if (http::to_status_class(part->as_head()->result()) != http::status_class::successful) {
            auto rsh = std::move(*(part->as_head()));
            _YERROR(yield.tag("proxy_connect"), rsh);
            util::remove_ouinet_nonerrors_ref(rsh);
            rsh.set(http_::response_source_hdr, http_::response_source_hdr_proxy);
            return Session(std::move(rsh), rq.method() == http::verb::head, std::move(r));
        }
        inj.connection = r->release_stream();
    }

    GenericStream con;

    if (url.scheme == "https") {
        con = ssl::util::client_handshake( move(inj.connection)
                                         , ssl_ctx
                                         , url.host
                                         , cancel
                                         , static_cast<asio::yield_context>(yield[ec]));
    } else {
        con = move(inj.connection);
    }
    return_or_throw_on_error(yield, cancel, ec, Session());

    // TODO: move
    auto rq_ = util::req_form_from_absolute_to_origin(rq);

    yield[ec].tag("write_req").run([&] (auto y) {
        auto slot = cancel.connect([&con] { con.close(); });
        http::async_write(con, rq_, y);
    });
    return_or_throw_on_error(yield, cancel, ec, Session());

    auto session = yield[ec].tag("read_hdr").run([&] (auto y) {
        return Session::create( move(con), rq.method() == http::verb::head
                              , cancel, y);
    });
    return_or_throw_on_error(yield, cancel, ec, Session());

    // Prevent others from inserting ouinet headers.
util::remove_ouinet_fields_ref(session.response_header()); session.response_header().set( http_::response_source_hdr // for agent , http_::response_source_hdr_proxy); return session; } //------------------------------------------------------------------------------ Session Client::State::fetch_fresh_through_simple_proxy ( Request request , bool can_inject , Cancel& cancel , Yield yield) { sys::error_code ec; // Build the actual request to send to the injector (auth added below). if (can_inject) { bool keepalive = request.keep_alive(); auto irq = util::to_injector_request(move(request)); if (!irq) { _YERROR(yield, "Invalid request"); return or_throw<Session>(yield, asio::error::invalid_argument); } request = move(*irq); request.keep_alive(keepalive); } else { util::remove_ouinet_fields_ref(request); // avoid accidental injection } // Connect to the injector. // TODO: Maybe refactor with `fetch_via_self`. wait_for_injector(cancel, yield[ec]); return_or_throw_on_error(yield, cancel, ec, Session{}); assert(_injector); ConnectionPool<Endpoint>::Connection con; if (_injector_connections.empty()) { _YDEBUG(yield, "Connecting to the injector"); auto c = yield[ec].tag("connect_to_injector2").run([&] (auto y) { return _injector->connect(y, cancel); }); assert(!cancel || ec == asio::error::operation_aborted); if (ec) { if (ec != asio::error::operation_aborted) { _YWARN(yield, "Failed to connect to injector; ec=", ec); } return or_throw<Session>(yield, ec); } assert(c.connection.has_implementation()); con = _injector_connections.wrap(std::move(c.connection)); *con = c.remote_endpoint; } else { _YDEBUG(yield, "Reusing existing injector connection"); con = _injector_connections.pop_front(); } auto cancel_slot = cancel.connect([&] { con.close(); }); if (auto credentials = _config.credentials_for(*con)) request = authorize(request, *credentials); _YDEBUG(yield, "Sending a request to the injector"); // Send request yield[ec].tag("write_injector_req").run([&] (auto y) { http::async_write(con, request, y); }); if (cancel_slot) { ec = asio::error::operation_aborted; } if (ec) { _YWARN(yield, "Failed to send request to the injector; ec=", ec); } if (ec) return or_throw<Session>(yield, ec); _YDEBUG(yield, "Reading response"); cancel_slot = {}; // Receive response auto session = yield[ec].tag("read_hdr").run([&] (auto y) { return Session::create( move(con), request.method() == http::verb::head , cancel, y); }); auto& hdr = session.response_header(); if (cancel) ec = asio::error::operation_aborted; else if ( !ec && can_inject && !util::http_proto_version_check_trusted(hdr, newest_proto_seen)) // The injector using an unacceptable protocol version is treated like // the Injector mechanism being disabled. ec = asio::error::operation_not_supported; _YDEBUG(yield, "End reading response; ec=", ec); if (ec) return or_throw(yield, ec, std::move(session)); // Store keep-alive connections in connection pool if (can_inject) { maybe_add_proto_version_warning(hdr); hdr.set(http_::response_source_hdr, http_::response_source_hdr_injector); // for agent } else { // Prevent others from inserting ouinet headers // (except a protocol error, if present and well-formed). 
util::remove_ouinet_nonerrors_ref(hdr); hdr.set(http_::response_source_hdr, http_::response_source_hdr_proxy); // for agent } return session; } class Transaction { public: Transaction(GenericStream& ua_con, const Request& rq, UserAgentMetaData meta) : _ua_con(ua_con) , _request(rq) , _meta(std::move(meta)) {} void write_to_user_agent(Session& session, Cancel& cancel, asio::yield_context yield) { namespace err = asio::error; if (cancel) { assert(!cancel); LOG_ERROR(__FILE__, ":", __LINE__, " Cancel already called"); return or_throw(yield, err::operation_aborted); } if (_ua_was_written_to) { return or_throw(yield, err::already_started); } sys::error_code ec; _ua_was_written_to = true; session.flush_response(_ua_con, cancel, yield[ec]); bool keep_alive = !ec && _request.keep_alive() && session.keep_alive(); if (!keep_alive) { session.close(); _ua_con.close(); } return or_throw(yield, ec); } template<class BodyType> void write_to_user_agent(const http::response<BodyType>& rs, Cancel& cancel, asio::yield_context yield) { namespace err = asio::error; if (cancel) { assert(!cancel); LOG_ERROR(__FILE__, ":", __LINE__, " Cancel already called"); return or_throw(yield, err::operation_aborted); } if (_ua_was_written_to) { return or_throw(yield, err::already_started); } sys::error_code ec; _ua_was_written_to = true; http::async_write(_ua_con, rs, yield[ec]); bool keep_alive = !ec && _request.keep_alive() && rs.keep_alive(); if (!keep_alive) _ua_con.close(); return or_throw(yield, ec); } const Request& request() const { return _request; } bool user_agent_was_written_to() { return _ua_was_written_to; } bool is_open() const { return _ua_con.is_open(); } const UserAgentMetaData& meta() const { return _meta; } private: /* * Connection to the user agent */ GenericStream& _ua_con; const Request& _request; bool _ua_was_written_to = false; UserAgentMetaData _meta; }; //------------------------------------------------------------------------------ class Client::ClientCacheControl { public: ClientCacheControl( Client::State& client_state , const request_route::Config& request_config) : client_state(client_state) , request_config(request_config) , cc(client_state.get_executor(), OUINET_CLIENT_SERVER_STRING) { //------------------------------------------------------------ cc.fetch_fresh = [&] (const Request& rq, Cancel& cancel, Yield yield_) { auto yield = yield_.tag("injector"); namespace err = asio::error; _YDEBUG(yield, "Start"); if (!client_state._config.is_injector_access_enabled()) { _YDEBUG(yield, "Disabled"); return or_throw<Session>(yield, err::operation_not_supported); } sys::error_code ec; auto s = client_state.fetch_fresh_through_simple_proxy( rq , true , cancel , yield[ec]); if (!ec) { _YDEBUG(yield, "Finish; ec=", ec, " status=", s.response_header().result()); } else { _YDEBUG(yield, "Finish; ec=", ec); } return or_throw(yield, ec, move(s)); }; //------------------------------------------------------------ cc.fetch_stored = [&] (const Request& rq, const std::string& dht_group, Cancel& cancel, Yield yield_) { auto yield = yield_.tag("cache"); _YDEBUG(yield, "Start"); sys::error_code ec; auto r = client_state.fetch_stored_in_dcache( rq , request_config , dht_group , cancel , yield[ec]); _YDEBUG(yield, "Finish; ec=", ec, " canceled=", bool(cancel)); return or_throw(yield, ec, move(r)); }; //------------------------------------------------------------ cc.max_cached_age(client_state._config.max_cached_age()); } void front_end_job_func(Transaction& tnx, Cancel& cancel, Yield yield) { sys::error_code ec; 
Response res = client_state.fetch_fresh_from_front_end(tnx.request(), yield[ec]); if (cancel) ec = asio::error::operation_aborted; if (!ec) tnx.write_to_user_agent(res, cancel, static_cast<asio::yield_context>(yield[ec])); return or_throw(yield, ec); } void origin_job_func( Transaction& tnx , Cancel& cancel, Yield yield) { if (cancel) { LOG_ERROR("origin_job_func received an already triggered cancel"); return or_throw(yield, asio::error::operation_aborted); } _YDEBUG(yield, "Start"); sys::error_code ec; auto session = client_state.fetch_fresh_from_origin( tnx.request(), tnx.meta() , cancel, yield[ec]); _YDEBUG(yield, "Fetch; ec=", ec); return_or_throw_on_error(yield, cancel, ec); tnx.write_to_user_agent(session, cancel, static_cast<asio::yield_context>(yield[ec])); _YDEBUG(yield, "Flush; ec=", ec); return or_throw(yield, ec); } void proxy_job_func(Transaction& tnx, Cancel& cancel, Yield yield) { sys::error_code ec; _YDEBUG(yield, "Start"); Session session; const auto& rq = tnx.request(); if (rq.target().starts_with("https://")) { session = client_state.fetch_fresh_through_connect_proxy (rq, cancel, yield[ec]); } else { session = client_state.fetch_fresh_through_simple_proxy (rq, false, cancel, yield[ec]); } _YDEBUG(yield, "Proxy fetch; ec=", ec); return_or_throw_on_error(yield, cancel, ec); tnx.write_to_user_agent(session, cancel, static_cast<asio::yield_context>(yield[ec])); _YDEBUG(yield, "Flush; ec=", ec); return or_throw(yield, ec); } void injector_job_func(Transaction& tnx, Cancel& cancel, Yield yield) { namespace err = asio::error; sys::error_code ec; sys::error_code fresh_ec; sys::error_code cache_ec; _YDEBUG(yield, "Start"); _YDEBUG(yield, tnx.request()); const auto& rq = tnx.request(); const auto& meta = tnx.meta(); auto session = cc.fetch(rq, meta.dht_group, fresh_ec, cache_ec, cancel, yield[ec]); _YDEBUG(yield, "cc.fetch; ec=", ec, " fresh_ec=", fresh_ec, " cache_ec=", cache_ec); if (ec) return or_throw(yield, ec); auto& rsh = session.response_header(); _YDEBUG(yield, "Response header:"); _YDEBUG(yield, rsh); assert(!fresh_ec || !cache_ec); // At least one success assert( fresh_ec || cache_ec); // One needs to fail auto injector_error = rsh[http_::response_error_hdr]; if (!injector_error.empty()) { _YERROR(yield, "Error from injector: ", injector_error); tnx.write_to_user_agent(session, cancel, static_cast<asio::yield_context>(yield[ec])); return or_throw(yield, ec); } auto& ctx = client_state.get_io_context(); auto exec = ctx.get_executor(); using http_response::Part; util::AsyncQueue<boost::optional<Part>> qst(exec), qag(exec); // to storage, agent WaitCondition wc(ctx); auto cache = client_state.get_cache(); const char* no_cache_reason = nullptr; bool do_cache = ( cache && rq.method() == http::verb::get // TODO: storing HEAD response not yet supported && rsh[http_::response_source_hdr] != http_::response_source_hdr_local_cache && CacheControl::ok_to_cache( rq, rsh, client_state._config.do_cache_private() , (logger.get_threshold() <= DEBUG ? 
&no_cache_reason : nullptr)) && meta.dht_group); if (do_cache) { TRACK_SPAWN(ctx, ([ &, cache = std::move(cache), lock = wc.lock() ] (asio::yield_context yield_) { auto key = key_from_http_req(rq); assert(key); AsyncQueueReader rr(qst); sys::error_code ec; yield.detach(yield_)[ec].run([&] (auto y) { cache->store(*key, *meta.dht_group, rr, cancel, y); }); })); } else if (no_cache_reason) _YDEBUG(yield, "Not ok to cache response: ", no_cache_reason); TRACK_SPAWN(ctx, ([ &, lock = wc.lock() ] (asio::yield_context yield_) { sys::error_code ec; auto rr = std::make_unique<AsyncQueueReader>(qag); Session sag = Session::create(std::move(rr), tnx.request().method() == http::verb::head, cancel, yield_[ec]); if (cancel) return; if (ec) return; tnx.write_to_user_agent(sag, cancel, yield_[ec]); })); session.flush_response(cancel, static_cast<asio::yield_context>(yield[ec]), [&] ( Part&& part , Cancel& cancel , asio::yield_context y) { // If the user agent closed its connection, stop getting data from the injector too. // Otherwise, besides continuing to transfer data to the local cache, // it will also accumulate in memory (at the `qag` queue, which is no longer read), // with both being especially problematic with big resources like videos. // // Please note that this will cause an incomplete response to be stored; // hopefully the Injector mechanism may be faster to respond // if the client tries to download the same resource again. // Another fix would be to have the local cache participate in multi-peer downloads. if (!tnx.is_open()) return or_throw(y, asio::error::broken_pipe); if (do_cache) qst.push_back(part); qag.push_back(std::move(part)); }); if (do_cache) qst.push_back(boost::none); qag.push_back(boost::none); wc.wait(static_cast<asio::yield_context>(yield)); _YDEBUG(yield, "Finish; ec=", ec); return or_throw(yield, ec); } struct Jobs { enum class Type { front_end, origin, proxy, injector_or_dcache }; // XXX: Currently `AsyncJob` isn't specialized for `void`, so using // boost::none_t as a temporary hack. using Retval = boost::none_t; using Job = AsyncJob<Retval>; Jobs(asio::executor exec) : exec(exec) , front_end(exec) , origin(exec) , proxy(exec) , injector_or_dcache(exec) , all({&front_end, &origin, &proxy, &injector_or_dcache}) {} asio::executor exec; Job front_end; Job origin; Job proxy; Job injector_or_dcache; // All jobs, even those that never started. // Unfortunately C++14 is not letting me have array of references. 
const std::array<Job*, 4> all; auto running() const { static const auto is_running = [] (auto& v) { return v.is_running(); }; return all | boost::adaptors::indirected | boost::adaptors::filtered(is_running); } const char* as_string(const Job* ptr) const { auto type = job_to_type(ptr); if (!type) return "unknown"; return as_string(*type); } static const char* as_string(Type type) { switch (type) { case Type::front_end: return "front_end"; case Type::origin: return "origin"; case Type::proxy: return "proxy"; case Type::injector_or_dcache: return "injector_or_dcache"; } assert(0); return "xxx"; }; boost::optional<Type> job_to_type(const Job* ptr) const { if (ptr == &front_end) return Type::front_end; if (ptr == &origin) return Type::origin; if (ptr == &proxy) return Type::proxy; if (ptr == &injector_or_dcache) return Type::injector_or_dcache; return boost::none; } Job* job_from_type(Type type) { switch (type) { case Type::front_end: return &front_end; case Type::origin: return &origin; case Type::proxy: return &proxy; case Type::injector_or_dcache: return &injector_or_dcache; } assert(0); return nullptr; } size_t count_running() const { auto jobs = running(); return std::distance(jobs.begin(), jobs.end()); } void sleep_before_job(Type job_type, Cancel& cancel, Yield& yield) { size_t n = count_running(); // 'n' includes "this" job, and we don't need to wait for that. assert(n > 0); if (n > 0) --n; if (job_type == Type::injector_or_dcache || job_type == Type::proxy) { // If origin is running, give it some time, but stop sleeping // if origin fetch exits early. if (!origin.is_running()) return; Cancel c(cancel); boost::optional<Job::Connection> jc; if (origin.is_running()) { jc = origin.on_finish_sig([&c] { c(); }); } async_sleep( exec, n * chrono::seconds(3) , c, static_cast<asio::yield_context>(yield)); } else if (job_type == Type::front_end) { // No pause for front-end jobs. } else { async_sleep( exec, n * chrono::seconds(3) , cancel, static_cast<asio::yield_context>(yield)); } } }; bool is_access_enabled(Jobs::Type job_type) const { using Type = Jobs::Type; auto& cfg = client_state._config; switch (job_type) { case Type::front_end: return true; case Type::origin: return cfg.is_origin_access_enabled(); case Type::proxy: return cfg.is_proxy_access_enabled(); case Type::injector_or_dcache: return cfg.is_injector_access_enabled() || cfg.is_cache_access_enabled(); } assert(0); return false; } // The transaction's connection is only kept open if it can still be used, // otherwise it is closed. // If an error is reported but the connection was not yet written to, // a response may still be sent to it // (please check `tnx.user_agent_was_written_to()`). 
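    // Runs every enabled fresh channel (front-end, origin, proxy,
    // injector/dcache) as a concurrent job, with `sleep_before_job` staggering
    // the slower channels so that the origin gets a head start.  The first job
    // to succeed wins and the remaining ones are stopped; if all of them fail,
    // the first reported error is propagated (or `no_protocol_option` when no
    // job ever produced a result).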
void mixed_fetch(Transaction& tnx, Yield yield) { Cancel cancel(client_state._shutdown_signal); namespace err = asio::error; using request_route::fresh_channel; using Job = Jobs::Job; using JobCon = Job::Connection; using OptJobCon = boost::optional<JobCon>; auto exec = client_state.get_io_context().get_executor(); Jobs jobs(exec); auto cancel_con = cancel.connect([&] { for (auto& job : jobs.running()) job.cancel(); }); auto start_job = [&] (Jobs::Type job_type, auto func) { const char* name_tag = Jobs::as_string(job_type); Job* job = jobs.job_from_type(job_type); assert(job); if (!job) return; if (!is_access_enabled(job_type)) { _YDEBUG(yield, name_tag, ": disabled"); return; } job->start([ &yield, &jobs, name_tag, func = std::move(func), job_type ] (Cancel& c, asio::yield_context y_) { auto y = yield.detach(y_).tag(name_tag); jobs.sleep_before_job(job_type, c, y); if (c) return or_throw(y_, err::operation_aborted, boost::none); sys::error_code ec; func(c, y[ec]); return or_throw(y, ec, boost::none); }); }; // TODO: When the origin is enabled and it always times out, it // will induce an unnecessary delay to the other routes. We need a // mechanism which will "realize" that other origin requests are // already timing out and that injector, proxy and dcache routes don't // need to wait for it. for (auto route : request_config.fresh_channels) { switch (route) { case fresh_channel::_front_end: { start_job(Jobs::Type::front_end, [&] (auto& c, auto y) { front_end_job_func(tnx, c, y); }); break; } case fresh_channel::origin: { start_job(Jobs::Type::origin, [&] (auto& c, auto y) { origin_job_func(tnx, c, y); }); break; } case fresh_channel::proxy: { start_job(Jobs::Type::proxy, [&] (auto& c, auto y) { proxy_job_func(tnx, c, y); }); break; } case fresh_channel::injector_or_dcache: { start_job(Jobs::Type::injector_or_dcache, [&] (auto& c, auto y) { injector_job_func(tnx, c, y); }); break; } } } const char* final_job = "(none)"; boost::optional<sys::error_code> final_ec; auto target = tnx.request().target(); std::string short_target = target.substr(0, 64).to_string(); if (target.length() > 64) short_target.replace(short_target.end() - 3, short_target.end(), "..."); for (size_t job_count; (job_count = jobs.count_running()) != 0;) { ConditionVariable cv(exec); std::array<OptJobCon, jobs.all.size()> cons; Job* which = nullptr; for (const auto& job : jobs.running() | boost::adaptors::indexed(0)) { auto i = job.index(); auto v = &job.value(); cons[i] = v->on_finish_sig([&cv, &which, v] { if (!which) which = v; cv.notify(); }); } _YDEBUG(yield, "Waiting for ", job_count, " running jobs"); cv.wait(static_cast<asio::yield_context>(yield)); if (!which) { _YWARN(yield, "Got result from unknown job"); continue; // XXX } auto&& result = which->result(); _YDEBUG( yield, "Got result; job=", jobs.as_string(which), " ec=", result.ec , " target=", short_target); if (!result.ec) { final_job = jobs.as_string(which); final_ec = sys::error_code{}; // success for (auto& job : jobs.running()) { job.stop(static_cast<asio::yield_context>(yield)); } break; } else if (!final_ec) { final_job = jobs.as_string(which); final_ec = result.ec; } } if (!final_ec /* not set */) { final_ec = err::no_protocol_option; } _YDEBUG( yield, "Done; final_job=", final_job, " final_ec=", *final_ec , " target=", short_target); return or_throw(yield, *final_ec); } private: Client::State& client_state; const request_route::Config& request_config; CacheControl cc; }; //------------------------------------------------------------------------------ 
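// Note that this drops the port and the leftmost host label when the host has
// two or more labels, e.g. "www.example.com:443" -> "example.com", while
// "example.com" and "localhost" are kept as is.  Presumably this allows one
// dummy MitM certificate (see below) to cover sibling subdomains of the same
// base domain.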
static string base_domain_from_target(const beast::string_view& target) { auto full_host = target.substr(0, target.rfind(':')); size_t dot0, dot1 = 0; if ((dot0 = full_host.find('.')) != full_host.rfind('.')) // Two different dots were found // (e.g. "www.example.com" but not "localhost" or "example.com"). dot1 = dot0 + 1; // skip first component and dot (e.g. "www.") return full_host.substr(dot1).to_string(); } //------------------------------------------------------------------------------ GenericStream Client::State::ssl_mitm_handshake( GenericStream&& con , const Request& con_req , asio::yield_context yield) { // TODO: We really should be waiting for // the TLS Client Hello message to arrive at the clear text connection // (after we send back 200 OK), // then retrieve the value of the Server Name Indication (SNI) field // and rewind the Hello message, // but for the moment we will assume that the browser sends // a host name instead of an IP address or its reverse resolution. auto base_domain = base_domain_from_target(con_req.target()); const string* crt_chain = _ssl_certificate_cache.get(base_domain); if (!crt_chain) { DummyCertificate dummy_crt(*_ca_certificate, base_domain); crt_chain = _ssl_certificate_cache.put(move(base_domain) , dummy_crt.pem_certificate() + _ca_certificate->pem_certificate()); } auto ssl_context = ssl::util::get_server_context ( *crt_chain , _ca_certificate->pem_private_key() , _ca_certificate->pem_dh_param()); // Send back OK to let the UA know we have the "tunnel" http::response<http::string_body> res{http::status::ok, con_req.version()}; // No ``res.prepare_payload()`` since no payload is allowed for CONNECT: // <https://tools.ietf.org/html/rfc7231#section-6.3.1>. http::async_write(con, res, yield); sys::error_code ec; auto ssl_sock = make_unique<asio::ssl::stream<GenericStream>>(move(con), ssl_context); ssl_sock->async_handshake(asio::ssl::stream_base::server, yield[ec]); if (ec) return or_throw<GenericStream>(yield, ec); static const auto ssl_shutter = [](asio::ssl::stream<GenericStream>& s) { // Just close the underlying connection // (TLS has no message exchange for shutdown). s.next_layer().close(); }; return GenericStream(move(ssl_sock), move(ssl_shutter)); } //------------------------------------------------------------------------------ bool Client::State::maybe_handle_websocket_upgrade( GenericStream& browser , beast::string_view connect_hp , Request& rq , Yield yield) { sys::error_code ec; if (!boost::iequals(rq[http::field::upgrade], "websocket")) return false; bool has_upgrade = false; for (auto s : SplitString(rq[http::field::connection], ',')) { if (boost::iequals(s, "Upgrade")) { has_upgrade = true; break; } } if (!has_upgrade) return false; if (!rq.target().starts_with("ws:") && !rq.target().starts_with("wss:")) { if (connect_hp.empty()) { sys::error_code ec_; handle_bad_request(browser, rq, "Not a websocket server", yield[ec_]); return true; } // Make this a "proxy" request. Among other things, this is important // to let the consecutive code know we want encryption. rq.target( string("wss://") + ( (rq[http::field::host].length() > 0) ? rq[http::field::host] : connect_hp).to_string() + rq.target().to_string()); } Cancel cancel(_shutdown_signal); // TODO: Reuse existing connections to origin and injectors. Currently // this is hard because those are stored not as streams but as // ConnectionPool::Connection. 
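    // From here on: connect to the origin, forward the (possibly rewritten)
    // upgrade request verbatim, relay the origin's response back, and if it
    // answered `101 Switching Protocols`, splice both streams together.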
    auto meta = UserAgentMetaData::extract(rq);
    auto origin = connect_to_origin(rq, meta, cancel, yield[ec]);

    if (ec) return or_throw(yield, ec, true);

    yield[ec].tag("write_req").run([&] (auto y) {
        http::async_write(origin, rq, y);
    });

    beast::flat_buffer origin_rbuf;
    Response rs;
    yield[ec].tag("read_res").run([&] (auto y) {
        http::async_read(origin, origin_rbuf, rs, y);
    });

    if (ec) return or_throw(yield, ec, true);

    yield[ec].tag("write_res").run([&] (auto y) {
        http::async_write(browser, rs, y);
    });

    if (rs.result() != http::status::switching_protocols) return true;

    // First queue unused but already read data back into the origin connection.
    if (origin_rbuf.size() > 0) origin.put_back(origin_rbuf.data(), ec);
    assert(!ec);

    // Forward the rest of data in both directions.
    yield[ec].tag("full_duplex").run([&] (auto y) {
        full_duplex(move(browser), move(origin), cancel, y);
    });

    return or_throw(yield, ec, true);
}

//------------------------------------------------------------------------------
http::response<http::string_body>
Client::State::retrieval_failure_response(const Request& req)
{
    auto res = util::http_error
        ( req, http::status::bad_gateway, OUINET_CLIENT_SERVER_STRING
        , http_::response_error_hdr_retrieval_failed
        , "Failed to retrieve the resource "
          "(after attempting all configured mechanisms)");
    maybe_add_proto_version_warning(res);
    return res;
}

//------------------------------------------------------------------------------
void Client::State::serve_request( GenericStream&& con
                                 , asio::yield_context yield_)
{
    Cancel cancel(_shutdown_signal);

    LOG_DEBUG("Request received ");

    namespace rr = request_route;
    using rr::fresh_channel;

    auto close_con_slot = _shutdown_signal.connect([&con] {
        con.close();
    });

    // This request router configuration will be used for requests by default.
    //
    // Looking up the cache when needed is allowed, while for fetching fresh
    // content:
    //
    //  - the origin is first contacted directly,
    //    for good overall speed and responsiveness
    //  - if not available, the injector is used to
    //    get the content and cache it for future accesses
    //
    // So enabling the Injector channel will result in caching content
    // when access to the origin is not possible.
    //
    // To also avoid getting content from the cache
    // (so that browsing looks like using a normal non-caching proxy)
    // the cache can be disabled.
    const rr::Config default_request_config
        { deque<fresh_channel>({ fresh_channel::origin
                               , fresh_channel::injector_or_dcache})};

    // This is the matching configuration for the one above,
    // but for uncacheable requests.
    const rr::Config nocache_request_config
        { deque<fresh_channel>({ fresh_channel::origin
                               , fresh_channel::proxy})};

    // The currently effective request router configuration.
    rr::Config request_config;

    Client::ClientCacheControl cache_control(*this, request_config);

    sys::error_code ec;

    // Expressions to test the request against and configurations to be used.
    // TODO: Create once and reuse.
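    // Each entry pairs a request predicate with the router configuration to
    // use when it matches; `route_choose_config` (called near the end of the
    // request loop) picks the configuration of the first matching entry and
    // falls back to `default_request_config` when none matches.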
using Match = pair<const ouinet::reqexpr::reqex, const rr::Config>; auto method_override_getter([](const Request& r) {return r["X-HTTP-Method-Override"];}); auto method_getter([](const Request& r) {return r.method_string();}); auto host_getter([](const Request& r) {return r[http::field::host];}); auto hostname_getter([](const Request& r) {return util::split_ep(r[http::field::host]).first;}); auto x_private_getter([](const Request& r) {return r[http_::request_private_hdr];}); auto target_getter([](const Request& r) {return r.target();}); auto local_rx = util::str("https?://[^:/]+\\.", _config.local_domain(), "(:[0-9]+)?/.*"); #ifdef NDEBUG // release const rr::Config unrequested{deque<fresh_channel>({fresh_channel::origin})}; #else // debug // Don't request these in debug mode as they bring a lot of noise into the log const rr::Config unrequested{deque<fresh_channel>()}; #endif static const boost::regex localhost_exact_rx{"localhost", rx_icase}; const vector<Match> matches({ // Please keep host-specific matches at a bare minimum // as they require curation and they may have undesired side-effects; // instead, use user agent-side mechanisms like browser settings and extensions when possible, // and only leave those that really break things and cannot be otherwise disabled. // // Also note that using the normal mechanisms for these may help users // keep their browsers up-to-date (by retrieving via the injector in case of interference), // and they may still not pollute the cache unless // the requests are explicitly marked for caching and announcement. // Disable cache and always go to origin for this site. //Match( reqexpr::from_regex(target_getter, "https?://ident\\.me/.*") // , {deque<fresh_channel>({fresh_channel::origin})} ), /* Requests which may be considered public but too noisy and of little value for caching * should be processed by something like browser extensions. // Google Search completion Match( reqexpr::from_regex(target_getter, "https?://(www\\.)?google\\.com/complete/.*") , unrequested ), */ /* To stop these requests in Firefox, * uncheck "Preferences / Privacy & Security / Deceptive Content and Dangerous Software Protection". // Safe Browsing API <https://developers.google.com/safe-browsing/>. // These should not be very frequent after start, // plus they use POST requests, so there is no risk of accidental injection. Match( reqexpr::from_regex(target_getter, "https://safebrowsing\\.googleapis\\.com/.*") , unrequested ), */ /* These are used to retrieve add-ons and all kinds of minor security updates from Mozilla, * and they mostly happen on browser start only. // Disable cache and always go to origin for these mozilla sites. Match( reqexpr::from_regex(target_getter, "https?://content-signature\\.cdn\\.mozilla\\.net/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*services\\.mozilla\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*cdn\\.mozilla\\.net/.*") , unrequested ), */ /* To stop these requests, * uncheck "Preferences / Add-ons / (gear icon) / Update Add-ons Automatically". 
// Firefox add-ons hotfix (auto-update) Match( reqexpr::from_regex(target_getter, "https?://services\\.addons\\.mozilla\\.org/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://versioncheck-bg\\.addons\\.mozilla\\.org/.*") , unrequested ), */ /* To stop these requests, * uncheck all options from "Preferences / Privacy & Security / Firefox Data Collection and Use", * maybe clear `toolkit.telemetry.server` in `about:config`. // Firefox telemetry Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*telemetry\\.mozilla\\.net/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*telemetry\\.mozilla\\.org/.*") , unrequested ), */ /* This should work as expected as long as Origin is enabled. * To stop these requests, set `network.captive-portal-service.enabled` to false in `about:config`. // Firefox' captive portal detection Match( reqexpr::from_regex(target_getter, "https?://detectportal\\.firefox\\.com/.*") , unrequested ), */ /* To avoid these at the client, use some kind of ad blocker (like uBlock Origin). // Ads and tracking Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*google-analytics\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*googlesyndication\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*googletagservices\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*moatads\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*amazon-adsystem\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*adsafeprotected\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*ads-twitter\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*doubleclick\\.net/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://([^/\\.]+\\.)*summerhamster\\.com/.*") , unrequested ), Match( reqexpr::from_regex(target_getter, "https?://ping.chartbeat.net/.*") , unrequested ), */ // Handle requests to <http://localhost/> internally. Match( reqexpr::from_regex(host_getter, localhost_exact_rx) , {deque<fresh_channel>({fresh_channel::_front_end})} ), Match( reqexpr::from_regex(host_getter, util::str(_config.front_end_endpoint())) , {deque<fresh_channel>({fresh_channel::_front_end})} ), // Other requests to the local host should not use the network // to avoid leaking internal services accessed through the client. Match( reqexpr::from_regex(hostname_getter, util::localhost_rx) , {deque<fresh_channel>({fresh_channel::origin})} ), // Access to sites under the local TLD are always accessible // with good connectivity, so always use the Origin channel // and never cache them. Match( reqexpr::from_regex(target_getter, local_rx) , {deque<fresh_channel>({fresh_channel::origin})} ), // Do not use caching for requests tagged as private with Ouinet headers. Match( reqexpr::from_regex( x_private_getter , boost::regex(http_::request_private_true, rx_icase)) , nocache_request_config), // When to try to cache or not, depending on the request method: // // - Unsafe methods (CONNECT, DELETE, PATCH, POST, PUT): do not cache // - Safe but uncacheable methods (OPTIONS, TRACE): do not cache // - Safe and cacheable (GET, HEAD): cache // // Thus the only remaining method that implies caching is GET. 
        Match( !reqexpr::from_regex(method_getter, "(GET|HEAD)")
             , nocache_request_config),
        // Requests declaring a method override are checked by that method.
        // This is not a standard header,
        // but for instance Firefox uses it for Safe Browsing requests,
        // which according to this standard should actually be POST requests
        // (probably in the hopes of having more chances that requests get through,
        // in spite of using HTTPS).
        Match( !reqexpr::from_regex(method_override_getter, "(|GET)")
             , nocache_request_config),

        // Disable cache and always go to proxy for this site.
        //Match( reqexpr::from_regex(target_getter, "https?://ifconfig\\.co/.*")
        //     , {deque<fresh_channel>({fresh_channel::proxy})} ),
        // Force cache and default channels for this site.
        //Match( reqexpr::from_regex(target_getter, "https?://(www\\.)?example\\.com/.*")
        //     , {deque<fresh_channel>()} ),
        // Force cache and particular channels for this site.
        //Match( reqexpr::from_regex(target_getter, "https?://(www\\.)?example\\.net/.*")
        //     , {deque<fresh_channel>({fresh_channel::injector})} ),
    });

    auto connection_id = _next_connection_id++;
    auto connection_idstr = util::str('C', connection_id);

    // Is MitM active?
    bool mitm = false;

    // Saved host/port from CONNECT request.
    string connect_hp;

    // Process the different requests that may come over the same connection.
    beast::flat_buffer con_rbuf;  // accumulate reads across iterations here
    for (;;) {  // continue for next request; break for no more requests
        // Read the (clear-text) HTTP request
        // (without a size limit, in case we are uploading a big file).
        // Based on <https://stackoverflow.com/a/50359998>.
        http::request_parser<Request::body_type> reqhp;
        reqhp.body_limit((std::numeric_limits<std::uint64_t>::max)());

        // No timeout either, a keep-alive connection to the user agent
        // will remain open and waiting for new requests
        // until the latter decides to close it.
        Yield yield(_ctx.get_executor(), yield_, connection_idstr);

        yield[ec].tag("read_req").run([&] (auto y) {
            http::async_read(con, con_rbuf, reqhp, y);
        });

        if ( ec == http::error::end_of_stream
          || ec == asio::ssl::error::stream_truncated
          || ec == asio::error::operation_aborted) break;
        if (ec) {
            LOG_WARN("Failed to read request; ec=", ec);
            break;
        }

        Request req(reqhp.release());
        auto req_done = defer([&yield] { _YDEBUG(yield, "Done"); });

        bool auth = yield[ec].tag("auth").run([&] (auto y) {
            return authenticate(req, con, _config.client_credentials(), y);
        });
        if (!auth) {
            _YWARN(yield, "Request authentication failed, discarding");
            continue;
        }

        _YDEBUG(yield, "=== New request ===");
        _YDEBUG(yield, req.base());

        auto target = req.target();

        // Perform MitM for CONNECT requests (to be able to see encrypted requests)
        if (!mitm && req.method() == http::verb::connect) {
            sys::error_code ec;
            // Subsequent access to the connection will use the encrypted channel.
            yield[ec].tag("mitm_handshake").run([&] (auto y) {
                con = ssl_mitm_handshake(move(con), req, y);
            });
            if (ec) {
                _YERROR(yield, "MitM exception; ec=", ec);
                break;
            }
            mitm = true;
            // Save CONNECT target (minus standard HTTPS port ``:443`` if present)
            // in case of subsequent HTTP/1.0 requests with no ``Host:`` header.
            auto port_pos = target.length() >= 4
                          ? target.rfind(":443") : string::npos;
            // Only strip ``:443`` when it is the target's suffix,
            // i.e. do not hit ``:443`` inside of an IPv6 address.
            if (port_pos != target.length() - 4) port_pos = string::npos;
            connect_hp = target
                .substr(0, port_pos)
                .to_string();
            // Go for requests in the encrypted channel.
continue; } if (maybe_handle_websocket_upgrade( con , connect_hp , req , yield[ec].tag("websocket"))) { break; } // Ensure that the request is proxy-like. if (!(target.starts_with("https://") || target.starts_with("http://"))) { if (mitm) { // Requests in the encrypted channel are usually not proxy-like // so the target is not "http://example.com/foo" but just "/foo". // We expand the target again with the ``Host:`` header // (or the CONNECT target if the header is missing in HTTP/1.0) // so that "/foo" becomes "https://example.com/foo". auto host = req[http::field::host]; if (host.empty()) { req.set(http::field::host, connect_hp); host = connect_hp; } req.target( string("https://") + host.to_string() + target.to_string()); target = req.target(); } else { // TODO: Maybe later we want to support front-end and API calls // as plain HTTP requests (as if we were a plain HTTP server) // but for the moment we only accept proxy requests. sys::error_code ec_; handle_bad_request(con, req, "Not a proxy request", yield[ec_]); if (req.keep_alive()) continue; else break; } } // Ensure that the request has a `Host:` header // (to ease request routing check and later operations on the head). if (!util::req_ensure_host(req)) { sys::error_code ec_; handle_bad_request(con, req, "Invalid or missing host in request", yield[ec_]); if (req.keep_alive()) continue; else break; } request_config = route_choose_config(req, matches, default_request_config); auto meta = UserAgentMetaData::extract(req); Transaction tnx(con, req, std::move(meta)); if (request_config.fresh_channels.empty()) { _YDEBUG(yield, "Abort due to no route"); sys::error_code ec; tnx.write_to_user_agent( retrieval_failure_response(req) , cancel, static_cast<asio::yield_context>(yield[ec])); if (ec || cancel) break; continue; } cache_control.mixed_fetch(tnx, yield[ec].tag("mixed_fetch")); if (ec) { _YERROR(yield, "Error writing back response; ec=", ec); if (tnx.user_agent_was_written_to()) con.close(); // it may already be closed if (con.is_open() && !cancel) { sys::error_code ec_; tnx.write_to_user_agent( retrieval_failure_response(req) , cancel, static_cast<asio::yield_context>(yield[ec_])); } if (!req.keep_alive()) con.close(); } if (!con.is_open()) { break; } } LOG_DEBUG(connection_idstr, " Done"); } //------------------------------------------------------------------------------ void Client::State::setup_cache(asio::yield_context yield) { // Remember to always set before return in case of error, // or the notification may not pass the right error code to listeners. sys::error_code ec; auto do_notify_ready = [&] { if (!_cache_starting) return; _cache_start_ec = ec; _cache_starting->notify(ec); _cache_starting.reset(); }; auto notify_ready = defer([&] { do_notify_ready(); }); if (_config.cache_type() != ClientConfig::CacheType::Bep5Http) { ec = asio::error::operation_not_supported; return; }; LOG_DEBUG("HTTP signing public key (Ed25519): ", _config.cache_http_pub_key()); #define fail_on_error(__msg) { \ if (_shutdown_signal) ec = asio::error::operation_aborted; \ if (ec && ec != asio::error::operation_aborted) \ LOG_ERROR(__msg "; ec=", ec); \ return_or_throw_on_error(yield, _shutdown_signal, ec); \ } _cache = _config.cache_static_content_path().empty() ? 
        cache::Client::build( _ctx.get_executor()
                            , UdpEndpoints{common_udp_multiplexer().local_endpoint()}
                            , *_config.cache_http_pub_key()
                            , _config.repo_root()/"bep5_http"
                            , _config.max_cached_age()
                            , yield[ec])
      : cache::Client::build( _ctx.get_executor()
                            , UdpEndpoints{common_udp_multiplexer().local_endpoint()}
                            , *_config.cache_http_pub_key()
                            , _config.repo_root()/"bep5_http"
                            , _config.max_cached_age()
                            , _config.cache_static_path()
                            , _config.cache_static_content_path()
                            , yield[ec]);
    fail_on_error("Failed to initialize cache::Client");

    idempotent_start_accepting_on_utp(yield[ec]);
    fail_on_error("Failed to start accepting on uTP for cache::Client");

    // Subsequent calls below will not alter cache start result,
    // but they will still report an error code to the caller.
    do_notify_ready();

    auto dht = bittorrent_dht(yield[ec]);
    fail_on_error("Failed to initialize BT DHT for cache::Client");

    if (!_cache->enable_dht(dht))
        ec = asio::error::invalid_argument;
    fail_on_error("Failed to enable BT DHT in cache::Client");
#undef fail_on_error
}

//------------------------------------------------------------------------------
tcp::acceptor Client::State::make_acceptor( const tcp::endpoint& local_endpoint
                                          , const char* service) const
{
    sys::error_code ec;

    // Open the acceptor
    tcp::acceptor acceptor(_ctx);

    acceptor.open(local_endpoint.protocol(), ec);
    if (ec) {
        throw runtime_error(util::str("Failed to open TCP acceptor for service: ", service, "; ec=", ec));
    }

    acceptor.set_option(asio::socket_base::reuse_address(true));

    // Bind to the server address
    acceptor.bind(local_endpoint, ec);
    if (ec) {
        throw runtime_error(util::str("Failed to bind TCP acceptor for service: ", service, "; ec=", ec));
    }

    // Start listening for connections
    acceptor.listen(asio::socket_base::max_connections, ec);
    if (ec) {
        throw runtime_error(util::str("Failed to 'listen' to service on TCP acceptor: ", service, "; ec=", ec));
    }

    LOG_INFO("Client listening to ", service, " on TCP:", acceptor.local_endpoint());

    return acceptor;
}

//------------------------------------------------------------------------------
void Client::State::listen_tcp
        ( asio::yield_context yield
        , tcp::acceptor acceptor
        , function<void(GenericStream, asio::yield_context)> handler)
{
    auto shutdown_acceptor_slot = _shutdown_signal.connect([&acceptor] {
        acceptor.close();
    });

    WaitCondition wait_condition(_ctx);

    for (;;) {
        sys::error_code ec;

        tcp::socket socket(_ctx);
        acceptor.async_accept(socket, yield[ec]);

        if (ec) {
            if (ec == asio::error::operation_aborted) break;

            LOG_WARN("Accept failed on TCP:", acceptor.local_endpoint(), "; ec=", ec);

            if (!async_sleep(_ctx, chrono::seconds(1), _shutdown_signal, yield)) {
                break;
            }
        } else {
            static const auto tcp_shutter = [](tcp::socket& s) {
                sys::error_code ec;  // Don't throw
                s.shutdown(tcp::socket::shutdown_both, ec);
                s.close(ec);
            };

            GenericStream connection(move(socket), move(tcp_shutter));

            // Increase the size of the coroutine stack.
// Some interesting info:
            // https://lists.ceph.io/hyperkitty/list/dev@ceph.io/thread/6LBFZIFUPTJQ3SNTLVKSQMVITJWVWTZ6/
            boost::coroutines::attributes attribs;
            attribs.size *= 2;

            TRACK_SPAWN( _ctx, ([
                this,
                self = shared_from_this(),
                c = move(connection),
                handler,
                lock = wait_condition.lock()
            ](asio::yield_context yield) mutable {
                if (was_stopped()) return;
                handler(move(c), yield);
            }), attribs);
        }
    }

    wait_condition.wait(yield);
}

//------------------------------------------------------------------------------
void Client::State::start()
{
    if (_internal_state != InternalState::Created) return;
    InternalState next_internal_state = InternalState::Failed;
    auto set_internal_state = defer([&] { _internal_state = next_internal_state; });

    // These may throw if the endpoints are busy.
    auto proxy_acceptor = make_acceptor(_config.local_endpoint(), "browser requests");
    boost::optional<tcp::acceptor> front_end_acceptor;
    if (_config.front_end_endpoint() != tcp::endpoint())
        front_end_acceptor = make_acceptor(_config.front_end_endpoint(), "frontend");

    ssl::util::load_tls_ca_certificates(ssl_ctx, _config.tls_ca_cert_store_path());

    _ca_certificate = get_or_gen_tls_cert<CACertificate>
        ( "Your own local Ouinet client"
        , ca_cert_path(), ca_key_path(), ca_dh_path());

    if (!_config.tls_injector_cert_path().empty()) {
        if (fs::exists(fs::path(_config.tls_injector_cert_path()))) {
            LOG_DEBUG("Loading injector certificate file...");
            inj_ctx.load_verify_file(_config.tls_injector_cert_path());
            LOG_DEBUG("Loading injector certificate file: success");
        } else {
            throw runtime_error(
                util::str("Invalid path to Injector's TLS cert file: "
                         , _config.tls_injector_cert_path()));
        }
    }

    next_internal_state = InternalState::Started;

    TRACK_SPAWN(_ctx, ([
        this,
        self = shared_from_this(),
        acceptor = move(proxy_acceptor)
    ] (asio::yield_context yield) mutable {
        if (was_stopped()) return;
        sys::error_code ec;
        listen_tcp( yield[ec]
                  , move(acceptor)
                  , [this, self]
                    (GenericStream c, asio::yield_context yield) {
                        serve_request(move(c), yield);
                    });
    }));

    if (front_end_acceptor) {
        TRACK_SPAWN( _ctx, ([
            this,
            self = shared_from_this(),
            acceptor = move(*front_end_acceptor)
        ] (asio::yield_context yield) mutable {
            if (was_stopped()) return;
            LOG_INFO("Serving front end on ", acceptor.local_endpoint());
            sys::error_code ec;
            listen_tcp( yield[ec]
                      , move(acceptor)
                      , [this, self]
                        (GenericStream c, asio::yield_context yield_) {
                            Yield yield(_ctx, yield_, "frontend");
                            sys::error_code ec;
                            beast::flat_buffer c_rbuf;
                            Request rq;
                            yield[ec].tag("read_req").run([&] (auto y) {
                                http::async_read(c, c_rbuf, rq, y);
                            });
                            if (ec) return;
                            auto rs = fetch_fresh_from_front_end(rq, yield[ec].tag("get_res"));
                            if (ec) return;
                            yield[ec].tag("write_res").run([&] (auto y) {
                                http::async_write(c, rs, y);
                            });
                        });
        }));
    }

    TRACK_SPAWN(_ctx, ([
        this
    ] (asio::yield_context yield) {
        if (was_stopped()) return;
        sys::error_code ec;
        setup_injector(yield[ec]);
        if (ec && ec != asio::error::operation_aborted)
            LOG_ERROR("Failed to setup injector; ec=", ec);
    }));

    TRACK_SPAWN(_ctx, ([
        this
    ] (asio::yield_context yield) {
        if (was_stopped()) return;
        sys::error_code ec;
        setup_cache(yield[ec]);
        if (ec && ec != asio::error::operation_aborted)
            LOG_ERROR("Failed to setup cache; ec=", ec);
    }));
}

//------------------------------------------------------------------------------
unique_ptr<OuiServiceImplementationClient>
Client::State::maybe_wrap_tls(unique_ptr<OuiServiceImplementationClient> client)
{
    bool enable_injector_tls = !_config.tls_injector_cert_path().empty();

    if (!enable_injector_tls) {
LOG_WARN("Connection to the injector shall not be encrypted"); return client; } return make_unique<ouiservice::TlsOuiServiceClient>(move(client), inj_ctx); } void Client::State::setup_injector(asio::yield_context yield) { // Remember to always set before return in case of error, // or the notification may not pass the right error code to listeners. sys::error_code ec; auto notify_ready = defer([&] { if (!_injector_starting) return; _injector_start_ec = ec; _injector_starting->notify(ec); _injector_starting.reset(); }); auto injector_ep = _config.injector_endpoint(); if (!injector_ep) { ec = asio::error::operation_not_supported; return; } LOG_INFO("Setting up injector: ", *injector_ep); std::unique_ptr<OuiServiceImplementationClient> client; if (injector_ep->type == Endpoint::I2pEndpoint) { auto i2p_service = make_shared<ouiservice::I2pOuiService>((_config.repo_root()/"i2p").string(), _ctx.get_executor()); auto i2p_client = i2p_service->build_client(injector_ep->endpoint_string); /* if (!i2p_client->verify_endpoint()) { return or_throw(yield, ec = asio::error::invalid_argument); } */ client = std::move(i2p_client); } else if (injector_ep->type == Endpoint::TcpEndpoint) { auto tcp_client = make_unique<ouiservice::TcpOuiServiceClient>(_ctx.get_executor(), injector_ep->endpoint_string); if (!tcp_client->verify_endpoint()) { return or_throw(yield, ec = asio::error::invalid_argument); } client = maybe_wrap_tls(move(tcp_client)); } else if (injector_ep->type == Endpoint::UtpEndpoint) { asio_utp::udp_multiplexer m(_ctx); m.bind(common_udp_multiplexer(), ec); assert(!ec); auto utp_client = make_unique<ouiservice::UtpOuiServiceClient> (_ctx.get_executor(), move(m), injector_ep->endpoint_string); if (!utp_client->verify_remote_endpoint()) { return or_throw(yield, ec = asio::error::invalid_argument); } client = maybe_wrap_tls(move(utp_client)); } else if (injector_ep->type == Endpoint::Bep5Endpoint) { auto dht = bittorrent_dht(yield[ec]); if (ec) { if (ec != asio::error::operation_aborted) { LOG_ERROR("Failed to set up Bep5Client at setting up BT DHT; ec=", ec); } return or_throw(yield, ec); } boost::optional<string> bridge_swarm_name = _config.bep5_bridge_swarm_name(); if (!bridge_swarm_name) { LOG_ERROR("Bridge swarm name has not been computed"); return or_throw(yield, ec = asio::error::operation_not_supported); } _bep5_client = make_shared<ouiservice::Bep5Client> ( dht , injector_ep->endpoint_string , *bridge_swarm_name , &inj_ctx); client = make_unique<ouiservice::WeakOuiServiceClient>(_bep5_client); idempotent_start_accepting_on_utp(yield[ec]); if (ec) { LOG_ERROR("Failed to start accepting on uTP; ec=", ec); ec = {}; } /* } else if (injector_ep->type == Endpoint::LampshadeEndpoint) { auto lampshade_client = make_unique<ouiservice::LampshadeOuiServiceClient>(_ctx, injector_ep->endpoint_string); if (!lampshade_client->verify_endpoint()) { return or_throw(yield, ec = asio::error::invalid_argument); } client = std::move(lampshade_client); */ } else if (injector_ep->type == Endpoint::Obfs2Endpoint) { auto obfs2_client = make_unique<ouiservice::Obfs2OuiServiceClient>(_ctx, injector_ep->endpoint_string, _config.repo_root()/"obfs2-client"); if (!obfs2_client->verify_endpoint()) { return or_throw(yield, ec = asio::error::invalid_argument); } client = std::move(obfs2_client); } else if (injector_ep->type == Endpoint::Obfs3Endpoint) { auto obfs3_client = make_unique<ouiservice::Obfs3OuiServiceClient>(_ctx, injector_ep->endpoint_string, _config.repo_root()/"obfs3-client"); if 
(!obfs3_client->verify_endpoint()) {
            return or_throw(yield, ec = asio::error::invalid_argument);
        }
        client = std::move(obfs3_client);
    } else if (injector_ep->type == Endpoint::Obfs4Endpoint) {
        auto obfs4_client = make_unique<ouiservice::Obfs4OuiServiceClient>(_ctx, injector_ep->endpoint_string, _config.repo_root()/"obfs4-client");
        if (!obfs4_client->verify_endpoint()) {
            return or_throw(yield, ec = asio::error::invalid_argument);
        }
        client = std::move(obfs4_client);
    }

    _injector = std::make_unique<OuiServiceClient>(_ctx.get_executor());

    _injector->add(*injector_ep, std::move(client));

    _injector->start(yield[ec]);

    return or_throw(yield, ec);
}

//------------------------------------------------------------------------------
Client::Client(asio::io_context& ctx, ClientConfig cfg)
    : _state(make_shared<State>(ctx, move(cfg)))
{}

Client::~Client()
{
}

void Client::start()
{
    _state->start();
}

void Client::stop()
{
    _state->stop();
}

Client::RunningState Client::get_state() const noexcept {
    return _state->get_state();
}

void Client::charging_state_change(bool is_charging) {
    LOG_DEBUG("Charging state changed, is charging: ", is_charging);
    //TODO(peter) do something
}

void Client::wifi_state_change(bool is_wifi_connected) {
    LOG_DEBUG("Wifi state changed, is connected: ", is_wifi_connected);
    //TODO(peter) do something
}

fs::path Client::ca_cert_path() const
{
    return _state->ca_cert_path();
}

fs::path Client::get_or_gen_ca_root_cert(const string repo_root) {
    fs::path repo_path = fs::path(repo_root);
    fs::path ca_cert_path = repo_path / OUINET_CA_CERT_FILE;
    fs::path ca_key_path = repo_path / OUINET_CA_KEY_FILE;
    fs::path ca_dh_path = repo_path / OUINET_CA_DH_FILE;

    get_or_gen_tls_cert<CACertificate>
        ( "Your own local Ouinet client"
        , ca_cert_path, ca_key_path, ca_dh_path);

    return ca_cert_path;
}

//------------------------------------------------------------------------------
#ifndef __ANDROID__
int main(int argc, char* argv[])
{
    util::crypto_init();

    ClientConfig cfg;

    try {
        cfg = ClientConfig(argc, argv);
    } catch(std::exception const& e) {
        LOG_ABORT(e.what());
        return 1;
    }

    if (cfg.is_help()) {
        cout << "Usage:" << endl;
        cout << cfg.description() << endl;
        return 0;
    }

    asio::io_context ctx;

    asio::signal_set signals(ctx, SIGINT, SIGTERM);

    Client client(ctx, move(cfg));

    unique_ptr<ForceExitOnSignal> force_exit;

    signals.async_wait([&client, &signals, &force_exit]
                       (const sys::error_code& ec, int signal_number) {
        LOG_INFO("GOT SIGNAL ", signal_number);
        HandlerTracker::stopped();
        client.stop();
        signals.clear();
        force_exit = make_unique<ForceExitOnSignal>();
    });

    try {
        client.start();
    } catch (std::exception& e) {
        LOG_ABORT(e.what());
        return 1;
    }

    ctx.run();

    LOG_INFO("Exiting gracefully");

    return EXIT_SUCCESS;
}
#endif
{"hexsha": "a18bf33bb226fb33a0b0685073483a9b8eb83f03", "size": 106990, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/client.cpp", "max_stars_repo_name": "mhqz/ouinet", "max_stars_repo_head_hexsha": "10f924712bdbfae03d64097f040697d4c11d7911", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/client.cpp", "max_issues_repo_name": "mhqz/ouinet", "max_issues_repo_head_hexsha": "10f924712bdbfae03d64097f040697d4c11d7911", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/client.cpp", "max_forks_repo_name": "mhqz/ouinet", "max_forks_repo_head_hexsha": "10f924712bdbfae03d64097f040697d4c11d7911", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7410714286, "max_line_length": 147, "alphanum_fraction": 0.569660716, "num_tokens": 24207}
""" The object ``climlab.solar.orbital.OrbitalTable`` is an ``xarray.Dataset`` holding orbital data (**eccentricity**, **obliquity**, and **longitude of perihelion**) for the past 5 Myears. The data are from :cite:`Berger_1991`. Data are read from the file ``orbit91``, which was originally obtained from <https://www1.ncdc.noaa.gov/pub/data/paleo/climate_forcing/orbital_variations/insolation/> If the file isn't found locally, the module will attempt to read it remotely from the above URL. A subclass ``climlab.solar.orbital.long.OrbitalTable`` works with La2004 orbital data for -51 to +21 Myears as calculated by :cite:`Laskar_2004`. See <http://vo.imcce.fr/insola/earth/online/earth/La2004/README.TXT> (Breaking change from climlab 0.7.0 and previous) :Example: Load orbital data from the past 5 Myears:: # Load the data from climlab.solar.orbital import OrbitalTable # Examine the xarray object print(OrbitalTable) # Get a timeseries of obliquity print(OrbitalTable.obliquity) # Get the orbital data for a specific year, 10 kyear before present: print(OrbitalTable.interp(kyear=-10)) # Get the long orbital table data from climlab.solar.orbital.long import OrbitalTable as LongTable print(LongTable) """ from __future__ import division, print_function, absolute_import import numpy as np import pandas as pd import xarray as xr from .table import _get_Berger_data OrbitalTable = _get_Berger_data()
{"hexsha": "acebb9e3ad09ecf6fa2cc7fecef4288dbea934ad", "size": 1559, "ext": "py", "lang": "Python", "max_stars_repo_path": "climlab/solar/orbital/__init__.py", "max_stars_repo_name": "nfeldl/climlab", "max_stars_repo_head_hexsha": "2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7", "max_stars_repo_licenses": ["BSD-3-Clause", "MIT"], "max_stars_count": 160, "max_stars_repo_stars_event_min_datetime": "2015-02-25T15:56:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T23:51:23.000Z", "max_issues_repo_path": "climlab/solar/orbital/__init__.py", "max_issues_repo_name": "nfeldl/climlab", "max_issues_repo_head_hexsha": "2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7", "max_issues_repo_licenses": ["BSD-3-Clause", "MIT"], "max_issues_count": 137, "max_issues_repo_issues_event_min_datetime": "2015-12-18T17:39:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T20:50:53.000Z", "max_forks_repo_path": "climlab/solar/orbital/__init__.py", "max_forks_repo_name": "nfeldl/climlab", "max_forks_repo_head_hexsha": "2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7", "max_forks_repo_licenses": ["BSD-3-Clause", "MIT"], "max_forks_count": 54, "max_forks_repo_forks_event_min_datetime": "2015-04-28T05:57:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T08:15:11.000Z", "avg_line_length": 35.4318181818, "max_line_length": 90, "alphanum_fraction": 0.7094291212, "include": true, "reason": "import numpy", "num_tokens": 413}
""" @Author: Yu Huang @Email: yuhuang-cst@foxmail.com """ import os from tqdm import tqdm import h5py import sys import scipy.sparse as sp import numpy as np from sklearn.externals import joblib from scipy.sparse import save_npz, load_npz, csr_matrix import json import pickle import time import logging, logging.config from aide.constant import JSON_FILE_FORMAT, PKL_FILE_FORMAT, NPY_FILE_FORMAT, SPARSE_NPZ_FILE_FORMAT, JOBLIB_FILE_FORMAT, NPZ_FILE_FORMAT def timer(func): def wrapper(*args, **kwargs): print('{0} starts running...'.format(func.__name__)) startTime = time.time() ret = func(*args, **kwargs) print('Function {0} finished. Total time cost: {1} seconds'.format(func.__name__, time.time()-startTime)) return ret return wrapper def process_timer(func): def wrapper(*args, **kwargs): print('{0} starts running...'.format(func.__name__)) startTime = time.process_time() ret = func(*args, **kwargs) print('Function {0} finished. Total time cost: {1} seconds'.format(func.__name__, time.process_time()-startTime)) return ret return wrapper def isJsonable(x): try: json.dumps(x) return True except: return False def getsizeof(obj, unit='M'): mem = sys.getsizeof(obj) return mem / 1024 if unit == 'K' else (mem / 1048576 if unit == 'M' else mem / 1073741824) def save_h5_csr(h5path, X, y=None): with h5py.File(h5path, 'w') as f: g = f.create_group('X') g.create_dataset('data', data=X.data) g.create_dataset('indptr', data=X.indptr) g.create_dataset('indices', data=X.indices) g.attrs['shape'] = X.shape if y is not None: f.create_dataset('y', data=y) def save_h5_ary(h5path, X, y=None): with h5py.File(h5path, 'w') as f: f.create_dataset('X', data=X) if y is not None: f.create_dataset('y', data=y) def is_sparse_h5(h5path): with h5py.File(h5path, 'r') as f: return isinstance(f['X'], h5py.Group) def sample_h5_mat_rows(h5path, sample_num, verbose=0): shape = get_h5_mat_shape(h5path) sorted_sample_rows = sorted(np.random.choice(shape[0], sample_num)) return get_h5_mat_rows(h5path, sorted_sample_rows, verbose), sorted_sample_rows def get_h5_mat_rows(h5path, sorted_sample_rows, verbose=0): if isinstance(sorted_sample_rows, np.ndarray): sorted_sample_rows = list(sorted_sample_rows) with h5py.File(h5path, 'r') as f: is_sparse = isinstance(f['X'], h5py.Group) if is_sparse: data, indices, indptr, shape = f['X']['data'], f['X']['indices'], f['X']['indptr'], f['X'].attrs['shape'] return get_h5_csr_rows(data, indices, indptr, shape, sorted_sample_rows, verbose=verbose) else: return f['X'][sorted_sample_rows, :] def get_h5_mat(h5path): with h5py.File(h5path, 'r') as f: is_sparse = isinstance(f['X'], h5py.Group) if is_sparse: data, indices, indptr, shape = f['X']['data'][:], f['X']['indices'][:], f['X']['indptr'][:], f['X'].attrs['shape'] return sp.csr_matrix((data, indices, indptr), shape=shape) else: return f['X'][:] def get_h5_labels(h5path): with h5py.File(h5path, 'r') as f: if 'y' in f: return f['y'][:] else: return None def get_h5_labels_rows(h5path, sorted_sample_rows): with h5py.File(h5path, 'r') as f: if 'y' in f: return f['y'][sorted_sample_rows] else: return None def get_h5_mat_shape(h5path): """ Returns: tuple """ with h5py.File(h5path, 'r') as f: is_sparse = isinstance(f['X'], h5py.Group) shape = f['X'].attrs['shape'] if is_sparse else f['X'].shape return (int(shape[0]), int(shape[1])) def get_h5_csr_rows(data, indices, indptr, shape, sorted_sample_rows, verbose=0): """ Args: data (h5py.Dataset): indices (h5py.Dataset): indptr (h5py.Dataset): sample_rows (list or np.ndarray): Returns: csr_matrix """ new_data, 
new_indices, new_indptr = [], [], [0]
	it = tqdm(enumerate(sorted_sample_rows), total=len(sorted_sample_rows)) if verbose else enumerate(sorted_sample_rows)
	for i, r in it:
		b, e = indptr[r], indptr[r+1]
		new_indptr.append(new_indptr[i] + e - b)
		new_data.append(data[b: e])
		new_indices.append(indices[b: e])
	return csr_matrix((np.hstack(new_data), np.hstack(new_indices), new_indptr), shape=(len(sorted_sample_rows), shape[1]))


def get_mat_memuse(m):
	"""get matrix's memory; unit: GB
	"""
	assert isinstance(m, sp.csr_matrix) or isinstance(m, np.ndarray)
	if sp.issparse(m):
		nbytes = m.data.nbytes + m.indptr.nbytes + m.indices.nbytes
	else:
		nbytes = m.nbytes
	return nbytes / 1073741824 # 1024**3; G


def get_load_func(file_format):
	if file_format == JSON_FILE_FORMAT:
		return lambda path: json.load(open(path))
	if file_format == PKL_FILE_FORMAT:
		return lambda path: pickle.load(open(path, 'rb'))
	if file_format == NPY_FILE_FORMAT or file_format == NPZ_FILE_FORMAT:
		return lambda path: np.load(path)
	if file_format == SPARSE_NPZ_FILE_FORMAT:
		return lambda path: load_npz(path)
	if file_format == JOBLIB_FILE_FORMAT:
		return lambda path: joblib.load(path)
	assert False


def get_save_func(file_format):
	if file_format == JSON_FILE_FORMAT:
		return lambda obj, path: json.dump(obj, open(path, 'w'), indent=2, ensure_ascii=False)
	if file_format == PKL_FILE_FORMAT:
		return lambda obj, path: pickle.dump(obj, open(path, 'wb'))
	if file_format == NPY_FILE_FORMAT:
		return lambda obj, path: np.save(path, obj)
	if file_format == NPZ_FILE_FORMAT:
		return lambda obj, path: np.savez_compressed(path, obj)
	if file_format == SPARSE_NPZ_FILE_FORMAT:
		return lambda obj, path: save_npz(path, obj)
	if file_format == JOBLIB_FILE_FORMAT:
		return lambda obj, path: joblib.dump(obj, path)
	assert False


def check_return(attrCollector):
	def outerWrapper(func):
		def wrapper(cls, *args, **kwargs):
			coll = getattr(cls, attrCollector, None)
			if coll is not None:
				return coll
			coll = func(cls, *args, **kwargs)
			setattr(cls, attrCollector, coll)
			return coll
		return wrapper
	return outerWrapper


def get_logger(name, logPath=None, level=logging.DEBUG, mode='a'):
	"""
	Args:
		name (str or None): None means return the root logger
		logPath (str or None): path of the log file
	"""
	formatter = logging.Formatter(fmt="%(message)s", datefmt='%Y-%m-%d %H:%M:%S')
	logger = logging.getLogger(name)
	if len(logger.handlers) != 0:
		return logger
	logger.setLevel(level)
	if logPath is not None:
		fh = logging.FileHandler(logPath, mode=mode)
		fh.setFormatter(formatter)
		logger.addHandler(fh)
	ch = logging.StreamHandler()
	ch.setFormatter(formatter)
	logger.addHandler(ch)
	return logger


def delete_logger(logger):
	while logger.handlers:
		logger.handlers.pop()


def sparse2tuple(mx):
	"""Convert sparse matrix to tuple representation.
	ref: https://github.com/tkipf/gcn/blob/master/gcn/utils.py
	"""
	if not sp.isspmatrix_coo(mx):
		mx = mx.tocoo()
	coords = np.vstack((mx.row, mx.col)).transpose()
	values = mx.data
	shape = mx.shape
	return coords, values, shape


def x_to_input(X):
	return sparse2tuple(X) if sp.issparse(X) else X


def sparse_x_to_input(X):
	X = sparse2tuple(X)
	return X, X[1].shape


def read_file_folder(path, handle_func, recursive=True):
	"""
	Args:
		path (string): path of file or file folder
		handle_func (function): params = (file_path,)
		recursive (bool): whether to recursively traverse the sub folders
	"""
	if os.path.isfile(path):
		handle_func(path)
	elif recursive:
		for file_name in os.listdir(path):
			file_dir = os.path.join(path, file_name)
			read_file_folder(file_dir, handle_func, recursive)


def get_file_list(path, filter):
	"""
	Args:
		path (string): path of file or file folder
		filter (function): params = (file_path,); i.e. filter=lambda file_path: file_path.endswith('.json')
	Returns:
		list: [file_path1, file_path2, ...]
	"""
	def handle_func(file_path):
		if filter(file_path):
			file_list.append(file_path)
	file_list = []
	read_file_folder(path, handle_func)
	return file_list


def l2_normalize(X):
	"""
	Args:
		X (np.array or sp.csr_matrix): (n_samples, n_features)
	Returns:
		np.array or sp.csr_matrix: (n_samples, n_features)
	"""
	row_inv_norm = 1. / l2_norm(X).reshape((-1, 1))
	return X.multiply(row_inv_norm) if sp.issparse(X) else X * row_inv_norm


def l2_norm(X):
	"""
	Args:
		X (np.array or sp.csr_matrix): (n_samples, n_features)
	Returns:
		np.array: (n_samples,)
	"""
	return sparse_l2_norm(X) if sp.issparse(X) else ary_l2_norm(X)


def ary_l2_norm(X):
	return np.sqrt(np.square(X).sum(axis=1))


def sparse_l2_norm(X):
	return np.sqrt(X.power(2).sum(axis=-1)).A


def get_sample_ranks(sample_range, sample_num):
	return np.random.choice(sample_range, sample_num)


def sample_dist_from(X, sample_num, dist_name):
	from aide.utils_dist import get_dist_func
	dist_func = get_dist_func(dist_name)
	ranks1, ranks2 = get_sample_ranks(X.shape[0], sample_num), get_sample_ranks(X.shape[0], sample_num)
	X1, X2 = X[ranks1], X[ranks2]
	sample_dist = dist_func(X1, X2)
	return sample_dist
{"hexsha": "46f460120889411760cc710596be9df647fec4d9", "size": 8820, "ext": "py", "lang": "Python", "max_stars_repo_path": "aide/utils_.py", "max_stars_repo_name": "tinglabs/aide", "max_stars_repo_head_hexsha": "3aee646b219cd81214cb3681286735ff24c72d88", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-03T07:28:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-26T04:04:57.000Z", "max_issues_repo_path": "aide/utils_.py", "max_issues_repo_name": "tinglabs/aide", "max_issues_repo_head_hexsha": "3aee646b219cd81214cb3681286735ff24c72d88", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-09-26T00:53:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:49:13.000Z", "max_forks_repo_path": "aide/utils_.py", "max_forks_repo_name": "tinglabs/aide", "max_forks_repo_head_hexsha": "3aee646b219cd81214cb3681286735ff24c72d88", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-03T08:16:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-03T08:16:16.000Z", "avg_line_length": 27.1384615385, "max_line_length": 137, "alphanum_fraction": 0.7114512472, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 2529}
#' Convert descriptives to a tidy data frame #' #' \code{tidy_describe_data} returns a tidy data frame of descriptive statistics created with \strong{tidystats}' \code{describe_data}. #' #' @param descriptives A data frame created with tidystats' \code{describe_data}. #' #' @examples #' library(dplyr) #' #' # Calculate descriptives #' descriptives <- describe_data(sleep, extra) #' #' # Create a tidy data frame of the descriptives #' tidy_describe_data(descriptives) #' #' # With a grouping variable: #' sleep %>% #' group_by(group) %>% #' describe_data(extra) %>% #' tidy_describe_data() #' #' @import dplyr #' @import tidyr #' #' @export tidy_describe_data <- function(descriptives) { # Retrieve grouping information groups <- dplyr::group_vars(descriptives) # Gather the data if (length(groups) > 0) { output <- descriptives %>% tidyr::gather("statistic", "value", -var, -one_of(groups)) %>% dplyr::arrange(.by_group = TRUE) %>% tidyr::unite(col = "group", groups, sep = " - ") } else { output <- tidyr::gather(descriptives, "statistic", "value", -var) } # Arrange by var output <- select(output, everything()) %>% arrange(var) return(output) }
{"hexsha": "57f43bfa55c1da6c4d6703b5620f37c96952b7ef", "size": 1210, "ext": "r", "lang": "R", "max_stars_repo_path": "R/tidy_describe_data.r", "max_stars_repo_name": "WillemSleegers/tidystats-v0.3", "max_stars_repo_head_hexsha": "03b08a96c1cb4617a3c90daab3ae88d51d1f5fcc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-07-08T07:13:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-15T04:49:11.000Z", "max_issues_repo_path": "R/tidy_describe_data.r", "max_issues_repo_name": "WillemSleegers/tidystats-v0.3", "max_issues_repo_head_hexsha": "03b08a96c1cb4617a3c90daab3ae88d51d1f5fcc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-23T16:47:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-24T22:10:58.000Z", "max_forks_repo_path": "R/tidy_describe_data.r", "max_forks_repo_name": "WillemSleegers/tidystats-v0.3", "max_forks_repo_head_hexsha": "03b08a96c1cb4617a3c90daab3ae88d51d1f5fcc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-12T14:03:19.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-01T19:27:09.000Z", "avg_line_length": 25.2083333333, "max_line_length": 135, "alphanum_fraction": 0.6619834711, "num_tokens": 346}
\chapter{Components} \label{sec:draw} Some sweet pictures! \nomenclature[aA]{$y^+$}{Length in viscous units} ... ... ...
{"hexsha": "205f140dd979884b0f43dd79a98a797f6ca468de", "size": 124, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "appendices/fabricationPictures.tex", "max_stars_repo_name": "Biles430/Dissertation", "max_stars_repo_head_hexsha": "5594a5f3d172662b4404d5357d3a28639a0feb43", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "appendices/fabricationPictures.tex", "max_issues_repo_name": "Biles430/Dissertation", "max_issues_repo_head_hexsha": "5594a5f3d172662b4404d5357d3a28639a0feb43", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "appendices/fabricationPictures.tex", "max_forks_repo_name": "Biles430/Dissertation", "max_forks_repo_head_hexsha": "5594a5f3d172662b4404d5357d3a28639a0feb43", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.4, "max_line_length": 49, "alphanum_fraction": 0.6532258065, "num_tokens": 37}
\section{How to create a simulation}
To create your own simulation, from an XML description or a C++ file, you have to respect some rules. The Modeler can be used to have a quick view of all the components already available in Sofa.

\subsection{Model a dynamic object}
To model a dynamic object, you have to follow these steps:

\subsubsection{Mechanical}
\begin{enumerate}
 \item { \bf GNode}: Generally, we give it the name of the whole object
 \item { \bf Solver}: choose the solver you want to resolve this part of the simulation (you might actually need two components, an OdeSolver followed by a LinearSolver)
 \item { \bf Topology}: describes how the dofs will be connected
 \item { \bf MechanicalState}: the degrees of freedom (dofs) of your object. It is the heart of the simulation
 \item { \bf Mass}: the mass attached to each dof of the object
 \item { \bf ForceField}: describes the behavior of your object, how it will interact. If you don't specify one, your model won't be deformable
 \item { \bf Constraint}: optional
\end{enumerate}

\begin{figure}
 \centering
 \includegraphics[width=0.5\textwidth]{Modelling0.jpg}
 \caption{Basic example modelling a Finite Element Object}
\end{figure}

After these steps, you will have a mechanical model that can be integrated into a Sofa simulation. Nevertheless, you won't have any visual model, only points representing your dofs.

\subsubsection{Visual}
Using the previously described mechanism of Mapping, you can attach a visual model of any kind to represent your mechanical object.
\begin{enumerate}
 \item { \bf GNode}: add a GNode inside your current object. It will contain the components necessary to do the visual mapping
 \item { \bf VisualModel}: this component contains the mesh representing your object
 \item { \bf Mapping}: a non-mechanical mapping will connect your mesh to the dofs. This mapping won't transmit forces from your visual model to the dofs. If you are writing...
 \begin{itemize}
  \item { \bf a C++ file}, take care to use a non-mechanical template: the second object should be a template of ExtVec3Types
  \item { \bf an XML file}, you have to specify the path to the two models to be mapped:
  \begin{itemize}
   \item object1=``../..'' : meaning the dofs are located one level below
   \item object2=``Visual'' : where ``Visual'' is the name of your VisualModel (as described in this example, it is located at the same level as your mapping)
  \end{itemize}
 \end{itemize}
\end{enumerate}

\begin{figure}[htpb]
 \centering
 \includegraphics[width=0.5\textwidth]{Modelling1.jpg}
 \caption{Basic example modelling a Finite Element Object with a Visual Model}
\end{figure}

\subsubsection{Collision}
If you need to simulate interactions between objects, you will need another node, a Collision Node. In the example we describe, we will use a TriangleModel as the collision model. We chose it because it behaves like most of our collision models, needing a topology and dofs to behave properly. But if you use the simple SphereCollisionModel, this component already contains a topology, dofs and a collision model, so you will just have to create a mechanical mapping.
\begin{enumerate}
 \item { \bf GNode}: add a GNode inside your current object. It will contain the components necessary to do the mechanical mapping
 \item { \bf Topology}
 \item { \bf MechanicalState}: the dofs of your collision model.
They will be used to transmit the forces they receive from the interactions to the real mechanical dofs of your object
 \item { \bf CollisionModel}: the collision model; a sequence of them can be specified (for example, TriangleModel, then LineModel, then PointModel)
 \item { \bf MechanicalMapping}: for an XML description of your object, you don't need to specify which is object1 or object2
\end{enumerate}

\begin{figure}[htpb]
 \centering
 \includegraphics[width=0.5\textwidth]{Modelling2.jpg}
 \caption{Basic example modelling a Finite Element Object with a Visual Model and CollisionModel}
\end{figure}

Your object is now ready to be inserted in a Sofa simulation. Another example of a full object, using SphereModels, is shown below.

\begin{figure}[htpb]
 \centering
 \includegraphics[width=0.5\textwidth]{Modelling3.jpg}
 \caption{Modeling a liver with sphere collision model}
\end{figure}

\subsection{Model a static object}
Fixed objects, like floors, walls, or objects that must only be used as obstacles, are easier to model.
\begin{enumerate}
 \item { \bf GNode}: Generally, we give it the name of the whole object
 \item { \bf Topology}: describes how the dofs will be connected
 \item { \bf MechanicalState}: the degrees of freedom (dofs) of your object
 \item { \bf CollisionModel}: the collision model; a sequence of them can be specified (for example, TriangleModel, then LineModel, then PointModel). You have to specify the fact that your object is fixed by setting some flags:
 \begin{itemize}
  \item { \bf moving}: whether your object can be displaced. You can think of an external interaction, using a haptic device for instance
  \item { \bf simulated}: whether your object is controlled by a simulation. Generally, a fixed object is not simulated.
 \end{itemize}
 \item { \bf VisualModel}: this component contains the mesh representing your object
\end{enumerate}
No mapping is needed, as no forces or position modifications will be transmitted.

\begin{figure}[htpb]
 \centering
 \includegraphics[width=0.5\textwidth]{Modelling4.jpg}
 \caption{Modeling a fixed object}
\end{figure}

\subsection{Include Collisions}
To perform collision detection, as you have seen, the objects of the scene must have one or several collision models. In addition, you will have to set up several components performing the collision detection and response:
\begin{enumerate}
 \item { \bf CollisionPipeline}: currently, only our default collision pipeline is available
 \item { \bf CollisionDetection}: the method to detect collisions
 \item { \bf IntersectionMethods}: depending on the collision detection algorithm, you may have to specify some components to perform, for example, the proximity intersection test
 \item { \bf ContactManager}: receiving the collisions found, it will generate a response. You can choose the response you want by filling the field ``response''. By default, we use a penalty response
 \item { \bf CollisionGroupManager}: manages collisions between different kinds of simulated objects. It avoids explosions of your simulation by changing the graph dynamically, putting an appropriate solver above the objects in interaction
\end{enumerate}

\begin{figure}[htpb]
 \centering
 \includegraphics[width=0.5\textwidth]{Modelling5.jpg}
 \caption{Collision Components}
\end{figure}
{"hexsha": "88c1ac580530eaefe29aedafc582ed920981f43d", "size": 6679, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/HowTo/createSimulation.tex", "max_stars_repo_name": "sofa-framework/issofa", "max_stars_repo_head_hexsha": "94855f488465bc3ed41223cbde987581dfca5389", "max_stars_repo_licenses": ["OML"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/HowTo/createSimulation.tex", "max_issues_repo_name": "sofa-framework/issofa", "max_issues_repo_head_hexsha": "94855f488465bc3ed41223cbde987581dfca5389", "max_issues_repo_licenses": ["OML"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/HowTo/createSimulation.tex", "max_forks_repo_name": "sofa-framework/issofa", "max_forks_repo_head_hexsha": "94855f488465bc3ed41223cbde987581dfca5389", "max_forks_repo_licenses": ["OML"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.5877192982, "max_line_length": 363, "alphanum_fraction": 0.7736188052, "num_tokens": 1593}
import pandas as pd import h5py import numpy as np from rdkit.Chem import MolFromSmiles from rdkit.Chem.rdMolDescriptors import GetMorganFingerprintAsBitVect def smiles2ecfp(smiles, radius=4, bits=2048): mol = MolFromSmiles(smiles) if mol is None: return "" fp = GetMorganFingerprintAsBitVect(mol, radius, nBits=bits) return "".join(map(str, list(fp))) if __name__ == "__main__": path = "data/ChEMBL24_all_compounds.csv.gz" data = pd.read_csv(path) # Calculate ECFP extracted_data = data[["ChEMBL_ID", " SMILES"]] extracted_data["ECFP"] = extracted_data[" SMILES"].map(smiles2ecfp) extracted_data.drop( extracted_data[extracted_data["ECFP"] == ""].index, inplace=True) extracted_data.to_csv("data/ChEMBL24_smiles_fp.csv") # Convert the csv file to hdf5 file chembl = pd.read_csv("data/ChEMBL24_smiles_fp.csv") h5f = h5py.File("data/ChEMBL24.hdf5", "w") root_gp = h5f.create_group("/ChEMBL") dt = h5py.string_dtype(encoding="utf-8") root_gp.create_dataset("ChEMBL_ID", data=chembl["ChEMBL_ID"].astype(bytes), dtype=dt) root_gp.create_dataset("SMILES", data=chembl[" SMILES"].astype(bytes), dtype=dt) np_ecfp = chembl["ECFP"].map(lambda x: np.fromiter(x, dtype=int)) root_gp.create_dataset("ECFP", data=np.stack(list(np_ecfp), axis=0)) h5f.close()
{"hexsha": "dc96bcb4bdb8420de0c9e05cce8069456142391d", "size": 1458, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data_preprocessing/chembl24.py", "max_stars_repo_name": "XieResearchGroup/PLANS", "max_stars_repo_head_hexsha": "479e97f5944dcc036d5f4204890a371ebafb394a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data_preprocessing/chembl24.py", "max_issues_repo_name": "XieResearchGroup/PLANS", "max_issues_repo_head_hexsha": "479e97f5944dcc036d5f4204890a371ebafb394a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data_preprocessing/chembl24.py", "max_forks_repo_name": "XieResearchGroup/PLANS", "max_forks_repo_head_hexsha": "479e97f5944dcc036d5f4204890a371ebafb394a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3846153846, "max_line_length": 73, "alphanum_fraction": 0.6515775034, "include": true, "reason": "import numpy", "num_tokens": 418}
using PyPlot using Statistics include("../../../src/extract_planet.jl") include("../../../src/laplace_wisdom.jl") function chop_coeff_inner(alpha,j) # Computes f_1^(j) from equation (10) in Deck & Agol (2015). beta = j*(1-alpha^1.5) # Equation (11) in Deck & Agol (2015): f1 = 2*beta*laplace_wisdom(1//2,j,1,alpha)+ j*(3+beta^2)*laplace_wisdom(1//2,j,0,alpha) if j == 1 f1 -= alpha*(beta^2+2*beta+3) end f1 *= alpha/(beta^2*(1-beta^2)) return f1 end function chop_coeff_outer(alpha,j) # Computes f_2^(j) from equation (14) in Deck & Agol (2015). kappa = j*(1/alpha^1.5-1) # Equation (15) in Deck & Agol (2015): f2 = (j*(kappa^2+3)+2*kappa)*laplace_wisdom(1//2,j,0,alpha)+ 2*kappa*laplace_wisdom(1//2,j,1,alpha) if j == 1 f2 -= (kappa^2-2*kappa+3)/alpha^2 end f2 /= (kappa^2*(kappa^2-1)) return f2 end function plot_ttv_diff(data,elements,tt1,count1,nplanet,jmax) # Now make some plots: #fig,axes = subplots(4,2,sharex="col",constrained_layout="True") fig,axes = subplots(7,1,sharex="col",constrained_layout="True",figsize=(8,8)) #fig,axes = subplots(7,1,sharex="col",figsize=(24,24)) #fig,axes = subplots(7,1,sharex="col") plabel = ["T1b","T1c","T1d","T1e","T1f","T1g","T1h"] range = [1.5,1.5,1.5,3.0,12.0,8.0,4.0] # First, go through the planets and compute their # ephemerides: coeff_planet = zeros(2,nplanet) for ip=1:nplanet eobs,tobs,sobs,nobs = extract_planet(data,ip) fn = zeros(2,nobs); fn[1,:] .= 1.0; fn[2,:] .= eobs coeff,cov = regress(fn,tobs,sobs) # Now plot TTVFast: fn = zeros(Float64,2,count1[ip+1]) sig = ones(count1[ip+1]) tti1 = tt1[ip+1,1:count1[ip+1]] tt_ref1 = zeros(count1[ip+1]) epoch = zeros(count1[ip+1]) for j=1:count1[ip+1] fn[1,j] = 1.0 fn[2,j] = round(Int64,(tti1[j]-elements[ip+1,3])/elements[ip+1,2]) epoch[j] = round((tti1[j]-coeff[1])/coeff[2]) tt_ref1[j] = coeff[1]+coeff[2]*epoch[j] end coeff,cov = regress(fn,tti1,sig) coeff_planet[:,ip] .= coeff end # Now make some plots: eoffset = [43,10,75,9,7,3,22] ord = [5,5,18,30,18,18,18] for ip=1:nplanet ax = axes[ip] coeff = zeros(Float64,2) # Plot each "Mode" eobs,tobs,sobs,nobs = extract_planet(data,ip) # println("ip: ",ip," nobs: ",nobs) fn = zeros(2,nobs) fn[1,:] .= 1.0 fn[2,:] .= eobs coeff,cov = regress(fn,tobs,sobs) # println(ip," ",coeff) # Now plot TTVFast: fn = zeros(Float64,2,count1[ip+1]) sig = ones(count1[ip+1]) tti1 = tt1[ip+1,1:count1[ip+1]] tt_ref1 = zeros(count1[ip+1]) epoch = zeros(count1[ip+1]) for j=1:count1[ip+1] fn[1,j] = 1.0 fn[2,j] = round(Int64,(tti1[j]-elements[ip+1,3])/elements[ip+1,2]) epoch[j] = round((tti1[j]-coeff[1])/coeff[2]) tt_ref1[j] = coeff[1]+coeff[2]*epoch[j] end coeff,cov = regress(fn,tti1,sig) # println(ip," ",coeff) tt_ref1 = coeff[1] .+coeff[2] .*fn[2,:] ttv1 = (tti1 .-tt_ref1) .*(24*60) # ax.plot(tti1,ttv1,label=plabel[ip]) # Now, fit a high-order polynomial to TTVs, and overplot it: fn = zeros(ord[ip]+1,count1[ip+1]) fn[1,:] .= 1.0 for j=1:ord[ip] fn[j+1,:] .= epoch.^j end coeff_poly,cov = regress(fn,tti1,sig) mod_poly = ones(count1[ip+1])*coeff_poly[1] for j=1:ord[ip] mod_poly .+= fn[j+1,:] .*coeff_poly[j+1] end # ax.plot(tti1,(mod_poly .-tt_ref1) .*(24*60)) ttv_mod = (tti1 .- mod_poly) .*(24*60) ax.plot(tti1,ttv_mod,label=plabel[ip]) # ttv_obs = tobs .- coeff_poly[1] .- coeff_poly[2] .*(eobs .+eoffset[ip]) ttv_obs = tobs .- coeff_poly[1] .- coeff_poly[2] .*eobs for j=2:ord[ip] # ttv_obs .-= coeff[j+1] .*(eobs .+eoffset[ip]).^j ttv_obs .-= coeff_poly[j+1] .*eobs.^j end ttv_obs .*= 24*60 sobs_min = sobs .*(24*60) if ip < 5 igood = sobs_min .< 0.5*(maximum(ttv_mod)-minimum(ttv_mod)) else igood = 
ones(Bool,size(ttv_obs)[1]) end println("Fraction of points: ",sum(igood)/size(sobs)[1]) ax.errorbar(tobs[igood], ttv_obs[igood], sobs_min[igood],fmt=".") if ip == 7 ax.set_xlabel(L"BJD$_\mathrm{TDB}$-2,450,000") end if ip == 4 ax.set_ylabel("TTV - Polynomial [min]") end # ax.legend(loc="upper right",fontsize=10) ax.legend(fontsize=8) ax.axis([7200,8900,-range[ip],range[ip]]) # ax.axis([7960,8860,-range[ip],range[ip]]) # Now, overplot chopping due to all adjacent planets: dt_chop = zeros(length(tti1)) if ip > 1 for jp=ip-1:-1:1 # Compute the observed mean longitudes at the time of transit: lam1 = 2*pi*(tti1 .-coeff_planet[1,jp])./coeff_planet[2,jp] lam2 = 2*pi*(tti1 .-coeff_planet[1,ip])./coeff_planet[2,ip] dlambda = lam1-lam2 alpha = (coeff_planet[2,jp]/coeff_planet[2,ip])^(2//3) for j=1:jmax dt_chop .+= elements[1+jp,1]*chop_coeff_outer(alpha,j)*sin.(j*dlambda) end end end if ip < nplanet for jp = ip+1:nplanet # Compute the observed mean longitudes at the time of transit: lam1 = 2*pi*(tti1 .-coeff_planet[1,ip])./coeff_planet[2,ip] lam2 = 2*pi*(tti1 .-coeff_planet[1,jp])./coeff_planet[2,jp] dlambda = lam1-lam2 alpha = (coeff_planet[2,ip]/coeff_planet[2,jp])^(2//3) for j=1:jmax dt_chop .+= elements[1+jp,1]*chop_coeff_inner(alpha,j)*sin.(j*dlambda) end end end dt_chop .*= coeff_planet[2,ip]/(2pi) println(ip," range of chopping: ",minimum(dt_chop*(24*60))," ",maximum(dt_chop*(24*60))) # Remove a polynomial: coeff_poly,cov = regress(fn,dt_chop,sig) fill!(mod_poly,coeff_poly[1]) for j=1:ord[ip] mod_poly .+= fn[j+1,:] .*coeff_poly[j+1] end ax.plot(tti1,(dt_chop-mod_poly)*(24*60),".",markersize=2.0) # ax.plot(tti1,(dt_chop-mod_poly)*(24*60) .- ttv_mod) end #ax = axes[8] #ax.axis("off") tight_layout() subplots_adjust(hspace=0) #println("Ephemerides: ",coeff_planet) savefig("../T1_chopping.pdf",bbox_inches="tight") return end
{"hexsha": "c4e00f5bb6ae3da4f9c9e04adb144085e733b677", "size": 5762, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "tex/figures/julia/plot_ttv_diff.jl", "max_stars_repo_name": "educrot/TRAPPIST1_Spitzer", "max_stars_repo_head_hexsha": "850ca965c3c8a794519ce9f73a117d08039c3de6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tex/figures/julia/plot_ttv_diff.jl", "max_issues_repo_name": "educrot/TRAPPIST1_Spitzer", "max_issues_repo_head_hexsha": "850ca965c3c8a794519ce9f73a117d08039c3de6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/figures/julia/plot_ttv_diff.jl", "max_forks_repo_name": "educrot/TRAPPIST1_Spitzer", "max_forks_repo_head_hexsha": "850ca965c3c8a794519ce9f73a117d08039c3de6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4863387978, "max_line_length": 90, "alphanum_fraction": 0.6310308921, "num_tokens": 2294}
import numpy as np import math import cv2 import os import json # from scipy.special import expit # from utils.box import BoundBox, box_iou, prob_compare # from utils.box import prob_compare2, box_intersection from ...utils.box import BoundBox from ...cython_utils.cy_yolo2_findboxes import box_constructor # from .sort import black ds = True try: from deep_sort.application_util import preprocessing as prep from deep_sort.application_util import visualization from deep_sort.deep_sort.detection import Detection except: ds = False def expit(x): return 1. / (1. + np.exp(-x)) def _softmax(x): e_x = np.exp(x - np.max(x)) out = e_x / e_x.sum() return out def findboxes(self, net_out): # meta meta = self.meta boxes = list() boxes = box_constructor(meta, net_out) # print(boxes) return boxes def extract_boxes(self, new_im): cont = [] new_im = new_im.astype(np.uint8) ret, thresh = cv2.threshold(new_im, 127, 255, 0) p, contours, hierarchy = cv2.findContours( thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) for i in range(0, len(contours)): cnt = contours[i] x, y, w, h = cv2.boundingRect(cnt) if w * h > 30 ** 2 and ((w < new_im.shape[0] and h <= new_im.shape[1]) or (w <= new_im.shape[0] and h < new_im.shape[1])): if self.FLAGS.tracker == "sort": cont.append([x, y, x + w, y + h]) else: cont.append([x, y, w, h]) return cont def postprocess(self, net_out, im, frame_id=0, csv_file=None, csv=None, mask=None, encoder=None, tracker=None): """ Takes net output, draw net_out, save to disk """ boxes = self.findboxes(net_out) # meta meta = self.meta nms_max_overlap = 0.1 threshold = meta['thresh'] colors = meta['colors'] labels = meta['labels'] if type(im) is not np.ndarray: imgcv = cv2.imread(im) else: imgcv = im h, w, _ = imgcv.shape thick = int((h + w) // 300) resultsForJSON = [] if not self.FLAGS.track: for b in boxes: boxResults = self.process_box(b, h, w, threshold) if boxResults is None: continue left, right, top, bot, mess, max_indx, confidence = boxResults if self.FLAGS.json: resultsForJSON.append({"label": mess, "confidence": float('%.2f' % confidence), "topleft": {"x": left, "y": top}, "bottomright": {"x": right, "y": bot}}) continue if self.FLAGS.display or self.FLAGS.saveVideo: cv2.rectangle(imgcv, (left, top), (right, bot), colors[max_indx], thick) cv2.putText(imgcv, mess, (left, top - 12), 0, 1e-3 * h, colors[max_indx], thick // 3) else: if not ds: print("ERROR : deep sort or sort submodules not found for tracking please run :") print("\tgit submodule update --init --recursive") print("ENDING") exit(1) detections = [] scores = [] for b in boxes: boxResults = self.process_box(b, h, w, threshold) if boxResults is None: continue left, right, top, bot, mess, max_indx, confidence = boxResults if mess not in self.FLAGS.trackObj: continue if self.FLAGS.tracker == "deep_sort": detections.append(np.array([left, top, right - left, bot - top]).astype(np.float64)) scores.append(confidence) elif self.FLAGS.tracker == "sort": detections.append(np.array([left, top, right, bot]).astype(np.float64)) # detections = np.append(detections, [left,top,right-left,bot-top]) # detections = np.append(detections, mess) if len(detections) < 3 and self.FLAGS.BK_MOG: detections = detections + extract_boxes(self, mask) detections = np.array(detections) # print(detections[4]) # print(boxResults[4]) if detections.shape[0] == 0: return imgcv if self.FLAGS.tracker == "deep_sort": scores = np.array(scores) features = encoder(imgcv, detections.copy()) detections = [ Detection(bbox, score, feature) for bbox, score, feature in 
zip(detections, scores, features)] # Run non-maxima suppression. boxes = np.array([d.tlwh for d in detections]) scores = np.array([d.confidence for d in detections]) indices = prep.non_max_suppression(boxes, nms_max_overlap, scores) detections = [detections[i] for i in indices] tracker.predict() tracker.update(detections) trackers = tracker.tracks elif self.FLAGS.tracker == "sort": trackers = tracker.update(detections) for track in trackers: if self.FLAGS.tracker == "deep_sort": if not track.is_confirmed() or track.time_since_update > 1: continue bbox = track.to_tlbr() id_num = str(track.track_id) elif self.FLAGS.tracker == "sort": bbox = [int(track[0]), int(track[1]), int(track[2]), int(track[3])] id_num = str(int(track[4])) messi = str(int(track[5])) # print(id_num) if self.FLAGS.csv: csv.writerow([frame_id, id_num, int(bbox[0]), int(bbox[1]), int(bbox[2]) - int(bbox[0]), int(bbox[3]) - int(bbox[1])]) csv_file.flush() if self.FLAGS.display or self.FLAGS.saveVideo: cv2.rectangle(imgcv, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), thick // 3) cv2.line(imgcv, (100, 360), (1200, 360), (0, 0, 255), 6) cv2.putText(imgcv, ('CAR ='), (10, 50), 0, 1, (255, 255, 255), 2) # cv2.putText(imgcv, id_num,(int(bbox[0]), int(bbox[1]) - 12),0, 1e-3 * h, (255,255,255),thick//6) cv2.putText(imgcv, messi, (120, 50), 0, 1, (0, 0, 255), thick // 3) return imgcv
{"hexsha": "9aa9e155e7b22087ecc7586e9e697c2272c5f795", "size": 6277, "ext": "py", "lang": "Python", "max_stars_repo_path": "sort/predict.py", "max_stars_repo_name": "srnthsrdhrn/VehicleTrackingGUI", "max_stars_repo_head_hexsha": "a18d890176de7547d557dfe7cc18dd37afa37411", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sort/predict.py", "max_issues_repo_name": "srnthsrdhrn/VehicleTrackingGUI", "max_issues_repo_head_hexsha": "a18d890176de7547d557dfe7cc18dd37afa37411", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sort/predict.py", "max_forks_repo_name": "srnthsrdhrn/VehicleTrackingGUI", "max_forks_repo_head_hexsha": "a18d890176de7547d557dfe7cc18dd37afa37411", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-13T08:43:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-13T08:43:57.000Z", "avg_line_length": 37.5868263473, "max_line_length": 169, "alphanum_fraction": 0.5574318942, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1607}
import BlockArrays: BlockIndex, BlockIndexRange, globalrange, nblocks, global2blockindex, blockindex2global @testset "Blocks" begin @test Int(Block(2)) === Integer(Block(2)) === Number(Block(2)) === 2 @test Block((Block(3), Block(4))) === Block(3,4) end #= [1,1 1,2] | [1,3 1,4 1,5] -------------------------- [2,1 2,2] | [2,3 2,4 2,5] [3,1 3,2] | [3,3 3,4 3,5] ---------------------------- [4,1 4,2] | [4,3 4,4 4,5] [5,1 5,2] | [5,3 5,4 5,5] [6,1 6,2] | [6,3 6,4 6,5] =# @testset "BlockSizes / BlockIndices" begin block_size = BlockArrays.BlockSizes([1,2,3], [2, 3]) @test nblocks(block_size) == (3,2) @test nblocks(block_size, 1) == 3 @test nblocks(block_size, 2) == 2 @test @inferred(globalrange(block_size, (1,1))) == (1:1, 1:2) @test @inferred(globalrange(block_size, (1,2))) == (1:1, 3:5) @test @inferred(globalrange(block_size, (2,1))) == (2:3, 1:2) @test @inferred(globalrange(block_size, (2,2))) == (2:3, 3:5) # Test for allocations inside a function to avoid noise due to global # variable references wrapped_allocations = (bs, i) -> @allocated(globalrange(bs, i)) @test wrapped_allocations(block_size, (1, 1)) == 0 @test @inferred(global2blockindex(block_size, (3, 1))) == BlockIndex((2,1), (2,1)) @test @inferred(global2blockindex(block_size, (1, 4))) == BlockIndex((1,2), (1,2)) @test @inferred(global2blockindex(block_size, (4, 5))) == BlockIndex((3,2), (1,3)) wrapped_allocations = (bs, i) -> @allocated(global2blockindex(bs, i)) @test wrapped_allocations(block_size, (3, 1)) == 0 @test @inferred(blockindex2global(block_size, BlockIndex((2,1), (2,1)))) == (3, 1) @test @inferred(blockindex2global(block_size, BlockIndex((1,2), (1,2)))) == (1, 4) @test @inferred(blockindex2global(block_size, BlockIndex((3,2), (1,3)))) == (4, 5) wrapped_allocations = (bs, i) -> @allocated(blockindex2global(bs, i)) @test wrapped_allocations(block_size, BlockIndex((2,1), (2,1))) == 0 @test block_size == BlockArrays.BlockSizes(1:3, 2:3) buf = IOBuffer() print(buf, block_size) @test String(take!(buf)) == "[1, 2, 3] × [2, 3]" @test BlockArrays.searchlinear([1,2,3], 5) == 3 @test Base.dataids(block_size) == Base.dataids(block_size) @test Block(1)[1] == BlockIndex((1,),(1,)) @test Block(1)[1:2] == BlockIndexRange(Block(1),(1:2,)) @test Block(1,1)[1,1] == BlockIndex((1,1),(1,1)) @test Block(1,1)[1:2,1:2] == BlockIndexRange(Block(1,1),(1:2,1:2)) A = BlockVector([1,2,3],[1,2]) @test A[Block(2)[2]] == 3 @test A[Block(2)[1:2]] == [2,3] @test A[getindex.(Block.(1:2), 1)] == [1,2] @test_throws BlockBoundsError A[Block(3)] @test_throws BlockBoundsError A[Block(3)[1]] @test_throws BoundsError A[Block(3)[1:1]] # this is likely an error @test_throws BoundsError A[Block(2)[3]] @test_throws BoundsError A[Block(2)[3:3]] end @testset "sortedin" begin v = [1,3,4] @test BlockArrays.sortedin(1,v) @test !BlockArrays.sortedin(2,v) @test !BlockArrays.sortedin(0,v) @test !BlockArrays.sortedin(5,v) end
{"hexsha": "9043580c4413fdc0c1249d9354fb2248a2b4760d", "size": 3126, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_blockindices.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/BlockArrays.jl-8e7c35d0-a365-5155-bbbb-fb81a777f24e", "max_stars_repo_head_hexsha": "5812573bf3ee4b5797ba631e6b7a8b02c0f9d3d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-12-30T10:45:16.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-30T10:45:16.000Z", "max_issues_repo_path": "test/test_blockindices.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/BlockArrays.jl-8e7c35d0-a365-5155-bbbb-fb81a777f24e", "max_issues_repo_head_hexsha": "5812573bf3ee4b5797ba631e6b7a8b02c0f9d3d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_blockindices.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/BlockArrays.jl-8e7c35d0-a365-5155-bbbb-fb81a777f24e", "max_forks_repo_head_hexsha": "5812573bf3ee4b5797ba631e6b7a8b02c0f9d3d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1219512195, "max_line_length": 107, "alphanum_fraction": 0.6014075496, "num_tokens": 1191}
#!/usr/bin/env python
# PyTorch 1.8.1-CPU virtual env.
# Python 3.9.4 Windows 10
# -*- coding: utf-8 -*-

import numpy as xp


class Variable(object):

    def __init__(self, data):
        self.data = data
        self.creator = None
        self.grad = 1

    def set_creator(self, gen_func):
        self.creator = gen_func

    def backward(self):
        if self.creator is None:  # input data
            return
        func = self.creator
        while func:
            gy = func.output.grad
            func.input.grad = func.backward(gy)
            func = func.input.creator


class Function(object):

    def __call__(self, in_var):
        in_data = in_var.data
        output = self.forward(in_data)
        ret = Variable(output)
        ret.set_creator(self)
        self.input = in_var
        self.output = ret
        return ret

    def forward(self, in_data):
        raise NotImplementedError()

    def backward(self, grad_output):
        raise NotImplementedError()


class Mul(Function):

    def __init__(self, init_w):
        self.w = init_w  # Initialize the parameter

    def forward(self, in_data):
        return in_data * self.w

    def backward(self, grad_output):
        gx = self.w * grad_output
        self.gw = self.input
        return gx


data = xp.array([0, 1, 2, 3])
f1 = Mul(2)
f2 = Mul(3)
f3 = Mul(4)
y0 = Variable(data)
y1 = f1(y0)  # y1 = y0 * 2
y2 = f2(y1)  # y2 = y1 * 3
y3 = f3(y2)  # y3 = y2 * 4
print(y0.data)
print(y1.data)
print(y2.data)
print(y3.data)

y3.backward()
print(y3.grad)  # df3 / dy3 = 1
print(y2.grad)  # df3 / dy2 = (df3 / dy3) * (dy3 / dy2) = 1 * 4
print(y1.grad)  # df3 / dy1 = (df3 / dy3) * (dy3 / dy2) * (dy2 / dy1) = 1 * 4 * 3
print(y0.grad)  # df3 / dy0 = (df3 / dy3) * (dy3 / dy2) * (dy2 / dy1) * (dy1 / dy0) = 1 * 4 * 3 * 2

print(f3.gw.data)
print(f2.gw.data)
print(f1.gw.data)
{"hexsha": "eebcc70f71becb5378ead2ff0605651fa63d7e78", "size": 1814, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/exp_2.py", "max_stars_repo_name": "mitmul/1f-chainer", "max_stars_repo_head_hexsha": "f9970493214ba615f22579a234b6954267427fd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2016-12-07T18:34:17.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-01T08:21:18.000Z", "max_issues_repo_path": "examples/exp_2.py", "max_issues_repo_name": "mitmul/1f-chainer", "max_issues_repo_head_hexsha": "f9970493214ba615f22579a234b6954267427fd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/exp_2.py", "max_forks_repo_name": "mitmul/1f-chainer", "max_forks_repo_head_hexsha": "f9970493214ba615f22579a234b6954267427fd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-12-07T18:33:54.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-20T22:30:44.000Z", "avg_line_length": 21.5952380952, "max_line_length": 99, "alphanum_fraction": 0.5578831312, "include": true, "reason": "import numpy", "num_tokens": 572}
#!/usr/bin/env python
# PyTorch 1.8.1-CPU virtual env.
# Python 3.9.4 Windows 10
# -*- coding: utf-8 -*-
"""This script implements the classical Longstaff-Schwartz algorithm for
pricing American options. It focuses on the multidimensional case for
rainbow options.
"""
# reproducibility
seed = 3
import random
import numpy as np
import torch
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import Products
import time
import SimulationPaths.GBM
import h5py

#############
# American Put
############
#timeStepsTotal = 100
#normalizeStrike= 40
#spot = 36
#putOption = Products.Option(timeToMat=1, strike=1, typeOfContract="Put")
#marketVariables = Products.MarketVariables(r=0.06,vol=0.2, spot=spot/normalizeStrike)
#learningPaths= SimulationPaths.GBM.generateSDEStockPaths(pathTotal=10**6, timeStepsPerYear=timeStepsTotal, timeToMat=putOption.timeToMat, MarketVariables=marketVariables)
### Save data
#f = h5py.File(os.path.join('data', 'AmericanPut', '1MPut100K.hdf5'), 'w')
#f.create_dataset('RND', data = learningPaths)
#f.close()
##
#for i in range(100):
#    # create empirical estimations
#    pricingPaths = SimulationPaths.GBM.generateSDEStockPaths(pathTotal=10**4, timeStepsPerYear=timeStepsTotal, timeToMat=putOption.timeToMat, MarketVariables=marketVariables)
#    g = h5py.File(os.path.join("data", "AmericanPut", "PricePaths100K", f"PricePath{i}.hdf5"), 'w')
#    g.create_dataset('RND', data = pricingPaths)
#    g.close()
#    print(i)

######################
# American Call Max option on two stocks
########################
#import SimulationPaths.GBMMultiDim
#timeStepsTotal = 9
#normalizeStrike=100
#callMax = Products.Option(timeToMat=3, strike=1, typeOfContract="CallMax")
#underlyingsTotal = 2
#marketVariables = Products.MarketVariables(r=0.05, dividend=0.10, vol=0.2, spot=[100/normalizeStrike]*underlyingsTotal, correlation=0.0)
#
#timeSimPathsStart = time.time()
#learningPaths = SimulationPaths.GBMMultiDim.simulatePaths(timeStepsTotal=timeStepsTotal,pathsTotal=10**6, marketVariables=marketVariables, timeToMat=callMax.timeToMat)
#timeSimPathsEnd = time.time()
#print(f"Time taken to simulate paths is: {timeSimPathsEnd-timeSimPathsStart:f}")
### Save data
#f = h5py.File('data/MaxCall/1MPut.hdf5', 'w')
#f.create_dataset('RND', data = learningPaths)
#f.close()
#
#estimates = np.zeros(100)
#for i in range(100):
#    # create empirical estimations
#    pricingPaths = SimulationPaths.GBMMultiDim.simulatePaths(timeStepsTotal=timeStepsTotal,pathsTotal=10**4, marketVariables=marketVariables, timeToMat=callMax.timeToMat)
#    g = h5py.File(f"data/MaxCall/PricePaths/PricePath{i}.hdf5", 'w')
#    g.create_dataset('RND', data = pricingPaths)
#    g.close()
#    print(i)
#
####################
# American Geometric Average option on 7 stocks
#####################
#import SimulationPaths.GBMMultiDim
#timeStepsTotal = 10
#normalizeStrike=100
#geometricCall = Products.Option(timeToMat=1, strike=1, typeOfContract="CallGeometricAverage")
#underlyingsTotal = 7
#marketVariables = Products.MarketVariables(r=0.03, dividend=0.05, vol=0.4, spot=[100/normalizeStrike]*underlyingsTotal, correlation=0.0)
#timeSimPathsStart = time.time()
#learningPaths = SimulationPaths.GBMMultiDim.simulatePaths(timeStepsTotal=timeStepsTotal,pathsTotal=10**6, marketVariables=marketVariables, timeToMat=geometricCall.timeToMat)
#timeSimPathsEnd = time.time()
#print(f"Time taken to simulate paths is: {timeSimPathsEnd-timeSimPathsStart:f}")
### Save data
#f = h5py.File(os.path.join('.', 'data', 'GeometricCall', '1MAssets7.hdf5'), 'w')
#f.create_dataset('RND', data = learningPaths)
#f.close()
#
#estimates = np.zeros(100)
#for i in range(100):
#    # create empirical estimations
#    pricingPaths = SimulationPaths.GBMMultiDim.simulatePaths(timeStepsTotal=timeStepsTotal,pathsTotal=10**4, marketVariables=marketVariables, timeToMat=geometricCall.timeToMat)
#    g = h5py.File(os.path.join(".", "data", "GeometricCall", "PricePaths", f"PricePath{i}.hdf5"), 'w')
#    g.create_dataset('RND', data = pricingPaths)
#    g.close()
#    print(i)
#
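# Note (illustrative, not part of the original script): the blocks above write
# simulated paths to HDF5 under the dataset key 'RND'. A minimal sketch for
# reading one of those files back, assuming a file produced by the American
# Put block exists at the path shown:
#
#     import h5py
#     with h5py.File(os.path.join('data', 'AmericanPut', '1MPut100K.hdf5'), 'r') as f:
#         learningPaths = f['RND'][:]  # numpy array of simulated GBM paths
#     print(learningPaths.shape)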
{"hexsha": "058d6d780a60a0a5f737f354c4ccd9a8083c66a9", "size": 4186, "ext": "py", "lang": "Python", "max_stars_repo_path": "generateData.py", "max_stars_repo_name": "MrPPL/FNNMC", "max_stars_repo_head_hexsha": "6ecbe8fcf802a409d9f2dcbc62c3291e182915eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generateData.py", "max_issues_repo_name": "MrPPL/FNNMC", "max_issues_repo_head_hexsha": "6ecbe8fcf802a409d9f2dcbc62c3291e182915eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generateData.py", "max_forks_repo_name": "MrPPL/FNNMC", "max_forks_repo_head_hexsha": "6ecbe8fcf802a409d9f2dcbc62c3291e182915eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0392156863, "max_line_length": 177, "alphanum_fraction": 0.7403248925, "include": true, "reason": "import numpy", "num_tokens": 1199}
from PIL import Image, ImageDraw, ImageFont, ImageFilter import os import numpy as np import cv2 import math import copy from albumentations import IAAAffine, IAAPerspective import random angle_map = {"left": 225, "vertical": 270, "right": 315, "top": 45, "horizontal": 0, "down": 315} def get_text_size(font, char): return font.getsize(char) # im = Image.new("L", (1, 1), "black") # draw = ImageDraw.Draw(im) # return draw.textsize(char, font) def get_image_each_empty_row_nums(im): bbox = im.getbbox() w, h = im.size if not bbox: return int(h * 0.1), int(h * 0.1), int(w * 0.1), int(w * 0.1) return bbox[1], h - bbox[3], bbox[0], w - bbox[2] # im_array = np.array(im) # top_row_cnt = 0 # for row in im_array: # if row.sum() == 0: # top_row_cnt += 1 # else: # break # # bot_row_cnt = 0 # for i in range(len(im_array) - 1, -1, -1): # if im_array[i].sum() == 0: # bot_row_cnt += 1 # else: # break # # left_row_cnt = 0 # for i in range(len(im_array[0])): # if im_array[:, i].sum() == 0: # left_row_cnt += 1 # else: # break # # right_row_cnt = 0 # for i in range(len(im_array[0]) - 1, -1, -1): # if im_array[:, i].sum() == 0: # right_row_cnt += 1 # else: # break # # return top_row_cnt, bot_row_cnt, left_row_cnt, right_row_cnt def get_each_empty_row_nums(char, font, img_width, img_height, text_width, text_height, x=None, y=None): if x is None: x = (img_width - text_width) / 2 if y is None: y = (img_height - text_height) / 2 im = render_text(font, char, x, y, img_width, img_height, text_color=(255, 255, 255, 255)) result = get_image_each_empty_row_nums(im) im.close() im = None return result def interpolate(startValue, endValue, stepNumber, lastStepNumber): return ((endValue - startValue) * stepNumber / lastStepNumber + startValue).astype(int) def get_masks(font, text, char_spacing=0): tmp_mask = font.getmask(text, mode="L") tmp_w, tmp_h = tmp_mask.size margin_ratio = 1.5 tmp_im = Image.new('L', (int(tmp_w * margin_ratio), tmp_h), 'black') tmp_draw = ImageDraw.Draw(tmp_im) tmp_draw.text((0, 0), text[0], fill='white', font=font) tmp_mask_im = Image.new('L', (int(tmp_w * margin_ratio), tmp_h), 'black') tmp_mask_draw = ImageDraw.Draw(tmp_mask_im) tmp_mask_draw.text((0, 0), text[0], fill='white', font=font) tmp_mask_im = np.array(tmp_mask_im) tmp_mask_im = tmp_mask_im > 0 tmp_mask_im = tmp_mask_im.astype(np.uint8) * 255 tmp_mask_im = Image.fromarray(tmp_mask_im, mode="L") mask_im = Image.new('L', (int(tmp_w * margin_ratio), tmp_h), 'black') mask_im.paste(tmp_mask_im, (0, 0)) tmp_mask_im.close() x1, y1, x2, y2 = tmp_im.getbbox() for i, char in enumerate(text[1:]): tmp_mask_im = Image.new('L', (int(tmp_w * margin_ratio) - x2, tmp_h), 'black') tmp_mask_draw = ImageDraw.Draw(tmp_mask_im) tmp_mask_draw.text((0, 0), char, fill='white', font=font) tmp_mask_im = np.array(tmp_mask_im) tmp_mask_im = tmp_mask_im > 0 tmp_mask_im = tmp_mask_im.astype(np.uint8) * (254 - i) tmp_mask_im = Image.fromarray(tmp_mask_im, mode="L") mask_im.paste(tmp_mask_im, (x2 + char_spacing, 0)) tmp_draw.text((x2 + char_spacing, 0), char, fill='white', font=font) x1, y1, x2, y2 = tmp_im.getbbox() x1, y1, x2, y2 = tmp_im.getbbox() tmp_im = tmp_im.crop((x1, y1, x2, y2)) mask_im = mask_im.crop((x1, y1, x2, y2)) ori_text_mask = tmp_im return ori_text_mask, mask_im def aug_multi_masks(mask_im, result_im, aug): min_val = np.unique(mask_im)[1] mask_ar = np.array(mask_im) mask_channels = None for i in range(min_val, 256): tmp_channel = (mask_ar == i) * 1 tmp_channel = np.expand_dims(tmp_channel, axis=0).astype(np.uint8) if mask_channels is None: mask_channels 
= tmp_channel else: mask_channels = np.concatenate((mask_channels, tmp_channel), axis=0) result = aug(image=np.array(result_im), masks=mask_channels) image = result['image'] mask_sum = None for i, tmp_mask in enumerate(result['masks']): tmp_channel = tmp_mask * (min_val + i) if mask_sum is None: mask_sum = tmp_channel else: union_mask = mask_sum & tmp_channel if np.sum(union_mask) > 0: mask_sum *= (union_mask == False) mask_sum += tmp_channel return image, mask_sum def create_text_shadow(text_shadow, image_width, image_height, ori_text_mask, mask_width, mask_height, text_x, text_y): shadow_color = tuple(text_shadow["color"]) shadow_width = text_shadow["width"] shadow_blur_amount = text_shadow["blur_count"] direction = text_shadow["direction"] mask_list = [] for i in range(shadow_width): gap = i + 1 if direction == "bottom_right": tmp_mask1 = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y + gap, image_width, image_height, is_expand=True) tmp_mask2 = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y, image_width, image_height, is_expand=True) tmp_mask3 = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y + gap, image_width, image_height, is_expand=True) if tmp_mask1 is not None: mask_list.append(tmp_mask1) if tmp_mask2 is not None: mask_list.append(tmp_mask2) if tmp_mask3 is not None: mask_list.append(tmp_mask3) elif direction == "right": tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) elif direction == "top_right": tmp_mask1 = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y - gap, image_width, image_height, is_expand=True) tmp_mask2 = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y, image_width, image_height, is_expand=True) tmp_mask3 = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y - gap, image_width, image_height, is_expand=True) if tmp_mask1 is not None: mask_list.append(tmp_mask1) if tmp_mask2 is not None: mask_list.append(tmp_mask2) if tmp_mask3 is not None: mask_list.append(tmp_mask3) elif direction == "top": tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y - gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) elif direction == "top_left": tmp_mask1 = pad_mask(ori_text_mask, mask_height, mask_width, text_x - gap, text_y, image_width, image_height, is_expand=True) tmp_mask2 = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y - gap, image_width, image_height, is_expand=True) tmp_mask3 = pad_mask(ori_text_mask, mask_height, mask_width, text_x - gap, text_y - gap, image_width, image_height, is_expand=True) if tmp_mask1 is not None: mask_list.append(tmp_mask1) if tmp_mask2 is not None: mask_list.append(tmp_mask2) if tmp_mask3 is not None: mask_list.append(tmp_mask3) elif direction == "left": tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x - gap, text_y, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) elif direction == "bottom_left": tmp_mask1 = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y + gap, image_width, image_height, is_expand=True) tmp_mask2 = pad_mask(ori_text_mask, mask_height, mask_width, text_x - gap, text_y, image_width, image_height, is_expand=True) tmp_mask3 = pad_mask(ori_text_mask, mask_height, mask_width, text_x - + gap, text_y + gap, image_width, image_height, 
is_expand=True) if tmp_mask1 is not None: mask_list.append(tmp_mask1) if tmp_mask2 is not None: mask_list.append(tmp_mask2) if tmp_mask3 is not None: mask_list.append(tmp_mask3) elif direction == "bottom": tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y + gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) if len(mask_list) > 0: if len(mask_list) == 1: tmp_mask = mask_list[0] else: tmp_mask = np.concatenate(mask_list, axis=2) tmp_mask = np.amax(tmp_mask, axis=2) tmp_mask = np.expand_dims(tmp_mask, axis=2) shadow_im = render_text_masked(tmp_mask, image_width, image_height, text_color=shadow_color) mask_list = None tmp_mask = None if shadow_blur_amount > 0: for i in range(shadow_blur_amount): shadow_im = shadow_im.filter(ImageFilter.BLUR) return shadow_im def create_text_border(text_border, image_width, image_height, ori_text_mask, mask_width, mask_height, text_x, text_y): border_color = tuple(text_border["color"]) border_width = text_border["width"] border_blur_iter_num = text_border["blur_count"] mask_list = [] for i in range(border_width): gap = i + 1 tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x - gap, text_y, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y - gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x, text_y + gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x - gap, text_y - gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y - gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x - gap, text_y + gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) tmp_mask = pad_mask(ori_text_mask, mask_height, mask_width, text_x + gap, text_y + gap, image_width, image_height, is_expand=True) if tmp_mask is not None: mask_list.append(tmp_mask) if len(mask_list) > 0: if len(mask_list) == 1: tmp_mask = mask_list[0] else: tmp_mask = np.concatenate(mask_list, axis=2) tmp_mask = np.amax(tmp_mask, axis=2) tmp_mask = np.expand_dims(tmp_mask, axis=2) border_im = render_text_masked(tmp_mask, image_width, image_height, text_color=border_color) mask_list = None tmp_mask = None if border_blur_iter_num > 0: for i in range(border_blur_iter_num): border_im = border_im.filter(ImageFilter.BLUR) return border_im def draw_text(text_x, text_y, text, fg_color, font, text_border=None, text_gradient=None, text_skew=None, text_shadow=None, text_width_ratio=1.0, text_height_ratio=1.0, text_rotate=0.0, image_width=None, image_height=None, char_spacing=0, text_italic=None, italic_ratio=0.0, use_text_persp_trans=False, text_persp_trans_params=None, raise_exception=False, use_default_render=False, return_mask=False): if return_mask: ori_text_mask, mask_im = get_masks(font, text, char_spacing) else: ori_text_mask = 
font.getmask(text) mask_width, mask_height = ori_text_mask.size shadow_im = None border_im = None if text_shadow is not None: shadow_im = create_text_shadow(text_shadow, image_width, image_height, ori_text_mask, mask_width, mask_height, text_x, text_y) if text_border is not None: border_im = create_text_border(text_border, image_width, image_height, ori_text_mask, mask_width, mask_height, text_x, text_y) if text_gradient is None or len(text_gradient["anchors"]) < 2: if use_default_render: text_im = Image.new("RGBA", (image_width, image_height), (0, 0, 0, 0)) draw = ImageDraw.Draw(text_im) draw.text((text_x, text_y), text, fill=fg_color, font=font) else: text_im = render_text(font, text, text_x, text_y, image_width, image_height, text_color=fg_color, text_mask=ori_text_mask) if return_mask: tmp_mask_im = Image.new('L', (image_width, image_height), 'black') tmp_mask_im.paste(mask_im, (text_x, text_y)) mask_im = tmp_mask_im elif text_gradient["type"] == "linear": grad_array = get_multiple_gradation(image_width, image_height, text_gradient) text_im = render_text(font, text, text_x, text_y, image_width, image_height, draw_im=grad_array, text_mask=ori_text_mask) if return_mask: tmp_mask_im = Image.new('L', (image_width, image_height), 'black') tmp_mask_im.paste(mask_im, (text_x, text_y)) mask_im = tmp_mask_im else: if raise_exception: raise Exception("no text foreground.") if shadow_im is not None: result_im = Image.alpha_composite(shadow_im, text_im) text_im.close() text_im = None shadow_im.close() shadow_im = None else: result_im = text_im if border_im is not None: result_im = Image.alpha_composite(border_im, result_im) border_im.close() border_im = None if text_italic: width, height = result_im.size m = italic_ratio xshift = abs(m) * width new_width = width + int(round(xshift)) result_im = result_im.transform((new_width, height), Image.AFFINE, (1, m, -xshift if m > 0 else 0, 0, 1, 0), Image.BICUBIC) if return_mask: mask_im = mask_im.transform((new_width, height), Image.AFFINE, (1, m, -xshift if m > 0 else 0, 0, 1, 0), Image.NEAREST) if use_text_persp_trans: result_im = perspective_transform(result_im, limit_ratio=0.2, params=text_persp_trans_params, inter=Image.BICUBIC) if return_mask: mask_im = perspective_transform(mask_im, limit_ratio=0.2, params=text_persp_trans_params, inter=Image.NEAREST) if text_width_ratio > 0.: w, h = result_im.size result_im = result_im.resize((int(w * text_width_ratio), h), resample=Image.BILINEAR) if return_mask: mask_im = mask_im.resize((int(w * text_width_ratio), h), resample=Image.NEAREST) if text_height_ratio > 0.: w, h = result_im.size result_im = result_im.resize((w, int(h * text_height_ratio)), resample=Image.BILINEAR) if return_mask: mask_im = mask_im.resize((w, int(h * text_height_ratio)), resample=Image.NEAREST) if text_rotate is not None and (text_rotate > -360 and text_rotate < 360): if result_im.size[0] > result_im.size[1]: tmp_im = Image.new("RGBA", (result_im.size[0], result_im.size[0]), (0, 0, 0, 0)) tmp_im.paste(result_im, (0, round(result_im.size[0] / 2) - round(result_im.size[1] / 2))) result_im = tmp_im if return_mask: tmp_im = Image.new("L", (mask_im.size[0], mask_im.size[0]), 'black') tmp_im.paste(mask_im, (0, round(mask_im.size[0] / 2) - round(mask_im.size[1] / 2))) mask_im = tmp_im result_im = result_im.rotate(text_rotate, resample=Image.BILINEAR) if return_mask: mask_im = mask_im.rotate(text_rotate, resample=Image.NEAREST) if return_mask: return result_im, mask_im else: return result_im def get_bbox_area(im): bbox = im.getbbox() if bbox is 
None: return 0 return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) def _perspective_transform(image, return_arr=False, params=None, mask=None): # if params: # a = params[0] # b = params[1] # else: # a = 0.05 * (random.random() - (1 / 2)) # b = 0.1 * (random.random() - (1 / 2)) # if params: aug = params else: a = 0.05 * (random.random() - (1 / 2)) b = 0.1 * (random.random() - (1 / 2)) aug = IAAPerspective(p=1, scale=(0.05 + a, 0.1 + b), keep_size=True) if mask: image, mask_im = aug_multi_masks(mask, image, aug) if return_arr: return Image.fromarray(image), image, Image.fromarray(mask_im.astype(np.uint8)), mask_im else: return Image.fromarray(image), Image.fromarray(mask_im.astype(np.uint8)) else: image = aug(image=np.array(image))['image'] if return_arr: return Image.fromarray(image), image else: return Image.fromarray(image) def _perspective_transform_with_bg(image_with_bg, img_persp_trans_params, mask=None): ori_std = np.array(image_with_bg).std() if mask: image, image_ar, tmp_mask, mask_ar = _perspective_transform(image_with_bg, True, params=img_persp_trans_params, mask=mask) else: image, image_ar = _perspective_transform(image_with_bg, True, params=img_persp_trans_params) retry_cnt = 0 while image.getbbox() is None or image_ar.std() < ori_std / 4: retry_cnt += 1 if retry_cnt > 2: if mask is not None: return image_with_bg, mask else: return image_with_bg # raise Exception("image perspective error") if mask: image, image_ar, tmp_mask, mask_ar = _perspective_transform(image_with_bg, True, params=img_persp_trans_params, mask=mask) else: image, image_ar = _perspective_transform(image_with_bg, True, params=img_persp_trans_params) if mask is not None: mask = tmp_mask return image, mask return image def pad_mask(text_mask, mask_height, mask_width, text_x=0, text_y=0, image_width=None, image_height=None, is_expand=True): if image_width is None: image_width = mask_width if image_height is None: image_height = mask_height if image_width < mask_width + text_x or image_height < mask_height + text_y: return None if text_x < 0: text_x = 0 if text_y < 0: text_y = 0 if not isinstance(text_mask, np.ndarray): text_mask = np.array(text_mask) text_mask = text_mask.reshape((mask_height, mask_width)) text_mask = np.pad(text_mask, ((text_y, image_height - mask_height - text_y), (text_x, image_width - mask_width - text_x)), mode='constant') if is_expand: text_mask = np.expand_dims(text_mask, axis=2) return text_mask def render_text(font, text, text_x=0, text_y=0, image_width=None, image_height=None, text_color=None, draw_im=None, text_mask=None): if not text_mask: text_mask = font.getmask(text) mask_width, mask_height = text_mask.size text_mask = pad_mask(text_mask, mask_height, mask_width, text_x, text_y, image_width, image_height) if text_mask is None: raise Exception("failed to pad text mask") if draw_im is None: if text_color is None: text_color = (0, 0, 0, 0) draw_im = Image.new("RGBA", (image_width, image_height), tuple(text_color)) # (0, 0, 0, 0)) if not isinstance(draw_im, np.ndarray): im_arr = np.array(draw_im) else: im_arr = draw_im im_alpha = im_arr[:, :, 3:4] * (text_mask / 255) im_rgb = im_arr[:, :, :3] * (text_mask != 0).astype(int) im_arr = np.concatenate((im_rgb, im_alpha), axis=2) draw_im = Image.fromarray(im_arr.astype(int).astype(np.int8), mode="RGBA") return draw_im def render_text_masked(text_mask, image_width=None, image_height=None, text_color=None, draw_im=None): if draw_im is None: if text_color is None: text_color = (0, 0, 0, 0) draw_im = Image.new("RGBA", (image_width, image_height), 
tuple(text_color)) # (0, 0, 0, 0)) im_arr = np.array(draw_im) im_alpha = im_arr[:, :, 3:4] * (text_mask / 255) im_rgb = im_arr[:, :, :3] * (text_mask != 0).astype(int) im_arr = np.concatenate((im_rgb, im_alpha), axis=2) draw_im = Image.fromarray(im_arr.astype(int).astype(np.int8), mode="RGBA") return draw_im def get_font_size_by_text_height(font_path, text, text_height, allowable_pixels=0, font_size_step=5, max_try=30, max_allowable_pixels=3, start_font_size=None, min_font_size=4): paddings = {"left": 0.0, "top": 0.0, "right": 0.0, "bottom": 0.0} font_size = round(text_height * 1.1) pad_length = text_height * paddings["top"] + text_height * paddings["bottom"] target_length = text_height if start_font_size: font_size = start_font_size is_small_target = None # font_size_step = int(font_size * 0.16) tried = 0 min_diff_pixels = 1000000 best_font_size = font_size last_try = max_try * 2 while True: if font_size < min_font_size: font_size = 7 break # raise Exception("min font size") font = ImageFont.truetype(font_path, font_size) text_mask = font.getmask(text) text_width, text_height = text_mask.size text_length = text_height cur_diff_pixels = abs(target_length - (text_length + pad_length)) if cur_diff_pixels <= allowable_pixels: break if min_diff_pixels > cur_diff_pixels: min_diff_pixels = cur_diff_pixels best_font_size = font_size if target_length > (text_length + pad_length): if is_small_target: if font_size_step == 1: break else: font_size_step -= 1 is_small_target = False if is_small_target is None: is_small_target = False font_size += font_size_step else: if is_small_target is not None and not is_small_target: if font_size_step == 1: break else: font_size_step -= 1 is_small_target = True if is_small_target is None: is_small_target = True font_size -= font_size_step if tried > last_try: font_size = best_font_size break if tried > max_try: if min_diff_pixels <= max_allowable_pixels: font_size = best_font_size break else: max_try += 10 tried += 1 return font_size def get_gradation_2d(start, stop, width, height, is_horizontal): if is_horizontal: return np.tile(np.linspace(start, stop, width), (height, 1)) else: return np.tile(np.linspace(start, stop, height), (width, 1)).T def create_simple_text_image_by_size(text, font_path, width, height, paddings={"left": 0.05, "top": 0.05, "right": 0.05, "bottom": 0.05}, allowable_pixels=0, font_size_step=5, output_path=None, fg_color=(0, 0, 0, 255), use_bg_color=True, bg_color=(255, 255, 255, 255), bg_img_path=None, image_quality=100, image_format="JPEG", limit_height=20, return_info=False, use_extend_bg_image=True, max_try=30, max_allowable_pixels=3, start_font_size=None, min_font_size=4, target_resize=True, raise_exception=True, return_numpy=True, use_default_render=False): if width is not None and height is not None: if width * 2 <= height: use_width = True font_size = int(width * 0.9) pad_length = paddings["left"] + paddings["right"] target_length = width else: use_width = False font_size = int(height * 0.9) pad_length = paddings["top"] + paddings["bottom"] target_length = height if start_font_size: font_size = start_font_size is_small_target = None # font_size_step = int(font_size * 0.16) tried = 0 min_diff_pixels = 1000000 best_font_size = font_size last_try = max_try * 2 while True: if font_size < min_font_size: font_size = 7 break # raise Exception("min font size") font = ImageFont.truetype(font_path, font_size) text_width, text_height = get_text_size(font, text) if text_width > text_height: img_width = text_width * 2 img_height = round(text_width 
* 1.5) else: img_width = round(text_height * 1.5) img_height = text_height * 2 text_x = (img_width - text_width) // 2 text_y = (img_height - text_height) // 2 text_im = draw_text(text_x, text_y, text, fg_color, font, image_width=img_width, image_height=img_height, raise_exception=raise_exception, use_default_render=use_default_render) img_width, img_height = text_im.size top, bottom, left, right = get_image_each_empty_row_nums(text_im) text_width = img_width - (right + left) text_height = img_height - (bottom + top) if use_width: text_length = text_width else: text_length = text_height cur_diff_pixels = abs(target_length - (text_length + pad_length)) if cur_diff_pixels <= allowable_pixels: break if min_diff_pixels > cur_diff_pixels: min_diff_pixels = cur_diff_pixels best_font_size = font_size if target_length > (text_length + pad_length): if is_small_target: if font_size_step == 1: break else: font_size_step -= 1 is_small_target = False if is_small_target is None: is_small_target = False font_size += font_size_step else: if is_small_target is not None and not is_small_target: if font_size_step == 1: break else: font_size_step -= 1 is_small_target = True if is_small_target is None: is_small_target = True font_size -= font_size_step if tried > last_try: font_size = best_font_size break if tried > max_try: if min_diff_pixels <= max_allowable_pixels: font_size = best_font_size break else: max_try += 10 tried += 1 else: font_size = start_font_size if target_resize: target_width = width target_height = height else: target_width = None target_height = None return create_text_image(text=text, font_path=font_path, font_size=font_size, paddings=paddings, output_path=output_path, fg_color=fg_color, use_bg_color=use_bg_color, bg_color=bg_color, bg_img_path=bg_img_path, image_quality=image_quality, image_format=image_format, min_len=limit_height, return_info=return_info, use_extend_bg_image=use_extend_bg_image, target_width=target_width, target_height=target_height, return_numpy=return_numpy) def get_gradation_3d(width, height, start_list, stop_list, is_horizontal): result = np.zeros((height, width, len(start_list)), dtype=np.float) for i, (start, stop) in enumerate(zip(start_list, stop_list)): result[:, :, i] = get_gradation_2d(start, stop, width, height, is_horizontal) return result def get_multiple_gradation(target_width, target_height, grad_param): grad_array = None grad_param['anchors'] = sorted(grad_param['anchors'], key=lambda x: x['pos']) if grad_param['anchors'][0]['pos'] != 0: if grad_param['direction'] == 'horizontal': tw = round(target_width * grad_param['anchors'][0]['pos']) th = target_height else: tw = target_width th = round(target_height * grad_param['anchors'][0]['pos']) grad_array = np.full((th, tw, 4), grad_param['anchors'][0]['color'], dtype=np.uint8) for i in range(len(grad_param['anchors']) - 1): anchor = grad_param['anchors'][i] next_anchor = grad_param['anchors'][i + 1] if grad_param['direction'] == 'horizontal': tw = round(target_width * (next_anchor['pos'] - anchor['pos'])) th = target_height else: th = round(target_height * (next_anchor['pos'] - anchor['pos'])) tw = target_width tmp_grad = get_gradation_3d(tw, th, anchor['color'], next_anchor['color'], grad_param['direction'] == 'horizontal') if grad_array is None: grad_array = tmp_grad else: axis = 1 if grad_param['direction'] == 'horizontal' else 0 grad_array = np.concatenate((grad_array, tmp_grad), axis=axis) if len(grad_array[0]) > target_width: grad_array = grad_array[:, :target_width] if len(grad_array) > 
target_height: grad_array = grad_array[:target_height] if len(grad_array[0]) < target_width or len(grad_array) < target_height: if grad_param['direction'] == 'horizontal': tw = target_width - len(grad_array[0]) grad_array = np.concatenate( (grad_array, np.full((target_height, tw, 4), grad_param['anchors'][-1]['color'], dtype=np.uint8)), axis=1) else: th = target_height - len(grad_array) grad_array = np.concatenate( (grad_array, np.full((th, target_width, 4), grad_param['anchors'][-1]['color'], dtype=np.uint8)), axis=0) return grad_array def perspective_transform(im, limit_ratio=0.45, params=None, inter=Image.BICUBIC): w, h = im.size x1, y1 = params[0] p_a1 = [] p_b1 = [] if x1 == 0: p_a1.append(0) p_b1.append(0) elif x1 > 0: p_a1.append(round(w * limit_ratio * abs(x1))) p_b1.append(0) else: p_a1.append(0) p_b1.append(round(w * limit_ratio * abs(x1))) if y1 == 0: p_a1.append(0) p_b1.append(0) elif y1 > 0: p_a1.append(round(h * limit_ratio * abs(y1))) p_b1.append(0) else: p_a1.append(0) p_b1.append(round(h * limit_ratio * abs(y1))) x2, y2 = params[1] p_a2 = [] p_b2 = [] if x2 == 0: p_a2.append(w) p_b2.append(w) elif x2 > 0: p_a2.append(w) p_b2.append(w - round(w * limit_ratio * abs(x2))) else: p_a2.append(w - round(w * limit_ratio * abs(x2))) p_b2.append(w) if y2 == 0: p_a2.append(0) p_b2.append(0) elif y2 > 0: p_a2.append(round(h * limit_ratio * abs(y2))) p_b2.append(0) else: p_a2.append(0) p_b2.append(round(h * limit_ratio * abs(y2))) x3, y3 = params[2] p_a3 = [] p_b3 = [] if x3 == 0: p_a3.append(w) p_b3.append(w) elif x3 > 0: p_a3.append(w) p_b3.append(w - round(w * limit_ratio * abs(x3))) else: p_a3.append(w - round(w * limit_ratio * abs(x3))) p_b3.append(w) if y3 == 0: p_a3.append(h) p_b3.append(h) elif y3 > 0: p_a3.append(h) p_b3.append(h - round(h * limit_ratio * abs(y3))) else: p_a3.append(h - round(h * limit_ratio * abs(y3))) p_b3.append(h) x4, y4 = params[3] p_a4 = [] p_b4 = [] if x4 == 0: p_a4.append(0) p_b4.append(0) elif x4 > 0: p_a4.append(round(w * limit_ratio * abs(x4))) p_b4.append(0) else: p_a4.append(0) p_b4.append(round(w * limit_ratio * abs(x4))) if y4 == 0: p_a4.append(h) p_b4.append(h) elif y4 > 0: p_a4.append(h) p_b4.append(h - round(h * limit_ratio * abs(y4))) else: p_a4.append(h - round(h * limit_ratio * abs(y4))) p_b4.append(h) coeffs = find_coeffs( [tuple(p_a1), tuple(p_a2), tuple(p_a3), tuple(p_a4)], [tuple(p_b1), tuple(p_b2), tuple(p_b3), tuple(p_b4)]) return im.transform(im.size, Image.PERSPECTIVE, coeffs, inter) def find_coeffs(pa, pb): matrix = [] for p1, p2 in zip(pa, pb): matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]]) matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]]) A = np.matrix(matrix, dtype=np.float) B = np.array(pb).reshape(8) res = np.dot(np.linalg.inv(A.T * A) * A.T, B) return np.array(res).reshape(8) def create_composed_text_image(np_text_images, target_width, target_height, output_path=None, use_bg_color=True, bg_color=(255, 255, 255, 255), bg_img_path=None, pos_ratio=None, bg_gradient=None, color_mode="RGB", use_binarize=False, image_quality=100, image_format="JPEG", use_extend_bg_image=True, bg_img_scale=1.0, bg_img_height_ratio=1.0, bg_img_width_ratio=1.0, use_img_persp_trans=False, img_persp_trans_params=None, raise_exception=True, return_numpy=False, target_x=None, target_y=None ): if use_bg_color: if bg_gradient is None: bg_img = Image.new("RGBA", (target_width, target_height), tuple(bg_color)) elif bg_gradient["type"] == "linear": grad_array = get_multiple_gradation(target_width, 
target_height, bg_gradient) bg_img = Image.fromarray(grad_array.astype(np.uint8), mode="RGBA") else: bg_img = resize_bg_image(bg_img_path, bg_img_scale, bg_img_width_ratio, bg_img_height_ratio, pos_ratio, use_extend_bg_image, target_width, target_height, raise_exception) texts_arr = np.zeros_like(bg_img, dtype=np.uint8) texts_mask = np.zeros((bg_img.size[1], bg_img.size[0]), dtype=np.uint8) text_image_pos_list = [] none_cnt = 0 for text_image in np_text_images: h_l_ratio = random.random() w_l_ratio = random.random() x1 = target_x if target_x is not None else round(w_l_ratio * target_width) y1 = target_y if target_y is not None else round(h_l_ratio * target_height) try_cnt = 0 skip = False while np.sum(texts_mask[y1:y1 + text_image.shape[0], x1:x1 + text_image.shape[1]]) > 0 or x1 + text_image.shape[ 1] -1 >= target_width or y1 + text_image.shape[0] -1 >= target_height: try_cnt += 1 if try_cnt == 10: skip = True break h_l_ratio = random.random() w_l_ratio = random.random() y1 = round(h_l_ratio * target_height) x1 = round(w_l_ratio * target_width) if skip: if target_x is not None and target_y is not None: raise Exception("target x and y is failed to make composed image.") text_image_pos_list.append(None) none_cnt += 1 continue text_image_pos_list.append({"x": x1, "y": y1}) texts_mask[y1:y1 + text_image.shape[0], x1:x1 + text_image.shape[1]] = np.ones( (text_image.shape[0], text_image.shape[1]), dtype=np.uint8) texts_arr[y1:y1 + text_image.shape[0], x1:x1 + text_image.shape[1], :] = text_image if len(np_text_images) == none_cnt: raise Exception("failed to insert all images") fg_img = Image.fromarray(texts_arr, mode="RGBA") im = Image.alpha_composite(bg_img, fg_img) if im.mode != color_mode: im = im.convert(color_mode) # if use_img_persp_trans: # im = perspective_transform(im, limit_ratio=0.2, params=img_persp_trans_params, # inter=Image.BICUBIC) if use_binarize: img = np.array(im) img = img[:, :, ::-1].copy() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) ret2, img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) im = Image.fromarray(img) img = None # if target_width and target_height: # im = im.resize((target_width, target_height), resample=Image.BILINEAR) if output_path is None: if return_numpy: result = np.array(im) im.close() im = None return result, text_image_pos_list else: return im, text_image_pos_list else: if not os.path.isdir(os.path.dirname(output_path)): os.makedirs(os.path.dirname(output_path)) im.save(output_path, format=image_format, subsampling=0, quality=image_quality) im.close() im = None return text_image_pos_list def resize_bg_image(bg_img_path, bg_img_scale, bg_img_width_ratio, bg_img_height_ratio, pos_ratio, use_extend_bg_image, bg_w, bg_h, raise_exception): bg_img = Image.open(bg_img_path).convert("RGBA") bg_img_width, bg_img_height = bg_img.size if bg_img_height_ratio and bg_img_height_ratio != 1.0: bg_img_height *= bg_img_height_ratio if bg_img_width_ratio and bg_img_width_ratio != 1.0: bg_img_width *= bg_img_width_ratio if bg_img_scale and bg_img_scale != 1.0: bg_img_height *= bg_img_scale bg_img_width *= bg_img_scale bg_img_height = int(bg_img_height) bg_img_width = int(bg_img_width) bg_img = bg_img.resize((bg_img_width, bg_img_height)) img_width = bg_img_width img_height = bg_img_height if pos_ratio: text_x = img_width * pos_ratio[0] text_y = img_height * pos_ratio[1] else: text_x = img_width text_y = img_height if use_extend_bg_image: if bg_img_width < bg_w + text_x or bg_img_height < bg_h + text_y: target_w = bg_w + text_x if bg_img_width < bg_w 
+ text_x else bg_img_width target_h = bg_h + text_y if bg_img_height < bg_h + text_y else bg_img_height bg_img = bg_img.resize((int(target_w), int(target_h))) else: if bg_img_width < bg_w or bg_img_height < bg_h: if raise_exception: raise Exception("image bg size is more small than text. bg: {}x{}, text: {}x{}".format( bg_img_width, bg_img_height, bg_w, bg_h )) img_width = bg_img_width img_height = bg_img_height if pos_ratio: text_x = img_width * pos_ratio[0] text_y = img_height * pos_ratio[1] else: text_x = img_width text_y = img_height if bg_img_width - text_x < bg_w: text_x = bg_img_width - bg_w if bg_img_height - text_y < bg_h: text_y = bg_img_height - bg_h return bg_img.crop((text_x, text_y, text_x + bg_w, text_y + bg_h)) def calc_paddings(paddings, text_width, text_height, min_len): paddings = copy.deepcopy(paddings) if text_height < min_len or text_width < min_len: if paddings["top"] < 0: paddings["top"] = 0 if paddings["bottom"] < 0: paddings["bottom"] = 0 if paddings["left"] < 0: paddings["left"] = 0 if paddings["right"] < 0: paddings["right"] = 0 paddings["left"] = round(text_height * paddings["left"]) paddings["right"] = round(text_height * paddings["right"]) paddings["top"] = round(text_height * paddings["top"]) paddings["bottom"] = round(text_height * paddings["bottom"]) if paddings["left"] + paddings["right"] <= -(text_width // 3): paddings["left"] = random.randint(-1, round(text_width * 0.4)) paddings["right"] = random.randint(-1, round(text_width * 0.4)) return paddings def create_text_image(text, font_path, paddings={"left": 0.05, "top": 0.05, "right": 0.05, "bottom": 0.05}, font_size=160, output_path=None, fg_color=(0, 0, 0, 255), use_bg_color=True, bg_color=(255, 255, 255, 255), bg_img_path=None, pos_ratio=None, text_rotate=None, text_blur=None, bg_gradient=None, text_border=None, text_gradient=None, text_skew=None, text_italic=False, char_spacing=0, text_shadow=None, text_width_ratio=1.0, text_height_ratio=1.0, color_mode="RGB", use_binarize=False, image_quality=100, image_format="JPEG", min_len=20, return_info=False, use_extend_bg_image=True, target_width=None, target_height=None, bg_img_scale=1.0, bg_img_height_ratio=1.0, bg_img_width_ratio=1.0, italic_ratio=0.0, use_text_persp_trans=False, use_img_persp_trans=False, img_persp_trans_params=None, text_persp_trans_params=None, auto_chance_color_when_same=False, raise_exception=True, return_numpy=True, use_default_render=False, return_mask=False): if use_bg_color and bg_gradient is None and (text_gradient is None or len(text_gradient["anchors"]) < 2) and tuple( bg_color) == tuple(fg_color): if auto_chance_color_when_same: bg_color = (random.randint(1, 255), random.randint(1, 255), random.randint(1, 255), 255) else: if raise_exception: raise Exception("bg color and fg color are same", bg_color, fg_color) font = ImageFont.truetype(font_path, font_size) text_width, text_height = get_text_size(font, text) if text_width > text_height: if return_mask and char_spacing > 9: img_width = round(text_width * 2.) img_height = round(text_height * 2.) 
else: img_width = round(text_width * 1.5) img_height = round(text_height * 1.5) else: img_width = round(text_width * 1.5) img_height = round(text_height * 1.5) text_x = (img_width - text_width) // 2 text_y = (img_height - text_height) // 2 text_im = draw_text(text_x, text_y, text, fg_color, font, text_border=text_border, text_gradient=text_gradient, text_skew=text_skew, text_rotate=text_rotate, char_spacing=char_spacing, text_shadow=text_shadow, text_width_ratio=text_width_ratio, text_italic=text_italic, text_height_ratio=text_height_ratio, image_width=img_width, image_height=img_height, italic_ratio=italic_ratio, use_text_persp_trans=use_text_persp_trans, text_persp_trans_params=text_persp_trans_params, raise_exception=raise_exception, use_default_render=use_default_render, return_mask=return_mask) if return_mask: text_im, im_mask = text_im # im_mask = np.array(im_mask) # for j in range(1, len(text) + 1): # char_mask = (im_mask == j).astype(np.uint8) * 255 # Image.fromarray(char_mask, "L").show() font = None img_width, img_height = text_im.size top_row_cnt, bot_row_cnt, left_row_cnt, right_row_cnt = get_image_each_empty_row_nums(text_im) text_width = img_width - left_row_cnt - right_row_cnt text_height = img_height - top_row_cnt - bot_row_cnt paddings = calc_paddings(paddings, text_width, text_height, min_len) bg_w = text_width + paddings["left"] + paddings["right"] bg_h = text_height + paddings["top"] + paddings["bottom"] if bg_w < 1 or bg_h < 1: if raise_exception: raise Exception("small width or height", bg_w, bg_h, img_width, img_height, top_row_cnt, bot_row_cnt, left_row_cnt, right_row_cnt) else: bg_w = 100 bg_h = 50 text_bg_im = Image.new("RGBA", (bg_w, bg_h), (0, 0, 0, 0)) text_bg_im.paste(text_im, (paddings["left"] - left_row_cnt, paddings["top"] - top_row_cnt)) text_im.close() text_im = None if return_mask: tmp_im_mask = Image.new("L", (bg_w, bg_h), "black") tmp_im_mask.paste(im_mask, (paddings["left"] - left_row_cnt, paddings["top"] - top_row_cnt)) im_mask = tmp_im_mask if use_bg_color: if bg_gradient is None: bg_img = Image.new("RGBA", (bg_w, bg_h), tuple(bg_color)) elif bg_gradient["type"] == "linear": grad_array = get_multiple_gradation(bg_w, bg_h, bg_gradient) bg_img = Image.fromarray(grad_array.astype(np.uint8), mode="RGBA") else: bg_img = resize_bg_image(bg_img_path, bg_img_scale, bg_img_width_ratio, bg_img_height_ratio, pos_ratio, use_extend_bg_image, bg_w, bg_h, raise_exception) if bg_img.size != text_bg_im.size: bg_img = bg_img.resize(text_bg_im.size) im = Image.alpha_composite(bg_img, text_bg_im) bg_img.close() bg_img = None text_bg_im.close() text_bg_im = None if text_blur is not None and text_blur > 0: for i in range(text_blur): im = im.filter(ImageFilter.BLUR) if color_mode != "RGBA": im = im.convert(color_mode) if use_img_persp_trans: im = perspective_transform(im, limit_ratio=0.2, params=img_persp_trans_params, inter=Image.BICUBIC) if return_mask: im_mask = perspective_transform(im_mask, limit_ratio=0.2, params=img_persp_trans_params, inter=Image.NEAREST) if use_binarize: img = np.array(im) img = img[:, :, ::-1].copy() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) ret2, img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) im = Image.fromarray(img) img = None if target_width and target_height: im = im.resize((target_width, target_height), resample=Image.BILINEAR) if return_mask: im_mask = im_mask.resize((target_width, target_height), resample=Image.NEAREST) if output_path is None: if return_numpy: result = np.array(im) im.close() im = None if 
return_mask: mask_result = np.array(im_mask) im_mask.close() im_mask = None return result, mask_result else: return result else: if return_mask: return im, im_mask else: return im else: if not os.path.isdir(os.path.dirname(output_path)): os.makedirs(os.path.dirname(output_path)) im.save(output_path, format=image_format, subsampling=0, quality=image_quality) im.close() im = None if return_info: return img_width, img_height, text_x, text_y else: return True
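# Note (illustrative usage sketch, not part of the original module): a minimal
# call into create_text_image as defined above. "fonts/NanumGothic.ttf" is a
# hypothetical font path -- substitute any real TrueType font file.
#
#     import text_image_maker as tim
#     img = tim.create_text_image(
#         text="hello",
#         font_path="fonts/NanumGothic.ttf",  # assumption: replace with a real .ttf
#         font_size=64,
#         fg_color=(0, 0, 0, 255),
#         bg_color=(255, 255, 255, 255),
#         return_numpy=False,  # return a PIL image instead of a numpy array
#     )
#     img.save("hello.jpg")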
{"hexsha": "bbe7f7f19a06f97d4afcb6c8050a54c9104957e2", "size": 50653, "ext": "py", "lang": "Python", "max_stars_repo_path": "text_image_maker.py", "max_stars_repo_name": "jireh-father/InsightFace_Pytorch", "max_stars_repo_head_hexsha": "6d635cfabe88b15e6a65d1965c48b9266d71e7ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "text_image_maker.py", "max_issues_repo_name": "jireh-father/InsightFace_Pytorch", "max_issues_repo_head_hexsha": "6d635cfabe88b15e6a65d1965c48b9266d71e7ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "text_image_maker.py", "max_forks_repo_name": "jireh-father/InsightFace_Pytorch", "max_forks_repo_head_hexsha": "6d635cfabe88b15e6a65d1965c48b9266d71e7ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0102685624, "max_line_length": 120, "alphanum_fraction": 0.5850393856, "include": true, "reason": "import numpy", "num_tokens": 12568}
""" script to generate plots for a simulation Use: python plots.py sim_date mode Eg: python plots.py 2015-03-06 nowcast generates plots for the March 6, 2015 nowcast. sim_date corresponds to the date simulated. plots are stored in a directory mode/run_dat, where run_date is the date the simulation was run. """ import datetime import os import sys from glob import glob import matplotlib import netCDF4 as nc import scipy.io as sio matplotlib.use("Agg") from salishsea_tools.nowcast import figures paths = { "nowcast": "/data/dlatorne/MEOPAR/SalishSea/nowcast/", "forecast": "/ocean/sallen/allen/research/MEOPAR/SalishSea/forecast/", "forecast2": "/ocean/sallen/allen/research" "/MEOPAR/SalishSea/forecast2/", } model_path = "/ocean/sallen/allen/research/MEOPAR/Operational/" coastline = sio.loadmat("/ocean/rich/more/mmapbase/bcgeo/PNW.mat") bathy = nc.Dataset( "/data/nsoontie/MEOPAR/NEMO-forcing/grid/" "bathy_meter_SalishSea2.nc" ) def main(): sim_date = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d") mode = sys.argv[2] if mode == "nowcast": # make_all_plots() run_date = sim_date elif mode == "forecast": run_date = sim_date + datetime.timedelta(days=-1) elif mode == "forecast2": run_date = sim_date + datetime.timedelta(days=-2) os.mkdir(os.path.join(mode, run_date.strftime("%d%b%y").lower())) dmy = run_date.strftime("%d%b%y").lower() plots_dir = os.path.join(mode, dmy) results_dir = os.path.join(paths[mode], dmy) if mode == "nowcast": make_research_plots(dmy, model_path, bathy, results_dir, plots_dir, coastline) make_publish_plots(dmy, model_path, bathy, results_dir, plots_dir, coastline) def make_research_plots(dmy, model_path, bathy, results_dir, plots_dir, coastline): """Make the plots we wish to look at for research purposes.""" # get the results grid_T_dy = results_dataset("1d", "grid_T", results_dir) grid_T_hr = results_dataset("1h", "grid_T", results_dir) grid_U_dy = results_dataset("1d", "grid_U", results_dir) grid_V_dy = results_dataset("1d", "grid_V", results_dir) # do the plots fig = figures.thalweg_salinity(grid_T_dy) filename = os.path.join(plots_dir, f"Salinity_on_thalweg_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") fig = figures.compare_VENUS("East", grid_T_hr, bathy) filename = os.path.join(plots_dir, f"Compare_VENUS_East_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") fig = figures.compare_VENUS("Central", grid_T_hr, bathy) filename = os.path.join(plots_dir, f"Compare_VENUS_Central_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") def make_publish_plots(dmy, model_path, bathy, results_dir, plots_dir, coastline): """Make the plots we wish to publish.""" # get the results grid_T_hr = results_dataset("1h", "grid_T", results_dir) # do the plots fig = figures.website_thumbnail(bathy, grid_T_hr, model_path, coastline) filename = os.path.join(plots_dir, f"Website_thumbnail_{dmy}.png") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") fig = figures.plot_threshold_website(bathy, grid_T_hr, model_path, coastline) filename = os.path.join(plots_dir, f"Threshold_website_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor()) fig = figures.PA_tidal_predictions(grid_T_hr) filename = os.path.join(plots_dir, f"PA_tidal_predictions_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") fig = figures.compare_tidalpredictions_maxSSH( grid_T_hr, bathy, model_path, name="Victoria" ) filename = os.path.join(plots_dir, 
f"Vic_maxSSH_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") fig = figures.compare_tidalpredictions_maxSSH(grid_T_hr, bathy, model_path) filename = os.path.join(plots_dir, f"PA_maxSSH_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") fig = figures.compare_tidalpredictions_maxSSH( grid_T_hr, bathy, model_path, name="Campbell River" ) filename = os.path.join(plots_dir, f"CR_maxSSH_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor(), bbox_inches="tight") fig = figures.compare_water_levels(grid_T_hr, bathy, coastline) filename = os.path.join(plots_dir, f"NOAA_ssh_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor()) fig = figures.plot_thresholds_all(grid_T_hr, bathy, model_path, coastline) filename = os.path.join(plots_dir, f"WaterLevel_Thresholds_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor()) fig = figures.SandHeads_winds(grid_T_hr, bathy, model_path, coastline) filename = os.path.join(plots_dir, f"SH_wind_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor()) fig = figures.average_winds_at_station( grid_T_hr, bathy, model_path, coastline, station="all" ) filename = os.path.join(plots_dir, f"Avg_wind_vectors_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor()) fig = figures.winds_at_max_ssh( grid_T_hr, bathy, model_path, coastline, station="all" ) filename = os.path.join(plots_dir, f"Wind_vectors_at_max_{dmy}.svg") fig.savefig(filename, facecolor=fig.get_facecolor()) def results_dataset(period, grid, results_dir): """Return the results dataset for period (e.g. 1h or 1d) and grid (e.g. grid_T, grid_U) from results_dir. """ filename_pattern = "SalishSea_{period}_*_{grid}.nc" print(results_dir) filepaths = glob( os.path.join(results_dir, filename_pattern.format(period=period, grid=grid)) ) return nc.Dataset(filepaths[0]) main()
{"hexsha": "107eae066f7ad5f42d38ddc97d38e70970a42868", "size": 5892, "ext": "py", "lang": "Python", "max_stars_repo_path": "nowcast/plots.py", "max_stars_repo_name": "SalishSeaCast/SalishSeaNowcast", "max_stars_repo_head_hexsha": "947ba6fbb8952c7ae989a3aa96614b900748f55d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-02-06T01:10:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-11T01:06:10.000Z", "max_issues_repo_path": "nowcast/plots.py", "max_issues_repo_name": "SalishSeaCast/SalishSeaNowcast", "max_issues_repo_head_hexsha": "947ba6fbb8952c7ae989a3aa96614b900748f55d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2020-02-03T23:54:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T18:50:31.000Z", "max_forks_repo_path": "nowcast/plots.py", "max_forks_repo_name": "SalishSeaCast/SalishSeaNowcast", "max_forks_repo_head_hexsha": "947ba6fbb8952c7ae989a3aa96614b900748f55d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7631578947, "max_line_length": 86, "alphanum_fraction": 0.716904277, "include": true, "reason": "import scipy", "num_tokens": 1603}
% #############################################################################
% This is Chapter 7
% !TEX root = ../main.tex
% #############################################################################
% Change the Name of the Chapter in the following line
\fancychapter{Conclusion}
\cleardoublepage
% The following line allows to ref this chapter
\label{chap:conclusion}

This work focused on evaluating the security features of the Microsemi SmartFusion2 board, modelling its performance, and developing a proof-of-concept system to secure communication channels between devices.

% -----------------------------------------------------
% -----------------------------------------------------
\section{Overview}
\label{chap:conclusion:overview}

The SmartFusion2 SoC provides a varied array of security services using symmetric keys: AES encryption with 128 and 256 bit keys, SHA-256, HMAC authentication with 256 bit SHA, and a SHA based key derivation function. For asymmetric cryptography it offers ECDH for key generation, as well as additional ECC primitives over the NIST defined P-384 curve, which can be used to implement digital signatures. Lastly, it includes a true random number generator, a PUF based secure storage solution, tamper detection capabilities with several detection flags, and a zeroization feature with multiple recoverability options.

The prototype implemented on the device focused on a service that provides authentication and encryption for a TCP channel, using symmetric keys. The system is able to encrypt up to 36 KB of data using the AES and HMAC accelerators, with adequate 256 bit security. This limitation is imposed by the device's 80 KB of RAM. The hurdle was overcome by implementing a continuous authenticated encryption service, using an HMAC software implementation and taking advantage of the chaining characteristic of the AES CBC mode. A key generation service using asymmetric key pairs was also implemented; it generates a shared secret from an internal private key and a public key.

The system also has a limitation in its ECC primitives: it does not provide an ECDSA implementation. In order to implement signatures, a big number library must be included in the device. The inclusion of such a library is complex due to its tendency to be heavy in code space and the device's limited RAM, even when disabling error correction and detection (80 KB).

Regarding key management, the PUF secure storage service is limited to 1000 write cycles for a predicted twenty-year lifespan. To mitigate this, a key management service was developed which stores the keys encrypted in a non-volatile memory using the PUF service. This allows multiple keys to be updated with only one PUF write per update, instead of one write per key.

This work also contributes an extensive characterization of the SmartFusion2 device. It studied the advantages and possible trade-offs of each security service. Furthermore, it models the performance of every service, providing a useful prediction of the system's behaviour. Lastly, the implemented prototype provides solid groundwork for a secure communications service in a low-cost HSM device.

% -----------------------------------------------------
% -----------------------------------------------------
\section{Future Work}
\label{chap:conclusion:future-work}

This work can be improved by implementing fully working digital signatures. To achieve this, a lightweight big integer library, supporting all arithmetic operations required by the ECDSA algorithm, is needed. It should support 48 byte integers and have a maximum memory footprint of around 50 KB, to fit in the 80 KB of RAM with error detection and correction disabled, while keeping the secure communication and key management services enabled.

Another potential improvement is to study other software and FPGA cores, comparing them to the AES, HMAC and SHA SoC cores with regard to side-channel protection and performance. There is potential for including side-channel protected FPGA cores with better performance scaling as data sizes increase.

Regarding the secure communications prototype, the data transmission performance between the computer and the SmartFusion2 SoC can be improved by using the available USB connection, which provides a much higher throughput rate.

Additionally, the secure data exchange service only acts as a middleman to secure communications. Ideally, it would secure a real-time connection between two individuals. This can be achieved, e.g., by including a TCP or TLS library directly on the board, in order to allow two similar devices to establish a direct and secure connection.

% For signature generation of unlimited data sizes, a continuous SHA implementation is necessary, since the board does not offer one.
% Future work could also test, using the secure communications service, an encrypted connection between two similar devices, running the existing prototype. The connection could be TLS encrypted, using the board to encrypt and authenticate the data, with internal symmetric keys.
% This connection could be improved with a TLS implementation, using internal symmetric keys.
% Finally, the AES, SHA and HMAC cores are not side-channel protected. This could be improved by implementing the cryptographic cores of these services using the FPGA.
{"hexsha": "2b4a3b2bd20b96c2f695774a36d29826e556466e", "size": 5451, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapters/Thesis-MSc-Chapter_7.tex", "max_stars_repo_name": "LexVar/thesis_latex", "max_stars_repo_head_hexsha": "fbfd42ad61e74e6cde8e198d8cabe4a4e405a598", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapters/Thesis-MSc-Chapter_7.tex", "max_issues_repo_name": "LexVar/thesis_latex", "max_issues_repo_head_hexsha": "fbfd42ad61e74e6cde8e198d8cabe4a4e405a598", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapters/Thesis-MSc-Chapter_7.tex", "max_forks_repo_name": "LexVar/thesis_latex", "max_forks_repo_head_hexsha": "fbfd42ad61e74e6cde8e198d8cabe4a4e405a598", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 126.7674418605, "max_line_length": 596, "alphanum_fraction": 0.7591267657, "num_tokens": 996}
import tensorflow as tf
import numpy as np

ISHAPE = (1, 2, 3, 4)
OSHAPE = (int(np.prod(ISHAPE)),)  # np.prod: the np.product alias was removed in NumPy 2.0


def genWithKeras():
    data = tf.keras.Input(dtype='float32', name='input', batch_size=ISHAPE[0], shape=ISHAPE[1:])
    # Keras Reshape works per sample, so the batch dimension is kept: output shape (1, 24)
    reshape = tf.keras.layers.Reshape(OSHAPE, name='reshaped')(data)
    model = tf.keras.Model(inputs=[data], outputs=[reshape])
    return tf.lite.TFLiteConverter.from_keras_model(model)


def genWithTFModel():
    class ReshapeModule(tf.Module):
        def __init__(self):
            super(ReshapeModule, self).__init__()

        @tf.function(input_signature=[tf.TensorSpec(ISHAPE, tf.float32)])
        def __call__(self, data):
            # tf.reshape flattens the whole tensor: output shape (24,)
            return tf.reshape(data, OSHAPE)

    module = ReshapeModule()
    tf.saved_model.save(module, 'reshape.saved_model')
    return tf.lite.TFLiteConverter.from_saved_model('reshape.saved_model')


# Pick one of the two generators; both produce a single-op Reshape model.
converter = genWithKeras()
# converter = genWithTFModel()

tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
    f.write(tflite_model)
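# --- Hypothetical sanity check (an addition, not part of the original script):
# reload the freshly written model.tflite with the TFLite interpreter and print
# the output shape. Note the two generators are not identical: the Keras
# Reshape layer keeps the batch dimension, so it yields (1, 24), while the
# tf.Module path reshapes to (24,) outright.
interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
interpreter.set_tensor(inp['index'], np.zeros(inp['shape'], dtype=np.float32))
interpreter.invoke()
print('output shape:', interpreter.get_tensor(out['index']).shape)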
{"hexsha": "4962d9f536925cea5d21a99b9e25eebab30169b2", "size": 977, "ext": "py", "lang": "Python", "max_stars_repo_path": "misc/model-builder/reshape/tflite.py", "max_stars_repo_name": "jackwish/shrub", "max_stars_repo_head_hexsha": "acd14c72269c88e3143997288efcc6f0130c4c8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-23T01:16:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T23:16:21.000Z", "max_issues_repo_path": "misc/model-builder/reshape/tflite.py", "max_issues_repo_name": "jackwish/shrub", "max_issues_repo_head_hexsha": "acd14c72269c88e3143997288efcc6f0130c4c8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-23T01:09:54.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-23T01:09:54.000Z", "max_forks_repo_path": "misc/model-builder/reshape/tflite.py", "max_forks_repo_name": "jackwish/shrub", "max_forks_repo_head_hexsha": "acd14c72269c88e3143997288efcc6f0130c4c8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6060606061, "max_line_length": 94, "alphanum_fraction": 0.7236438076, "include": true, "reason": "import numpy", "num_tokens": 272}
subroutine foo02 print *, "foo02" end subroutine bar02 print *, "bar02" end
{"hexsha": "67385d8b25e00aa834ab4a77c2b8a5a54b9d5a19", "size": 117, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/PIPS/validation/Preprocessor/source01.src/module02.f", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/validation/Preprocessor/source01.src/module02.f", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/validation/Preprocessor/source01.src/module02.f", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 9.75, "max_line_length": 22, "alphanum_fraction": 0.4786324786, "num_tokens": 30}
# -*- coding: utf-8 -*- import numpy as np def format_results_table(results_table, header_names, row_names=None, operation=None, col_span=10, digits=4): """Build a customized text formatted table Parameters ---------- results_table : 2d array-like, shape = [n_rows, n_cols] Array of data to be formatted. header_names : list of strings, shape = [n_headers_name] List of names of each column. row_names : list of strings, shape = [n_headers_name] Optional list of row names. operation : string, [None (default), 'average', 'sum'] Optional parameter, it is required if the average or the sum of each column needs to be calculated. col_span : int Optional value to indicate the separation between two headers. digits : int Optional number of digits for formatting output floating point values. Returns ------- report: string Text formatted according to the headers names, row names and operation indicated. Examples -------- >>> from mltoolbox.utils.format import format_results_table >>> data = np.arange(10.0).reshape(2, 5) >>> headers = ['Accuracy', 'Precision', 'Recall', 'F1-score', 'AUC'] >>> print(format_results_table(data, headers, operation='average')) Accuracy Precision Recall F1-score AUC <BLANKLINE> 1 0.0000 1.0000 2.0000 3.0000 4.0000 2 5.0000 6.0000 7.0000 8.0000 9.0000 <BLANKLINE> Average 2.5000 3.5000 4.5000 5.5000 6.5000 <BLANKLINE> """ if len(header_names) != results_table.shape[1]: raise ValueError('The header_names size is different to number of columns in results_table ' + '; got %d and %d respectively' % (len(header_names), results_table.shape[1])) operation_values = (None, 'average', 'sum') if operation not in operation_values: raise ValueError('operation has to be one of ' + str(operation_values)) if row_names is None: row_names = [str(i + 1) for i in range(results_table.shape[0])] elif len(row_names) != results_table.shape[0]: raise ValueError('The row_names size is different to number of rows in results_table ' + '; got %d and %d respectively' % (len(row_names), results_table.shape[0])) # Space used when an operation is specified last_row = 'average' header_width = max(len(cn) for cn in header_names) row_width = max(len(cn) for cn in row_names) width = max(header_width, len(last_row), digits) width = max(width, row_width, digits) # Generate the format for the header cols = len(header_names) headers_format = u'{:>{width}s} ' + (u' {:>' + str(col_span) + '}') * cols report = headers_format.format(u'', *header_names, width=width) report += u'\n\n' # Generate the format for the rows row_format = u'{:>{width}s} ' + (u' {:>' + str(col_span) + '.{digits}f}') * cols + u'\n' rows = zip(row_names, results_table) for label, row in rows: report += row_format.format(label, *row, width=width, digits=digits) report += u'\n' mean_results = [] if operation == 'average': last_row = 'Average' mean_results = np.mean(results_table, axis=0) elif operation == 'sum': last_row = 'Sum' mean_results = np.sum(results_table, axis=0) if operation is not None: # Generate the operation row report += row_format.format(last_row, *mean_results, width=width, digits=digits) return report # if __name__ == '__main__': # data = np.arange(10.0).reshape(2, 5) # headers = ['Accuracy', 'Precision', 'Recall', 'F1-score', 'AUC'] # report = format_results_table(data, headers, operation='average') # text_file = open("tests/test_format_files/sum_long_word_header.txt", "w") # text_file.write(report) # text_file.close()
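# --- Hypothetical usage sketch (an addition, not part of the module): exercises
# the `row_names` and `operation='sum'` parameters, which the doctest above
# does not cover.
if __name__ == '__main__':
    scores = np.array([[0.90, 0.80], [0.70, 0.60]])
    print(format_results_table(scores, ['Precision', 'Recall'],
                               row_names=['fold-1', 'fold-2'],
                               operation='sum', digits=2))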
{"hexsha": "0fc02aed19fd58aa769f7561499ca87734ecdbb1", "size": 4065, "ext": "py", "lang": "Python", "max_stars_repo_path": "mltoolbox/utils/format.py", "max_stars_repo_name": "eegkno/mltoolbox", "max_stars_repo_head_hexsha": "54da8854f25c724f8dd1ee2517ff95bfa54b07d4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mltoolbox/utils/format.py", "max_issues_repo_name": "eegkno/mltoolbox", "max_issues_repo_head_hexsha": "54da8854f25c724f8dd1ee2517ff95bfa54b07d4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 72, "max_issues_repo_issues_event_min_datetime": "2017-03-10T04:52:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-15T17:46:56.000Z", "max_forks_repo_path": "mltoolbox/utils/format.py", "max_forks_repo_name": "eegkno/mltoolbox", "max_forks_repo_head_hexsha": "54da8854f25c724f8dd1ee2517ff95bfa54b07d4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-22T17:44:23.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-22T17:44:23.000Z", "avg_line_length": 37.2935779817, "max_line_length": 109, "alphanum_fraction": 0.6164821648, "include": true, "reason": "import numpy", "num_tokens": 1029}
""" Run MuJoCo Maze experiments. """ import os from typing import Optional import click import numpy as np from torch import optim import nets import ppimoc import our_oc import rainy import vis_mjmaze from rainy.envs import EnvExt, pybullet_parallel from rainy.net import option_critic as oc from rainy.net.policy import PerOptionStdGaussianDist, SeparateStdGaussianDist BILLIARD_GOALS = [(2.0, -3.0), (-2.0, -3.0), (-2.0, 1.0), (2.0, 1.0)] def _change_billiard_goal(primary: int) -> dict: return { "primary_goal": BILLIARD_GOALS[primary], "subgoals": BILLIARD_GOALS[:primary] + BILLIARD_GOALS[primary + 1 :], } class MazeEnvExt(EnvExt): CUSTOM_ENVS = { "PointTRoom2": ("PointTRoom-v1", {"goal": (-2.0, -3.0)}), "PointTRoomSub2": ( "PointTRoom-v2", {"primary_goal": (-2.0, -3.0), "subgoal": (2.0, -3.0)}, ), "PointBilliard2": ("PointBilliard-v2", _change_billiard_goal(1)), "PointBilliard3": ("PointBilliard-v2", _change_billiard_goal(2)), "PointBilliard4": ("PointBilliard-v2", _change_billiard_goal(3)), } def __init__(self, name: str) -> None: import gym import mujoco_maze # noqa if name in self.CUSTOM_ENVS: name, task_kwargs = self.CUSTOM_ENVS[name] super().__init__(gym.make(name, task_kwargs=task_kwargs)) else: if "Swimmer" in name or "Reacher" in name: super().__init__(gym.make(name, forward_reward_weight=0.0)) else: super().__init__(gym.make(name)) self.action_shift = self.action_space.low self.action_scale = self.action_space.high - self.action_space.low def step(self, action): action = self.action_scale / (1.0 + np.exp(-action)) + self.action_shift return super().step(action) def select_agent(agent_name: str, **kwargs) -> rainy.agents.Agent: AGENTS = { "ppimoc": ppimoc.PPImocAgent, "ppo": rainy.agents.PPOAgent, "ppoc": rainy.agents.PPOCAgent, "our-ppoc": our_oc.OurPPOCAgent, } return AGENTS[agent_name] @rainy.subcommand() @click.argument("new-envname", type=str) @click.option("--comment", type=str, default=None) @click.option("--logdir", type=str, default=None) @click.option("--additional-steps", type=int, default=None) @click.option("--eval-render", is_flag=True) def train_and_adapt( ctx: click.Context, new_envname: str, comment: Optional[str], logdir: Optional[str], additional_steps: Optional[int], eval_render: bool, ) -> None: new_envs = new_envname.split("/") click.secho(f"adapted envs: {new_envs}", fg="red") experiment = ctx.obj.experiment script_path = ctx.obj.script_path if script_path is not None: fingerprint = dict( comment="" if comment is None else comment, kwargs=ctx.obj.kwargs, ) experiment.logger.setup_from_script_path( script_path, dirname=logdir, fingerprint=fingerprint ) cfg = experiment.config cfg.keep_logger = True experiment.train(eval_render=eval_render) for i, new_env in enumerate(new_envs): cfg.eval_env.close() if i + 1 == len(new_envs): cfg.keep_logger = False # Set new environments cfg.set_env(lambda: MazeEnvExt(new_env)) cfg.eval_env = MazeEnvExt(new_env) experiment.ag.penv = cfg.parallel_env() experiment.ag.eval_penv = None additional_steps = additional_steps or cfg.max_steps if cfg.logmu_weight_min is not None: experiment.ag._opt_logp_cooler = rainy.lib.explore.LinearCooler( cfg.logmu_weight, cfg.logmu_weight_min, additional_steps // cfg.nworkers, ) experiment._retrain_impl(additional_steps, eval_render=eval_render) @rainy.main(script_path=os.path.realpath(__file__), agent_selector=select_agent) @rainy.option("--visualize-beta", "-VB", is_flag=True) @rainy.option("--visualize-policy", "-VP", is_flag=True) @rainy.option("--use-separated-network", "-SN", is_flag=True) 
@rainy.option("--not-upgoing", "-NU", is_flag=True) def main( envname: str = "PointUMaze-v1", max_steps: int = 4, num_options: int = 4, visualize_beta: bool = False, visualize_policy: bool = False, normalize_reward: bool = False, entropy_weight: float = 0.001, pimu_entropy_weight: float = 0.004, logmu_weight: float = 0.4, logmu_weight_min: Optional[float] = None, beta_logit_clip: float = 0.1, beta_loss_weight: float = 1.0, pimu_mc_rollout: int = 20, adv_type: str = "upgoing", option_selector: str = "logp", agent_name: str = "ppimoc", use_separated_network: bool = False, not_upgoing: bool = False, eval_times: int = 4, ) -> rainy.Config: c = rainy.Config() if visualize_beta or visualize_policy: if agent_name in ["ppimoc", "our-ppoc", "ppoc"]: c.eval_hooks.append( vis_mjmaze.ImocVis(num_options, vis_policy=visualize_policy) ) elif agent_name == "ppo": c.eval_hooks.append(vis_mjmaze.A2CVis()) else: raise NotImplementedError("Visualizer for PPOC is not yet implemented") if max_steps < 20: max_steps *= int(1e6) c.max_steps = max_steps # Environment settings c.set_env(lambda: MazeEnvExt(envname)) c.eval_env = MazeEnvExt(envname) c.discount_factor = 0.99 c.adv_type = adv_type c.set_parallel_env( pybullet_parallel(normalize_obs=False, normalize_reward=normalize_reward) ) # Algorithm specific configurations if "pp" in agent_name: c.nworkers = 16 c.nsteps = 256 c.ppo_minibatch_size = (c.nworkers * c.nsteps) // 4 c.ppo_epochs = 10 c.set_optimizer(lambda params: optim.Adam(params, lr=3e-4, eps=1e-4)) c.adv_normalize_eps = None c.ppo_clip = 0.2 c.use_gae = True else: raise NotImplementedError(f"NotImplemented agent: {agent_name}") # Option parameters c.option_selector = option_selector c.logmu_weight = logmu_weight c.logmu_weight_min = logmu_weight_min c.opt_model_capacity = c.nworkers * c.nsteps c.opt_model_batch_size = c.opt_model_capacity // 2 c.set_explorer(lambda: rainy.lib.explore.EpsGreedy(0.1)) c.set_explorer(lambda: rainy.lib.explore.EpsGreedy(0.1), key="eval") c.grad_clip = 0.5 c.pimu_mc_rollout = pimu_mc_rollout if not_upgoing: c.upgoing_adv = False # optimization parameters c.entropy_weight = entropy_weight c.pimu_entropy_weight = pimu_entropy_weight c.value_loss_weight = 1.0 c.beta_loss_weight = beta_loss_weight c.beta_logit_clip = beta_logit_clip c.set_net_fn( "actor-critic", rainy.net.actor_critic.fc_shared(policy=SeparateStdGaussianDist), ) if agent_name == "ppoc": c.set_net_fn( "option-critic", oc.fc_shared( num_options=num_options, policy=PerOptionStdGaussianDist, has_mu=True, ), ) else: if use_separated_network: c.set_net_fn( "option-critic", nets.fc_separated( num_options=num_options, policy=PerOptionStdGaussianDist, ), ) else: c.set_net_fn( "option-critic", nets.fc_shared( num_options=num_options, policy=PerOptionStdGaussianDist, ), ) c.episode_log_freq = 100 c.network_log_freq = 10 c.eval_times = eval_times c.eval_freq = c.max_steps // 50 return c if __name__ == "__main__": main()
{"hexsha": "7a4585f9b8f68adfdb354c3aeda008cb289386f9", "size": 7810, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/run_mjmaze.py", "max_stars_repo_name": "kngwyu/infomax-option-critic", "max_stars_repo_head_hexsha": "9d907c041c1d0280db9b23eb2fdf9e0033e33bf3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-24T05:29:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-19T09:27:49.000Z", "max_issues_repo_path": "src/run_mjmaze.py", "max_issues_repo_name": "kngwyu/infomax-option-critic", "max_issues_repo_head_hexsha": "9d907c041c1d0280db9b23eb2fdf9e0033e33bf3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/run_mjmaze.py", "max_forks_repo_name": "kngwyu/infomax-option-critic", "max_forks_repo_head_hexsha": "9d907c041c1d0280db9b23eb2fdf9e0033e33bf3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8151260504, "max_line_length": 86, "alphanum_fraction": 0.6366197183, "include": true, "reason": "import numpy", "num_tokens": 2057}
""" The paramselect module handles automated parameter selection for linear models. Automated Parameter Selection End-members Note: All magnetic parameters from literature for now. Note: No fitting below 298 K (so neglect third law issues for now). For each step, add one parameter at a time and compute AICc with max likelihood. Cp - TlnT, T**2, T**-1, T**3 - 4 candidate models (S and H only have one required parameter each. Will fit in full MCMC procedure) Choose parameter set with best AICc score. """ import sys from typing import List, Dict import logging import operator from collections import OrderedDict import numpy as np import sympy from sympy import Symbol from tinydb import where from pycalphad import Database, Model, variables as v import espei.refdata from espei.database_utils import initialize_database from espei.core_utils import get_prop_data, filter_configurations, filter_temperatures, symmetry_filter from espei.error_functions.non_equilibrium_thermochemical_error import get_prop_samples from espei.parameter_selection.model_building import build_candidate_models from espei.parameter_selection.selection import select_model from espei.parameter_selection.utils import get_data_quantities, feature_transforms, _get_sample_condition_dicts from espei.sublattice_tools import generate_symmetric_group, generate_interactions, \ tuplify, recursive_tuplify, interaction_test, endmembers_from_interaction, generate_endmembers from espei.utils import PickleableTinyDB, sigfigs, extract_aliases _log = logging.getLogger(__name__) def _param_present_in_database(dbf, phase_name, configuration, param_type): const_arr = tuple([tuple(map(lambda x: v.Species(x), subl)) for subl in map(tuplify, configuration)]) # parameter order doesn't matter here, since the generated might not exactly match. Always override. query = (where('phase_name') == phase_name) & \ (where('parameter_type') == param_type) & \ (where('constituent_array') == const_arr) search_result = dbf._parameters.search(query) if len(search_result) > 0: return True def _build_feature_matrix(sample_condition_dicts: List[Dict[Symbol, float]], symbolic_coefficients: List[Symbol]): """ Builds A for solving x = A\\b. A is an MxN matrix of M sampled data points and N is the symbolic coefficients. Parameters ---------- sample_condition_dicts : List[Dict[Symbol, float]] List of length ``M`` containing the conditions (T, P, YS, Z, V_I, V_J, V_K) for each sampled point. symbolic_coefficients : List[Symbol] Symbolic coefficients of length ```N`` (e.g. ``v.T``, ``YS``) of the features corresponding to the variables that will be fit. Returns ------- ArrayLike MxN array of coefficients with sampled data conditions plugged in. """ M = len(sample_condition_dicts) N = len(symbolic_coefficients) feature_matrix = np.empty((M, N)) for i in range(M): for j in range(N): feature_matrix[i, j] = symbolic_coefficients[j].subs(sample_condition_dicts[i]) return feature_matrix def fit_formation_energy(dbf, comps, phase_name, configuration, symmetry, datasets, ridge_alpha=None, aicc_phase_penalty=None, features=None): """ Find suitable linear model parameters for the given phase. We do this by successively fitting heat capacities, entropies and enthalpies of formation, and selecting against criteria to prevent overfitting. The "best" set of parameters minimizes the error without overfitting. Parameters ---------- dbf : Database pycalphad Database. Partially complete, so we know what degrees of freedom to fix. comps : [str] Names of the relevant components. 
phase_name : str Name of the desired phase for which the parameters will be found. configuration : ndarray Configuration of the sublattices for the fitting procedure. symmetry : [[int]] Symmetry of the sublattice configuration. datasets : PickleableTinyDB All the datasets desired to fit to. ridge_alpha : float Value of the :math:`\\alpha` hyperparameter used in ridge regression. Defaults to 1.0e-100, which should be degenerate with ordinary least squares regression. For now, the parameter is applied to all features. aicc_feature_factors : dict Map of phase name to feature to a multiplication factor for the AICc's parameter penalty. features : dict Maps "property" to a list of features for the linear model. These will be transformed from "GM" coefficients e.g., {"CPM_FORM": (v.T*sympy.log(v.T), v.T**2, v.T**-1, v.T**3)} (Default value = None) Returns ------- dict {feature: estimated_value} """ aicc_feature_factors = aicc_phase_penalty if aicc_phase_penalty is not None else {} if interaction_test(configuration): _log.debug('ENDMEMBERS FROM INTERACTION: %s', endmembers_from_interaction(configuration)) fitting_steps = (["CPM_FORM", "CPM_MIX"], ["SM_FORM", "SM_MIX"], ["HM_FORM", "HM_MIX"]) else: # We are only fitting an endmember; no mixing data needed fitting_steps = (["CPM_FORM"], ["SM_FORM"], ["HM_FORM"]) # create the candidate models and fitting steps if features is None: features = OrderedDict([("CPM_FORM", (v.T * sympy.log(v.T), v.T**2, v.T**-1, v.T**3)), ("SM_FORM", (v.T,)), ("HM_FORM", (sympy.S.One,)), ]) # dict of {feature, [candidate_models]} candidate_models_features = build_candidate_models(configuration, features) # All possible parameter values that could be taken on. This is some legacy # code from before there were many candidate models built. For very large # sets of candidate models, this could be quite slow. # TODO: we might be able to remove this initialization for clarity, depends on fixed poritions parameters = {} for candidate_models in candidate_models_features.values(): for model in candidate_models: for coef in model: parameters[coef] = 0 # These is our previously fit partial model from previous steps # Subtract out all of these contributions (zero out reference state because these are formation properties) fixed_model = Model(dbf, comps, phase_name, parameters={'GHSER'+(c.upper()*2)[:2]: 0 for c in comps}) fixed_portions = [0] for desired_props in fitting_steps: feature_type = desired_props[0].split('_')[0] # HM_FORM -> HM aicc_factor = aicc_feature_factors.get(feature_type, 1.0) solver_qry = (where('solver').test(symmetry_filter, configuration, recursive_tuplify(symmetry) if symmetry else symmetry)) desired_data = get_prop_data(comps, phase_name, desired_props, datasets, additional_query=solver_qry) desired_data = filter_configurations(desired_data, configuration, symmetry) desired_data = filter_temperatures(desired_data) _log.trace('%s: datasets found: %s', desired_props, len(desired_data)) if len(desired_data) > 0: config_tup = tuple(map(tuplify, configuration)) calculate_dict = get_prop_samples(desired_data, config_tup) sample_condition_dicts = _get_sample_condition_dicts(calculate_dict, list(map(len, config_tup))) weights = calculate_dict['weights'] assert len(sample_condition_dicts) == len(weights) # We assume all properties in the same fitting step have the same # features (all CPM, all HM, etc., but different ref states). 
            # data quantities are the same for each candidate model and can be computed up front
            data_qtys = get_data_quantities(feature_type, fixed_model, fixed_portions, desired_data, sample_condition_dicts)
            # build the candidate model transformation matrix and response vector (A, b in Ax=b)
            feature_matrices = []
            data_quantities = []
            for candidate_coefficients in candidate_models_features[desired_props[0]]:
                # Map coefficients in G to coefficients in the feature_type (H, S, CP)
                transformed_coefficients = list(map(feature_transforms[feature_type], candidate_coefficients))
                # the feature matrix is built the same way for endmembers and for
                # binary/ternary interactions
                feature_matrices.append(_build_feature_matrix(sample_condition_dicts, transformed_coefficients))
                data_quantities.append(data_qtys)
            # provide candidate models and get back a selected model.
            selected_model = select_model(zip(candidate_models_features[desired_props[0]], feature_matrices, data_quantities), ridge_alpha, weights=weights, aicc_factor=aicc_factor)
            selected_features, selected_values = selected_model
            parameters.update(zip(*(selected_features, selected_values)))
            # Add these parameters to be fixed for the next fitting step
            fixed_portion = np.array(selected_features, dtype=np.object_)
            fixed_portion = np.dot(fixed_portion, selected_values)
            fixed_portions.append(fixed_portion)
    return parameters


def get_next_symbol(dbf):
    """
    Return a string name of the next free symbol to set

    Parameters
    ----------
    dbf : Database
        pycalphad Database. Must have the ``varcounter`` attribute set to an integer.

    Returns
    -------
    str
    """
    # TODO: PEP-572 optimization
    symbol_name = 'VV' + str(dbf.varcounter).zfill(4)
    while dbf.symbols.get(symbol_name, None) is not None:
        dbf.varcounter += 1
        symbol_name = 'VV' + str(dbf.varcounter).zfill(4)
    return symbol_name


def fit_ternary_interactions(dbf, phase_name, symmetry, endmembers, datasets, ridge_alpha=None, aicc_phase_penalty=None):
    """
    Fit ternary interactions for a database in place

    Parameters
    ----------
    dbf : Database
        pycalphad Database to add parameters to
    phase_name : str
        Name of the phase to fit
    symmetry : list
        List of symmetric sublattices, e.g. [[0, 1, 2], [3, 4]]
    endmembers : list
        List of endmember tuples, e.g. [('CU', 'MG')]
    datasets : PickleableTinyDB
        TinyDB database of datasets
    ridge_alpha : float
        Value of the :math:`\\alpha` hyperparameter used in ridge regression. Defaults
        to 1.0e-100, which should be degenerate with ordinary least squares regression.
        For now, the parameter is applied to all features.

    Returns
    -------
    None
        Modifies the Database in place
    """
    numdigits = 6  # number of significant figures, might cause rounding errors
    interactions = generate_interactions(endmembers, order=3, symmetry=symmetry)
    _log.trace('%s distinct ternary interactions', len(interactions))
    for interaction in interactions:
        ixx = interaction
        config = tuple(map(tuplify, ixx))
        if _param_present_in_database(dbf, phase_name, config, 'L'):
            _log.warning('INTERACTION: %s already in Database. Skipping.', ixx)
            continue
        else:
            _log.trace('INTERACTION: %s', ixx)
        parameters = fit_formation_energy(dbf, sorted(dbf.elements), phase_name, ixx, symmetry, datasets, ridge_alpha, aicc_phase_penalty=aicc_phase_penalty)
        # Organize parameters by polynomial degree
        degree_polys = np.zeros(3, dtype=np.object_)
        YS = Symbol('YS')
        # asymmetric parameters should have Mugiannu V_I/V_J/V_K, while symmetric just has YS
        is_asymmetric = any([(k.has(Symbol('V_I'))) and (v != 0) for k, v in parameters.items()])
        if is_asymmetric:
            params = [(2, YS*Symbol('V_K')), (1, YS*Symbol('V_J')), (0, YS*Symbol('V_I'))]  # (excess parameter degree, symbol) tuples
        else:
            params = [(0, YS)]  # (excess parameter degree, symbol) tuples
        for degree, check_symbol in params:
            keys_to_remove = []
            for key, value in sorted(parameters.items(), key=str):
                if key.has(check_symbol):
                    if value != 0:
                        symbol_name = get_next_symbol(dbf)
                        dbf.symbols[symbol_name] = sigfigs(parameters[key], numdigits)
                        parameters[key] = Symbol(symbol_name)
                    coef = parameters[key] * (key / check_symbol)
                    try:
                        coef = float(coef)
                    except TypeError:
                        pass
                    degree_polys[degree] += coef
                    keys_to_remove.append(key)
            for key in keys_to_remove:
                parameters.pop(key)
        _log.trace('Polynomial coefs: %s', degree_polys)
        # Insert into database
        symmetric_interactions = generate_symmetric_group(interaction, symmetry)
        for degree in np.arange(degree_polys.shape[0]):
            if degree_polys[degree] != 0:
                for syminter in symmetric_interactions:
                    dbf.add_parameter('L', phase_name, tuple(map(tuplify, syminter)), degree, degree_polys[degree])


def phase_fit(dbf, phase_name, symmetry, datasets, refdata, ridge_alpha, aicc_penalty=None, aliases=None):
    """Generate an initial CALPHAD model for a given phase and sublattice model.

    Parameters
    ----------
    dbf : Database
        pycalphad Database to add parameters to.
    phase_name : str
        Name of the phase.
    symmetry : [[int]]
        Sublattice model symmetry.
    datasets : PickleableTinyDB
        All datasets to consider for the calculation.
    refdata : dict
        Maps tuple(element, phase_name) -> SymPy object defining
        energy relative to SER
    ridge_alpha : float
        Value of the :math:`\\alpha` hyperparameter used in ridge regression. Defaults
        to 1.0e-100, which should be degenerate with ordinary least squares regression.
        For now, the parameter is applied to all features.
    aicc_penalty : dict
        Map of phase name to feature to a multiplication factor for the AICc's parameter penalty.
    aliases : Dict[str, str]
        Mapping of possible aliases to the Database phase names.

    Returns
    -------
    None
        Modifies the dbf.
""" aicc_penalty = aicc_penalty if aicc_penalty is not None else {} aicc_phase_penalty = aicc_penalty.get(phase_name, {}) if not hasattr(dbf, 'varcounter'): dbf.varcounter = 0 phase_obj = dbf.phases[phase_name] # TODO: assumed pure elements - add proper support for Species objects subl_model = [sorted([sp.name for sp in subl]) for subl in phase_obj.constituents] site_ratios = phase_obj.sublattices # First fit endmembers all_em_count = len(generate_endmembers(subl_model)) # number of total endmembers endmembers = generate_endmembers(subl_model, symmetry) # Number of significant figures in parameters, might cause rounding errors numdigits = 6 em_dict = {} # TODO: use the global aliases dictionary passed in as-is instead of converting it to a phase-local dict # TODO: use the aliases dictionary in dataset queries to find relevant data if aliases is None: aliases = [phase_name] else: aliases = sorted([alias for alias, database_phase in aliases.items() if database_phase == phase_name]) _log.info('FITTING: %s', phase_name) _log.trace('%s endmembers (%s distinct by symmetry)', all_em_count, len(endmembers)) all_endmembers = [] for endmember in endmembers: symmetric_endmembers = generate_symmetric_group(endmember, symmetry) all_endmembers.extend(symmetric_endmembers) if _param_present_in_database(dbf, phase_name, endmember, 'G'): _log.trace('ENDMEMBER: %s already in Database. Skipping.', endmember) continue else: _log.trace('ENDMEMBER: %s', endmember) # Some endmembers are fixed by our choice of standard lattice stabilities, e.g., SGTE91 # If a (phase, pure component endmember) tuple is fixed, we should use that value instead of fitting endmember_comps = list(set(endmember)) fit_eq = None # only one non-VA component, or two components but the other is VA and its only the last sublattice if ((len(endmember_comps) == 1) and (endmember_comps[0] != 'VA')) or\ ((len(endmember_comps) == 2) and (endmember[-1] == 'VA') and (len(set(endmember[:-1])) == 1)): # this is a "pure component endmember" # try all phase name aliases until we get run out or get a hit em_comp = list(set(endmember_comps) - {'VA'})[0] sym_name = None for name in aliases: sym_name = 'G'+name[:3].upper()+em_comp.upper() stability = refdata.get((em_comp.upper(), name.upper()), None) if stability is not None: if isinstance(stability, sympy.Piecewise): # Default zero required for the compiled backend if (0, True) not in stability.args: new_args = stability.args + ((0, True),) stability = sympy.Piecewise(*new_args) dbf.symbols[sym_name] = stability break if dbf.symbols.get(sym_name, None) is not None: num_moles = sum([sites for elem, sites in zip(endmember, site_ratios) if elem != 'VA']) fit_eq = num_moles * Symbol(sym_name) _log.trace("Found lattice stability: %s", sym_name) _log.debug("%s = %s", sym_name, dbf.symbols[sym_name]) if fit_eq is None: # No reference lattice stability data -- we have to fit it parameters = fit_formation_energy(dbf, sorted(dbf.elements), phase_name, endmember, symmetry, datasets, ridge_alpha, aicc_phase_penalty=aicc_phase_penalty) for key, value in sorted(parameters.items(), key=str): if value == 0: continue symbol_name = get_next_symbol(dbf) dbf.symbols[symbol_name] = sigfigs(value, numdigits) parameters[key] = Symbol(symbol_name) fit_eq = sympy.Add(*[value * key for key, value in parameters.items()]) ref = 0 for subl, ratio in zip(endmember, site_ratios): if subl == 'VA': continue subl = (subl.upper()*2)[:2] ref = ref + ratio * Symbol('GHSER'+subl) fit_eq += ref _log.trace('SYMMETRIC_ENDMEMBERS: %s', 
symmetric_endmembers) for em in symmetric_endmembers: em_dict[em] = fit_eq dbf.add_parameter('G', phase_name, tuple(map(tuplify, em)), 0, fit_eq) _log.trace('FITTING BINARY INTERACTIONS') bin_interactions = generate_interactions(all_endmembers, order=2, symmetry=symmetry) _log.trace('%s distinct binary interactions', len(bin_interactions)) for interaction in bin_interactions: ixx = [] for i in interaction: if isinstance(i, (tuple, list)): ixx.append(tuple(i)) else: ixx.append(i) ixx = tuple(ixx) config = tuple(map(tuplify, ixx)) if _param_present_in_database(dbf, phase_name, config, 'L'): _log.trace('INTERACTION: %s already in Database', ixx) continue else: _log.trace('INTERACTION: %s', ixx) parameters = fit_formation_energy(dbf, sorted(dbf.elements), phase_name, ixx, symmetry, datasets, ridge_alpha, aicc_phase_penalty=aicc_phase_penalty) # Organize parameters by polynomial degree degree_polys = np.zeros(10, dtype=np.object_) for degree in reversed(range(10)): check_symbol = Symbol('YS') * Symbol('Z')**degree keys_to_remove = [] for key, value in sorted(parameters.items(), key=str): if key.has(check_symbol): if value != 0: symbol_name = get_next_symbol(dbf) dbf.symbols[symbol_name] = sigfigs(parameters[key], numdigits) parameters[key] = Symbol(symbol_name) coef = parameters[key] * (key / check_symbol) try: coef = float(coef) except TypeError: pass degree_polys[degree] += coef keys_to_remove.append(key) for key in keys_to_remove: parameters.pop(key) _log.trace('Polynomial coefs: %s', degree_polys.tolist()) # Insert into database symmetric_interactions = generate_symmetric_group(interaction, symmetry) for degree in np.arange(degree_polys.shape[0]): if degree_polys[degree] != 0: for syminter in symmetric_interactions: dbf.add_parameter('L', phase_name, tuple(map(tuplify, syminter)), degree, degree_polys[degree]) _log.trace('FITTING TERNARY INTERACTIONS') fit_ternary_interactions(dbf, phase_name, symmetry, all_endmembers, datasets, aicc_phase_penalty=aicc_phase_penalty) if hasattr(dbf, 'varcounter'): del dbf.varcounter def generate_parameters(phase_models, datasets, ref_state, excess_model, ridge_alpha=None, aicc_penalty_factor=None, dbf=None): """Generate parameters from given phase models and datasets Parameters ---------- phase_models : dict Dictionary of components and phases to fit. datasets : PickleableTinyDB database of single- and multi-phase to fit. ref_state : str String of the reference data to use, e.g. 'SGTE91' or 'SR2016' excess_model : str String of the type of excess model to fit to, e.g. 'linear' ridge_alpha : float Value of the :math:`\\alpha` hyperparameter used in ridge regression. Defaults to None, which falls back to ordinary least squares regression. For now, the parameter is applied to all features. aicc_penalty_factor : dict Map of phase name to feature to a multiplication factor for the AICc's parameter penalty. dbf : Database Initial pycalphad Database that can have parameters that would not be fit by ESPEI Returns ------- pycalphad.Database """ # Set NumPy print options so logged arrays print on one line. Reset at the end. 
np.set_printoptions(linewidth=sys.maxsize) _log.info('Generating parameters.') _log.trace('Found the following user reference states: %s', espei.refdata.INSERTED_USER_REFERENCE_STATES) refdata = getattr(espei.refdata, ref_state) aliases = extract_aliases(phase_models) dbf = initialize_database(phase_models, ref_state, dbf) # Fit phases in alphabetic order so the VV#### counter is constistent between runs for phase_name, phase_data in sorted(phase_models['phases'].items(), key=operator.itemgetter(0)): if phase_name in dbf.phases: symmetry = phase_data.get('equivalent_sublattices', None) phase_fit(dbf, phase_name, symmetry, datasets, refdata, ridge_alpha, aicc_penalty=aicc_penalty_factor, aliases=aliases) _log.info('Finished generating parameters.') np.set_printoptions(linewidth=75) return dbf
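# --- Illustration (an addition; not ESPEI's actual `select_model`): the
# AICc-based selection sketched in the module docstring, reduced to its core.
# Each candidate feature matrix is fit by ordinary least squares and the
# candidate with the lowest corrected AIC wins, so extra parameters must earn
# their keep. Assumes a strictly positive residual sum of squares.
def _aicc(rss, n_samples, n_params):
    # corrected Akaike information criterion for a Gaussian likelihood
    aic = n_samples * np.log(rss / n_samples) + 2 * n_params
    correction = (2 * n_params * (n_params + 1)) / max(n_samples - n_params - 1, 1)
    return aic + correction


def _pick_candidate(feature_matrices, response):
    scores = []
    for A in feature_matrices:
        x, residuals, *_ = np.linalg.lstsq(A, response, rcond=None)
        rss = residuals[0] if residuals.size else float(np.sum((A @ x - response) ** 2))
        scores.append(_aicc(rss, len(response), A.shape[1]))
    return int(np.argmin(scores))  # index of the selected candidate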
{"hexsha": "5b66d10b0e4a85984f7b5be2aa9894e96a69e42e", "size": 23680, "ext": "py", "lang": "Python", "max_stars_repo_path": "espei/paramselect.py", "max_stars_repo_name": "wahab2604/ESPEI", "max_stars_repo_head_hexsha": "70a4185ce87a125e926f88e7ef93c02276fd6e90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2017-11-03T03:07:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T02:41:59.000Z", "max_issues_repo_path": "espei/paramselect.py", "max_issues_repo_name": "richardotis/ESPEI", "max_issues_repo_head_hexsha": "70a4185ce87a125e926f88e7ef93c02276fd6e90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 122, "max_issues_repo_issues_event_min_datetime": "2017-06-23T16:34:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:26:01.000Z", "max_forks_repo_path": "espei/paramselect.py", "max_forks_repo_name": "richardotis/ESPEI", "max_forks_repo_head_hexsha": "70a4185ce87a125e926f88e7ef93c02276fd6e90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2017-06-18T02:36:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T00:17:21.000Z", "avg_line_length": 47.8383838384, "max_line_length": 182, "alphanum_fraction": 0.6584881757, "include": true, "reason": "import numpy,import sympy,from sympy", "num_tokens": 5382}
import numpy as np from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM import keras import string # transform the input series and window-size into a set of input/output pairs for use with our RNN model def window_transform_series(series, window_size): # containers for input/output pairs X = [] y = [] for i in range(len(series) - window_size): X.append(series[i:i + window_size]) y.append(series[i + window_size]) # reshape each X = np.asarray(X) X.shape = (np.shape(X)[0:2]) y = np.asarray(y) y.shape = (len(y),1) return X,y # build an RNN to perform regression on our time series input/output data # layer 1 uses an LSTM module with 5 hidden units (note here the input_shape = (window_size,1)) # layer 2 uses a fully connected module with one unit def build_part1_RNN(window_size): m = Sequential() m.add(LSTM(5, input_shape = (window_size,1))) m.add(Dense(1)) return m ### return the text input with only ascii lowercase and the punctuation given below included. def cleaned_text(text): punctuation = ['!', ',', '.', ':', ';', '?', ' '] abc = list(string.ascii_lowercase) text = [c for c in list(text) if c in punctuation + abc] return "".join(text) ### transform the input text and window-size into a set of input/output pairs for use with our RNN model def window_transform_text(text, window_size, step_size): # containers for input/output pairs inputs = [] outputs = [] for i in range(0, len(text) - window_size, step_size): inputs.append(text[i:i + window_size]) outputs.append(text[i + window_size]) return inputs,outputs # build the required RNN model: # a single LSTM hidden layer with softmax activation, categorical_crossentropy loss #layer 1 should be an LSTM module with 200 hidden units --> note this should have input_shape = (window_size,len(chars)) where len(chars) = number of unique characters in your cleaned text #layer 2 should be a linear module, fully connected, with len(chars) hidden units --> where len(chars) = number of unique characters in your cleaned text #layer 3 should be a softmax activation ( since we are solving a multiclass classification) def build_part2_RNN(window_size, num_chars): m = Sequential() m.add(LSTM(200, input_shape=(window_size, num_chars))) m.add(Dense(num_chars, activation='softmax')) return m
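# --- Quick sanity check (an addition, not part of the original submission):
# sliding windows over the series 0..4 with window_size=2 should produce three
# input/output pairs.
if __name__ == '__main__':
    X, y = window_transform_series(np.arange(5), 2)
    print(X)  # [[0 1] [1 2] [2 3]]
    print(y)  # [[2] [3] [4]]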
{"hexsha": "4042bf0ed2c9f28f54bfff491cce0d1e7ef10bb2", "size": 2470, "ext": "py", "lang": "Python", "max_stars_repo_path": "my_answers.py", "max_stars_repo_name": "anoff/aind-rnn", "max_stars_repo_head_hexsha": "c1c4742f2bc8f30a3e71d4ca58e171a445c90340", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "my_answers.py", "max_issues_repo_name": "anoff/aind-rnn", "max_issues_repo_head_hexsha": "c1c4742f2bc8f30a3e71d4ca58e171a445c90340", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_answers.py", "max_forks_repo_name": "anoff/aind-rnn", "max_forks_repo_head_hexsha": "c1c4742f2bc8f30a3e71d4ca58e171a445c90340", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8656716418, "max_line_length": 188, "alphanum_fraction": 0.6991902834, "include": true, "reason": "import numpy", "num_tokens": 586}
import os
import pickle
import logging
import librosa
import numpy as np
import pandas as pd
from math import pi
from scipy.fftpack import fft, hilbert
from sklearn.ensemble import GradientBoostingClassifier
from .gcp_inference import get_vggish_embedding

MEAN_VGGISH_EMBEDDING = 0.63299006
VGGISH_EMBEDDING_INDEX = 33


class CovidClassifier:
    def __init__(self):
        self.model = pickle.load(open(os.path.join('models', 'gbc_ovo_roc_70_5'), 'rb'))

    def classify_cough(self, audio, fs, clinical_features):
        """Predict COVID-19 class probabilities for a cough recording using
        feature extraction and ML classification

        Inputs:
            audio: (np.array) audio signal
            fs: (int) sample rate of the audio signal
            clinical_features: (dict) clinical metadata
        Outputs:
            result: (np.array) class probabilities predicted by the model
        """
        vggish_features = self.__extract_vggish_features(audio, fs)
        audio_features = self.__extract_audio_features(audio, fs)
        clinical_features = self.__extract_clinical_features(clinical_features)

        logging.debug('VGGish Features: %s', vggish_features)
        logging.debug('Audio Features: %s', audio_features)
        logging.debug('Clinical Features: %s', clinical_features)

        features = np.concatenate((vggish_features, audio_features, clinical_features))
        result = self.model.predict_proba(np.array([features]))
        return result

    def __extract_clinical_features(self, clinical_features):
        """Gets the clinical features and returns them as a numpy array.

        Args:
            clinical_features (dict): clinical features
        """
        logging.debug('Raw clinical features: %s', clinical_features)
        try:
            return np.array([
                float(clinical_features['age']),
                float(clinical_features['respiratory_condition']),
                float(clinical_features['fever_muscle_pain'])
            ])
        except Exception:
            logging.error('Error extracting clinical features.')
            return np.zeros(3)

    def __extract_vggish_features(self, audio, fs):
        """Gets the VGGish embedding from GCP and returns the relevant features.
        The embedding is computed on at most the last 4.2 seconds of audio,
        resampled to a 16 kHz sample rate.

        Args:
            audio (np.array): audio
            fs (int): sample rate
        """
        try:
            resampled_audio = librosa.resample(audio, fs, 16000, res_type='kaiser_best')
            cut_audio = resampled_audio.tolist()[-int(4.2 * 16000):]
            embeddings = get_vggish_embedding(os.environ['GCP_PROJECT'], os.environ['GCP_MODEL'],
                                              cut_audio)[0]['output_0']
            return np.mean(embeddings, axis=0)[VGGISH_EMBEDDING_INDEX]
        except Exception:
            logging.warning('Could not obtain VGGish embeddings. Check if AI Platform endpoint is enabled and credentials are set.')
            return np.array([MEAN_VGGISH_EMBEDDING])

    def __extract_audio_features(self, signal, fs):
        """Extract part of handcrafted features from the input signal.

        :param signal: the signal to extract features from
        :type signal: numpy.ndarray
        :param fs: the sample rate of the signal
        :type fs: integer
        :return: the populated feature vector
        :rtype: numpy.ndarray
        """
        try:
            frame_len = int(fs / 10)  # 100 ms
            hop = int(frame_len / 2)  # 50% overlap, i.e., a 50 ms hop length

            # normalise the sound signal before processing
            signal = signal / np.max(np.abs(signal))
            # trim the signal to the appropriate length
            trimmed_signal, idc = librosa.effects.trim(signal, frame_length=frame_len, hop_length=hop)
            # extract the signal duration
            signal_duration = librosa.get_duration(y=trimmed_signal, sr=fs)
            # find the onset strength of the trimmed signal
            o_env = librosa.onset.onset_strength(trimmed_signal, sr=fs)
            # find the frames of the onsets
            onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=fs)
            # keep only the number of detected onsets
            onsets = onset_frames.shape[0]
            # decompose the signal into its magnitude and the phase components such that signal = mag * phase
            mag, phase = librosa.magphase(librosa.stft(trimmed_signal, n_fft=frame_len, hop_length=hop))
            # extract the rms from the magnitude component
            rms = librosa.feature.rms(y=trimmed_signal)[0]
            s = pd.Series(rms)
            rms_skew = s.skew()
            # extract the spectral bandwidth of the magnitude
            spec_bandwidth = librosa.feature.spectral_bandwidth(S=mag)[0]
            # pack the extracted features into the feature vector to be returned
            signal_features = np.concatenate(
                (
                    np.array([signal_duration, onsets]),
                    self.__get_period(signal, signal_sr=fs),
                    np.array([np.max(rms), np.median(rms), np.percentile(rms, 25), rms_skew]),
                    np.array([np.mean(spec_bandwidth)])
                ),
                axis=0,
            )
            return signal_features
        except Exception:
            logging.error('Error extracting audio features.')
            return np.zeros(8)

    def __get_period(self, signal, signal_sr):
        """Extract the period from the provided signal

        :param signal: the signal to extract the period from
        :type signal: numpy.ndarray
        :param signal_sr: the sampling rate of the input signal
        :type signal_sr: integer
        :return: a vector containing the signal period
        :rtype: numpy.ndarray
        """
        # perform a sanity check
        if signal is None:
            raise ValueError("Input signal cannot be None")

        # transform the signal to the hilbert space
        hy = hilbert(signal)
        ey = np.sqrt(signal ** 2 + hy ** 2)
        min_time = 1.0 / signal_sr
        tot_time = len(ey) * min_time
        pow_ft = np.abs(fft(ey))
        peak_freq = pow_ft[3: int(len(pow_ft) / 2)]
        peak_freq_pos = peak_freq.argmax()
        peak_freq_val = 2 * pi * (peak_freq_pos + 2) / tot_time
        period = 2 * pi / peak_freq_val
        return np.array([period])
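# --- Hypothetical usage sketch (an addition; the wav path and clinical values
# are placeholders, not project defaults). Requires the pickled model under
# models/; without GCP credentials the mean-embedding fallback above is used.
if __name__ == '__main__':
    audio, fs = librosa.load('cough.wav', sr=None)
    clinical = {'age': 35, 'respiratory_condition': 0, 'fever_muscle_pain': 0}
    print(CovidClassifier().classify_cough(audio, fs, clinical))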
{"hexsha": "826c543b08b183fe141dd3ee34d57c7624c5a2b1", "size": 6206, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/CovidClassifier/CovidClassifier.py", "max_stars_repo_name": "LukasHaas/cs329s-covid-prediction", "max_stars_repo_head_hexsha": "bd73935e1141e72f005389013ba2fa772657b53f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-06-09T04:56:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T17:13:17.000Z", "max_issues_repo_path": "src/CovidClassifier/CovidClassifier.py", "max_issues_repo_name": "LukasHaas/cs329s-covid-prediction", "max_issues_repo_head_hexsha": "bd73935e1141e72f005389013ba2fa772657b53f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/CovidClassifier/CovidClassifier.py", "max_forks_repo_name": "LukasHaas/cs329s-covid-prediction", "max_forks_repo_head_hexsha": "bd73935e1141e72f005389013ba2fa772657b53f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-28T01:10:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-20T13:47:44.000Z", "avg_line_length": 39.2784810127, "max_line_length": 132, "alphanum_fraction": 0.6379310345, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1409}
#!/usr/bin/env python
# ****************************************** Libraries to be imported ****************************************** #
from __future__ import print_function
# noinspection PyPackageRequirements
import numpy as np
import matplotlib.image as mpimg
import cv2
from glob import glob
from moviepy.editor import VideoFileClip


# ****************************************** Func Declaration Start ****************************************** #
# Let's create a helper function that handles the entire process of computing calibration parameters for a camera
def calibrate_camera_distortion(image_paths, nx=9, ny=6):
    obj = np.zeros((nx * ny, 3), np.float32)
    obj[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

    objpoints = []
    imgpoints = []
    img = None

    for img_path in image_paths:
        img = mpimg.imread(img_path)
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret is True:
            imgpoints.append(corners)
            objpoints.append(obj)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1::-1], None, None)
    return ret, mtx, dist, rvecs, tvecs


# ****************************************** Func Declaration Start ****************************************** #
# Let's write a simple pipeline to convert the original RGB image to a binary image that accentuates the lane lines
def binary_img_pipe(image, gray_thresh=250, b_thresh=160):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # Convert to LAB color space
    lab = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
    b_channel = lab[:, :, 2]
    b_channel = (((b_channel - b_channel.min()) / b_channel.ptp()) * 255).astype(np.uint8)

    combined_binary = np.zeros_like(gray)
    combined_binary[(gray > gray_thresh) | (b_channel > b_thresh)] = 255
    return combined_binary


# ****************************************** Func Declaration Start ****************************************** #
# perspective transform
def perspective_transform(image, reverse=False):
    imshape = image.shape
    src = np.float32([[(200, 680), (575, 455), (705, 455), (1100, 680)]])
    dst = np.float32([[350, 720], [350, 0], [950, 0], [950, 720]])
    # src = np.float32([[(200,680),(505, 500),(790, 500), (1100,680)]])
    # dst = np.float32([[200,680],[200,40],[1100,40],[1100,680]])

    # reverse - to get back the original image from birds-eye view, swap the points for perspective transform
    if reverse:
        mat = cv2.getPerspectiveTransform(dst, src)
    else:
        mat = cv2.getPerspectiveTransform(src, dst)

    img_size = (imshape[1], imshape[0])
    return cv2.warpPerspective(image, mat, img_size, flags=cv2.INTER_LINEAR)


# ****************************************** Func Declaration Start ****************************************** #
def hist(image):
    return np.sum(image[image.shape[0] // 2:, :], axis=0)


# ****************************************** Func Declaration Start ****************************************** #
def find_lane_pixels(binary_warped, prev_left_fit, prev_right_fit):
    # Take a histogram of the bottom half of the image
    histogram = hist(binary_warped)
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0] // 2)  # built-in int: np.int was removed in NumPy 1.24
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # HYPERPARAMETERS
    # number of sliding windows
    nwindows = 9
    # width of the windows +/- margin
    margin = 80
    # minimum number of pixels found to recenter window
    minpix = 30
    # height of windows - based on nwindows above and image shape
    window_height = int(binary_warped.shape[0] // nwindows)
    # x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base

    # empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        # four boundaries of the window
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # identify the nonzero pixels in x and y within the window ###
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]

        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

    # concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # fit a second order polynomial
    try:
        if len(leftx) < 1000:
            raise TypeError
        left_fit = np.polyfit(lefty, leftx, 2)
        prev_left_fit.append(left_fit)
    except TypeError:
        pass

    try:
        if len(rightx) < 500:
            raise TypeError
        right_fit = np.polyfit(righty, rightx, 2)
        prev_right_fit.append(right_fit)
    except TypeError:
        pass

    left_fit = np.empty((len(prev_left_fit), 3))
    right_fit = np.empty((len(prev_right_fit), 3))

    for i in range(len(prev_left_fit)):
        left_fit[i] = np.array(prev_left_fit[i])
    for i in range(len(prev_right_fit)):
        right_fit[i] = np.array(prev_right_fit[i])

    left_fit = left_fit.mean(axis=0)
    right_fit = right_fit.mean(axis=0)

    # generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    # calc both polynomials using ploty, left_fit and right_fit
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]

    # create an image to draw on and an image to show the selection window
    out_img = np.zeros([binary_warped.shape[0], binary_warped.shape[1], 3], np.uint8)
    window_img = np.zeros_like(out_img)
    # color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    # new margin : mid of the left and right polynomial : polynomial for center of the lane
    marginx = (right_fitx - left_fitx) / 2
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx +
marginx, ploty])))]) left_line_pts = np.hstack((left_line_window1, left_line_window2)) right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - marginx, ploty]))]) right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))]) right_line_pts = np.hstack((right_line_window1, right_line_window2)) # draw the lane onto the warped blank image cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0)) cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0)) result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0) return result, left_fitx, right_fitx, ploty # ****************************************** Func Declaration Start ****************************************** # # measure radius of curvature def measure_curvature(image, leftx, rightx): # Define conversions in x and y from pixels space to meters ym_per_pix = 25 / 720 # meters per pixel in y dimension xm_per_pix = 3.7 / 550 # meters per pixel in x dimension ploty = np.linspace(0, 719, num=720) left_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2) right_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2) y_eval = np.max(ploty) left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / ( np.absolute(2 * left_fit_cr[0])) right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / ( np.absolute(2 * right_fit_cr[0])) z = np.mean([left_curverad, right_curverad]) cv2.putText(image, 'Radius of Curvature: {0:.3f}(m)'.format(z), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA) # ****************************************** Func Declaration Start ****************************************** # # calculate the vehicle position wrt center of the lane def dst_from_center(image, left_fitx, right_fitx): # find the x coordinate corresponding to the lane center lane_center_x = (left_fitx[-1] + right_fitx[-1]) / 2 xm_per_pix = 3.7 / 550 # calculate the offset i.e deviation of the lane center coordinate from the image center # this will give the deviation of the vehicle from the center of the lane dist = (image.shape[1] / 2 - lane_center_x) * xm_per_pix # dist is the offset: if the deviation is positive - the vehicle is left from the center of the lane # if the distance is negative - the vehicle is right from the center of the lane if dist <= 0: pos = 'left' else: pos = 'right' cv2.putText(image, "Position: {0:.3f}(m) ".format(abs(dist)) + pos + " of center.", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA) # ****************************************** Class Declaration Start ****************************************** # class LaneDetection(object): def __init__(self, mtx, dist): self.mtx, self.dist = mtx, dist self.left_fit, self.right_fit = [], [] def lane_detection(self, image): # distortion correction undist_img = cv2.undistort(image, self.mtx, self.dist, None, self.mtx) # calculate gradient binary_img = binary_img_pipe(undist_img) # perspective transform warped_img = perspective_transform(binary_img, False) # detect lane pixels result, left_fitx, right_fitx, ploty = find_lane_pixels(warped_img, self.left_fit, self.right_fit) if len(self.left_fit) > 15: self.left_fit.pop(0) if len(self.right_fit) > 15: self.right_fit.pop(0) # reverse perspective check = perspective_transform(result, True) # wrap to the original image output = cv2.addWeighted(undist_img, 1, check, 1, 0) # measure radius of curvature and distance from the center measure_curvature(output, left_fitx, 
right_fitx) dst_from_center(output, left_fitx, right_fitx) return output # ****************************************** Class Declaration End ****************************************** # # ****************************************** Main Program Start ****************************************** # def main(): """ The main of the program. Description of the Main Here. """ # Now lets read the image paths from the file system. image_paths = glob('camera_cal/calibration*.jpg') # Now lets use the images pointed by the elements of the list `image_paths` to compute camera calibration parameters num_corners_x, num_corners_y = 9, 6 ret, mtx, dist, rvecs, tvecs = calibrate_camera_distortion(image_paths, nx=num_corners_x, ny=num_corners_y) white_output = 'output_videos/challenge_video.mp4' clip1 = VideoFileClip("test_videos/challenge_video.mp4") ld = LaneDetection(mtx, dist) white_clip = clip1.fl_image(ld.lane_detection) white_clip.write_videofile(white_output, audio=False) # ****************************************** Main Program End ****************************************** # if __name__ == '__main__': try: main() except KeyboardInterrupt: print('\nProcess interrupted by user. Bye!') """ Author: Yash Bansod """
{"hexsha": "05410f220bf4365df06ac5212ccaf05bda869104", "size": 13235, "ext": "py", "lang": "Python", "max_stars_repo_path": "Advanced-Lane-Lines/pipeline_test.py", "max_stars_repo_name": "YashBansod/udacity-self-driving-car", "max_stars_repo_head_hexsha": "2ea83ec4d9232adead77e2662c7593b98f67be97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-27T17:42:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-27T17:42:16.000Z", "max_issues_repo_path": "Advanced-Lane-Lines/pipeline_test.py", "max_issues_repo_name": "YashBansod/udacity-self-driving-car", "max_issues_repo_head_hexsha": "2ea83ec4d9232adead77e2662c7593b98f67be97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:40:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:35:52.000Z", "max_forks_repo_path": "Advanced-Lane-Lines/pipeline_test.py", "max_forks_repo_name": "YashBansod/udacity-self-driving-car", "max_forks_repo_head_hexsha": "2ea83ec4d9232adead77e2662c7593b98f67be97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-29T22:42:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-29T22:42:07.000Z", "avg_line_length": 43.8245033113, "max_line_length": 120, "alphanum_fraction": 0.6166981488, "include": true, "reason": "import numpy", "num_tokens": 3360}
// Copyright 2012-2016 The CRAVE developers, University of Bremen, Germany. All rights reserved.//

#include <fstream>

#include <boost/assert.hpp>

#include "../../crave/experimental/ConstrainedRandomGraph.hpp"
#include "../../crave/experimental/graph/GraphVisitor.hpp"
#include "../../crave/utils/Logging.hpp"

namespace crave {

namespace graph {

rule_map global_rule_map;

void Terminal::accept(NodeVisitor& v) { v.visitTerminal(*this); }
void Selector::accept(NodeVisitor& v) { v.visitSelector(*this); }
void Sequence::accept(NodeVisitor& v) { v.visitSequence(*this); }

struct Executor : NodeVisitor {
  Executor(NodePtr r) : m_root(r), m_rules(global_rule_map), m_id(0), m_path_count(0) {}

  virtual void visitTerminal(Terminal&);
  virtual void visitSelector(Selector&);
  virtual void visitSequence(Sequence&);

 private:
  typedef std::pair<int, int> result_type;

  Rule* getRule(Node& n) {
    if (n.name() && m_rules.find(n.name()) != m_rules.end()) {
      return m_rules[n.name()];
    }
    return NULL;
  }

  void make_edge(int s, int d) { m_adj[s].push_back(d); }

  void check_root(Node&);
  void dfs(int);

  NodePtr m_root;
  rule_map& m_rules;
  int m_id;
  std::stack<result_type> m_stack;
  std::map<int, std::vector<int> > m_adj;
  std::map<int, action_type> m_actions;
  std::map<int, Rule*> m_main_to_rule_map;
  std::vector<int> path;
  int m_path_count;
};

void Executor::check_root(Node& n) {
  if (&n != m_root.get()) return;
  dfs(0);
}

void Executor::dfs(int v) {
  path.push_back(v);
  if (m_adj.find(v) == m_adj.end()) {
    BOOST_ASSERT_MSG(v == 2, "Invalid end of unfolded sequence");
    m_path_count++;
    // reset coverage of all rand_objs on the new path
    for (int i : path) {
      if (m_main_to_rule_map.find(i) != m_main_to_rule_map.end()) m_main_to_rule_map[i]->reset_coverage();
    }
    int iter_count = 0;
    while (true) {  // repeat until path is covered
      iter_count++;
      for (int i : path) {
        if (m_actions.find(i) != m_actions.end() && m_actions[i]) m_actions[i]();
      }
      bool path_covered = true;
      for (int i : path) {
        if (m_main_to_rule_map.find(i) != m_main_to_rule_map.end() && !m_main_to_rule_map[i]->is_rand_obj_covered()) {
          path_covered = false;
          break;
        }
      }
      if (path_covered) break;
    }
    LOG(INFO) << "Path " << m_path_count << " is covered after " << iter_count << " iteration(s)";
  } else {
    std::vector<int>& adj = m_adj[v];
    for (int i : adj) {
      dfs(i);
    }
  }
  path.pop_back();
}

void Executor::visitTerminal(Terminal& t) {
  Rule* r = getRule(t);
  BOOST_ASSERT_MSG(r, "A named rule could not be found");
  m_actions.insert(std::make_pair(m_id, r->entry));
  m_actions.insert(std::make_pair(m_id + 1, r->main));
  m_actions.insert(std::make_pair(m_id + 2, r->exit));
  m_main_to_rule_map.insert(std::make_pair(m_id + 1, r));
  make_edge(m_id, m_id + 1);
  make_edge(m_id + 1, m_id + 2);
  m_stack.push(result_type(m_id, m_id + 2));
  m_id += 3;
  check_root(t);
}

void Executor::visitSelector(Selector& nt) {
  int start = m_id;
  int end = m_id + 2;
  m_id += 3;
  m_stack.push(result_type(start, end));
  Rule* r = getRule(nt);
  if (r) {
    m_actions.insert(std::make_pair(start, r->entry));
    m_actions.insert(std::make_pair(start + 1, r->main));
    m_actions.insert(std::make_pair(end, r->exit));
    m_main_to_rule_map.insert(std::make_pair(start + 1, r));
  }
  make_edge(start, start + 1);
  for (NodePtr n : nt.children) {
    n->accept(*this);
    result_type& r = m_stack.top();
    m_stack.pop();
    make_edge(start + 1, r.first);
    make_edge(r.second, end);
  }
  check_root(nt);
}

void Executor::visitSequence(Sequence& nt) {
  int start = m_id;
  int end = m_id + 2;
  m_id += 3;
  m_stack.push(result_type(start, end));
  Rule* r = getRule(nt);
  if (r) {
    m_actions.insert(std::make_pair(start, r->entry));
    m_actions.insert(std::make_pair(start + 1, r->main));
    m_actions.insert(std::make_pair(end, r->exit));
    m_main_to_rule_map.insert(std::make_pair(start + 1, r));
  }
  make_edge(start, start + 1);
  int last = start + 1;
  for (NodePtr n : nt.children) {
    n->accept(*this);
    result_type& r = m_stack.top();
    m_stack.pop();
    make_edge(last, r.first);
    last = r.second;
  }
  make_edge(last, end);
  check_root(nt);
}

void RuleContext::root(rule_type& r) {
  m_root = proto::eval(r, *this);
  UpdateVisitor uv(m_named_nodes);
  m_root->accept(uv);
  Executor exec(m_root);
  m_root->accept(exec);
}

void RuleContext::print_dot_graph(rule_type& r, std::ostream& out) {
  NodePtr n = proto::eval(r, *this);
  UpdateVisitor uv(m_named_nodes);
  n->accept(uv);
  out << "digraph AST {" << std::endl;
  ToDotVisitor tdv(out);
  n->accept(tdv);
  out << "}" << std::endl;
}

void RuleContext::to_dot_file(rule_type& r, const char* filename) {
  std::fstream fs;
  fs.open(filename, std::fstream::out);
  print_dot_graph(r, fs);
  fs.close();
}

void RuleContext::display_graph(rule_type& r) {
  to_dot_file(r, "temp.dot");
  system("dot -Txlib temp.dot");
}

/**
 * Tests
 */
void test1() {
  RuleContext context;
  NAMED_RULE(r1);
  NAMED_RULE(r2);
  context(r1 = r2);  // failed
}

void test2() {
  RuleContext context;
  NAMED_RULE(r1);
  NAMED_RULE(r2);
  NAMED_RULE(r3);
  context(r1 = r2 >> r3);
  context(r1 = r3 >> r2);  // failed
}

void test3() {
  RuleContext context;
  NAMED_RULE(r1);
  NAMED_RULE(r2);
  NAMED_RULE(r3);
  context(r1 = r2 >> r3);
  context(r2 = r3 >> r1);
  context.root(r1);  // failed
}

void test4() {
  RuleContext context;
  NAMED_RULE(a);
  NAMED_RULE(b);
  NAMED_RULE(c);
  NAMED_RULE(d);
  NAMED_RULE(e);
  NAMED_RULE(f);
  NAMED_RULE(g);
  NAMED_RULE(h);
  NAMED_RULE(i);
  NAMED_RULE(j);
  context(a = b | c | d)(b = c >> d >> e)(c = f | g | (i >> j) | e)(d = i >> c >> h)(g = i | j)(h = (i >> j) | (j >> i));
  context.display_graph(a);
}
};
};
{"hexsha": "4f615208d173e4ad8d122ee3b9ba90d293292536", "size": 6012, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/lib/experimental/ConstrainedRandomGraph.cpp", "max_stars_repo_name": "quadric-io/crave", "max_stars_repo_head_hexsha": "8096d8b151cbe0d2ba437657f42d8bb0e05f5436", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2015-05-11T02:38:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T07:31:26.000Z", "max_issues_repo_path": "src/lib/experimental/ConstrainedRandomGraph.cpp", "max_issues_repo_name": "quadric-io/crave", "max_issues_repo_head_hexsha": "8096d8b151cbe0d2ba437657f42d8bb0e05f5436", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2018-06-08T14:44:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-19T16:07:21.000Z", "max_forks_repo_path": "src/lib/experimental/ConstrainedRandomGraph.cpp", "max_forks_repo_name": "quadric-io/crave", "max_forks_repo_head_hexsha": "8096d8b151cbe0d2ba437657f42d8bb0e05f5436", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2019-05-29T21:40:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T09:31:15.000Z", "avg_line_length": 24.6393442623, "max_line_length": 118, "alphanum_fraction": 0.622089155, "num_tokens": 1775}
### A Pluto.jl notebook ###
# v0.12.21

using Markdown
using InteractiveUtils

# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
macro bind(def, element)
    quote
        local el = $(esc(element))
        global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : missing
        el
    end
end

# ╔═╡ 5b22ecc2-68fc-11eb-0e6e-f38d2f761d95
using DataFrames

# ╔═╡ f2c54404-50dd-11eb-05cd-a94912f14d1e
using RDatasets

# ╔═╡ 60e6fd0e-50d8-11eb-37d9-add4c3dfcb1b
using Plots

# ╔═╡ 43774790-50da-11eb-05f3-4f635e934d6a
using StatsPlots

# ╔═╡ 6bf51fa8-50f3-11eb-378f-bf3ff7156539
using PlutoUI

# ╔═╡ 78c7ae98-50f7-11eb-3bc9-e30d06686614
using Distributions

# ╔═╡ 4187e3de-50ff-11eb-18c0-93284cb3ad44
using FreqTables

# ╔═╡ 12e89740-6b89-11eb-27fd-e7d7c4b81637
begin
    using HTTP, JSON
    resp = HTTP.get("https://api.covid19india.org/data.json")
    str = String(resp.body)
    jobj = JSON.Parser.parse(str)
end

# ╔═╡ 5ed5beba-6b8b-11eb-38d6-b1542bff022c
using Dates

# ╔═╡ 717e19a6-50d6-11eb-011b-ddf29b2946c8
md"# Notebook 7: _Data Visualisation in Julia_

In this section, we will learn the basics of data visualisation. It is best to think of this section as consisting of three broad ideas:
1. How do we decide _what plot type to use_ for a specific data visualisation task?
2. What are the _constituents of a plot_?
3. How do we _implement the plotting with Julia_?"

# ╔═╡ d12d4384-50dd-11eb-0ecc-65623c8febfe
md"## Types of variables"

# ╔═╡ d9b48c24-50dd-11eb-2253-e1f0eeb004f2
md"The columns of data we usually study belong to the following data-types:
* Numerical data - Data in numbers (think `reals`)
  * Continuous
  * Discrete
* Categorical data - Data in groups / categories (think `enum`)
  * Nominal - No clear ordering between categories
  * Ordinal - Categories can be ordered
"

# ╔═╡ b8c7b436-6b7d-11eb-0817-eb3fb7f09f95
md"👉 Give examples of each type of variable"

# ╔═╡ 52540bba-68fc-11eb-1ecc-03ad619fe826
md"Let us see some examples of these datatypes in a toy dataset from an apparel retailer."

# ╔═╡ a6239d94-68fc-11eb-0162-d5707c186fd4
shirt_database = DataFrame(ItemNo = ["A193s1", "A2013a", "B19sd9", "C2131x"],
    Color = ["White", "Black", "Black", "Blue"],
    Sleeves = ["Half", "Full", "Half", "Half"],
    Pattern = ["Solid", "Striped", "Solid", "Checked"],
    SurveyRating = ["Good", "Poor", "Very poor", "Good"],
    WebsiteRating = [3, 2, 1, 4],
    Price = [299, 1349, 599, 729])

# ╔═╡ 59f0a50c-690d-11eb-3a6d-f50e01402f32
md"👉 What are the types for each of the columns?"

# ╔═╡ 02ca2edc-50de-11eb-2cc5-2772d7d92fe6
md"Let us see some examples of these variable types. For creating interesting plots, we will use datasets available from the `RDatasets` package in the format of DataFrames."

# ╔═╡ 434cf43c-50de-11eb-2733-71ddf1bb8b49
RDatasets.datasets()

# ╔═╡ 85f580d0-6dfb-11eb-2dbf-db79082b85b7
md"Given below is the [iris](https://vincentarelbundock.github.io/Rdatasets/doc/datasets/iris.html) dataset."

# ╔═╡ 7d2d18ee-50de-11eb-38a1-cd402e4674b2
iris = dataset("datasets", "iris")

# ╔═╡ ea181dc0-6dfb-11eb-0ed6-2d29558cd012
md"We'll also make use of the [diabetes](https://vincentarelbundock.github.io/Rdatasets/doc/MASS/Pima.te.html) dataset."
# ╔═╡ bcdc7076-50ef-11eb-09d4-47ab5436e1d9
diabetes = dataset("MASS", "Pima.te")

# ╔═╡ a867dcec-50f0-11eb-07f2-7d34e62cd42e
md"## Types of plots"

# ╔═╡ adda9d42-50f0-11eb-266d-75d0464b2f23
md"
The following _taxonomy_ of plots at the top level is based on _what we are plotting_ and in the inner level on the _data type_ of the variables.
* Distribution
  * Single variable
    * Continuous variable - Histogram, Density plot (+ faceting)
    * Discrete / categorical variable - Bar plot
    * Box plot
    * Violin plot (+ paired)
  * Two variables
    * Both continuous variables - Scatter plot, marginal density plots
    * One continuous variable, one categorical variable - Sliced set of plots
    * Two discrete variables - Heatmap
* Composition
  * Pie chart
  * Stacked bar plot
  * Stacked line plot
* Relationship
  * Line plot
  * Correlation plot
"

# ╔═╡ 336536b0-50f2-11eb-1dbd-836f130731ac
md"Let's begin plotting. We will use the `gr` backend, but you can use any of the other backends as well."

# ╔═╡ 3fb759e6-50d9-11eb-36a4-31ed7766c5b3
gr()

# ╔═╡ ad6fa5de-5102-11eb-0329-8b5d52ba864f
default(size =(600, 300))

# ╔═╡ 4e488752-50f2-11eb-06c2-83cad3489775
md"We will also use the `StatsPlots` library for some interesting plot types."

# ╔═╡ 433bba92-50d8-11eb-05f0-d31772d405e0
md"## Distribution of data"

# ╔═╡ 9155c80e-50f0-11eb-2c14-13fce7a14b9d
md"One of the most important aspects in this course will be to study the distribution of data. So, we begin with looking at the distribution of one and two variables."

# ╔═╡ 48e55444-50d8-11eb-3861-73237803775c
md"##### Distribution of a single variable"

# ╔═╡ 068c716c-50f2-11eb-35f1-cf749745b296
md"###### Histogram"

# ╔═╡ 2df8b140-50f0-11eb-1882-2964dd07d1b3
histogram(diabetes.Glu)

# ╔═╡ 802ae7a6-50f2-11eb-1e52-2d4f62abf0e7
md"This helps us see that the data is _right skewed_ with a large fraction of values in the range of 80 - 130. We can change the number of bins and also normalize this into a density function."

# ╔═╡ 2683019c-6b81-11eb-36a2-69467c7ee7d6
md"👉 How do we choose the number of bins? What effect does this have? (One common rule is sketched a few cells below.)"

# ╔═╡ 4d06a470-50f0-11eb-1369-2f743c3cbfc9
histogram(diabetes.Glu, bins=1000)

# ╔═╡ da9c31d6-8644-11eb-237a-8f711cbf2963
histogram(diabetes.Glu, bins=3)

# ╔═╡ 5a55d03c-6b81-11eb-007e-bde94e3eb46a
md"👉 When should we normalize the bin heights?"

# ╔═╡ 6577ec58-50f0-11eb-0277-6b791ab01895
histogram(diabetes.Glu, bins=25, normalize=:pdf)

# ╔═╡ feaf8b4a-8644-11eb-2817-4bc99b6790b7
md"👉 What else can a histogram help visualise?"

# ╔═╡ b76d303e-50f2-11eb-0830-1bf573e84375
md"_Outliers_. In the Glucose field, there do not seem to be many outliers. Let us use an interactive plot to visualise the histogram of all other fields."

# ╔═╡ 4c2714ba-50f3-11eb-0aff-679d40f27bf4
names(diabetes)

# ╔═╡ 283cb0be-50f3-11eb-35a8-4dc20e9a05f1
@bind sel_col Select(names(diabetes))

# ╔═╡ 2d0c32be-50f2-11eb-1825-9b9648ccfcdc
histogram(diabetes[:, sel_col], nbins=20, normalize=:pdf, label = sel_col)

# ╔═╡ 2a7f2460-8645-11eb-37f5-89cd05d1af48
md"In summary, histograms can help visualise the skew of data, outliers, and if there are regions of discontinuity."

# ╔═╡ 574527c4-8645-11eb-0b26-c16a97b9ec2d
md"👉 What if we wanted a continuous approximation of the histogram?"

# ╔═╡ 98153fa4-50f4-11eb-0123-d7f01007697b
md"###### Density plot"

# ╔═╡ 264cba2a-50f5-11eb-1e9b-178bfbb7e7ab
md"Histograms show frequency counts for individual bins. If we are more interested in the abstract _shape of the distribution_ then a continuous density plot is more useful."
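# ╔═╡ 00000001-0000-4000-8000-000000000001
# Added sketch cell (placeholder UUID, not listed in the cell order at the end of
# this file), following up the bin-count question in the Histogram subsection above:
# the Freedman-Diaconis rule picks a bin width of 2*IQR*n^(-1/3), balancing
# resolution against noise. A minimal sketch on the Glu column:
begin
    using Statistics: quantile
    glu = diabetes.Glu
    fd_width = 2 * (quantile(glu, 0.75) - quantile(glu, 0.25)) * length(glu)^(-1/3)
    fd_bins = ceil(Int, (maximum(glu) - minimum(glu)) / fd_width)
    histogram(glu, bins=fd_bins, normalize=:pdf, label="Glu, $(fd_bins) FD bins")
end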
# ╔═╡ 9dc8fb8e-50f4-11eb-1489-6f4d3dd8802a
@bind sel_col2 Select(names(diabetes))

# ╔═╡ f3b3f4d8-50f4-11eb-05b2-d1c9164b21a3
density(diabetes[:, sel_col2], label = sel_col2, line=(4, :blue), fill=(0, :blue, 0.3))

# ╔═╡ 5440fd68-50f6-11eb-284b-5f5b65ade5b5
md"The `density` function plots the _kernel density function_. Specifically, it estimates a distribution such that the area under the curve is 1 and the shape of the distribution matches the frequency counts of the variable. (A hand-rolled sketch of this computation appears a few cells below.)"

# ╔═╡ 813a253e-8645-11eb-1663-03625a5483db
md"👉 Which measure of central tendency is easiest to spot from the density plot?"

# ╔═╡ 908b3186-8645-11eb-1309-71bc7ab6be00
md"_Mode_. This is clear in the unimodal distribution, but also for multiple modes in multimodal distributions."

# ╔═╡ fd01f110-6bce-11eb-376d-71e7e338caf9
md"In the [ships](https://vincentarelbundock.github.io/Rdatasets/doc/MASS/ships.html) dataset, the column 'Period' (indicating period of operation) is multimodal, as can be seen from the density plot."

# ╔═╡ 6336faa0-6bcc-11eb-0dec-7db99fe0d31b
shipsDataset = dataset("MASS", "ships")

# ╔═╡ 8ab2bf50-6bcd-11eb-01ff-2d884f36fb5c
@bind sel_ships Select(names(shipsDataset)[2:5])

# ╔═╡ 9309c1d0-6bcd-11eb-1f82-a5076b557033
density(shipsDataset[:, sel_ships], label = sel_ships, line=(4, :blue), fill=(0, :blue, 0.3))

# ╔═╡ b9ead7b0-6bcf-11eb-2573-d1a2cb1efe8a
md"In the ships dataset, the columns 'Service' (after 10,000) and 'Incidents' (after 30) show outliers in the density plot. Hence, similar to the histogram, the density plot can also be used for the identification of _outliers_."

# ╔═╡ b4121ff8-8646-11eb-330f-c792896a1577
md"The Glucose levels of patients with diabetes have a bimodal distribution."

# ╔═╡ ab2c6f18-8646-11eb-347c-47954af99437
density(diabetes[diabetes.Type .== "Yes", :Glu], label = "Yes", line=(4, :blue), fill=(0, :blue, 0.3))

# ╔═╡ 7b917128-50f8-11eb-04c0-7bee72532509
md"Density plots are very common in studying probability distributions. For commonly used distributions, we will use the `Distributions` package."

# ╔═╡ 75fd8092-50f7-11eb-385b-df20f0b26649
plot(Normal(0,1), line=(4, :blue), fill=(0, :blue, 0.3), label="Normal")

# ╔═╡ 8e7dbf8a-50f8-11eb-0cd5-9b1caa04ae2f
md"Many of the distributions are parameterized. Let us look at some of the common distributions with their parameters interactively."
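# ╔═╡ 00000002-0000-4000-8000-000000000002
# Added sketch cell (placeholder UUID, not listed in the cell order at the end of
# this file): the kernel density estimate from the `density` cells above, hand-rolled.
# A Gaussian bump is centred on every observation and the bumps are averaged, so the
# resulting curve integrates to 1. The bandwidth h = 5.0 is an arbitrary
# illustrative choice; real KDE implementations select it automatically.
begin
    glu_vals = diabetes.Glu
    h = 5.0
    kde_at(x) = sum(pdf.(Normal.(glu_vals, h), x)) / length(glu_vals)
    xs = range(minimum(glu_vals) - 20, maximum(glu_vals) + 20, length=200)
    plot(xs, kde_at.(xs), line=(3, :blue), fill=(0, :blue, 0.3), label="hand-rolled KDE of Glu")
end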
# ╔═╡ 9dfbf86e-50f8-11eb-2347-4742df778537
begin
    μslider = @bind μ html"<input type=range min=-5 max=5 step=0.2>"
    σslider = @bind σ html"<input type=range min=0.1 max=2 step=0.1>"
    md"""**Normal Distribution**

    μ: $(μslider)

    σ: $(σslider)"""
end

# ╔═╡ be9b6320-50f8-11eb-3031-eb88eb81c05f
plot(Normal(μ,σ), line=(4, :blue), fill=(0, :blue, 0.3), label="Normal", -10, 10, ylim=(0, 1))

# ╔═╡ 301441fc-50f9-11eb-1c29-9f4f1acfd93c
begin
    αslider = @bind α html"<input type=range min=0.5 max=5 step=0.5>"
    βslider = @bind β html"<input type=range min=0.5 max=5 step=0.5>"
    md"""**Beta Distribution**

    α: $(αslider)

    β: $(βslider)"""
end

# ╔═╡ 564d14ca-50f9-11eb-18a0-4d977b316e31
plot(Beta(α,β), line=(4, :blue), fill=(0, :blue, 0.3), label="Beta", 0, 1, ylim=(0, 5))

# ╔═╡ bbe402b2-50f9-11eb-3366-573d0cdaea70
begin
    kslider = @bind k html"<input type=range min=1 max=10 step=1>"
    md"""**Chi Distribution**

    k: $(kslider)"""
end

# ╔═╡ dc80ce26-50f9-11eb-1dc4-efb97abc75f3
plot(Chi(k), line=(4, :blue), fill=(0, :blue, 0.3), label="Chi", 0, 5, ylim=(0, 1))

# ╔═╡ f8b79cf0-50fc-11eb-2d1d-31489e04471f
begin
    kgslider = @bind kg html"<input type=range min=1 max=5 step=1>"
    θslider = @bind θ html"<input type=range min=0.5 max=2 step=0.1>"
    md"""**Gamma Distribution**

    k: $(kgslider)

    θ: $(θslider)"""
end

# ╔═╡ 1c809a38-50fd-11eb-281a-e3cc8d990d6d
plot(Gamma(kg, θ), line=(4, :blue), fill=(0, :blue, 0.3), label="Gamma", 0, 20, ylim=(0, 1))

# ╔═╡ 2f3ed2ba-8646-11eb-0de1-1d04e888fc2d
md"Thus, while density plots are the default mode to study continuous probability distributions, they are also useful for empirical data to identify mode, skew, outliers, and the general shape of the data."

# ╔═╡ a36e3a72-50f4-11eb-311a-e10e997b2d9d
md"###### Bar plot"

# ╔═╡ a8580752-50f4-11eb-14c4-93eab7aa6ecb
md"We saw that both histogram and density plots don't work for categorical data. But it is useful to visualise the frequencies of categorical data. For this we use the _bar plot_. Notice that the bar plot is different from the histogram."

# ╔═╡ 5303f760-50ff-11eb-2641-d397037dd163
freqtable(diabetes.Type)

# ╔═╡ 74959a7e-6e07-11eb-0ff2-cf4b49a62e8c
md"Given below is a bar plot for diabetic people (Yes/No) from the diabetes dataset."

# ╔═╡ 6d6b50e4-50ff-11eb-002a-8d46e701af83
bar(freqtable(diabetes.Type), label="Type")

# ╔═╡ f6f1f2d0-6e07-11eb-021b-9dbaf45f1e3f
md"Another bar plot from the iris dataset for different kinds of species."

# ╔═╡ 8a3911ca-50ff-11eb-362c-6f5ea92d27aa
bar(freqtable(iris.Species), label="Species")

# ╔═╡ 5b66ce72-6e08-11eb-2fbe-bf4fb21a18f7
md"Let us take a look at a bar plot from the diamonds dataset."

# ╔═╡ a89b96b0-50ff-11eb-12fd-19463d92b20d
diamonds = dataset("ggplot2", "diamonds");

# ╔═╡ d2dbffa0-50ff-11eb-068c-47e957435cce
@bind diamond_column Select(["Cut", "Color", "Clarity"])

# ╔═╡ c12d9840-50ff-11eb-3604-ddc6cc76e839
bar(freqtable(diamonds[:, diamond_column]), label=diamond_column)

# ╔═╡ 1f35feaa-5100-11eb-2024-ab35ba3e9fe8
md"Notice that bar plots are more generic than histograms and density plots. They can be used to plot other aspects beyond the distribution of data, as we will see later."

# ╔═╡ 4125dfd0-5100-11eb-223a-55b94f998466
md"#### Distribution of a single variable in multiple slices"

# ╔═╡ 497f2132-5100-11eb-2ed0-2512baedf8b1
md"In the above we saw the distribution of a single variable. But often we would like to contrast the distributions of two or more slices of the data. Let us take an example from the diabetes data."
# ╔═╡ 5bb4a3ee-5101-11eb-2456-39821acd0bb0
@bind sel_col3 Select(names(diabetes))

# ╔═╡ 8df80d74-5100-11eb-207c-53d522115b08
begin
    density(diabetes[diabetes.Type .== "Yes", sel_col3], label = "Yes", line=(4, :red), fill=(0, :red, 0.3))
    density!(diabetes[diabetes.Type .== "No", sel_col3], label = "No", line=(4, :green), fill=(0, :green, 0.3))
end

# ╔═╡ 367f5b64-5104-11eb-1d59-416bdbd0fbf3
md"We do not have to do these two plots manually. We can use the idea of _groups_ instead."

# ╔═╡ 30eb4c8a-5179-11eb-10bd-23d662061237
@bind sel_col4 Select(names(diabetes))

# ╔═╡ 0e15b664-5179-11eb-03cf-fdd03e1fa960
density(diabetes[:, sel_col4], group = diabetes.Type, line=(4), fill=(0), fillalpha=0.3)

# ╔═╡ 4c0b2814-5179-11eb-3afc-6b96b70c4017
md"One more handy trick: since the dataframe keeps repeating multiple times in the following, we can use a short-hand with the `@df` construction."

# ╔═╡ 47669692-5104-11eb-0321-efacd2ee6083
@df diabetes density(:Glu, group = :Type, line=(4), fill=(0), fillalpha=0.3)

# ╔═╡ b361ed4a-5101-11eb-3e24-67e474420a96
md"Stacking multiple density plots for different groups together works well due to the fill transparency. But for histograms, we have to do something different."

# ╔═╡ d58b795a-5103-11eb-1a5a-cd932ad9ea5f
@df diabetes histogram(:Glu, group = :Type, bins=25, normalize=:pdf)

# ╔═╡ a3b1651e-5102-11eb-2b7c-7506f3ec1042
md"We can have multiple plots laid out separately with the `layout` argument."

# ╔═╡ 4584d1c6-5105-11eb-24a4-0d471533d62c
@df diabetes histogram(:Glu, group=:Type, bins=25, normalize=:pdf, layout=2)

# ╔═╡ 9799cb84-5177-11eb-0631-7beed264b449
@df diamonds histogram(:Price, group=:Cut, bins=25, normalize=:pdf, layout=5, legend=:topright)

# ╔═╡ afc41804-8647-11eb-0245-655660d79948
md"We can also change the layout if we want more horizontal spacing. More complex layouts are shown [here](https://docs.juliaplots.org/latest/layouts/)."

# ╔═╡ ab34502e-8647-11eb-1cf0-4ba59ec56871
@df diamonds histogram(:Price, group=:Cut, bins=25, normalize=:pdf, layout=(3, 2), legend=:topright)

# ╔═╡ 6f801c82-6e09-11eb-0a00-b5f75adab724
md"For bar plots, we do the grouping in a slightly different way. We extract the frequencies for each 'Type' and use them for the plot."

# ╔═╡ f972e120-6d32-11eb-3237-a74316b8171f
begin
    bar_plots = []
    types = unique(diabetes.Type)
    for i in 1:2
        push!(bar_plots, bar(freqtable(diabetes[diabetes.Type .== types[i], :NPreg]), label=types[i]))
    end
    plot(bar_plots[1], bar_plots[2], layout = 2)
end

# ╔═╡ fdd1d920-6e1c-11eb-13d9-29574511937e
md"From the bar plots, observe that the number of pregnancies for diabetic patients is more spread out, whereas for those who are not diabetic, the distribution is right skewed."

# ╔═╡ 9321675e-5179-11eb-2df6-e54ed50b6a17
md"#### Box plot"

# ╔═╡ 965e8668-5179-11eb-0718-eb096753ad66
md"In the case of histograms and density plots, a lot of the information about the distribution of a variable is visible. If we wanted to instead look at only a subset of information, then we can use a `box plot`."

# ╔═╡ 117fe8a0-517a-11eb-132f-eb3211dde0c4
md"Notice the _abstraction of information_ as we move from histogram/density plots to the box plot."

# ╔═╡ b6e26fde-517a-11eb-34ba-b94752f8f4f1
@bind sel_col5 Select(names(diabetes))

# ╔═╡ 1dca2170-517a-11eb-0be4-c1cc0e228971
boxplot(diabetes[:, sel_col5])

# ╔═╡ 86cca6f4-517d-11eb-0104-65589fdd58a4
md"We have already discussed the plot elements of a box plot - median, IQR, whiskers, outliers. (These are computed directly in the sketch below.)"
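# ╔═╡ 00000003-0000-4000-8000-000000000003
# Added sketch cell (placeholder UUID, not listed in the cell order at the end of
# this file): the box-plot elements just named, computed directly. Whiskers
# conventionally reach the last observation within 1.5*IQR of the box; everything
# beyond those fences is drawn as an outlier.
begin
    using Statistics: median, quantile
    glu_sorted = sort(diabetes.Glu)
    q1, q3 = quantile(glu_sorted, 0.25), quantile(glu_sorted, 0.75)
    iqr = q3 - q1
    lo_fence, hi_fence = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    n_outliers = count(x -> x < lo_fence || x > hi_fence, glu_sorted)
    (q1=q1, median=median(glu_sorted), q3=q3, iqr=iqr, n_outliers=n_outliers)
end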
# ╔═╡ 98c9a594-517a-11eb-1fe4-d5221eda3dd1
md"![](https://mk0codingwithmaxskac.kinstacdn.com/wp-content/uploads/2019/11/box-plot-vertical-horizontal-1.png)"

# ╔═╡ 931e2c52-517d-11eb-2ed6-1fc344727571
md"It is important to build good intuition about how things are related across plot types. For instance, if we have a heavily right skewed dataset with a long tail, then it would show up in the box plot as being bottom heavy and having many outliers on the top, like in the following case of prices of diamonds."

# ╔═╡ bccce282-517d-11eb-1894-070fa44f4134
density(diamonds[:, :Price], fill=(0, :blue, 0.3), line=(3, :blue))

# ╔═╡ 6fe12a28-517d-11eb-2a2d-7f9a4c420d89
boxplot(diamonds[:, :Price])

# ╔═╡ 00fdae4c-517d-11eb-1a34-650725b08271
md"Like before, we can plot the boxplots for different groups, such as the diabetes condition."

# ╔═╡ efb334e2-517a-11eb-274e-29cd6b00216c
@bind sel_col6 Select(names(diabetes))

# ╔═╡ b270a6ec-5179-11eb-2860-95fe1f7e20a6
boxplot(String.(diabetes[:, :Type]), diabetes[:, sel_col6], fillalpha=0.5, linewidth=2, label=sel_col6, legend=:topleft)

# ╔═╡ 0ea5516c-517d-11eb-042a-8dbd4acba7b0
md"Let us do the same for the iris dataset from earlier, where we had three species of flowers."

# ╔═╡ 1288c71a-517c-11eb-2b60-79c4abf10e06
boxplot(String.(iris[:, :Species]), iris[:, 1], fillalpha=0.5, linewidth=2, label=names(iris)[1], legend=:topleft)

# ╔═╡ f8cca5fc-517c-11eb-332e-0d89d4691ac8
md"We can combine such boxplots across the different fields in the data to get a complete picture of the dataset. Notice how we are using a for loop to create the plots and then plotting them separately with a layout argument."

# ╔═╡ 767041dc-517b-11eb-2e15-83bfa870df15
begin
    iris_plots = []
    for iris_i in 1:4
        push!(iris_plots, boxplot(String.(iris[:, :Species]), iris[:, iris_i], fillalpha=0.5, linewidth=2, title=names(iris)[iris_i], label=""))
    end
    plot(iris_plots[1], iris_plots[2], iris_plots[3], iris_plots[4], layout = 4)
end

# ╔═╡ 379cbe3e-517d-11eb-346e-195d12bf8ae0
md"A plot like the one above can help identify the discriminative features. For instance, the `setosa` species can easily be called out based on the petal length and width. While `versicolor` and `virginica` are harder to tell apart, we have a better chance with the petal features than with the sepal features."

# ╔═╡ 5ed991c2-6b84-11eb-10c7-e5d34f7302c0
md"You can also plot good looking grouped boxplots if you use `Plotly`. (A `groupedboxplot` sketch appears at the end of the sliced-plots subsection further below.)"

# ╔═╡ 05c7911c-517e-11eb-2a7a-3777272d76fd
md"#### Violin plot"

# ╔═╡ 0a8a070c-517e-11eb-0201-99f2f6b4f89c
md"The density plot showed us the shape of the distribution and the box plot showed us some important statistics. Can we combine these in a single plot? Yes, this is what the violin plot does."

# ╔═╡ 2a1c10f6-517e-11eb-3c00-0161244ad098
@bind sel_col7 Select(names(diabetes))

# ╔═╡ 5034b84e-517e-11eb-126f-c7a48c074a5f
violin(diabetes[:, sel_col7])

# ╔═╡ dafeda92-517f-11eb-2c15-d70012e98d61
md"We can overlay the boxplot on top of the violin plot."
# ╔═╡ 72d70e92-517e-11eb-1b0c-29015c7b91b9
begin
    violin(String.(diabetes[:, :Type]), diabetes[:, sel_col7], fillalpha=0.8, linewidth=2, label="")
    boxplot!(String.(diabetes[:, :Type]), diabetes[:, sel_col7], fillalpha=0.3, linewidth=2, label="", title=sel_col7)
end

# ╔═╡ 10a2c44c-5180-11eb-2a9e-8ffd22114b3f
@bind sel_col8 Select(names(iris))

# ╔═╡ eec70900-517f-11eb-0a5a-0b97ab4f572e
begin
    violin(String.(iris[:, :Species]), iris[:, sel_col8], fillalpha=0.8, linewidth=2, label="")
    boxplot!(String.(iris[:, :Species]), iris[:, sel_col8], fillalpha=0.3, linewidth=2, label="", title=sel_col8)
end

# ╔═╡ df9fc7d4-5182-11eb-2314-7d91be2355e6
md"The violin plots in the GR backend do not look particularly good. You can try other backends such as Plotly and Gadfly to obtain better looking plots. Python's Seaborn is also a good option:
![](https://datavizcatalogue.com/methods/images/top_images/SVG/violin_plot.svg)"

# ╔═╡ 1fec4acc-5180-11eb-0328-bfa2a9f84994
md"If we have just two classes to compare (like in the diabetes case), then we have a special way of doing that - using the two sides of the violin."

# ╔═╡ d5cd9782-5181-11eb-0965-411867c9a03a
@bind sel_col9 Select(names(diabetes))

# ╔═╡ 26f2c5fc-5181-11eb-2b8a-1f2edee76a9e
begin
    violin([1], diabetes[diabetes.Type .== "Yes", sel_col9], side=:left, label="Yes", fill=(0, :red, 0.3), linewidth=0)
    violin!([1], diabetes[diabetes.Type .== "No", sel_col9], side=:right, label="No", fill=(0, :green, 0.3), linewidth=0, title=sel_col9)
end

# ╔═╡ 16622b78-5182-11eb-24bf-cdf3500cd2f3
md"We may also want to see the individual points, especially in domains like healthcare. This can be done by overlaying a `dotplot` on top of the paired violin plots."

# ╔═╡ 7f865bec-5182-11eb-3880-e10b9c4a685d
@bind sel_col10 Select(names(diabetes))

# ╔═╡ 2a18a110-5182-11eb-1769-ed392bb4284d
begin
    violin([1], diabetes[diabetes.Type .== "Yes", sel_col10], side=:left, label="Yes", fill=(0, :red, 0.3), linewidth=0)
    dotplot!([1], diabetes[diabetes.Type .== "Yes", sel_col10], side=:left, label="Yes", marker=(:red, stroke(0), 0.3))
    violin!([1], diabetes[diabetes.Type .== "No", sel_col10], side=:right, label="No", fill=(0, :green, 0.3), linewidth=0, title=sel_col10)
    dotplot!([1], diabetes[diabetes.Type .== "No", sel_col10], side=:right, label="No", marker=(:green, stroke(0), 0.3))
end

# ╔═╡ 0dc19ee6-5184-11eb-0d29-113e921b264a
md"### Distribution of two variables"

# ╔═╡ 9f7ee908-5184-11eb-23bc-a9fe1c3749ce
md"So far we have been looking at ways to plot the distribution of a single variable. Now we move on to two variables by considering different combinations of the data types of the two variables."

# ╔═╡ 15835914-5184-11eb-336d-b3de2228abfb
md"#### Two continuous variables"

# ╔═╡ 1def7b32-5184-11eb-06d4-7b15b0607708
md"##### Scatter plot"

# ╔═╡ b57bc3c0-5184-11eb-1d1a-6763c47721a0
md"The simplest plot to imagine is a scatter plot of the two variables. Since they are both continuous, we expect the points to be distributed irregularly on a 2d plot."

# ╔═╡ 36831b52-5184-11eb-0244-e9fbcbaafef3
@df diabetes scatter(:Glu, :BP, xlabel="Glu", ylabel="BP", label="")

# ╔═╡ c7498fec-5184-11eb-2235-11fe7203feed
md"The scatter plot makes sense if we have few points to plot, which can be visualised to obtain a sense of the density of the distribution of the points. For instance, in the following we clearly see that there are two broad clusters of points."
# ╔═╡ 612b5e5c-5184-11eb-3538-edff43e08d7e
@df iris scatter(:SepalWidth, :PetalWidth, xlabel="SepalWidth", ylabel="PetalWidth", label="")

# ╔═╡ f2dc3286-5184-11eb-1ee1-9fcabc42c007
md"But scatter plots do not work if we have a large number of points to plot, like in the diamonds dataframe. In this case, we can take random samples from the data and use them for a scatter plot."

# ╔═╡ c95cc3b6-5185-11eb-2102-7bc6b10a9b0a
md"Though the scatter plot shows the distribution on the 2-d plane, it is useful to also see how the data is distributed along the two axes. Such projections are technically called marginals and can be used as shown."

# ╔═╡ f8a1eae8-5185-11eb-21b0-35381169101a
@df iris marginalscatter(:SepalWidth, :PetalWidth, xlabel="SepalWidth", ylabel="PetalWidth", label="")

# ╔═╡ 509f0e74-5186-11eb-24ca-154900f666aa
md"##### Marginal KDEs"

# ╔═╡ 89a98942-519a-11eb-16d9-630f9a769d63
@df iris marginalkde(:SepalWidth, :PetalWidth, xlabel="SepalWidth", ylabel="PetalWidth", label="")

# ╔═╡ 17bc0fa4-51bf-11eb-14bb-0976b8c6a92a
@df diabetes marginalkde(:Glu, :BMI, xlabel="Glu", ylabel="BMI", label="")

# ╔═╡ 63c05f88-51bf-11eb-065b-7d352b5170cb
@df diamonds[rand(1:nrow(diamonds), 500), :] marginalkde(:X, :Y, xlabel="X", ylabel="Y", label="")

# ╔═╡ 1f67ee8a-6b7f-11eb-3296-190862654a02
md"#### One categorical and one continuous variable"

# ╔═╡ 39aa4b94-6b7f-11eb-1763-7b5731db0201
md"##### Sliced set of plots"

# ╔═╡ 76884c96-6b7f-11eb-12ab-95f92a9983d7
md"Plot multiple plots, one for each value of the categorical variable. Use any of the plots seen for the distribution of continuous variables."

# ╔═╡ 4a217812-6b7f-11eb-24b4-074c956d5577
@df diamonds[rand(1:nrow(diamonds), 500), :] density(:Price, group=:Cut, layout=5, legend=:topright)

# ╔═╡ 3bf1b230-6c6e-11eb-2023-6176f077cdf5
md"Below is a sliced box plot from the iris dataset."

# ╔═╡ ce243800-6c6c-11eb-1863-e12032c778d2
@df iris[rand(1:nrow(iris), 200), :] boxplot(String.(:Species), :SepalLength, fillalpha=0.5, linewidth=2, label="SepalLength", legend=:topleft)

# ╔═╡ 5a6e9110-6c6e-11eb-0aaa-67f45bb7423d
md"Another boxplot from the ships dataset:"

# ╔═╡ ad2c3510-6c6e-11eb-0f9b-bbe03d4e05c0
@df shipsDataset[rand(1:nrow(shipsDataset), 1000), :] boxplot(String.(:Type), :Incidents, fillalpha=0.5, linewidth=2, label="Incidents")

# ╔═╡ c71a83d0-6e1e-11eb-20db-65f12fe727d8
md"From the box plot, we can see that the groups C and D contain outliers when considering the number of damage incidents as the feature of interest."

# ╔═╡ 0d209c30-6c70-11eb-2cf0-73a491799c73
md"The illustration below is of a violin plot from the iris dataset."

# ╔═╡ 5670ebb0-6c70-11eb-0efc-f93bd094bc14
begin
    irisSlice = iris[rand(1:nrow(iris), 500), :]
    @df irisSlice violin(String.(:Species), :SepalWidth, fillalpha=0.5, linewidth=2, label="")
    @df irisSlice boxplot!(String.(:Species), :SepalWidth, fillalpha=0.5, linewidth=2, label="SepalWidth")
end

# ╔═╡ 08bdf380-6e1f-11eb-3176-e13a76bad6ef
md"This is an illustration of a box plot overlayed on a violin plot, for a sliced set of data. Notice that we are using the same slice to draw the violin and box plots."
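# ╔═╡ 00000004-0000-4000-8000-000000000004
# Added sketch cell (placeholder UUID, not listed in the cell order at the end of
# this file): another way to slice by a categorical variable is the grouped
# boxplot mentioned in the box-plot section. This assumes a StatsPlots version
# that ships the `groupedboxplot` recipe; run `plotly()` in place of `gr()` for
# the interactive version mentioned there.
@df diamonds[rand(1:nrow(diamonds), 2000), :] groupedboxplot(String.(:Cut), :Price, group=:Clarity, legend=:topleft)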
# ╔═╡ d707654a-51bf-11eb-3eb2-7fed48a711ee
md"#### Two discrete variables
##### Marginal Histogram"

# ╔═╡ 588566fc-51c1-11eb-018f-95a038ccbbc2
@df diamonds[rand(1:nrow(diamonds), 1000), :] marginalhist(:X, :Y, xlabel="X", ylabel="Y", label="", bins=30)

# ╔═╡ 7261d362-51c1-11eb-3110-7bd061ebabba
@df iris marginalhist(:PetalLength, :PetalWidth, bins=30)

# ╔═╡ e1e84b26-51c1-11eb-2acc-bf44a69a41e5
@df diabetes marginalhist(:Glu, :BMI, xlabel="Glu", ylabel="BMI", label="", bins=30)

# ╔═╡ ce379972-51c3-11eb-3fe0-ed1fa76288db
md"##### Heatmap"

# ╔═╡ 96f72440-6e20-11eb-10f1-4dfe2d368817
md"Firstly, we sample a random number of rows and convert the 'Cut' and 'Clarity' features to the String type."

# ╔═╡ ec9fc67c-51bf-11eb-0802-35368653faa9
begin
    diamonds_short = diamonds[rand(1:nrow(diamonds), 5000), [:Cut, :Clarity]];
    diamonds_short = transform!(diamonds_short, :Cut => (x -> String.(x)) => :Cut, :Clarity => (x -> String.(x)) => :Clarity);
end

# ╔═╡ b29c03f0-6e20-11eb-35c5-53d584f71426
md"We build a frequency table using Cut and Clarity as the categorical features."

# ╔═╡ 5fe3f886-51c2-11eb-2a37-b3d52ef5fd08
f_table = freqtable(diamonds_short.Cut, diamonds_short.Clarity)

# ╔═╡ 8290d584-51c2-11eb-3d27-f726e21512d2
heatmap(names(f_table)[2], names(f_table)[1], f_table)

# ╔═╡ df5e4c40-6e20-11eb-38e6-ed76c461a1cf
md"The color coding from the scale on the right indicates the intensity of the values for each pair of feature values being considered."

# ╔═╡ 7f2c8750-6d09-11eb-07b4-af47ed3c4003
md"Consider the dataset on carbon dioxide uptake in grass plants."

# ╔═╡ 0b9e3ab0-6d5c-11eb-04cc-8d9f0bb46fb3
plants = dataset("datasets", "CO2")

# ╔═╡ 3ba3caf0-6d0b-11eb-2696-29db4b882fd8
begin
    plants_short = plants[rand(1:nrow(plants), 50), [:Type, :Treatment]]
    plants_short = transform!(plants_short, :Type => (x -> String.(x)) => :Type, :Treatment => (x -> String.(x)) => :Treatment)
end

# ╔═╡ 8a1b12b2-6d0b-11eb-290c-6798b5c8f64e
f_table_plants = freqtable(plants_short.Type, plants_short.Treatment)

# ╔═╡ a965eff2-6d0b-11eb-26bb-23a3e632570f
heatmap(names(f_table_plants)[2], names(f_table_plants)[1], f_table_plants)

# ╔═╡ c49ab754-6b82-11eb-097a-e9b6ebae0c6f
md"### Composition of data"

# ╔═╡ d0175272-6b82-11eb-068e-e76e36973914
md"##### Pie Chart"

# ╔═╡ f222c498-6b82-11eb-1008-ed1fed165c64
pie(["Assignments", "Midsem", "Endsem"], [40, 30, 30], title="CS6741 Evaluation")

# ╔═╡ cc01d6c0-6c78-11eb-2e06-eb859e7eaa91
md"A pie chart from the diamonds dataset:"

# ╔═╡ e5e7c530-6c74-11eb-07cf-a9dff5b62b8a
begin
    freqCuts = freqtable(diamonds.Cut)
    pie(String.(names(freqCuts)[1]), freqCuts, title="Diamonds of different cuts")
end

# ╔═╡ f35f8190-6c78-11eb-204a-21b9d6c13e2d
md"A pie chart from the ships dataset:"

# ╔═╡ fd0990f0-6c78-11eb-2c19-bf8cf669cfbc
begin
    freqTypes = freqtable(shipsDataset.Type)
    pie(String.(names(freqTypes)[1]), freqTypes, title="Ship damages by type")
end

# ╔═╡ 9d3c1a2e-6e21-11eb-0a4c-ad31b64a0663
md"The pie chart indicates that there is an equal number of ship damage records for each type."

# ╔═╡ e1f6e600-6b83-11eb-37a7-e169b81ba790
md"We can also have stacked pie charts such as this one:
![](https://i0.wp.com/vizartpandey.com/wp-content/uploads/2019/06/Nested-Pie-Charts-in-Tableau.png?resize=696%2C490&ssl=1)"

# ╔═╡ 2749759e-6b87-11eb-13b8-0390abfd0682
md"##### Stacked bar plot"

# ╔═╡ e88efa40-864d-11eb-1ae0-6b5b81782c65
md"We had seen bar plots to denote the frequency of categorical variables. We can use stacked bar plots to capture composition."
# ╔═╡ 1dc71cce-6b87-11eb-0cd1-3179162a8f91
groupedbar(rand(10,3), bar_position = :stack, bar_width=0.7)

# ╔═╡ fa44a096-864d-11eb-36e5-83f1777ed417
md"For the diamonds dataset, we can visualise the composition of each cut based on the clarity."

# ╔═╡ 47cb6a70-6c7c-11eb-2d97-297689c50d71
begin
    frequency = freqtable(diamonds_short.Cut, diamonds_short.Clarity)
    groupedbar(names(frequency)[1], frequency, bar_position = :stack, bar_width=0.7, xlabel="Cut", ylabel="Clarity", legend=:topleft)
end

# ╔═╡ ed5b0af0-6e83-11eb-2a0f-2fbf72bfd4af
md"From the above stacked bar plot, observe that the frequency is highest for the ideal cut and lowest for the fair cut. The bar lengths are the counts for diamonds of each type of clarity."

# ╔═╡ 851f7c02-6b86-11eb-1f53-adc35eba6146
md"##### Stacked line plots"

# ╔═╡ 8aa43b5c-6b86-11eb-0e87-ebea1e7c9e79
md"To model time-varying compositions, we can use a stacked line plot. There is no default recipe for this, but we can build one using `@userplot`."

# ╔═╡ a1d47864-6b86-11eb-14be-d9cc09db1e18
begin
    @userplot StackedArea

    # a simple "recipe" for Plots.jl to get stacked area plots
    # usage: stackedarea(xvector, datamatrix, plotsoptions)
    @recipe function f(pc::StackedArea)
        x, y = pc.args
        n = length(x)
        y = cumsum(y, dims=2)
        seriestype := :shape

        # create a filled polygon for each item
        for c=1:size(y,2)
            sx = vcat(x, reverse(x))
            sy = vcat(y[:,c], c==1 ? zeros(n) : reverse(y[:,c-1]))
            @series (sx, sy)
        end
    end
end

# ╔═╡ ba21b4c2-6b86-11eb-34ea-d36a5f02192a
begin
    a = [1,1,1,1.5,2,3]
    b = [0.5,0.6,0.4,0.3,0.3,0.2]
    c = [2,1.8,2.2,3.3,2.5,1.8]
    sNames = ["a","b","c"]
    x = [2001,2002,2003,2004,2005,2006]

    stackedarea(x, [a b c], labels=reshape(sNames, (1,3)))
end

# ╔═╡ 5eb5b28e-6d1c-11eb-135a-e39ca2886a7e
md"Consider the US economic time series data."

# ╔═╡ 6c93ceb0-6d1c-11eb-27c5-19a0d1bfb20e
timeSeries = dataset("ggplot2", "economics")

# ╔═╡ 49a53dd0-6d1c-11eb-0c86-951498741f02
begin
    colNames = ["UEmpMed", "PSavert"]
    @df timeSeries[1:5, :] stackedarea(:Date, [:UEmpMed :PSavert], labels=reshape(colNames, (1,2)))
end

# ╔═╡ a3ad4740-6e85-11eb-2fa8-493c076784c6
md"The plot shows the time series variation of the median unemployment duration and the personal savings rate for the first 5 months."

# ╔═╡ ddeb6b64-6b86-11eb-315e-03a33e00803a
md"👉 Write a custom user plot for a normalized stacked area plot."

# ╔═╡ ffb31c0a-6b87-11eb-0d32-d5caf1f4d3c0
md"### Relationship between variables"

# ╔═╡ 03ee3af2-6b88-11eb-1db2-29ef9e3309bf
md"##### Scatter plot"

# ╔═╡ 16ec2c52-6b88-11eb-11d9-a77dbcca747a
@df diamonds[rand(1:nrow(diamonds), 500), :] scatter(:Carat, :Price, xlabel="Carat", ylabel="Price", label="")

# ╔═╡ 2568aa80-6d5b-11eb-3641-55000a09724a
md"After manually fitting a quadratic curve:"

# ╔═╡ 050e8020-6d5b-11eb-07ca-7dd8100d7ea7
begin
    @df diamonds[rand(1:nrow(diamonds), 500), :] scatter(:Carat, :Price, xlabel="Carat", ylabel="Price", label="")
    f(x) = 10^3*x^2 + 4*10^3*x
    plot!(f, 0, 2.5, lw=3, label="Manually fitted curve")
end

# ╔═╡ f6e06960-6e85-11eb-3cc4-73a72b4ceed9
md"As can be seen after fitting the quadratic curve, there is a clear relationship between the two variables, Carat and Price. (The same kind of fit can be obtained by least squares, as sketched below.)"
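# ╔═╡ 00000005-0000-4000-8000-000000000005
# Added sketch cell (placeholder UUID, not listed in the cell order at the end of
# this file): the manual quadratic above can instead be fitted by ordinary least
# squares. Build the Vandermonde matrix V = [x.^2 x 1] and solve min ||V*b - y||^2
# with the backslash operator.
begin
    lsq_df = diamonds[rand(1:nrow(diamonds), 500), :]
    V = hcat(lsq_df.Carat .^ 2, lsq_df.Carat, ones(nrow(lsq_df)))
    b_fit = V \ lsq_df.Price            # least-squares coefficients [a, b, c]
    scatter(lsq_df.Carat, lsq_df.Price, xlabel="Carat", ylabel="Price", label="")
    plot!(x -> b_fit[1] * x^2 + b_fit[2] * x + b_fit[3], 0, 2.5, lw=3, label="Least-squares fit")
end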
# ╔═╡ fd71a7b0-6d1f-11eb-22cf-79bfd98e22e0
@df diamonds[rand(1:nrow(diamonds), 500), :] scatter(:X, :Price, xlabel="X", ylabel="Price", label="")

# ╔═╡ b8a6b572-6b89-11eb-3f5d-bde166c607c9
jobj

# ╔═╡ 937a3e88-6b8c-11eb-227b-fff79274c07a
md"##### Time series line plot"

# ╔═╡ ea58b956-6b89-11eb-3ea1-cb07b1b94e67
covid_df = reduce(vcat, DataFrame.(jobj["cases_time_series"]))

# ╔═╡ 1d39aa0c-6b8b-11eb-0f5f-4badfee59bbe
begin
    covid_df[!, :dailyconfirmed] = parse.(Int, covid_df[!, :dailyconfirmed])
    covid_df[!, :dailydeceased] = parse.(Int, covid_df[!, :dailydeceased])
    covid_df[!, :dailyrecovered] = parse.(Int, covid_df[!, :dailyrecovered])
end

# ╔═╡ 6101d32e-6b8b-11eb-111c-bfe9b1ccfa7f
covid_df[!, :dateymd] = Date.(covid_df.dateymd, "yyyy-mm-dd")

# ╔═╡ d9dbc6fe-6b8b-11eb-2248-29d7553b87eb
@df covid_df plot(:dateymd, :dailydeceased, line=2, label="Deceased")

# ╔═╡ 9dc3942e-6b8a-11eb-1349-a78a72d26992
begin
    @df covid_df plot(:dateymd, :dailydeceased, line=2, label="Deceased", color=:red)
    @df covid_df plot!(:dateymd, :dailyrecovered, line=2, label="Recovered", color=:green)
    @df covid_df plot!(:dateymd, :dailyconfirmed, line=2, label="Confirmed", color=:orange)
end

# ╔═╡ 3808e72c-6b88-11eb-2ed7-f76c352a67bc
md"##### Correlation plot"

# ╔═╡ ca585eee-864e-11eb-0513-d79a3422d031
default(size = (600, 500))

# ╔═╡ c4c6f8be-864e-11eb-0d25-a30adea37da6
@df iris corrplot(cols(1:4), grid = false)

# ╔═╡ c7b3711a-864e-11eb-12f2-f17247ba833c
default(size =(600, 300))

# ╔═╡ 8cff75ee-87cc-11eb-3939-395e970a8ed1
md"![](https://www.mymarketresearchmethods.com/wp-content/uploads/2013/01/chart-types-choosing-the-right-one.png)

Source: https://www.mymarketresearchmethods.com/wp-content/uploads/2013/01/chart-types-choosing-the-right-one.png"

# ╔═╡ 6bf3c854-87e6-11eb-06df-3d0f67979710
md"Check out the `gapminder` plot in Plotly."

# ╔═╡ 09d3b222-6902-11eb-0571-f7cfbaa3c0db
hint(text, title) = Markdown.MD(Markdown.Admonition("hint", title, [text]))

# ╔═╡ 248ce07a-68ff-11eb-0e88-87c99225f41a
hint(md"Unique string id - we wouldn't call it categorical as the values are unique", "ItemNo")

# ╔═╡ 9df0e186-690d-11eb-208d-3d17e689fc83
hint(md"Categorical and Nominal, as there is no obvious ordering between the colours, though you can have a distance metric between two colours", "Color")

# ╔═╡ 319b0bb2-690e-11eb-1ddd-73e27aefdeb6
hint(md"Categorical - not very useful to differentiate between Ordinal and Nominal variables in the case of just two values", "Sleeves")

# ╔═╡ 6b16b95a-693a-11eb-1812-0d5d5e80728c
hint(md"Categorical and ordinal - can be ordered (though the distance between ratings is not clear)", "SurveyRating")

# ╔═╡ b62f69a2-693a-11eb-3372-93062a618960
hint(md"Numerical - Discrete, since it is between 1 and 5. There is the confusion with Categorical (ordinal), but if we are going to take averages of stars etc., thinking of it as discrete numerical is more appropriate.", "WebsiteRating")

# ╔═╡ de361acc-693a-11eb-110b-4d7c760d3cb6
hint(md"Numerical - Continuous. Usually quantised to rupees or paise, but best to think of them as continuous values within this range. Discrete is used when we expect a large number of values to have the same value (e.g. shoe size).", "Price")

# ╔═╡ Cell order:
# ╟─717e19a6-50d6-11eb-011b-ddf29b2946c8
# ╟─d12d4384-50dd-11eb-0ecc-65623c8febfe
# ╟─d9b48c24-50dd-11eb-2253-e1f0eeb004f2
# ╟─b8c7b436-6b7d-11eb-0817-eb3fb7f09f95
# ╟─52540bba-68fc-11eb-1ecc-03ad619fe826
# ╠═5b22ecc2-68fc-11eb-0e6e-f38d2f761d95
# ╟─a6239d94-68fc-11eb-0162-d5707c186fd4
# ╟─59f0a50c-690d-11eb-3a6d-f50e01402f32
# ╟─248ce07a-68ff-11eb-0e88-87c99225f41a
# ╟─9df0e186-690d-11eb-208d-3d17e689fc83
# ╟─319b0bb2-690e-11eb-1ddd-73e27aefdeb6
# ╟─6b16b95a-693a-11eb-1812-0d5d5e80728c
# ╟─b62f69a2-693a-11eb-3372-93062a618960
# ╟─de361acc-693a-11eb-110b-4d7c760d3cb6
# ╟─02ca2edc-50de-11eb-2cc5-2772d7d92fe6
# ╠═f2c54404-50dd-11eb-05cd-a94912f14d1e
# ╠═434cf43c-50de-11eb-2733-71ddf1bb8b49
# ╟─85f580d0-6dfb-11eb-2dbf-db79082b85b7
# ╠═7d2d18ee-50de-11eb-38a1-cd402e4674b2
# ╟─ea181dc0-6dfb-11eb-0ed6-2d29558cd012
# ╠═bcdc7076-50ef-11eb-09d4-47ab5436e1d9
# ╟─a867dcec-50f0-11eb-07f2-7d34e62cd42e
# ╟─adda9d42-50f0-11eb-266d-75d0464b2f23
# ╟─336536b0-50f2-11eb-1dbd-836f130731ac
# ╠═60e6fd0e-50d8-11eb-37d9-add4c3dfcb1b
# ╠═3fb759e6-50d9-11eb-36a4-31ed7766c5b3
# ╠═ad6fa5de-5102-11eb-0329-8b5d52ba864f
# ╟─4e488752-50f2-11eb-06c2-83cad3489775
# ╠═43774790-50da-11eb-05f3-4f635e934d6a
# ╟─433bba92-50d8-11eb-05f0-d31772d405e0
# ╟─9155c80e-50f0-11eb-2c14-13fce7a14b9d
# ╟─48e55444-50d8-11eb-3861-73237803775c
# ╟─068c716c-50f2-11eb-35f1-cf749745b296
# ╠═2df8b140-50f0-11eb-1882-2964dd07d1b3
# ╟─802ae7a6-50f2-11eb-1e52-2d4f62abf0e7
# ╟─2683019c-6b81-11eb-36a2-69467c7ee7d6
# ╠═4d06a470-50f0-11eb-1369-2f743c3cbfc9
# ╠═da9c31d6-8644-11eb-237a-8f711cbf2963
# ╟─5a55d03c-6b81-11eb-007e-bde94e3eb46a
# ╠═6577ec58-50f0-11eb-0277-6b791ab01895
# ╟─feaf8b4a-8644-11eb-2817-4bc99b6790b7
# ╟─b76d303e-50f2-11eb-0830-1bf573e84375
# ╠═4c2714ba-50f3-11eb-0aff-679d40f27bf4
# ╠═6bf51fa8-50f3-11eb-378f-bf3ff7156539
# ╟─283cb0be-50f3-11eb-35a8-4dc20e9a05f1
# ╠═2d0c32be-50f2-11eb-1825-9b9648ccfcdc
# ╟─2a7f2460-8645-11eb-37f5-89cd05d1af48
# ╟─574527c4-8645-11eb-0b26-c16a97b9ec2d
# ╟─98153fa4-50f4-11eb-0123-d7f01007697b
# ╟─264cba2a-50f5-11eb-1e9b-178bfbb7e7ab
# ╟─9dc8fb8e-50f4-11eb-1489-6f4d3dd8802a
# ╠═f3b3f4d8-50f4-11eb-05b2-d1c9164b21a3
# ╟─5440fd68-50f6-11eb-284b-5f5b65ade5b5
# ╟─813a253e-8645-11eb-1663-03625a5483db
# ╟─908b3186-8645-11eb-1309-71bc7ab6be00
# ╟─fd01f110-6bce-11eb-376d-71e7e338caf9
# ╠═6336faa0-6bcc-11eb-0dec-7db99fe0d31b
# ╠═8ab2bf50-6bcd-11eb-01ff-2d884f36fb5c
# ╠═9309c1d0-6bcd-11eb-1f82-a5076b557033
# ╟─b9ead7b0-6bcf-11eb-2573-d1a2cb1efe8a
# ╟─b4121ff8-8646-11eb-330f-c792896a1577
# ╠═ab2c6f18-8646-11eb-347c-47954af99437
# ╟─7b917128-50f8-11eb-04c0-7bee72532509
# ╠═78c7ae98-50f7-11eb-3bc9-e30d06686614
# ╠═75fd8092-50f7-11eb-385b-df20f0b26649
# ╟─8e7dbf8a-50f8-11eb-0cd5-9b1caa04ae2f
# ╟─9dfbf86e-50f8-11eb-2347-4742df778537
# ╟─be9b6320-50f8-11eb-3031-eb88eb81c05f
# ╟─301441fc-50f9-11eb-1c29-9f4f1acfd93c
# ╟─564d14ca-50f9-11eb-18a0-4d977b316e31
# ╟─bbe402b2-50f9-11eb-3366-573d0cdaea70
# ╟─dc80ce26-50f9-11eb-1dc4-efb97abc75f3
# ╟─f8b79cf0-50fc-11eb-2d1d-31489e04471f
# ╟─1c809a38-50fd-11eb-281a-e3cc8d990d6d
# ╟─2f3ed2ba-8646-11eb-0de1-1d04e888fc2d
# ╟─a36e3a72-50f4-11eb-311a-e10e997b2d9d
# ╟─a8580752-50f4-11eb-14c4-93eab7aa6ecb
# ╠═4187e3de-50ff-11eb-18c0-93284cb3ad44
# ╠═5303f760-50ff-11eb-2641-d397037dd163
# ╟─74959a7e-6e07-11eb-0ff2-cf4b49a62e8c
# ╠═6d6b50e4-50ff-11eb-002a-8d46e701af83
# ╟─f6f1f2d0-6e07-11eb-021b-9dbaf45f1e3f
# ╠═8a3911ca-50ff-11eb-362c-6f5ea92d27aa
# ╟─5b66ce72-6e08-11eb-2fbe-bf4fb21a18f7
# ╠═a89b96b0-50ff-11eb-12fd-19463d92b20d
# ╟─d2dbffa0-50ff-11eb-068c-47e957435cce
# ╠═c12d9840-50ff-11eb-3604-ddc6cc76e839
# ╟─1f35feaa-5100-11eb-2024-ab35ba3e9fe8
# ╟─4125dfd0-5100-11eb-223a-55b94f998466
# ╟─497f2132-5100-11eb-2ed0-2512baedf8b1
# ╟─5bb4a3ee-5101-11eb-2456-39821acd0bb0
# ╠═8df80d74-5100-11eb-207c-53d522115b08
# ╟─367f5b64-5104-11eb-1d59-416bdbd0fbf3
# ╟─30eb4c8a-5179-11eb-10bd-23d662061237
# ╠═0e15b664-5179-11eb-03cf-fdd03e1fa960
# ╟─4c0b2814-5179-11eb-3afc-6b96b70c4017
# ╠═47669692-5104-11eb-0321-efacd2ee6083
# ╟─b361ed4a-5101-11eb-3e24-67e474420a96
# ╠═d58b795a-5103-11eb-1a5a-cd932ad9ea5f
# ╟─a3b1651e-5102-11eb-2b7c-7506f3ec1042
# ╠═4584d1c6-5105-11eb-24a4-0d471533d62c
# ╠═9799cb84-5177-11eb-0631-7beed264b449
# ╟─afc41804-8647-11eb-0245-655660d79948
# ╠═ab34502e-8647-11eb-1cf0-4ba59ec56871
# ╟─6f801c82-6e09-11eb-0a00-b5f75adab724
# ╠═f972e120-6d32-11eb-3237-a74316b8171f
# ╟─fdd1d920-6e1c-11eb-13d9-29574511937e
# ╟─9321675e-5179-11eb-2df6-e54ed50b6a17
# ╟─965e8668-5179-11eb-0718-eb096753ad66
# ╟─117fe8a0-517a-11eb-132f-eb3211dde0c4
# ╟─b6e26fde-517a-11eb-34ba-b94752f8f4f1
# ╠═1dca2170-517a-11eb-0be4-c1cc0e228971
# ╟─86cca6f4-517d-11eb-0104-65589fdd58a4
# ╟─98c9a594-517a-11eb-1fe4-d5221eda3dd1
# ╟─931e2c52-517d-11eb-2ed6-1fc344727571
# ╠═bccce282-517d-11eb-1894-070fa44f4134
# ╠═6fe12a28-517d-11eb-2a2d-7f9a4c420d89
# ╟─00fdae4c-517d-11eb-1a34-650725b08271
# ╟─efb334e2-517a-11eb-274e-29cd6b00216c
# ╠═b270a6ec-5179-11eb-2860-95fe1f7e20a6
# ╟─0ea5516c-517d-11eb-042a-8dbd4acba7b0
# ╠═1288c71a-517c-11eb-2b60-79c4abf10e06
# ╟─f8cca5fc-517c-11eb-332e-0d89d4691ac8
# ╠═767041dc-517b-11eb-2e15-83bfa870df15
# ╟─379cbe3e-517d-11eb-346e-195d12bf8ae0
# ╟─5ed991c2-6b84-11eb-10c7-e5d34f7302c0
# ╟─05c7911c-517e-11eb-2a7a-3777272d76fd
# ╟─0a8a070c-517e-11eb-0201-99f2f6b4f89c
# ╟─2a1c10f6-517e-11eb-3c00-0161244ad098
# ╠═5034b84e-517e-11eb-126f-c7a48c074a5f
# ╟─dafeda92-517f-11eb-2c15-d70012e98d61
# ╠═72d70e92-517e-11eb-1b0c-29015c7b91b9
# ╟─10a2c44c-5180-11eb-2a9e-8ffd22114b3f
# ╠═eec70900-517f-11eb-0a5a-0b97ab4f572e
# ╟─df9fc7d4-5182-11eb-2314-7d91be2355e6
# ╟─1fec4acc-5180-11eb-0328-bfa2a9f84994
# ╟─d5cd9782-5181-11eb-0965-411867c9a03a
# ╠═26f2c5fc-5181-11eb-2b8a-1f2edee76a9e
# ╟─16622b78-5182-11eb-24bf-cdf3500cd2f3
# ╟─7f865bec-5182-11eb-3880-e10b9c4a685d
# ╠═2a18a110-5182-11eb-1769-ed392bb4284d
# ╟─0dc19ee6-5184-11eb-0d29-113e921b264a
# ╟─9f7ee908-5184-11eb-23bc-a9fe1c3749ce
# ╟─15835914-5184-11eb-336d-b3de2228abfb
# ╟─1def7b32-5184-11eb-06d4-7b15b0607708
# ╟─b57bc3c0-5184-11eb-1d1a-6763c47721a0
# ╠═36831b52-5184-11eb-0244-e9fbcbaafef3
# ╟─c7498fec-5184-11eb-2235-11fe7203feed
# ╠═612b5e5c-5184-11eb-3538-edff43e08d7e
# ╟─f2dc3286-5184-11eb-1ee1-9fcabc42c007
# ╟─c95cc3b6-5185-11eb-2102-7bc6b10a9b0a
# ╠═f8a1eae8-5185-11eb-21b0-35381169101a
# ╟─509f0e74-5186-11eb-24ca-154900f666aa
# ╠═89a98942-519a-11eb-16d9-630f9a769d63
# ╠═17bc0fa4-51bf-11eb-14bb-0976b8c6a92a
# ╠═63c05f88-51bf-11eb-065b-7d352b5170cb
# ╟─1f67ee8a-6b7f-11eb-3296-190862654a02
# ╟─39aa4b94-6b7f-11eb-1763-7b5731db0201
# ╟─76884c96-6b7f-11eb-12ab-95f92a9983d7
# ╠═4a217812-6b7f-11eb-24b4-074c956d5577
# ╟─3bf1b230-6c6e-11eb-2023-6176f077cdf5
# ╠═ce243800-6c6c-11eb-1863-e12032c778d2
# ╟─5a6e9110-6c6e-11eb-0aaa-67f45bb7423d
# ╠═ad2c3510-6c6e-11eb-0f9b-bbe03d4e05c0
# ╟─c71a83d0-6e1e-11eb-20db-65f12fe727d8
# ╟─0d209c30-6c70-11eb-2cf0-73a491799c73
# ╠═5670ebb0-6c70-11eb-0efc-f93bd094bc14
# ╟─08bdf380-6e1f-11eb-3176-e13a76bad6ef
# ╟─d707654a-51bf-11eb-3eb2-7fed48a711ee
# ╠═588566fc-51c1-11eb-018f-95a038ccbbc2
# ╠═7261d362-51c1-11eb-3110-7bd061ebabba
# ╠═e1e84b26-51c1-11eb-2acc-bf44a69a41e5
# ╟─ce379972-51c3-11eb-3fe0-ed1fa76288db
# ╟─96f72440-6e20-11eb-10f1-4dfe2d368817
# ╠═ec9fc67c-51bf-11eb-0802-35368653faa9
# ╟─b29c03f0-6e20-11eb-35c5-53d584f71426
# ╠═5fe3f886-51c2-11eb-2a37-b3d52ef5fd08
# ╠═8290d584-51c2-11eb-3d27-f726e21512d2
# ╟─df5e4c40-6e20-11eb-38e6-ed76c461a1cf
# ╟─7f2c8750-6d09-11eb-07b4-af47ed3c4003
# ╠═0b9e3ab0-6d5c-11eb-04cc-8d9f0bb46fb3
# ╠═3ba3caf0-6d0b-11eb-2696-29db4b882fd8
# ╠═8a1b12b2-6d0b-11eb-290c-6798b5c8f64e
# ╠═a965eff2-6d0b-11eb-26bb-23a3e632570f
# ╟─c49ab754-6b82-11eb-097a-e9b6ebae0c6f
# ╟─d0175272-6b82-11eb-068e-e76e36973914
# ╠═f222c498-6b82-11eb-1008-ed1fed165c64
# ╟─cc01d6c0-6c78-11eb-2e06-eb859e7eaa91
# ╠═e5e7c530-6c74-11eb-07cf-a9dff5b62b8a
# ╟─f35f8190-6c78-11eb-204a-21b9d6c13e2d
# ╠═fd0990f0-6c78-11eb-2c19-bf8cf669cfbc
# ╟─9d3c1a2e-6e21-11eb-0a4c-ad31b64a0663
# ╟─e1f6e600-6b83-11eb-37a7-e169b81ba790
# ╟─2749759e-6b87-11eb-13b8-0390abfd0682
# ╟─e88efa40-864d-11eb-1ae0-6b5b81782c65
# ╠═1dc71cce-6b87-11eb-0cd1-3179162a8f91
# ╟─fa44a096-864d-11eb-36e5-83f1777ed417
# ╠═47cb6a70-6c7c-11eb-2d97-297689c50d71
# ╟─ed5b0af0-6e83-11eb-2a0f-2fbf72bfd4af
# ╟─851f7c02-6b86-11eb-1f53-adc35eba6146
# ╟─8aa43b5c-6b86-11eb-0e87-ebea1e7c9e79
# ╠═a1d47864-6b86-11eb-14be-d9cc09db1e18
# ╠═ba21b4c2-6b86-11eb-34ea-d36a5f02192a
# ╟─5eb5b28e-6d1c-11eb-135a-e39ca2886a7e
# ╠═6c93ceb0-6d1c-11eb-27c5-19a0d1bfb20e
# ╠═49a53dd0-6d1c-11eb-0c86-951498741f02
# ╟─a3ad4740-6e85-11eb-2fa8-493c076784c6
# ╟─ddeb6b64-6b86-11eb-315e-03a33e00803a
# ╟─ffb31c0a-6b87-11eb-0d32-d5caf1f4d3c0
# ╟─03ee3af2-6b88-11eb-1db2-29ef9e3309bf
# ╠═16ec2c52-6b88-11eb-11d9-a77dbcca747a
# ╟─2568aa80-6d5b-11eb-3641-55000a09724a
# ╠═050e8020-6d5b-11eb-07ca-7dd8100d7ea7
# ╟─f6e06960-6e85-11eb-3cc4-73a72b4ceed9
# ╠═fd71a7b0-6d1f-11eb-22cf-79bfd98e22e0
# ╠═12e89740-6b89-11eb-27fd-e7d7c4b81637
# ╠═b8a6b572-6b89-11eb-3f5d-bde166c607c9
# ╟─937a3e88-6b8c-11eb-227b-fff79274c07a
# ╠═ea58b956-6b89-11eb-3ea1-cb07b1b94e67
# ╠═1d39aa0c-6b8b-11eb-0f5f-4badfee59bbe
# ╠═5ed5beba-6b8b-11eb-38d6-b1542bff022c
# ╠═6101d32e-6b8b-11eb-111c-bfe9b1ccfa7f
# ╠═d9dbc6fe-6b8b-11eb-2248-29d7553b87eb
# ╠═9dc3942e-6b8a-11eb-1349-a78a72d26992
# ╟─3808e72c-6b88-11eb-2ed7-f76c352a67bc
# ╠═ca585eee-864e-11eb-0513-d79a3422d031
# ╠═c4c6f8be-864e-11eb-0d25-a30adea37da6
# ╠═c7b3711a-864e-11eb-12f2-f17247ba833c
# ╟─8cff75ee-87cc-11eb-3939-395e970a8ed1
# ╟─6bf3c854-87e6-11eb-06df-3d0f67979710
# ╠═09d3b222-6902-11eb-0571-f7cfbaa3c0db
{"hexsha": "893fdc1b04d69ea43f3011d888cab897cf119bb6", "size": 45217, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "tutorials/07_Visualisation.jl", "max_stars_repo_name": "sashedher/iitm-cs6741", "max_stars_repo_head_hexsha": "ff626ceda12486d5885e6cbb1ef0d3d91b2fc392", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-13T10:07:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-13T10:07:51.000Z", "max_issues_repo_path": "tutorials/07_Visualisation.jl", "max_issues_repo_name": "sashedher/iitm-cs6741", "max_issues_repo_head_hexsha": "ff626ceda12486d5885e6cbb1ef0d3d91b2fc392", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorials/07_Visualisation.jl", "max_forks_repo_name": "sashedher/iitm-cs6741", "max_forks_repo_head_hexsha": "ff626ceda12486d5885e6cbb1ef0d3d91b2fc392", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2563868613, "max_line_length": 333, "alphanum_fraction": 0.7426189265, "num_tokens": 20560}
import torch as th
import numpy as np

from reversible2.util import np_to_var
from reversible2.gradient_penalty import gradient_penalty
from reversible2.ot_exact import ot_euclidean_loss_for_samples
from reversible2.constantmemory import clear_ctx_dicts
from reversible2.ot_exact import ot_euclidean_loss_memory_saving_for_samples
from reversible2.gaussian import (
    transform_gaussians_by_dirs,
    get_gaussian_log_probs,
)
from reversible2.sliced import sliced_from_samples


class Trainer(object):
    def __init__(
        self,
        feature_model,
        adv_model,
        class_dist,
        optim_feature_model,
        optim_adv_model,
        optim_class_dist,
    ):
        self.feature_model = feature_model
        self.adv_model = adv_model
        self.class_dist = class_dist
        self.optim_feature_model = optim_feature_model
        self.optim_adv_model = optim_adv_model
        self.optim_class_dist = optim_class_dist

    def train(self, train_inputs, gen_update, loss_on_outs):
        result = dict()
        feature_model = self.feature_model
        adv_model = self.adv_model
        class_dist = self.class_dist
        optim_feature_model = self.optim_feature_model
        optim_adv_model = self.optim_adv_model
        optim_class_dist = self.optim_class_dist
        optim_feature_model.zero_grad()
        optim_class_dist.zero_grad()
        optim_adv_model.zero_grad()
        # restrict the latent-space OT losses to the class-relevant dimensions,
        # falling back to all dimensions (this mirrors OTTrainer below; without
        # it, i_class_inds was undefined in the generator branch)
        if hasattr(class_dist, "i_class_inds"):
            i_class_inds = class_dist.i_class_inds
        else:
            i_class_inds = list(range(len(class_dist.get_mean_std(0)[0])))
        for i_class in range(len(train_inputs)):
            y = np_to_var([i_class]).cuda()
            class_ins = train_inputs[i_class]
            other_class_ins = train_inputs[1 - i_class]
            with th.set_grad_enabled(gen_update):
                samples = class_dist.get_samples(
                    i_class, len(train_inputs[i_class]) * 4
                )
                inverted = feature_model.invert(samples)
            with th.set_grad_enabled(gen_update):
                outs = feature_model(class_ins)
                changed_to_other_class = class_dist.change_to_other_class(
                    outs, i_class_from=i_class, i_class_to=1 - i_class
                )
                other_inverted = feature_model.invert(changed_to_other_class)
            if not gen_update:
                # critic update (WGAN-style): push real scores up, fake scores
                # down, with a gradient penalty weighted by 100
                score_fake = adv_model(inverted.detach(), y)
                score_real = adv_model(class_ins, y)
                gradient_loss = gradient_penalty(
                    adv_model,
                    class_ins,
                    inverted[: (len(class_ins))].detach(),
                    y,
                )
                d_loss = (
                    -score_real.mean()
                    + score_fake.mean()
                    + gradient_loss * 100
                )
                d_loss.backward()
                score_real_other = adv_model(other_class_ins.detach(), 1 - y)
                score_fake_other = adv_model(other_inverted.detach(), 1 - y)
                gradient_loss_other = gradient_penalty(
                    adv_model,
                    other_class_ins,
                    other_inverted[: (len(class_ins))].detach(),
                    1 - y,
                )
                d_loss_other = (
                    -score_real_other.mean()
                    + score_fake_other.mean()
                    + gradient_loss_other * 100
                )
                d_loss_other.backward()
                # Clip gradient
                d_grad_norm = np.mean(
                    [
                        th.nn.utils.clip_grad_norm_([p], 100)
                        for p in adv_model.parameters()
                    ]
                )
                d_grad = np.mean(
                    [th.sum(p.grad ** 2).item() for p in adv_model.parameters()]
                )
                wd_d = -(-score_real.mean() + score_fake.mean()).item()
                result["wd_d_{:d}".format(i_class)] = wd_d
            else:
                # generator update: fool the critic; optionally also match the
                # latent codes to the class distributions with an OT loss
                ot_out_loss = ot_euclidean_loss_for_samples(
                    samples[:, i_class_inds],
                    outs[:, i_class_inds],
                )
                other_samples = class_dist.get_samples(
                    1 - i_class, len(train_inputs[1 - i_class]) * 4
                )
                ot_out_loss_other = ot_euclidean_loss_for_samples(
                    other_samples[:, i_class_inds],
                    changed_to_other_class[:, i_class_inds],
                )
                score_fake = adv_model(inverted, y)
                score_fake_other = adv_model(other_inverted, 1 - y)
                g_loss = -th.mean(score_fake) - th.mean(score_fake_other)
                if loss_on_outs:
                    g_loss = g_loss + ot_out_loss + ot_out_loss_other
                g_loss.backward()
                # Clip gradient
                g_grad_norm = np.mean(
                    [
                        th.nn.utils.clip_grad_norm_([p], 100)
                        for p in feature_model.parameters()
                    ]
                )
                g_grad = np.mean(
                    [
                        th.sum(p.grad ** 2).item()
                        for p in feature_model.parameters()
                    ]
                )
            clear_ctx_dicts(feature_model)
        if not gen_update:
            optim_adv_model.step()
        else:
            optim_feature_model.step()
            optim_class_dist.step()
        if gen_update:
            result.update(
                {
                    "g_loss": g_loss.item(),
                    "o_fake": th.mean(score_fake).item(),
                    "g_grad": g_grad,
                    "g_grad_norm": g_grad_norm,
                    "ot_out_loss": ot_out_loss.item(),
                    "ot_out_loss_other": ot_out_loss_other.item(),
                }
            )
        else:
            result.update(
                {
                    "d_loss": d_loss.item(),
                    "grad_loss": gradient_loss.item(),
                    "o_real": th.mean(score_real).item(),
                    "o_fake": th.mean(score_fake).item(),
                    "d_grad": d_grad,
                    "d_grad_norm": d_grad_norm,
                }
            )
        return result


class OTTrainer(object):
    def __init__(
        self, feature_model, class_dist, optim_feature_model, optim_class_dist
    ):
        self.feature_model = feature_model
        self.class_dist = class_dist
        self.optim_feature_model = optim_feature_model
        self.optim_class_dist = optim_class_dist

    def train(self, train_inputs, loss_on_outs):
        result = dict()
        feature_model = self.feature_model
        class_dist = self.class_dist
        optim_feature_model = self.optim_feature_model
        optim_class_dist = self.optim_class_dist
        optim_feature_model.zero_grad()
        optim_class_dist.zero_grad()
        n_min = min([len(t) for t in train_inputs])
        if hasattr(class_dist, 'i_class_inds'):
            i_class_inds = class_dist.i_class_inds
        else:
            i_class_inds = list(range(len(class_dist.get_mean_std(0)[0])))
        for i_class in range(len(train_inputs)):
            class_ins = train_inputs[i_class][:n_min]
            other_class_ins = train_inputs[1 - i_class][:n_min]
            samples = class_dist.get_samples(i_class, len(class_ins) * 4)
            inverted = feature_model.invert(samples)
            outs = feature_model(class_ins)
            changed_to_other_class = class_dist.change_to_other_class(
                outs, i_class_from=i_class, i_class_to=1 - i_class
            )
            other_inverted = feature_model.invert(changed_to_other_class)
            # input-space OT losses between real inputs and inverted samples
            ot_in_loss = ot_euclidean_loss_memory_saving_for_samples(
                class_ins.view(len(class_ins), -1),
                inverted.view(len(inverted), -1),
            )
            ot_in_loss_other = ot_euclidean_loss_memory_saving_for_samples(
                other_class_ins.view(len(other_class_ins), -1),
                other_inverted.view(len(other_inverted), -1),
            )
            if loss_on_outs:
                ot_out_loss = ot_euclidean_loss_for_samples(
                    outs[:, i_class_inds],
                    samples[:, i_class_inds],
                )
            else:
                ot_out_loss = th.zeros(1)
            other_samples = class_dist.get_samples(
                1 - i_class, len(other_class_ins) * 4
            )
            if loss_on_outs:
                ot_out_loss_other = ot_euclidean_loss_for_samples(
                    changed_to_other_class[:, i_class_inds],
                    other_samples[:, i_class_inds],
                )
            else:
                ot_out_loss_other = th.zeros(1)
            g_loss = ot_in_loss + ot_in_loss_other
            if loss_on_outs:
                g_loss = g_loss + ot_out_loss + ot_out_loss_other
            g_loss.backward()
            # Clip gradient
            g_grad_norm = np.mean(
                [
                    th.nn.utils.clip_grad_norm_([p], 100)
                    for p in feature_model.parameters()
                ]
            )
            g_grad = np.mean(
                [th.sum(p.grad ** 2).item() for p in feature_model.parameters()]
            )
            result["ot_in_loss_{:d}".format(i_class)] = ot_in_loss.item()
            result[
                "ot_in_loss_other_{:d}".format(i_class)
            ] = ot_in_loss_other.item()
            clear_ctx_dicts(feature_model)
        optim_feature_model.step()
        optim_class_dist.step()
        result.update(
            {
                "g_loss": g_loss.item(),
                "g_grad": g_grad,
                "g_grad_norm": g_grad_norm,
                "ot_out_loss": ot_out_loss.item(),
                "ot_out_loss_other": ot_out_loss_other.item(),
            }
        )
        return result


class CLFTrainer(object):
    def __init__(
        self,
        feature_model,
        clf,
        dist,
        optim_model,
        optim_clf,
        optim_dist,
        outs_loss,
    ):
        assert outs_loss in ["sliced", "likelihood"]
        self.feature_model = feature_model
        self.clf = clf
        self.dist = dist
        self.optim_model = optim_model
        self.optim_clf = optim_clf
        self.optim_dist = optim_dist
        self.outs_loss = outs_loss

    def train(self, train_inputs, loss_on_outs):
        result = {}
        for i_class in range(2):
            outs = self.feature_model(train_inputs[i_class])
            soft_probs = self.clf(outs)
            # classification loss of the classifier on this class's examples
            clf_loss = th.nn.functional.nll_loss(
                soft_probs,
                th.ones(len(outs), device=soft_probs.device, dtype=th.int64)
                * i_class,
            )
            if self.outs_loss == "likelihood":
                m, s = self.dist.get_mean_std(i_class)
                transformed_means, transformed_stds = transform_gaussians_by_dirs(
                    m.unsqueeze(0), s.unsqueeze(0), self.clf.get_dirs().detach()
                )
                outs_projected = self.clf.project_outs(outs, detach_dirs=True)
                log_probs = get_gaussian_log_probs(
                    transformed_means[0],
                    th.log(transformed_stds[0]),
                    outs_projected,
                )
                subspace_loss = -th.mean(log_probs)
            else:
                assert self.outs_loss == "sliced"
                samples = self.dist.get_samples(i_class, len(outs) * 3)
                subspace_loss = sliced_from_samples(
                    outs, samples, n_dirs=0, adv_dirs=self.clf.get_dirs().detach(),
                )
            loss = clf_loss + subspace_loss
            self.optim_dist.zero_grad()
            self.optim_model.zero_grad()
            self.optim_clf.zero_grad()
            loss.backward()
            self.optim_clf.step()
            if loss_on_outs:
                self.optim_model.step()
                self.optim_dist.step()
            result["clf_loss_{:d}".format(i_class)] = clf_loss.item()
            result["subspace_loss_{:d}".format(i_class)] = subspace_loss.item()
        return result
{"hexsha": "b4375c538122e925038a779b26aabd9843698f80", "size": 12219, "ext": "py", "lang": "Python", "max_stars_repo_path": "reversible2/training.py", "max_stars_repo_name": "robintibor/reversible2", "max_stars_repo_head_hexsha": "e6fea33ba41c7f76ee50295329b4ef27b879a7fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reversible2/training.py", "max_issues_repo_name": "robintibor/reversible2", "max_issues_repo_head_hexsha": "e6fea33ba41c7f76ee50295329b4ef27b879a7fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reversible2/training.py", "max_forks_repo_name": "robintibor/reversible2", "max_forks_repo_head_hexsha": "e6fea33ba41c7f76ee50295329b4ef27b879a7fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9472049689, "max_line_length": 82, "alphanum_fraction": 0.5322857844, "include": true, "reason": "import numpy", "num_tokens": 2432}
# Licensed under an MIT style license -- see LICENSE.md

import numpy as np
import copy

__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]


def paths_to_key(key, dictionary, current_path=None):
    """Return the path to a key stored in a nested dictionary

    Parameters
    ----------
    key: str
        the key that you would like to find
    dictionary: dict
        the nested dictionary that has the key stored somewhere within it
    current_path: str, optional
        the current level in the dictionary
    """
    if current_path is None:
        current_path = []

    for k, v in dictionary.items():
        if k == key:
            yield current_path + [key]
        else:
            if isinstance(v, dict):
                path = current_path + [k]
                for z in paths_to_key(key, v, path):
                    yield z


def convert_value_to_string(dictionary):
    """Convert all values in a nested dictionary to strings

    Parameters
    ----------
    dictionary: dict
        nested dictionary whose values will be converted
    """
    for key, value in dictionary.items():
        if isinstance(value, dict):
            convert_value_to_string(value)
        else:
            dictionary.update({key: str(value)})
    return dictionary


def convert_list_to_item(dictionary):
    """Convert all nested lists of a single value to an item

    Parameters
    ----------
    dictionary: dict
        nested dictionary with nested lists
    """
    from pesummary.utils.array import Array

    for key, value in dictionary.items():
        if isinstance(value, dict):
            convert_list_to_item(value)
        else:
            if isinstance(value, (list, np.ndarray, Array)):
                if len(value) == 1 and isinstance(value[0], bytes):
                    dictionary.update({key: value[0].decode("utf-8")})
                elif len(value) == 1:
                    dictionary.update({key: value[0]})
    return dictionary


def load_recursively(key, dictionary):
    """Return an entry in a nested dictionary for a key of format 'a/b/c/d'

    Parameters
    ----------
    key: str
        key of format 'a/b/c/d'
    dictionary: dict
        the dictionary that has the key stored
    """
    if "/" in key:
        key = key.split("/")
    if isinstance(key, (str, float)):
        key = [key]
    if key[-1] in dictionary.keys():
        try:
            converted_dictionary = convert_list_to_item(
                dictionary[key[-1]]
            )
            yield converted_dictionary
        except AttributeError:
            yield dictionary[key[-1]]
    else:
        old, new = key[0], key[1:]
        for z in load_recursively(new, dictionary[old]):
            yield z


def edit_dictionary(dictionary, path, value):
    """Replace an entry in a nested dictionary

    Parameters
    ----------
    dictionary: dict
        the nested dictionary that you would like to edit
    path: list
        the path to the key that you would like to edit
    value:
        the replacement
    """
    from functools import reduce
    from operator import getitem

    edit = dictionary.copy()
    reduce(getitem, path[:-1], edit)[path[-1]] = value
    return edit


class Dict(dict):
    """Base nested dictionary class.

    Parameters
    ----------
    value_class: func, optional
        Class you wish to use for the nested dictionary
    value_columns: list, optional
        Names for each column in value_class to be stored as properties
    **kwargs: dict
        All other kwargs are turned into properties of the class. 
Key is the name of the property
    """
    def __init__(
        self, *args, value_class=np.array, value_columns=None, _init=True,
        make_dict_kwargs={}, logger_warn="warn", latex_labels={},
        extra_kwargs={}, **kwargs
    ):
        from .parameters import Parameters

        super(Dict, self).__init__()
        if not _init:
            return
        self.logger_warn = logger_warn
        self.all_latex_labels = latex_labels
        if isinstance(args[0], dict):
            if args[0].__class__.__name__ == "SamplesDict":
                self.parameters = list(args[0].keys(remove_debug=False))
                _iterator = args[0].items(remove_debug=False)
            else:
                self.parameters = list(args[0].keys())
                _iterator = args[0].items()
            _samples = [args[0][param] for param in self.parameters]
            try:
                self.samples = np.array(_samples)
            except ValueError:
                self.samples = _samples
        else:
            self.parameters, self.samples = args
            _iterator = zip(self.parameters, self.samples)
        try:
            self.make_dictionary(**make_dict_kwargs)
        except (TypeError, IndexError):
            for key, item in _iterator:
                try:
                    self[key] = value_class(item)
                except Exception:
                    self[key] = value_class(*item)
        if value_columns is not None:
            for key in self.keys():
                if len(value_columns) == self[key].shape[1]:
                    for num, col in enumerate(value_columns):
                        setattr(self[key], col, np.array(self[key].T[num]))
        for key, item in kwargs.items():
            setattr(self, key, item)
        self._update_latex_labels()
        self.extra_kwargs = extra_kwargs
        self.parameters = Parameters(self.parameters)

    def __getitem__(self, key):
        """Return the entry stored under `key`, or, when `key` is a list of
        keys, a copy of this Dict restricted to those keys.
        """
        if isinstance(key, list):
            allowed = [_key for _key in key if _key in self.keys()]
            remove = [_key for _key in self.keys() if _key not in allowed]
            if len(allowed):
                if len(allowed) != len(key):
                    import warnings

                    warnings.warn(
                        "Only returning a dict with keys: {} as not all keys "
                        "are in the {} class".format(
                            ", ".join(allowed), self.__class__.__name__
                        )
                    )
                _self = copy.deepcopy(self)
                for _key in remove:
                    _self.pop(_key)
                return _self
            raise KeyError(
                "The keys: {} are not available in {}. The list of "
                "available keys are: {}".format(
                    ", ".join(key), self.__class__.__name__,
                    ", ".join(self.keys())
                )
            )
        elif isinstance(key, str):
            if key not in self.keys():
                raise KeyError(
                    "{} not in {}. The list of available keys are {}".format(
                        key, self.__class__.__name__, ", ".join(self.keys())
                    )
                )
        return super(Dict, self).__getitem__(key)

    @property
    def latex_labels(self):
        return self._latex_labels

    @property
    def plotting_map(self):
        return {}

    @property
    def available_plots(self):
        return list(self.plotting_map.keys())

    def _update_latex_labels(self):
        """Update the stored latex labels
        """
        self._latex_labels = {
            param: self.all_latex_labels[param] if param in
            self.all_latex_labels.keys() else param for param in
            self.parameters
        }

    def plot(self, *args, type="", **kwargs):
        """Generate a plot for data stored in Dict

        Parameters
        ----------
        *args: tuple
            all arguments are passed to the plotting function
        type: str
            name of the plot you wish to make
        **kwargs: dict
            all additional kwargs are passed to the plotting function
        """
        if type not in self.plotting_map.keys():
            raise NotImplementedError(
                "The {} method is not currently implemented. The allowed "
                "plotting methods are {}".format(
                    type, ", ".join(self.available_plots)
                )
            )
        return self.plotting_map[type](*args, **kwargs)

    def make_dictionary(self, *args, **kwargs):
        """Add the parameters and samples to the class
        """
        raise TypeError
{"hexsha": "df2b9650a906aa15b3f7796ec5558117e9b171c9", "size": 8392, "ext": "py", "lang": "Python", "max_stars_repo_path": "pesummary/utils/dict.py", "max_stars_repo_name": "pesummary/pesummary", "max_stars_repo_head_hexsha": "99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-03T05:58:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T05:58:20.000Z", "max_issues_repo_path": "pesummary/utils/dict.py", "max_issues_repo_name": "pesummary/pesummary", "max_issues_repo_head_hexsha": "99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-13T13:29:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-15T12:45:04.000Z", "max_forks_repo_path": "pesummary/utils/dict.py", "max_forks_repo_name": "pesummary/pesummary", "max_forks_repo_head_hexsha": "99e3c450ecbcaf5a23564d329bdf6e0080f6f2a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-08T08:31:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T14:08:58.000Z", "avg_line_length": 32.2769230769, "max_line_length": 80, "alphanum_fraction": 0.5527883699, "include": true, "reason": "import numpy", "num_tokens": 1722}
import numpy as np

from zero2ml.supervised_learning._base import BaseModel
from zero2ml.utils.evaluation_metrics import MeanSquaredError
from zero2ml.utils.data_transformations import Standardize


class LinearRegression(BaseModel):
    """
    Multiple Linear Regression with input features standardization.

    Parameters
    ----------
    learning_rate: scalar
        Learning rate.

    Attributes
    ----------
    X: array_like
        Training features.
    y: array_like
        Training labels.
    samples_num: scalar
        Number of training samples.
    features_num: scalar
        Number of features.
    input_normalization: object
        Object used to standardize data and containing necessary parameters.
    self.W: array_like
        Weights.
    self.b: array_like
        Intercept term.
    self.training_loss: list
        List containing training loss for each iteration of model training.
    """
    def __init__(self, learning_rate=0.01):

        # Specify model type
        self.model_type = "regressor"

        # Training data
        self.X = None

        # Training targets
        self.y = None

        # Number of training samples
        self.samples_num = None

        # Number of features
        self.features_num = None

        # Input features normalization
        self.input_normalization = Standardize()

        # Weights and intercept
        self.W = None
        self.b = None

        # Learning rate
        self.learning_rate = learning_rate

        # Loss and loss function
        self.training_loss = None
        self.__loss_function = MeanSquaredError()

    def fit(self, X, y, iterations=5000):
        """
        Fit the linear regression model with given training feature inputs.

        Parameters
        ----------
        X: array_like
            Training features.
        y: array_like
            Training labels.
        iterations: scalar
            Number of iterations for gradient descent to converge.
        """
        # Save X and y into model object
        self.X = X.copy()
        self.y = y.copy()

        # Apply normalization on features
        X_norm = self.input_normalization(self.X)

        # Calculate number of samples and features
        self.samples_num, self.features_num = self.X.shape

        # Initiate weights and intercept
        self.W = np.zeros(self.features_num)
        self.b = 0

        # Initiate loss
        self.training_loss = []

        # Gradient descent
        for iteration in range(iterations):

            # Make prediction
            pred = np.dot(X_norm, self.W) + self.b

            # Calculate loss and save to training loss list
            self.training_loss.append(
                self.__loss_function(pred, self.y)
            )

            # Calculate gradients
            dW = - ( 2 * np.dot(X_norm.T, self.y - pred) ) / self.samples_num
            db = - ( 2 * np.sum(self.y - pred) ) / self.samples_num

            # Update weights and intercept
            self.W -= self.learning_rate * dW
            self.b -= self.learning_rate * db

    def predict(self, X):
        """
        Predict the label(s) with given feature inputs.

        Parameters
        ----------
        X: array_like
            Input features.

        Returns
        -------
        pred: array_like
            Predicted values.
        """
        # Apply Z-score normalization using the training-set statistics
        # before making predictions
        X_norm = (X - self.input_normalization.mean) / self.input_normalization.std

        # Make predictions with the trained weights
        pred = np.dot(X_norm, self.W) + self.b

        return pred
{"hexsha": "be05533b8bdf7ccc623ef11ab2f354ece6a63d22", "size": 3619, "ext": "py", "lang": "Python", "max_stars_repo_path": "zero2ml/supervised_learning/linear_regression.py", "max_stars_repo_name": "bekzatalish/zero2ml", "max_stars_repo_head_hexsha": "c2baa747e3a02893c58590de52f049184fb4b167", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "zero2ml/supervised_learning/linear_regression.py", "max_issues_repo_name": "bekzatalish/zero2ml", "max_issues_repo_head_hexsha": "c2baa747e3a02893c58590de52f049184fb4b167", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zero2ml/supervised_learning/linear_regression.py", "max_forks_repo_name": "bekzatalish/zero2ml", "max_forks_repo_head_hexsha": "c2baa747e3a02893c58590de52f049184fb4b167", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6102941176, "max_line_length": 83, "alphanum_fraction": 0.5976789168, "include": true, "reason": "import numpy", "num_tokens": 758}
/- Copyright (c) 2022 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ import category_theory.limits.shapes.regular_mono import category_theory.limits.shapes.zero_morphisms /-! # Categories where inclusions into coproducts are monomorphisms > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. If `C` is a category, the class `mono_coprod C` expresses that left inclusions `A ⟶ A ⨿ B` are monomorphisms when `has_coproduct A B` is satisfied. If it is so, it is shown that right inclusions are also monomorphisms. TODO @joelriou: show that if `X : I → C` and `ι : J → I` is an injective map, then the canonical morphism `∐ (X ∘ ι) ⟶ ∐ X` is a monomorphism. TODO: define distributive categories, and show that they satisfy `mono_coprod`, see <https://ncatlab.org/toddtrimble/published/distributivity+implies+monicity+of+coproduct+inclusions> -/ noncomputable theory open category_theory category_theory.category category_theory.limits universe u namespace category_theory namespace limits variables (C : Type*) [category C] /-- This condition expresses that inclusion morphisms into coproducts are monomorphisms. -/ class mono_coprod : Prop := (binary_cofan_inl : ∀ ⦃A B : C⦄ (c : binary_cofan A B) (hc : is_colimit c), mono c.inl) variable {C} @[priority 100] instance mono_coprod_of_has_zero_morphisms [has_zero_morphisms C] : mono_coprod C := ⟨λ A B c hc, begin haveI : is_split_mono c.inl := is_split_mono.mk' (split_mono.mk (hc.desc (binary_cofan.mk (𝟙 A) 0)) (is_colimit.fac _ _ _)), apply_instance, end⟩ namespace mono_coprod instance {A B : C} [mono_coprod C] [has_binary_coproduct A B] : mono (coprod.inl : A ⟶ A ⨿ B) := binary_cofan_inl _ (colimit.is_colimit _) instance {A B : C} [mono_coprod C] [has_binary_coproduct A B] : mono (coprod.inr : B ⟶ A ⨿ B) := binary_cofan_inr _ (colimit.is_colimit _) lemma mono_inl_iff {A B : C} {c₁ c₂ : binary_cofan A B} (hc₁ : is_colimit c₁) (hc₂ : is_colimit c₂) : mono c₁.inl ↔ mono c₂.inl := begin suffices : ∀ (c₁ c₂ : binary_cofan A B) (hc₁ : is_colimit c₁) (hc₂ : is_colimit c₂) (h : mono c₁.inl), mono c₂.inl, { exact ⟨λ h₁, this _ _ hc₁ hc₂ h₁, λ h₂, this _ _ hc₂ hc₁ h₂⟩, }, intros c₁ c₂ hc₁ hc₂, introI, simpa only [is_colimit.comp_cocone_point_unique_up_to_iso_hom] using mono_comp c₁.inl (hc₁.cocone_point_unique_up_to_iso hc₂).hom, end lemma mk' (h : ∀ (A B : C), ∃ (c : binary_cofan A B) (hc : is_colimit c), mono c.inl) : mono_coprod C := ⟨λ A B c' hc', begin obtain ⟨c, hc₁, hc₂⟩ := h A B, simpa only [mono_inl_iff hc' hc₁] using hc₂, end⟩ instance mono_coprod_type : mono_coprod (Type u) := mono_coprod.mk' (λ A B, begin refine ⟨binary_cofan.mk (sum.inl : A ⟶ A ⊕ B) sum.inr, _, _⟩, { refine binary_cofan.is_colimit.mk _ (λ Y f₁ f₂ x, by { cases x, exacts [f₁ x, f₂ x], }) (λ Y f₁ f₂, rfl) (λ Y f₁ f₂, rfl) _, intros Y f₁ f₂ m h₁ h₂, ext x, cases x, { dsimp, exact congr_fun h₁ x, }, { dsimp, exact congr_fun h₂ x, }, }, { rw mono_iff_injective, intros a₁ a₂ h, simp only [binary_cofan.mk_inl] at h, dsimp at h, simpa only using h, }, end) end mono_coprod end limits end category_theory
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/category_theory/limits/mono_coprod.lean"}
[STATEMENT] lemma par_strict_col_par_strict: assumes "C \<noteq> E" and "A B ParStrict C D" and "Col C D E" shows "A B ParStrict C E" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] have P1: "C E Par A B" [PROOF STATE] proof (prove) goal (1 subgoal): 1. C E Par A B [PROOF STEP] using Par_def Par_perm assms(1) assms(2) assms(3) par_col_par_2 [PROOF STATE] proof (prove) using this: ?A ?B Par ?C ?D \<equiv> ?A ?B ParStrict ?C ?D \<or> ?A \<noteq> ?B \<and> ?C \<noteq> ?D \<and> Col ?A ?C ?D \<and> Col ?B ?C ?D ?A ?B Par ?C ?D \<Longrightarrow> ?A ?B Par ?C ?D \<and> ?B ?A Par ?C ?D \<and> ?A ?B Par ?D ?C \<and> ?B ?A Par ?D ?C \<and> ?C ?D Par ?A ?B \<and> ?C ?D Par ?B ?A \<and> ?D ?C Par ?A ?B \<and> ?D ?C Par ?B ?A C \<noteq> E A B ParStrict C D Col C D E \<lbrakk>?A \<noteq> ?P; Col ?A ?B ?P; ?A ?B Par ?C ?D\<rbrakk> \<Longrightarrow> ?A ?P Par ?C ?D goal (1 subgoal): 1. C E Par A B [PROOF STEP] by blast [PROOF STATE] proof (state) this: C E Par A B goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] { [PROOF STATE] proof (state) this: C E Par A B goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] assume "C E ParStrict A B" [PROOF STATE] proof (state) this: C E ParStrict A B goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] then [PROOF STATE] proof (chain) picking this: C E ParStrict A B [PROOF STEP] have "A B ParStrict C E" [PROOF STATE] proof (prove) using this: C E ParStrict A B goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] by (metis par_strict_symmetry) [PROOF STATE] proof (state) this: A B ParStrict C E goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] } [PROOF STATE] proof (state) this: C E ParStrict A B \<Longrightarrow> A B ParStrict C E goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] thus ?thesis [PROOF STATE] proof (prove) using this: C E ParStrict A B \<Longrightarrow> A B ParStrict C E goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] using Col_cases Par_def P1 assms(2) par_strict_not_col_1 [PROOF STATE] proof (prove) using this: C E ParStrict A B \<Longrightarrow> A B ParStrict C E Col ?A ?B ?C \<or> Col ?A ?C ?B \<or> Col ?B ?A ?C \<or> Col ?B ?C ?A \<or> Col ?C ?A ?B \<or> Col ?C ?B ?A \<Longrightarrow> Col ?A ?B ?C ?A ?B Par ?C ?D \<equiv> ?A ?B ParStrict ?C ?D \<or> ?A \<noteq> ?B \<and> ?C \<noteq> ?D \<and> Col ?A ?C ?D \<and> Col ?B ?C ?D C E Par A B A B ParStrict C D ?A ?B ParStrict ?C ?D \<Longrightarrow> \<not> Col ?A ?B ?C goal (1 subgoal): 1. A B ParStrict C E [PROOF STEP] by blast [PROOF STATE] proof (state) this: A B ParStrict C E goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1267, "file": "IsaGeoCoq_Tarski_Neutral", "length": 14}
#!/usr/bin/env python """ Random graph from given degree sequence. Draw degree histogram with matplotlib. """ __author__ = """Aric Hagberg (hagberg@lanl.gov)""" try: import matplotlib.pyplot as plt import matplotlib except: raise import networkx as nx z=nx.create_degree_sequence(100,nx.utils.powerlaw_sequence,exponent=2.1) nx.is_valid_degree_sequence(z) print "Configuration model" G=nx.configuration_model(z) # configuration model degree_sequence=sorted(nx.degree(G).values(),reverse=True) # degree sequence #print "Degree sequence", degree_sequence dmax=max(degree_sequence) plt.loglog(degree_sequence,'b-',marker='o') plt.title("Degree rank plot") plt.ylabel("degree") plt.xlabel("rank") # draw graph in inset plt.axes([0.45,0.45,0.45,0.45]) Gcc=nx.connected_component_subgraphs(G)[0] pos=nx.spring_layout(Gcc) plt.axis('off') nx.draw_networkx_nodes(Gcc,pos,node_size=20) nx.draw_networkx_edges(Gcc,pos,alpha=0.4) plt.savefig("degree_histogram.png") plt.show()
{"hexsha": "9aabc154dba458cbbe4fe9dcaa5703160708eeaf", "size": 990, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/drawing/degree_histogram.py", "max_stars_repo_name": "bjedwards/NetworkX_fork", "max_stars_repo_head_hexsha": "6cb4465d73b8adc4692206fdbc8e1a3934d94fe6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-02-06T01:18:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-10T08:15:33.000Z", "max_issues_repo_path": "examples/drawing/degree_histogram.py", "max_issues_repo_name": "tomzhang/NetworkX_fork", "max_issues_repo_head_hexsha": "6cb4465d73b8adc4692206fdbc8e1a3934d94fe6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/drawing/degree_histogram.py", "max_forks_repo_name": "tomzhang/NetworkX_fork", "max_forks_repo_head_hexsha": "6cb4465d73b8adc4692206fdbc8e1a3934d94fe6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-04-28T19:19:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T11:46:29.000Z", "avg_line_length": 23.023255814, "max_line_length": 76, "alphanum_fraction": 0.7616161616, "include": true, "reason": "import networkx", "num_tokens": 259}
import theano
import theano.tensor as T
import numpy as np

X = theano.shared(value=np.asarray([[1, 0], [0, 0], [0, 1], [1, 1]]), name='X')
y = theano.shared(value=np.asarray([[1], [0], [1], [0]]), name='y')
rng = np.random.RandomState(1234)

LEARNING_RATE = 0.01


def layer(n_in, n_out):
    np_array = np.asarray(rng.uniform(low=-1.0, high=1.0, size=(n_in, n_out)),
                          dtype=theano.config.floatX)
    return theano.shared(value=np_array, name='W', borrow=True)


W1 = layer(2, 5)
W2 = layer(5, 1)

output = T.nnet.sigmoid(T.dot(T.nnet.sigmoid(T.dot(X, W1)), W2))
cost = T.sum((y - output) ** 2)
updates = [(W1, W1 - LEARNING_RATE * T.grad(cost, W1)),
           (W2, W2 - LEARNING_RATE * T.grad(cost, W2))]

train = theano.function(inputs=[], outputs=[cost], updates=updates)
test = theano.function(inputs=[], outputs=[output])

for i in range(60000):
    if (i+1) % 10000 == 0:
        print(i+1)
    train()

print(test())
{"hexsha": "5c734955ab93e3d5210403d848b2f959335567a8", "size": 956, "ext": "py", "lang": "Python", "max_stars_repo_path": "3_simple_neural_net/simple_net.py", "max_stars_repo_name": "JBed/Simple_Theano", "max_stars_repo_head_hexsha": "f2e265975339b558c9abb77c26aff6ec8e4a78cb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "3_simple_neural_net/simple_net.py", "max_issues_repo_name": "JBed/Simple_Theano", "max_issues_repo_head_hexsha": "f2e265975339b558c9abb77c26aff6ec8e4a78cb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "3_simple_neural_net/simple_net.py", "max_forks_repo_name": "JBed/Simple_Theano", "max_forks_repo_head_hexsha": "f2e265975339b558c9abb77c26aff6ec8e4a78cb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.875, "max_line_length": 106, "alphanum_fraction": 0.6422594142, "include": true, "reason": "import numpy,import theano", "num_tokens": 313}
import h5py import lmdb import numpy as np import sys h5_file = h5py.File(sys.argv[1]) data = h5_file.get('images') target = h5_file.get('labels') num = int(sys.argv[3]) data = data[:num] target = target[:num] map_size = data.nbytes * 10 env = lmdb.open(sys.argv[2], map_size=map_size) for i in range(data.shape[0]): with env.begin(write=True) as txn: txn.put('X_' + str(i), data[i]) txn.put('y_' + str(i), target[i]) if i % 1000 == 0: print i, data.shape[0]
{"hexsha": "7d935313d915b7e2f9a71e141db07c7f1ede7f99", "size": 495, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch/hdf5_to_lmdb.py", "max_stars_repo_name": "mjm522/gpd", "max_stars_repo_head_hexsha": "6327f20eabfcba41a05fdd2e2ba408153dc2e958", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 439, "max_stars_repo_stars_event_min_datetime": "2017-05-23T07:03:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T09:08:35.000Z", "max_issues_repo_path": "pytorch/hdf5_to_lmdb.py", "max_issues_repo_name": "mjm522/gpd", "max_issues_repo_head_hexsha": "6327f20eabfcba41a05fdd2e2ba408153dc2e958", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 113, "max_issues_repo_issues_event_min_datetime": "2017-05-23T16:52:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T02:06:19.000Z", "max_forks_repo_path": "pytorch/hdf5_to_lmdb.py", "max_forks_repo_name": "mjm522/gpd", "max_forks_repo_head_hexsha": "6327f20eabfcba41a05fdd2e2ba408153dc2e958", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 221, "max_forks_repo_forks_event_min_datetime": "2017-05-23T22:05:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T07:15:59.000Z", "avg_line_length": 21.5217391304, "max_line_length": 47, "alphanum_fraction": 0.6282828283, "include": true, "reason": "import numpy", "num_tokens": 159}
# # Series Recipes @nospecialize """ _process_seriesrecipes!(plt, kw_list) Recursively apply series recipes until the backend supports the seriestype """ function _process_seriesrecipes!(plt, kw_list) for kw in kw_list # in series attributes given as vector with one element per series, # select the value for current series slice_series_attributes!(plt, kw_list, kw) end process_sliced_series_attributes!(plt, kw_list) for kw in kw_list series_attr = DefaultsDict(kw, series_defaults(plt)) # now we have a fully specified series, with colors chosen. we must recursively # handle series recipes, which dispatch on seriestype. If a backend does not # natively support a seriestype, we check for a recipe that will convert that # series type into one made up of lower-level components. # For example, a histogram is just a bar plot with binned data, a bar plot is # really a filled step plot, and a step plot is really just a path. So any backend # that supports drawing a path will implicitly be able to support step, bar, and # histogram plots (and any recipes that use those components). _process_seriesrecipe(plt, series_attr) end end # this method recursively applies series recipes when the seriestype is not supported # natively by the backend function _process_seriesrecipe(plt, plotattributes) # replace seriestype aliases st = Symbol(plotattributes[:seriestype]) st = plotattributes[:seriestype] = type_alias(plt, st) # shapes shouldn't have fillrange set if plotattributes[:seriestype] == :shape plotattributes[:fillrange] = nothing end # if it's natively supported, finalize processing and pass along to the backend, # otherwise recurse if is_seriestype_supported(plt, st) add_series!(plt, plotattributes) else # get a sub list of series for this seriestype x, y, z = plotattributes[:x], plotattributes[:y], plotattributes[:z] datalist = RecipesBase.apply_recipe(plotattributes, Val{st}, x, y, z) warn_on_recipe_aliases!(plt, datalist, :series, st) # assuming there was no error, recursively apply the series recipes for data in datalist if isa(data, RecipeData) preprocess_attributes!(plt, data.plotattributes) if data.plotattributes[:seriestype] == st error("The seriestype didn't change in series recipe $st. This will cause a StackOverflow.") end _process_seriesrecipe(plt, data.plotattributes) else @warn("Unhandled recipe: $(data)") break end end end nothing end @specialize
{"hexsha": "f799a9192d6467b5ae7f2c6d718fd50fc8efb1de", "size": 2799, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/series_recipe.jl", "max_stars_repo_name": "KristofferC/RecipesPipeline.jl", "max_stars_repo_head_hexsha": "a380895318b4386a3e60ba5be88dcd51c6045928", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-04-05T20:02:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T21:26:54.000Z", "max_issues_repo_path": "src/series_recipe.jl", "max_issues_repo_name": "KristofferC/RecipesPipeline.jl", "max_issues_repo_head_hexsha": "a380895318b4386a3e60ba5be88dcd51c6045928", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2020-04-05T14:04:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T18:40:13.000Z", "max_forks_repo_path": "src/series_recipe.jl", "max_forks_repo_name": "JuliaPlots/RecipePipeline.jl", "max_forks_repo_head_hexsha": "3f8fc68767e3dfc70157cb666eed13f663df0210", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-04-29T03:30:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T19:35:00.000Z", "avg_line_length": 39.4225352113, "max_line_length": 112, "alphanum_fraction": 0.6759556985, "num_tokens": 639}
import sys
sys.path.append('.')

import numpy as np
import torch

from catalyst import utils
from catalyst.dl import SupervisedRunner

from src.model.mobilenet import MBv2
from src.model.model_wrapper import ModelWrapper


if __name__ == "__main__":

    image_size = [1, 3, 416, 416]
    batch = {
        'image': np.random.randn(*image_size)
    }

    # create model
    model = MBv2(num_classes=2)
    model_wrapper = ModelWrapper(model)

    device = torch.device("cpu")
    print(f"device: {device}")

    logdir = "logs/segmentation"
    checkpoint_path = f"{logdir}/checkpoints/best.pth"
    checkpoint = utils.load_checkpoint(checkpoint_path)
    model_wrapper.load_state_dict(checkpoint['model_state_dict'])

    # create runner
    runner = SupervisedRunner(device=device, input_key="image", input_target_key="mask")

    # trace model
    # saves to `logdir` and returns a `ScriptModule` class
    traced_script_module = runner.trace(model=model_wrapper, batch=batch, fp16=False)
    traced_script_module.save("traced_model.pt")
{"hexsha": "50caaf9a330382fb12ff6345985396c1d9c1746e", "size": 1057, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/trace_model.py", "max_stars_repo_name": "meikuam/cat_faces", "max_stars_repo_head_hexsha": "00f58d91ac3b01c9e7f239b896283ca678448692", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/trace_model.py", "max_issues_repo_name": "meikuam/cat_faces", "max_issues_repo_head_hexsha": "00f58d91ac3b01c9e7f239b896283ca678448692", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/trace_model.py", "max_forks_repo_name": "meikuam/cat_faces", "max_forks_repo_head_hexsha": "00f58d91ac3b01c9e7f239b896283ca678448692", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8157894737, "max_line_length": 88, "alphanum_fraction": 0.7171239357, "include": true, "reason": "import numpy", "num_tokens": 264}
#include <boost/fusion/include/vector30.hpp>
{"hexsha": "59e1c11332108d6ac5ec1b8650f0c7b6dc26952e", "size": 45, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_fusion_include_vector30.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_fusion_include_vector30.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_fusion_include_vector30.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 22.5, "max_line_length": 44, "alphanum_fraction": 0.8, "num_tokens": 10}
""" """ import re import json from rio_tiler import main from rio_tiler.utils import array_to_img, linear_rescale import numpy as np from lambda_proxy.proxy import API from distutils import util APP = API(app_name="lambda-tiler") @APP.route('/bounds', methods=['GET'], cors=True) def bounds(): """ Handle bounds requests """ query_args = APP.current_request.query_params query_args = query_args if isinstance(query_args, dict) else {} address = query_args['url'] info = main.bounds(address) return ('OK', 'application/json', json.dumps(info)) @APP.route('/tiles/<int:z>/<int:x>/<int:y>.<ext>', methods=['GET'], cors=True) def tile(tile_z, tile_x, tile_y, tileformat): """ Handle tile requests """ query_args = APP.current_request.query_params query_args = query_args if isinstance(query_args, dict) else {} address = query_args['url'] bands = query_args.get('rgb') if bands: bands = tuple(int(s) for s in re.findall(r'\d+', bands)) tilesize = query_args.get('tile', 512) tilesize = int(tilesize) if isinstance(tilesize, str) else tilesize nodata = query_args.get('nodata') if nodata is not None: nodata = int(nodata) alpha = query_args.get('alpha') if alpha is not None: alpha = int(alpha) # detect linear scale request linearStretch = query_args.get('linearStretch') tile, mask = main.tile(address, tile_x, tile_y, tile_z, bands, tilesize=tilesize, nodata=nodata, alpha=alpha) if linearStretch is not None: if util.strtobool(linearStretch): tile = linear_rescale(tile, in_range=(np.min(tile), np.max(tile)) ) tile = array_to_img(tile, tileformat, mask=mask) if tileformat == 'jpg': tileformat = 'jpeg' return ('OK', f'image/{tileformat}', tile) @APP.route('/favicon.ico', methods=['GET'], cors=True) def favicon(): """ favicon """ return('NOK', 'text/plain', '')
{"hexsha": "590f4154f70b5a4aa00a39950f7e348d607be50b", "size": 2026, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/handler.py", "max_stars_repo_name": "dlindenbaum/lambda-tiler", "max_stars_repo_head_hexsha": "18f32d646edc877340abd388c77e37ac4de80b2b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-05-15T11:47:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T14:49:03.000Z", "max_issues_repo_path": "app/handler.py", "max_issues_repo_name": "dlindenbaum/lambda-tiler", "max_issues_repo_head_hexsha": "18f32d646edc877340abd388c77e37ac4de80b2b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-04-16T00:51:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-19T22:42:55.000Z", "max_forks_repo_path": "app/handler.py", "max_forks_repo_name": "dlindenbaum/lambda-tiler", "max_forks_repo_head_hexsha": "18f32d646edc877340abd388c77e37ac4de80b2b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-04-11T17:36:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-18T01:29:02.000Z", "avg_line_length": 24.7073170732, "max_line_length": 113, "alphanum_fraction": 0.6362290227, "include": true, "reason": "import numpy", "num_tokens": 502}
import io
import urllib.request

# 3rd Party
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from scipy import ndimage


def get_entropy(signal):
    """
    Uses log2 as base
    """
    probability_distribution = [np.size(signal[signal == i]) / (1.0 * signal.size)
                                for i in list(set(signal))]
    entropy = np.sum([pp * np.log2(1.0 / pp)
                      for pp in probability_distribution])
    return entropy

image_url = 'http://i.imgur.com/8vuLtqi.png' # Lena Image
fd = urllib.request.urlopen(image_url)
image_file = io.BytesIO(fd.read())
img_color = Image.open(image_file)
img_grey = img_color.convert('L')

img_color = np.array(img_color)
img_grey = np.array(img_grey)

neighborhood = 5

def get_entropy_of_image(img_grey, nh):
    dim0, dim1 = img_grey.shape
    # float output array: entropy values are fractional and would be
    # truncated if stored in a uint8 copy of the image
    entropy = np.zeros(img_grey.shape, dtype=float)
    for row in range(dim0):
        for col in range(dim1):
            lower_x = np.max([0, col - nh])
            upper_x = np.min([dim1, col + nh])
            lower_y = np.max([0, row - nh])
            upper_y = np.min([dim0, row + nh])
            area = img_grey[lower_y: upper_y, lower_x: upper_x].flatten()
            entropy[row, col] = get_entropy(area)
    return entropy

# Reference material: http://bugra.github.io/work/notes/2014-05-16/entropy-perplexity-image-text/
# Get the entropy of the image
img_entropy = get_entropy_of_image(img_grey, neighborhood)

plt.figure(figsize=(12, 12));
plt.imshow(img_color);
plt.grid(False);
plt.xticks([]);
plt.yticks([]);

plt.figure(figsize=(12, 12));
plt.imshow(img_grey, cmap=plt.cm.gray);
plt.grid(False);
plt.xticks([]);
plt.yticks([]);

plt.figure(figsize=(12, 12));
plt.imshow(img_entropy, cmap=plt.cm.Purples);
plt.title('Entropy in {}x{} neighbourhood'.format(2*neighborhood, 2*neighborhood));
plt.colorbar();
plt.grid(False);
plt.xticks([]);
plt.yticks([]);

# Low-pass with a Gaussian filter (sigma = 10)
lowpass = ndimage.gaussian_filter(img_grey, 10)

# Subtracting the lowpass, we get the highpass; cast to float so negative
# differences are not wrapped around by uint8 arithmetic
highpass = img_grey.astype(float) - lowpass

plt.figure(figsize=(12, 12));
plt.imshow(highpass, cmap=plt.cm.Purples);
plt.colorbar();
plt.grid(False);
plt.xticks([]);
plt.yticks([]);
{"hexsha": "a9993ae58899ccc194cbc96abf45b6d1998a9f31", "size": 2087, "ext": "py", "lang": "Python", "max_stars_repo_path": "imagem.py", "max_stars_repo_name": "arijr/tic", "max_stars_repo_head_hexsha": "f6d2312a7d1b09fa15344c9ff8474ce42afad256", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "imagem.py", "max_issues_repo_name": "arijr/tic", "max_issues_repo_head_hexsha": "f6d2312a7d1b09fa15344c9ff8474ce42afad256", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imagem.py", "max_forks_repo_name": "arijr/tic", "max_forks_repo_head_hexsha": "f6d2312a7d1b09fa15344c9ff8474ce42afad256", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6911764706, "max_line_length": 109, "alphanum_fraction": 0.6947771921, "include": true, "reason": "import numpy,from scipy", "num_tokens": 572}
from .gen_guided_model import GuidedModel import torch import numpy as np from PIL import Image import cv2 import matplotlib.pyplot as plt from scipy.io import savemat class GuideCall(object): def __init__(self, args): self.input_path = args.input_path self.output_path = args.output_path self.output_path.mkdir(parents=True, exist_ok=True) self.gpu = args.gpu # network load self.net = args.net self.net.eval() if self.gpu: self.net.cuda() self.back_model = GuidedModel(self.net) self.back_model.inference() self.shape = None self.output_path_each = None def main(self): for img_i, path in enumerate(self.input_path): self.output_path_each = self.output_path.joinpath("{:05d}".format(img_i)) self.output_path_each.mkdir(parents=True, exist_ok=True) # load image img = cv2.imread(str(path), 0) self.shape = img.shape cv2.imwrite( str(self.output_path_each.joinpath("original.png")), ((img / img.max()) * 255).astype(np.uint8), ) img = (img.astype(np.float32) / img.max()).reshape( (1, 1, img.shape[0], img.shape[1]) ) img = torch.from_numpy(img) # throw unet if self.gpu: img = img.cuda() module = self.back_model prms = module(img, self.output_path_each) prms = np.array(prms) prms_coloring = self.coloring(prms) prms_coloring = np.array(prms_coloring) savemat( str(self.output_path_each.joinpath("prms.mat")), {"prms": prms, "color": prms_coloring}, ) prms_coloring = np.max(prms_coloring, axis=0) prms_coloring = ( prms_coloring.astype(np.float) / prms_coloring.max() * 255 ).astype(np.uint8) cv2.imwrite( str(self.output_path_each.joinpath("instance.png")), prms_coloring.astype(np.uint8), ) def coloring(self, gbs): # coloring r, g, b = np.loadtxt("./utils/color.csv", delimiter=",") gbs_coloring = [] for peak_i, gb in enumerate(gbs): gb = gb / gb.max() * 255 gb = gb.clip(0, 255).astype(np.uint8) result = np.ones((self.shape[0], self.shape[1], 3)) result = gb[..., np.newaxis] * result peak_i = peak_i % 20 result[..., 0][result[..., 0] != 0] = r[peak_i] * gb[gb != 0] result[..., 1][result[..., 1] != 0] = g[peak_i] * gb[gb != 0] result[..., 2][result[..., 2] != 0] = b[peak_i] * gb[gb != 0] gbs_coloring.append(result) return gbs_coloring
{"hexsha": "30718ce74aa0955412944b75795d0fa549716a2f", "size": 2873, "ext": "py", "lang": "Python", "max_stars_repo_path": "propagation/guided_function.py", "max_stars_repo_name": "naivete5656/WSISPDR", "max_stars_repo_head_hexsha": "1dc4d1bf24a6ebf7efd3c75d3f1a9edbe849d38b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2019-10-09T09:42:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T09:57:29.000Z", "max_issues_repo_path": "propagation/guided_function.py", "max_issues_repo_name": "naivete5656/WSISPDR", "max_issues_repo_head_hexsha": "1dc4d1bf24a6ebf7efd3c75d3f1a9edbe849d38b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-02-26T06:49:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-17T16:29:24.000Z", "max_forks_repo_path": "propagation/guided_function.py", "max_forks_repo_name": "naivete5656/WSISPDR", "max_forks_repo_head_hexsha": "1dc4d1bf24a6ebf7efd3c75d3f1a9edbe849d38b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-10-18T07:34:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-10T03:35:34.000Z", "avg_line_length": 32.2808988764, "max_line_length": 85, "alphanum_fraction": 0.5342847198, "include": true, "reason": "import numpy,from scipy", "num_tokens": 707}
#! /usr/bin/env python
"""Thermodynamic quantities."""
import numpy as np
from scipy.optimize import brentq

from .constants import (
    C_P,
    C_PV,
    EPSILON,
    GRAV_EARTH,
    L_V,
    P0,
    R_D,
    R_V,
    REL_HUM,
)


def sat_vap_press_tetens_kelvin(temp):
    """Saturation vapor pressure using Tetens equation.

    Note: unlike original Tetens expression, temperature should be in
    Kelvin, NOT degrees Celsius.  And result has units Pa, not kPa as in
    original version.

    """
    a = 610.78  # 0.61078 kPa expressed in Pa, consistent with the docstring
    b = 17.27
    c = -35.85
    return a*np.exp(b*(temp - 273.15) / (temp + c))


def saturation_entropy(temp, pressure=P0, sat_vap_press=None, c_p=C_P,
                       r_d=R_D, l_v=L_V):
    """Saturation entropy, from Emanuel and Rotunno 2011, JAS.

    Saturation vapor pressure can be provided as `sat_vap_press`, otherwise
    it is computed using the Tetens equation.

    If `pressure` is not provided (units Pa), it is assumed to be 1e5 Pa,
    i.e. 1000 hPa.

    Neglects difference between mixing ratio and specific humidity.

    Note that this expression is not identical to moist entropy computed as
    c_p*log(theta_e_sat), where theta_e_sat is equivalent potential
    temperature computed at saturation (i.e. with relative humidity = 1).
    This expression is much lower, i.e. around 2500 J/kg/K for Earth-like
    near-surface conditions, compared to 5500-6000 J/kg/K for the
    cp*log(theta_e_sat) version.

    """
    if sat_vap_press is None:
        sat_vap_press = sat_vap_press_tetens_kelvin(temp)
    sat_spec_hum = sat_vap_press / pressure
    return (c_p*np.log(temp) - r_d*np.log(pressure) +
            l_v*sat_spec_hum / temp)


def dsat_entrop_dtemp_approx(temp, pressure=P0, c_p=C_P, r_v=R_V, l_v=L_V):
    sat_vap_press = sat_vap_press_tetens_kelvin(temp)
    sat_spec_hum = sat_vap_press / pressure
    return (c_p + l_v*sat_spec_hum*(l_v/(r_v*temp) - 1)/temp) / temp


def water_vapor_mixing_ratio(vapor_press, pressure, epsilon=EPSILON):
    return epsilon*vapor_press / (pressure - vapor_press)


def equiv_pot_temp(temp, rel_hum, pressure, tot_wat_mix_ratio=None, p0=P0,
                   c_p=C_P, c_liq=4185.5, l_v=L_V, r_d=R_D, r_v=R_V):
    """Equivalent potential temperature."""
    sat_vap_press = sat_vap_press_tetens_kelvin(temp)
    vapor_pressure = rel_hum*sat_vap_press
    pressure_dry = pressure - vapor_pressure
    vap_mix_ratio = water_vapor_mixing_ratio(vapor_pressure, pressure)
    if tot_wat_mix_ratio is None:
        denom = c_p
    else:
        denom = c_p + c_liq*tot_wat_mix_ratio
    return (temp*(p0/pressure_dry)**(r_d/denom) *
            rel_hum**(r_v*vap_mix_ratio / denom) *
            np.exp(l_v*vap_mix_ratio / (denom*temp)))


def temp_from_equiv_pot_temp(theta_e, rel_hum=0.7, pressure=P0,
                             tot_wat_mix_ratio=None, p0=P0, c_p=C_P,
                             c_liq=4185.5, l_v=L_V, r_d=R_D, r_v=R_V):
    """Temperature, given the equivalent potential temperature."""
    def func(temp, theta):
        sat_vap_press = sat_vap_press_tetens_kelvin(temp)
        vapor_pressure = rel_hum*sat_vap_press
        vap_mix_ratio = water_vapor_mixing_ratio(vapor_pressure, pressure)
        if tot_wat_mix_ratio is None:
            denom = c_p
        else:
            denom = c_p + c_liq*tot_wat_mix_ratio
        return (theta*(pressure/p0)**(r_d/denom) /
                rel_hum**(r_v*vap_mix_ratio / denom) -
                temp*np.exp(l_v*vap_mix_ratio / (denom*temp)))

    pot_temp_is_scalar = np.isscalar(theta_e)
    pot_temp_is_len0_arr = not pot_temp_is_scalar and not theta_e.shape
    if pot_temp_is_scalar:
        pot_temp_array = [theta_e]
    elif pot_temp_is_len0_arr:
        pot_temp_array = [float(theta_e)]
    else:
        pot_temp_array = theta_e

    solutions = []
    for pta in pot_temp_array:
        # Start with guess range narrowly bounding the theta_e value, and
        # then progressively widen if the function doesn't change sign within
        # the bound. 
Ensures that the zero crossing is as close as possible # to the neighborhood of the theta_e value, so that the algorithm will # converge and we don't accidentally catch another irrelevant zero # crossing by mistake. for factor in np.arange(0.01, 0.99, 0.01): guess_lower = (1 - factor)*pta guess_upper = (1 + factor)*pta try: sol = brentq(func, guess_lower, guess_upper, args=(pta,)) except ValueError: pass else: # Temperature is always less than equiv. pot. temp., meaning # that the procedure failed if the opposite occurs. Mask it. if sol > pta: solutions.append(np.nan) else: solutions.append(sol) break # If no solution found, just mask. Otherwise, return same type/shape as # original input data. if len(solutions) == 0: return np.nan elif pot_temp_is_scalar or pot_temp_is_len0_arr: return solutions[0] else: return np.ones_like(theta_e)*solutions def moist_entropy(temp, rel_hum, pressure, tot_wat_mix_ratio=None, p0=P0, c_p=C_P, c_liq=4185.5, l_v=L_V, r_d=R_D, r_v=R_V): """Moist entropy.""" return c_p*np.log(equiv_pot_temp( temp, rel_hum, pressure, tot_wat_mix_ratio=tot_wat_mix_ratio, p0=p0, c_p=c_p, c_liq=c_liq, l_v=l_v, r_d=r_d, r_v=r_v, )) def pseudoadiabatic_lapse_rate(temp, pressure, rel_hum=REL_HUM, grav=GRAV_EARTH, c_p=C_P, r_d=R_D, l_v=L_V, r_v=R_V, c_pv=C_PV): """Pseudoadiabatic lapse rate.""" sat_vap_press = sat_vap_press_tetens_kelvin(temp) vapor_pressure = rel_hum*sat_vap_press vap_mix_ratio = water_vapor_mixing_ratio(vapor_pressure, pressure) numer = grav*(1 + vap_mix_ratio)*(1 + l_v*vap_mix_ratio / (r_d*temp)) epsilon = r_d / r_v denom = c_p + c_pv*vap_mix_ratio + ( l_v**2*vap_mix_ratio * (epsilon + vap_mix_ratio) / (r_d*temp**2) ) return numer / denom if __name__ == '__main__': pass
{"hexsha": "81868041ce5f0c0a9e4f528d2a0514aeae6c7433", "size": 6293, "ext": "py", "lang": "Python", "max_stars_repo_path": "puffins/thermodynamics.py", "max_stars_repo_name": "spencerahill/puffins", "max_stars_repo_head_hexsha": "27a9c06fe0ae1bc090a86084c2f3c924ce15ec95", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-30T18:14:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-08T06:29:42.000Z", "max_issues_repo_path": "puffins/thermodynamics.py", "max_issues_repo_name": "liyang0711/puffins", "max_issues_repo_head_hexsha": "93b386d1121d3b584d484b2d8543945b56161bbb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "puffins/thermodynamics.py", "max_forks_repo_name": "liyang0711/puffins", "max_forks_repo_head_hexsha": "93b386d1121d3b584d484b2d8543945b56161bbb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-19T02:46:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-19T02:46:50.000Z", "avg_line_length": 34.9611111111, "max_line_length": 79, "alphanum_fraction": 0.6403940887, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1726}
Require Import
  MathClasses.interfaces.abstract_algebra MathClasses.interfaces.orders.

(** Scalar multiplication function class *)
Class ScalarMult K V := scalar_mult: K → V → V.
#[global]
Instance: Params (@scalar_mult) 3 := {}.

Infix "·" := scalar_mult (at level 50) : mc_scope.
Notation "(·)" := scalar_mult (only parsing) : mc_scope.
Notation "( x ·)" := (scalar_mult x) (only parsing) : mc_scope.
Notation "(· x )" := (λ y, y · x) (only parsing) : mc_scope.

(** The inproduct function class *)
Class Inproduct K V := inprod : V → V → K.
#[global]
Instance: Params (@inprod) 3 := {}.

Notation "(⟨⟩)" := (inprod) (only parsing) : mc_scope.
Notation "⟨ u , v ⟩" := (inprod u v) (at level 51) : mc_scope.
Notation "⟨ u , ⟩" := (λ v, ⟨u,v⟩) (at level 50, only parsing) : mc_scope.
Notation "⟨ , v ⟩" := (λ u, ⟨u,v⟩) (at level 50, only parsing) : mc_scope.
Notation "x ⊥ y" := (⟨x,y⟩ = 0) (at level 70) : mc_scope.

(** The norm function class *)
Class Norm K V := norm : V → K.
#[global]
Instance: Params (@norm) 2 := {}.

Notation "∥ L ∥" := (norm L) (at level 50) : mc_scope.
Notation "∥·∥" := norm (only parsing) : mc_scope.

(** Let [M] be an R-Module. *)
Class Module (R M : Type)
  {Re Rplus Rmult Rzero Rone Rnegate}
  {Me Mop Munit Mnegate}
  {sm : ScalarMult R M} :=
  { lm_ring :> @Ring R Re Rplus Rmult Rzero Rone Rnegate
  ; lm_group :> @AbGroup M Me Mop Munit Mnegate
  ; lm_distr_l :> LeftHeteroDistribute (·) (&) (&)
  ; lm_distr_r :> RightHeteroDistribute (·) (+) (&)
  ; lm_assoc :> HeteroAssociative (·) (·) (·) (.*.)
  ; lm_identity :> LeftIdentity (·) 1
  ; scalar_mult_proper :> Proper ((=) ==> (=) ==> (=)) sm
  }.
(* TODO K is commutative, so derive right module laws? *)

(** A module with a seminorm. *)
Class Seminormed
  {R Re Rplus Rmult Rzero Rone Rnegate Rle Rlt Rapart}
  {M Me Mop Munit Mnegate Smult}
  `{!Abs R}
  (n : Norm R M) :=
  (* We have a module *)
  { snm_module :> @Module R M Re Rplus Rmult Rzero Rone Rnegate
      Me Mop Munit Mnegate Smult
  ; snm_order :> @FullPseudoSemiRingOrder R Re Rapart Rplus Rmult Rzero Rone Rle Rlt
  (* With respect to which our norm preserves the following: *)
  ; snm_scale : ∀ a v, ∥a · v∥ = (abs a) * ∥v∥ (* positive homogeneity *)
  ; snm_triangle : ∀ u v, ∥u & v∥ ≤ ∥u∥ + ∥v∥ (* triangle inequality *)
  }.

(** [K] is the field of scalars, [V] the abelian group of vectors, and
 together with a scalar multiplication operation, they satisfy the Module laws. *)
Class VectorSpace (K V : Type)
  {Ke Kplus Kmult Kzero Kone Knegate Krecip} (* scalar operations *)
  {Ve Vop Vunit Vnegate} (* vector operations *)
  {sm : ScalarMult K V} :=
  { vs_field :> @DecField K Ke Kplus Kmult Kzero Kone Knegate Krecip
  ; vs_abgroup :> @AbGroup V Ve Vop Vunit Vnegate
  ; vs_module :> @Module K V Ke Kplus Kmult Kzero Kone Knegate
      Ve Vop Vunit Vnegate sm
  }.

(** Given some vector space V over an ordered field K, we define the
 inner product space *)
Class InnerProductSpace (K V : Type)
  {Ke Kplus Kmult Kzero Kone Knegate Krecip} (* scalar operations *)
  {Ve Vop Vunit Vnegate} (* vector operations *)
  {sm : ScalarMult K V} {inp: Inproduct K V} {Kle: Le K} :=
  { in_vectorspace :> @VectorSpace K V Ke Kplus Kmult Kzero Kone Knegate Krecip
      Ve Vop Vunit Vnegate sm
  ; in_srorder :> SemiRingOrder Kle
  ; in_comm :> Commutative inprod
  ; in_linear_l : ∀ a u v, ⟨a·u,v⟩ = a*⟨u,v⟩
  ; in_nonneg :> ∀ v, PropHolds (0 ≤ ⟨v,v⟩) (* TODO Le too strong? *)
  ; in_mon_unit_zero :> ∀ v, 0 = ⟨v,v⟩ <-> v = mon_unit
  ; inprod_proper :> Proper ((=) ==> (=) ==> (=)) (⟨⟩)
  }.

(* TODO complex conjugate?
Section proof_in_linear_r.
  Context `{InnerProductSpace}. 
Lemma in_linear_r a u v : ⟨u,a·v⟩ = a*⟨u,v⟩. Proof. rewrite !(commutativity u). apply in_linear_l. Qed. End proof_in_linear_r. *) (* This is probably a bad idea? (because ∣ ≠ |) Notation "∣ a ∣" := (abs a). *) Class SemiNormedSpace (K V : Type) `{a:Abs K} `{n : @Norm K V} (* scalar and vector norms *) {Ke Kplus Kmult Kzero Kone Knegate Krecip} (* scalar operations *) {Ve Vop Vunit Vnegate} (* vector operations *) {sm : ScalarMult K V} := { sn_vectorspace :> @VectorSpace K V Ke Kplus Kmult Kzero Kone Knegate Krecip Ve Vop Vunit Vnegate sm ; sn_nonneg : ∀ v, 0 ≤ ∥v∥ (* non-negativity *) ; sn_scale : ∀ a v, ∥a · v∥ = (abs a) * ∥v∥ (* positive homgeneity *) ; sn_triangle : ∀ u v, ∥u & v∥ ≤ ∥u∥ + ∥v∥ (* triangle inequality *) }. (* For normed spaces: n_separates : ∀ (v:V), ∥v∥ = 0 ↔ v = unit *) (* - Induced metric: d x y := ∥ x - y ∥ - Induced norm. If the metric is 1). translation invariant: d x y = d (x + a) (y + a) 2). and homogeneous: d (α * x) (α * y) = ∣α∣ * (d x y) then we can define the norm as: ∥ x ∥ := d 0 x - Same for seminorm *)
{"author": "coq-community", "repo": "math-classes", "sha": "c11eb05a1e58a7293ef9a9a046ca02a9fd5b44bc", "save_path": "github-repos/coq/coq-community-math-classes", "path": "github-repos/coq/coq-community-math-classes/math-classes-c11eb05a1e58a7293ef9a9a046ca02a9fd5b44bc/interfaces/vectorspace.v"}
      subroutine chk_endianc(mendian)
!----------------------------------------------------------------------
!$$$  documentation block
!
! chk_endianc:   to obtain machine endianness
!
!  programmer: J. Wang                      date: Aug, 2012
!
! Input:
!   no input argument
! OUTPUT:
!   mendian: character(16) machine endianness
!
!----------------------------------------------------------------------
!
      implicit none
!
      character(16),intent(out)   :: mendian
!
!----------------------------------------------------------------------
      INTEGER,PARAMETER :: ASCII_0 = 48,ASCII_1 = 49,ASCII_2 = 50,     &
     &                     ASCII_3 = 51
      INTEGER(4)        :: I
      common// I
!
!*****  code start
!
!--- store the ASCII codes of '0','1','2','3' in one 32-bit integer;
!--- findendian reads the same storage back as four characters through
!--- the blank common block, so their order reveals the byte order
      I = ASCII_0 + ASCII_1*256 + ASCII_2*(256**2) + ASCII_3*(256**3)
      call findendian(mendian)
!
! ------------------------------------------------------------------
!
      end subroutine chk_endianc
!
!
!-----------------------------------------------------------------------
!
      subroutine findendian(mendian)
!
      implicit none
!---
      character(16),intent(out) :: mendian
!
!--- local vars
      character :: i*4
      common// i
! ------------------------------------------------------------------
      if(i .eq. '0123') then
        mendian='little_endian'
        return
      elseif (i .eq. '3210') then
        mendian='big_endian'
        return
      else
        mendian='mixed_endian'
        return
      endif
!
! ------------------------------------------------------------------
!
      end subroutine findendian
{"hexsha": "b0f8fc0f9123b89bfa1d8ac473b6b37e6ab3eed7", "size": 1654, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "buildscripts/libs/NCEPlibs/src/bacio/v2.0.1/src/chk_endianc.f", "max_stars_repo_name": "GMAO-SI-Team/jedi-stack", "max_stars_repo_head_hexsha": "c34e968b3f803a255a7d2d1f33c1bf8c4d1559a0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-05-21T18:44:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-14T21:21:11.000Z", "max_issues_repo_path": "buildscripts/libs/NCEPlibs/src/bacio/v2.0.1/src/chk_endianc.f", "max_issues_repo_name": "GMAO-SI-Team/jedi-stack", "max_issues_repo_head_hexsha": "c34e968b3f803a255a7d2d1f33c1bf8c4d1559a0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 152, "max_issues_repo_issues_event_min_datetime": "2019-04-04T14:22:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-24T14:41:00.000Z", "max_forks_repo_path": "NCEP_bacio/chk_endianc.f", "max_forks_repo_name": "GEOS-ESM/NCEP_Shared", "max_forks_repo_head_hexsha": "6f8b2103d3ce8f1b829bff88b71ca4482b4a56a4", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-04-26T21:07:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T20:41:34.000Z", "avg_line_length": 26.253968254, "max_line_length": 73, "alphanum_fraction": 0.3621523579, "num_tokens": 351}
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 10:32:59 2021

Poisson Distribution

A random variable X that has a Poisson distribution represents the number of
events occurring in a fixed time interval with rate parameter λ. λ tells you
the rate at which the events occur. The mean and the variance are both λ.

@author: Roy
"""

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

lam = 5
n = np.arange(0, 26)
y = stats.poisson.pmf(n, lam)
plt.plot(n, y, 'o-')
plt.title("500 years")
plt.show()
{"hexsha": "1df3ea2cbbb49a1342e0b7785ee990b4925a4558", "size": 552, "ext": "py", "lang": "Python", "max_stars_repo_path": "PoissonDistribution.py", "max_stars_repo_name": "IceCube1001/Poisson_Distribution", "max_stars_repo_head_hexsha": "f33f31a639929ec05f0383057e237034d649cfca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PoissonDistribution.py", "max_issues_repo_name": "IceCube1001/Poisson_Distribution", "max_issues_repo_head_hexsha": "f33f31a639929ec05f0383057e237034d649cfca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PoissonDistribution.py", "max_forks_repo_name": "IceCube1001/Poisson_Distribution", "max_forks_repo_head_hexsha": "f33f31a639929ec05f0383057e237034d649cfca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0909090909, "max_line_length": 66, "alphanum_fraction": 0.6956521739, "include": true, "reason": "import numpy,from scipy", "num_tokens": 151}
"""A user interface for teleoperating an agent in an x-magical environment. Modified from https://github.com/unixpickle/obs-tower2/blob/master/obs_tower2/recorder/env_interactor.py """ import time from typing import List import numpy as np import pyglet.window from gym.envs.classic_control.rendering import SimpleImageViewer from PIL import Image UP_DOWN_MAG = 0.5 ANGLE_MAG = np.radians(1.5) OPEN_CLOSE_MAG = np.pi / 8 class KeyboardEnvInteractor(SimpleImageViewer): """User interface for interacting in an x-magical environment.""" def __init__( self, action_dim: int, fps: float = 20, resolution: int = 384, ): super().__init__(maxwidth=resolution) self._action_dim = action_dim self._dt = 1.0 / fps self._resolution = resolution self._keys = pyglet.window.key.KeyStateHandler() self.reset() def imshow(self, image: np.ndarray) -> None: self._last_image = image was_none = self.window is None image = Image.fromarray(image) image = image.resize((self._resolution, self._resolution)) image = np.array(image) super().imshow(image) if was_none: self.window.event(self.on_key_press) self.window.push_handlers(self._keys) def get_action(self) -> List[float]: action = [0.0, 0.0, 0.0] if self._keys[pyglet.window.key.UP] and not self._keys[pyglet.window.key.DOWN]: action[0] = +UP_DOWN_MAG self._started = True elif ( self._keys[pyglet.window.key.DOWN] and not self._keys[pyglet.window.key.UP] ): action[0] = -UP_DOWN_MAG self._started = True if ( self._keys[pyglet.window.key.LEFT] and not self._keys[pyglet.window.key.RIGHT] ): action[1] = ANGLE_MAG self._started = True elif ( self._keys[pyglet.window.key.RIGHT] and not self._keys[pyglet.window.key.LEFT] ): action[1] = -ANGLE_MAG self._started = True if self._keys[pyglet.window.key.SPACE]: action[2] = OPEN_CLOSE_MAG self._started = True if self._keys[pyglet.window.key.ESCAPE]: self._finish_early = True return action[: self._action_dim] def on_key_press(self, x, y): return True def reset(self): self._started = False self._finish_early = False self._last_image = None def run_loop(self, step_fn): """Run an environment interaction loop. The step_fn will be continually called with actions, and it should return observations. When step_fn returns None, the loop is done. """ last_time = time.time() while not self._finish_early: action = self.get_action() if self._started: obs = step_fn(action) if obs is None: return self.imshow(obs) else: # Needed to run the event loop. self.imshow(self._last_image) pyglet.clock.tick() # pytype: disable=module-attr delta = time.time() - last_time time.sleep(max(0, self._dt - delta)) last_time = time.time()
{"hexsha": "66b4d2d4cc3e65e981814d59d72f4c4b0e6f5f14", "size": 3358, "ext": "py", "lang": "Python", "max_stars_repo_path": "xmagical/utils/env_interactor.py", "max_stars_repo_name": "kevinzakka/x-magical", "max_stars_repo_head_hexsha": "ce0533f17b0e02baffc04acded5b5b12eb9d1d00", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2021-03-24T12:55:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T06:52:43.000Z", "max_issues_repo_path": "xmagical/utils/env_interactor.py", "max_issues_repo_name": "kevinzakka/x-magical", "max_issues_repo_head_hexsha": "ce0533f17b0e02baffc04acded5b5b12eb9d1d00", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-15T07:33:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T18:38:32.000Z", "max_forks_repo_path": "xmagical/utils/env_interactor.py", "max_forks_repo_name": "kevinzakka/x-magical", "max_forks_repo_head_hexsha": "ce0533f17b0e02baffc04acded5b5b12eb9d1d00", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-12T03:21:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T23:54:20.000Z", "avg_line_length": 30.8073394495, "max_line_length": 104, "alphanum_fraction": 0.5935080405, "include": true, "reason": "import numpy", "num_tokens": 805}
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 11:03:47 2020

@author: abner
"""
import os
import numpy as np               # arrays
import matplotlib.pyplot as plt  # visualization
import pandas as pd              # data handling

os.chdir('D:/Git Hub-BEST/machinelearning-az/datasets/Part 2 - Regression/Section 6 - Polynomial Regression')

'''Cleaning'''
dataset = pd.read_csv('Position_Salaries.csv')
#print(dataset["Level"])
#type(dataset["Level"])

# Split into X and Y; don't forget .values so we get an array instead of a DataFrame
X = dataset.iloc[:, 1:2].values  # careful: 1 is different from 1:2 in Python. One gives a vector/list, the other a matrix. We want a matrix.
Y = dataset.iloc[:, 2].values

# Split into training and test data
from sklearn.model_selection import train_test_split  # function for splitting datasets
# We choose a test fraction of 0.2 and random_state=0 so the shuffling is reproducible:
# anyone who sets random_state=0 will get the same split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

# THE PACKAGE ALREADY SCALES THE VARIABLES, SO WE SKIP THAT PREPROCESSING STEP

### 1. BUILD A LINEAR REGRESSION WITH sklearn
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train, Y_train)  # fit is a method of LinearRegression

# Visualize the results of the linear model
plt.scatter(X_train, Y_train, color="red")
plt.plot(X_train, lin_reg.predict(X_train), color="blue")
plt.title("Linear Regression Model")
plt.xlabel("Employee position")
plt.ylabel("Salary (in $)")
plt.show()

### 2. BUILD A POLYNOMIAL REGRESSION WITH sklearn (note: it is still linear, i.e. linear in the coefficients)
from sklearn.preprocessing import PolynomialFeatures
# PolynomialFeatures is the essential step: it creates features up to the chosen power
X_poly_train = PolynomialFeatures(degree=5).fit_transform(X_train)  # it adds the intercept column automatically
lin_reg_2 = LinearRegression().fit(X_poly_train, Y_train)
# PolynomialFeatures returns the matrix with the intercept term, the X's, and the X's raised to each power

# Visualize the results of the polynomial model
plt.scatter(X_train, Y_train, color="red")
plt.plot(X_train, lin_reg_2.predict(X_poly_train), color="blue")
plt.title("Polynomial Regression Model")
plt.xlabel("Employee position")
plt.ylabel("Salary (in $)")
plt.show()

# THE PLOT LOOKS UGLY BECAUSE THE VALUES ARE DISCRETE; WE NEED TO SMOOTH IT
# We use numpy's arange for that purpose
X_grid_train = np.arange(min(X_train), max(X_train), 0.1)  # they must be arrays, which is why using .values when extracting X and Y above matters
X_grid_train = X_grid_train.reshape(len(X_grid_train), 1)
X_poly_grid_train = PolynomialFeatures(degree=5).fit_transform(X_grid_train)

# The grid evaluates the model on a fine mesh; without it the curve looks jagged
plt.scatter(X_train, Y_train, color="red")
plt.plot(X_grid_train, lin_reg_2.predict(X_poly_grid_train), color="green")
plt.title("Polynomial Regression Model")
plt.xlabel("Employee position")
plt.ylabel("Salary (in $)")
plt.show()

# Both versions, smoothed and unsmoothed, in one view:
plt.scatter(X_train, Y_train, color="red")
plt.plot(X_train, lin_reg_2.predict(X_poly_train), color="blue")
plt.plot(X_grid_train, lin_reg_2.predict(X_poly_grid_train), color="green")
plt.title("Polynomial Regression Model")
plt.xlabel("Employee position")
plt.ylabel("Salary (in $)")
plt.show()

lin_reg.predict([[6.5]])
lin_reg_2.predict(PolynomialFeatures(degree=5).fit_transform([[6.5]]))

#### USE THE TEST DATA:
X_poly_test = PolynomialFeatures(degree=5).fit_transform(X_test)  # adds the intercept column automatically
plt.scatter(X_test, Y_test, color="red")
plt.plot(X_grid_train, lin_reg_2.predict(X_poly_grid_train), color="blue")
plt.title("Salary vs Years of Experience (Test set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary (in $)")
plt.show()

# Check the model's significance with OLS, treating everything as linear.
# The constant and the analysis come out odd, so it is better handled as a polynomial.
import statsmodels.api as sm
X_const = sm.add_constant(X)
regression_OLS = sm.OLS(endog=Y, exog=X_const.tolist()).fit()  # fit adjusts the model
regression_OLS.summary()

# Using the exogenous variable in polynomial form; NOTE it treats each degree as a separate regressor
X_poly_2 = PolynomialFeatures(degree=3).fit_transform(X)  # adds the intercept column plus x, x^2, and so on
regression_OLS = sm.OLS(endog=Y, exog=X_poly_2.tolist()).fit()
regression_OLS.summary()
{"hexsha": "d560274dfcc829bff8ddfa449be6ca90704ddfcb", "size": 4839, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/Part 2 - Regression/Section 6 - Polynomial Regression/Regresion_Abner.py", "max_stars_repo_name": "abnercasallo/machinelearning-az", "max_stars_repo_head_hexsha": "6f212c29c9870b697d84029f7197e17909e93863", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/Part 2 - Regression/Section 6 - Polynomial Regression/Regresion_Abner.py", "max_issues_repo_name": "abnercasallo/machinelearning-az", "max_issues_repo_head_hexsha": "6f212c29c9870b697d84029f7197e17909e93863", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/Part 2 - Regression/Section 6 - Polynomial Regression/Regresion_Abner.py", "max_forks_repo_name": "abnercasallo/machinelearning-az", "max_forks_repo_head_hexsha": "6f212c29c9870b697d84029f7197e17909e93863", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1023622047, "max_line_length": 141, "alphanum_fraction": 0.7623475925, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 1372}
# -*- coding: utf-8 -*-
from autograd.blocks.hyperbolic import sinh
from autograd.blocks.hyperbolic import cosh
from autograd.blocks.hyperbolic import tanh
from autograd.variable import Variable
import numpy as np
import autograd as ad


def test_sinh_forward():
    ad.set_mode('forward')
    # define the input variable
    data = np.random.random(5)
    x = Variable(data)
    # define the custom block and compute its output
    sinh_block = sinh()
    y_block = sinh_block(x)
    y_block.compute_gradients()
    # define the expected output: d/dx sinh(x) = cosh(x)
    data_true = np.sinh(data)
    gradient_true = np.diag(np.cosh(data))
    # assert data pass
    assert np.equal(data_true, y_block.data).all(), \
        'wrong sinh data pass. expected {}, given {}'.format(data_true, y_block.data)
    # assert gradient forward pass
    assert np.equal(gradient_true, y_block.gradient).all(), \
        'wrong sinh gradient forward pass. expected {}, given {}'.format(gradient_true, y_block.gradient)


def test_sinh_reverse():
    ad.set_mode('reverse')
    # define the input variable
    data = np.random.random(5)
    x = Variable(data)
    # define the custom block and compute its output
    sinh_block = sinh()
    y_block = sinh_block(x)
    y_block.compute_gradients()
    # define the expected output
    data_true = np.sinh(data)
    gradient_true = np.diag(np.cosh(data))
    # assert data pass
    assert np.equal(data_true, y_block.data).all(), \
        'wrong sinh data pass. expected {}, given {}'.format(data_true, y_block.data)
    # assert gradient reverse pass
    assert np.equal(gradient_true, y_block.gradient).all(), \
        'wrong sinh gradient reverse pass. expected {}, given {}'.format(gradient_true, y_block.gradient)
    # restore the default mode
    ad.set_mode('forward')


def test_cosh_forward():
    ad.set_mode('forward')
    data = np.random.random(5)
    x = Variable(data)
    cosh_block = cosh()
    y_block = cosh_block(x)
    y_block.compute_gradients()
    # expected output: d/dx cosh(x) = sinh(x)
    data_true = np.cosh(data)
    gradient_true = np.diag(np.sinh(data))
    assert np.equal(data_true, y_block.data).all(), \
        'wrong cosh data pass. expected {}, given {}'.format(data_true, y_block.data)
    assert np.equal(gradient_true, y_block.gradient).all(), \
        'wrong cosh gradient forward pass. expected {}, given {}'.format(gradient_true, y_block.gradient)


def test_cosh_reverse():
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    cosh_block = cosh()
    y_block = cosh_block(x)
    y_block.compute_gradients()
    data_true = np.cosh(data)
    gradient_true = np.diag(np.sinh(data))
    assert np.equal(data_true, y_block.data).all(), \
        'wrong cosh data pass. expected {}, given {}'.format(data_true, y_block.data)
    assert np.equal(gradient_true, y_block.gradient).all(), \
        'wrong cosh gradient reverse pass. expected {}, given {}'.format(gradient_true, y_block.gradient)
    ad.set_mode('forward')


def test_tanh_forward():
    ad.set_mode('forward')
    data = np.random.random(5)
    x = Variable(data)
    tanh_block = tanh()
    y_block = tanh_block(x)
    y_block.compute_gradients()
    # expected output: d/dx tanh(x) = 1 - tanh(x)^2
    data_true = np.tanh(data)
    gradient_true = np.diag(1 - np.tanh(data)**2)
    assert np.equal(data_true, y_block.data).all(), \
        'wrong tanh data pass. expected {}, given {}'.format(data_true, y_block.data)
    assert np.equal(gradient_true, y_block.gradient).all(), \
        'wrong tanh gradient forward pass. expected {}, given {}'.format(gradient_true, y_block.gradient)


def test_tanh_reverse():
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    tanh_block = tanh()
    y_block = tanh_block(x)
    y_block.compute_gradients()
    data_true = np.tanh(data)
    gradient_true = np.diag(1 - np.tanh(data)**2)
    assert np.equal(data_true, y_block.data).all(), \
        'wrong tanh data pass. expected {}, given {}'.format(data_true, y_block.data)
    assert np.equal(gradient_true, y_block.gradient).all(), \
        'wrong tanh gradient reverse pass. expected {}, given {}'.format(gradient_true, y_block.gradient)
    ad.set_mode('forward')
{"hexsha": "0f364496c346b800c3bce2083f5fe3a932fe9018", "size": 10375, "ext": "py", "lang": "Python", "max_stars_repo_path": "autograd/tests/test_hyperbolic.py", "max_stars_repo_name": "pmaederyork/Dragrongrad", "max_stars_repo_head_hexsha": "32794d561f8d0273592ed55d315013eab2c24b8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-12-17T16:24:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-03T22:40:50.000Z", "max_issues_repo_path": "autograd/tests/test_hyperbolic.py", "max_issues_repo_name": "cs207-project-group4/project-repo", "max_issues_repo_head_hexsha": "d5ee88d2a7d16477d816d830ba90d241a05e3b48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-10-18T17:59:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-08T16:06:34.000Z", "max_forks_repo_path": "autograd/tests/test_hyperbolic.py", "max_forks_repo_name": "cs207-project-group4/project-repo", "max_forks_repo_head_hexsha": "d5ee88d2a7d16477d816d830ba90d241a05e3b48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-19T06:06:13.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-19T06:06:13.000Z", "avg_line_length": 46.5246636771, "max_line_length": 155, "alphanum_fraction": 0.3180722892, "include": true, "reason": "import numpy", "num_tokens": 1268}
#!/usr/bin/python3

import numpy as np

from mseg.utils.cv2_utils import (
    grayscale_to_color,
    form_hstacked_imgs,
    form_vstacked_imgs,
    add_text_cv2,
)


def test_add_text_cv2() -> None:
    """Smoke test."""
    img = 255 * np.ones((512, 512, 3), np.uint8)
    text = "Hello World!"
    add_text_cv2(img, text)

    import matplotlib.pyplot as plt

    plt.imshow(img)
    # plt.show()
    plt.close()


def test_form_hstacked_imgs_three() -> None:
    """
    Horizontally stack three 2x2 RGB images into a single 2x6 RGB image.
    """
    hstack_save_fpath = "htmp1.png"

    img1 = np.zeros((2, 2, 3), dtype=np.uint8)
    img1[0, 0, :] = [255, 0, 1]

    img2 = np.zeros((2, 2, 3), dtype=np.uint8)
    img2[1, 1, :] = [5, 10, 15]

    img3 = np.zeros((2, 2, 3), dtype=np.uint8)
    img3[0, 1, :] = [255, 254, 253]

    img_list = [img1, img2, img3]
    hstack_img = form_hstacked_imgs(img_list, hstack_save_fpath, save_to_disk=False)

    # now has 6 columns
    gt_hstack_img = np.zeros((2, 6, 3), dtype=np.uint8)
    gt_hstack_img[0, 0, :] = [255, 0, 1]
    gt_hstack_img[1, 3, :] = [5, 10, 15]
    gt_hstack_img[0, 5, :] = [255, 254, 253]

    assert np.allclose(hstack_img, gt_hstack_img)


def test_form_hstacked_imgs_two() -> None:
    """
    Horizontally stack two 2x2 RGB images into a single 2x4 RGB image.
    """
    hstack_save_fpath = "htmp2.png"

    img1 = np.zeros((2, 2, 3), dtype=np.uint8)
    img1[0, 0, :] = [255, 0, 1]

    img2 = np.zeros((2, 2, 3), dtype=np.uint8)
    img2[1, 1, :] = [5, 10, 15]

    img_list = [img1, img2]
    hstack_img = form_hstacked_imgs(img_list, hstack_save_fpath, save_to_disk=False)

    gt_hstack_img = np.zeros((2, 4, 3), dtype=np.uint8)
    gt_hstack_img[0, 0, :] = [255, 0, 1]
    gt_hstack_img[1, 3, :] = [5, 10, 15]

    assert np.allclose(hstack_img, gt_hstack_img)


def test_form_vstacked_imgs_two() -> None:
    """
    Vertically stack two 2x2 RGB images into a single 4x2 RGB image.
    """
    vstack_save_fpath = "vtmp.png"

    img1 = np.zeros((2, 2, 3), dtype=np.uint8)
    img1[0, 0, :] = [255, 0, 1]

    img2 = np.zeros((2, 2, 3), dtype=np.uint8)
    img2[1, 1, :] = [5, 10, 15]

    img_list = [img1, img2]
    vstack_img = form_vstacked_imgs(img_list, vstack_save_fpath, save_to_disk=False)

    gt_vstack_img = np.zeros((4, 2, 3), dtype=np.uint8)
    gt_vstack_img[0, 0, :] = [255, 0, 1]
    gt_vstack_img[3, 1, :] = [5, 10, 15]

    assert np.allclose(vstack_img, gt_vstack_img)


def test_grayscale_to_color() -> None:
    """
    Convert a simple 2x2 grayscale image into an RGB image.
    """
    gray_img = np.array([[0, 255], [255, 0]], dtype=np.uint8)
    rgb_img = grayscale_to_color(gray_img)
    assert rgb_img.shape == (2, 2, 3)
    for i in range(2):
        assert np.allclose(rgb_img[:, :, i], gray_img)
{"hexsha": "4a4fb19f5fc7675af815b41a219a103f26638f32", "size": 2819, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_cv2_utils.py", "max_stars_repo_name": "mintar/mseg-api", "max_stars_repo_head_hexsha": "df7b899b47b33ad82dcbf17c289856a1f1abea22", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 213, "max_stars_repo_stars_event_min_datetime": "2020-04-25T02:51:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T09:57:26.000Z", "max_issues_repo_path": "tests/test_cv2_utils.py", "max_issues_repo_name": "mintar/mseg-api", "max_issues_repo_head_hexsha": "df7b899b47b33ad82dcbf17c289856a1f1abea22", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2020-04-28T05:47:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T03:05:07.000Z", "max_forks_repo_path": "tests/test_cv2_utils.py", "max_forks_repo_name": "mintar/mseg-api", "max_forks_repo_head_hexsha": "df7b899b47b33ad82dcbf17c289856a1f1abea22", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2020-04-29T16:11:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T15:50:15.000Z", "avg_line_length": 25.6272727273, "max_line_length": 84, "alphanum_fraction": 0.6101454416, "include": true, "reason": "import numpy", "num_tokens": 1027}
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(-3, 3, 50)
y1 = 2*x + 1
y2 = x**2

# draw both curves in the same figure
plt.figure()
plt.plot(x, y1)
plt.plot(x, y2, color='red', linewidth=2.0, linestyle='--')  # specify color, line width and line style

# restrict the visible x and y ranges
plt.xlim((-1, 2))
plt.ylim((-2, 3))
# set the x and y axis labels
plt.xlabel("I am x")
plt.ylabel("I am y")

# set the spacing of the x ticks
new_ticks = np.linspace(-1, 2, 5)
plt.xticks(new_ticks)
plt.yticks([-2, -1.5, 0, 1.5, 3],
           [r'$Really\ bad\ \alpha$', r'$bad$', r'$normal$', r'$good$', r'$very\ good$'])
# the r prefix marks a raw string; $...$ renders the label in math font

# gca = 'get current axis'
ax = plt.gca()  # get the current axes
# hide the top and right spines ("spine" = the line bounding the plot area)
ax.spines['right'].set_color('None')
ax.spines['top'].set_color('None')
# choose which spines act as the axes
ax.xaxis.set_ticks_position('bottom')  # use the bottom spine as the x axis
ax.yaxis.set_ticks_position('left')    # use the left spine as the y axis
ax.spines['bottom'].set_position(('data', 0))  # note the nested parentheses here
ax.spines['left'].set_position(('data', 0))
plt.show()
{"hexsha": "705b4746e07442322764c7be61e71fbca137b9c9", "size": 913, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/extends/analysis/mkfigs/1.py", "max_stars_repo_name": "li-phone/DefectNet", "max_stars_repo_head_hexsha": "f1b6f44a34581c8942d7ee5341cb9da4e76a225a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2020-12-02T14:03:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T08:24:36.000Z", "max_issues_repo_path": "tools/extends/analysis/mkfigs/1.py", "max_issues_repo_name": "li-phone/DefectNet", "max_issues_repo_head_hexsha": "f1b6f44a34581c8942d7ee5341cb9da4e76a225a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-05-24T07:28:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:52:55.000Z", "max_forks_repo_path": "tools/extends/analysis/mkfigs/1.py", "max_forks_repo_name": "li-phone/DefectNet", "max_forks_repo_head_hexsha": "f1b6f44a34581c8942d7ee5341cb9da4e76a225a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-04-21T08:20:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-30T02:27:14.000Z", "avg_line_length": 24.0263157895, "max_line_length": 70, "alphanum_fraction": 0.6516976999, "include": true, "reason": "import numpy", "num_tokens": 397}
(* * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause *) theory CompoundCTypes imports Vanilla32 Padding begin definition empty_typ_info :: "typ_name \<Rightarrow> 'a typ_info" where "empty_typ_info tn \<equiv> TypDesc (TypAggregate []) tn" primrec extend_ti :: "'a typ_info \<Rightarrow> 'a typ_info \<Rightarrow> field_name \<Rightarrow> 'a typ_info" and extend_ti_struct :: "'a field_desc typ_struct \<Rightarrow> 'a typ_info \<Rightarrow> field_name \<Rightarrow> 'a field_desc typ_struct" where et0: "extend_ti (TypDesc st nm) t fn = TypDesc (extend_ti_struct st t fn) nm" | et1: "extend_ti_struct (TypScalar n sz algn) t fn = TypAggregate [DTPair t fn]" | et2: "extend_ti_struct (TypAggregate ts) t fn = TypAggregate (ts@[DTPair t fn])" lemma aggregate_empty_typ_info [simp]: "aggregate (empty_typ_info tn)" by (simp add: empty_typ_info_def) lemma aggregate_extend_ti [simp]: "aggregate (extend_ti tag t f)" apply(cases tag) apply(rename_tac typ_struct xs) apply(case_tac typ_struct, auto) done definition update_desc :: "('a \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'b field_desc \<Rightarrow> 'a field_desc" where "update_desc f_ab f_upd_ab d \<equiv> \<lparr> field_access = (field_access d) \<circ> f_ab, field_update = \<lambda>bs v. f_upd_ab (field_update d bs (f_ab v)) v \<rparr>" definition adjust_ti :: "'b typ_info \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'a typ_info" where "adjust_ti t f_ab f_upd_ab \<equiv> map_td (\<lambda>n algn. update_desc f_ab f_upd_ab) t" lemma typ_desc_size_update_ti [simp]: "(size_td (adjust_ti t f g) = size_td t)" by (simp add: adjust_ti_def) definition fg_cons :: "('a \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> bool" where "fg_cons f g \<equiv> (\<forall>bs v. f (g bs v) = bs) \<and> (\<forall>bs bs' v. g bs (g bs' v) = g bs v) \<and> (\<forall>v. g (f v) v = v)" lemma export_tag_adjust_ti [simp]: "\<forall>bs. fg_cons f g \<longrightarrow> wf_fd t \<longrightarrow> export_uinfo (adjust_ti t f g) = export_uinfo t" "\<forall>bs. fg_cons f g \<longrightarrow> wf_fd_struct st \<longrightarrow> map_td_struct field_norm (map_td_struct (\<lambda>n algn d. update_desc f g d) st) = map_td_struct field_norm st" "\<forall>bs. fg_cons f g \<longrightarrow> wf_fd_list ts \<longrightarrow> map_td_list field_norm (map_td_list (\<lambda>n algn d. update_desc f g d) ts) = map_td_list field_norm ts" "\<forall>bs. fg_cons f g \<longrightarrow> wf_fd_pair x \<longrightarrow> map_td_pair field_norm (map_td_pair (\<lambda>n algn d. 
update_desc f g d) x) = map_td_pair field_norm x" unfolding adjust_ti_def by (induct t and st and ts and x, all \<open>clarsimp simp: export_uinfo_def\<close>) (fastforce simp: update_desc_def field_norm_def fg_cons_def fd_cons_struct_def fd_cons_access_update_def fd_cons_desc_def) definition ti_typ_combine :: "'b::c_type itself \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> field_name \<Rightarrow> 'a typ_info \<Rightarrow> 'a typ_info" where "ti_typ_combine t_b f_ab f_upd_ab fn tag \<equiv> let nf = adjust_ti (typ_info_t TYPE('b)) f_ab f_upd_ab in extend_ti tag nf fn" primrec padding_fields :: "'a typ_desc \<Rightarrow> field_name list" and padding_fields_struct :: "'a typ_struct \<Rightarrow> field_name list" where pf0: "padding_fields (TypDesc st tn) = padding_fields_struct st" | pf1: "padding_fields_struct (TypScalar n algn d) = []" | pf2: "padding_fields_struct (TypAggregate xs) = filter (\<lambda>x. hd x = CHR ''!'') (map dt_snd xs)" primrec non_padding_fields :: "'a typ_desc \<Rightarrow> field_name list" and non_padding_fields_struct :: "'a typ_struct \<Rightarrow> field_name list" where npf0: "non_padding_fields (TypDesc st tn) = non_padding_fields_struct st" | npf1: "non_padding_fields_struct (TypScalar n algn d) = []" | npf2: "non_padding_fields_struct (TypAggregate xs) = filter (\<lambda>x. hd x \<noteq> CHR ''!'') (map dt_snd xs)" definition field_names_list :: "'a typ_desc \<Rightarrow> field_name list" where "field_names_list tag \<equiv> non_padding_fields tag @ padding_fields tag" definition ti_pad_combine :: "nat \<Rightarrow> 'a typ_info \<Rightarrow> 'a typ_info" where "ti_pad_combine n tag \<equiv> let fn = foldl (@) ''!pad_'' (field_names_list tag); td = \<lparr> field_access = \<lambda>v. id, field_update = \<lambda>bs. 
id \<rparr>; nf = TypDesc (TypScalar n 0 td) ''!pad_typ'' in extend_ti tag nf fn" lemma aggregate_ti_pad_combine [simp]: "aggregate (ti_pad_combine n tag)" by (simp add: ti_pad_combine_def Let_def) definition ti_typ_pad_combine :: "'b::c_type itself \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> field_name \<Rightarrow> 'a typ_info \<Rightarrow> 'a typ_info" where "ti_typ_pad_combine t_b f_ab f_upd_ab fn tag \<equiv> let pad = padup (align_of TYPE('b)) (size_td tag); ntag = if 0 < pad then ti_pad_combine pad tag else tag in ti_typ_combine t_b f_ab f_upd_ab fn ntag" definition final_pad :: "'a typ_info \<Rightarrow> 'a typ_info" where "final_pad tag \<equiv> let n = padup (2^align_td tag) (size_td tag) in if 0 < n then ti_pad_combine n tag else tag" lemma field_names_list_empty_typ_info [simp]: "set (field_names_list (empty_typ_info tn)) = {}" by (simp add: empty_typ_info_def field_names_list_def) lemma field_names_list_extend_ti [simp]: "set (field_names_list (extend_ti tag t fn)) = set (field_names_list tag) \<union> {fn}" unfolding field_names_list_def apply(case_tac tag) apply(rename_tac typ_struct xs) apply(case_tac typ_struct; simp) done lemma field_names_list_ti_typ_combine [simp]: "set (field_names_list (ti_typ_combine t_b f_ab f_upd_ab fn tag)) = set (field_names_list tag) \<union> {fn}" by (clarsimp simp: ti_typ_combine_def Let_def) lemma field_names_list_ti_pad_combine [simp]: "set (field_names_list (ti_pad_combine n tag)) = set (field_names_list tag) \<union> {foldl (@) ''!pad_'' (field_names_list tag)}" by (clarsimp simp: ti_pad_combine_def Let_def) \<comment> \<open>matches on padding\<close> lemma hd_string_hd_fold_eq [simp, rule_format]: "\<lbrakk> s \<noteq> []; hd s = CHR ''!'' \<rbrakk> \<Longrightarrow> hd (foldl (@) s xs) = CHR ''!''" by (induct xs arbitrary: s; clarsimp) lemma field_names_list_ti_typ_pad_combine [simp]: "hd x \<noteq> CHR ''!'' \<Longrightarrow> x \<in> set (field_names_list (ti_typ_pad_combine t_b f_ab f_upd_ab fn tag)) = (x \<in> set (field_names_list tag) \<union> {fn})" by (auto simp: ti_typ_pad_combine_def Let_def) lemma wf_desc_empty_typ_info [simp]: "wf_desc (empty_typ_info tn)" by (simp add: empty_typ_info_def) lemma wf_desc_extend_ti: "\<lbrakk> wf_desc tag; wf_desc t; f \<notin> set (field_names_list tag) \<rbrakk> \<Longrightarrow> wf_desc (extend_ti tag t f)" unfolding field_names_list_def apply(cases tag, rename_tac typ_struct xs) apply(case_tac typ_struct; clarsimp) done lemma foldl_append_length: "length (foldl (@) s xs) \<ge> length s" apply(induct xs arbitrary: s, clarsimp) apply(rename_tac a list s) apply(drule_tac x="s@a" in meta_spec) apply clarsimp done lemma foldl_append_nmem: "s \<noteq> [] \<Longrightarrow> foldl (@) s xs \<notin> set xs" apply(induct xs arbitrary: s, clarsimp) apply(rename_tac a list s) apply(drule_tac x="s@a" in meta_spec) apply clarsimp apply(subgoal_tac "length (foldl (@) (s@a) list) \<ge> length (s@a)") apply simp apply(rule foldl_append_length) done lemma wf_desc_ti_pad_combine: "wf_desc tag \<Longrightarrow> wf_desc (ti_pad_combine n tag)" apply(clarsimp simp: ti_pad_combine_def Let_def) apply(erule wf_desc_extend_ti) apply simp apply(rule foldl_append_nmem, simp) done lemma wf_desc_adjust_ti [simp]: "wf_desc (adjust_ti t f g) = wf_desc (t::'a typ_info)" by (simp add: adjust_ti_def wf_desc_map) lemma wf_desc_ti_typ_combine: "\<lbrakk> wf_desc tag; fn \<notin> set (field_names_list tag) \<rbrakk> \<Longrightarrow> wf_desc (ti_typ_combine 
(t_b::'a::wf_type itself) f_ab f_upd_ab fn tag)" by (fastforce simp: ti_typ_combine_def Let_def elim: wf_desc_extend_ti) lemma wf_desc_ti_typ_pad_combine: "\<lbrakk> wf_desc tag; fn \<notin> set (field_names_list tag); hd fn \<noteq> CHR ''!'' \<rbrakk> \<Longrightarrow> wf_desc (ti_typ_pad_combine (t_b::'a::wf_type itself) f_ab f_upd_ab fn tag)" unfolding ti_typ_pad_combine_def Let_def by (auto intro!: wf_desc_ti_typ_combine wf_desc_ti_pad_combine) lemma wf_desc_final_pad: "wf_desc tag \<Longrightarrow> wf_desc (final_pad tag)" by (auto simp: final_pad_def Let_def elim: wf_desc_ti_pad_combine) lemma wf_size_desc_extend_ti: "\<lbrakk> wf_size_desc tag; wf_size_desc t \<rbrakk> \<Longrightarrow> wf_size_desc (extend_ti tag t fn)" apply(cases tag, rename_tac typ_struct list) apply(case_tac typ_struct, auto) done lemma wf_size_desc_ti_pad_combine: "\<lbrakk> wf_size_desc tag; 0 < n \<rbrakk> \<Longrightarrow> wf_size_desc (ti_pad_combine n tag)" by (fastforce simp: ti_pad_combine_def Let_def elim: wf_size_desc_extend_ti) lemma wf_size_desc_adjust_ti: "wf_size_desc (adjust_ti t f g) = wf_size_desc (t::'a typ_info)" by (simp add: adjust_ti_def wf_size_desc_map) lemma wf_size_desc_ti_typ_combine: "wf_size_desc tag \<Longrightarrow> wf_size_desc (ti_typ_combine (t_b::'a::wf_type itself) f_ab f_upd_ab fn tag)" by (fastforce simp: wf_size_desc_adjust_ti ti_typ_combine_def Let_def elim: wf_size_desc_extend_ti) lemma wf_size_desc_ti_typ_pad_combine: "wf_size_desc tag \<Longrightarrow> wf_size_desc (ti_typ_pad_combine (t_b::'a::wf_type itself) f_ab f_upd_ab fn tag)" by (auto simp: ti_typ_pad_combine_def Let_def intro: wf_size_desc_ti_typ_combine elim: wf_size_desc_ti_pad_combine) lemma wf_size_desc_ti_typ_combine_empty [simp]: "wf_size_desc (ti_typ_combine (t_b::'a::wf_type itself) f_ab f_upd_ab fn (empty_typ_info tn))" by (clarsimp simp: ti_typ_combine_def Let_def empty_typ_info_def wf_size_desc_adjust_ti) lemma wf_size_desc_ti_typ_pad_combine_empty [simp]: "wf_size_desc (ti_typ_pad_combine (t_b::'a::wf_type itself) f_ab f_upd_ab fn (empty_typ_info tn))" by (clarsimp simp: ti_typ_pad_combine_def Let_def ti_typ_combine_def empty_typ_info_def ti_pad_combine_def wf_size_desc_adjust_ti) lemma wf_size_desc_final_pad: "wf_size_desc tag \<Longrightarrow> wf_size_desc (final_pad tag)" by (fastforce simp: final_pad_def Let_def elim: wf_size_desc_ti_pad_combine) lemma wf_fdp_set_comp_simp [simp]: "wf_fdp {(a, n # b) |a b. (a, b) \<in> tf_set t} = wf_fdp (tf_set t)" unfolding wf_fdp_def by fastforce lemma lf_set_adjust_ti': "\<forall>d fn. d \<in> lf_set (map_td (\<lambda>n algn d. update_desc f g d) t) fn \<longrightarrow> (\<exists>y. lf_fd d=update_desc f g (lf_fd y) \<and> lf_sz d=lf_sz y \<and> lf_fn d=lf_fn y \<and> y \<in> lf_set t fn)" "\<forall>d fn. d \<in> lf_set_struct (map_td_struct (\<lambda>n algn d. update_desc f g d) st) fn \<longrightarrow> (\<exists>y. lf_fd d=update_desc f g (lf_fd y) \<and> lf_sz d=lf_sz y \<and> lf_fn d=lf_fn y \<and> y \<in> lf_set_struct st fn)" "\<forall>d fn. d \<in> lf_set_list (map_td_list (\<lambda>n algn d. update_desc f g d) ts) fn \<longrightarrow> (\<exists>y. lf_fd d=update_desc f g (lf_fd y) \<and> lf_sz d=lf_sz y \<and> lf_fn d=lf_fn y \<and> y \<in> lf_set_list ts fn)" "\<forall>d fn. d \<in> lf_set_pair (map_td_pair (\<lambda>n algn d. update_desc f g d) x) fn \<longrightarrow> (\<exists>y. 
lf_fd d=update_desc f g (lf_fd y) \<and> lf_sz d=lf_sz y \<and> lf_fn d=lf_fn y \<and> y \<in> lf_set_pair x fn)" unfolding update_desc_def by (induct t and st and ts and x) fastforce+ lemma lf_set_adjust_ti: "\<lbrakk> d \<in> lf_set (adjust_ti t f g) fn; \<And>y. g (f y) y = y \<rbrakk> \<Longrightarrow> (\<exists>y. lf_fd d=update_desc f g (lf_fd y) \<and> lf_sz d=lf_sz y \<and> lf_fn d=lf_fn y \<and> y \<in> lf_set t fn)" by (simp add: lf_set_adjust_ti' adjust_ti_def) lemma fd_cons_struct_id_simp [simp]: "fd_cons_struct (TypScalar n algn \<lparr>field_access = \<lambda>v. id, field_update = \<lambda>bs. id\<rparr>)" by (auto simp: fd_cons_struct_def fd_cons_double_update_def fd_cons_update_access_def fd_cons_access_update_def fd_cons_length_def fd_cons_update_normalise_def fd_cons_desc_def) lemma field_desc_adjust_ti: "fg_cons f g \<longrightarrow> field_desc (adjust_ti (t::'a typ_info) f g) = update_desc f g (field_desc t)" "fg_cons f g \<longrightarrow> field_desc_struct (map_td_struct (\<lambda>n algn d. update_desc f g d) st) = update_desc f g (field_desc_struct st)" "fg_cons f g \<longrightarrow> field_desc_list (map_td_list (\<lambda>n algn d. update_desc f g d) ts) = update_desc f g (field_desc_list ts)" "fg_cons f g \<longrightarrow> field_desc_pair (map_td_pair (\<lambda>n algn d. update_desc f g d) x) = update_desc f g (field_desc_pair x)" unfolding adjust_ti_def by (induct t and st and ts and x) (fastforce simp: fg_cons_def update_desc_def)+ lemma update_ti_adjust_ti: "fg_cons f g \<Longrightarrow> update_ti_t (adjust_ti t f g) bs v = g (update_ti_t t bs (f v)) v" using field_desc_adjust_ti(1) [of f g t] by (clarsimp simp: update_desc_def) declare field_desc_def [simp del] lemma aggregate_ti_typ_combine [simp]: "aggregate (ti_typ_combine t_b f_ab f_upd_ab fn tag)" by (simp add: ti_typ_combine_def Let_def) lemma aggregate_ti_typ_pad_combine [simp]: "aggregate (ti_typ_pad_combine t_b f_ab f_upd_ab fn tag)" by (simp add: ti_typ_pad_combine_def Let_def) lemma align_of_empty_typ_info [simp]: "align_td (empty_typ_info tn) = 0" by (simp add: empty_typ_info_def) lemma align_of_tag_list [simp]: "align_td_list (xs @ [DTPair t fn]) = max (align_td_list xs) (align_td t)" by (induct_tac xs) auto lemma align_of_extend_ti [simp]: "aggregate ti \<Longrightarrow> align_td (extend_ti ti t fn) = max (align_td ti) (align_td t)" apply (cases ti, rename_tac typ_struct xs) apply (case_tac typ_struct; clarsimp) done lemma align_of_adjust_ti [simp]: "align_td (adjust_ti t f g) = align_td (t::'a typ_info)" by (simp add: adjust_ti_def) lemma align_of_ti_typ_combine [simp]: "aggregate ti \<Longrightarrow> align_td (ti_typ_combine (t::'a::c_type itself) f g fn ti) = max (align_td ti) (align_td (typ_info_t (TYPE('a))))" by (clarsimp simp: ti_typ_combine_def Let_def align_of_def) lemma align_of_ti_pad_combine [simp]: "aggregate ti \<Longrightarrow> align_td (ti_pad_combine n ti) = align_td ti" by (clarsimp simp: ti_pad_combine_def Let_def max_def) lemma align_of_final_pad: "aggregate ti \<Longrightarrow> align_td (final_pad ti) = align_td ti" by (auto simp: final_pad_def Let_def max_def) lemma align_of_ti_typ_pad_combine [simp]: "aggregate ti \<Longrightarrow> align_td (ti_typ_pad_combine (t::'a::c_type itself) f g fn ti) = max (align_td ti) (align_td (typ_info_t TYPE('a)))" by (clarsimp simp: ti_typ_pad_combine_def Let_def) definition fu_s_comm_set :: "(byte list \<Rightarrow> 'a \<Rightarrow> 'a) set \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a) set \<Rightarrow> bool" where "fu_s_comm_set 
X Y \<equiv> \<forall>x y. x \<in> X \<and> y \<in> Y \<longrightarrow> (\<forall>v bs bs'. x bs (y bs' v) = y bs' (x bs v))" lemma fc_empty_ti [simp]: "fu_commutes (update_ti_t (empty_typ_info tn)) f" by (auto simp: fu_commutes_def empty_typ_info_def) lemma fc_extend_ti: "\<lbrakk> fu_commutes (update_ti_t s) h; fu_commutes (update_ti_t t) h \<rbrakk> \<Longrightarrow> fu_commutes (update_ti_t (extend_ti s t fn)) h" apply(case_tac s, rename_tac typ_struct xs) apply(case_tac typ_struct, auto simp: fu_commutes_def) done lemma fc_update_ti: "\<lbrakk> fu_commutes (update_ti_t ti) h; fg_cons f g; \<forall>v bs bs'. g bs (h bs' v) = h bs' (g bs v); \<forall>bs v. f (h bs v) = f v \<rbrakk> \<Longrightarrow> fu_commutes (update_ti_t (adjust_ti t f g)) h" by (auto simp: fu_commutes_def update_ti_adjust_ti) lemma fc_ti_typ_combine: "\<lbrakk> fu_commutes (update_ti_t ti) h; fg_cons f g; \<forall>v bs bs'. g bs (h bs' v) = h bs' (g bs v); \<forall>bs v. f (h bs v) = f v \<rbrakk> \<Longrightarrow> fu_commutes (update_ti_t (ti_typ_combine t f g fn ti)) h" apply(clarsimp simp: ti_typ_combine_def Let_def) apply(rule fc_extend_ti, assumption) apply(rule fc_update_ti; simp) done lemma fc_ti_pad_combine: "fu_commutes (update_ti_t ti) f \<Longrightarrow> fu_commutes (update_ti_t (ti_pad_combine n ti)) f" apply(clarsimp simp: ti_pad_combine_def Let_def) apply(rule fc_extend_ti, assumption) apply(auto simp: fu_commutes_def) done lemma fc_ti_typ_pad_combine: "\<lbrakk> fu_commutes (update_ti_t ti) h; fg_cons f g; \<forall>v bs bs'. g bs (h bs' v) = h bs' (g bs v); \<forall>bs v. f (h bs v) = f v \<rbrakk> \<Longrightarrow> fu_commutes (update_ti_t (ti_typ_pad_combine t f g fn ti)) h" apply(clarsimp simp: ti_typ_pad_combine_def Let_def) apply(rule conjI; clarsimp) apply(rule fc_ti_typ_combine; assumption?) apply(erule fc_ti_pad_combine) apply(erule (3) fc_ti_typ_combine) done definition fu_eq_mask :: "'a typ_info \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> bool" where "fu_eq_mask ti f \<equiv> \<forall>bs v v'. length bs = size_td ti \<longrightarrow> update_ti_t ti bs (f v) = update_ti_t ti bs (f v')" lemma fu_eq_mask: "\<lbrakk> length bs = size_td ti; fu_eq_mask ti id \<rbrakk> \<Longrightarrow> update_ti_t ti bs v = update_ti_t ti bs w" by (clarsimp simp: fu_eq_mask_def update_ti_t_def) lemma fu_eq_mask_ti_pad_combine: "\<lbrakk> fu_eq_mask ti f; aggregate ti \<rbrakk> \<Longrightarrow> fu_eq_mask (ti_pad_combine n ti) f" unfolding ti_pad_combine_def Let_def apply(case_tac ti, rename_tac typ_struct xs) apply(case_tac typ_struct, auto simp: fu_eq_mask_def update_ti_list_t_def) done lemma fu_eq_mask_final_pad: "\<lbrakk> fu_eq_mask ti f; aggregate ti \<rbrakk> \<Longrightarrow> fu_eq_mask (final_pad ti) f" by (fastforce simp: final_pad_def Let_def elim: fu_eq_mask_ti_pad_combine) definition upd_local :: "('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> bool" where "upd_local g \<equiv> \<forall>j k v v'. g k v = g k v' \<longrightarrow> g j v = g j v'" lemma fg_cons_upd_local: "fg_cons f g \<Longrightarrow> upd_local g" apply(clarsimp simp: fg_cons_def upd_local_def) apply(drule_tac f="g j" in arg_cong) apply simp done lemma fu_eq_mask_ti_typ_combine: "\<lbrakk> fu_eq_mask ti (\<lambda>v. 
(g (f undefined) (h v))); fg_cons f g;
     fu_commutes (update_ti_t ti) g; aggregate ti \<rbrakk>
   \<Longrightarrow> fu_eq_mask (ti_typ_combine (t::'a::mem_type itself) f g fn ti) h"
  apply(frule fg_cons_upd_local)
  apply(clarsimp simp: ti_typ_combine_def Let_def)
  apply(case_tac ti, rename_tac typ_struct xs)
  apply(case_tac typ_struct; clarsimp)
  apply(rename_tac xs')
  apply(clarsimp simp: fu_eq_mask_def update_ti_adjust_ti)
  apply(clarsimp simp: update_ti_list_t_def size_of_def)
  apply(subst upd [where w="f undefined"])
   apply(simp add: size_of_def)
  apply(subst upd [where w="f undefined" and v="f (h v')" for v'])
   apply(simp add: size_of_def)
  apply(subgoal_tac "fu_commutes (\<lambda>v. update_ti_list_t xs' v) g")
   apply(clarsimp simp: fu_commutes_def)
   apply(frule_tac x="h v" in spec)
   apply(rotate_tac -1)
   apply(drule_tac x="take (size_td_list xs') bs" in spec)
   apply(drule_tac x="update_ti_t (typ_info_t TYPE('a)) (drop (size_td_list xs') bs) (f undefined)" in spec)
   apply(frule_tac x="h v'" in spec)
   apply(rotate_tac -1)
   apply(drule_tac x="take (size_td_list xs') bs" in spec)
   apply(drule_tac x="update_ti_t (typ_info_t TYPE('a)) (drop (size_td_list xs') bs) (f undefined)" in spec)
   apply(clarsimp simp: update_ti_list_t_def)
   apply(drule_tac x="take (size_td_list xs') bs" in spec)
   apply simp
   apply(rotate_tac -1)
   apply(drule_tac x="v" in spec)
   apply(rotate_tac -1)
   apply(drule_tac x="v'" in spec)
   apply(frule_tac x="h v" in spec)
   apply(drule_tac x="(take (size_td_list xs') bs)" in spec)
   apply(drule_tac x="f undefined" in spec)
   apply(frule_tac x="h v'" in spec)
   apply(drule_tac x="(take (size_td_list xs') bs)" in spec)
   apply(drule_tac x="f undefined" in spec)
   apply(thin_tac "\<forall>v bs bs'. X v bs bs'" for X)
   apply simp
   apply(unfold upd_local_def)
   apply fast
  apply(unfold fu_commutes_def)
  apply(thin_tac "\<forall>bs. X bs" for X)
  apply(thin_tac "\<forall>x y z a. X x y z a" for X)
  apply(clarsimp simp: update_ti_list_t_def)
  done

lemma fu_eq_mask_ti_typ_pad_combine:
  "\<lbrakk> fu_eq_mask ti (\<lambda>v. (g (f undefined) (h v))); fg_cons f g;
     fu_commutes (update_ti_t ti) g; aggregate ti \<rbrakk>
   \<Longrightarrow> fu_eq_mask (ti_typ_pad_combine (t::'a::mem_type itself) f g fn ti) h"
  by (fastforce simp: ti_typ_pad_combine_def Let_def
                intro: fu_eq_mask_ti_typ_combine fu_eq_mask_ti_pad_combine fc_ti_pad_combine)

lemma fu_eq_mask_empty_typ_info_g:
  "\<exists>k. \<forall>v. f v = k \<Longrightarrow> fu_eq_mask t f"
  by (auto simp: fu_eq_mask_def)

lemma fu_eq_mask_empty_typ_info:
  "\<forall>v. f v = undefined \<Longrightarrow> fu_eq_mask t f"
  by (auto simp: fu_eq_mask_def)

lemma size_td_extend_ti:
  "aggregate s \<Longrightarrow> size_td (extend_ti s t fn) = size_td s + size_td t"
  by (cases s, rename_tac typ_struct xs) (case_tac typ_struct; simp)

lemma size_td_ti_pad_combine:
  "aggregate ti \<Longrightarrow> size_td (ti_pad_combine n ti) = n + size_td ti"
  unfolding ti_pad_combine_def Let_def
  by (simp add: size_td_extend_ti)

lemma align_of_dvd_size_of_final_pad [simp]:
  "aggregate ti \<Longrightarrow> 2^align_td (final_pad ti) dvd size_td (final_pad ti)"
  unfolding final_pad_def Let_def
  by (fastforce simp: size_td_ti_pad_combine ac_simps padup_dvd intro: dvd_padup_add)

lemma size_td_lt_ti_pad_combine:
  "aggregate t \<Longrightarrow> size_td (ti_pad_combine n t) = size_td t + n"
  by (metis add.commute size_td_ti_pad_combine)

lemma size_td_lt_ti_typ_combine:
  "aggregate ti \<Longrightarrow> size_td (ti_typ_combine (t::'b::c_type itself) f g fn ti) =
     size_td ti + size_td (typ_info_t TYPE('b))"
  by (clarsimp simp: ti_typ_combine_def Let_def size_td_extend_ti)

lemma size_td_lt_ti_typ_pad_combine:
  "aggregate ti \<Longrightarrow> size_td (ti_typ_pad_combine (t::'b::c_type itself) f g fn ti) =
     (let k = size_td ti
      in k + size_td (typ_info_t TYPE('b)) + padup (2^(align_td (typ_info_t TYPE('b)))) k)"
  unfolding ti_typ_pad_combine_def Let_def
  by (auto simp: size_td_lt_ti_typ_combine size_td_ti_pad_combine align_of_def)

lemma size_td_lt_final_pad:
  "aggregate tag \<Longrightarrow> size_td (final_pad tag) = (let k=size_td tag in k + padup (2^align_td tag) k)"
  by (auto simp: final_pad_def Let_def size_td_ti_pad_combine)

lemma size_td_empty_typ_info [simp]:
  "size_td (empty_typ_info tn) = 0"
  by (clarsimp simp: empty_typ_info_def)

lemma wf_lf_empty_typ_info [simp]:
  "wf_lf {}"
  by (auto simp: wf_lf_def empty_typ_info_def)

lemma lf_fn_disj_fn:
  "fn \<notin> set (field_names_list (TypDesc (TypAggregate xs) tn)) \<Longrightarrow>
     lf_fn ` lf_set_list xs [] \<inter> lf_fn ` lf_set t [fn] = {}"
  apply(induct xs arbitrary: fn t tn, clarsimp)
  apply(rename_tac a list fn t tn)
  apply(case_tac a, clarsimp)
  apply(drule_tac x=fn in meta_spec)
  apply(drule_tac x=t in meta_spec)
  apply(drule_tac x=tn in meta_spec)
  apply(drule meta_mp, fastforce simp: field_names_list_def split: if_split_asm)
  apply(safe)
   apply(fastforce dest!: lf_set_fn simp: field_names_list_def prefix_def less_eq_list_def
                   split: if_split_asm)
  by force

lemma wf_lf_extend_ti:
  "\<lbrakk> wf_lf (lf_set t []); wf_lf (lf_set ti []); wf_desc t; fn \<notin> set (field_names_list ti);
     ti_ind (lf_set ti []) (lf_set t []) \<rbrakk>
   \<Longrightarrow> wf_lf (lf_set (extend_ti ti t fn) [])"
  apply(cases ti, rename_tac typ_struct xs)
  apply(case_tac typ_struct; clarsimp)
   apply(subst wf_lf_fn; simp)
  apply(subst wf_lf_list, erule lf_fn_disj_fn)
  apply(subst ti_ind_sym2)
  apply(subst ti_ind_fn)
  apply(subst ti_ind_sym2)
  apply clarsimp
  apply(subst wf_lf_fn; simp)
  done

lemma wf_lf_ti_pad_combine:
  "wf_lf (lf_set ti []) \<Longrightarrow> wf_lf (lf_set (ti_pad_combine n ti) [])"
  apply(clarsimp simp: ti_pad_combine_def Let_def)
  apply(rule wf_lf_extend_ti)
      apply(clarsimp simp: wf_lf_def fd_cons_desc_def fd_cons_double_update_def
                           fd_cons_update_access_def fd_cons_access_update_def fd_cons_length_def)
     apply assumption
    apply(clarsimp)
   apply(rule foldl_append_nmem)
   apply clarsimp
  apply(clarsimp simp: ti_ind_def fu_commutes_def fa_fu_ind_def)
  done

lemma wf_lf_final_pad:
  "wf_lf (lf_set ti []) \<Longrightarrow> wf_lf (lf_set (final_pad ti) [])"
  by (auto simp: final_pad_def Let_def elim: wf_lf_ti_pad_combine)

lemma wf_lf_adjust_ti:
  "\<lbrakk> wf_lf (lf_set t []); \<And>v. g (f v) v = v;
     \<And>bs bs' v. g bs (g bs' v) = g bs v; \<And>bs v. f (g bs v) = bs \<rbrakk>
   \<Longrightarrow> wf_lf (lf_set (adjust_ti t f g) [])"
  apply(clarsimp simp: wf_lf_def)
  apply(drule lf_set_adjust_ti; clarsimp)
  apply(rule conjI)
   apply(fastforce simp: fd_cons_desc_def fd_cons_double_update_def update_desc_def
                         fd_cons_update_access_def fd_cons_access_update_def fd_cons_length_def)
  apply(fastforce simp: fu_commutes_def update_desc_def fa_fu_ind_def dest!: lf_set_adjust_ti)
  done

lemma ti_ind_empty_typ_info [simp]:
  "ti_ind (lf_set (empty_typ_info tn) []) (lf_set (adjust_ti k f g) [])"
  by (clarsimp simp: ti_ind_def empty_typ_info_def)

lemma ti_ind_extend_ti:
  "\<lbrakk> ti_ind (lf_set t []) (lf_set (adjust_ti k f g) []);
     ti_ind (lf_set ti []) (lf_set (adjust_ti k f g) []) \<rbrakk>
   \<Longrightarrow> ti_ind (lf_set (extend_ti ti t fn) []) (lf_set (adjust_ti k f g) [])"
  apply(case_tac ti, rename_tac typ_struct xs)
  apply(case_tac typ_struct; clarsimp, subst ti_ind_fn, simp)
  done

lemma ti_ind_ti_pad_combine:
  "ti_ind (lf_set ti []) (lf_set (adjust_ti k f g) []) \<Longrightarrow>
     ti_ind (lf_set (ti_pad_combine n ti) []) (lf_set (adjust_ti k f g) [])"
  apply(clarsimp simp: ti_pad_combine_def Let_def)
  apply(rule ti_ind_extend_ti)
   apply(clarsimp simp: ti_ind_def fu_commutes_def fa_fu_ind_def)
  apply assumption
  done

definition f_ind :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a field_desc set \<Rightarrow> bool" where
  "f_ind f X \<equiv> \<forall>x bs v. x \<in> X \<longrightarrow> f (field_update x bs v) = f v"

definition fu_s_comm_k :: "'a leaf_desc set \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> bool" where
  "fu_s_comm_k X k \<equiv> \<forall>x. x \<in> field_update ` lf_fd ` X \<longrightarrow> fu_commutes x k"

definition g_ind :: "'a leaf_desc set \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> bool" where
  "g_ind X g \<equiv> fu_s_comm_k X g"

definition fa_ind :: "'a field_desc set \<Rightarrow> ('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> bool" where
  "fa_ind X g \<equiv> \<forall>x bs v. x \<in> X \<longrightarrow> field_access x (g bs v) = field_access x v"

lemma lf_fd_fn:
  "\<forall>fn. lf_fd ` (lf_set (t::'a typ_info) fn) = lf_fd ` (lf_set t [])"
  "\<forall>fn. lf_fd ` (lf_set_struct (st::'a field_desc typ_struct) fn) = lf_fd ` (lf_set_struct st [])"
  "\<forall>fn. lf_fd ` (lf_set_list (ts::('a typ_info,field_name) dt_pair list) fn) = lf_fd ` (lf_set_list ts [])"
  "\<forall>fn. lf_fd ` (lf_set_pair (x::('a typ_info,field_name) dt_pair) fn) = lf_fd ` (lf_set_pair x [])"
  by (induct t and st and ts and x, all \<open>clarsimp simp: image_Un\<close>) metis+

lemma lf_set_empty_typ_info [simp]:
  "lf_set (empty_typ_info tn) fn = {}"
  by (clarsimp simp: empty_typ_info_def)

lemma g_ind_empty [simp]:
  "g_ind {} g"
  by (clarsimp simp: g_ind_def fu_s_comm_k_def)

lemma g_ind_extend_ti:
  "\<lbrakk> g_ind (lf_set s []) g; g_ind (lf_set t []) g \<rbrakk>
   \<Longrightarrow> g_ind (lf_set (extend_ti s t fn) []) g"
  using lf_fd_fn(1)
  by (cases s, rename_tac typ_struct xs)
     (case_tac typ_struct; fastforce simp: g_ind_def image_Un fu_s_comm_k_def)

lemma g_ind_ti_typ_combine:
  "\<lbrakk> g_ind (lf_set ti []) h; \<And>w u v. g w (h u v) = h u (g w v); \<And>w v. f (h w v) = f v;
     \<And>v. g (f v) v = v \<rbrakk>
   \<Longrightarrow> g_ind (lf_set (ti_typ_combine t f g fn ti) []) h"
  apply(clarsimp simp: ti_typ_combine_def Let_def)
  apply(erule g_ind_extend_ti)
  apply(clarsimp simp: g_ind_def fu_s_comm_k_def)
  apply(drule lf_set_adjust_ti)
   apply clarsimp
  apply(clarsimp simp: update_desc_def fu_commutes_def)
  done

lemma g_ind_ti_pad_combine:
  "g_ind ((lf_set ti [])) g \<Longrightarrow> g_ind ((lf_set (ti_pad_combine n ti) [])) g"
  apply(clarsimp simp: ti_pad_combine_def Let_def)
  apply(erule g_ind_extend_ti)
  apply(auto simp: g_ind_def fu_s_comm_k_def fu_commutes_def)
  done

lemma g_ind_ti_typ_pad_combine:
  "\<lbrakk> g_ind (lf_set ti []) h; \<And>w u v. g w (h u v) = h u (g w v); \<And>w v. f (h w v) = f v;
     \<And>v. g (f v) v = v \<rbrakk>
   \<Longrightarrow> g_ind (lf_set (ti_typ_pad_combine t f g fn ti) []) h"
  unfolding ti_typ_pad_combine_def Let_def
  by (fastforce intro!: g_ind_ti_typ_combine g_ind_ti_pad_combine)

lemma f_ind_empty [simp]:
  "f_ind f {}"
  by (clarsimp simp: f_ind_def)

lemma f_ind_extend_ti:
  "\<lbrakk> f_ind f (lf_fd ` lf_set s []); f_ind f (lf_fd ` lf_set t []) \<rbrakk>
   \<Longrightarrow> f_ind f (lf_fd ` lf_set (extend_ti s t fn) [])"
  using lf_fd_fn(1)
  by (cases s, rename_tac typ_struct xs)
     (case_tac typ_struct; fastforce simp: f_ind_def image_Un fu_s_comm_k_def)

lemma f_ind_ti_typ_combine:
  "\<lbrakk> f_ind h (lf_fd ` lf_set ti []); \<And>v w. h (g w v) = h v; \<And>v. g (f v) v = v \<rbrakk>
   \<Longrightarrow> f_ind h (lf_fd ` lf_set (ti_typ_combine t f g fn ti) [])"
  apply(clarsimp simp: ti_typ_combine_def Let_def)
  apply(erule f_ind_extend_ti)
  apply(clarsimp simp: f_ind_def)
  apply(drule lf_set_adjust_ti)
   apply clarsimp
  apply(clarsimp simp: update_desc_def)
  done

lemma f_ind_ti_pad_combine:
  "f_ind f (lf_fd ` (lf_set t [])) \<Longrightarrow> f_ind f (lf_fd ` (lf_set (ti_pad_combine n t) []))"
  apply(clarsimp simp: ti_pad_combine_def Let_def)
  apply(erule f_ind_extend_ti)
  apply(auto simp: f_ind_def)
  done

lemma f_ind_ti_typ_pad_combine:
  "\<lbrakk> f_ind h (lf_fd ` lf_set ti []); \<And>v w. h (g w v) = h v; \<And>v. g (f v) v = v \<rbrakk>
   \<Longrightarrow> f_ind h (lf_fd ` lf_set (ti_typ_pad_combine t f g fn ti) [])"
  by (auto simp: ti_typ_pad_combine_def Let_def intro: f_ind_ti_typ_combine f_ind_ti_pad_combine)

lemma fa_ind_empty [simp]:
  "fa_ind {} g"
  by (clarsimp simp: fa_ind_def)

lemma fa_ind_extend_ti:
  "\<lbrakk> fa_ind (lf_fd ` lf_set s []) g; fa_ind (lf_fd ` lf_set t []) g \<rbrakk>
   \<Longrightarrow> fa_ind (lf_fd ` lf_set (extend_ti s t fn) []) g"
  using lf_fd_fn(1)
  by (cases s, rename_tac typ_struct xs)
     (case_tac typ_struct; fastforce simp: fa_ind_def image_Un fu_s_comm_k_def)

lemma fa_ind_ti_typ_combine:
  "\<lbrakk> fa_ind (lf_fd ` lf_set ti []) h; \<And>v w. f (h w v) = f v; \<And>v. g (f v) v = v \<rbrakk>
   \<Longrightarrow> fa_ind (lf_fd ` lf_set (ti_typ_combine t f g fn ti) []) h"
  apply(clarsimp simp: ti_typ_combine_def Let_def)
  apply(erule fa_ind_extend_ti)
  apply(clarsimp simp: fa_ind_def fu_s_comm_k_def)
  apply(drule lf_set_adjust_ti)
   apply clarsimp
  apply(clarsimp simp: update_desc_def fu_commutes_def)
  done

lemma fa_ind_ti_pad_combine:
  "fa_ind (lf_fd ` (lf_set ti [])) g \<Longrightarrow> fa_ind (lf_fd ` (lf_set (ti_pad_combine n ti) [])) g"
  apply(clarsimp simp: ti_pad_combine_def Let_def)
  apply(erule fa_ind_extend_ti)
  apply(auto simp: fa_ind_def)
  done

lemma fa_ind_ti_typ_pad_combine:
  "\<lbrakk> fa_ind (lf_fd ` lf_set ti []) h; \<And>v w. f (h w v) = f v; \<And>v. g (f v) v = v \<rbrakk>
   \<Longrightarrow> fa_ind (lf_fd ` lf_set (ti_typ_pad_combine t f g fn ti) []) h"
  by (auto simp: ti_typ_pad_combine_def Let_def intro: fa_ind_ti_typ_combine fa_ind_ti_pad_combine)

lemma wf_lf_ti_typ_combine:
  "\<lbrakk> wf_lf (lf_set ti []); fn \<notin> set (field_names_list ti);
     \<And>v. g (f v) v = v; \<And>w u v. g w (g u v) = g w v; \<And>w v. f (g w v) = w;
     g_ind (lf_set ti []) g; f_ind f (lf_fd ` lf_set ti []); fa_ind (lf_fd ` lf_set ti []) g \<rbrakk>
   \<Longrightarrow> wf_lf (lf_set (ti_typ_combine (t::'a::wf_type itself) f g fn ti) [])"
  apply(clarsimp simp: ti_typ_combine_def Let_def)
  apply(rule wf_lf_extend_ti; simp?)
   apply(rule wf_lf_adjust_ti; simp)
  apply(clarsimp simp: ti_ind_def)
  apply(drule lf_set_adjust_ti, simp)
  apply(clarsimp simp: fu_commutes_def update_desc_def g_ind_def f_ind_def fu_s_comm_k_def
                       fa_fu_ind_def fa_ind_def)
  done

lemma wf_lf_ti_typ_pad_combine:
  "\<lbrakk> wf_lf (lf_set ti []); fn \<notin> set (field_names_list ti); hd fn \<noteq> CHR ''!'';
     \<And>v. g (f v) v = v; \<And>w u v. g w (g u v) = g w v; \<And>w v. f (g w v) = w;
     g_ind (lf_set ti []) g; f_ind f (lf_fd ` lf_set ti []); fa_ind (lf_fd ` lf_set ti []) g \<rbrakk>
   \<Longrightarrow> wf_lf (lf_set (ti_typ_pad_combine (t::'a::wf_type itself) f g fn ti) [])"
  apply(clarsimp simp: ti_typ_pad_combine_def Let_def)
  apply (fastforce intro!: wf_lf_ti_typ_combine wf_lf_ti_pad_combine g_ind_ti_pad_combine
                           f_ind_ti_pad_combine fa_ind_ti_pad_combine)
  done

lemma align_field_empty_typ_info [simp]:
  "align_field (empty_typ_info tn)"
  by (clarsimp simp: empty_typ_info_def align_field_def)

lemma align_td_field_lookup:
  "\<forall>f m s n. field_lookup (t::'a typ_desc) f m = Some (s,n) \<longrightarrow> align_td s \<le> align_td t"
  "\<forall>f m s n. field_lookup_struct (st::'a typ_struct) f m = Some (s,n) \<longrightarrow> align_td s \<le> align_td_struct st"
  "\<forall>f m s n. field_lookup_list (ts::('a typ_desc,field_name) dt_pair list) f m = Some (s,n) \<longrightarrow> align_td s \<le> align_td_list ts"
  "\<forall>f m s n. field_lookup_pair (x::('a typ_desc,field_name) dt_pair) f m = Some (s,n) \<longrightarrow> align_td s \<le> align_td_pair x"
  by (induct t and st and ts and x, all \<open>clarsimp\<close>)
     (fastforce simp: max_def split: option.splits)

lemma align_field_extend_ti:
  "\<lbrakk> align_field s; align_field t; 2^(align_td t) dvd size_td s \<rbrakk>
   \<Longrightarrow> align_field (extend_ti s t fn)"
  apply(case_tac s, clarsimp, thin_tac "s = X" for X)
  apply(rename_tac typ_struct xs)
  apply(case_tac typ_struct, clarsimp)
   apply(clarsimp simp: align_field_def split: option.splits)
  apply(clarsimp simp: align_field_def)
  apply(subst (asm) field_lookup_list_append)
  apply(clarsimp split: if_split_asm option.splits)
   apply(case_tac f, clarsimp)
   apply clarsimp
   apply(frule field_lookup_offset2)
   apply(rename_tac lista s n listb)
   apply(drule_tac x=listb in spec, drule_tac x=s in spec)
   apply(drule_tac x="n - size_td_list lista" in spec)
   apply clarsimp
   apply(drule dvd_diffD)
     apply(subgoal_tac "2^align_td s dvd (2::nat)^align_td t")
      apply(drule (2) dvd_trans)
     apply(rule le_imp_power_dvd)
     apply(subst align_td_field_lookup)
      apply fast
     apply simp
    apply(drule (1) field_lookup_offset_le)
   apply assumption
  apply(case_tac f, clarsimp)
  apply(drule_tac x="a#list" in spec)
  apply clarsimp
  done

lemma align_field_ti_pad_combine:
  "align_field ti \<Longrightarrow> align_field (ti_pad_combine n ti)"
  apply(clarsimp simp: ti_pad_combine_def Let_def)
  apply(erule align_field_extend_ti)
   apply(clarsimp simp: align_field_def)
  apply clarsimp
  done

lemma align_field_final_pad:
  "align_field ti \<Longrightarrow> align_field (final_pad ti)"
  apply(clarsimp simp: final_pad_def Let_def split: if_split_asm)
  apply(erule align_field_ti_pad_combine)
  done

lemma field_lookup_adjust_ti_None:
  "\<forall>fn m s n. field_lookup (adjust_ti t f g) fn m = None \<longrightarrow> (field_lookup t fn m = None)"
  "\<forall>fn m s n. field_lookup_struct (map_td_struct (\<lambda>n algn d. update_desc f g d) st) fn m = None \<longrightarrow> (field_lookup_struct st fn m = None)"
  "\<forall>fn m s n. field_lookup_list (map_td_list (\<lambda>n algn d. update_desc f g d) ts) fn m = None \<longrightarrow> (field_lookup_list ts fn m = None)"
  "\<forall>fn m s n. field_lookup_pair (map_td_pair (\<lambda>n algn d. update_desc f g d) x) fn m = None \<longrightarrow> (field_lookup_pair x fn m = None)"
  apply (induct t and st and ts and x, all \<open>clarsimp simp: adjust_ti_def split: option.splits\<close>)
  apply (rename_tac dt_pair list fn m, case_tac dt_pair, clarsimp)
  done

lemma field_lookup_adjust_ti' [rule_format]:
  "\<forall>fn m s n. field_lookup (adjust_ti t f g) fn m = Some (s,n) \<longrightarrow>
     (\<exists>s'. field_lookup t fn m = Some (s',n) \<and> align_td s = align_td s')"
  "\<forall>fn m s n. field_lookup_struct (map_td_struct (\<lambda>n algn d. update_desc f g d) st) fn m = Some (s,n) \<longrightarrow>
     (\<exists>s'. field_lookup_struct st fn m = Some (s',n) \<and> align_td s = align_td s')"
  "\<forall>fn m s n. field_lookup_list (map_td_list (\<lambda>n algn d. update_desc f g d) ts) fn m = Some (s,n) \<longrightarrow>
     (\<exists>s'. field_lookup_list ts fn m = Some (s',n) \<and> align_td s = align_td s')"
  "\<forall>fn m s n. field_lookup_pair (map_td_pair (\<lambda>n algn d. update_desc f g d) x) fn m = Some (s,n) \<longrightarrow>
     (\<exists>s'. field_lookup_pair x fn m = Some (s',n) \<and> align_td s = align_td s')"
  apply(induct t and st and ts and x, all \<open>clarsimp\<close>)
   apply(clarsimp simp: adjust_ti_def)
   apply(clarsimp split: option.splits)
   apply(rule conjI; clarsimp)
    apply(case_tac dt_pair, clarsimp)
   apply(case_tac dt_pair, clarsimp split: if_split_asm)
    apply(drule_tac x=fn in spec)
    apply clarsimp
    apply(fold adjust_ti_def)
    apply(subst (asm) field_lookup_adjust_ti_None; simp)
   apply fastforce
  apply fastforce
  done

lemma field_lookup_adjust_ti:
  "\<lbrakk> field_lookup (adjust_ti t f g) fn m = Some (s,n) \<rbrakk>
   \<Longrightarrow> (\<exists>s'. field_lookup t fn m = Some (s',n) \<and> align_td s = align_td s')"
  by (rule field_lookup_adjust_ti')

lemma align_adjust_ti:
  "align_field ti \<Longrightarrow> align_field (adjust_ti ti f g)"
  apply(clarsimp simp: align_field_def)
  apply(drule field_lookup_adjust_ti)
  apply clarsimp
  done

lemma align_field_ti_typ_combine:
  "\<lbrakk> align_field ti; 2 ^ align_td (typ_info_t TYPE('a)) dvd size_td ti \<rbrakk>
   \<Longrightarrow> align_field (ti_typ_combine (t::'a::mem_type itself) f g fn ti)"
  apply(clarsimp simp: ti_typ_combine_def Let_def)
  apply(rule align_field_extend_ti, assumption)
   apply(rule align_adjust_ti)
   apply(rule align_field)
  apply simp
  done

lemma align_field_ti_typ_pad_combine:
  "\<lbrakk> align_field ti; aggregate ti \<rbrakk>
   \<Longrightarrow> align_field (ti_typ_pad_combine (t::'a::mem_type itself) f g fn ti)"
  unfolding ti_typ_pad_combine_def Let_def
  by (fastforce simp: size_td_ti_pad_combine ac_simps padup_dvd align_of_def
                intro: dvd_padup_add align_field_ti_typ_combine align_field_ti_pad_combine)

lemma npf_extend_ti [simp]:
  "non_padding_fields (extend_ti s t fn) =
     non_padding_fields s @ (if hd fn = CHR ''!'' then [] else [fn])"
  by (case_tac s, rename_tac typ_struct xs) (case_tac typ_struct; simp)

lemma npf_ti_pad_combine [simp]:
  "non_padding_fields (ti_pad_combine n tag) = non_padding_fields tag"
  by (clarsimp simp: ti_pad_combine_def Let_def)

lemma npf_ti_typ_combine [simp]:
  "hd fn \<noteq> CHR ''!'' \<Longrightarrow>
     non_padding_fields (ti_typ_combine t_b f g fn tag) = non_padding_fields tag @ [fn]"
  by (clarsimp simp: ti_typ_combine_def Let_def)

lemma npf_ti_typ_pad_combine [simp]:
  "hd fn \<noteq> CHR ''!'' \<Longrightarrow>
     non_padding_fields (ti_typ_pad_combine t_b f g fn tag) = non_padding_fields tag @ [fn]"
  by (clarsimp simp: ti_typ_pad_combine_def Let_def)

lemma npf_final_pad [simp]:
  "non_padding_fields (final_pad tag) = non_padding_fields tag"
  by (clarsimp simp: final_pad_def Let_def)

lemma npf_empty_typ_info [simp]:
  "non_padding_fields (empty_typ_info tn) = []"
  by (clarsimp simp: empty_typ_info_def)

definition field_fd' :: "'a typ_info \<Rightarrow> qualified_field_name \<rightharpoonup> 'a field_desc" where
  "field_fd' t f \<equiv> case field_lookup t f 0 of None \<Rightarrow> None | Some x \<Rightarrow> Some (field_desc (fst x))"

lemma padup_zero [simp]:
  "padup n 0 = 0"
  by (clarsimp simp: padup_def)

lemma padup_same [simp]:
  "padup n n = 0"
  by (clarsimp simp: padup_def)

lemmas size_td_simps_1 = size_td_lt_final_pad size_td_lt_ti_typ_pad_combine
                         aggregate_ti_typ_pad_combine aggregate_empty_typ_info

lemmas size_td_simps_2 = padup_def align_of_final_pad align_of_def

lemmas size_td_simps = size_td_simps_1 size_td_simps_2

end
{"author": "seL4", "repo": "l4v", "sha": "9ba34e269008732d4f89fb7a7e32337ffdd09ff9", "save_path": "github-repos/isabelle/seL4-l4v", "path": "github-repos/isabelle/seL4-l4v/l4v-9ba34e269008732d4f89fb7a7e32337ffdd09ff9/tools/c-parser/umm_heap/CompoundCTypes.thy"}
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import convolve2d

# python gs_convolve.py  3.53s user 0.13s system 107% cpu 3.414 total


def calc(u, v, u2, v2):
    # one explicit-Euler step of the Gray-Scott reaction-diffusion model
    dt = 0.2
    F = 0.04      # feed rate
    k = 0.06075   # kill rate
    # 5-point stencil Laplacian; mode="same" zero-pads at the boundary
    laplacian = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
    lu = 0.1*convolve2d(u, laplacian, mode="same")
    lv = 0.05*convolve2d(v, laplacian, mode="same")
    cu = -v*v*u + F*(1.0 - u)
    cv = v*v*u - (F+k)*v
    u2[:] = u + (lu+cu) * dt
    v2[:] = v + (lv+cv) * dt


def main():
    L = 64
    u = np.zeros((L, L))
    u2 = np.zeros((L, L))
    v = np.zeros((L, L))
    v2 = np.zeros((L, L))
    # seed a square perturbation in the middle of the domain
    h = L//2
    u[h-6:h+6, h-6:h+6] = 0.9
    v[h-3:h+3, h-3:h+3] = 0.7
    # double buffering: alternate which pair of arrays is read and written
    for i in range(10000):
        if i % 2 == 0:
            calc(u, v, u2, v2)
        else:
            calc(u2, v2, u, v)
    return v


plt.imshow(main())
plt.savefig("output.png")
{"hexsha": "e25705bdc7d7a08dfdc3ff084712a68a34c6cf22", "size": 889, "ext": "py", "lang": "Python", "max_stars_repo_path": "gs_convolve.py", "max_stars_repo_name": "kaityo256/python_gs", "max_stars_repo_head_hexsha": "d825db5eff9048863a9bed1a52c77e329c6518ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gs_convolve.py", "max_issues_repo_name": "kaityo256/python_gs", "max_issues_repo_head_hexsha": "d825db5eff9048863a9bed1a52c77e329c6518ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gs_convolve.py", "max_forks_repo_name": "kaityo256/python_gs", "max_forks_repo_head_hexsha": "d825db5eff9048863a9bed1a52c77e329c6518ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.225, "max_line_length": 69, "alphanum_fraction": 0.4938132733, "include": true, "reason": "import numpy,from scipy", "num_tokens": 385}
from scipy.fft import fft, fftfreq
import numpy as np
import matplotlib.pyplot as plt

# Chebyshev filter coefficients
b = [0.00757702, -0.02666634, 0.06433529, -0.09739344, 0.11965053,
     -0.10339635, 0.07472005, -0.0214037, -0.0214037, 0.07472005,
     -0.10339635, 0.11965053, -0.09739344, 0.06433529, -0.02666634,
     0.00757702]
a = [1.00000000e+00, -6.35728196e+00, 2.04575618e+01, -4.29494964e+01,
     6.51181196e+01, -7.49392883e+01, 6.73105585e+01, -4.78581686e+01,
     2.70870646e+01, -1.21715215e+01, 4.30082438e+00, -1.17046262e+00,
     2.37624459e-01, -3.36085164e-02, 2.99150600e-03, -7.07731789e-05]


def saturate(x):
    # odd polynomial soft clipper; the rational section meets the hard
    # limit continuously at x = +/-3, where it evaluates to +/-1
    if x < -3:
        return -1
    elif x > 3:
        return 1
    else:
        return x * (27 + x * x) / (27 + 9 * x * x)


# number of sample points
N = 4096
Fs = 48000
T = 1 / Fs

x = np.linspace(0.0, N*T, N, endpoint=False)
y = np.sin(5000 * 2.0*np.pi*x)   # will be driven into saturation
z = np.sin(5000 * 2.0*np.pi*x)   # clean reference

for n in range(0, N):
    y[n] = saturate(y[n]*20)

# top axis: time domain, saturated vs clean; bottom axis: magnitude spectra
fig, ax1 = plt.subplots(2)
ax1[0].plot(x[0:128], y[0:128])
ax1[0].plot(x[0:128], z[0:128])

yf = fft(y)
xf = fftfreq(N, T)[:N//2]
ax1[1].plot(xf, 2.0/N * np.abs(yf[0:N//2]))

yf = fft(z)
xf = fftfreq(N, T)[:N//2]
ax1[1].plot(xf, 2.0/N * np.abs(yf[0:N//2]))

# planned oversampled chain (not yet implemented):
# Upsample
xUpSample = np.linspace(0.0, N*T, N, endpoint=False)
# Chebyshev Filter
# Distort
# Down Sample
# FFT

plt.grid()
#plt.xscale('log')
plt.show()
{"hexsha": "30394c722e96e0fde62fa245a2db45cc9a05701f", "size": 1395, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphing/18_fftsaturationaliasing.py", "max_stars_repo_name": "jaakjensen/PythonDSP", "max_stars_repo_head_hexsha": "d4f5850a5379c14d531e6f9c6d43e03f53fb888d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-19T10:40:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T10:40:41.000Z", "max_issues_repo_path": "graphing/18_fftsaturationaliasing.py", "max_issues_repo_name": "jaakjensen/PythonDSP", "max_issues_repo_head_hexsha": "d4f5850a5379c14d531e6f9c6d43e03f53fb888d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphing/18_fftsaturationaliasing.py", "max_forks_repo_name": "jaakjensen/PythonDSP", "max_forks_repo_head_hexsha": "d4f5850a5379c14d531e6f9c6d43e03f53fb888d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1363636364, "max_line_length": 82, "alphanum_fraction": 0.6157706093, "include": true, "reason": "import numpy,from scipy", "num_tokens": 656}
#!/usr/bin/env python
# encoding: utf-8
"""
miprest.py

@author: Kevin S. Brown (UCONN), Ameya Akkalkotkar (UCONN)

Created by Kevin Brown on 2016-09-19.
"""

from stopping import covmatrix
from numpy.linalg import svd
from numpy import dot, newaxis


def pca(X, k):
    '''
    PCA decomposition of matrix X.  X is assumed to be N x p, where p is the
    number of samples (backwards from many PCA implementations).  If you want
    the p x N version, just transpose what comes out of this function.

    The returned mixing matrix and signals (also called the scores), when
    multiplied together, will give a ROW-CENTERED version of the data.

    k is the number of components to retain (probably determined by some PCA
    stopping rule).

    Returns the matrix of eigenvectors of X (the "mixing matrix") and the
    "signals" (projection of the data onto the first k components).
    '''
    # row center the data matrix
    cX = X - X.mean(axis=1)[:, newaxis]
    C = covmatrix(cX)
    # singular value decomp; C is symmetric, so the columns of U are its
    # eigenvectors, ordered by decreasing singular value
    U, s, _ = svd(C)
    # select first k eigenvectors
    W = U[:, :k]
    # compute signal/score matrix on centered data matrix
    S = dot(W.T, cX)
    # need to do something about the units
    return W, S
{"hexsha": "2282d4c70f7b10468dec87222b5a2f6ac0373266", "size": 1213, "ext": "py", "lang": "Python", "max_stars_repo_path": "pca.py", "max_stars_repo_name": "archimonde1308/miprest", "max_stars_repo_head_hexsha": "097cec1d737df9f590b902028f34275fb870721a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-10-07T04:17:13.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-08T23:56:41.000Z", "max_issues_repo_path": "pca.py", "max_issues_repo_name": "archimonde1308/miprest", "max_issues_repo_head_hexsha": "097cec1d737df9f590b902028f34275fb870721a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-12-28T21:17:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-14T11:41:02.000Z", "max_forks_repo_path": "pca.py", "max_forks_repo_name": "archimonde1308/miprest", "max_forks_repo_head_hexsha": "097cec1d737df9f590b902028f34275fb870721a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-09-13T23:54:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-12T12:25:35.000Z", "avg_line_length": 29.5853658537, "max_line_length": 93, "alphanum_fraction": 0.6859027205, "include": true, "reason": "from numpy", "num_tokens": 322}