(******************************************************************************
Mixed distributive laws in bicategories
Monads in the bicategory of comonads are the same as mixed distributive laws
******************************************************************************)
Require Import UniMath.Foundations.All.
Require Import UniMath.MoreFoundations.All.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.DisplayedCats.Core.
Require Import UniMath.Bicategories.Core.Bicat. Import Bicat.Notations.
Require Import UniMath.Bicategories.Core.BicategoryLaws.
Require Import UniMath.Bicategories.Core.Invertible_2cells.
Require Import UniMath.Bicategories.Core.Unitors.
Require Import UniMath.Bicategories.Core.Examples.OpCellBicat.
Require Import UniMath.Bicategories.DisplayedBicats.DispBicat.
Import DispBicat.Notations.
Require Import UniMath.Bicategories.DisplayedBicats.Examples.MonadsLax.
Require Import UniMath.Bicategories.Monads.Examples.MonadsInOp2Bicat.
Local Open Scope cat.
Section MixedDistributiveLaw.
Context {B : bicat}
(m₁ : comnd B)
(m₂ : disp_mnd B (ob_of_comnd m₁)).
Let x : B := ob_of_comnd m₁.
Let e : x --> x := endo_of_comnd m₁.
Let ε : e ==> id₁ _ := counit_of_comnd m₁.
Let δ : e ==> e · e := comult_of_comnd m₁.
Let f : x --> x := pr11 m₂.
Let η : id₁ _ ==> f := pr121 m₂.
Let μ : f · f ==> f := pr221 m₂.
Definition mixed_distr_law_data
: UU
:= e · f ==> f · e.
Definition mixed_distr_law_laws
(τ : mixed_distr_law_data)
: UU
:= ((ε ▹ f)
• lunitor f
=
τ
• (f ◃ ε)
• runitor f)
×
((δ ▹ f)
• rassociator e e f
• (e ◃ τ)
• lassociator e f e
• (τ ▹ e)
• rassociator f e e
=
τ
• (f ◃ δ))
×
((e ◃ η)
• τ
=
runitor e
• linvunitor e
• (η ▹ e))
×
((e ◃ μ)
• τ
=
lassociator e f f
• (τ ▹ f)
• rassociator f e f
• (f ◃ τ)
• lassociator f f e
• (μ ▹ e)).
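(* The four conjuncts express compatibility of τ with, respectively, the counit ε,
   the comultiplication δ, the unit η and the multiplication μ. *)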
Definition mixed_distr_law
: UU
:= ∑ (τ : mixed_distr_law_data), mixed_distr_law_laws τ.
Definition make_mixed_distr_law
(τ : mixed_distr_law_data)
(Hτ : mixed_distr_law_laws τ)
: mixed_distr_law
:= τ ,, Hτ.
Coercion mixed_distr_law_to_cell
(τ : mixed_distr_law)
: e · f ==> f · e
:= pr1 τ.
End MixedDistributiveLaw.
Section FromBicatToMixedDistrLaw.
Context {B : bicat}
(m : mnd (op2_bicat (mnd (op2_bicat B)))).
Let x : B := pr1 (ob_of_mnd m).
Let e : x --> x := pr112 (ob_of_mnd m).
Let ε : e ==> id₁ _ := pr1 (pr212 (ob_of_mnd m)).
Let δ : e ==> e · e := pr2 (pr212 (ob_of_mnd m)).
Let f : x --> x := pr1 (endo_of_mnd m).
Let η : id₁ _ ==> f := pr1 (unit_of_mnd m).
Let μ : f · f ==> f := pr1 (mult_of_mnd m).
Definition to_comnd_data_of_mixed_distr_law
: comnd_data B.
Proof.
use make_comnd_data.
- exact x.
- exact e.
- exact ε.
- exact δ.
Defined.
Definition to_comnd_laws_of_mixed_distr_law
: comnd_laws to_comnd_data_of_mixed_distr_law.
Proof.
repeat split.
- unfold comnd_counit_left_law.
rewrite !vassocl.
exact (pr122 (ob_of_mnd m)).
- unfold comnd_counit_right_law.
rewrite !vassocl.
exact (pr1 (pr222 (ob_of_mnd m))).
- unfold comnd_comult_assoc_law.
rewrite !vassocl.
exact (pr2 (pr222 (ob_of_mnd m))).
Qed.
Definition to_comnd_of_mixed_distr_law
: comnd B.
Proof.
use make_comnd.
- exact to_comnd_data_of_mixed_distr_law.
- exact to_comnd_laws_of_mixed_distr_law.
Defined.
Definition to_mnd_data_of_mixed_distr_law
: mnd_data B.
Proof.
use make_mnd_data.
- exact x.
- exact f.
- exact η.
- exact μ.
Defined.
Definition to_is_mnd_of_mixed_distr_law
: is_mnd B to_mnd_data_of_mixed_distr_law.
Proof.
repeat split.
- exact (maponpaths pr1 (mnd_unit_left m)).
- exact (maponpaths pr1 (mnd_unit_right m)).
- exact (maponpaths pr1 (mnd_mult_assoc m)).
Qed.
Definition to_mnd_of_mixed_distr_law
: mnd B.
Proof.
use make_mnd.
- exact to_mnd_data_of_mixed_distr_law.
- exact to_is_mnd_of_mixed_distr_law.
Defined.
Definition to_cell_of_mixed_distr_law
: mixed_distr_law_data
to_comnd_of_mixed_distr_law
(pr2 to_mnd_of_mixed_distr_law)
:= pr112 (endo_of_mnd m).
Definition to_laws_of_mixed_distr_law
: mixed_distr_law_laws _ _ to_cell_of_mixed_distr_law.
Proof.
repeat split.
- cbn.
rewrite !vassocl.
exact (pr1 (pr212 (endo_of_mnd m))).
- cbn.
rewrite !vassocl.
exact (pr2 (pr212 (endo_of_mnd m))).
- exact (pr112 (unit_of_mnd m)).
- refine (pr112 (mult_of_mnd m) @ _).
cbn.
rewrite !vassocl.
apply idpath.
Qed.
Definition to_mixed_distr_law
: mixed_distr_law
to_comnd_of_mixed_distr_law
(pr2 to_mnd_of_mixed_distr_law).
Proof.
use make_mixed_distr_law.
- exact to_cell_of_mixed_distr_law.
- exact to_laws_of_mixed_distr_law.
Defined.
End FromBicatToMixedDistrLaw.
|
{"author": "UniMath", "repo": "UniMath", "sha": "7de5cc98a7f6718af63a429ea88d80411eca2977", "save_path": "github-repos/coq/UniMath-UniMath", "path": "github-repos/coq/UniMath-UniMath/UniMath-7de5cc98a7f6718af63a429ea88d80411eca2977/UniMath/Bicategories/Monads/MixedDistributiveLaws.v"}
|
\documentclass[12pt]{article}
\usepackage[margin=1.1in]{geometry}
\input{../../syllabi/preamble}
\newcommand{\coursedept}{Math}
\newcommand{\coursenumber}{342W}
\newcommand{\coursenumbercrosslisted}{/ 650.03~}
\newcommand{\semester}{Spring}
\newcommand{\numcredits}{6}
\newcommand{\lectimeandloc}{Mon and Wed 5-6:50PM / on zoom}
\newcommand{\requiredlabtimeandloc}{Required Lab Time / Loc & Thurs 9-10:50AM / on zoom \\}
\newcommand{\tataofficehourtimeandloc}{TA / TA Office Hours / Loc & Tzipora Horowitz / Wed 6:55-7:55PM / on zoom}
\newcommand{\coursewebpageurl}{https://github.com/kapelner/QC_\coursedept_\coursenumber_\semester_\the\year}
\newcommand{\coursewebpagelink}{\href{\coursewebpageurl}{course homepage}}
\newcommand{\slackurl}{https://QC\coursedept\coursenumber\semester\the\year.slack.com/}
\newcommand{\slacklink}{\href{\slackurl}{slack}}
\newcommand{\numtheoryhws}{4--7}
\newcommand{\extrahwzero}{\item provide a link to your public repository on github (this means you need to sign up for github first)}
\newcommand{\hwzerodue}{Wednesday, Feb 3 11:59PM}
\newcommand{\lastdatetimetohandinhomeworks}{May 18 at noon}
\input{../../syllabi/_header}
\section*{Course Overview}
MATH 342W. Data Science via Machine Learning and Statistical Modeling. 6 hr.; 4 cr. Prereq.: MATH 241 (intro to probability and statistics), MATH 231 (intro to linear algebra), CSCI 111 (intro to programming) or equivalents. Recommended: ECON 382 (intro to econometrics) or equivalent. Philosophy of modeling and learning using data. Prediction via the ordinary linear model including orthogonal projections, sum of squares identity, $R^2$ and RMSE. Polynomial and interaction regressions. Prediction with machine learning including neural nets (the perceptron), support vector machines and the tree methods CART, bagged trees and Random Forests. Probability estimation using logistic regression, asymmetric cost classifiers and the ROC / DET performance curves. Underfitting vs. overfitting and the bias-variance decomposition / tradeoff. Model validation including out of sample techniques such as cross validation and bootstrap validation. Correlation vs. causation, causal models, lurking variables and interpretations of linear model coefficients. Extrapolation. The \texttt{R} language will be taught formally from the ground up (its use will be a substantial part of the homework) as well as visualization using the \texttt{ggplot} library and manipulation using the \texttt{dplyr} and \texttt{data.table} libraries. \pagebreak
You should be familiar with the following before entering the class:
\begin{itemize}
\itemsep -0.0em
\item Basic Probability Theory: conditional probability, in/dependence, identical distributedness
\item Modeling with discrete random variables: Bernoulli, Binomial%, Poisson, Geometric, Negative Binomial, Uniform Discrete and others
\item Expectation and variance
%\item Modeling with continuous random variables: Exponential, Uniform and Normal
%\item Frequentist confidence intervals and hypothesis testing for one-sample proportions
%\item Basic visualization of data: plots, histograms, bar charts
\item Linear algebra: Vectors, matrices, rank, transpose
\item Programming: basic data types, vectors, arrays, control flow (for, while, if, else), functions
\end{itemize}
\noindent We will review the above \textit{throughout the semester} when needed and we will do so rapidly. \\
\textbf{This is not your typical mathematics course.} This course will do lots of modeling of real-world situations using data via the \texttt{R} statistical language.
\section*{Course Materials}
We will be using many reference texts and three popular books which you will read portions from. However, the main materials are the course notes. You should always supplement concepts from class by reading up on them online; I find \href{https://en.wikipedia.org}{wikipedia} best for this.
\paragraph{Theory Reference:} It is not necessary to have these two books, but they are recommended. The first is \qu{Learning from Data: A Short Course} by Abu-Mostafa, Magdon-Ismail and Lin which can be purchased used on \href{https://www.amazon.com/Learning-Data-Yaser-S-Abu-Mostafa/dp/1600490069}{Amazon}. We will also be using portions from \qu{Deep Learning} by Goodfellow, Bengio and Courville that can be purchased on \href{https://www.amazon.com/Deep-Learning-Adaptive-Computation-Machine/dp/0262035618}{Amazon} and read for free at \url{http://www.deeplearningbook.org/}.
\paragraph{Popular Books:} We will also be reading the non-fiction book \qu{The Signal and the Noise} by Nate Silver which can also be purchased on \href{https://www.amazon.com/Signal-Noise-Many-Predictions-Fail-but/dp/0143125087}{Amazon}. This is \textit{required} --- you will have homework questions directly from this book. We will also be reading \qu{Predictive Analytics, Data Mining and Big Data} by Steven Finlay that can be purchased on \href{https://www.amazon.com/Predictive-Analytics-Data-Mining-Misconceptions/dp/1349478687}{Amazon} and is also available online from the \href{https://link-springer-com.queens.ezproxy.cuny.edu/book/10.1057%2F9781137379283}{Queens College library system}.
\paragraph{Computer Software:} You need your own personal computer, laptop preferred. We will be using \texttt{R} which is a free, open source statistical programming language and console available for all operating systems. Please download the latest version from: \url{http://cran.mirrors.hoobly.com/}. You will be expected to do programming. I recommend the IDE \texttt{RStudio} available for free at \url{https://www.rstudio.com/products/rstudio/download/}.
\paragraph{Source Control:} You will be expected to use \texttt{git} and have a \url{github.com} account with a repository named \texttt{QC\_MATH\_342}. You will use this repository to submit coding homework assignments (and theory assignments if you use \LaTeX).
\paragraph{Book on \texttt{R}:} We will be making some use of \qu{R for Data Science} by Wickham and Grolemund which can be purchased on \href{https://www.amazon.com/R-Data-Science-Hadley-Wickham/dp/1491910399}{Amazon} or read online at \url{http://r4ds.had.co.nz/}.
\input{../../syllabi/_the650section}
\input{../../syllabi/_announcements_on_slack}
\input{../../syllabi/_use_of_slack}
\section*{Class Meetings}
There are 42 scheduled meetings. Of these, 26 will be lectures, 10 will be labs, 2 will be in-class midterm exams (see the schedule on page~\pageref{subsec:exam_schedule}), 2 will be review periods before the exams, 1 will be a review lab and 1 will be help with the final paper. I am \inred{canceling} Monday, May 17 (the last meeting) due to a Jewish holiday. This meeting would have been for help on the final paper. We will schedule this session during finals week at a time that fits the majority's schedule.
\subsection*{Lectures}
Lectures will be on zoom and will usually be split into two periods: theory and practice. The first is a standard \qu{chalkboard} lecture where we learn concepts and the second will use the \qu{computer/projector} to see the concepts in action in the \texttt{R} language. %I have a no computer / tablet / phone policy during the theory component of the lectures (only pen / pencil and paper) but you are highly recommended to have the laptop during the second part.
\input{../../syllabi/_zoom_policies}
\input{_lecture_schedule}
\input{../../syllabi/_lecture_upload}
\subsection*{Labs}
Labs will be on zoom during the Thursday morning meetings. Sometimes we will spend some of the two hours doing a practice lecture but the majority of this time will be your time. You will take turns \qu{driving} the coding in front of the class, working on exercises that you will finish for homework. Thus we will spend a lot of time talking through problem-solving skills in data science.
\section*{Homework}
Homework will be split into \textit{theory} and \textit{practice} (called \qu{labs}). This course will be the \qu{writing in the major} course next year. Thus, a portion of each theory and practice homework will involve writing \textit{English} and being graded on \textit{English}.
\subsection*{Theory Homework}
\input{../../syllabi/_theory_hws_text}
\input{../../syllabi/_theory_hws_submission_text}
\subsection*{Practice Homework (Labs)}
These will almost exclusively consist of short and medium coding exercises in \texttt{R}. Most of each assignment will be done by you and your peers during the Thursday lab session.
\input{../../syllabi/_philosophy_hws}
\input{../../syllabi/_time_spent_hws}
\input{../../syllabi/_late_hw_policy}
\input{../../syllabi/_latex_hw_bonus_policy}
\input{../../syllabi/_hw_ec_policy}
\input{../../syllabi/_hw_0}
\section*{Writing Assignments}
There will be two writing assignments. (1) A \qu{philosophy of modeling} essay. Here you will coalesce the non-mathematical material that is crucial to this class. The purpose is to make you truly understand the modeling process and its limitations from start to finish. (2) A final project. Here you will build a predictive model using a dataset. This is the capstone project for the entire data science and statistics major and it is where you will tie everything together.
This class will soon be the writing in the major course. Thus, writing is a major part of the curriculum herein.
\section*{Examinations}
\input{../../syllabi/_examination_text}
Since this is the capstone course, there is no final exam but rather a large final project. There will be two midterm exams; the schedule is below.
\subsection*{Exam and Major Assignment Schedule}\label{subsec:exam_schedule}
\begin{itemize}
\itemsep -0.0em
\item Midterm examination I will be Thurs, March 24 in class with the first review session on the Wednesday prior
\item The philosophy of modeling paper's first draft is due Sunday, Mar 21 at 11:59PM
\item Midterm examination II will be Thurs, May 13 in class with a review on the Wednesday prior
\item The final project is due Sunday, May 23 11:59PM
\end{itemize}
\subsection*{Exam Policies and Materials}
\input{../../syllabi/_examination_policies}
I also allow \qu{cheat sheets} on examinations. For both midterms, you are allowed to bring \ingreen{two} 8.5'' $\times$ 11'' sheets of paper (front and back). \inred{Four sheets single sided are not allowed.} On these sheets you can write anything you would like which you believe will help you on the exam. %For the final, you are allowed to bring three 8.5'' $\times$ 11'' sheet of paper (front and back). \inred{Six sheets single sided are not allowed.} I will be handing back the cheat sheets so you can reuse your midterm cheat sheets for the final if you wish.
\input{../../syllabi/_cheating_on_exams_and_missing_exams}
\input{../../syllabi/_special_services}
\input{../../syllabi/_class_participation}
\input{../../syllabi/_zoom_attendance}
\section*{Grading and Grading Policy}\label{sec:grading}
Your course grade will be calculated based on the percentages as follows:
\begin{table}[h]
\centering
\begin{tabular}{l|l}
Theory Homework & 9\% \\
Labs & 14\% \\
Midterm Examination I & 18\%\\
Midterm Examination II* & 18\%\\
Philosophy of Modeling Paper & 9\% \\
Final Project with Writeup & 22\% \\
Class participation & 5\% \\
Attendance & 5\%
\end{tabular}
\end{table}
\FloatBarrier
\noindent *The second midterm is not cumulative. It only covers material \textit{after} midterm I.
\input{../../syllabi/_advanced_course_grade_distribution}
\input{../../syllabi/_grade_checking_on_gradesly}
\input{../../syllabi/_auditing_policy}
\end{document}
|
{"hexsha": "0b5102bc4a5bb0cb7a8d9a8bf17304412a87063f", "size": 11666, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "syllabus/syllabus.tex", "max_stars_repo_name": "jakemanthebakeman/QC_MATH_342W_Spring_2021", "max_stars_repo_head_hexsha": "494c3dd2df6fb7647d0de5cd271eb85527ef0192", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "syllabus/syllabus.tex", "max_issues_repo_name": "jakemanthebakeman/QC_MATH_342W_Spring_2021", "max_issues_repo_head_hexsha": "494c3dd2df6fb7647d0de5cd271eb85527ef0192", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "syllabus/syllabus.tex", "max_forks_repo_name": "jakemanthebakeman/QC_MATH_342W_Spring_2021", "max_forks_repo_head_hexsha": "494c3dd2df6fb7647d0de5cd271eb85527ef0192", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 61.4, "max_line_length": 1338, "alphanum_fraction": 0.7759300531, "num_tokens": 3003}
|
% Copyright (c) 2019, Betsalel (Saul) Williamson, Jordan Henderson (the Authors)
% All rights reserved.
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are met:
% * Redistributions of source code must retain the above copyright
% notice, this list of conditions and the following disclaimer.
% * Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in the
% documentation and/or other materials provided with the distribution.
% * Neither the names of the Authors nor the
% names of its contributors may be used to endorse or promote products
% derived from this software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE Authors ``AS IS'' AND ANY
% EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
% WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
% DISCLAIMED. IN NO EVENT SHALL THE Authors BE LIABLE FOR ANY
% DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
% (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
% LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
% ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\section{Example Listings}
\subsection{Listing With Caption}
The following Listing has a caption and the language set to Matlab.
\begin{lstlisting}[caption=Pseudo code for echo cancellation.,language=matlab]
% for a sampled signal y_1[n] create an output signal y_2[n] that will be the size of the input signal minus N samples, where N is the timeshift
% skip the first N samples if N is greater than zero
% Starting at sample n, where n-N is the first sample of the signal, subtract y[n-N]*alpha from it, where alpha is a value between 0 and 1.
% Loop and increment n until the end of the signal y[n]
\end{lstlisting}
\subsection{Inline Listing}
Matlab was used to read in an audio file as a vector with two channels using the \lstinline!audioread! function. This inline Listing will change the font to indicate that this text is meant to be code.
\subsection{Listing from File}
See the code in Listing \ref{code:listing-example}. The options have the label for reference, the caption, and the language set to Matlab. The file to be used is \lstinline!lab2_parta.m! in the \lstinline!listings! directory.
\begin{lstlisting}[label=code:listing-example,language=tex]
\lstinputlisting[label=code:z-transform,language=Matlab,caption=Code to find the transfer functions and to plot systems $H_1$ through $H_4$.]{listings/lab2_parta.m}
\end{lstlisting}
|
{"hexsha": "ad84b046f1fa088ed888c9cb0f989effce2b22f4", "size": 2912, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "latex-template/examples/listings.tex", "max_stars_repo_name": "betsalel-williamson/source-2-pdf", "max_stars_repo_head_hexsha": "e2087df98814a83f1b0810dd7e809b9c2dc47b98", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-08T11:49:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-08T11:49:16.000Z", "max_issues_repo_path": "latex-template/examples/listings.tex", "max_issues_repo_name": "betsalel-williamson/source-2-pdf", "max_issues_repo_head_hexsha": "e2087df98814a83f1b0810dd7e809b9c2dc47b98", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "latex-template/examples/listings.tex", "max_forks_repo_name": "betsalel-williamson/source-2-pdf", "max_forks_repo_head_hexsha": "e2087df98814a83f1b0810dd7e809b9c2dc47b98", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 59.4285714286, "max_line_length": 225, "alphanum_fraction": 0.7733516484, "num_tokens": 663}
|
## SCENARIO PARTITION
function GAPM(Ind_old, duals; α_val = 0.)
    N = length(duals)                 # number of scenarios (previously an implicit global)
    dualRange = maximum(duals) - minimum(duals)
    MaxDiff = α_val * dualRange       # Maximum difference between scenario duals
    NP = maximum(Ind_old)             # number of partitions in the incoming index vector
    Scenarios = [duals Ind_old collect(1:N)]
    Ind, NsubParts = ones(N), 1
for p in 1:NP
DualPart = Scenarios[Scenarios[:,2].==p,:]
DualPart=DualPart[sortperm(DualPart[:, 1]), :]
sizePart = size(DualPart, 1)
Ind[Integer(DualPart[1,3])] = NsubParts
ref = DualPart[1,1]
for n in 2:sizePart
# Evaluating distance between duals
if abs(DualPart[n,1] - ref) > MaxDiff
NsubParts += 1
ref = DualPart[n,1] # Reference (1st) value for the partition
end
Ind[Integer(DualPart[n,3])] = NsubParts
end
NsubParts += 1
end
return Integer.(Ind)
end
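
# A minimal usage sketch (hypothetical duals; not part of the original module):
# refine a single starting partition with a tolerance of 10% of the dual range.
if abspath(PROGRAM_FILE) == @__FILE__
    duals = [0.10, 0.12, 0.50, 0.55, 0.90]
    Ind_old = ones(Int, length(duals))            # all scenarios start in partition 1
    println(GAPM(Ind_old, duals; α_val = 0.1))    # expected: [1, 1, 2, 2, 3]
end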
|
{"hexsha": "0cc7e23b6cf050de97bf335bfcadd66b1922b616", "size": 935, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/functions/GAPM.jl", "max_stars_repo_name": "gonzalez-alvaro/MultidimensionalGAPM", "max_stars_repo_head_hexsha": "605ca8f13feb189b5932ac72a77504e65bbd02c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/functions/GAPM.jl", "max_issues_repo_name": "gonzalez-alvaro/MultidimensionalGAPM", "max_issues_repo_head_hexsha": "605ca8f13feb189b5932ac72a77504e65bbd02c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/functions/GAPM.jl", "max_forks_repo_name": "gonzalez-alvaro/MultidimensionalGAPM", "max_forks_repo_head_hexsha": "605ca8f13feb189b5932ac72a77504e65bbd02c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1666666667, "max_line_length": 78, "alphanum_fraction": 0.5636363636, "num_tokens": 276}
|
module MesoscaleML
"""
func(x)
Returns double the number `x` plus `1`.
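
# Examples
A quick sanity check (2·3 + 1 = 7):
```jldoctest
julia> func(3)
7
```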
"""
func(x) = 2x + 1
export func
end # module
|
{"hexsha": "2c947af19558a17c75bcb2133cb4f8d3ec09320e", "size": 124, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MesoscaleML.jl", "max_stars_repo_name": "upiterbarg/MesoscaleML.jl", "max_stars_repo_head_hexsha": "5bb7f6d3ca431c63023efc5a9951f1cc16ad5f35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-25T21:33:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-26T02:28:27.000Z", "max_issues_repo_path": "src/MesoscaleML.jl", "max_issues_repo_name": "upiterbarg/MesoscaleML.jl", "max_issues_repo_head_hexsha": "5bb7f6d3ca431c63023efc5a9951f1cc16ad5f35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MesoscaleML.jl", "max_forks_repo_name": "upiterbarg/MesoscaleML.jl", "max_forks_repo_head_hexsha": "5bb7f6d3ca431c63023efc5a9951f1cc16ad5f35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.3333333333, "max_line_length": 39, "alphanum_fraction": 0.6209677419, "num_tokens": 41}
|
import numpy
import scipy.special
import os
# TODO Softmax Function for Output with cross entropy cost
'''
Neural network that can have any number of layers and nodes per layer.
It can use tanh, sigmoid or relu as the activation function for the hidden
layers and for the output layer. The cost function is squared error.
Weights and biases can be stored to and retrieved from .npy files.
'''
class _ActivationFunction:
    def execute(self, x):
        pass
    def derivative(self, y):
        pass
class TanhActivationFunction(_ActivationFunction):
def execute(self, x):
return numpy.tanh(x)
def derivative(self, y):
return (1.0 - (y * y))
class SigmoidActivationFunction(_ActivationFunction):
def execute(self, x):
return scipy.special.expit(x)
def derivative(self, y):
return (y * (1.0 - y))
class ReluActivationFunction(_ActivationFunction):
def execute(self, x):
return x * (x > 0)
def derivative(self, y):
return 1. * (y > 0)
class NeuralNetwork:
def __init__(self, nodes, learningRate=0.1, activation_function=TanhActivationFunction(), output_function=SigmoidActivationFunction(), momentum_factor=0.5):
self.momentum_factor = momentum_factor
self.activation_function = activation_function
self.output_function = output_function
self.learningRate = learningRate
self.nodes = nodes
self.layers = len(nodes)
self.weights = []
self.biases = []
self.biases_momentum = []
self.momentum = []
# Create all network layers
for i in range(1, self.layers):
# Initialize Layer weights (Output Weights x Input Weights for later matrix dot product for weighted sum).
self.weights.append(numpy.random.normal(
0.0, pow(self.nodes[i], -0.5), (self.nodes[i], self.nodes[i-1])))
# Initialize Momentum and Biases
self.momentum.append(numpy.zeros(
(self.nodes[i], self.nodes[i-1])))
self.biases_momentum.append(
numpy.zeros((self.nodes[i], 1)))
self.biases.append(numpy.ones((self.nodes[i], 1)))
def train(self, inputs, targets):
''' Calculates inputs, compares with targets, and trains network '''
originalInputs = numpy.array(inputs, ndmin=2).T
inputs = numpy.array(inputs, ndmin=2).T
targets = numpy.array(targets, ndmin=2).T
finalOutputs = []
for i in range(0, self.layers-1):
if i == self.layers-2:
# Output Layer function
inputs = self.output_function.execute(
numpy.dot(self.weights[i], inputs) + self.biases[i])
else:
# Default activation on hidden layers
inputs = self.activation_function.execute(
numpy.dot(self.weights[i], inputs) + self.biases[i])
finalOutputs.append(inputs)
# Calculate errors
errors = []
outputErrors = targets - finalOutputs[-1]
errors.append(outputErrors)
for i in range(self.layers-2, 0, -1):
if(i == (self.layers-2)):
errors.insert(0, numpy.dot(self.weights[i].T, outputErrors))
else:
errors.insert(0, numpy.dot(self.weights[i].T, errors[0]))
# Recalculate weights with back propagation
# Calculate Gradients --> Derivative of Error function in regards to specific weight
# Dot product with transposed Output of previous node for weight adjustment
        weightDelta = 0
        gradients = None
        for i in range(self.layers-2, -1, -1):
            # Use the output function's derivative on the last weight layer and
            # the hidden activation's derivative everywhere else (this also
            # covers the edge case of a network with a single weight layer)
            if i == self.layers-2:
                derivative = self.output_function.derivative(finalOutputs[i])
            else:
                derivative = self.activation_function.derivative(finalOutputs[i])
            gradients = self.learningRate * errors[i] * derivative
            # On the first weight layer the previous outputs are the raw inputs
            previousOutputs = originalInputs if i == 0 else finalOutputs[i-1]
            weightDelta = numpy.dot(gradients, numpy.transpose(previousOutputs))
            # Apply the update together with the momentum of the previous step
            self.weights[i] += (weightDelta + self.momentum[i])
            self.biases[i] += (gradients + self.biases_momentum[i])
            # Store the momentum for the next step
            self.momentum[i] = self.momentum_factor * weightDelta
            self.biases_momentum[i] = self.momentum_factor * gradients
def printSums(self):
'''
Prints sum of weights for checking if network is the same after storing/loading
'''
print(numpy.sum(numpy.array([plane.sum() for plane in self.weights])))
def query(self, inputs):
''' Get output for input '''
inputs = numpy.array(inputs, ndmin=2).T
for i in range(0, self.layers-1):
if i == self.layers-2:
# Output Layer function
inputs = self.output_function.execute(
numpy.dot(self.weights[i], inputs) + self.biases[i])
else:
# Default activation on hidden layers
inputs = self.activation_function.execute(
numpy.dot(self.weights[i], inputs) + self.biases[i])
return inputs
def storeResult(self):
''' Store weights, configuration in csv file '''
script_dir = os.path.dirname(__file__)
filenameWeights = "./nn_weights"
filenameBiases = "./nn_biases"
pathWeights = os.path.join(script_dir, filenameWeights)
pathBiases = os.path.join(script_dir, filenameBiases)
        # Store weights and biases as object arrays (the layer shapes differ)
        numpy.save(pathWeights, numpy.asarray(self.weights, dtype=object))
        numpy.save(pathBiases, numpy.asarray(self.biases, dtype=object))
def loadResult(self):
''' Load previous results and configure network '''
script_dir = os.path.dirname(__file__)
filenameWeights = "./nn_weights.npy"
filenameBiases = "./nn_biases.npy"
pathWeights = os.path.join(script_dir, filenameWeights)
pathBiases = os.path.join(script_dir, filenameBiases)
        # Load weights and biases (allow_pickle is required for object arrays)
        self.weights = list(numpy.load(pathWeights, allow_pickle=True))
        self.biases = list(numpy.load(pathBiases, allow_pickle=True))
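

# A minimal usage sketch (hypothetical data; not part of the original module):
# train a 2-4-1 network on XOR with the default tanh hidden / sigmoid output.
if __name__ == '__main__':
    nn = NeuralNetwork([2, 4, 1], learningRate=0.1)
    xor_data = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
    for _ in range(5000):
        for inputs, targets in xor_data:
            nn.train(inputs, targets)
    for inputs, _ in xor_data:
        print(inputs, nn.query(inputs))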
|
{"hexsha": "84aed9b4f414938067cf5f010300d6e454c39dff", "size": 7033, "ext": "py", "lang": "Python", "max_stars_repo_path": "library/neural_network.py", "max_stars_repo_name": "sunilson/Neural-Network-Backpropagation", "max_stars_repo_head_hexsha": "4aabbe22cb11ded7506d722c447f442862bd9494", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "library/neural_network.py", "max_issues_repo_name": "sunilson/Neural-Network-Backpropagation", "max_issues_repo_head_hexsha": "4aabbe22cb11ded7506d722c447f442862bd9494", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "library/neural_network.py", "max_forks_repo_name": "sunilson/Neural-Network-Backpropagation", "max_forks_repo_head_hexsha": "4aabbe22cb11ded7506d722c447f442862bd9494", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4404145078, "max_line_length": 160, "alphanum_fraction": 0.5963315797, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1443}
|
section \<open> Circus Trace Merge \<close>
theory utp_circus_traces
imports "UTP1-Stateful-Failures.utp_sf_rdes"
begin
subsection \<open> Function Definition \<close>
fun tr_par ::
"'\<theta> set \<Rightarrow> '\<theta> list \<Rightarrow> '\<theta> list \<Rightarrow> '\<theta> list set" where
"tr_par cs [] [] = {[]}" |
"tr_par cs (e # t) [] = (if e \<in> cs then {[]} else {[e]} \<^sup>\<frown> (tr_par cs t []))" |
"tr_par cs [] (e # t) = (if e \<in> cs then {[]} else {[e]} \<^sup>\<frown> (tr_par cs [] t))" |
"tr_par cs (e\<^sub>1 # t\<^sub>1) (e\<^sub>2 # t\<^sub>2) =
(if e\<^sub>1 = e\<^sub>2
then
if e\<^sub>1 \<in> cs
then {[e\<^sub>1]} \<^sup>\<frown> (tr_par cs t\<^sub>1 t\<^sub>2)
else
({[e\<^sub>1]} \<^sup>\<frown> (tr_par cs t\<^sub>1 (e\<^sub>2 # t\<^sub>2))) \<union>
({[e\<^sub>2]} \<^sup>\<frown> (tr_par cs (e\<^sub>1 # t\<^sub>1) t\<^sub>2))
else
if e\<^sub>1 \<in> cs then
if e\<^sub>2 \<in> cs then {[]}
else
{[e\<^sub>2]} \<^sup>\<frown> (tr_par cs (e\<^sub>1 # t\<^sub>1) t\<^sub>2)
else
if e\<^sub>2 \<in> cs then
{[e\<^sub>1]} \<^sup>\<frown> (tr_par cs t\<^sub>1 (e\<^sub>2 # t\<^sub>2))
else
{[e\<^sub>1]} \<^sup>\<frown> (tr_par cs t\<^sub>1 (e\<^sub>2 # t\<^sub>2)) \<union>
{[e\<^sub>2]} \<^sup>\<frown> (tr_par cs (e\<^sub>1 # t\<^sub>1) t\<^sub>2))"
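text \<open> Events in the synchronisation set cs must occur in both traces and are merged
  in lock-step; all other events are freely interleaved. Where synchronisation is
  impossible, the merged trace is truncated. \<close>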
abbreviation tr_inter :: "'\<theta> list \<Rightarrow> '\<theta> list \<Rightarrow> '\<theta> list set" (infixr "|||\<^sub>t" 100) where
"x |||\<^sub>t y \<equiv> tr_par {} x y"
subsection \<open> Lifted Trace Merge \<close>
syntax "_utr_par" ::
"logic \<Rightarrow> logic \<Rightarrow> logic \<Rightarrow> logic" ("(_ \<star>\<^bsub>_\<^esub>/ _)" [100, 0, 101] 100)
text \<open> The function @{const bop} is used to lift the binary trace-merge operator. \<close>
translations
"t1 \<star>\<^bsub>cs\<^esub> t2" == "(CONST bop) (CONST tr_par cs) t1 t2"
subsection \<open> Trace Merge Lemmas \<close>
lemma tr_par_empty:
"tr_par cs t1 [] = {takeWhile (\<lambda>x. x \<notin> cs) t1}"
"tr_par cs [] t2 = {takeWhile (\<lambda>x. x \<notin> cs) t2}"
\<comment> \<open> Subgoal 1 \<close>
apply (induct t1; simp)
\<comment> \<open> Subgoal 2 \<close>
apply (induct t2; simp)
done
lemma tr_par_sym:
"tr_par cs t1 t2 = tr_par cs t2 t1"
apply (induct t1 arbitrary: t2)
\<comment> \<open> Subgoal 1 \<close>
apply (simp add: tr_par_empty)
\<comment> \<open> Subgoal 2 \<close>
apply (induct_tac t2)
\<comment> \<open> Subgoal 2.1 \<close>
apply (clarsimp)
\<comment> \<open> Subgoal 2.2 \<close>
apply (clarsimp)
apply (blast)
done
lemma tr_inter_sym: "x |||\<^sub>t y = y |||\<^sub>t x"
by (simp add: tr_par_sym)
lemma trace_merge_nil [simp]: "x \<star>\<^bsub>{}\<^esub> U([]) = {x}\<^sub>u"
by (pred_auto, simp_all add: tr_par_empty, metis takeWhile_eq_all_conv)
lemma trace_merge_empty [simp]:
"(U([]) \<star>\<^bsub>cs\<^esub> U([])) = U({[]})"
by (rel_auto)
lemma trace_merge_single_empty [simp]:
"a \<in> cs \<Longrightarrow> U([\<guillemotleft>a\<guillemotright>]) \<star>\<^bsub>cs\<^esub> U([]) = U({[]})"
by (rel_auto)
lemma trace_merge_empty_single [simp]:
"a \<in> cs \<Longrightarrow> U([]) \<star>\<^bsub>cs\<^esub> U([\<guillemotleft>a\<guillemotright>]) = U({[]})"
by (rel_auto)
lemma trace_merge_commute: "t\<^sub>1 \<star>\<^bsub>cs\<^esub> t\<^sub>2 = t\<^sub>2 \<star>\<^bsub>cs\<^esub> t\<^sub>1"
by (rel_simp, simp add: tr_par_sym)
lemma csp_trace_simps [simp]:
"U(v + []) = v" "U([] + v) = v"
"bop (#) x xs ^\<^sub>u ys = bop (#) x (xs ^\<^sub>u ys)"
by (rel_auto)+
text \<open> Alternative characterisation of traces, adapted from CSP-Prover \<close>
inductive_set
parx :: "'a set => ('a list * 'a list * 'a list) set"
for X :: "'a set"
where
parx_nil_nil [intro]:
"([], [], []) \<in> parx X" |
parx_Ev_nil [intro]:
"[| (u, s, []) \<in> parx X ; a \<notin> X |]
==> (a # u, a # s, []) \<in> parx X" |
parx_nil_Ev [intro]:
"[| (u, [], t) \<in> parx X ; a \<notin> X |]
==> (a # u, [], a # t) \<in> parx X" |
parx_Ev_sync [intro]:
"[| (u, s, t) \<in> parx X ; a \<in> X |]
==> (a # u, a # s, a # t) \<in> parx X" |
parx_Ev_left [intro]:
"[| (u, s, t) \<in> parx X ; a \<notin> X |]
==> (a # u, a # s, t) \<in> parx X" |
parx_Ev_right [intro]:
"[| (u, s, t) \<in> parx X ; a \<notin> X |]
==> (a # u, s, a # t) \<in> parx X"
lemma parx_implies_tr_par: "(t, t\<^sub>1, t\<^sub>2) \<in> parx cs \<Longrightarrow> t \<in> tr_par cs t\<^sub>1 t\<^sub>2"
apply (induct rule: parx.induct)
apply (auto)
apply (case_tac t)
apply (auto)
apply (case_tac s)
apply (auto)
done
end
|
{"author": "isabelle-utp", "repo": "utp-main", "sha": "27bdf3aee6d4fc00c8fe4d53283d0101857e0d41", "save_path": "github-repos/isabelle/isabelle-utp-utp-main", "path": "github-repos/isabelle/isabelle-utp-utp-main/utp-main-27bdf3aee6d4fc00c8fe4d53283d0101857e0d41/theories/circus/utp_circus_traces.thy"}
|
from __future__ import print_function
from __future__ import division
import os
import gdal
import shutil
import numpy as np
def WriteRaster(InputArray, file_name, dimension):
    # create a 13-band GeoTIFF of size dimension x dimension
    dst_ds = gdal.GetDriverByName('GTiff').Create(file_name, dimension, dimension, 13, gdal.GDT_Int16)
    for i in range(13):
        dst_ds.GetRasterBand(i+1).WriteArray(InputArray[:,:,i])  # write band i+1 to the raster
    dst_ds.FlushCache()  # write to disk
def get_all_images(image_path, dest, name, dimension=64, stride=5, count_seed=0):
    '''
    This function crops all tiles from one large image with a stride given in the argument
    :param image_path: path to large image
    :param dest: folder to save the resultant images
    :param name: prefix used for the output file names
    :param dimension: side length (in pixels) of each cropped tile
    :param stride: step size in number of pixels
    :param count_seed: starting index for numbering the output files
    :return: None
    '''
# os.mkdir(dest)
# print('log: Created {}'.format(dest))
this_example = gdal.Open(image_path)
bands = range(1, this_example.RasterCount+1)
example_array = this_example.GetRasterBand(bands[0])
example_array = example_array.ReadAsArray()
for i in bands[1:]:
example_array = np.dstack((example_array,
this_example.GetRasterBand(i).ReadAsArray())).astype(np.int16)
# at this point we have read in the full image
# now let's begin the stride
# print(example_array.shape)
count = count_seed
for i in range(0, example_array.shape[0]-dimension, stride):
for j in range(0, example_array.shape[1]-dimension, stride):
count += 1
new_image = example_array[i:i+dimension, j:j+dimension, :]
# print(new_image.shape)
WriteRaster(InputArray=new_image,
file_name=os.path.join(dest, '{}-{}.tif').format(name, count),
dimension=dimension)
pass
def main(src, dst):
'''
This function will just go through all of the images in all of the folders and create a new dataset folder
:return: None
'''
if os.path.exists(dst):
shutil.rmtree(dst)
os.mkdir(dst)
for class_folder in os.listdir(src):
count = 0
dst_class_folder = os.path.join(dst, class_folder)
os.mkdir(dst_class_folder)
class_folder_path = os.path.join(src,class_folder)
for image in os.listdir(class_folder_path):
count += 1
print('log: On class {}, image {}'.format(class_folder, image))
image_name, ext = os.path.splitext(image)
src_image_path = os.path.join(class_folder_path,image)
# dst_image_path = os.path.join(dst_class_folder, os.path.join(image_name+'-{}.tif'.format(count)))
get_all_images(image_path=src_image_path, dest=dst_class_folder,
name=image_name, dimension=10, stride=5, count_seed=0)
pass
def single_test():
dest = '/home/annus/Desktop/test/'
if os.path.exists(dest):
shutil.rmtree(dest)
os.mkdir(dest)
get_all_images(image_path='/home/annus/Desktop/label_plainland_10.tif',
dest=dest,
name='this',
dimension=10,
stride=5)
if __name__ == '__main__':
main(src='/home/annus/PycharmProjects/ForestCoverChange_inputs_and_numerical_results/'
'patch_wise/pakistan_better_data/data_unpacked',
dst='/home/annus/PycharmProjects/ForestCoverChange_inputs_and_numerical_results/'
'patch_wise/pakistan_better_data/new_data_with_dim_10')
# single_test()
|
{"hexsha": "d42f6551dacbfd4e65c17fce193317b65a5548cc", "size": 3573, "ext": "py", "lang": "Python", "max_stars_repo_path": "labelling/get_all_images_from_large_images.py", "max_stars_repo_name": "annusgit/forestcoverUnet", "max_stars_repo_head_hexsha": "8ba4eafc6e5d637d3b08fa20d029e25173f96074", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-09-13T09:37:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-07T07:12:56.000Z", "max_issues_repo_path": "labelling/get_all_images_from_large_images.py", "max_issues_repo_name": "annusgit/forestcoverUnet", "max_issues_repo_head_hexsha": "8ba4eafc6e5d637d3b08fa20d029e25173f96074", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "labelling/get_all_images_from_large_images.py", "max_forks_repo_name": "annusgit/forestcoverUnet", "max_forks_repo_head_hexsha": "8ba4eafc6e5d637d3b08fa20d029e25173f96074", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-09-04T15:26:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T12:42:43.000Z", "avg_line_length": 36.0909090909, "max_line_length": 114, "alphanum_fraction": 0.6479149174, "include": true, "reason": "import numpy", "num_tokens": 836}
|
#!/usr/bin/env python
# dp2mr.py
import numpy as num
from dp2e import dp2e
from e2mr import e2mr
def dp2mr(p,t,dp,Tconvert=None):
"""w = dp2mr(p,t,dp,Tconvert)
compute water vapor mixing ratio (g/kg) given total
pressure p (mb), air temperature t (K), and dew point
temperature (K).
if input, Tconvert is used as the AIR temperature to switch
from using saturation vapor pressure over water to over ice.
dct 3/5/2000
"""
if ( Tconvert is not None ):
e = dp2e(t,dp,Tconvert)
else:
e = dp2e(t,dp)
# water vapor mixing ratio
w = e2mr(p,e)
return w
if __name__ == '__main__':
    print(dp2mr.__doc__)
t = num.array(
( 24.54, 23.16, 21.67, 20.23, 18.86, 17.49, 16.10, 14.69, 13.22, 11.52,
9.53, 7.24, 4.80, 2.34, 0.04, -2.29, -4.84, -7.64,-10.66,-13.95,
-17.54,-21.45,-25.58,-29.90,-34.33,-38.94,-43.78,-48.80,-53.94,-58.79,
-63.27,-67.32,-70.74,-73.62,-75.74,-77.07,-77.43,-76.63,-75.06,-73.14,
-71.43 ))
t = t + 273.15
td = num.array(
( 295.99569524, 294.88592297, 293.58149854, 292.11729779, 290.51490282,
288.80633219, 287.25337561, 285.56579921, 283.86054795, 281.99074887,
279.96863936, 278.00807838, 276.00353817, 273.74197577, 271.36371593,
268.74827599, 265.5596088, 261.9472149, 258.46973102, 255.00425602,
251.12242488, 247.15405877, 243.22262393, 238.86585074, 233.8823144,
228.4539335, 223.20007008, 217.86176743, 212.95046128, 209.08799585,
203.25047643, 202.6535621, 197.18886555, 196.61856765, 196.0340168,
195.44634221, 194.83729251, 194.21361376, 193.57543455, 192.93607596,
196.90293301))
p = num.array(
( 1012.0, 991.3, 969.1, 945.5, 920.4, 893.8, 865.7, 836.1, 805.1, 772.8,
739.5, 705.2, 670.3, 635.0, 599.7, 564.5, 529.8, 495.7, 462.6, 430.7,
400.0, 370.8, 343.0, 316.7, 292.0, 266.8, 247.2, 227.0, 208.2, 190.8,
174.7, 159.9, 146.2, 133.6, 121.9, 111.3, 101.5, 92.6, 84.4, 76.9,
70.0 ))
w = dp2mr(p,t,td,253.15)
print(w)
|
{"hexsha": "3f8af490e8abbde1ed80e6b1260b78ac1b9533b0", "size": 2038, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyuwphysret/common/pyfiles/atmos/dp2mr.py", "max_stars_repo_name": "graziano-giuliani/pythoncode", "max_stars_repo_head_hexsha": "4e505af5be3e32519cf4e62b85c101a63c885f77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyuwphysret/common/pyfiles/atmos/dp2mr.py", "max_issues_repo_name": "graziano-giuliani/pythoncode", "max_issues_repo_head_hexsha": "4e505af5be3e32519cf4e62b85c101a63c885f77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyuwphysret/common/pyfiles/atmos/dp2mr.py", "max_forks_repo_name": "graziano-giuliani/pythoncode", "max_forks_repo_head_hexsha": "4e505af5be3e32519cf4e62b85c101a63c885f77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-24T02:45:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-24T02:45:47.000Z", "avg_line_length": 37.7407407407, "max_line_length": 78, "alphanum_fraction": 0.6123650638, "include": true, "reason": "import numpy", "num_tokens": 957}
|
from time import time
import numpy as np
from pulp import LpMaximize, LpProblem, LpStatus, lpSum, LpVariable, LpMinimize
import copy
from verification.utils import massage_proj, LBFs_UBFs_onReLU, LBFs_UBFs_onSigmoid, solver_log_filename, lower_bound_from_logs, my_sigmoid, discretise_sigmoid_interval, get_solver
from matplotlib import pyplot as plt
def _set_input_constraints(problem, X_n, X_i, X_ii, l, U, model):
for i in X_n:
if l[i] >= 1e-1:
bound = 1 / np.sqrt(l[i])
# Lower Bound
problem += lpSum(
[ U[i,j] * X_i[j] for j in X_n ]) - lpSum([ U[i,j] * X_ii[j] for j in X_n ]) >= - bound
# Upper Bound
problem += lpSum(
[ U[i,j] * X_i[j] for j in X_n ]) - lpSum([ U[i,j] * X_ii[j] for j in X_n ]) <= bound
def _set_affine_constraints(problem, W, b, phi_i, phi_ii, zeta_i, zeta_ii, zeta_n):
for j, _ in enumerate(phi_i):
problem += lpSum([ W[k,j] * zeta_i[k] for k in zeta_n ]) + b[j] == phi_i[j]
problem += lpSum([ W[k,j] * zeta_ii[k] for k in zeta_n ]) + b[j] == phi_ii[j]
def build_global_problem_on_confidence_difference_MILP(problem, X_n, l, U, model, constraintMap, M, mip, time_limit):
''' Work in Progress'''
# Generate constraints for categorical features
cat_list = ['Continuous' if c == 0 else 'Binary' for c in constraintMap]
names = ["X_i_" + str(i) for i in range(len(cat_list))]
X_i = {i:LpVariable(names[i], lowBound=0, upBound=1, cat = cat_list[i])
for i in range(len(cat_list)) }
names = ["X_ii_" + str(i) for i in range(len(cat_list))]
X_ii = {i:LpVariable(names[i], lowBound=0, upBound=1, cat = cat_list[i])
for i in range(len(cat_list)) }
for c in np.unique(constraintMap):
if c > 0:
problem += lpSum( [X_i[j] for j in X_n if constraintMap[j] == int(c)] ) == 1
problem += lpSum( [X_ii[j] for j in X_n if constraintMap[j] == int(c)] ) == 1
# Input constraints
_set_input_constraints(problem, X_n, X_i, X_ii, l, U, model)
zeta_i = X_i
zeta_ii = X_ii
for i, layer in enumerate(model.layers):
zeta_n = range(len(zeta_i))
W, b = layer.get_weights()
W_n = range(W.shape[1])
phi_i = LpVariable.dicts(f"phi_i_l{i}", (W_n), cat='Continuous')
phi_ii = LpVariable.dicts(f"phi_ii_l{i}", (W_n), cat='Continuous')
# Add affine constraints
_set_affine_constraints(problem, W, b, phi_i, phi_ii, zeta_i, zeta_ii, zeta_n)
# build the Lower and upper bounds
last_layer = i == len(model.layers) - 1
phi_i_Ls = []
phi_i_Us = []
alpha_Ls = []
alpha_Us = []
beta_Ls = []
beta_Us = []
if not last_layer:
#The Relu output is always greater equal than zero
zeta_i = LpVariable.dicts("zeta_i_l" + str(i) , (W_n), cat='Continuous',lowBound=0)
zeta_ii = LpVariable.dicts("zeta_ii_l" + str(i) , (W_n), cat='Continuous',lowBound=0)
beta_i = LpVariable.dicts("beta_i_l" + str(i) , (W_n), cat='Binary',lowBound=0, upBound=1)
beta_ii = LpVariable.dicts("beta_ii_l" + str(i) , (W_n), cat='Binary',lowBound=0, upBound=1)
for j in phi_i:
problem_for_phi_i = copy.copy(problem)
problem_for_phi_i += phi_i[j]
# Minimisation
problem_for_phi_i.sense = 1
problem_for_phi_i.solve(get_solver()(msg = 0, mip=mip))
phi_i_Ls.append(problem_for_phi_i.objective.value())
# Maximisation
problem_for_phi_i.sense = -1
problem_for_phi_i.solve(get_solver()(msg = 0, mip=mip))
phi_i_Us.append(problem_for_phi_i.objective.value())
# Now I compute Relu upper and lower bounding coefficients
if not last_layer:
# We are in the flat side of the Relu
if phi_i_Us[j] <= 0:
problem += (zeta_i[j] == 0)
problem += (zeta_ii[j] == 0)
# We are in the identity side of the Relu
elif phi_i_Ls[j] >=0:
problem += (zeta_i[j] == phi_i[j])
problem += (zeta_ii[j] == phi_ii[j])
                    else:
                        # Unstable neuron: exact big-M encoding of zeta = ReLU(phi),
                        # using the binary indicator beta and the pre-activation
                        # bounds phi_i_Ls / phi_i_Us as the big-M constants
                        problem += (zeta_i[j] <= phi_i[j] - phi_i_Ls[j]*(1 - beta_i[j]) )
                        problem += (zeta_i[j] <= phi_i_Us[j]*beta_i[j])
                        problem += (zeta_i[j] >= phi_i[j])
                        problem += (zeta_ii[j] <= phi_ii[j] - phi_i_Ls[j]*(1 - beta_ii[j]) )
                        problem += (zeta_ii[j] <= phi_i_Us[j]*beta_ii[j])
                        problem += (zeta_ii[j] >= phi_ii[j])
else:
assert(j==0)
#NEW IMPLEMENTATION FOR PHI_GRID BELOW. The idea is that now it is adaptive to the shape of the sigmoid, instead of uniform.
if (phi_i_Us[j] <= 0) or (phi_i_Ls[j] >= 0): #we are in the part of the sigmoid in which the convexity doesn't change
phi_grid = discretise_sigmoid_interval(2*M+1,my_sigmoid(phi_i_Ls[j]),my_sigmoid(phi_i_Us[j]) )
else:
phi_grid = np.concatenate((discretise_sigmoid_interval(M+1,my_sigmoid(phi_i_Ls[j]),0.5),discretise_sigmoid_interval(M+1,0.5,my_sigmoid(phi_i_Us[j]))[1:] ))
#To do so, I First have to iterate over the various intervals
y_grid_lb = []
y_grid_ub = []
prev_value_lb = np.inf
prev_value_ub = - np.inf
for i_grid in range(len(phi_grid) - 1):
curr_x_L = phi_grid[i_grid]
curr_x_U = phi_grid[i_grid + 1]
[alpha_L,beta_L,alpha_U,beta_U] = LBFs_UBFs_onSigmoid(curr_x_L,curr_x_U)
y_grid_lb.append(min(alpha_L + beta_L*curr_x_L, prev_value_lb))
y_grid_ub.append(max(alpha_U + beta_U*curr_x_L, prev_value_ub))
prev_value_lb = alpha_L + beta_L*curr_x_U
prev_value_ub = alpha_U + beta_U*curr_x_U
y_grid_lb.append(prev_value_lb)
y_grid_ub.append(prev_value_ub)
#binary value for activation of piecewise component
y_n = range(2*M)
lambda_n = range(2*M+1)
y_i = LpVariable.dicts(f"y_i_{i}", (y_n), cat='Binary',lowBound=0, upBound=1)
y_ii = LpVariable.dicts(f"y_ii_{i}", (y_n), cat='Binary',lowBound=0, upBound=1)
lambda_i = LpVariable.dicts("lambda_i_" + str(i) , (lambda_n), cat='Continuous',lowBound=0, upBound=1)
lambda_ii = LpVariable.dicts("lambda_ii_" + str(i) , (lambda_n), cat='Continuous',lowBound=0, upBound=1)
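                # The lambdas are convex-combination weights over the grid points in
                # phi_grid; the binaries y select the active segment, so that at most
                # the two lambdas at that segment's endpoints can be non-zero. This is
                # a standard SOS2-style piecewise-linear MILP encoding of the sigmoid.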
#constraints on y_i encoding
problem += lpSum( y_i[k] for k in y_n ) == 1
problem += lpSum( y_ii[k] for k in y_n ) == 1
problem += lpSum( lambda_i[k] for k in lambda_n ) == 1
problem += lpSum( lambda_ii[k] for k in lambda_n ) == 1
#constraints on lambda parameters
for k in y_n:
problem += ( y_i[k] <= lambda_i[k] + lambda_i[k+1])
problem += ( y_ii[k] <= lambda_ii[k] + lambda_ii[k+1])
problem += lpSum( phi_grid[k]*lambda_i[k] for k in lambda_n ) == phi_i[j]
problem += lpSum( phi_grid[k]*lambda_ii[k] for k in lambda_n ) == phi_ii[j]
# Optimisation objective
problem += lpSum( y_grid_ub[k]*lambda_i[k] - y_grid_lb[k]*lambda_ii[k] for k in lambda_n)
return problem
def verify_globally(test_ds, model, U, l, epsilon, delta, opt_mode, M, time_limit, mip, proc_id):
''' Implementing global property for the logit case - will have to see how to extend
it to the actual classification confidence level later...'''
print(f'\n\nSOLVER LOG FILE: {solver_log_filename(proc_id)}\n\n')
X_n = range(test_ds.X_df.values.shape[1])
constraintMap = np.zeros(test_ds.X_df.values.shape[1])
for i, category in enumerate(test_ds.cat_cols):
if i < len(test_ds.cat_cols):
for idx in test_ds.columns_map[category]:
constraintMap[idx] = i+1
print('Building Max Confidence difference problem')
s_t = time()
problem_max = LpProblem('fairness_constraints', LpMaximize)
if opt_mode == 'lp':
problem_max = build_global_problem_on_confidence_difference_lp(
problem_max, X_n, l, U, model)
elif opt_mode == 'milp':
problem_max = build_global_problem_on_confidence_difference_MILP(
problem_max, X_n, l, U, model, constraintMap, M, mip=False, time_limit=time_limit/5)
e_t = time()
problem_build_time = e_t - s_t
print(f'---> Time passed building constraints: {problem_build_time}')
print('Solving Max Confidence difference problem')
s_t = time()
problem_max.solve(get_solver()(
mip=mip, timeLimit=time_limit, logPath=solver_log_filename(proc_id)))
e_t = time()
problem_solve_time = e_t - s_t
print(f'---> Time passed solving constraints: {problem_solve_time}')
prob_value = problem_max.objective.value()
log_value = lower_bound_from_logs(proc_id)
print(f'\nProblem Value is: {prob_value}')
print(f'\nLog Value is: {log_value}')
max_confidence_difference = prob_value
if prob_value is None:
max_confidence_difference = log_value
print(f'Max Confidence Diff is: {max_confidence_difference}')
#The problem is completely symmetric on x' and x'', so the minimum is just the opposite value of the maximum.
verifieds = float(max_confidence_difference <= delta)
verification_results = {
'verified_fraction': verifieds,
'max_confidence_diff': max_confidence_difference,
'problem_build_time': problem_build_time,
'problem_solve_time': problem_solve_time,
'verification_time': problem_build_time + problem_solve_time,
}
return verification_results
def verify_globally_for_weighted_lp(test_ds, model, epsilon, delta, opt_mode, M, time_limit, mip, proc_id):
U = np.eye(test_ds.X_df.values.shape[1]) / epsilon
l = np.ones(test_ds.X_df.values.shape[1])
return verify_globally(
test_ds, model, U, l, epsilon, delta, opt_mode, M, time_limit, mip, proc_id)
def verify_globally_for_mahalanobis(test_ds, model, proj, epsilon, delta, opt_mode, M, time_limit, mip, proc_id):
l, U = np.linalg.eigh(proj/(epsilon**2))
U = U.T
return verify_globally(
test_ds, model, U, l, epsilon, delta, opt_mode, M, time_limit, mip, proc_id)
|
{"hexsha": "10be0758f25fe8ca578c1c52b38a4ea012472d99", "size": 10745, "ext": "py", "lang": "Python", "max_stars_repo_path": "verification/global_v.py", "max_stars_repo_name": "eliasbenussi/nn-cert-individual-fairness", "max_stars_repo_head_hexsha": "d5298902190caccb91c2762e0c9d96f98b1fbb84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "verification/global_v.py", "max_issues_repo_name": "eliasbenussi/nn-cert-individual-fairness", "max_issues_repo_head_hexsha": "d5298902190caccb91c2762e0c9d96f98b1fbb84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "verification/global_v.py", "max_forks_repo_name": "eliasbenussi/nn-cert-individual-fairness", "max_forks_repo_head_hexsha": "d5298902190caccb91c2762e0c9d96f98b1fbb84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.5296610169, "max_line_length": 179, "alphanum_fraction": 0.5981386691, "include": true, "reason": "import numpy", "num_tokens": 2975}
|
#!/usr/bin/env python3
# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest, OpTestTool
import paddle
import cinn
from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
class TestSliceOp(OpTest):
def setUp(self):
self.init_case()
def init_case(self):
self.inputs = {"inputs": np.random.random([10, 12]).astype("float32")}
self.axes = [0, 1]
self.starts = [2, 2]
self.ends = [5, 5]
self.strides = [1, 1]
def build_paddle_program(self, target):
x = paddle.to_tensor(self.inputs["inputs"], stop_gradient=True)
res = paddle.strided_slice(x, self.axes, self.starts, self.ends,
self.strides)
pd_res = paddle.to_tensor(res, stop_gradient=True)
self.paddle_outputs = [pd_res]
def build_cinn_program(self, target):
builder = NetBuilder("slice")
inputs = builder.create_input(
Float(32), self.inputs["inputs"].shape, "inputs")
out = builder.slice(
inputs,
axes=self.axes,
starts=self.starts,
ends=self.ends,
strides=self.strides)
prog = builder.build()
res = self.get_cinn_output(prog, target, [inputs],
[self.inputs["inputs"]], [out])
self.cinn_outputs = [res]
def test_check_results(self):
self.check_outputs_and_grads()
class TestSliceCase1(TestSliceOp):
def init_case(self):
self.inputs = {"inputs": np.random.random([10, 12]).astype("float32")}
self.axes = [0, 1]
self.starts = [1, 2]
self.ends = [6, 1000]
self.strides = [1, 2]
class TestSliceCase2(TestSliceOp):
def init_case(self):
self.inputs = {"inputs": np.random.random([10, 12]).astype("float32")}
self.axes = [0, 1]
self.starts = [2, 1]
self.ends = [-1, 7]
self.strides = [3, 2]
class TestSliceCase3(TestSliceOp):
def init_case(self):
self.inputs = {"inputs": np.random.random([10, 12]).astype("float32")}
self.axes = [0, 1]
self.starts = [2, 1000]
self.ends = [8, 1]
self.strides = [1, -2]
class TestSliceCase4(TestSliceOp):
def init_case(self):
self.inputs = {"inputs": np.random.random([10, 12]).astype("float32")}
self.axes = [0, 1]
self.starts = [-1, -2]
self.ends = [-5, -8]
self.strides = [-1, -2]
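# For intuition: these strided-slice cases appear to mirror Python/NumPy
# slicing semantics (an assumption based on the tests above, not on CINN docs).
# A minimal NumPy-only reference sketch for the negative-stride case:
_x = np.random.random([10, 12]).astype("float32")
_ref = _x[-1:-5:-1, -2:-8:-2]  # axes=[0, 1], starts=[-1, -2], ends=[-5, -8], strides=[-1, -2]
assert _ref.shape == (4, 3)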
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "93109edc6283e27182393dff33ac4078f331b10a", "size": 3222, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tests/ops/test_slice_op.py", "max_stars_repo_name": "Avin0323/CINN", "max_stars_repo_head_hexsha": "093217619c821e73cec15511fa54cb0026ed0476", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/tests/ops/test_slice_op.py", "max_issues_repo_name": "Avin0323/CINN", "max_issues_repo_head_hexsha": "093217619c821e73cec15511fa54cb0026ed0476", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/tests/ops/test_slice_op.py", "max_forks_repo_name": "Avin0323/CINN", "max_forks_repo_head_hexsha": "093217619c821e73cec15511fa54cb0026ed0476", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9807692308, "max_line_length": 78, "alphanum_fraction": 0.6076970826, "include": true, "reason": "import numpy", "num_tokens": 819}
|
\begin{abstract}
\pagenumbering{roman}
\setcounter{page}{1}
\paragraph{}
Unstructured data such as doc, pdf, and accdb files is slow to search and filter for the desired information: every file must be inspected manually, which is time consuming and frustrating. It does not need to be done this way if we can use high computing power to achieve much faster content retrieval.
\paragraph{} We can use the features of a big data management system such as Hadoop to organize unstructured data dynamically and return the desired information. Hadoop provides components such as MapReduce, HDFS, and HBase to filter data according to user input. Building on these, we develop a Hadoop add-on for content search and filtering of unstructured data. The add-on provides APIs for different kinds of search results and can download either a full file or only the parts of files that are actually related to the queried topic. It also provides an API for context-aware search results, in which the most visited and most relevant documents are placed first, simplifying the user's work.
\paragraph{} This add-on can also be used by other industries and government authorities that want to use Hadoop for data retrieval according to their requirements.
\paragraph{} Beyond this add-on, we also plan to add more API features, such as content retrieval from scanned and image-based documents.
\end{abstract}
|
{"hexsha": "c1f9a7648749a2a4c39e5accee3d505b29b6b2a5", "size": 1352, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/assignments/final-report/abstract.tex", "max_stars_repo_name": "arks-api/arks-api", "max_stars_repo_head_hexsha": "692093762bfd855a5ad72f2b23cced34b6827baf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-06-03T15:06:56.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-08T03:05:13.000Z", "max_issues_repo_path": "doc/assignments/final-report/abstract.tex", "max_issues_repo_name": "arks-api/arks-api", "max_issues_repo_head_hexsha": "692093762bfd855a5ad72f2b23cced34b6827baf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-01T21:45:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-01T21:45:15.000Z", "max_forks_repo_path": "doc/assignments/final-report/abstract.tex", "max_forks_repo_name": "arks-api/arks-api", "max_forks_repo_head_hexsha": "692093762bfd855a5ad72f2b23cced34b6827baf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-06-03T15:07:07.000Z", "max_forks_repo_forks_event_max_datetime": "2016-06-08T03:05:30.000Z", "avg_line_length": 122.9090909091, "max_line_length": 645, "alphanum_fraction": 0.8121301775, "num_tokens": 268}
|
import argparse
import os
import sys
from collections import defaultdict
import numpy as np
from mir_eval.multipitch import evaluate as evaluate_frames
from mir_eval.transcription import precision_recall_f1_overlap as evaluate_notes
from mir_eval.transcription_velocity import precision_recall_f1_overlap as evaluate_notes_with_velocity
from mir_eval.util import midi_to_hz
from scipy.stats import hmean
from tqdm import tqdm
import torch
import onsets_and_frames.dataset as dataset_module
from onsets_and_frames import *
eps = sys.float_info.epsilon
def evaluate(data, model, device, onset_threshold=0.5, frame_threshold=0.5, save_path=None):
metrics = defaultdict(list)
for label in data:
label['audio'] = label['audio'].to(device)
label['label'] = label['label'].to(device)
label['velocity'] = label['velocity'].to(device)
label['onset'] = label['onset'].to(device)
label['offset'] = label['offset'].to(device)
label['frame'] = label['frame'].to(device)
pred, losses = model.run_on_batch(label)
for key, loss in losses.items():
metrics[key].append(loss.item())
for key, value in pred.items():
value.squeeze_(0).relu_()
p_ref, i_ref, v_ref = extract_notes(label['onset'], label['frame'], label['velocity'])
p_est, i_est, v_est = extract_notes(pred['onset'], pred['frame'], pred['velocity'], onset_threshold, frame_threshold)
t_ref, f_ref = notes_to_frames(p_ref, i_ref, label['frame'].shape)
t_est, f_est = notes_to_frames(p_est, i_est, pred['frame'].shape)
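        # convert frame indices to seconds: each spectrogram frame spans HOP_LENGTH / SAMPLE_RATE seconds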
scaling = HOP_LENGTH / SAMPLE_RATE
i_ref = (i_ref * scaling).reshape(-1, 2)
p_ref = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_ref])
i_est = (i_est * scaling).reshape(-1, 2)
p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])
t_ref = t_ref.astype(np.float64) * scaling
f_ref = [np.array([midi_to_hz(MIN_MIDI + midi) for midi in freqs]) for freqs in f_ref]
t_est = t_est.astype(np.float64) * scaling
f_est = [np.array([midi_to_hz(MIN_MIDI + midi) for midi in freqs]) for freqs in f_est]
p, r, f, o = evaluate_notes(i_ref, p_ref, i_est, p_est, offset_ratio=None)
metrics['metric/note/precision'].append(p)
metrics['metric/note/recall'].append(r)
metrics['metric/note/f1'].append(f)
metrics['metric/note/overlap'].append(o)
p, r, f, o = evaluate_notes(i_ref, p_ref, i_est, p_est)
metrics['metric/note-with-offsets/precision'].append(p)
metrics['metric/note-with-offsets/recall'].append(r)
metrics['metric/note-with-offsets/f1'].append(f)
metrics['metric/note-with-offsets/overlap'].append(o)
p, r, f, o = evaluate_notes_with_velocity(i_ref, p_ref, v_ref, i_est, p_est, v_est,
offset_ratio=None, velocity_tolerance=0.1)
metrics['metric/note-with-velocity/precision'].append(p)
metrics['metric/note-with-velocity/recall'].append(r)
metrics['metric/note-with-velocity/f1'].append(f)
metrics['metric/note-with-velocity/overlap'].append(o)
p, r, f, o = evaluate_notes_with_velocity(i_ref, p_ref, v_ref, i_est, p_est, v_est, velocity_tolerance=0.1)
metrics['metric/note-with-offsets-and-velocity/precision'].append(p)
metrics['metric/note-with-offsets-and-velocity/recall'].append(r)
metrics['metric/note-with-offsets-and-velocity/f1'].append(f)
metrics['metric/note-with-offsets-and-velocity/overlap'].append(o)
frame_metrics = evaluate_frames(t_ref, f_ref, t_est, f_est)
metrics['metric/frame/f1'].append(hmean([frame_metrics['Precision'] + eps, frame_metrics['Recall'] + eps]) - eps)
for key, loss in frame_metrics.items():
metrics['metric/frame/' + key.lower().replace(' ', '_')].append(loss)
if save_path is not None:
os.makedirs(save_path, exist_ok=True)
label_path = os.path.join(save_path, os.path.basename(label['path']) + '.label.png')
save_pianoroll(label_path, label['onset'], label['frame'])
pred_path = os.path.join(save_path, os.path.basename(label['path']) + '.pred.png')
save_pianoroll(pred_path, pred['onset'], pred['frame'])
midi_path = os.path.join(save_path, os.path.basename(label['path']) + '.pred.mid')
save_midi(midi_path, p_est, i_est, v_est)
return metrics
def evaluate_file(model_file, dataset, dataset_group, dataset_path, sequence_length, save_path,
onset_threshold, frame_threshold, device):
dataset_class = getattr(dataset_module, dataset)
kwargs = {'sequence_length': sequence_length, 'device': device, 'gpu_tensor': True, 'in_memory': True}
if dataset_group is not None:
kwargs['groups'] = [dataset_group]
if dataset_path is not None:
kwargs['path'] = dataset_path
dataset = dataset_class(**kwargs)
model = torch.load(model_file, map_location=device).eval()
summary(model)
metrics = evaluate(tqdm(dataset), model, device, onset_threshold, frame_threshold, save_path)
for key, values in metrics.items():
if key.startswith('metric/'):
_, category, name = key.split('/')
print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
return metrics
def evaluate_files(model_files, dataset, dataset_group, dataset_path, sequence_length, save_path,
onset_threshold, frame_threshold, device):
dataset_class = getattr(dataset_module, dataset)
kwargs = {'sequence_length': sequence_length, 'device': device, 'gpu_tensor': True, 'in_memory': True}
if dataset_group is not None:
kwargs['groups'] = [dataset_group]
if dataset_path is not None:
kwargs['path'] = dataset_path
dataset = dataset_class(**kwargs)
metrics_list = []
print(f"Running {len(model_files)} models on dataset.")
    for model_file in tqdm(model_files):
        model = torch.load(model_file, map_location=device).eval()
        # evaluate each model on the shared in-memory dataset and collect its metrics
        metrics = evaluate(dataset, model, device, onset_threshold, frame_threshold, save_path)
        for key, values in metrics.items():
            if key.startswith('metric/'):
                _, category, name = key.split('/')
                print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
        metrics_list.append(metrics)
    return metrics_list
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model_file', type=str)
parser.add_argument('dataset', nargs='?', default='MAPS')
parser.add_argument('dataset_group', nargs='?', default=None)
parser.add_argument('dataset_path', nargs='?', type=str, default=None)
parser.add_argument('--save-path', default=None)
parser.add_argument('--sequence-length', default=None, type=int)
parser.add_argument('--onset-threshold', default=0.5, type=float)
parser.add_argument('--frame-threshold', default=0.5, type=float)
parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
evaluate_file(**vars(parser.parse_args()))
|
{"hexsha": "13f1049fef6a261e82a9e48d6ea777ac3e308784", "size": 7349, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluate.py", "max_stars_repo_name": "brianc118/onsets-and-frames", "max_stars_repo_head_hexsha": "40ae19f7c44454a7ebb7414761e718fcffa7d400", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evaluate.py", "max_issues_repo_name": "brianc118/onsets-and-frames", "max_issues_repo_head_hexsha": "40ae19f7c44454a7ebb7414761e718fcffa7d400", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluate.py", "max_forks_repo_name": "brianc118/onsets-and-frames", "max_forks_repo_head_hexsha": "40ae19f7c44454a7ebb7414761e718fcffa7d400", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.93125, "max_line_length": 125, "alphanum_fraction": 0.6629473398, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1785}
|
from typing import List
import luigi
import sklearn.preprocessing
import gokart
import numpy as np
class CalculateWordEmbedding(gokart.TaskOnKart):
task_namespace = 'redshells.word_item_similarity'
word_task = gokart.TaskInstanceParameter()
word2item_task = gokart.TaskInstanceParameter()
item2embedding_task = gokart.TaskInstanceParameter()
output_file_path = luigi.Parameter(default='app/word_item_similarity/calculate_word_embedding.pkl') # type: str
def requires(self):
return dict(word=self.word_task, word2item=self.word2item_task, item2embedding=self.item2embedding_task)
def output(self):
return self.make_target(self.output_file_path)
def run(self):
word_data = self.load('word')
word2item = self.load('word2item')
item2embedding = self.load('item2embedding')
results = {word: self._calculate(word2item[word], item2embedding) for word in word_data if word in word2item}
self.dump(results)
def _calculate(self, items, item2embedding):
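        # sum the embeddings of all items mapped to the word, then L2-normalize the sum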
embeddings = [item2embedding[item] for item in items if item in item2embedding]
if not embeddings:
return None
return sklearn.preprocessing.normalize([np.sum(embeddings, axis=0)], norm='l2', axis=1)[0]
class CalculateWordEmbeddingWithSCDV(gokart.TaskOnKart):
"""
    Calculate word embeddings with SCDV (Sparse Composite Document Vectors)
"""
task_namespace = 'redshells.word_item_similarity'
word_task = gokart.TaskInstanceParameter()
scdv_task = gokart.TaskInstanceParameter()
l2_normalize = luigi.BoolParameter() # type: bool
output_file_path = luigi.Parameter(default='app/word_item_similarity/calculate_word_embedding.pkl') # type: str
def requires(self):
return dict(word=self.word_task, scdv=self.scdv_task)
def output(self):
return self.make_target(self.output_file_path)
def run(self):
scdv = self.load('scdv')
words = self.load('word') # type: List[str]
embeddings = scdv.infer_vector([[word] for word in words], l2_normalize=self.l2_normalize)
self.dump(dict(zip(list(words), list(embeddings))))
|
{"hexsha": "46311fd2ab5452c0e1e89c28612aed86c74f4dc3", "size": 2136, "ext": "py", "lang": "Python", "max_stars_repo_path": "redshells/app/word_item_similarity/calculate_word_embedding.py", "max_stars_repo_name": "mski-iksm/redshells", "max_stars_repo_head_hexsha": "1e956fed9b000ea3f6ba1c96e25d5dd953025155", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2019-01-02T01:31:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T08:56:12.000Z", "max_issues_repo_path": "redshells/app/word_item_similarity/calculate_word_embedding.py", "max_issues_repo_name": "mski-iksm/redshells", "max_issues_repo_head_hexsha": "1e956fed9b000ea3f6ba1c96e25d5dd953025155", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2019-03-28T02:33:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T00:45:25.000Z", "max_forks_repo_path": "redshells/app/word_item_similarity/calculate_word_embedding.py", "max_forks_repo_name": "mski-iksm/redshells", "max_forks_repo_head_hexsha": "1e956fed9b000ea3f6ba1c96e25d5dd953025155", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2019-02-21T03:08:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T23:27:48.000Z", "avg_line_length": 35.6, "max_line_length": 117, "alphanum_fraction": 0.7148876404, "include": true, "reason": "import numpy", "num_tokens": 505}
|
from tkinter import *
from tkinter import filedialog
from PIL import ImageTk, Image  # we need Pillow to display images in tkinter easily
import cv2
from ttkbootstrap import Style
from tkinter import ttk
import numpy as np
# 1- Color Tracking
def lower_upper(color_no):
# define range of blue color in HSV
lower_blue = np.array([105,50,50])
upper_blue = np.array([130,255,255])
# define range of green color in HSV
lower_green = np.array([45,50,50])
upper_green = np.array([75,255,255])
# define range of red color in HSV
lower_red = np.array([0,50,50])
upper_red = np.array([10,255,255])
    # select the range for the requested color
if color_no == 1:
lower = lower_red
upper = upper_red
elif color_no == 2:
lower = lower_green
upper = upper_green
else:
lower = lower_blue
upper = upper_blue
return lower, upper
def track_method(track_var, img_in):
color_no = track_var.get()
lower, upper = lower_upper(color_no)
hsv = cv2.cvtColor(img_in, cv2.COLOR_BGR2HSV)
    # Threshold the HSV image to keep only the selected color; red hue wraps
    # around the 0/179 boundary in OpenCV, so it needs two ranges
if color_no == 1:
lower_2 = np.array([170,50,50])
upper_2 = np.array([179,255,255])
mask_1 = cv2.inRange(hsv, lower, upper)
mask_2 = cv2.inRange(hsv, lower_2, upper_2)
mask = mask_1 + mask_2
else:
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(img_in,img_in, mask= mask)
res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
return res
# 2- Thresholding
def threshold_method(threshold_var, img_in):
threshold_type = threshold_var.get()
if len(img_in.shape) == 3:
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2GRAY)
if threshold_type == 1:
# Binary/Global Thresholding
ret, img_res = cv2.threshold(img_in,127,255,cv2.THRESH_BINARY)
#return img_res
elif threshold_type == 2:
# Adaptive Gaussian Thresholding
img_res = cv2.adaptiveThreshold(img_in,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
#return img_res
else:
# Otsu's Thresholding
ret, img_res = cv2.threshold(img_in,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
return img_res
# 3- Blurring
def blur_method(blur_var, img_in):
blur_type = blur_var.get()
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2RGB)
if blur_type == 1:
# Averaging Blurring
img_res = cv2.blur(img_in,(5,5))
elif blur_type == 2:
        # Gaussian Blurring
        img_res = cv2.GaussianBlur(img_in,(21,21),0) # img , kernel size , sigma (how wide the Gaussian is)
else:
# Median Blurring
img_res = cv2.medianBlur(img_in,5)
return img_res
# 4- Morphology
def morph_method(morph_var, img_in):
morph_type = morph_var.get()
if len(img_in.shape) == 3:
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2GRAY)
kernel = np.ones((5,5),np.uint8)
#ret, img_in = cv2.threshold(img_in,127,255,cv.THRESH_BINARY_INV)
if morph_type == 1:
# Erosion
img_res = cv2.erode(img_in,kernel,iterations = 1) # img , kernel , iterations
elif morph_type == 2:
# Dilation
img_res = cv2.dilate(img_in,kernel,iterations = 1)
elif morph_type == 3:
        # Opening
img_res = cv2.morphologyEx(img_in, cv2.MORPH_OPEN, kernel)
else:
# Closing
img_res = cv2.morphologyEx(img_in, cv2.MORPH_CLOSE, kernel)
return img_res
# 5- Edge detection
def edge_method(edge_var, img_in):
edge_type = edge_var.get()
if len(img_in.shape) == 3:
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2GRAY)
if edge_type == 1:
# Sobel-X Edge
img_res = cv2.Sobel(img_in,-1,1,0,ksize=5) # Image , DDepth = -1 (uint8) would be the result
        # Caution: you may need cv2.CV_64F so that negative gradients are kept;
        # then both black-to-white and white-to-black edges appear.
elif edge_type == 2:
# Sobel-Y Edge
img_res = cv2.Sobel(img_in,-1,0,1,ksize=5) # 1 , 0 for dx and 0 , 1 for dy
elif edge_type == 3:
# Scharr-X Edge
img_res = cv2.Scharr(img_in,cv2.CV_64F,1,0,5) # img ,ddepth, dx , dy, kernel
elif edge_type == 4:
# Scharr-Y Edge
img_res = cv2.Scharr(img_in,cv2.CV_64F,0,1,5)
elif edge_type == 5:
# Laplacian Edge
img_res = cv2.Laplacian(img_in,cv2.CV_64F,ksize=3) # img , ddepth , kernel
else:
# Canny Edge
img_res = cv2.Canny(img_in,100,200) # image , min threshold , max threshold
return img_res
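# A minimal headless usage sketch: the filter functions above only need an
# object exposing .get(), so a tiny stand-in avoids creating a Tk root.
# ('sample.jpg' below is a hypothetical path, not part of this app.)
class _Choice:
    def __init__(self, v): self.v = v
    def get(self): return self.v
# Example (uncomment to run):
# edges = edge_method(_Choice(6), cv2.imread('sample.jpg'))  # value 6 selects Canny
# cv2.imwrite('edges.jpg', edges)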
def scale_h(Wm, Hm, W, H):
X = Hm / H # Scale Factor for Height
Hf = int(X * H) # Height Final
Wf = int(X * W) # Width Final
return Wf, Hf
def scale_w(Wm, Hm, W, H):
X = Wm / W # Scale Factor for Width
Hf = int(X * H) # Height Final
Wf = int(X * W) # Width Final
return Wf, Hf
def new_shape(Wm, Hm, W, H):
    # This function resizes the image to fit the program window size while preserving its aspect ratio (H1/W1 = H2/W2)
if (H <= Hm) and (W <= Wm):
Hf = H
Wf = W
elif (H <= Hm) or (W <= Wm):
if H <= Hm:
Wf, Hf = scale_w(Wm, Hm, W, H)
else:
Wf, Hf = scale_h(Wm, Hm, W, H)
elif H <= W:
Wf, Hf = scale_w(Wm, Hm, W, H)
else:
Wf, Hf = scale_h(Wm, Hm, W, H)
return Wf, Hf
def select_image(panelA, panelB):
path = filedialog.askopenfilename() # open a file chooser dialog and allow the user to select an input image
global img, img_show # global reference to the image panels
img = cv2.imread(path)
# Program Window Size (max size)
width_m = 500
height_m = 400
# Image Size
width = img.shape[1]
height = img.shape[0]
# Image Final Size
width_f, height_f = new_shape(width_m, height_m, width, height)
# dsize
#dsize = (width, height)
global dsize
dsize = (width_f, height_f)
# resize image
img_show = cv2.resize(img, dsize)
img_show = cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
img_show = Image.fromarray(img_show) # convert the images to PIL format...
img_show = ImageTk.PhotoImage(img_show) # ...and then to tkinter format
panelA.configure(image=img_show)
panelB.configure(text="Output Image", image=out_empty)
panelA.image = img_show
panelB.image = out_empty
def apply_method(panelA, panelB, track_var, threshold_var, blur_var, morph_var, edge_var,
track_entry, threshold_entry, blur_entry, morph_entry, edge_entry):
img_check = False
img_res = img.copy()
for filter_index in range(1, 6, 1):
if (int(track_entry.get()) == filter_index) and (track_var.get() != 4):
img_check = True
img_res = track_method(track_var, img_res)
if (int(threshold_entry.get()) == filter_index) and (threshold_var.get() != 4):
img_check = True
img_res = threshold_method(threshold_var, img_res)
if (int(blur_entry.get()) == filter_index) and (blur_var.get() != 4):
img_check = True
img_res = blur_method(blur_var, img_res)
if (int(morph_entry.get()) == filter_index) and (morph_var.get() != 5):
img_check = True
img_res = morph_method(morph_var, img_res)
if (int(edge_entry.get()) == filter_index) and (edge_var.get() != 7):
img_check = True
img_res = edge_method(edge_var, img_res)
#######
    # Check whether any of the above filters was applied
if img_check:
img_res = cv2.resize(img_res, dsize)
img_res = Image.fromarray(img_res) # convert the images to PIL format...
img_res = ImageTk.PhotoImage(img_res) # ...and then to tkinter format
panelB.configure(image=img_res)
panelB.image = img_res
else:
panelB.configure(text="Output Image", image=out_empty)
panelB.image = out_empty
# initialize the window toolkit along with the two image panels
style = Style(theme='cyborg')
#root = Tk()
root = style.master
# Program Size & Show Location
# root.geometry('widthxheight+left+top')
#root.geometry('1030x600+350+100')
root.title('Image Control Panel')
root.iconbitmap('Icon-01.ico')
# Main 3 Frames of Program
#image_frame = ttk.Frame(root, width=1010, height=1000)
image_frame = ttk.Frame(root)
option_frame = ttk.Frame(root)
control_frame = ttk.Frame(root)
image_frame.pack(side="top", fill="both", expand="yes", padx="10", pady="10")
option_frame.pack(fill="both", expand="yes", padx="10", pady="10")
control_frame.pack(side="bottom", fill="both", expand="yes", padx="10", pady="10")
panelA = ttk.Label(root, text="Original Image")
panelB = ttk.Label(root, text="Output Image")
panelA.pack(in_=image_frame, side="left", fill="both", expand="yes", padx="5", pady="5") # Localization
panelB.pack(in_=image_frame, side="right", fill="both", expand="yes", padx="5", pady="5") # Localization
global in_empty, out_empty
in_empty = cv2.imread('InputImage.jpg')
out_empty = cv2.imread('OutputImage.jpg')
in_empty = cv2.cvtColor(in_empty, cv2.COLOR_BGR2RGB)
out_empty = cv2.cvtColor(out_empty, cv2.COLOR_BGR2RGB)
in_empty = Image.fromarray(in_empty) # convert the images to PIL format...
out_empty = Image.fromarray(out_empty)
in_empty = ImageTk.PhotoImage(in_empty) # ...and then to tkinter format
out_empty = ImageTk.PhotoImage(out_empty)
panelA.configure(image=in_empty)
panelB.configure(image=out_empty)
panelA.image = in_empty
panelB.image = out_empty
# create a button, then when pressed, will trigger a file chooser
# dialog and allow the user to select an input image; then add the
# button the GUI
btn = ttk.Button(root, text="Select an image", command=lambda: select_image(panelA, panelB))  # parent, text, bound callback
btn.pack(in_=control_frame, side="left", fill="both", expand="yes", padx="5", pady="5") # Localization
btn2 = ttk.Button(root, text="Applying Transformation",
command=lambda: apply_method(panelA, panelB, track_var, threshold_var, blur_var, morph_var, edge_var,
                                 track_entry, threshold_entry, blur_entry, morph_entry, edge_entry))  # parent, text, bound callback
btn2.pack(in_=control_frame, side="left", fill="both", expand="yes", padx="5", pady="5") # Localization
# Sub 5 Frames of Options Frame
# ( track_frame - threshold_frame - blur_frame - morph_frame - edge_frame )
# 1- Color Tracking
track_var = IntVar()
track_var.set(4) # need to use track_var.set and track_var.get to
# set and get the value of this variable
track_label = ttk.Label(root, text="Select color to track it:")
track_radio1 = ttk.Radiobutton(root, text="Red", variable=track_var, value=1)
track_radio2 = ttk.Radiobutton(root, text="Green", variable=track_var, value=2)
track_radio3 = ttk.Radiobutton(root, text="Blue", variable=track_var, value=3)
track_radio4 = ttk.Radiobutton(root, text="None", variable=track_var, value=4)
track_label2 = ttk.Label(root, text="Index:")
track_entry = Entry(root, justify="center", width=2)
track_entry.insert(0, 1)
track_label.grid(in_=option_frame, row=0, column=0, padx="2", pady="2", sticky="w")
track_radio1.grid(in_=option_frame, row=0, column=1, padx="2", pady="2", sticky="w")
track_radio2.grid(in_=option_frame, row=0, column=2, padx="2", pady="2", sticky="w")
track_radio3.grid(in_=option_frame, row=0, column=3, padx="2", pady="2", sticky="w")
track_radio4.grid(in_=option_frame, row=0, column=4, padx="2", pady="2", sticky="w")
track_label2.grid(in_=option_frame, row=0, column=6, padx="2", pady="2", sticky="w")
track_entry.grid(in_=option_frame, row=0, column=7, padx="2", pady="2", sticky="w")
# 2- Thresholding
threshold_var = IntVar()
threshold_var.set(4) # need to use threshold_var.set and threshold_var.get to
# set and get the value of this variable
threshold_label = ttk.Label(root, text="Select thresholding filter:")
threshold_radio1 = ttk.Radiobutton(root, text="Binary", variable=threshold_var, value=1)
threshold_radio2 = ttk.Radiobutton(root, text="Adaptive", variable=threshold_var, value=2)
threshold_radio3 = ttk.Radiobutton(root, text="Otsu", variable=threshold_var, value=3)
threshold_radio4 = ttk.Radiobutton(root, text="None", variable=threshold_var, value=4)
threshold_label2 = ttk.Label(root, text="Index:")
threshold_entry = Entry(root, justify="center", width=2)
threshold_label.grid(in_=option_frame, row=1, column=0, padx="2", pady="2", sticky="w")
threshold_radio1.grid(in_=option_frame, row=1, column=1, padx="2", pady="2", sticky="w")
threshold_radio2.grid(in_=option_frame, row=1, column=2, padx="2", pady="2", sticky="w")
threshold_radio3.grid(in_=option_frame, row=1, column=3, padx="2", pady="2", sticky="w")
threshold_radio4.grid(in_=option_frame, row=1, column=4, padx="2", pady="2", sticky="w")
threshold_label2.grid(in_=option_frame, row=1, column=6, padx="2", pady="2", sticky="w")
threshold_entry.grid(in_=option_frame, row=1, column=7, padx="2", pady="2", sticky="w")
threshold_entry.insert(0, 2)
# 3- Bluring
blur_var = IntVar()
blur_var.set(4) # need to use blur_var.set and blur_var.get to
# set and get the value of this variable
blur_label = ttk.Label(root, text="Select bluring filter:")
blur_radio1 = ttk.Radiobutton(root, text="Averaging", variable=blur_var, value=1)
blur_radio2 = ttk.Radiobutton(root, text="Gaussian", variable=blur_var, value=2)
blur_radio3 = ttk.Radiobutton(root, text="Median", variable=blur_var, value=3)
blur_radio4 = ttk.Radiobutton(root, text="None", variable=blur_var, value=4)
blur_label2 = ttk.Label(root, text="Index:")
blur_entry = Entry(root, justify="center", width=2)
blur_entry.insert(0, 3)
blur_label.grid(in_=option_frame, row=2, column=0, padx="2", pady="2", sticky="w")
blur_radio1.grid(in_=option_frame, row=2, column=1, padx="2", pady="2", sticky="w")
blur_radio2.grid(in_=option_frame, row=2, column=2, padx="2", pady="2", sticky="w")
blur_radio3.grid(in_=option_frame, row=2, column=3, padx="2", pady="2", sticky="w")
blur_radio4.grid(in_=option_frame, row=2, column=4, padx="2", pady="2", sticky="w")
blur_label2.grid(in_=option_frame, row=2, column=6, padx="2", pady="2", sticky="w")
blur_entry.grid(in_=option_frame, row=2, column=7, padx="2", pady="2", sticky="w")
# 4- Morphology
morph_var = IntVar()
morph_var.set(5) # need to use morph_var.set and morph_var.get to
# set and get the value of this variable
morph_label = ttk.Label(root, text="Select morphology:")
morph_radio1 = ttk.Radiobutton(root, text="Erosion", variable=morph_var, value=1)
morph_radio2 = ttk.Radiobutton(root, text="Dilation", variable=morph_var, value=2)
morph_radio3 = ttk.Radiobutton(root, text="Opening", variable=morph_var, value=3)
morph_radio4 = ttk.Radiobutton(root, text="Closing", variable=morph_var, value=4)
morph_radio5 = ttk.Radiobutton(root, text="None", variable=morph_var, value=5)
morph_label2 = ttk.Label(root, text="Index:")
morph_entry = Entry(root, justify="center", width=2)
morph_entry.insert(0, 4)
morph_label.grid(in_=option_frame, row=3, column=0, padx="2", sticky="w")
morph_radio1.grid(in_=option_frame, row=3, column=1, padx="2", pady="2", sticky="w")
morph_radio2.grid(in_=option_frame, row=3, column=2, padx="2", pady="2", sticky="w")
morph_radio3.grid(in_=option_frame, row=3, column=3, padx="2", pady="2", sticky="w")
morph_radio4.grid(in_=option_frame, row=3, column=4, padx="2", pady="2", sticky="w")
morph_radio5.grid(in_=option_frame, row=3, column=5, padx="2", pady="2", sticky="w")
morph_label2.grid(in_=option_frame, row=3, column=6, padx="2", pady="2", sticky="w")
morph_entry.grid(in_=option_frame, row=3, column=7, padx="2", pady="2", sticky="w")
# 5- Edge detection
edge_var = IntVar()
edge_var.set(7) # need to use edge_var.set and edge_var.get to
# set and get the value of this variable
edge_label = ttk.Label(root, text="Select edge detection:")
edge_radio1 = ttk.Radiobutton(root, text="Sobel-X", variable=edge_var, value=1)
edge_radio2 = ttk.Radiobutton(root, text="Sobel-Y", variable=edge_var, value=2)
edge_radio3 = ttk.Radiobutton(root, text="Scharr-X", variable=edge_var, value=3)
edge_radio4 = ttk.Radiobutton(root, text="Scharr-Y", variable=edge_var, value=4)
edge_radio5 = ttk.Radiobutton(root, text="Laplacian", variable=edge_var, value=5)
edge_radio6 = ttk.Radiobutton(root, text="Canny", variable=edge_var, value=6)
edge_radio7 = ttk.Radiobutton(root, text="None", variable=edge_var, value=7)
edge_label2 = ttk.Label(root, text="Index:")
edge_entry = Entry(root, justify="center", width=2)
edge_entry.insert(0, 5)
edge_label.grid(in_=option_frame, row=4, column=0, padx="2", pady="2", sticky="w")
edge_radio1.grid(in_=option_frame, row=4, column=1, padx="2", pady="2", sticky="w")
edge_radio2.grid(in_=option_frame, row=5, column=1, padx="2", pady="2", sticky="w")
edge_radio3.grid(in_=option_frame, row=4, column=2, padx="2", pady="2", sticky="w")
edge_radio4.grid(in_=option_frame, row=5, column=2, padx="2", pady="2", sticky="w")
edge_radio5.grid(in_=option_frame, row=4, column=3, padx="2", pady="2", sticky="w")
edge_radio6.grid(in_=option_frame, row=4, column=4, padx="2", pady="2", sticky="w")
edge_radio7.grid(in_=option_frame, row=4, column=5, padx="2", pady="2", sticky="w")
edge_label2.grid(in_=option_frame, row=4, column=6, padx="2", pady="2", sticky="w")
edge_entry.grid(in_=option_frame, row=4, column=7, padx="2", pady="2", sticky="w")
option_frame.columnconfigure(tuple(range(7)), weight=1)
option_frame.rowconfigure(tuple(range(4)), weight=1)
# kick off the GUI
root.mainloop()
|
{"hexsha": "cab82ebaf7a2aa27e27a12fb78e4cae83333fc67", "size": 17867, "ext": "py", "lang": "Python", "max_stars_repo_path": "Computer-Vision/Lec-04/ImageControlPanel.py", "max_stars_repo_name": "ashme2/ElectroPi", "max_stars_repo_head_hexsha": "9c28c65613a744af8a3bd58e557536644728df30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-19T09:28:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T02:25:00.000Z", "max_issues_repo_path": "Computer-Vision/Lec-04/ImageControlPanel.py", "max_issues_repo_name": "ashme2/ElectroPi", "max_issues_repo_head_hexsha": "9c28c65613a744af8a3bd58e557536644728df30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Computer-Vision/Lec-04/ImageControlPanel.py", "max_forks_repo_name": "ashme2/ElectroPi", "max_forks_repo_head_hexsha": "9c28c65613a744af8a3bd58e557536644728df30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.240990991, "max_line_length": 149, "alphanum_fraction": 0.6693345273, "include": true, "reason": "import numpy", "num_tokens": 5197}
|
function [ candidates, scores ] = sample_bing_windows( im, num_samples)
%SAMPLE_BING_WINDOWS Generates equally distributed windows in space,
%following the BING sizes
%   BING uses 29 specific window sizes; this method spreads these sizes
%   homogeneously inside the image
scores = [];
im_wh = [size(im, 2), size(im, 1)];
original_bing_window_sizes = [[512 512 ]; [256, 512 ]; [128, 512 ]; [64, 512 ]; ...
[512, 256 ]; [256, 256 ]; [128, 256 ]; [64, 256 ]; [32, 256 ]; ...
[512, 128 ]; [256, 128 ]; [128, 128 ]; [64, 128 ]; [32, 128 ]; [16, 128 ]; ...
[512, 64 ]; [256, 64 ]; [128, 64 ]; [64, 64 ]; [32, 64 ]; [16, 64 ]; ...
[128, 32 ]; [64, 32 ]; [32, 32 ]; [16, 32 ]; ...
[64, 16 ]; [32, 16 ]; [16, 16 ]];
%original_bing_window_sizes = [[64, 128]; [32, 128 ]];
original_num_window_sizes = size(original_bing_window_sizes, 1);
bing_window_sizes = [];
% we filter window sizes to fit inside the image
for i=1:size(original_bing_window_sizes, 1),
window_wh = original_bing_window_sizes(i, :);
if(sum(window_wh < im_wh) == 2),
bing_window_sizes = [bing_window_sizes; window_wh];
else
% the window is disregarded
end
end
num_window_sizes = size(bing_window_sizes, 1);
assert(num_samples > num_window_sizes);
candidates = [];
if num_window_sizes ~= original_num_window_sizes,
% we add one candidate that covers the whole image size
candidates = [0, 0, im_wh];
end
use_v0 = false;
if use_v0,
    % v0: use the same number of splits along the x and y axes (non-square strides)
sqrt_num_samples_size = floor(sqrt(num_samples / num_window_sizes));
samples_per_size = sqrt_num_samples_size * sqrt_num_samples_size;
assert(samples_per_size > 3);
extra_samples = num_samples - (samples_per_size * num_window_sizes) - size(candidates, 1);
delta_samples = ((sqrt_num_samples_size + 1) * (sqrt_num_samples_size + 1)) - samples_per_size;
divisions_per_size = repmat(sqrt_num_samples_size, 1, num_window_sizes);
start_extra_size_index = 6; % because we like 256x256 (and smaller)
for i = start_extra_size_index:num_window_sizes,
if extra_samples > delta_samples,
divisions_per_size(i) = divisions_per_size(i) + 1;
extra_samples = extra_samples - delta_samples;
else
fprintf('Added %i extra sizes\n', i - 1);
break;
end
end
for i = 1:num_window_sizes,
num_divisions = divisions_per_size(i);
window_wh = bing_window_sizes(i, :);
x1 = 1;
y1 = 1;
x2 = im_wh(1) - window_wh(1);
y2 = im_wh(2) - window_wh(2);
assert(x2 > 1);
assert(y2 > 1);
[xx, yy] = meshgrid(linspace(x1, x2, num_divisions), ...
linspace(y1, y2, num_divisions));
top_left_xy = [xx(:) yy(:)];
for j = 1:size(top_left_xy, 1),
xy = top_left_xy(j, :);
window = [xy, xy + window_wh];
candidates = [candidates; window];
end
end
else
% v1
% will use square strides
num_samples_per_size = floor(num_samples / num_window_sizes);
stride_per_size = zeros(1, num_window_sizes);
total_placed_samples = size(candidates, 1);
for i = 1:num_window_sizes,
window_wh = bing_window_sizes(i, :);
x2 = im_wh(1) - window_wh(1);
y2 = im_wh(2) - window_wh(2);
assert(x2 > 0);
assert(y2 > 0);
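        % aim for one sample per stride-by-stride block: stride = sqrt(free area / samples per size)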
block_area = (x2 * y2) / num_samples_per_size;
stride = sqrt(block_area);
assert(stride > 0);
num_samples_placed = compute_num_samples_placed(im_wh, window_wh, stride);
while(num_samples_placed > num_samples_per_size)
            stride = stride + 1; % larger stride, fewer placements
num_samples_placed = compute_num_samples_placed(im_wh, window_wh, stride);
end
assert(num_samples_placed <= num_samples_per_size);
total_placed_samples = total_placed_samples + num_samples_placed;
assert(total_placed_samples <= num_samples);
stride_per_size(i) = stride;
end
assert(total_placed_samples <= num_samples);
sqrt_num_samples_size = floor(sqrt(num_samples / num_window_sizes));
samples_per_size = sqrt_num_samples_size * sqrt_num_samples_size;
% assert(samples_per_size > 3);
assert(samples_per_size > 0);
extra_samples = num_samples - total_placed_samples;
assert(extra_samples >= 0);
start_extra_size_index = 6; % because we like 256x256 (and smaller)
for i = start_extra_size_index:num_window_sizes,
window_wh = bing_window_sizes(i, :);
stride = stride_per_size(i);
num_samples_placed = compute_num_samples_placed(im_wh, window_wh, stride);
new_stride = stride * 0.75;
%new_stride = stride - 1;
new_num_samples_placed = compute_num_samples_placed(im_wh, window_wh, new_stride);
delta_samples = new_num_samples_placed - num_samples_placed;
if extra_samples > delta_samples,
stride_per_size(i) = new_stride;
total_placed_samples = total_placed_samples + delta_samples;
extra_samples = extra_samples - delta_samples;
else
fprintf('Added %i extra sizes\n', i - start_extra_size_index);
break;
end
end
if extra_samples > 0,
fprintf('%i extra_samples remaining\n', extra_samples);
end
for i = 1:num_window_sizes,
window_wh = bing_window_sizes(i, :);
stride = stride_per_size(i);
x1 = 1;
y1 = 1;
x2 = im_wh(1) - window_wh(1);
y2 = im_wh(2) - window_wh(2);
x_dots = x1 + (mod(x2 - x1, stride) / 2): stride : x2;
y_dots = y1 + (mod(y2 - y1, stride) / 2): stride : y2;
assert(x2 > 0);
assert(y2 > 0);
[xx, yy] = meshgrid(x_dots, y_dots);
top_left_xy = [xx(:) yy(:)];
for j = 1:size(top_left_xy, 1),
xy = top_left_xy(j, :);
window = [xy, xy + window_wh];
candidates = [candidates; window];
end
end
assert(size(candidates, 1) == total_placed_samples);
end
assert(size(candidates, 1) <= num_samples);
end
function [ret] = compute_num_samples_placed(im_wh, window_wh, stride)
x1 = 1;
y1 = 1;
x2 = im_wh(1) - window_wh(1);
y2 = im_wh(2) - window_wh(2);
x_dots = x1 + (mod(x2 - x1, stride) / 2): stride : x2;
y_dots = y1 + (mod(y2 - y1, stride) / 2): stride : y2;
num_samples_placed = size(x_dots, 2) * size(y_dots, 2);
ret = num_samples_placed;
end
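% Usage sketch (assumption: `im` is any H-by-W-by-3 image already in memory):
%   [cands, ~] = sample_bing_windows(im, 1000);
%   % each row of cands is a window [x1 y1 x2 y2] in pixel coordinates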
|
{"author": "hosang", "repo": "detection-proposals", "sha": "858368afffde5ff4028020fcb1dd4381705ccbfb", "save_path": "github-repos/MATLAB/hosang-detection-proposals", "path": "github-repos/MATLAB/hosang-detection-proposals/detection-proposals-858368afffde5ff4028020fcb1dd4381705ccbfb/baselines/sample_bing_windows.m"}
|
#!/usr/bin/env python
#_*_coding:utf-8_*_
import sys, os, re
import math
import numpy as np
pPath = re.sub(r'codes$', '', os.path.split(os.path.realpath(__file__))[0])
sys.path.append(pPath)
from codes import readFasta
def Sim(a, b):
blosum62 = [
[ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, 0], # A
[-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, 0], # R
[-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 0], # N
[-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 0], # D
[ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, 0], # C
[-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0], # Q
[-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 0], # E
[ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, 0], # G
[-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0], # H
[-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, 0], # I
[-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, 0], # L
[-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0], # K
[-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, 0], # M
[-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, 0], # F
[-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, 0], # P
[ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0], # S
[ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, 0], # T
[-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, 0], # W
[-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, 0], # Y
[ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, 0], # V
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # -
]
AA = 'ARNDCQEGHILKMFPSTWYV-'
myDict = {}
for i in range(len(AA)):
myDict[AA[i]] = i
maxValue, minValue = 11, -4
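    # rescale the raw BLOSUM62 score to [0, 1] using the matrix extremes (max 11, min -4)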
return (blosum62[myDict[a]][myDict[b]] - minValue) / (maxValue - minValue)
def CalculateDistance(sequence1, sequence2):
if len(sequence1) != len(sequence2):
print(sequence1)
print(sequence2)
print('Error: inconsistent peptide length')
sys.exit(1)
distance = 1 - sum([Sim(sequence1[i], sequence2[i]) for i in range(len(sequence1))]) / len(sequence1)
return distance
def CalculateContent(myDistance, j, myLabelSets):
content = []
myDict = {}
for i in myLabelSets:
myDict[i] = 0
for i in range(j):
myDict[myDistance[i][0]] = myDict[myDistance[i][0]] + 1
for i in myLabelSets:
        content.append(myDict[i] / j)  # i is already a label value; indexing myLabelSets with it was a bug
return content
def KNNpeptide(fastas, **kw):
trainFile = kw['train']
labelFile = kw['label']
    if trainFile is None or labelFile is None:
print('Error: please specify the directory of train file ["--train"] and the label file ["--label"]')
sys.exit(1)
    if not os.path.exists(labelFile):
print('Error: the label file does not exist.')
sys.exit(1)
trainData = readFasta.readFasta(trainFile)
with open(labelFile) as f:
records = f.readlines()
myLabel = {}
for i in records:
        if i.strip() == '':
            continue  # skip blank lines, which would otherwise yield array = None and crash
        array = i.rstrip().split()
        myLabel[array[0]] = int(array[1])
myLabelSets = list(set(myLabel.values()))
if len(trainData) != len(myLabel):
print('ERROR: inconsistent sample number between train and label file.')
sys.exit(1)
kValues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, 0.13, 0.14, 0.15,
0.16, 0.17, 0.18, 0.19, 0.20, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.30]
kNum = []
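    # each k is a fraction of the training set; convert it to an absolute neighbour count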
for i in kValues:
kNum.append(math.ceil(len(trainData) * i))
encodings = []
header = ['#']
for k in kValues:
for l in myLabelSets:
header.append('Top' + str(k) + '.label' + str(l))
encodings.append(header)
for i in fastas:
name, sequence = i[0], i[1]
code = [name]
myDistance = []
for j in range(len(trainData)):
if name != trainData[j][0]:
myDistance.append([myLabel[trainData[j][0]], CalculateDistance(trainData[j][1], sequence)])
myDistance = np.array(myDistance)
myDistance = myDistance[np.lexsort(myDistance.T)]
for j in kNum:
code = code + CalculateContent(myDistance, j, myLabelSets)
encodings.append(code)
return encodings
|
{"hexsha": "bbfccc41e43df612685bc25ab362133c35b2a5a1", "size": 4591, "ext": "py", "lang": "Python", "max_stars_repo_path": "profab/utils/feature_extraction_module/iFeature/codes/KNNpeptide.py", "max_stars_repo_name": "Sametle06/PROFAB", "max_stars_repo_head_hexsha": "571b691df2c5e98df0bfc4d6335f3ecd245314fd", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2019-09-26T13:56:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-05T11:59:30.000Z", "max_issues_repo_path": "profab/utils/feature_extraction_module/iFeature/codes/KNNpeptide.py", "max_issues_repo_name": "Sametle06/PROFAB", "max_issues_repo_head_hexsha": "571b691df2c5e98df0bfc4d6335f3ecd245314fd", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-04T14:52:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-04T14:52:27.000Z", "max_forks_repo_path": "profab/utils/feature_extraction_module/iFeature/codes/KNNpeptide.py", "max_forks_repo_name": "Sametle06/PROFAB", "max_forks_repo_head_hexsha": "571b691df2c5e98df0bfc4d6335f3ecd245314fd", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-24T11:33:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-24T12:16:08.000Z", "avg_line_length": 38.906779661, "max_line_length": 103, "alphanum_fraction": 0.484426051, "include": true, "reason": "import numpy", "num_tokens": 2455}
|
source("eaglesoft-caplan-functions.r")
## get triples
res <- get.caplan.data(limit="30")
## fill in missing column names
## res <- fill.missing.caplan.columns(res)
## ## dates in res are in form "YYYY-MM-DD^^http://www.w3.org/2001/XMLSchema#date"
## ## so I need to lop off the "^^http://www.w3.org/2001/XMLSchema#date" part
## res <- trim.caplan.dates(res)
## ## order results by patientid, tooth number, procedure / finding date
## res.ordered <- order.caplan.rows(res)
## ## replace all NA values with "."
## res.ordered <- fill.missing.caplan.values(res.ordered)
## ## flip dates from YYYY-MM-DD to MM-DD-YYYY
## ## NB: do this after ordering rows!
## res.ordered <- flip.caplan.dates(res.ordered)
## ## put columns in order to match spreadsheet
## res.ordered <- order.caplan.columns(res.ordered)
## transform data into Caplan format
res <- transform.caplan.data(res)
## write results to file
write.caplan.spreadsheet(res)
write.caplan.matrix(res)
|
{"hexsha": "e9dcac9b848eb30059b4146e38e33d0c3998347a", "size": 962, "ext": "r", "lang": "R", "max_stars_repo_path": "src/tools/eaglesoft-query-all.r", "max_stars_repo_name": "oral-health-and-disease-ontologies/OHD-ontology", "max_stars_repo_head_hexsha": "e22530f45f0bfc31ccd8e1e69aa00791328e08b7", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-08T16:11:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T16:11:01.000Z", "max_issues_repo_path": "src/tools/eaglesoft-query-all.r", "max_issues_repo_name": "oral-health-and-disease-ontologies/OHD-ontology", "max_issues_repo_head_hexsha": "e22530f45f0bfc31ccd8e1e69aa00791328e08b7", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2019-05-13T19:04:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-31T18:34:56.000Z", "max_forks_repo_path": "src/tools/eaglesoft-query-all.r", "max_forks_repo_name": "oral-health-and-disease-ontologies/OHD-ontology", "max_forks_repo_head_hexsha": "e22530f45f0bfc31ccd8e1e69aa00791328e08b7", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0625, "max_line_length": 82, "alphanum_fraction": 0.7006237006, "num_tokens": 254}
|
###################################
# Routing module
###################################
"""
Route selector
Finds a route using a chosen mode (fastest, shortest or based on the Google Distances API) and returns the intersection indices of the route
**Arguments**
* `start_node` : unique start node id selected for an agent
* `waypoint` : unique node id of a waypoint or nothing when agent is driving directly from *start_node* to *fin_node*
* `fin_node` : unique finish id selected for an agent
* `activity` : string with category of waypoint or nothing when agent is driving directly from *start_node* to *fin_node*
* `sim_data` : `SimData` object
* `buffer` : array with already chosen routes stored as a `Road` object
* `routing_mode` : string determining how the route is selected (fastest, shortest or based on the Google Distances API)
"""
function get_route(start_node::Int,
waypoint::Union{Int,Nothing},
fin_node::Int,
activity::Union{String,Nothing},
sim_data::OpenStreetMapXSim.SimData,
buffer::Array{OpenStreetMapXSim.Road,1},
routing_mode::String)
if routing_mode == "fastest"
if isa(waypoint,Nothing)
route_nodes, distance, route_time = OpenStreetMapX.fastest_route(sim_data.map_data.network, start_node, fin_node)
else
route_nodes, distance, route_time = OpenStreetMapX.fastest_route(sim_data.map_data.network, start_node, waypoint, fin_node)
end
road = OpenStreetMapXSim.Road(start_node,fin_node, activity, routing_mode,route_nodes, 1)
push!(buffer,road)
return route_nodes
elseif routing_mode == "shortest"
if isa(waypoint,Nothing)
route_nodes, distance, route_time = OpenStreetMapX.shortest_route(sim_data.map_data.network, start_node, fin_node)
else
route_nodes, distance, route_time = OpenStreetMapX.shortest_route(sim_data.map_data.network, start_node, waypoint, fin_node)
end
road = OpenStreetMapXSim.Road(start_node,fin_node, activity, routing_mode,route_nodes, 1)
push!(buffer,road)
return route_nodes
else
if isa(waypoint,Nothing)
route_nodes,routing_mode = OpenStreetMapXSim.get_google_route(start_node, fin_node, sim_data.map_data, sim_data.googleapi_key)
else
route_nodes,routing_mode = OpenStreetMapXSim.get_google_route(start_node, fin_node, waypoint, sim_data.map_data, sim_data.googleapi_key)
end
road = OpenStreetMapXSim.Road(start_node,fin_node, activity, routing_mode,route_nodes, 1)
push!(buffer,road)
return route_nodes
end
end
"""
Waypoint selector
Selects a waypoint by minimizing the length of the route from DA_start through the waypoint to DA_fin
**Arguments**
* `start_node` : unique start node id selected for an agent
* `fin_node` : unique finish node id selected for an agent
* `activity` : string with category of sought waypoint
* `sim_data` : `SimData` object
* `exact` : boolean value indicating whether the waypoint is chosen with the exact or the approximate algorithm
"""
function get_waypoint(start_node::Int,
fin_node::Int,
activity::String,
sim_data::OpenStreetMapXSim.SimData,
exact::Bool)
waypoints = OpenStreetMapX.filter_graph_features(sim_data.features, sim_data.feature_to_intersections,sim_data.feature_classes,activity)
if exact
        return OpenStreetMapX.find_optimal_waypoint_exact(sim_data.map_data.network, sim_data.map_data.network.w, start_node, fin_node, waypoints)
    else
        return OpenStreetMapX.find_optimal_waypoint_approx(sim_data.map_data.network, sim_data.map_data.network.w, start_node, fin_node, waypoints)
end
end
"""
Route module selector
Selects a routing mode for two points from the following options: fastest route, shortest route or Google route, and returns the node indices of the chosen route
**Arguments**
* `DA_start` : unique DA id selected for an agent
* `DA_fin` : unique DA id selected for an agent
* `sim_data` : simulation data struct
* `DAs_to_intersection` : dictionary mapping each DA to nearest graph node
* `buffer` : array with already chosen routes stored as a `Road` object
**Assumptions**
- the probability of selecting each routing mode is equal
"""
function select_route(DA_start::Int, DA_fin::Int,
sim_data::OpenStreetMapXSim.SimData,
buffer::Array{OpenStreetMapXSim.Road,1}; google::Bool = false)
start_node = sim_data.DAs_to_intersection[DA_start]
fin_node = sim_data.DAs_to_intersection[DA_fin]
waypoint = activity = nothing
if google
routing_mode = rand(["shortest", "fastest", "google"])
else
routing_mode = rand(["shortest", "fastest"])
end
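    # reuse an identical, previously computed route from the buffer when available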
indice = findfirst(road -> (road.start_node == start_node) && (road.fin_node == fin_node) && (road.waypoint == waypoint) && (road.mode == routing_mode), buffer)
if isa(indice,Nothing)
return OpenStreetMapXSim.get_route(start_node, waypoint, fin_node, activity, sim_data, buffer, routing_mode)
else
buffer[indice].count += 1
return buffer[indice].route
end
end
"""
Route module selector for three points
Selects a routing mode for three points from the following options: fastest route, shortest route or Google route, and returns the node indices of the chosen route
**Arguments**
* `DA_start` : unique DA id selected for an agent
* `DA_fin` : unique DA id selected for an agent
* `activity` : string with the category of the sought waypoint
* `sim_data` : `SimData` object (provides the routing network, the DA-to-intersection mapping and the feature dictionaries)
* `buffer` : array with already chosen routes stored as a `Road` object
**Assumptions**
- the probability of selecting each routing mode is equal
- the agent chooses a waypoint based on the previously selected activity
- the waypoint approximately minimizes the length of the route from DA_start through the waypoint to DA_fin
"""
function select_route(DA_start::Int, DA_fin::Int,
activity::String,
sim_data::OpenStreetMapXSim.SimData,
buffer::Array{OpenStreetMapXSim.Road,1}; google::Bool = false)
start_node = sim_data.DAs_to_intersection[DA_start]
fin_node = sim_data.DAs_to_intersection[DA_fin]
if google
routing_mode = rand(["shortest", "fastest", "google"])
else
routing_mode = rand(["shortest", "fastest"])
end
indice = findfirst(road -> (road.start_node == start_node) && (road.fin_node == fin_node) && (road.waypoint == activity) && (road.mode == routing_mode), buffer)
if isa(indice,Nothing)
waypoint = OpenStreetMapXSim.get_waypoint(start_node,fin_node,activity,sim_data,false)
return OpenStreetMapXSim.get_route(start_node, waypoint, fin_node, activity, sim_data, buffer, routing_mode)
else
buffer[indice].count += 1
return buffer[indice].route
end
end
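# Usage sketch (assumptions: `sim_data` is a fully loaded SimData whose
# `DAs_to_intersection` contains the ids `da_home` and `da_work`; "shopping"
# is a hypothetical activity name used only for illustration):
#   buffer = OpenStreetMapXSim.Road[]
#   route  = select_route(da_home, da_work, sim_data, buffer)              # direct trip
#   route2 = select_route(da_home, da_work, "shopping", sim_data, buffer)  # trip via a waypoint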
|
{"hexsha": "a7f396afcf77ac7836bc1f143b67722e43056cd1", "size": 6979, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/routing_module.jl", "max_stars_repo_name": "pszufe/OpenStreetMapXSim.jl", "max_stars_repo_head_hexsha": "65fcfe6d366a4cab6cfb6f1fd3a4b0c051145f66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-13T03:27:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-13T03:27:17.000Z", "max_issues_repo_path": "src/routing_module.jl", "max_issues_repo_name": "pszufe/OpenStreetMapXSim.jl", "max_issues_repo_head_hexsha": "65fcfe6d366a4cab6cfb6f1fd3a4b0c051145f66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/routing_module.jl", "max_forks_repo_name": "pszufe/OpenStreetMapXSim.jl", "max_forks_repo_head_hexsha": "65fcfe6d366a4cab6cfb6f1fd3a4b0c051145f66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-13T03:27:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-13T03:27:19.000Z", "avg_line_length": 43.61875, "max_line_length": 166, "alphanum_fraction": 0.7360653389, "num_tokens": 1718}
|
ccc By Trifon Trifonov trifon@hku.hk
ccc You can modify it as you want, but please
ccc do not distribute without permission.
ccc This is not a final version!!!
ccc The final version will be available in the Python RVMod lib
ccc Trifonov et al. (in prep).
implicit none
real*8 PI, twopi
parameter (PI=3.14159265358979d0)
integer npl,ndset,idset,ndata,ma,mfit,i,j,NDSMAX,NPLMAX,MMAX
integer writeflag_best_par,hkl,gr_flag
integer writeflag_RV,writeflag_fit, amoebastarts
parameter (NDSMAX=20, NPLMAX=20, MMAX=200)
integer idsmax(NDSMAX),ia(MMAX),nt, ts(20000),ii, iter
real*8 x(20000),y(20000),sig(20000),y_in(20000)
real*8 a(MMAX),covar(MMAX,MMAX),alpha(MMAX,MMAX)
real*8 rms,mstar, mass(NPLMAX),ap(NPLMAX)
real*8 swift_mass(NPLMAX),s_mass(NPLMAX),j_mass(NPLMAX)
real*8 chisq,alamda,ochisq,dchisq, epsil, deltat
real*8 sigscale,x0,xmax, incl(NPLMAX),cap0m(NPLMAX)
real*8 t0,t1,t2,dt,offset,t_max,loglik,dy,sig2i
real*8 st_mass,sini,m1,a1,m2,a2,epoch,ftol
real*8 ymod(20000),dyda(MMAX), p(MMAX+1,MMAX),yamoeba(MMAX+1)
real*8 loglikk, ologlikk, dloglikk,best_w,best_we
external rvkep, compute_abs_loglik
character*80 infile
character*80 version_input, version
real*4 t_stop,when_to_kill, model_max,model_min
common /DSBLK/ npl,ndset,idsmax,idset,gr_flag
version = "0.07"
CALL getarg(1, version_input)
if(version_input.eq.'-version') then
write(*,*) version
goto 222
endif
twopi=2.d0*PI
ftol=0.000001d0
c first two just for consistency with dynamical input, not really used
read (*,*) epsil,deltat, amoebastarts,
& when_to_kill, nt, model_max, model_min ,gr_flag
c write(*,*) 'Stellar mass'
read (*,*) st_mass, writeflag_best_par, writeflag_RV,
& writeflag_fit
call io_read_data (ndata,x,ts,y,sig,epoch,
& x0,t_max,a,ia,ma,incl,cap0m,hkl)
mfit = 0
do j = 1,ma
if (ia(j).ne.0) mfit = mfit + 1
enddo
c call prepare_for_amoeba(p,MMAX+1,MMAX,yamoeba,a,ia,ma,mfit,
c & compute_abs_loglik,ndata,x,y,ymod,dyda,ts,sig)
loglikk = 0.d0
i = 0
500 continue
if (i.eq.amoebastarts) then
i = 0
goto 502
endif
i = i + 1
ologlikk = loglikk
call prepare_for_amoeba(p,MMAX+1,MMAX,yamoeba,a,ia,ma,mfit,
& compute_abs_loglik,ndata,x,y,ymod,dyda,ts,sig, i,hkl)
call amoeba(p,yamoeba,MMAX+1,MMAX,mfit,ftol,compute_abs_loglik,
& iter,ndata,x,y,ymod,dyda,ma,ts,sig,a,ia,loglikk,hkl)
CALL SECOND(t_stop)
if (t_stop.ge.when_to_kill) then
write(*,*) 'Max. time=',when_to_kill, 'sec ',
& 'exceeded t_stop =', t_stop, 'sec '
goto 502
endif
loglikk = yamoeba(1)
dloglikk = ologlikk - loglikk
j=0
do ii=1,ma
if (ia(ii).ne.0) then
j=j+1
a(ii)=p(1,j)
endif
enddo
if (dabs(dloglikk).ge.0.000001d0) goto 500
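c restart the simplex until the best log-likelihood changes by less
c than 1d-6 between consecutive amoeba runs, or until amoebastarts
c restarts have been used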
502 idset = 1
chisq=0.d0
rms=0.d0
loglik=0.d0
if (hkl.eq.0) then
do i = 1,npl
j = 6*(i-1)
if (a(j+2).lt.0.d0) then ! if P<0, set P>0
a(j+2) = abs(a(j+2))
endif
if (a(j+1).lt.0.d0) then ! if K<0, set K>0 and w = w+PI
a(j+4) = a(j+4) + PI
a(j+1) = abs(a(j+1))
if (a(j+4).gt.2.d0*PI) a(j+4) = a(j+4)-2.d0*PI
endif
if (a(j+3).lt.0.d0) then ! if e<0, set e>0 and w=w+PI, M0=M0-PI
a(j+3) = abs(a(j+3))
a(j+4) = a(j+4) + PI
if (a(j+4).gt.2.d0*PI) a(j+4) = a(j+4)-2.d0*PI
a(j+5) = a(j+5) - PI
if (a(j+5).lt.0.d0) a(j+5) = a(j+5)+2.d0*PI
endif
if (a(j+4).lt.0.d0) a(j+4) = dmod(a(j+4)+2.d0*PI, 2.d0*PI)
if (a(j+5).lt.0.d0) a(j+5) = dmod(a(j+5)+2.d0*PI, 2.d0*PI)
c if (a(j+6).lt.0.d0) a(j+6) = dmod(a(j+6)+2.d0*PI, 2.d0*PI)
if (a(j+4).gt.2.d0*PI) a(j+4) = dmod(a(j+4), 2.d0*PI )
if (a(j+5).gt.2.d0*PI) a(j+5) = dmod(a(j+5), 2.d0*PI )
c if (a(j+6).gt.2.d0*PI) a(j+6) = dmod(a(j+6), 2.d0*PI )
enddo
else
do i = 1,npl
j = 6*(i-1)
if (a(j+1).lt.0.d0) then ! if K<0, set K>0 and w = w+PI
a(j+4) = -1.d0*a(j+4) ! which is h = -h, k = -k
a(j+3) = -1.d0*a(j+3)
a(j+1) = abs(a(j+1))
endif
if (a(j+5).lt.0.d0) a(j+5) = dmod(a(j+5)+2.d0*PI, 2.d0*PI)
if (a(j+5).gt.2.d0*PI) a(j+5) = dmod(a(j+5), 2.d0*PI )
if (a(j+6).lt.0.d0) a(j+6) = dmod(a(j+6)+2.d0*PI, 2.d0*PI)
if (a(j+6).gt.2.d0*PI) a(j+6) = dmod(a(j+6), 2.d0*PI )
c write(*,*) a(j+4),a(j+4),ecc(i) ,omega(i) ,capmm(i)
enddo
endif
do i = 1,ndata
idset = ts(i)
call RVKEP (x(i),a,ymod(i),dyda,ma,idset,hkl)
y_in(i) = y(i) - a(6*npl+idset) - a(6*npl+2*ndset+1)*x(i) -
& a(6*npl +2*ndset + 2)*x(i)**2
ymod(i) = ymod(i) - a(6*npl+idset)
& - a(6*npl +2*ndset + 1)*x(i) -
& a(6*npl +2*ndset + 2)*x(i)**2
dy = y_in(i) - ymod(i)
if (writeflag_RV.gt.0) then
write(*,*) x0 + x(i),
& ymod(i), y_in(i) + a(6*npl+2*ndset+1)*x(i) +
& a(6*npl +2*ndset + 2)*x(i)**2,
& dy, sig(i), idset
endif
sig2i = 1.d0/(sig(i)**2 + a(6*npl+ndset+idset)**2)
chisq = chisq + dy*dy*sig2i
loglik = loglik - 0.5*dy*dy*sig2i -
& 0.5*dlog(twopi*(sig(i)**2
& + a(6*npl+ndset+idset)**2))
rms = rms + dy**2
enddo
rms = dsqrt(rms/dble(ndata))
write(*,*) 'loglik, reduced chi^2, chi^2, rms:'
write(*,*) loglik, chisq/dble(ndata-mfit),chisq,rms
51 format(f10.3,f10.3,f10.3,f10.3,f10.3,f10.3,f10.3,f10.3)
52 format(a,f14.3)
53 format(a,i4,a,i4,a,f7.3,a,f7.3,a,f12.3)
call MA_J (a,ma,npl,st_mass,sini,mass,ap,hkl,gr_flag)
cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
if (writeflag_best_par.gt.0) then
do j = 1,npl+1
j_mass(j) = mass(j)/1.2667d17
c s_mass(j) = mass(j)/1.32712497d20
c swift_mass(j) = (mass(j)/1.32712497d20)*((4.d0*PI*PI)
c & /(365.25*365.25))
enddo
write (*,*) 'Best-fit K [m/s], P [days], e, w [deg],
& M0 [deg], i[deg], cap0m[deg], w dot [deg/yr], and their errors'
do j = 1,npl
i = 6*(j-1)
if (hkl.eq.0) then
best_w = a(i+4)*180.d0/PI
best_we = dsqrt(covar(i+4,i+4))*180.d0/PI
else
best_w = a(i+4)
best_we = dsqrt(covar(i+4,i+4))
endif
write (*,*) a(i+1),a(i+2),a(i+3),best_w,
& a(i+5)*180.d0/PI,incl(j),cap0m(j),a(i+6)*180.d0/PI
write (*,*) dsqrt(covar(i+1,i+1)),dsqrt(covar(i+2,i+2)),
& dsqrt(covar(i+3,i+3)),
& best_we,
& dsqrt(covar(i+5,i+5))*180.d0/PI,0.d0, 0.d0,
& dsqrt(covar(i+6,i+6))*180.d0/PI
enddo
write (*,*) 'Best-fit V0 [m/s] and their error bars:'
do j = 1,ndset
i = 6*npl + j
write (*,*) a(i)
write (*,*) dsqrt(covar(i,i))
enddo
write (*,*) 'Jitters for each data set:'
do j = 1,ndset
write (*,*) a(6*npl+ndset+j)
write (*,*) '0'
enddo
write (*,*) 'linear trend [m/s per day]:'
write (*,*) a(6*npl + 2*ndset + 1)
write (*,*) dsqrt(covar(6*npl + 2*ndset + 1,6*npl + 2*ndset + 1))
write (*,*) 'quad. trend [m/s per day]:'
write (*,*) a(6*npl + 2*ndset + 2)
write (*,*) dsqrt(covar(6*npl + 2*ndset + 2,6*npl + 2*ndset + 2))
write (*,*) ' ndata =',ndata
write (*,*) ' mfit =',mfit
write (*,*) ' RMS =',rms
write (*,*) ' Chi^2 =',chisq/dble(ndata-mfit)
write (*,*) ' epoch = ', x0
write (*,*) 'Jupiter mass'
write (*,*) (j_mass(i+1),i=1,npl)
write(*,*) 'semi-major axes in Jacobi'
write(*,*) (ap(i)/1.49597892d11,i=1,npl)
endif
if(writeflag_fit.gt.0) then
dt = (x(ndata)+model_max+model_min)/dble(nt - 1)
do i = 1,nt
x(i) = ((i-1)*dt)-model_min
do j = 1,ndset
a(6*npl + j) = 0.0
enddo
call RVKEP (x(i),a,ymod(i),dyda,ma,1,hkl)
write(*,*) x0 + x(i), ymod(i)
enddo
endif
c stop
222 end
subroutine compute_abs_loglik(ndata,x,y,a2,ymod,dyda,ma,mfit,ts,
& sig,loglik, num,a,ia,hkl)
implicit none
integer MMAX,NDSMAX,npl,ndset,idset,num, mfit,gr_flag
parameter (MMAX=200, NDSMAX=20)
real*8 loglik, PI, TWOPI
parameter (PI=3.14159265358979d0)
parameter (TWOPI=2.0*PI)
integer ndata, i, j, ma, ts(20000), ia(MMAX), idsmax(NDSMAX),hkl
real*8 dy, sig(20000), dyda(MMAX), x(20000), y(20000)
real*8 ymod(20000),a(MMAX),a2(mfit),a3(MMAX),sig2i,y_in(20000)
& , y2(20000)
common /DSBLK/ npl,ndset,idsmax,idset,gr_flag
loglik=0.d0
j=0
do i=1,ma
if (ia(i).ne.0) then
j=j+1
a3(i)=a2(j)
else
a3(i)=a(i)
endif
enddo
do i = 1,ndata
idset = ts(i)
call RVKEP (x(i),a3,y2(i),dyda,ma,idset,hkl)
y_in(i) = y(i) - a3(6*npl+idset)-
& a3(6*npl+2*ndset+1)*x(i)
& - a3(6*npl+2*ndset+2)*x(i)**2
y2(i) = y2(i) - a3(6*npl+idset) -
& a3(6*npl+2*ndset+1)*x(i)
& - a3(6*npl+2*ndset+2)*x(i)**2
dy = y_in(i) - y2(i)
sig2i = 1.d0/(sig(i)**2 + a3(6*npl+ndset+idset)**2)
loglik = loglik + 0.5*dy*dy*sig2i +
& dlog(dsqrt(TWOPI*(sig(i)**2 +
& a3(6*npl+ndset+idset)**2)))
& - dlog(dsqrt(TWOPI))
c write(*,*) loglik
enddo
return
end
subroutine io_read_data(ndata,t,ts,ys,sigs,epoch,t0,t_max,
& ar,iar,ma,incl,cap0m,hkl)
implicit none
integer ndset,idset,ndata,NDSMAX,NPLMAX,MMAX,npl,ma
real*8 t(20000),y(20000),sig(20000),ys(20000),sigs(20000),PI
parameter (NDSMAX=20,NPLMAX=20,MMAX=200)
parameter(PI=3.14159265358979d0)
real*8 ar(MMAX),incl(NPLMAX),cap0m(NPLMAX)
integer iar(MMAX),u_off(NDSMAX),u_jit(NDSMAX),hkl
integer idsmax(NDSMAX),ts(20000), u_incl, u_cap0m
real*8 jitt(NDSMAX),sigscale,t0,t_max, epoch
real*8 off(NDSMAX),loglik
integer i,k,j,gr_flag
character*80 infile
common /DSBLK/ npl,ndset,idsmax,idset,gr_flag
c write (*,*) ' Number of Data Sets: '
read (*,*) ndset
if (ndset.gt.NDSMAX) stop ' KEPFIT: ndset > NDSMAX.'
ndata = 1
do i = 1,ndset
read (*,50) infile
50 format (a)
read (*,*) off(i)
read (*,*) u_off(i)
read (*,*) jitt(i)
read (*,*) u_jit(i)
open (unit=10,file=infile)
100 continue
read (10,*,err=200,end=200) t0,y(ndata),sig(ndata)
c sig(ndata) = dsqrt(sig(ndata)**2 + jitt(i)**2)
if (ndata.eq.1) then ! make sequences for datapoints
t(ndata) = t0
ts(ndata) = i
ys(ndata) = y(ndata)
sigs(ndata) = sig(ndata)
else
j = 1
1002 if (t0.lt.t(j)) then
do k = ndata-1,j,-1
t(k+1) = t(k)
ts(k+1) = ts(k)
ys(k+1) = ys(k)
sigs(k+1) = sigs(k)
enddo
t(j) = t0
ts(j) = i
ys(j) = y(ndata)
sigs(j) = sig(ndata)
goto 1001
else
j = j + 1
if (j.eq.ndata) then
t(j) = t0
ts(j) = i
ys(j) = y(ndata)
sigs(j) = sig(ndata)
goto 1001
endif
goto 1002
endif
endif
1001 ndata = ndata + 1
goto 100
200 idsmax(i) = ndata - 1
close (unit=10)
enddo
read (*,*) npl
if (npl.gt.NPLMAX) stop ' KEPFIT: npl > NPLMAX.'
do i = 1,ndset
ar(6*npl+i)=off(i)
iar(6*npl+i)=u_off(i)
ar(6*npl+ndset+i)=jitt(i)
iar(6*npl+ndset+i)=u_jit(i)
enddo
ma = 6*npl + 2*ndset + 2
do j = 1,npl
i = 6*(j-1)
read (*,*) ar(i+1),ar(i+2),ar(i+3),ar(i+4),ar(i+5),incl(j),
& cap0m(j),ar(i+6)
read (*,*) iar(i+1),iar(i+2),iar(i+3),iar(i+4),iar(i+5),
& u_incl, u_cap0m,iar(i+6)
c inclinations and cap0m are always ignored in the fit, just for consistency with dynamical input and output
enddo
read (*,*) ar(6*npl+ 2*ndset+1)
read (*,*) iar(6*npl+2*ndset+1)
read (*,*) ar(6*npl+ 2*ndset+2)
read (*,*) iar(6*npl+2*ndset+2)
ndata = ndata - 1
c write(*,*) 'for epoch :'
read (*,*) epoch
t_max = t(ndata)
if (epoch.eq.0) then
t0 = t(1)
else
t0 = epoch
endif
read (*,*) hkl
do j = 1,npl
i = 6*(j-1)
if (hkl.eq.0) then
ar(i+4) = ar(i+4)*PI/180.d0
endif
ar(i+5) = ar(i+5)*PI/180.d0
ar(i+6) = ar(i+6)*PI/180.d0
enddo
do i = 1,ndata
t(i) = (t(i) - t0) ! time unit is day
enddo
return
end
subroutine RVKEP (x,a,y,dyda,ma,ts,hkl)
implicit none
real*8 PI,TWOPI
parameter (PI=3.14159265358979d0)
parameter (TWOPI=2.0d0*PI)
integer npl,ndset,idset,ma,i,j,NDSMAX,ts,hkl,gr_flag
parameter (NDSMAX=20)
integer idsmax(NDSMAX)
real*8 x,y,a(ma),a2(ma),dyda(ma),mass(10),ap(10)
real*8 cosw,sinw,capm,cape,cose,sine,cosf,sinf,fac1,fac2,fac3
real*8 orbel_ehybrid, f, coswf,omega(10),capmm(10),ecc(10)
real*8 ecc2,wm,sinwm,coswm,sin2wm,cos2wm,sin3wm,cos3wm,omegad(10)
common /DSBLK/ npl,ndset,idsmax,idset,gr_flag
y = 0.d0
do i = 1,ma
a2(i)=a(i)
enddo
if (hkl.eq.0) then
do i = 1,npl
j = 6*(i-1)
if (a2(j+2).lt.0.d0) then ! if P<0, set P>0
a2(j+2) = dabs(a2(j+2))
endif
if (a2(j+1).lt.0.d0) then ! if K<0, set K>0 and w = w+PI
a2(j+4) = a2(j+4) + PI
a2(j+1) = dabs(a2(j+1))
if (a2(j+4).gt.2.d0*PI) a2(j+4) = a2(j+4)-2.d0*PI
endif
if (a2(j+3).lt.0.d0) then ! if e<0, set e>0 and w=w+PI, M0=M0-PI
a2(j+3) = dabs(a2(j+3))
a2(j+4) = a2(j+4) + PI
if (a2(j+4).gt.2.d0*PI) a2(j+4) = a2(j+4)-2.d0*PI
a2(j+5) = a2(j+5) - PI
if (a2(j+5).lt.0.d0) a2(j+5) = a2(j+5)+2.d0*PI
endif
if (a2(j+4).lt.0.d0) a2(j+4)=dmod(a2(j+4)+2.d0*PI,2.d0*PI)
if (a2(j+5).lt.0.d0) a2(j+5)=dmod(a2(j+5)+2.d0*PI,2.d0*PI)
c if (a2(j+6).lt.0.d0) a2(j+6)=dmod(a2(j+6)+2.d0*PI,2.d0*PI)
if (a2(j+4).gt.2.d0*PI) a2(j+4)=dmod(a2(j+4), 2.d0*PI)
if (a2(j+5).gt.2.d0*PI) a2(j+5)=dmod(a2(j+5), 2.d0*PI)
c if (a2(j+6).gt.2.d0*PI) a2(j+6)=dmod(a2(j+6), 2.d0*PI)
ecc(i) = a2(j+3)
omega(i) = a2(j+4)
capmm(i) = a2(j+5)
c omegad(i) = a2(j+6)
if(gr_flag.ne.0) call MA_J (a,ma,npl,1.0d0,1.0d0,
& mass,ap,hkl,gr_flag)
omegad(i) = a(j+6)
c write(*,*) ecc(i) ,omega(i) ,capmm(i),omegad(i)
enddo
else
do i = 1,npl
j = 6*(i-1)
if (a2(j+1).lt.0.d0) then ! if K<0, set K>0 and w = w+PI
a2(j+4) = -1.d0*a2(j+4) ! which is h = -h, k = -k
a2(j+3) = -1.d0*a2(j+3)
a2(j+1) = dabs(a2(j+1))
endif
ecc(i) = dsqrt(a2(j+3)**2 + a2(j+4)**2)
omega(i) = datan2(a2(j+3),a2(j+4))
if(omega(i).lt.0.d0)omega(i)=dmod(omega(i)+2.d0*PI,2.d0*PI)
if(omega(i).gt.0.d0)omega(i)=dmod(omega(i), 2.d0*PI)
if (a2(j+5).lt.0.d0) a2(j+5)=dmod(a2(j+5)+2.d0*PI, 2.d0*PI)
if (a2(j+5).gt.2.d0*PI) a2(j+5) = dmod(a2(j+5), 2.d0*PI)
capmm(i) = a2(j+5) - omega(i)
if(capmm(i).lt.0.d0)capmm(i)=dmod(capmm(i)+2.d0*PI,2.d0*PI)
if(capmm(i).gt.0.d0)capmm(i)=dmod(capmm(i), 2.d0*PI)
c also initialize omega-dot here, since the hkl branch below uses it
omegad(i) = a2(j+6)
c write(*,*) a2(j+4),a2(j+4),ecc(i) ,omega(i) ,capmm(i)
enddo
endif
if (hkl.eq.0) then
do j = 1,npl
i = 6*(j-1)
cosw = dcos(omega(j)+omegad(j)*x/365.25d0)
sinw = dsin(omega(j)+omegad(j)*x/365.25d0)
capm = TWOPI*x/a2(2+i) + capmm(j)
capm = dmod(capm, 2.d0*PI )
cape = ORBEL_EHYBRID (ecc(j),capm)
cose = dcos(cape)
sine = dsin(cape)
cosf = (cose - ecc(j))/(1.d0 - ecc(j)*cose)
sinf = (dsqrt(1.d0 - ecc(j)**2)*sine)/(1.d0 - ecc(j)*cose)
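c each planet contributes K*(cos(w+f) + e*cos(w)) to the RV signal;
c fac1 below expands cos(w+f) = cos(w)*cos(f) - sin(w)*sin(f)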
c f = 2.0d0*datan2( dsqrt(1.d0 - ecc(j))*dcos(cape/2.d0),
c & dsqrt(1.d0 + ecc(j))*dsin(cape/2.d0))
c coswf = dcos(omega(j)+f)
c fac1 = coswf + ecc(j)*cosw
fac1 = cosw*cosf - sinw*sinf + ecc(j)*cosw
fac2 = (cosw*sinf + sinw*cosf)/(1.d0 - ecc(j)*cose)**2
fac3 = -a2(1+i)*dsqrt(1.d0 - ecc(j)**2)*fac2
y = y + a2(1+i)*fac1
dyda(1+i) = fac1
dyda(2+i) = -TWOPI*fac3*x/a2(2+i)**2
dyda(3+i) = -a2(1+i)*sine*(2.d0 - ecc(j)**2 - ecc(j)*cose)*
& fac2/dsqrt(1.d0 - ecc(j)**2)
dyda(4+i) = -a2(1+i)*(sinw*cosf + cosw*sinf + ecc(j)*sinw)
dyda(5+i) = fac3
dyda(6+i) = -a2(1+i)*(sinw*cosf + cosw*sinf
& + ecc(j)*sinw)*x/365.25d0
enddo
else
do j = 1,npl
i = 6*(j-1)
c ecc2 = dsqrt(a2(3+i)**2 + a2(4+i)**2)
if (ecc(j).gt.1.d-2) then
cosw = dcos(omega(j)+omegad(j)*x/365.25d0)
sinw = dsin(omega(j)+omegad(j)*x/365.25d0)
capm = TWOPI*x/a2(2+i) + capmm(j)
capm = dmod(capm, 2.d0*PI )
c write(*,*) capm
cape = ORBEL_EHYBRID (ecc(j),capm)
cose = dcos(cape)
sine = dsin(cape)
cosf = (cose - ecc(j))/(1.d0 - ecc(j)*cose)
sinf = (dsqrt(1.d0 - ecc(j)**2)*sine)/(1.d0 - ecc(j)*cose)
fac1 = cosw*cosf - sinw*sinf + ecc(j)*cosw
fac2 = cosw*sinf + sinw*cosf
fac3 = -a2(1+i)*dsqrt(1.d0 - ecc(j)**2)*fac2/
& (1.d0 - ecc(j)*cose)**2
y = y + a2(1+i)*fac1
dyda(1+i) = fac1
dyda(2+i) = -TWOPI*fac3*x/a2(2+i)**2
dyda(3+i) = -a2(1+i)*fac2*((2.d0-ecc(j)**2-ecc(j)*cose)*
& sinw*sine/dsqrt(1.d0-ecc(j)**2) -
& dsqrt(1.d0 - ecc(j)**2)*cosw/ecc(j))/
& (1.d0 - ecc(j)*cose)**2 -
& a2(1+i)*fac2*cosw/ecc(j)
dyda(4+i) = -a2(1+i)*fac2*((2.d0-ecc(j)**2-ecc(j)*cose)*
& cosw*sine/dsqrt(1.d0 - ecc(j)**2) +
& dsqrt(1.d0 - ecc(j)**2)*sinw/ecc(j))/
& (1.d0 - ecc(j)*cose)**2 +
& a2(1+i)*fac2*sinw/ecc(j)
dyda(5+i) = fac3
dyda(6+i) = dyda(4+i)*x/365.25d0
else
wm = TWOPI*x/a2(2+i) + a2(5+i)
wm = dmod(wm, 2.d0*PI )
coswm = dcos(wm)
sinwm = dsin(wm)
cos2wm = dcos(2.d0*wm)
sin2wm = dsin(2.d0*wm)
cos3wm = dcos(3.d0*wm)
sin3wm = dsin(3.d0*wm)
fac1 = coswm + a2(3+i)*sin2wm - a2(4+i)*(1.d0 - cos2wm) -
& a2(3+i)**2*(0.875d0*coswm + 1.125d0*cos3wm) -
& a2(3+i)*a2(4+i)*(0.25d0*sinwm - 2.25d0*sin3wm) -
& a2(4+i)**2*1.125d0*(coswm - cos3wm)
c fac3 is the derivative of fac1 with respect to wm
fac3 = -sinwm + 2.d0*a2(3+i)*cos2wm - 2.d0*a2(4+i)*sin2wm +
& a2(3+i)**2*(0.875d0*sinwm + 3.375d0*sin3wm) -
& a2(3+i)*a2(4+i)*(0.25d0*coswm - 6.75d0*cos3wm) +
& a2(4+i)**2*(1.125d0*sinwm - 3.375d0*sin3wm)
y = y + a2(1+i)*fac1
dyda(1+i) = fac1
dyda(2+i) = -a2(1+i)*TWOPI*fac3*x/a2(2+i)**2
dyda(3+i) = a2(1+i)*(sin2wm -
& a2(3+i)*(1.75d0*coswm + 2.25d0*cos3wm) -
& a2(4+i)*(0.25d0*sinwm - 2.25d0*sin3wm))
dyda(4+i) = a2(1+i)*(-1.d0 + cos2wm -
& a2(3+i)*(0.25d0*sinwm - 2.25d0*sin3wm) -
& a2(4+i)*2.25d0*(coswm - cos3wm))
dyda(5+i) = a2(1+i)*fac3
dyda(6+i) = dyda(4+i)*x/365.25d0
endif
enddo
endif
c do i = 1,idset
y = y + a2(6*npl+ts)
dyda(6*npl+ts) = 1.d0
c enddo
c do i = 1,idset
c y = y + a(5*npl+i)
c dyda(5*npl+i) = 1.d0
c enddo
y = y + a2(6*npl +2*ndset + 1)*x
y = y + a2(6*npl +2*ndset + 2)*x**2
c write(*,*) a2(6*npl +2*ndset + 1), a2(6*npl+ndset + 2)
c dyda(6*npl + ndset + 1) = x
c dyda(6*npl + ndset + 2) = x**2
do i = ts+1,ndset
dyda(6*npl+i) = 0.d0
enddo
return
end
subroutine prepare_for_amoeba(p,mp,np,y,a,ia,ma,mfit,funk,ndata,
& x,z,ymod,dyda,ts,sig, it,hkl)
integer MMAX,NDSMAX,ma,ts(20000), ndata,mp,np,mfit,it
parameter(MMAX=200,NDSMAX=20)
REAL*8 ftol,p(mp,np),y(mp),a(MMAX), a2(mfit),fr,frjitt
real*8 x(20000),z(20000),ymod(20000)
real*8 dyda(MMAX), sig(20000), loglik
parameter(fr=0.05, frjitt=0.05)
INTEGER i,j,k, ia(MMAX), idsmax(NDSMAX),hkl,gr_flag
external funk
common /DSBLK/ npl,ndset,idsmax,idset,gr_flag
k=0
do j=1,ma
if(ia(j).ne.0) then
k=k+1
p(1,k)=a(j)
do i=2,mfit+1
if (k.eq.(i-1)) then
if (j.gt.(6*npl+ndset)) then
p(i,k)=(1+frjitt)*(p(1,k)+0.1)
else
if (mod(j,5).eq.2) then
p(i,k)=(1+fr)*(p(1,k) + 0.1)
else if (mod(j,5).eq.3) then
p(i,k)=(1+frjitt)*(p(1,k)+0.1)
else
p(i,k)=(1+fr)*(p(1,k)+0.1)
endif
endif
else
p(i,k)=p(1,k)
endif
enddo
endif
enddo
do i=1,mfit+1
do j=1,mfit
a2(j)=p(i,j)
enddo
call funk(ndata,x,z,a2,ymod,dyda,ma,mfit,ts,sig,loglik,i,
& a,ia,hkl)
y(i)=loglik
c write(*,*) a2(1),a2(2),a2(3),a2(4),a2(5),a2(6),a2(7)
c write(*,*) a2(8),a2(9),a2(10),a2(11),a2(12),a2(13),a2(14)
c write(*,*) a2(15),a2(16),a2(17),a2(18),a2(19),a2(20),a2(21)
enddo
return
end
SUBROUTINE amoeba(p,y,mp,np,ndim,ftol,funk,iter,ndata,x,z,ymod,
& dyda,ma,ts,sig,a,ia,ytry,hkl)
implicit none
INTEGER iter,mp,ndim,np,NMAX,ITMAX, MMAX,ma,ts(20000), ndata
REAL*8 ftol,p(mp,np),y(mp),x(20000),z(20000),ymod(20000)
PARAMETER (NMAX=20,ITMAX=200000,MMAX=200)
real*8 dyda(MMAX), sig(20000), loglik, a(MMAX)
EXTERNAL funk
INTEGER i,ihi,ilo,inhi,j,m,n, ia(MMAX),hkl
REAL*8 rtol,summ,swap,ysave,ytry,psum(ndim),amotry
iter=0
1 do 12 n=1,ndim
summ=0.d0
do 11 m=1,ndim+1
summ=summ+p(m,n)
11 continue
psum(n)=summ
12 continue
2 ilo=1
if (y(1).gt.y(2)) then
ihi=1
inhi=2
else
ihi=2
inhi=1
endif
do 13 i=1,ndim+1
if(y(i).le.y(ilo)) ilo=i
if(y(i).gt.y(ihi)) then
inhi=ihi
ihi=i
else if(y(i).gt.y(inhi)) then
if(i.ne.ihi) inhi=i
endif
13 continue
rtol=2.d0*abs(y(ihi)-y(ilo))/(abs(y(ihi))+abs(y(ilo)))
if (rtol.lt.ftol) then
swap=y(1)
y(1)=y(ilo)
y(ilo)=swap
do 14 n=1,ndim
swap=p(1,n)
p(1,n)=p(ilo,n)
p(ilo,n)=swap
14 continue
return
endif
if (iter.ge.ITMAX) then
write (*,*) 'ITMAX exceeded in amoeba'
return
endif
iter=iter+2
ytry=amotry(p,y,psum,mp,np,ndim,funk,ihi,-1.0d0,ndata,x,z,ymod,
& dyda,ma,ts,sig,a,ia,hkl)
if (ytry.le.y(ilo)) then
ytry=amotry(p,y,psum,mp,np,ndim,funk,ihi,2.0d0,ndata,x,z,ymod,
& dyda,ma,ts,sig,a,ia,hkl)
else if (ytry.ge.y(inhi)) then
ysave=y(ihi)
ytry=amotry(p,y,psum,mp,np,ndim,funk,ihi,0.5d0,ndata,x,z,ymod,
& dyda,ma,ts,sig,a,ia,hkl)
if (ytry.ge.ysave) then
do 16 i=1,ndim+1
if(i.ne.ilo)then
do 15 j=1,ndim
psum(j)=0.5d0*(p(i,j)+p(ilo,j))
p(i,j)=psum(j)
15 continue
call funk(ndata,x,z,psum,ymod,dyda,ma,ndim,ts,sig,loglik,
& i,a,ia,hkl)
y(i)=loglik
endif
16 continue
iter=iter+ndim
goto 1
endif
else
iter=iter-1
endif
goto 2
END
C (C) Copr. 1986-92 Numerical Recipes Software 0=M,173+9.
FUNCTION amotry(p,y,psum,mp,np,ndim,funk,ihi,fac,ndata,x,z,ymod,
& dyda,ma,ts,sig,a,ia,hkl)
implicit none
INTEGER ihi,mp,ndim,np,NMAX, MMAX, ma, ts(20000),ndata
PARAMETER (NMAX=20, MMAX=200)
REAL*8 amotry,fac,p(mp,np),psum(np),y(mp),x(20000),z(20000),
& ymod(20000)
real*8 dyda(MMAX), sig(20000),loglik
EXTERNAL funk
INTEGER j, ia(MMAX),hkl
REAL*8 fac1,fac2,ytry,ptry(ndim), a(MMAX)
fac1=(1.0d0-fac)/ndim
fac2=fac1-fac
do 11 j=1,ndim
ptry(j)=psum(j)*fac1-p(ihi,j)*fac2
11 continue
call funk(ndata,x,z,ptry,ymod,dyda,ma,ndim,ts,sig,loglik,ihi,
& a,ia,hkl)
ytry=loglik
C WRITE(*,*) loglik
if (ytry.lt.y(ihi)) then
y(ihi)=ytry
do 12 j=1,ndim
psum(j)=psum(j)-p(ihi,j)+ptry(j)
p(ihi,j)=ptry(j)
12 continue
endif
amotry=ytry
return
END
C (C) Copr. 1986-92 Numerical Recipes Software 0=M,173+9.
subroutine MA_J (a,ma,npl,m0,sini,mass,ap,hkl,gr_flag)
implicit none
real*8 m0,PI,TWOPI,THIRD,GMSUN,dm,MSUN
integer npl,ma,i,j,NPLMAX,hkl,gr_flag
parameter (NPLMAX=7)
real*8 sini,mm(NPLMAX),ecc,corr
real*8 a(ma),mass(NPLMAX),ap(NPLMAX),mpold(NPLMAX),mtotal
parameter (THIRD=1.d0/3.d0)
parameter (PI=3.14159265358979d0,TWOPI=2.d0*PI)
parameter (GMSUN=1.32712497d20,MSUN=1.32712497d20)
c*******G is set to unity, and s, m, kg are the units of time, length
c*******and mass, respectively.
do j = 1,npl
i = 6*(j-1)
mm(j) = 2.d0*PI/(a(i+2)*8.64d4)
enddo
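c mm(j) is the mean motion in rad/s (the period a(i+2) is in days,
c hence the factor of 8.64d4 seconds per day)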
do i = 0,npl-1
if (hkl.eq.0) then
ecc = a(6*i+3)
else
ecc = dsqrt(a(6*i+3)**2+a(6*i+4)**2) !! only for h, k
endif
mass(1) = m0
mpold(i+1) = 0.d0
101 continue
if (i.eq.0) then
mtotal = m0
mass(i+2) = a(6*i+1)*(TWOPI/mm(i+1)*(m0 + mpold(i+1))**2/
& (TWOPI*GMSUN))**THIRD*
& dsqrt(1.d0 - ecc**2)
else
mtotal = m0
do j = 0, i-1
mtotal = mtotal + mass(j+2)
enddo
mass(i+2) = a(6*i+1)*(TWOPI/mm(i+1)*(mtotal
& +mpold(i+1))**2/(TWOPI*GMSUN))**THIRD*
& dsqrt(1.d0 - ecc**2)
endif
dm = dabs(mass(i+2)-mpold(i+1))/mass(i+2)
mpold(i+1) = mass(i+2)
if (dm.gt.0) goto 101
ap(i+1) = (GMSUN*(mtotal + mass(i+2))*(1.d0/mm(i+1))
& **2)**THIRD
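c i.e. Kepler's third law: a = (G*Mtot/n**2)**(1/3)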
enddo
do i = 1,npl+1
mass(i) = mass(i)*MSUN
enddo
if(gr_flag.ne.0) then
do i = 1,npl
j = 6*(i-1)
call gr_corr(ap(i),a(j+3),mass(1),mass(i+1),i,corr,1.0d0)
a(j+6) = corr*365.25
enddo
endif
return
end
subroutine gr_corr(a,e,gmi,mass,n,corr,dt)
implicit none
real*8 a ,e, T, PI, c, GMSUN, AU, st_mass
integer n,NPLMAX
real*8 mass, corr,THIRD,gmi,dt,prec_frac
parameter (PI=3.14159265358979d0)
parameter (c = 299792458.0d0)
parameter (GMSUN=1.32712497d20, AU=1.49597892d13)
parameter (THIRD=1.d0/3.d0)
T = 2.0d0*PI * sqrt((a**3.0d0)/(gmi ) )
corr = (24.0d0 * (PI**3.0d0) * (a**2.0d0)) / ( (T**2.0d0) *
& (c**2.0d0)*(1.0d0-e**2.0d0) )
corr = corr/T
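c corr is the leading-order GR apsidal advance per orbit,
c 24*PI**3*a**2/(T**2*c**2*(1-e**2)), divided by T to give a rate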
return
end
***********************************************************************
c ORBEL_EHYBRID.F
***********************************************************************
* PURPOSE: Solves Kepler's eqn. e is ecc. m is mean anomaly.
*
* Input:
* e ==> eccentricity. (real scalar)
* m ==> mean anomaly. (real scalar)
* Returns:
* orbel_ehybrid ==> eccentric anomaly. (real scalar)
*
* ALGORITHM: For e < 0.18 uses fast routine ESOLMD
* For larger e but less than 0.8, uses EGET
* For e > 0.8 uses EHIE
* REMARKS: Only EHIE brings M and E into range (0,TWOPI)
* AUTHOR: M. Duncan
* DATE WRITTEN: May 25,1992.
* REVISIONS: 2/26/93 hfl
***********************************************************************
real*8 function orbel_ehybrid(e,m)
include 'swift_loglik_Jakub.inc'
c... Inputs Only:
real*8 e,m
c... Internals:
real*8 orbel_esolmd,orbel_eget,orbel_ehie
c----
c... Executable code
if(e .lt. 0.18d0) then
orbel_ehybrid = orbel_esolmd(e,m)
else
if( e .le. 0.8d0) then
orbel_ehybrid = orbel_eget(e,m)
else
orbel_ehybrid = orbel_ehie(e,m)
endif
endif
return
end ! orbel_ehybrid
c--------------------------------------------------------------------
***********************************************************************
c ORBEL_EHIE.F
***********************************************************************
* PURPOSE: Solves Kepler's eqn. e is ecc. m is mean anomaly.
*
* Input:
* e ==> eccentricity. (real scalar)
* m ==> mean anomaly. (real scalar)
* Returns:
* orbel_ehie ==> eccentric anomaly. (real scalar)
*
* ALGORITHM: Use Danby's quartic for 3 iterations.
* Eqn. is f(x) = x - e*sin(x+M). Note that
* E = x + M. First guess is very good for e near 1.
* Need to first get M between 0. and PI and use
* symmetry to return right answer if M between PI and 2PI
* REMARKS: Modifies M so that both E and M are in range (0,TWOPI)
* AUTHOR: M. Duncan
* DATE WRITTEN: May 25,1992.
* REVISIONS:
***********************************************************************
real*8 function orbel_ehie(e,m)
include 'swift_loglik_Jakub.inc'
c... Inputs Only:
real*8 e,m
c... Internals:
integer iflag,nper,niter,NMAX
real*8 dx,x,sa,ca,esa,eca,f,fp
parameter (NMAX = 3)
c----
c... Executable code
c In this section, bring M into the range (0,TWOPI) and if
c the result is greater than PI, solve for (TWOPI - M).
iflag = 0
nper = m/TWOPI
m = m - nper*TWOPI
if (m .lt. 0.d0) m = m + TWOPI
if (m.gt.PI) then
m = TWOPI - m
iflag = 1
endif
c Make a first guess that works well for e near 1.
x = (6.d0*m)**(1.d0/3.d0) - m
niter =0
c Iteration loop
do niter =1,NMAX
call orbel_scget(x + m,sa,ca)
esa = e*sa
eca = e*ca
f = x - esa
fp = 1.d0 -eca
dx = -f/fp
dx = -f/(fp + 0.5d0*dx*esa)
dx = -f/(fp + 0.5d0*dx*(esa+0.3333333333333333d0*eca*dx))
x = x + dx
enddo
orbel_ehie = m + x
if (iflag.eq.1) then
orbel_ehie = TWOPI - orbel_ehie
m = TWOPI - m
endif
return
end !orbel_ehie
c------------------------------------------------------------------
***********************************************************************
c ORBEL_SCGET.F
***********************************************************************
* PURPOSE: Given an angle, efficiently compute sin and cos.
*
* Input:
* angle ==> angle in radians (real scalar)
*
* Output:
* sx ==> sin(angle) (real scalar)
* cx ==> cos(angle) (real scalar)
*
* ALGORITHM: Obvious from the code
* REMARKS: The HP 700 series won't return correct answers for sin
* and cos if the angle is bigger than 3e7. We first reduce it
* to the range [0,2pi) and use the sqrt rather than cos (it's faster)
* BE SURE THE ANGLE IS IN RADIANS - NOT DEGREES!
* AUTHOR: M. Duncan.
* DATE WRITTEN: May 6, 1992.
* REVISIONS:
***********************************************************************
subroutine orbel_scget(angle,sx,cx)
include 'swift_loglik_Jakub.inc'
c... Inputs Only:
real*8 angle
c... Output:
real*8 sx,cx
c... Internals:
integer nper
real*8 x
real*8 PI3BY2
parameter(PI3BY2 = 1.5d0*PI)
c----
c... Executable code
nper = angle/TWOPI
x = angle - nper*TWOPI
if(x.lt.0.d0) then
x = x + TWOPI
endif
sx = sin(x)
cx= sqrt(1.d0 - sx*sx)
if( (x .gt. PIBY2) .and. (x .lt.PI3BY2)) then
cx = -cx
endif
return
end ! orbel_scget
c-------------------------------------------------------------------
***********************************************************************
c ORBEL_EGET.F
***********************************************************************
* PURPOSE: Solves Kepler's eqn. e is ecc. m is mean anomaly.
*
* Input:
* e ==> eccentricity. (real scalar)
* m ==> mean anomaly. (real scalar)
* Returns:
* orbel_eget ==> eccentric anomaly. (real scalar)
*
* ALGORITHM: Quartic convergence from Danby
* REMARKS: For results very near roundoff, give it M between
* 0 and 2*pi. One can condition M before calling EGET
* by calling my double precision function MOD2PI(M).
* This is not done within the routine to speed it up
* and because it works fine even for large M.
* AUTHOR: M. Duncan
* DATE WRITTEN: May 7, 1992.
* REVISIONS: May 21, 1992. Now have it go through EXACTLY two iterations
* with the premise that it will only be called if
* we have an ellipse with e between 0.15 and 0.8
***********************************************************************
real*8 function orbel_eget(e,m)
include 'swift_loglik_Jakub.inc'
c... Inputs Only:
real*8 e,m
c... Internals:
real*8 x,sm,cm,sx,cx
real*8 es,ec,f,fp,fpp,fppp,dx
c----
c... Executable code
c Function to solve Kepler's eqn for E (here called
c x) for given e and M. returns value of x.
c MAY 21 : FOR e < 0.18 use ESOLMD for speed and sufficient accuracy
c MAY 21 : FOR e > 0.8 use EHIE - this one may not converge fast enough.
call orbel_scget(m,sm,cm)
c begin with a guess accurate to order ecc**3
x = m + e*sm*( 1.d0 + e*( cm + e*( 1.d0 -1.5d0*sm*sm)))
c Go through one iteration for improved estimate
call orbel_scget(x,sx,cx)
es = e*sx
ec = e*cx
f = x - es - m
fp = 1.d0 - ec
fpp = es
fppp = ec
dx = -f/fp
dx = -f/(fp + dx*fpp/2.d0)
dx = -f/(fp + dx*fpp/2.d0 + dx*dx*fppp/6.d0)
orbel_eget = x + dx
c Do another iteration.
c For m between 0 and 2*pi this seems to be enough to
c get near roundoff error for eccentricities between 0 and 0.8
x = orbel_eget
call orbel_scget(x,sx,cx)
es = e*sx
ec = e*cx
f = x - es - m
fp = 1.d0 - ec
fpp = es
fppp = ec
dx = -f/fp
dx = -f/(fp + dx*fpp/2.d0)
dx = -f/(fp + dx*fpp/2.d0 + dx*dx*fppp/6.d0)
orbel_eget = x + dx
return
end ! orbel_eget
c---------------------------------------------------------------------
***********************************************************************
c ORBEL_ESOLMD.F
***********************************************************************
* PURPOSE: Solves Kepler's eqn. e is ecc. m is mean anomaly.
*
* Input:
* e ==> eccentricity. (real scalar)
* m ==> mean anomaly. (real scalar)
* Returns:
* orbel_esolmd ==> eccentric anomaly. (real scalar)
*
* ALGORITHM: Some sort of quartic convergence from Wisdom.
* REMARKS: ONLY GOOD FOR SMALL ECCENTRICITY SINCE IT ONLY
* ITERATES ONCE. (GOOD FOR PLANET CALCS.)
* ALSO DOES NOT PUT M OR E BETWEEN 0. AND 2*PI
* INCLUDES: needs SCGET.F
* AUTHOR: M. Duncan
* DATE WRITTEN: May 7, 1992.
* REVISIONS: 2/26/93 hfl
***********************************************************************
real*8 function orbel_esolmd(e,m)
include 'swift_loglik_Jakub.inc'
c... Inputs Only:
real*8 e,m
c... Internals:
real*8 x,sm,cm,sx,cx
real*8 es,ec,f,fp,fpp,fppp,dx
c----
c... Executable code
c... Function to solve Kepler's eqn for E (here called
c... x) for given e and M. returns value of x.
call orbel_scget(m,sm,cm)
x = m + e*sm*( 1.d0 + e*( cm + e*( 1.d0 -1.5d0*sm*sm)))
call orbel_scget(x,sx,cx)
es = e*sx
ec = e*cx
f = x - es - m
fp = 1.d0 - ec
fpp = es
fppp = ec
dx = -f/fp
dx = -f/(fp + dx*fpp/2.d0)
dx = -f/(fp + dx*fpp/2.d0 + dx*dx*fppp/6.d0)
orbel_esolmd = x + dx
return ! orbel_esolmd
end
c--------------------------------------------------------------------
|
{"hexsha": "66fc0500c701c814876a6a4a261e1c71d8ebacb1", "size": 40440, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/latest_f/kepfit_amoeba.f", "max_stars_repo_name": "Simske/exostriker", "max_stars_repo_head_hexsha": "587b0af4c9cadb46637a4ac61a5392a596e966b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2020-01-06T13:31:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T11:23:14.000Z", "max_issues_repo_path": "exostriker/source/latest_f/kepfit_amoeba.f", "max_issues_repo_name": "sai-33/Exostriker", "max_issues_repo_head_hexsha": "f59fa51c6bdce3a2ed51d6621fe42bfcd8c2846f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 67, "max_issues_repo_issues_event_min_datetime": "2019-11-30T14:45:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T20:26:06.000Z", "max_forks_repo_path": "exostriker/source/latest_f/kepfit_amoeba.f", "max_forks_repo_name": "sai-33/Exostriker", "max_forks_repo_head_hexsha": "f59fa51c6bdce3a2ed51d6621fe42bfcd8c2846f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-01-06T13:44:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T11:23:17.000Z", "avg_line_length": 30.0892857143, "max_line_length": 116, "alphanum_fraction": 0.4482443126, "num_tokens": 14563}
|
#!/usr/bin/env python
# Copyright (c) 2015 Andrew J. Hesford. All rights reserved.
# Restrictions are listed in the LICENSE file distributed with this package.
import numpy as np, os, sys, pyfftw, getopt
from fwht import fwht
from collections import defaultdict, OrderedDict
from functools import partial, reduce
import argparse
import multiprocessing
from habis.habiconf import matchfiles, buildpaths
from habis.formats import WaveformSet, loadkeymat, ArgparseLoader
from habis.sigtools import Window, WaveformMap, Waveform
from pycwp import process, mio, cutil
def specwin(nsamp, freqs=None):
# Ensure default None is encapsulated
if freqs is None: freqs = (None,)
# Find the spectral window
fs, fe, step = slice(*freqs[:2]).indices(nsamp)
if step != 1:
raise ValueError('Frequency range must specify consecutive values')
return Window(fs, end=fe, nonneg=True)
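# illustrative only (assuming habis.sigtools.Window(start, end=...) semantics):
# specwin(1024, (50, 200)) selects DFT bins 50 through 199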
def _r2c_datatype(idtype):
'''
Return the input and output types for a floating-point R2C DFT of an
input array with data type idtype.
'''
if np.issubdtype(idtype, np.complexfloating):
raise TypeError('Input data type must not be complex')
# All types except for 32-bit floats are converted to 64-bit floats
if np.issubdtype(idtype, np.dtype('float32')):
return np.dtype('complex64')
else:
return np.dtype('complex128')
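# i.e. float32 input yields complex64 spectra; float64 and any other real
# input type yield complex128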
def mpfhfft(nproc, *args, **kwargs):
'''
Subdivide, along receive channels, the work of fhfft() among nproc
processes to Hadamard-decode and Fourier transform the WaveformSet
stored in infile into a WaveformSet file that will be written to
outfile.
The positional and keyward arguments are passed to fhfft(). Any
'stride', 'start', 'lock', or 'event' kwargs will be overridden by
internally generated values.
'''
if nproc == 1:
# For a single process, don't spawn
fhfft(*args, **kwargs)
return
# Add the stride to the kwargs
kwargs['stride'] = nproc
# Create a multiprocessing lock and event to serialize output access
kwargs['lock'] = multiprocessing.Lock()
kwargs['event'] = multiprocessing.Event()
# Spawn the desired processes to perform FHFFT
with process.ProcessPool() as pool:
for i in range(nproc):
# Give each process a meaningful name
procname = process.procname(i)
# Note the starting index for this processor
kwargs['start'] = i
pool.addtask(target=fhfft, name=procname, args=args, kwargs=kwargs)
pool.start()
pool.wait()
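# a typical invocation (hypothetical argument names) might look like
# mpfhfft(4, 'scan.wvset', outmap, groupmap, tdout=True)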
def fhfft(infile, outfile, groupmap, **kwargs):
'''
For a real WaveformSet file infile, perform Hadamard decoding and then
a DFT of the temporal samples. The Hadamard decoding follows the
grouping configuration stored in groupmap, a map
(element index) -> (local Hadamard index, group number)
that defines Hadamard groups and must agree with the local group
configuration represented in the input. The resulting transformed
records will be stored in the output outfile. The nature of outfile
depends on the optional argument trmap (see below).
If trmap is not provided, all records will be written as a binary blob;
the outfile should be a single string providing the location of the
output. The output will have shape Ns x Nt x Nr, where Ns is the number
of output samples per waveform (as governed by the spectral or temporal
windows applied), Nt is the number of input transmit channels, and Nr
is the number of input receive channels.
If trmap is provided, outfile should be a one-to-one map from the keys
of trmap to output files. A WaveformMap object will be created for each
key in trmap and stored at the location indicated by the corresponding
value in outfile.
Output file(s) will be created or truncated.
Any TGC parameters in the input, accessible as wset.context['tgc'],
will be used to adjust the amplitudes of the waveforms prior to
applying Hadamard and Fourier transforms.
The kwargs contain optional values or default overrides:
* freqs (default: None): When not None, a sequence (start, end)
to be passed as slice(start, end) to bandpass filter the input after
Hadamard decoding.
* rolloff (default: None): When not None, an integer that defines the
half-width of a Hann window that rolls off the bandpass filter
specified in freqs. Ignored if freqs is not provided.
* nsamp (default: None): The length of the time window over which
waveforms are considered (and DFTs are performed), starting from
global time 0 (i.e., without consideration for input F2C). If None,
the value of nsamp in the input is used.
** NOTE: Because the time window always starts at global time 0,
a waveform with a data window (start, length) will be cropped when
(f2c + start + length) > nsamp, even if nsamp is the value encoded in
the file.
* tgcsamps (default: 16 [for integer datatypes] or 0 [else]): The
number of temporal samples to which a single TGC parameter applies.
Signals will be scaled by an appropriate section of the multiplier
mpy = (invtgc[:,np.newaxis] *
np.ones((ntgc, tgcsamps), dtype=np.float32)).ravel('C'),
where the values invtgc = 10.**(-wset.context['tgc'] / 20.) and
ntgc = len(wset.context['tgc']). The multiplier mpy is defined over a
window that starts at file sample 0 (global time wset.f2c).
Set tgcsamps to 0 (or None) to disable compensation. If the
WaveformSet includes TGC parameters and tgcsamps is a positive
integer, then len(mpy) must be at least long enough to encompass all
data windows encoded in the file.
* tgcmap (default: None): If provided, should be a two-column, rank-2
Numpy array (or compatible sequence) that relates nominal gains in
column 0 to actual gains in column 1. The rows of the array will be
used as control points in a piecewise linear interpolation (using
numpy.interp) that will map TGC parameters specified in the
WaveformSet file to actual gains. In other words, the TGC values
described above will be replaced with
tgc = np.interp(tgc, tgcmap[:,0], tgcmap[:,1])
whenever tgcmap is provided.
* tdout (default: False): Set to True to output time-domain waveforms
rather than spectral samples. Preserves input acquisition windows.
* signs (default: None): When not None, should be a sequence of length
wset.txgrps.size that specifies a 1 for any local Hadamard index
(corresponding to lines in the file) that should be negated, and 0
anywhere else. Ignored when an FHT is not performed.
* trmap (default: None): If provided, must be a map from a label
(referencing an output location in the map outfile) to a map from
receive indices to lists of transmit indices that, together, identify
transmit-receive pairs to extract from the input.
* start (default: 0) and stride (default: 1): For an input WaveformSet
wset, process receive channels in wset.rxidx[start::stride].
* lock (default: None): If not None, it should be a context manager
that is invoked to serialize writes to output.
* event (default: None): Only used when trmap is not provided. If not
None, event.set() and event.wait() are called to ensure the output
header is written to the binary-blob output before records are
appended. The value event.is_set() should be False prior to
execution.
'''
# Override acquisition window, if desired
nsamp = kwargs.pop('nsamp', None)
# Grab synchronization mechanisms
try: lock = kwargs.pop('lock')
except KeyError: lock = multiprocessing.Lock()
try: event = kwargs.pop('event')
except KeyError: event = multiprocessing.Event()
# Grab FFT and FHT switches and options
tdout = kwargs.pop('tdout', False)
freqs = kwargs.pop('freqs', None)
rolloff = kwargs.pop('rolloff', None)
dofft = (freqs is not None) or not tdout
if freqs is not None:
flo, fhi = freqs
if rolloff and not 0 < rolloff < (fhi - flo) // 2:
raise ValueError('Rolloff must be None or less than half bandwidth')
# Grab striding information
start = kwargs.pop('start', 0)
stride = kwargs.pop('stride', 1)
# Grab sign map information
signs = kwargs.pop('signs', None)
# Grab the number of samples per TGC value and an optional gain map
tgcsamps = kwargs.pop('tgcsamps', None)
tgcmap = kwargs.pop('tgcmap', None)
trmap = kwargs.pop('trmap', None)
if len(kwargs):
raise TypeError(f"Unrecognized keyword '{next(iter(kwargs))}'")
# Open the input and create a corresponding output
wset = WaveformSet.load(infile)
# Pull default sample count from input file
if nsamp is None: nsamp = wset.nsamp
elif wset.nsamp < nsamp: wset.nsamp = nsamp
# Handle TGC compensation if necessary
try: tgc = np.asarray(wset.context['tgc'], dtype=np.float32)
except (KeyError, AttributeError): tgc = np.array([], dtype=np.float32)
if tgcmap is not None:
# Make sure that the TGC map is sorted and interpolate
tgx, tgy = zip(*sorted((k, v) for k, v in tgcmap))
# TGC curves are always float32, regardless of tgcmap types
tgc = np.interp(tgc, tgx, tgy).astype(np.float32)
# Pick a suitable default value for tgcsamps
if tgcsamps is None:
tgcsamps = 16 if np.issubdtype(wset.dtype, np.integer) else 0
# Linearize, invert, and expand the TGC curves
tgc = ((10.**(-tgc[:,np.newaxis] / 20.) *
np.ones((len(tgc), tgcsamps), dtype=np.float32))).ravel('C')
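# tgc is now a per-sample inverse-gain curve, one multiplier per file
# sample, starting at file sample 0 (global time wset.f2c)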
# Figure out the data type of compensated waveforms
if len(tgc): itype = np.dtype(wset.dtype.type(0) * tgc.dtype.type(0))
else: itype = wset.dtype
# Make sure that the data type is always floating-point
if not np.issubdtype(itype, np.floating): itype = np.dtype('float64')
# Create a WaveformSet object to hold the ungrouped data
ftype = _r2c_datatype(itype)
otype = ftype if not tdout else itype
# Make sure the WaveformSet has a local configuration
try:
gcount, gsize = wset.txgrps
except TypeError:
raise ValueError('A valid Tx-group configuration is required')
if gsize < 1 or (gsize & (gsize - 1)):
raise ValueError('Hadamard length must be a positive power of 2')
# Validate local portion of the group map and assign
wset.groupmap = groupmap
if signs is not None:
# Ensure signs has values 0 or 1 in the right type
signs = np.asarray([1 - 2 * s for s in signs], dtype=itype)
if signs.ndim != 1 or len(signs) != gsize:
msg = f'Sign list must have shape ({wset.txgrps[1]},)'
raise ValueError(msg)
# Identify all FHTs represented by stored transmission indices
fhts = { }
for i in wset.txidx:
g, l = i // gsize, i % gsize
try: fhts[g].append(l)
except KeyError: fhts[g] = [l]
# Verify that all FHTs are complete
for g, ll in fhts.items():
if len(ll) != gsize:
raise ValueError(f'FHT group {g} is incomplete')
if any(i != j for i, j in enumerate(sorted(ll))):
raise ValueError(f'FHT group {g} has improper local indices')
# Map each FHT group to a list of row indices for the FHT
# and each element corresponding to an FHT output to row indices
gidx = lambda l, g: g * gsize + l
fhts = { g: [ wset.tx2row(gidx(l, g)) for l in range(gsize) ] for g in fhts }
invgroups = { (l, g): i for i, (l, g) in wset.groupmap.items() }
el2row = { invgroups[l, g]: wset.tx2row(gidx(l, g))
for g in fhts for l in range(gsize) }
# Create intermediate (FHT) and output (FHFFT) arrays
# FFT axis is contiguous for FFT performance
b = pyfftw.empty_aligned((wset.ntx, nsamp), dtype=itype, order='C')
if dofft:
# Create FFT output and a plan
cdim = (wset.ntx, nsamp // 2 + 1)
c = pyfftw.empty_aligned(cdim, dtype=ftype, order='C')
fwdfft = pyfftw.FFTW(b, c, axes=(1,), direction='FFTW_FORWARD')
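# the FFTW plan is built once and reused for every receive channel; calling
# fwdfft() runs the real-to-complex transform of b into c along axis 1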
# Create an inverse FFT plan for time-domain output
if tdout:
invfft = pyfftw.FFTW(c, b, axes=(1,), direction='FFTW_BACKWARD')
# Find the spectral window of interest
fswin = specwin(cdim[1], freqs)
# Try to build bandpass tails
if rolloff: tails = np.hanning(2 * int(rolloff))
else: tails = np.array([])
if trmap:
# Identify the subset of receive channels needed
allrx = reduce(set.union, (trm.keys() for trm in trmap.values()), set())
rxneeded = sorted(allrx.intersection(wset.rxidx))[start::stride]
else:
rxneeded = wset.rxidx[start::stride]
# In blob mode, the first write must create a header
with lock:
if not event.is_set():
# Create a sliced binary matrix output
windim = (nsamp if tdout else fswin.length, wset.ntx, wset.nrx)
mio.Slicer(outfile, dtype=otype, trunc=True, dim=windim)
event.set()
# Ensure the output header has been written
event.wait()
# Map receive channels to rows (slabs) in the output
rx2slab = dict((i, j) for (j, i) in enumerate(sorted(wset.rxidx)))
# Map transmit channels to decoded FHT rows
outrows = [r for (e,r) in sorted(el2row.items())]
outbin = mio.Slicer(outfile)
for rxc in rxneeded:
# Find the input window relative to 0 f2c
iwin = wset.getheader(rxc).win.shift(wset.f2c)
owin = (0, nsamp)
try:
# Find overlap of global input and output windows
ostart, istart, dlength = cutil.overlap(owin, iwin)
except TypeError:
# Default to 0-length windows at start of acquisition
iwin = Window(0, 0, nonneg=True)
owin = Window(0, 0, nonneg=True)
else:
# Convert input and output windows from global f2c to file f2c
iwin = Window(istart, dlength, nonneg=True)
owin = Window(ostart, dlength, nonneg=True)
# Read the data over the input window
data = wset.getrecord(rxc, window=iwin)[1]
# Clear the data array
b[:,:] = 0.
ws, we = owin.start, owin.end
if iwin.length and gsize > 1:
# Perform grouped Hadamard transforms with optional sign flips
for grp, rows in fhts.items():
# Ensure FHT axis is contiguous for performance
dblk = np.asfortranarray(data[rows,:])
b[rows,ws:we] = fwht(dblk, axes=0) / gsize
if signs is not None: b[rows,ws:we] *= signs[:,np.newaxis]
else: b[:,ws:we] = data
# Time-gain compensation, if necessary
if len(tgc) and iwin.length:
twin = (0, len(tgc))
try:
tstart, istart, dlength = cutil.overlap(twin, iwin)
if dlength != iwin.length: raise ValueError
except (TypeError, ValueError):
raise ValueError(f'TGC curve does not encompass data for channel {rxc}')
b[:,ws:we] *= tgc[np.newaxis,tstart:tstart+dlength]
if dofft:
fwdfft()
# Suppress content out of the band
c[:,:fswin.start] = 0.
c[:,fswin.end:] = 0.
# Bandpass filter the spectral samples
if len(tails) > 0:
ltails = len(tails) // 2
c[:,fswin.start:fswin.start+ltails] *= tails[np.newaxis,:ltails]
c[:,fswin.end-ltails:fswin.end] *= tails[np.newaxis,-ltails:]
# Revert to time-domain representation if necessary
if tdout: invfft()
if not trmap:
# Write the binary blob for this receive channel
orow = rx2slab[rxc]
with lock:
if tdout: outbin[orow] = b[outrows,:].T
else: outbin[orow] = c[outrows,fswin.start:fswin.end].T
# Nothing more to do in blob mode
continue
# Slice desired range from output data
if tdout:
dblock = b[:,ws:we]
dstart = ws
else:
dblock = c[:,fswin.start:fswin.end]
dstart = fswin.start
for label, trm in trmap.items():
# Pull tx list for this tier and rx channel, if possible
try: tl = trm[rxc]
except KeyError: tl = [ ]
if not len(tl): continue
# Collect all transmissions for this rx channel
wmap = WaveformMap()
for t in tl:
# Make sure transmission is represented in output
try: row = el2row[t]
except KeyError: continue
wave = Waveform(nsamp, dblock[row], dstart)
wmap[t, rxc] = wave
# Flush the waveform map to disk
with lock: wmap.store(outfile[label], append=True)
def in2out(infile, outpath, labels=None):
'''
Map a single input file into one or more output files.
If labels is None,
habis.habiconf.buildpaths([infile], outpath, 'fhfft.mat')[0]
is invoked and returned to produce a single output file.
Otherwise, buildpaths is called repeatedly as above, with the extension
'fhfft.mat' generally replaced by f'{l}.wmz' for each element l in the
collection or iterator labels. The output will be a dictionary mapping
each element of labels to the corresponding buildpaths output. As a
special case, if l is the empty string for any element in labels, the
extension will just be 'wmz' (i.e., the extension will be constructed
to avoid two dots with no interceding character).
This will pass along any errors raised by IOError or attempts to
iterate through labels.
'''
if not labels:
return buildpaths([infile], outpath, 'fhfft.mat')[0]
return { l: buildpaths([infile], outpath,
l and f'{l}.wmz' or 'wmz')[0] for l in labels }
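# illustrative sketch (hypothetical names): in2out('scan.wvset', outdir, ['t1'])
# would map 't1' to something like <outdir>/scan.t1.wmz, with the exact path
# chosen by habis.habiconf.buildpaths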
if __name__ == '__main__':
nptxtloader = partial(ArgparseLoader, np.loadtxt)
ikmloader = partial(ArgparseLoader, loadkeymat, scalar=False, dtype=np.uint32)
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS,
description='Filter, decode and descramble WaveformSet files')
parser.add_argument('-p', '--procs', type=int,
default=process.preferred_process_count(),
help='Use PROCS processes in parallel')
parser.add_argument('-t', '--tdout', action='store_true',
help='Produce time-domain, not spectral, output')
parser.add_argument('-f', '--freqs', metavar=('START', 'END'), type=int,
nargs=2, help='Spectral bandwidth of output in DFT bins')
parser.add_argument('-r', '--rolloff', type=int,
help='Frequency rolloff of output in DFT bins')
parser.add_argument('-n', '--nsamp', type=int,
help='Override length of acquisition window')
parser.add_argument('-o', '--outpath', default=None,
help='Store output in OUTPATH (default: alongside input)')
parser.add_argument('-s', '--signs', type=nptxtloader(dtype=bool),
help='List of signs applied before Hadamard decoding')
parser.add_argument('-l', '--tgc-length', dest='tgcsamps', type=int,
help='Number of TGC samples per TGC value in a WaveformSet')
parser.add_argument('-m', '--tgc-map',
dest='tgcmap', type=nptxtloader(dtype='float32'),
help='Two-column file mapping nominal to actual gain')
parser.add_argument('-T', '--trmap', action='append', type=ikmloader(),
help='T-R map of measurement pairs to store (multiples OK)')
parser.add_argument('-L', '--trlabel', action='append',
help='Label for provided TR map (one per -T flag)')
parser.add_argument('groupmap', type=ikmloader(),
help='Global transmit groupmap to assign to input files')
parser.add_argument('input', nargs='+', help='List of input files to process')
args = parser.parse_args(sys.argv[1:])
# Special case: handle the T-R maps
trmap = getattr(args, 'trmap', [])
try:
trlab = args.trlabel
delattr(args, 'trlabel')
except AttributeError: trlab = [ ]
if len(trmap) != len(trlab):
sys.exit('ERROR: must specify same number of -L and -T arguments')
if len(trlab) != len(set(trlab)):
sys.exit('ERROR: all labels provided with -L must be unique')
args.trmap = dict(zip(trlab, trmap))
# Convert args namespace to kwargs
kwargs = { }
# Build the keyword arguments
attrs = { d for d in dir(args) if not d.startswith('_') }
for attr in attrs.difference({'procs', 'outpath', 'input'}):
kwargs[attr] = getattr(args, attr)
try: infiles = matchfiles(args.input)
except IOError as e: sys.exit(f'ERROR: {e}')
for infile in infiles:
print('Processing data file', infile)
try: outfile = in2out(infile, args.outpath, args.trmap.keys())
except IOError as e: sys.exit(f'ERROR: {e}')
mpfhfft(args.procs, infile, outfile, **kwargs)
|
{"hexsha": "166be7c15f66a7558f923a566a7a6173757a164c", "size": 19358, "ext": "py", "lang": "Python", "max_stars_repo_path": "shell/fhfft.py", "max_stars_repo_name": "ahesford/habis-tools", "max_stars_repo_head_hexsha": "82f82b99fa18452697404100edcf83bd03d35abc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shell/fhfft.py", "max_issues_repo_name": "ahesford/habis-tools", "max_issues_repo_head_hexsha": "82f82b99fa18452697404100edcf83bd03d35abc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shell/fhfft.py", "max_forks_repo_name": "ahesford/habis-tools", "max_forks_repo_head_hexsha": "82f82b99fa18452697404100edcf83bd03d35abc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1963636364, "max_line_length": 79, "alphanum_fraction": 0.7108172332, "include": true, "reason": "import numpy", "num_tokens": 5453}
|
# Regular Python Libraries
import cv2, os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # get rid of any TF warning messages
import numpy as np
from PIL import Image
# Python GUI
import PySimpleGUI as sg
# Model Libraries
import tensorflow as tf
# Multi Image Classifier Library
from Multi_Classification.Multi_Image_Classification import Multi_Image_Classification as img_classifier
# Binary Image Classifier Library
from Binary_Classification.Image_Classification import Image_Classification as bin_classifier
'''
Application Class
Description:
1. Contains all the logic behind the application.
a. Take a picture of the object.
b. Pick the model you want to use.
c. Process the image to identify the image.
'''
class Application:
classifier = None # default is None, but in the application the user chooses between binary and categorical
model_type = None # default is None but this identifies the model if it is a binary model or categorical
model = None # default is None, but this is changed once the user selects the model
# finds all the models and labels in the model directory
def pick_model(self):
models = [] # stores all the models and labels in models list
# iterate the models directory
for root, dirs, files in os.walk("./Models"):
# iterate all the files in the directory
for name in files:
models.append(name) # append the file to show afterward
return models # return the models at the end
# main logic behind the application
def run_application(self):
models = self.pick_model() # get the list of all models and lists
# define the window layout => video, image, classification on the bottom
layout = [[sg.Image(filename='', key='_IMAGE_'), sg.Image(r'', key='IMAGE')],
[sg.Text('Classification: ', key='label', font='Courier 9', size=(50, 1))],
[sg.Listbox(values=models, size=(30, 6), key='LIST'),
sg.Button('Take a Picture'), sg.Button('Process Image')]]
# create the window and show it without the plot
window = sg.Window('Pollution Detector', layout, location=(100, 100))
# --- Event LOOP Read and display frames, operate the GUI --- #
# Setup the OpenCV capture device (webcam)
cap = cv2.VideoCapture(0)
# loop indefinitely until the user closes the application
var_stop = 0 # variable to stop when model is chosen
img_num = 0 # keep track of the images that you have taken pictures of in the application
while True:
# define these values for when there are certain events that occur in the application
event, values = window.Read(timeout=20, timeout_key='timeout')
# if there is something clicked and nothing was picked before, get the name of the model
if len(window.FindElement('LIST').get()) != 0 and var_stop != 1:
self.model = tf.keras.models.load_model('./Models/'+window.FindElement('LIST').get()[0]) # store the model in class' model
# identify the type of model
model_used = window.FindElement('LIST').get()[0] # gets the name of the model being used
model_tokenized = model_used.split('_') # splits the path into [Name_1, Name_2, ..., <Categorical or Binary>.h5]
get_type = model_tokenized[len(model_tokenized)-1].split('.h5') # [<Categorical or Binary>, '']
self.model_type = get_type[0] # get the model from the first element in the list
var_stop += 1 # increment once to stop going into this conditional statement
# if there is no more events the application isn't running
if event is None:
break
# if the user clicks the 'Take a Picture' button, capture and save the current frame
if event == 'Take a Picture':
cv2.imwrite('./App_Data/Image_{}.png'.format(img_num), frame) # write the image to the directory
img = cv2.imencode('.png', frame)[1].tobytes() # re-encode the captured frame (OpenCV cannot PNG-encode float32 data)
window.FindElement('IMAGE').Update(data=img) # replaces the image and add it to the GUI according to the key
img_num += 1 # increment the img_num so each photo you take is distinct
# if the user clicks 'Process Image' button then classify the image that was just taken
if event == 'Process Image':
# ----------------- First get the labels used previously --------------------------------------------- #
labels_path = window.FindElement('LIST').get()[0] # get the labels path from the combobox
f = open("./Models/{}".format(labels_path)) # open the file
labels = f.read().splitlines() # get the list of labels from the file
f.close() # close the file after your done to save memory
if self.model_type == 'categorical':
self.classifier = img_classifier(False, labels, (200, 200), 10, 10) # create the categorical classifier object
self.classifier.model = self.model # store the model into the object
app_data_labels, app_data_images = self.classifier.set_data(directory_path='./App_Data') # get the labels and images from the app_data which should be empty afterwards
classification = self.classifier.classify_image(image=app_data_images, model=self.classifier.model) # get the classification
window.FindElement('label').Update(value="Classification: {}".format(classification)) # write the label for the user to see
elif self.model_type == 'binary':
self.classifier = bin_classifier(200, True, False, labels, 10) # create the binary classification object
self.classifier.model = self.model # store the model into the model object
app_data_labels, app_data_images = self.classifier.set_data(directory_path='./App_Data') # get the labels and images from the app_data which should be empty afterwards
classification_num = self.classifier.model.predict(app_data_images[0::1]) # get the number for classification
predicted_label = labels[np.argmax(classification_num)] # get the predicted label
window.FindElement('label').Update(value="Classification: {}".format(predicted_label)) # write the label for the user to see
# Read image from capture device (camera)
ret, frame = cap.read()
# Convert the image to PNG Bytes
imgbytes = cv2.imencode('.png', frame)[1].tobytes()
# Change the Image Element to show the new image
window.FindElement('_IMAGE_').Update(data=imgbytes)
app = Application()
app.run_application()
|
{"hexsha": "d963fd96da1e77a66dfd4d8e357703d68bcb7138", "size": 7012, "ext": "py", "lang": "Python", "max_stars_repo_path": "Application.py", "max_stars_repo_name": "KKanda900/UW-Pollution-Detector", "max_stars_repo_head_hexsha": "5caffb41a7f38bf003bd7e8c37b884fd30b645f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Application.py", "max_issues_repo_name": "KKanda900/UW-Pollution-Detector", "max_issues_repo_head_hexsha": "5caffb41a7f38bf003bd7e8c37b884fd30b645f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Application.py", "max_forks_repo_name": "KKanda900/UW-Pollution-Detector", "max_forks_repo_head_hexsha": "5caffb41a7f38bf003bd7e8c37b884fd30b645f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 59.4237288136, "max_line_length": 273, "alphanum_fraction": 0.6493154592, "include": true, "reason": "import numpy", "num_tokens": 1464}
|
import matplotlib.pyplot as plt
import numpy.random as rnd
from matplotlib.patches import Ellipse
NUM = 250
ells = [Ellipse(xy=rnd.rand(2)*10, width=rnd.rand(), height=rnd.rand(), angle=rnd.rand()*360)
for i in range(NUM)]
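# each ellipse gets a random center in [0, 10) x [0, 10), random axes in
# (0, 1) and a random orientation in degrees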
fig = plt.figure(0)
ax = fig.add_subplot(111, aspect='equal')
for e in ells:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(rnd.rand())
e.set_facecolor(rnd.rand(3))
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
plt.show()
|
{"hexsha": "d77e06fa86733c3b6164292916851025cf9ee6e3", "size": 472, "ext": "py", "lang": "Python", "max_stars_repo_path": "matplotlib_examples/examples_src/pylab_examples/ellipse_demo.py", "max_stars_repo_name": "xzlmark/webspider", "max_stars_repo_head_hexsha": "133c620c65aa45abea1718b0dada09618c2115bf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-09T02:35:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-27T17:00:21.000Z", "max_issues_repo_path": "matplotlib_examples/examples_src/pylab_examples/ellipse_demo.py", "max_issues_repo_name": "colorworlds/webspider", "max_issues_repo_head_hexsha": "133c620c65aa45abea1718b0dada09618c2115bf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matplotlib_examples/examples_src/pylab_examples/ellipse_demo.py", "max_forks_repo_name": "colorworlds/webspider", "max_forks_repo_head_hexsha": "133c620c65aa45abea1718b0dada09618c2115bf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-09T02:35:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-09T02:35:08.000Z", "avg_line_length": 21.4545454545, "max_line_length": 93, "alphanum_fraction": 0.6843220339, "include": true, "reason": "import numpy", "num_tokens": 142}
|
# usage:
#   python FractionResevoirPrep.py num_files h5_filename... update...
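# Example (hypothetical keyname-packed filenames and update numbers):
#   python FractionResevoirPrep.py 2 \
#       'id=1+seed=1+_source_hash=abc123+ext=.h5' \
#       'id=2+seed=2+_source_hash=abc123+ext=.h5' \
#       100 200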
import numpy as np
import h5py
import sys
import os
from tqdm import tqdm
import pandas as pd
from keyname import keyname as kn
from fileshash import fileshash as fsh
import re
from collections import Counter, defaultdict
from joblib import delayed, Parallel
import json
num_files = int(sys.argv[1])
filenames = sys.argv[2:num_files+2]
updates = [int(v) for v in sys.argv[num_files+2:]]
# check all data is from same software source
assert len({kn.unpack(filename)['_source_hash'] for filename in filenames}) == 1
def FractionResevoir(filename):
file = h5py.File(filename, 'r')
nlev = int(file.attrs.get('NLEV'))
res = defaultdict(
lambda: [
0,
0,
]
)
for update in tqdm(updates):
chans = np.array(
file['Channel']['lev_'+str(nlev-1)]['upd_'+str(update)]
).flatten()
lives = np.array(
file['Live']['upd_'+str(update)]
).flatten()
stocks = np.array(
file['Stockpile']['upd_'+str(update)]
).flatten()
for i in range(len(chans)):
if lives[i]:
res[chans[i]][0] += 1
res[chans[i]][1] += (stocks[i] >= 1.0)
    result = [
        resevoir / pop
        for pop, resevoir in res.values()
        if pop >= 9
    ]
    file.close()  # close the hdf5 handle before returning
    return result
def SafeFractionResevoir(filename):
try:
return FractionResevoir(filename)
except Exception as e:
print("warning: corrupt or incomplete data file... skipping")
print(" ", e)
return None
print("num files:" , len(filenames))
outfile = kn.pack({
'title' : 'fractionresevoir',
'_data_hathash_hash' : fsh.FilesHash().hash_files(filenames),
'_script_fullcat_hash' : fsh.FilesHash(
file_parcel="full_parcel",
files_join="cat_join"
).hash_files([sys.argv[0]]),
    '_source_hash' : kn.unpack(filenames[0])['_source_hash'],
'ext' : '.csv'
})
pd.DataFrame.from_dict([
{
'Genotype' : (
'Wild Type' if 'id=1' in filename
else 'Messaging Knockout' if 'id=2' in filename
else None
),
'Seed' : kn.unpack(filename)['seed'],
'Fraction Resevoir': entry,
}
    for filename, res in zip(
        filenames,
        Parallel(n_jobs=-1)(
            delayed(SafeFractionResevoir)(filename)
            for filename in tqdm(filenames)
        )
    )
    if res is not None  # actually skip files SafeFractionResevoir flagged as corrupt
    for entry in res
]).to_csv(outfile, index=False)
print('Output saved to', outfile)
|
{"hexsha": "9227ceffb9735d22693de8acb11db9f33cb70199", "size": 2597, "ext": "py", "lang": "Python", "max_stars_repo_path": "old/script/FractionResevoirPrep.py", "max_stars_repo_name": "schregardusc/dishtiny", "max_stars_repo_head_hexsha": "b0b1841a457a955fa4c22f36a050d91f12484f9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-12T23:53:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-12T23:53:55.000Z", "max_issues_repo_path": "old/script/FractionResevoirPrep.py", "max_issues_repo_name": "schregardusc/dishtiny", "max_issues_repo_head_hexsha": "b0b1841a457a955fa4c22f36a050d91f12484f9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "old/script/FractionResevoirPrep.py", "max_forks_repo_name": "schregardusc/dishtiny", "max_forks_repo_head_hexsha": "b0b1841a457a955fa4c22f36a050d91f12484f9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.97, "max_line_length": 80, "alphanum_fraction": 0.5745090489, "include": true, "reason": "import numpy", "num_tokens": 647}
|
// Copyright (C) 2001 Jeremy Siek, Douglas Gregor, Brian Osman
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GRAPH_ISOMORPHISM_HPP
#define BOOST_GRAPH_ISOMORPHISM_HPP
#include <utility>
#include <vector>
#include <iterator>
#include <algorithm>
#include <boost/config.hpp>
#include <boost/graph/depth_first_search.hpp>
#include <boost/utility.hpp>
#include <boost/detail/algorithm.hpp>
#include <boost/pending/indirect_cmp.hpp> // for make_indirect_pmap
#ifndef BOOST_GRAPH_ITERATION_MACROS_HPP
#define BOOST_ISO_INCLUDED_ITER_MACROS // local macro, see bottom of file
#include <boost/graph/iteration_macros.hpp>
#endif
namespace boost {
namespace detail {
template <typename Graph1, typename Graph2, typename IsoMapping,
typename Invariant1, typename Invariant2,
typename IndexMap1, typename IndexMap2>
class isomorphism_algo
{
typedef typename graph_traits<Graph1>::vertex_descriptor vertex1_t;
typedef typename graph_traits<Graph2>::vertex_descriptor vertex2_t;
typedef typename graph_traits<Graph1>::edge_descriptor edge1_t;
typedef typename graph_traits<Graph1>::vertices_size_type size_type;
typedef typename Invariant1::result_type invar1_value;
typedef typename Invariant2::result_type invar2_value;
const Graph1& G1;
const Graph2& G2;
IsoMapping f;
Invariant1 invariant1;
Invariant2 invariant2;
std::size_t max_invariant;
IndexMap1 index_map1;
IndexMap2 index_map2;
std::vector<vertex1_t> dfs_vertices;
typedef typename std::vector<vertex1_t>::iterator vertex_iter;
std::vector<int> dfs_num_vec;
typedef safe_iterator_property_map<typename std::vector<int>::iterator,
IndexMap1
#ifdef BOOST_NO_STD_ITERATOR_TRAITS
, int, int&
#endif /* BOOST_NO_STD_ITERATOR_TRAITS */
> DFSNumMap;
DFSNumMap dfs_num;
std::vector<edge1_t> ordered_edges;
typedef typename std::vector<edge1_t>::iterator edge_iter;
std::vector<char> in_S_vec;
typedef safe_iterator_property_map<typename std::vector<char>::iterator,
IndexMap2
#ifdef BOOST_NO_STD_ITERATOR_TRAITS
, char, char&
#endif /* BOOST_NO_STD_ITERATOR_TRAITS */
> InSMap;
InSMap in_S;
int num_edges_on_k;
friend struct compare_multiplicity;
struct compare_multiplicity
{
compare_multiplicity(Invariant1 invariant1, size_type* multiplicity)
: invariant1(invariant1), multiplicity(multiplicity) { }
bool operator()(const vertex1_t& x, const vertex1_t& y) const {
return multiplicity[invariant1(x)] < multiplicity[invariant1(y)];
}
Invariant1 invariant1;
size_type* multiplicity;
};
struct record_dfs_order : default_dfs_visitor
{
record_dfs_order(std::vector<vertex1_t>& v, std::vector<edge1_t>& e)
: vertices(v), edges(e) { }
void discover_vertex(vertex1_t v, const Graph1&) const {
vertices.push_back(v);
}
void examine_edge(edge1_t e, const Graph1& G1) const {
edges.push_back(e);
}
std::vector<vertex1_t>& vertices;
std::vector<edge1_t>& edges;
};
struct edge_cmp {
edge_cmp(const Graph1& G1, DFSNumMap dfs_num)
: G1(G1), dfs_num(dfs_num) { }
bool operator()(const edge1_t& e1, const edge1_t& e2) const {
using namespace std;
int u1 = dfs_num[source(e1,G1)], v1 = dfs_num[target(e1,G1)];
int u2 = dfs_num[source(e2,G1)], v2 = dfs_num[target(e2,G1)];
int m1 = (max)(u1, v1);
int m2 = (max)(u2, v2);
// lexicographical comparison
return std::make_pair(m1, std::make_pair(u1, v1))
< std::make_pair(m2, std::make_pair(u2, v2));
}
const Graph1& G1;
DFSNumMap dfs_num;
};
public:
isomorphism_algo(const Graph1& G1, const Graph2& G2, IsoMapping f,
Invariant1 invariant1, Invariant2 invariant2, std::size_t max_invariant,
IndexMap1 index_map1, IndexMap2 index_map2)
: G1(G1), G2(G2), f(f), invariant1(invariant1), invariant2(invariant2),
max_invariant(max_invariant),
index_map1(index_map1), index_map2(index_map2)
{
in_S_vec.resize(num_vertices(G1));
in_S = make_safe_iterator_property_map
(in_S_vec.begin(), in_S_vec.size(), index_map2
#ifdef BOOST_NO_STD_ITERATOR_TRAITS
, in_S_vec.front()
#endif /* BOOST_NO_STD_ITERATOR_TRAITS */
);
}
bool test_isomorphism()
{
{
std::vector<invar1_value> invar1_array;
BGL_FORALL_VERTICES_T(v, G1, Graph1)
invar1_array.push_back(invariant1(v));
sort(invar1_array);
std::vector<invar2_value> invar2_array;
BGL_FORALL_VERTICES_T(v, G2, Graph2)
invar2_array.push_back(invariant2(v));
sort(invar2_array);
if (! equal(invar1_array, invar2_array))
return false;
}
std::vector<vertex1_t> V_mult;
BGL_FORALL_VERTICES_T(v, G1, Graph1)
V_mult.push_back(v);
{
std::vector<size_type> multiplicity(max_invariant, 0);
BGL_FORALL_VERTICES_T(v, G1, Graph1)
++multiplicity[invariant1(v)];
sort(V_mult, compare_multiplicity(invariant1, &multiplicity[0]));
}
std::vector<default_color_type> color_vec(num_vertices(G1));
safe_iterator_property_map<std::vector<default_color_type>::iterator,
IndexMap1
#ifdef BOOST_NO_STD_ITERATOR_TRAITS
, default_color_type, default_color_type&
#endif /* BOOST_NO_STD_ITERATOR_TRAITS */
>
color_map(color_vec.begin(), color_vec.size(), index_map1);
record_dfs_order dfs_visitor(dfs_vertices, ordered_edges);
typedef color_traits<default_color_type> Color;
for (vertex_iter u = V_mult.begin(); u != V_mult.end(); ++u) {
if (color_map[*u] == Color::white()) {
dfs_visitor.start_vertex(*u, G1);
depth_first_visit(G1, *u, dfs_visitor, color_map);
}
}
// Create the dfs_num array and dfs_num_map
dfs_num_vec.resize(num_vertices(G1));
dfs_num = make_safe_iterator_property_map(dfs_num_vec.begin(),
dfs_num_vec.size(),
index_map1
#ifdef BOOST_NO_STD_ITERATOR_TRAITS
, dfs_num_vec.front()
#endif /* BOOST_NO_STD_ITERATOR_TRAITS */
);
size_type n = 0;
for (vertex_iter v = dfs_vertices.begin(); v != dfs_vertices.end(); ++v)
dfs_num[*v] = n++;
sort(ordered_edges, edge_cmp(G1, dfs_num));
int dfs_num_k = -1;
return this->match(ordered_edges.begin(), dfs_num_k);
}
private:
bool match(edge_iter iter, int dfs_num_k)
{
if (iter != ordered_edges.end()) {
      vertex1_t i = source(*iter, G1), j = target(*iter, G1); // *iter is an edge of G1
if (dfs_num[i] > dfs_num_k) {
vertex1_t kp1 = dfs_vertices[dfs_num_k + 1];
BGL_FORALL_VERTICES_T(u, G2, Graph2) {
if (invariant1(kp1) == invariant2(u) && in_S[u] == false) {
f[kp1] = u;
in_S[u] = true;
num_edges_on_k = 0;
if (match(iter, dfs_num_k + 1))
#if 0
// dwa 2003/7/11 -- this *HAS* to be a bug!
;
#endif
return true;
in_S[u] = false;
}
}
}
else if (dfs_num[j] > dfs_num_k) {
vertex1_t k = dfs_vertices[dfs_num_k];
num_edges_on_k -=
count_if(adjacent_vertices(f[k], G2), make_indirect_pmap(in_S));
for (int jj = 0; jj < dfs_num_k; ++jj) {
vertex1_t j = dfs_vertices[jj];
num_edges_on_k -= count(adjacent_vertices(f[j], G2), f[k]);
}
if (num_edges_on_k != 0)
return false;
BGL_FORALL_ADJ_T(f[i], v, G2, Graph2)
if (invariant2(v) == invariant1(j) && in_S[v] == false) {
f[j] = v;
in_S[v] = true;
num_edges_on_k = 1;
BOOST_USING_STD_MAX();
int next_k = max BOOST_PREVENT_MACRO_SUBSTITUTION(dfs_num_k, max BOOST_PREVENT_MACRO_SUBSTITUTION(dfs_num[i], dfs_num[j]));
if (match(boost::next(iter), next_k))
return true;
in_S[v] = false;
}
}
else {
if (container_contains(adjacent_vertices(f[i], G2), f[j])) {
++num_edges_on_k;
if (match(boost::next(iter), dfs_num_k))
return true;
}
}
} else
return true;
return false;
}
};
template <typename Graph, typename InDegreeMap>
void compute_in_degree(const Graph& g, InDegreeMap in_degree_map)
{
BGL_FORALL_VERTICES_T(v, g, Graph)
put(in_degree_map, v, 0);
BGL_FORALL_VERTICES_T(u, g, Graph)
BGL_FORALL_ADJ_T(u, v, g, Graph)
put(in_degree_map, v, get(in_degree_map, v) + 1);
}
} // namespace detail
template <typename InDegreeMap, typename Graph>
class degree_vertex_invariant
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_t;
typedef typename graph_traits<Graph>::degree_size_type size_type;
public:
typedef vertex_t argument_type;
typedef size_type result_type;
degree_vertex_invariant(const InDegreeMap& in_degree_map, const Graph& g)
: m_in_degree_map(in_degree_map), m_g(g) { }
size_type operator()(vertex_t v) const {
return (num_vertices(m_g) + 1) * out_degree(v, m_g)
+ get(m_in_degree_map, v);
}
// The largest possible vertex invariant number
size_type max BOOST_PREVENT_MACRO_SUBSTITUTION () const {
return num_vertices(m_g) * num_vertices(m_g) + num_vertices(m_g);
}
private:
InDegreeMap m_in_degree_map;
const Graph& m_g;
};
template <typename Graph1, typename Graph2, typename IsoMapping,
typename Invariant1, typename Invariant2,
typename IndexMap1, typename IndexMap2>
bool isomorphism(const Graph1& G1, const Graph2& G2, IsoMapping f,
Invariant1 invariant1, Invariant2 invariant2,
std::size_t max_invariant,
IndexMap1 index_map1, IndexMap2 index_map2)
{
// Graph requirements
function_requires< VertexListGraphConcept<Graph1> >();
function_requires< EdgeListGraphConcept<Graph1> >();
function_requires< VertexListGraphConcept<Graph2> >();
function_requires< BidirectionalGraphConcept<Graph2> >();
typedef typename graph_traits<Graph1>::vertex_descriptor vertex1_t;
typedef typename graph_traits<Graph2>::vertex_descriptor vertex2_t;
typedef typename graph_traits<Graph1>::vertices_size_type size_type;
// Vertex invariant requirement
function_requires< AdaptableUnaryFunctionConcept<Invariant1,
size_type, vertex1_t> >();
function_requires< AdaptableUnaryFunctionConcept<Invariant2,
size_type, vertex2_t> >();
// Property map requirements
function_requires< ReadWritePropertyMapConcept<IsoMapping, vertex1_t> >();
typedef typename property_traits<IsoMapping>::value_type IsoMappingValue;
BOOST_STATIC_ASSERT((is_same<IsoMappingValue, vertex2_t>::value));
function_requires< ReadablePropertyMapConcept<IndexMap1, vertex1_t> >();
typedef typename property_traits<IndexMap1>::value_type IndexMap1Value;
BOOST_STATIC_ASSERT((is_convertible<IndexMap1Value, size_type>::value));
function_requires< ReadablePropertyMapConcept<IndexMap2, vertex2_t> >();
typedef typename property_traits<IndexMap2>::value_type IndexMap2Value;
BOOST_STATIC_ASSERT((is_convertible<IndexMap2Value, size_type>::value));
if (num_vertices(G1) != num_vertices(G2))
return false;
if (num_vertices(G1) == 0 && num_vertices(G2) == 0)
return true;
detail::isomorphism_algo<Graph1, Graph2, IsoMapping, Invariant1,
Invariant2, IndexMap1, IndexMap2>
algo(G1, G2, f, invariant1, invariant2, max_invariant,
index_map1, index_map2);
return algo.test_isomorphism();
}
namespace detail {
template <typename Graph1, typename Graph2,
typename IsoMapping,
typename IndexMap1, typename IndexMap2,
typename P, typename T, typename R>
bool isomorphism_impl(const Graph1& G1, const Graph2& G2,
IsoMapping f, IndexMap1 index_map1, IndexMap2 index_map2,
const bgl_named_params<P,T,R>& params)
{
std::vector<std::size_t> in_degree1_vec(num_vertices(G1));
typedef safe_iterator_property_map<std::vector<std::size_t>::iterator,
IndexMap1
#ifdef BOOST_NO_STD_ITERATOR_TRAITS
, std::size_t, std::size_t&
#endif /* BOOST_NO_STD_ITERATOR_TRAITS */
> InDeg1;
InDeg1 in_degree1(in_degree1_vec.begin(), in_degree1_vec.size(), index_map1);
compute_in_degree(G1, in_degree1);
std::vector<std::size_t> in_degree2_vec(num_vertices(G2));
typedef safe_iterator_property_map<std::vector<std::size_t>::iterator,
IndexMap2
#ifdef BOOST_NO_STD_ITERATOR_TRAITS
, std::size_t, std::size_t&
#endif /* BOOST_NO_STD_ITERATOR_TRAITS */
> InDeg2;
InDeg2 in_degree2(in_degree2_vec.begin(), in_degree2_vec.size(), index_map2);
compute_in_degree(G2, in_degree2);
degree_vertex_invariant<InDeg1, Graph1> invariant1(in_degree1, G1);
degree_vertex_invariant<InDeg2, Graph2> invariant2(in_degree2, G2);
return isomorphism(G1, G2, f,
choose_param(get_param(params, vertex_invariant1_t()), invariant1),
choose_param(get_param(params, vertex_invariant2_t()), invariant2),
choose_param(get_param(params, vertex_max_invariant_t()), (invariant2.max)()),
index_map1, index_map2
);
}
} // namespace detail
// Named parameter interface
template <typename Graph1, typename Graph2, class P, class T, class R>
bool isomorphism(const Graph1& g1,
const Graph2& g2,
const bgl_named_params<P,T,R>& params)
{
typedef typename graph_traits<Graph2>::vertex_descriptor vertex2_t;
typename std::vector<vertex2_t>::size_type n = num_vertices(g1);
std::vector<vertex2_t> f(n);
return detail::isomorphism_impl
(g1, g2,
choose_param(get_param(params, vertex_isomorphism_t()),
make_safe_iterator_property_map(f.begin(), f.size(),
choose_const_pmap(get_param(params, vertex_index1),
g1, vertex_index), vertex2_t())),
choose_const_pmap(get_param(params, vertex_index1), g1, vertex_index),
choose_const_pmap(get_param(params, vertex_index2), g2, vertex_index),
params
);
}
// All defaults interface
template <typename Graph1, typename Graph2>
bool isomorphism(const Graph1& g1, const Graph2& g2)
{
return isomorphism(g1, g2,
bgl_named_params<int, buffer_param_t>(0));// bogus named param
}
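// A minimal usage sketch for the all-defaults interface above (hypothetical
// graphs, not part of this header); note Graph2 must model BidirectionalGraph:
//
//   typedef boost::adjacency_list<boost::vecS, boost::vecS,
//                                 boost::bidirectionalS> graph_t;
//   graph_t g1(3), g2(3);
//   boost::add_edge(0, 1, g1); boost::add_edge(1, 2, g1);
//   boost::add_edge(1, 2, g2); boost::add_edge(2, 0, g2);
//   bool iso = boost::isomorphism(g1, g2);  // true: both are directed 2-edge paths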
// Verify that the given mapping iso_map from the vertices of g1 to the
// vertices of g2 describes an isomorphism.
// Note: this could be made much faster by specializing based on the graph
// concepts modeled, but since we're verifying an O(n^(lg n)) algorithm,
// O(n^4) won't hurt us.
template<typename Graph1, typename Graph2, typename IsoMap>
inline bool verify_isomorphism(const Graph1& g1, const Graph2& g2, IsoMap iso_map)
{
#if 0
// problematic for filtered_graph!
if (num_vertices(g1) != num_vertices(g2) || num_edges(g1) != num_edges(g2))
return false;
#endif
BGL_FORALL_EDGES_T(e1, g1, Graph1) {
bool found_edge = false;
BGL_FORALL_EDGES_T(e2, g2, Graph2) {
if (source(e2, g2) == get(iso_map, source(e1, g1)) &&
target(e2, g2) == get(iso_map, target(e1, g1))) {
found_edge = true;
}
}
if (!found_edge)
return false;
}
return true;
}
} // namespace boost
#ifdef BOOST_ISO_INCLUDED_ITER_MACROS
#undef BOOST_ISO_INCLUDED_ITER_MACROS
#include <boost/graph/iteration_macros_undef.hpp>
#endif
#endif // BOOST_GRAPH_ISOMORPHISM_HPP
|
{"hexsha": "29f6ef2c58e72b859218ba0b71030a52fe507581", "size": 17641, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/lib/boost/graph/isomorphism.hpp", "max_stars_repo_name": "EricBoittier/vina-carb-docker", "max_stars_repo_head_hexsha": "e8730d1ef90395e3d7ed3ad00264702313b0766a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 31.0, "max_stars_repo_stars_event_min_datetime": "2015-01-18T20:27:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-03T03:58:47.000Z", "max_issues_repo_path": "src/lib/boost/graph/isomorphism.hpp", "max_issues_repo_name": "EricBoittier/vina-carb-docker", "max_issues_repo_head_hexsha": "e8730d1ef90395e3d7ed3ad00264702313b0766a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11.0, "max_issues_repo_issues_event_min_datetime": "2016-11-22T13:14:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T00:56:51.000Z", "max_forks_repo_path": "src/lib/boost/graph/isomorphism.hpp", "max_forks_repo_name": "EricBoittier/vina-carb-docker", "max_forks_repo_head_hexsha": "e8730d1ef90395e3d7ed3ad00264702313b0766a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11.0, "max_forks_repo_forks_event_min_datetime": "2015-02-03T19:24:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-20T10:59:50.000Z", "avg_line_length": 37.856223176, "max_line_length": 139, "alphanum_fraction": 0.6074485573, "num_tokens": 4241}
|
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
! This file was ported from Lean 3 source module category_theory.limits.colimit_limit
! leanprover-community/mathlib commit 59382264386afdbaf1727e617f5fdda511992eb9
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.CategoryTheory.Limits.Types
import Mathlib.CategoryTheory.Functor.Currying
import Mathlib.CategoryTheory.Limits.FunctorCategory
/-!
# The morphism comparing a colimit of limits with the corresponding limit of colimits.
For `F : J × K ⥤ C` there is always a morphism $\colim_k \lim_j F(j,k) → \lim_j \colim_k F(j, k)$.
While it is not usually an isomorphism, with additional hypotheses on `J` and `K` it may be,
in which case we say that "colimits commute with limits".
The prototypical example, proved in `CategoryTheory.Limits.FilteredColimitCommutesFiniteLimit`,
is that when `C = Type`, filtered colimits commute with finite limits.
## References
* Borceux, Handbook of categorical algebra 1, Section 2.13
* [Stacks: Filtered colimits](https://stacks.math.columbia.edu/tag/002W)
-/
universe v u
open CategoryTheory
namespace CategoryTheory.Limits
variable {J K : Type v} [SmallCategory J] [SmallCategory K]
variable {C : Type u} [Category.{v} C]
variable (F : J × K ⥤ C)
open CategoryTheory.prod
theorem map_id_left_eq_curry_map {j : J} {k k' : K} {f : k ⟶ k'} :
F.map ((𝟙 j, f) : (j, k) ⟶ (j, k')) = ((curry.obj F).obj j).map f :=
rfl
#align category_theory.limits.map_id_left_eq_curry_map CategoryTheory.Limits.map_id_left_eq_curry_map
theorem map_id_right_eq_curry_swap_map {j j' : J} {f : j ⟶ j'} {k : K} :
F.map ((f, 𝟙 k) : (j, k) ⟶ (j', k)) = ((curry.obj (Prod.swap K J ⋙ F)).obj k).map f :=
rfl
#align category_theory.limits.map_id_right_eq_curry_swap_map CategoryTheory.Limits.map_id_right_eq_curry_swap_map
variable [HasLimitsOfShape J C]
variable [HasColimitsOfShape K C]
/-- The universal morphism
$\colim_k \lim_j F(j,k) → \lim_j \colim_k F(j, k)$.
-/
noncomputable def colimitLimitToLimitColimit :
colimit (curry.obj (Prod.swap K J ⋙ F) ⋙ lim) ⟶ limit (curry.obj F ⋙ colim) :=
limit.lift (curry.obj F ⋙ colim)
{ pt := _
π :=
{ app := fun j =>
colimit.desc (curry.obj (Prod.swap K J ⋙ F) ⋙ lim)
{ pt := _
ι :=
{ app := fun k =>
limit.π ((curry.obj (Prod.swap K J ⋙ F)).obj k) j ≫
colimit.ι ((curry.obj F).obj j) k
naturality := by
intro k k' f
simp only [Functor.comp_obj, lim_obj, colimit.cocone_x,
Functor.const_obj_obj, Functor.comp_map, lim_map,
curry_obj_obj_obj, Prod.swap_obj, limMap_π_assoc, curry_obj_map_app,
Prod.swap_map, Functor.const_obj_map, Category.comp_id]
rw [map_id_left_eq_curry_map, colimit.w] } }
naturality := by
intro j j' f
dsimp
ext k
simp only [Functor.comp_obj, lim_obj, Category.id_comp, colimit.ι_desc,
colimit.ι_desc_assoc, Category.assoc, ι_colimMap,
curry_obj_obj_obj, curry_obj_map_app]
rw [map_id_right_eq_curry_swap_map, limit.w_assoc] } }
#align category_theory.limits.colimit_limit_to_limit_colimit CategoryTheory.Limits.colimitLimitToLimitColimit
/-- Since `colimit_limit_to_limit_colimit` is a morphism from a colimit to a limit,
this lemma characterises it.
-/
@[reassoc (attr := simp)]
theorem ι_colimitLimitToLimitColimit_π (j) (k) :
colimit.ι _ k ≫ colimitLimitToLimitColimit F ≫ limit.π _ j =
limit.π ((curry.obj (Prod.swap K J ⋙ F)).obj k) j ≫ colimit.ι ((curry.obj F).obj j) k := by
dsimp [colimitLimitToLimitColimit]
simp
#align category_theory.limits.ι_colimit_limit_to_limit_colimit_π CategoryTheory.Limits.ι_colimitLimitToLimitColimit_π
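/- An illustrative `example` (not in the original file, added as a sketch): it
restates the characterising lemma above with the implicit functors spelled
out, using the ambient `variable`s `F`, `J`, `K` and `C`. -/
example (j : J) (k : K) :
    colimit.ι (curry.obj (Prod.swap K J ⋙ F) ⋙ lim) k ≫
        colimitLimitToLimitColimit F ≫ limit.π (curry.obj F ⋙ colim) j =
      limit.π ((curry.obj (Prod.swap K J ⋙ F)).obj k) j ≫
        colimit.ι ((curry.obj F).obj j) k :=
  ι_colimitLimitToLimitColimit_π F j k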
@[simp]
theorem ι_colimitLimitToLimitColimit_π_apply (F : J × K ⥤ Type v) (j : J) (k : K) (f) :
limit.π (curry.obj F ⋙ colim) j
(colimitLimitToLimitColimit F (colimit.ι (curry.obj (Prod.swap K J ⋙ F) ⋙ lim) k f)) =
colimit.ι ((curry.obj F).obj j) k (limit.π ((curry.obj (Prod.swap K J ⋙ F)).obj k) j f) := by
dsimp [colimitLimitToLimitColimit]
simp
#align category_theory.limits.ι_colimit_limit_to_limit_colimit_π_apply CategoryTheory.Limits.ι_colimitLimitToLimitColimit_π_apply
/-- The map `colimit_limit_to_limit_colimit` realized as a map of cones. -/
@[simps]
noncomputable def colimitLimitToLimitColimitCone (G : J ⥤ K ⥤ C) [HasLimit G] :
colim.mapCone (limit.cone G) ⟶ limit.cone (G ⋙ colim)
where
Hom :=
colim.map (limitIsoSwapCompLim G).hom ≫
colimitLimitToLimitColimit (uncurry.obj G : _) ≫
lim.map (whiskerRight (currying.unitIso.app G).inv colim)
w j := by
dsimp
ext1 k
simp only [Category.assoc, limMap_π, Functor.comp_obj, colim_obj, whiskerRight_app,
colim_map, ι_colimMap_assoc, lim_obj, limitIsoSwapCompLim_hom_app,
ι_colimitLimitToLimitColimit_π_assoc, curry_obj_obj_obj, Prod.swap_obj,
uncurry_obj_obj, ι_colimMap, currying_unitIso_inv_app_app_app, Category.id_comp,
limMap_π_assoc, Functor.flip_obj_obj, flipIsoCurrySwapUncurry_hom_app_app]
erw [limitObjIsoLimitCompEvaluation_hom_π_assoc]
#align category_theory.limits.colimit_limit_to_limit_colimit_cone CategoryTheory.Limits.colimitLimitToLimitColimitCone
end CategoryTheory.Limits
|
{"author": "leanprover-community", "repo": "mathlib4", "sha": "b9a0a30342ca06e9817e22dbe46e75fc7f435500", "save_path": "github-repos/lean/leanprover-community-mathlib4", "path": "github-repos/lean/leanprover-community-mathlib4/mathlib4-b9a0a30342ca06e9817e22dbe46e75fc7f435500/Mathlib/CategoryTheory/Limits/ColimitLimit.lean"}
|
[STATEMENT]
lemma [simp]: "\<forall>fs_opt. (fields P ty fs_opt) = (fields_f P ty = fs_opt)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>fs_opt. fields P ty fs_opt = (fields_f P ty = fs_opt)
[PROOF STEP]
apply(rule)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>fs_opt. fields P ty fs_opt = (fields_f P ty = fs_opt)
[PROOF STEP]
apply(rule)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>fs_opt. fields P ty fs_opt \<Longrightarrow> fields_f P ty = fs_opt
2. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(case_tac fs_opt)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>fs_opt. \<lbrakk>fields P ty fs_opt; fs_opt = None\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
2. \<And>fs_opt a. \<lbrakk>fields P ty fs_opt; fs_opt = Some a\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
3. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(clarsimp)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. fields P ty None \<Longrightarrow> fields_f P ty = None
2. \<And>fs_opt a. \<lbrakk>fields P ty fs_opt; fs_opt = Some a\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
3. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(erule fields.cases)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>Pa tya. \<lbrakk>P = Pa; ty = tya; None = None; find_path_ty Pa tya None\<rbrakk> \<Longrightarrow> fields_f P ty = None
2. \<And>Pa tya ctxclds fs. \<lbrakk>P = Pa; ty = tya; None = Some fs; find_path_ty Pa tya (Some ctxclds); fields_in_path ctxclds fs\<rbrakk> \<Longrightarrow> fields_f P ty = None
3. \<And>fs_opt a. \<lbrakk>fields P ty fs_opt; fs_opt = Some a\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
4. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(clarsimp)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. find_path_ty_f P ty = None \<Longrightarrow> fields_f P ty = None
2. \<And>Pa tya ctxclds fs. \<lbrakk>P = Pa; ty = tya; None = Some fs; find_path_ty Pa tya (Some ctxclds); fields_in_path ctxclds fs\<rbrakk> \<Longrightarrow> fields_f P ty = None
3. \<And>fs_opt a. \<lbrakk>fields P ty fs_opt; fs_opt = Some a\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
4. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(simp add: fields_f_def)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>Pa tya ctxclds fs. \<lbrakk>P = Pa; ty = tya; None = Some fs; find_path_ty Pa tya (Some ctxclds); fields_in_path ctxclds fs\<rbrakk> \<Longrightarrow> fields_f P ty = None
2. \<And>fs_opt a. \<lbrakk>fields P ty fs_opt; fs_opt = Some a\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
3. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(clarsimp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>fs_opt a. \<lbrakk>fields P ty fs_opt; fs_opt = Some a\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
2. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(erule fields.cases)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>fs_opt a Pa tya. \<lbrakk>fs_opt = Some a; P = Pa; ty = tya; fs_opt = None; find_path_ty Pa tya None\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
2. \<And>fs_opt a Pa tya ctxclds fs. \<lbrakk>fs_opt = Some a; P = Pa; ty = tya; fs_opt = Some fs; find_path_ty Pa tya (Some ctxclds); fields_in_path ctxclds fs\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
3. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>fs_opt a Pa tya ctxclds fs. \<lbrakk>fs_opt = Some a; P = Pa; ty = tya; fs_opt = Some fs; find_path_ty Pa tya (Some ctxclds); fields_in_path ctxclds fs\<rbrakk> \<Longrightarrow> fields_f P ty = fs_opt
2. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(clarsimp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>ctxclds. find_path_ty_f P ty = Some ctxclds \<Longrightarrow> fields_f P ty = Some (fields_in_path_f ctxclds)
2. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(simp add: fields_f_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>fs_opt. fields_f P ty = fs_opt \<Longrightarrow> fields P ty fs_opt
[PROOF STEP]
apply(simp add: fields_f_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fields P ty (case find_path_ty_f P ty of None \<Rightarrow> None | Some ctxclds \<Rightarrow> Some (fields_in_path_f ctxclds))
[PROOF STEP]
apply(case_tac "find_path_ty_f P ty")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. find_path_ty_f P ty = None \<Longrightarrow> fields P ty (case find_path_ty_f P ty of None \<Rightarrow> None | Some ctxclds \<Rightarrow> Some (fields_in_path_f ctxclds))
2. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> fields P ty (case find_path_ty_f P ty of None \<Rightarrow> None | Some ctxclds \<Rightarrow> Some (fields_in_path_f ctxclds))
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. find_path_ty_f P ty = None \<Longrightarrow> fields P ty None
2. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> fields P ty (case find_path_ty_f P ty of None \<Rightarrow> None | Some ctxclds \<Rightarrow> Some (fields_in_path_f ctxclds))
[PROOF STEP]
apply(simp add: fields_noneI[simplified])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> fields P ty (case find_path_ty_f P ty of None \<Rightarrow> None | Some ctxclds \<Rightarrow> Some (fields_in_path_f ctxclds))
[PROOF STEP]
apply(clarsimp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> fields P ty (Some (fields_in_path_f a))
[PROOF STEP]
apply(case_tac "find_path_ty_f P ty")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>a. \<lbrakk>find_path_ty_f P ty = Some a; find_path_ty_f P ty = None\<rbrakk> \<Longrightarrow> fields P ty (Some (fields_in_path_f a))
2. \<And>a aa. \<lbrakk>find_path_ty_f P ty = Some a; find_path_ty_f P ty = Some aa\<rbrakk> \<Longrightarrow> fields P ty (Some (fields_in_path_f a))
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa. \<lbrakk>find_path_ty_f P ty = Some a; find_path_ty_f P ty = Some aa\<rbrakk> \<Longrightarrow> fields P ty (Some (fields_in_path_f a))
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> fields P ty (Some (fields_in_path_f a))
[PROOF STEP]
apply(rule fields_someI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> find_path_ty P ty (Some (?ctxclds27 a))
2. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> fields_in_path (?ctxclds27 a) (fields_in_path_f a)
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. find_path_ty_f P ty = Some a \<Longrightarrow> fields_in_path a (fields_in_path_f a)
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 3001, "file": "LightweightJava_Lightweight_Java_Equivalence", "length": 24}
|
"""
losses.py
=======================
Some additional loss functions that can be called from the pipeline, some of which are still to be implemented.
"""
import torch
import numpy as np
from typing import Iterable, List, Set, Tuple
# from typing import Any, Callable, TypeVar, Union
from torch import Tensor, einsum
import torch.nn.functional as F
from scipy.ndimage import distance_transform_edt as distance
from torch import nn
def assert_(condition, message="", exception_type=AssertionError):
"""https://raw.githubusercontent.com/inferno-pytorch/inferno/0561e8a95cde6bfc5e10a3609841b7b0ca5b03ca/inferno/utils/exceptions.py
Like assert, but with arbitrary exception types."""
if not condition:
raise exception_type(message)
class ShapeError(ValueError):
    """https://raw.githubusercontent.com/inferno-pytorch/inferno/0561e8a95cde6bfc5e10a3609841b7b0ca5b03ca/inferno/utils/exceptions.py"""
    pass
def flatten_samples(input_):
"""
Flattens a tensor or a variable.
Flattens a tensor or variable such that the channel axis is first and the
sample axis is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
The input must be at least 2-D.
Notes
-----
https://raw.githubusercontent.com/inferno-pytorch/inferno/0561e8a95cde6bfc5e10a3609841b7b0ca5b03ca/inferno/utils/torch_utils.py
"""
assert_(
input_.dim() >= 2,
"Tensor or variable must be atleast 2D. Got one of dim {}.".format(
input_.dim()
),
ShapeError,
)
# Get number of channels
num_channels = input_.size(1)
# Permute the channel axis to first
permute_axes = list(range(input_.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
# For input shape (say) NCHW, this should have the shape CNHW
permuted = input_.permute(*permute_axes).contiguous()
# Now flatten out all but the first axis and return
flattened = permuted.view(num_channels, -1)
return flattened
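# A quick shape sanity-check for flatten_samples (illustrative only, not part
# of the original module):
#   x = torch.zeros(4, 3, 8, 8)                         # (N, C, H, W)
#   assert flatten_samples(x).shape == (3, 4 * 8 * 8)   # (C, N * H * W)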
class GeneralizedDiceLoss(nn.Module):
"""
https://raw.githubusercontent.com/inferno-pytorch/inferno/0561e8a95cde6bfc5e10a3609841b7b0ca5b03ca/inferno/extensions/criteria/set_similarity_measures.py
Computes the scalar Generalized Dice Loss defined in https://arxiv.org/abs/1707.03237
This version works for multiple classes and expects predictions for every class (e.g. softmax output) and
one-hot targets for every class.
"""
def __init__(self, weight=None, channelwise=False, eps=1e-6, add_softmax=False):
super(GeneralizedDiceLoss, self).__init__()
self.register_buffer("weight", weight)
self.channelwise = channelwise
self.eps = eps
self.add_softmax = add_softmax
def forward(self, input, target):
"""
input: torch.FloatTensor or torch.cuda.FloatTensor
target: torch.FloatTensor or torch.cuda.FloatTensor
Expected shape of the inputs:
- if not channelwise: (batch_size, nb_classes, ...)
- if channelwise: (batch_size, nb_channels, nb_classes, ...)
"""
assert input.size() == target.size()
if self.add_softmax:
input = F.softmax(input, dim=1)
if not self.channelwise:
# Flatten input and target to have the shape (nb_classes, N),
# where N is the number of samples
input = flatten_samples(input)
target = flatten_samples(target).float()
# Find classes weights:
sum_targets = target.sum(-1)
class_weigths = 1.0 / (sum_targets * sum_targets).clamp(min=self.eps)
# Compute generalized Dice loss:
numer = ((input * target).sum(-1) * class_weigths).sum()
denom = ((input + target).sum(-1) * class_weigths).sum()
loss = 1.0 - 2.0 * numer / denom.clamp(min=self.eps)
else:
def flatten_and_preserve_channels(tensor):
tensor_dim = tensor.dim()
assert tensor_dim >= 3
num_channels = tensor.size(1)
num_classes = tensor.size(2)
# Permute the channel axis to first
permute_axes = list(range(tensor_dim))
permute_axes[0], permute_axes[1], permute_axes[2] = (
permute_axes[1],
permute_axes[2],
permute_axes[0],
)
permuted = tensor.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, num_classes, -1)
return flattened
# Flatten input and target to have the shape (nb_channels, nb_classes, N)
input = flatten_and_preserve_channels(input)
target = flatten_and_preserve_channels(target)
# Find classes weights:
sum_targets = target.sum(-1)
class_weigths = 1.0 / (sum_targets * sum_targets).clamp(min=self.eps)
# Compute generalized Dice loss:
numer = ((input * target).sum(-1) * class_weigths).sum(-1)
denom = ((input + target).sum(-1) * class_weigths).sum(-1)
channelwise_loss = 1.0 - 2.0 * numer / denom.clamp(min=self.eps)
if self.weight is not None:
if channelwise_loss.dim() == 2:
channelwise_loss = channelwise_loss.squeeze(1)
assert (
self.weight.size() == channelwise_loss.size()
), """`weight` should have shape (nb_channels, ),
`target` should have shape (batch_size, nb_channels, nb_classes, ...)"""
# Apply channel weights:
channelwise_loss = self.weight * channelwise_loss
loss = channelwise_loss.sum()
return loss
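# Minimal usage sketch (hypothetical tensors; softmax is applied inside the
# loss because add_softmax=True):
#   criterion = GeneralizedDiceLoss(add_softmax=True)
#   logits = torch.randn(2, 5, 16, 16)  # (batch, classes, H, W)
#   labels = torch.randint(0, 5, (2, 16, 16))
#   target = F.one_hot(labels, 5).permute(0, 3, 1, 2).float()
#   loss = criterion(logits, target)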
class FocalLoss(nn.Module): # add boundary loss
"""Focal Loss, as proposed in [1]_.
Attributes
----------
num_class
Binarizes the labels of a column(s).
alpha : int
Number of desired targets to preidict on.
gamma : float or double
When gamma is nonnegative, the relative loss for well-classified examples (p>0.5) is reduced, putting more focus on hard misclassified examples.
smooth : float or double
Smooth value to use when computing cross entropy.
balance_index : int
The balance class index, should be specific when alpha is a float.
size_average : bool, optional
By default, the losses are averaged over each loss element in the batch.
Notes
-----
.. math:: FL(p_t)=-\alpha_t(1-p_t)^\gamma\log(p_t)
https://raw.githubusercontent.com/Hsuxu/Loss_ToolBox-PyTorch/master/FocalLoss/focal_loss.py.
.. [1] Lin, Tsung-Yi, et al. "Focal Loss for Dense Object Detection."
ArXiv:1708.02002 [Cs], Feb. 2018. arXiv.org,
http://arxiv.org/abs/1708.02002.
"""
def __init__(
self,
num_class,
alpha=None,
gamma=2,
balance_index=-1,
smooth=None,
size_average=True,
):
super(FocalLoss, self).__init__()
self.num_class = num_class
self.alpha = alpha
self.gamma = gamma
self.smooth = smooth
self.size_average = size_average
if self.alpha is None:
self.alpha = torch.ones(self.num_class, 1)
elif isinstance(self.alpha, (list, np.ndarray)):
assert len(self.alpha) == self.num_class
self.alpha = torch.FloatTensor(alpha).view(self.num_class, 1)
self.alpha = self.alpha / self.alpha.sum()
elif isinstance(self.alpha, float):
alpha = torch.ones(self.num_class, 1)
alpha = alpha * (1 - self.alpha)
alpha[balance_index] = self.alpha
self.alpha = alpha
else:
raise TypeError("Not support alpha type")
if self.smooth is not None:
if self.smooth < 0 or self.smooth > 1.0:
raise ValueError("smooth value should be in [0,1]")
def forward(self, logit, target):
# logit = F.softmax(input, dim=1)
if logit.dim() > 2:
# N,C,d1,d2 -> N,C,m (m=d1*d2*...)
logit = logit.view(logit.size(0), logit.size(1), -1)
logit = logit.permute(0, 2, 1).contiguous()
logit = logit.view(-1, logit.size(-1))
target = target.view(-1, 1)
# N = input.size(0)
# alpha = torch.ones(N, self.num_class)
# alpha = alpha * (1 - self.alpha)
# alpha = alpha.scatter_(1, target.long(), self.alpha)
epsilon = 1e-10
        alpha = self.alpha
        if alpha.device != logit.device:  # `input` was undefined here; use the actual argument
            alpha = alpha.to(logit.device)
idx = target.cpu().long()
one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()
one_hot_key = one_hot_key.scatter_(1, idx, 1)
if one_hot_key.device != logit.device:
one_hot_key = one_hot_key.to(logit.device)
if self.smooth:
one_hot_key = torch.clamp(
one_hot_key, self.smooth / (self.num_class - 1), 1.0 - self.smooth
)
pt = (one_hot_key * logit).sum(1) + epsilon
logpt = pt.log()
gamma = self.gamma
        # Pick one weight per sample and flatten it so it broadcasts against pt
        alpha = alpha[idx.to(alpha.device)].view(-1)
        loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt
if self.size_average:
loss = loss.mean()
else:
loss = loss.sum()
return loss
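# Minimal usage sketch (hypothetical tensors): `forward` expects class
# probabilities, matching the commented-out softmax above.
#   probs = F.softmax(torch.randn(3, 4), dim=1)
#   target = torch.randint(0, 4, (3,))
#   loss = FocalLoss(num_class=4, gamma=2)(probs, target)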
def uniq(a: Tensor) -> Set:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/utils.py"""
return set(torch.unique(a.cpu()).numpy())
def sset(a: Tensor, sub: Iterable) -> bool:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/utils.py"""
return uniq(a).issubset(sub)
def eq(a: Tensor, b) -> bool:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/utils.py"""
return torch.eq(a, b).all()
def simplex(t: Tensor, axis=1) -> bool:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/utils.py"""
_sum = t.sum(axis).type(torch.float32)
_ones = torch.ones_like(_sum, dtype=torch.float32)
return torch.allclose(_sum, _ones)
def one_hot(t: Tensor, axis=1) -> bool:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/utils.py"""
return simplex(t, axis) and sset(t, [0, 1])
def class2one_hot(seg: Tensor, C: int) -> Tensor:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/utils.py"""
if len(seg.shape) == 2: # Only w, h, used by the dataloader
seg = seg.unsqueeze(dim=0)
assert sset(seg, list(range(C)))
b, w, h = seg.shape # type: Tuple[int, int, int]
res = torch.stack([seg == c for c in range(C)], dim=1).type(torch.int32)
assert res.shape == (b, C, w, h)
assert one_hot(res)
return res
def one_hot2dist(seg: np.ndarray) -> np.ndarray:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/utils.py"""
assert one_hot(torch.Tensor(seg), axis=0)
C: int = len(seg)
res = np.zeros_like(seg)
for c in range(C):
        posmask = seg[c].astype(bool)  # np.bool was removed in NumPy 1.24
if posmask.any():
negmask = ~posmask
res[c] = distance(negmask) * negmask - (distance(posmask) - 1) * posmask
return res
class SurfaceLoss:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/losses.py"""
def __init__(self, **kwargs):
# Self.idc is used to filter out some classes of the target mask. Use fancy indexing
self.idc: List[int] = kwargs["idc"]
print(f"Initialized {self.__class__.__name__} with {kwargs}")
def __call__(self, probs: Tensor, dist_maps: Tensor, _: Tensor) -> Tensor:
assert simplex(probs)
assert not one_hot(dist_maps)
pc = probs[:, self.idc, ...].type(torch.float32)
dc = dist_maps[:, self.idc, ...].type(torch.float32)
multipled = einsum("bcwh,bcwh->bcwh", pc, dc)
loss = multipled.mean()
return loss
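# Sketch of the boundary-loss pipeline these helpers implement (hypothetical
# tensors): labels -> one-hot -> signed distance maps -> SurfaceLoss.
#   seg = torch.randint(0, 2, (8, 8))
#   dist = torch.from_numpy(one_hot2dist(class2one_hot(seg, 2)[0].numpy()))
#   probs = torch.softmax(torch.randn(1, 2, 8, 8), dim=1)
#   loss = SurfaceLoss(idc=[1])(probs, dist.unsqueeze(0).float(), None)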
class GeneralizedDice:
"""https://raw.githubusercontent.com/LIVIAETS/surface-loss/master/losses.py"""
def __init__(self, **kwargs):
# Self.idc is used to filter out some classes of the target mask. Use fancy indexing
self.idc: List[int] = kwargs["idc"]
print(f"Initialized {self.__class__.__name__} with {kwargs}")
def __call__(self, probs: Tensor, target: Tensor, _: Tensor) -> Tensor:
assert simplex(probs) and simplex(target)
pc = probs[:, self.idc, ...].type(torch.float32)
tc = target[:, self.idc, ...].type(torch.float32)
w: Tensor = 1 / ((einsum("bcwh->bc", tc).type(torch.float32) + 1e-10) ** 2)
intersection: Tensor = w * einsum("bcwh,bcwh->bc", pc, tc)
union: Tensor = w * (einsum("bcwh->bc", pc) + einsum("bcwh->bc", tc))
divided: Tensor = 1 - 2 * (einsum("bc->b", intersection) + 1e-10) / (
einsum("bc->b", union) + 1e-10
)
loss = divided.mean()
return loss
|
{"hexsha": "70d680384069f2dd5baa693a41dd0ce5f2da3a47", "size": 13147, "ext": "py", "lang": "Python", "max_stars_repo_path": "pathflowai/losses.py", "max_stars_repo_name": "sumanthratna/PathFlowAI", "max_stars_repo_head_hexsha": "70324e78da7ad9452789478b9be7cc76515ea3ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pathflowai/losses.py", "max_issues_repo_name": "sumanthratna/PathFlowAI", "max_issues_repo_head_hexsha": "70324e78da7ad9452789478b9be7cc76515ea3ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pathflowai/losses.py", "max_forks_repo_name": "sumanthratna/PathFlowAI", "max_forks_repo_head_hexsha": "70324e78da7ad9452789478b9be7cc76515ea3ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6287262873, "max_line_length": 157, "alphanum_fraction": 0.6086559671, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3362}
|
import pandas as pd
import dask.dataframe as dd
import numpy as np
from itertools import combinations, permutations
from multiprocessing import Pool, cpu_count
def pairwise(df, operation, columns = None):
"""Form interactions between all pairs of numeric columns
Arguments:
df -- the DataFrame to run on
operation -- the operation(s) to compute between pairs
columns -- specify a subset of columns to consider
Possible operations are:
'sum', 'difference', 'product', 'ratio',
'power', 'root', 'log', 'arctan', 'mod',
'arctanh', 'l1', 'l2', 'max' or 'all'
"""
if type(df) is dd.core.DataFrame:
concat = dd.concat
elif type(df) is pd.core.frame.DataFrame:
concat = pd.concat
else:
raise ValueError("df is not a pandas.core.frame.DataFrame or a dask.dataframe.core.DataFrame.")
result = df.loc[:,[]]
if columns is None:
columns = [c for c in df.columns if np.issubdtype(df[c], np.number)]
if operation == 'all':
operation = ['sum', 'difference', 'product', 'ratio',
'power', 'root', 'log', 'arctan', 'mod',
'arctanh', 'l1', 'l2', 'max']
if type(operation) is list:
if 'all' in operation:
raise ValueError("No point in a list containing 'all', silly.")
operation = pd.unique(operation)
return concat(
[pairwise(df, op, columns) for op in operation],
axis=1
)
if operation == 'sum':
f = lambda df, c1, c2: df[c1]+df[c2]
pairs = combinations(columns, 2)
name = '%s + %s'
elif operation == 'difference':
f = lambda df, c1, c2: df[c1]-df[c2]
pairs = combinations(columns, 2)
name = '%s - %s'
elif operation == 'product':
f = lambda df, c1, c2: df[c1]*df[c2]
pairs = combinations(columns, 2)
name = '%s * %s'
elif operation == 'ratio':
f = lambda df, c1, c2: df[c1]/df[c2]
pairs = permutations(columns, 2)
name = '%s / %s'
elif operation == 'power':
f = lambda df, c1, c2: df.loc[df[c2]>=0,c1]**df.loc[df[c2]>=0,c2]
pairs = permutations(columns, 2)
name = '%s ^ %s'
elif operation == 'root':
f = lambda df, c1, c2: df[c1]**(1/df[c2])
pairs = permutations(columns, 2)
name = "%s to %s'th root"
elif operation == 'log':
f = lambda df, c1, c2: np.log(df[c2])/np.log(df[c1])
pairs = permutations(columns, 2)
name = 'log_%s %s'
elif operation == 'arctan':
f = lambda df, c1, c2: np.arctan(df[c1]/df[c2])
pairs = combinations(columns, 2)
name = 'arctan(%s / %s)'
elif operation == 'mod':
f = lambda df, c1, c2: df[c1] % df[c2]
pairs = permutations(columns, 2)
name = '%s mod %s'
elif operation == 'arctanh':
f = lambda df, c1, c2: np.arctanh((df[c1]/df[c2]).clip(-1,1))
pairs = permutations(columns, 2)
name = 'arctanh(%s/%s)'
    elif operation == 'l1':
        f = lambda df, c1, c2: df[c1].abs() + df[c2].abs()
        pairs = combinations(columns, 2)
        name = '||(%s, %s)||_1'
    elif operation == 'l2':
        f = lambda df, c1, c2: (df[c1]**2+df[c2]**2)**0.5
        pairs = combinations(columns, 2)
        name = '||(%s, %s)||_2'
elif operation == 'max':
f = lambda df, c1, c2: df[[c1, c2]].abs().max(axis=1)
pairs = combinations(columns, 2)
name = 'max(%s, %s)'
else:
raise ValueError(
"'%s' is not a supported interaction." % operation
)
for c1, c2 in pairs:
result[name % (c1,c2)] = f(df, c1, c2)
    return result  # return only the frame, matching the multi-operation branch above
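# Minimal usage sketch (hypothetical frame):
#   df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [4.0, 5.0, 6.0]})
#   feats = pairwise(df, ['sum', 'ratio'])
#   list(feats.columns)  # ['a + b', 'a / b', 'b / a']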
|
{"hexsha": "5d514d7612a4aa5c9872c67a2a520164b786d19e", "size": 3739, "ext": "py", "lang": "Python", "max_stars_repo_path": "featurama/interactions/numeric.py", "max_stars_repo_name": "atomichighfive/featurama", "max_stars_repo_head_hexsha": "725481957fc56bd709bfa70e4892f8dbe6e48b0b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "featurama/interactions/numeric.py", "max_issues_repo_name": "atomichighfive/featurama", "max_issues_repo_head_hexsha": "725481957fc56bd709bfa70e4892f8dbe6e48b0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "featurama/interactions/numeric.py", "max_forks_repo_name": "atomichighfive/featurama", "max_forks_repo_head_hexsha": "725481957fc56bd709bfa70e4892f8dbe6e48b0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3027522936, "max_line_length": 103, "alphanum_fraction": 0.5402514041, "include": true, "reason": "import numpy", "num_tokens": 1094}
|
import shutil
import unittest
import numpy as np
import discretize
from SimPEG import (
utils,
maps,
regularization,
data_misfit,
optimization,
inverse_problem,
directives,
inversion,
)
from SimPEG.potential_fields import gravity
np.random.seed(43)
class GravInvLinProblemTest(unittest.TestCase):
def setUp(self):
# Create a self.mesh
dx = 5.0
hxind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)]
hyind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)]
hzind = [(dx, 5, -1.3), (dx, 6)]
self.mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC")
# Get index of the center
midx = int(self.mesh.nCx / 2)
midy = int(self.mesh.nCy / 2)
# Lets create a simple Gaussian topo and set the active cells
[xx, yy] = np.meshgrid(self.mesh.vectorNx, self.mesh.vectorNy)
zz = -np.exp((xx**2 + yy**2) / 75**2) + self.mesh.vectorNz[-1]
# Go from topo to actv cells
topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]
actv = utils.surface2ind_topo(self.mesh, topo, "N")
actv = np.where(actv)[0]
# Create active map to go from reduce space to full
self.actvMap = maps.InjectActiveCells(self.mesh, actv, -100)
nC = len(actv)
# Create and array of observation points
xr = np.linspace(-20.0, 20.0, 20)
yr = np.linspace(-20.0, 20.0, 20)
X, Y = np.meshgrid(xr, yr)
# Move the observation points 5m above the topo
Z = -np.exp((X**2 + Y**2) / 75**2) + self.mesh.vectorNz[-1] + 5.0
# Create a MAGsurvey
locXYZ = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
rxLoc = gravity.Point(locXYZ)
srcField = gravity.SourceField([rxLoc])
survey = gravity.Survey(srcField)
# We can now create a density model and generate data
# Here a simple block in half-space
model = np.zeros((self.mesh.nCx, self.mesh.nCy, self.mesh.nCz))
model[(midx - 2) : (midx + 2), (midy - 2) : (midy + 2), -6:-2] = 0.5
model = utils.mkvc(model)
self.model = model[actv]
# Create reduced identity map
idenMap = maps.IdentityMap(nP=nC)
# Create the forward model operator
sim = gravity.Simulation3DIntegral(
self.mesh,
survey=survey,
rhoMap=idenMap,
actInd=actv,
store_sensitivities="ram",
)
# Compute linear forward operator and compute some data
data = sim.make_synthetic_data(
self.model, relative_error=0.0, noise_floor=0.001, add_noise=True
)
# Create a regularization
reg = regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap)
reg.norms = np.c_[0, 0, 0, 0]
reg.gradientType = "component"
# reg.eps_p, reg.eps_q = 5e-2, 1e-2
# Data misfit function
dmis = data_misfit.L2DataMisfit(simulation=sim, data=data)
# Add directives to the inversion
opt = optimization.ProjectedGNCG(
maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e8)
# Here is where the norms are applied
IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=1)
update_Jacobi = directives.UpdatePreconditioner()
sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)
self.inv = inversion.BaseInversion(
invProb, directiveList=[IRLS, sensitivity_weights, update_Jacobi]
)
self.sim = sim
def test_grav_inverse(self):
# Run the inversion
mrec = self.inv.run(self.model)
residual = np.linalg.norm(mrec - self.model) / np.linalg.norm(self.model)
print(residual)
# plt.figure()
# ax = plt.subplot(1, 2, 1)
# midx = int(self.mesh.nCx/2)
# self.mesh.plotSlice(self.actvMap*mrec, ax=ax, normal='Y', ind=midx,
# grid=True, clim=(0, 0.5))
# ax = plt.subplot(1, 2, 2)
# midx = int(self.mesh.nCx/2)
# self.mesh.plotSlice(self.actvMap*self.model, ax=ax, normal='Y', ind=midx,
# grid=True, clim=(0, 0.5))
# plt.show()
self.assertTrue(residual < 0.05)
def tearDown(self):
# Clean up the working directory
if self.sim.store_sensitivities == "disk":
shutil.rmtree(self.sim.sensitivity_path)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "5a53194faed066ab0110e6870f165d3fe8b8377c", "size": 4606, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/pf/test_grav_inversion_linear.py", "max_stars_repo_name": "ElliotCheung/simpeg", "max_stars_repo_head_hexsha": "ce5bde154179ca63798a62a12787a7ec3535472c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/pf/test_grav_inversion_linear.py", "max_issues_repo_name": "ElliotCheung/simpeg", "max_issues_repo_head_hexsha": "ce5bde154179ca63798a62a12787a7ec3535472c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/pf/test_grav_inversion_linear.py", "max_forks_repo_name": "ElliotCheung/simpeg", "max_forks_repo_head_hexsha": "ce5bde154179ca63798a62a12787a7ec3535472c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6666666667, "max_line_length": 86, "alphanum_fraction": 0.5890143291, "include": true, "reason": "import numpy", "num_tokens": 1318}
|
# Train a new network on a data set with train.py
# Basic usage: python train.py data_directory
# Prints out training loss, validation loss, and validation accuracy as the network trains
# Options:
# Set directory to save checkpoints: python train.py data_dir --save_dir save_directory
# Choose architecture: python train.py data_dir --arch "vgg16"
#   Set hyperparameters: python train.py data_dir --learning_rate 0.01 --hidden_units 512 --epochs 20
# Use GPU for training: python train.py data_dir --gpu
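#   Example invocation (hypothetical paths, matching the options above):
#     python train.py flowers --save_dir save_dir --arch vgg16 \
#         --learning_rate 0.001 --hidden_units 512 --epochs 3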
import argparse
from collections import OrderedDict
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
from PIL import Image
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import torchvision.transforms.functional as TF
parser = argparse.ArgumentParser()
parser.add_argument('data_dir',
action='store',
default="flowers",
help='Data directory, default should be <flowers>')
parser.add_argument('--save_dir',
action='store',
default="save_dir",
dest='save_dir',
help='Set directory to save checkpoints, default should be <save_dir>')
parser.add_argument('--arch',
action='store',
default="vgg16",
dest="arch",
help='Choose architecture')
parser.add_argument('--learning_rate',
                    action='store',
                    type=float,  # parse the CLI value as a number, not a string
                    default=0.001,
                    dest='learning_rate',
                    help='hyperparameter')
parser.add_argument('--hidden_units',
                    action='store',
                    type=int,
                    default=512,
                    dest='hidden_units',
                    help='hyperparameter')
parser.add_argument('--epochs',
                    action='store',
                    type=int,  # range(cli_args.epochs) below needs an int
                    default=1,
                    dest='epochs',
                    help='hyperparameter')
cli_args = parser.parse_args()
print('data_dir = {!r}'.format(cli_args.data_dir))
print('save_dir = {!r}'.format(cli_args.save_dir))
print('arch = {!r}'.format(cli_args.arch))
print('learning_rate = {!r}'.format(cli_args.learning_rate))
print('hidden_units = {!r}'.format(cli_args.hidden_units))
print('epochs = {!r}'.format(cli_args.epochs))
data_dir = cli_args.data_dir  # honor the CLI argument instead of a hard-coded path
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# TODO: Define your transforms for the training, validation, and testing sets
transform = transforms.Compose(
[
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]
)
]
)
# TODO: Load the datasets with ImageFolder
train_datasets = datasets.ImageFolder(
train_dir,
transform=transform
)
valid_datasets = datasets.ImageFolder(
valid_dir,
transform=transform
)
test_datasets = datasets.ImageFolder(
test_dir,
transform=transform
)
# TODO: Using the image datasets and the trainforms, define the dataloaders
train_dataloader = torch.utils.data.DataLoader(
train_datasets,
batch_size=32,
shuffle=True
)
valid_dataloader = torch.utils.data.DataLoader(
valid_datasets,
batch_size=32,
shuffle=True
)
test_dataloader = torch.utils.data.DataLoader(
test_datasets,
batch_size=32,
shuffle=True
)
categories_to_name = {}
with open('cat_to_name.json', 'r') as f:
categories_to_name = json.load(f)
num_of_categories = len(categories_to_name)
print("Number of Categories: ", num_of_categories)
print(json.dumps(categories_to_name, sort_keys=True, indent=4))
# load a pre trained model
# vgg variants classifiers have 25088
# as input_features of 1st FC layer
if cli_args.arch == "vgg16":
model = models.vgg16(pretrained=True)
else:
print(cli_args.arch+" not yet implemented")
quit()
print(model)
img_input = 25088
recommended_hidden = cli_args.hidden_units # 1024
output_size=102
# TODO: Build and train your network
# freeze pre-trained parameters
for param in model.parameters():
param.requires_grad = False
# create custom classifier
recommended_classifier = nn.Sequential(
OrderedDict(
[
# in_feature in fc1 should match in_features
# from the first FC layer of a pre-trained classifier
('fc1', nn.Linear(img_input, recommended_hidden)),
('relu', nn.ReLU()),
('dropout', nn.Dropout()),
# in_features of fc2 should
# match out_feature of fc1, and so on.
#
# out_features on the last layer should
            # match the number of classes (102) you want to predict
('fc2', nn.Linear(recommended_hidden, output_size)),
('output', nn.LogSoftmax(dim=1))
]
)
)
model.classifier = recommended_classifier
print(model)
# Use GPU if it's available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=cli_args.learning_rate)
steps = 0
tot_train_loss = 0
print_every = 5
train_losses, test_losses = [], []
model.train()
for epoch in range(cli_args.epochs):
for inputs, labels in train_dataloader:
steps += 1
# Move input and label tensors to the default device
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
# Forward
log_ps = model.forward(inputs)
loss = criterion(log_ps, labels)
# Backward
loss.backward()
optimizer.step()
        tot_train_loss += loss.item()
        if steps % print_every == 0:
            # average loss over the last print_every batches
            train_loss = tot_train_loss / print_every
            train_losses.append(train_loss)
            print(f"Epoch {epoch+1}/{cli_args.epochs}.. "
                  f"Train loss: {train_loss:.3f}.. ")
            tot_train_loss = 0.0
print('Finished Training')
# TODO: Do validation on the test set
steps = 0
tot_test_loss = 0
test_correct = 0  # Number of correct predictions on the test set
model.eval()
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
    for images, labels in test_dataloader:
        steps += 1
        # Move image and label tensors to the default device
        images = images.to(device)
        labels = labels.to(device)
log_ps = model(images)
loss = criterion(log_ps, labels)
tot_test_loss += loss.item()
ps = torch.exp(log_ps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
test_correct += equals.sum().item()
# Get mean loss to enable comparison between train and test sets
test_loss = tot_test_loss / len(test_dataloader.dataset)
# At completion of epoch
test_losses.append(test_loss)
print("Epoch: {}/{}.. ".format(1, cli_args.epochs),
"Test Loss: {:.3f}.. ".format(test_loss),
"Test Accuracy: {:.3f}".format(test_correct / len(test_dataloader.dataset)))
print("Done Testing")
# TODO: Save the checkpoint
model.class_to_idx = train_datasets.class_to_idx
config_dictionary = {
'model_state': model.state_dict(),
    'optimizer_state': optimizer.state_dict(),
'learning_rate': cli_args.learning_rate,
'epochs': cli_args.epochs,
'class_to_idx': train_datasets.class_to_idx
}
if not os.path.exists(cli_args.save_dir):
os.mkdir(cli_args.save_dir)
torch.save(config_dictionary, cli_args.save_dir+"/checkpoint.pth")
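# Sketch of reloading the checkpoint later (predict-side code, not part of this
# script; assumes the same vgg16 classifier is rebuilt first):
#   checkpoint = torch.load(cli_args.save_dir + "/checkpoint.pth")
#   model.load_state_dict(checkpoint['model_state'])
#   model.class_to_idx = checkpoint['class_to_idx']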
|
{"hexsha": "0bc2ad2f07d2dc1257d3122b9c0e980251392bc5", "size": 8140, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "AlanACruz/aipnd-project", "max_stars_repo_head_hexsha": "e0d5dcb49865cced1a9e88f03adaf71f6d0bf1a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "AlanACruz/aipnd-project", "max_issues_repo_head_hexsha": "e0d5dcb49865cced1a9e88f03adaf71f6d0bf1a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "AlanACruz/aipnd-project", "max_forks_repo_head_hexsha": "e0d5dcb49865cced1a9e88f03adaf71f6d0bf1a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7815699659, "max_line_length": 98, "alphanum_fraction": 0.6409090909, "include": true, "reason": "import numpy", "num_tokens": 1783}
|
#!/usr/bin/env python
# coding: utf-8
#%% global packages
import mesa.batchrunner as mb
import numpy as np
import networkx as nx
#import uuid
#import pandas as pd
from IPython.core.display import display
import matplotlib as mpl
#import matplotlib.figure as figure
mpl.rc('text', usetex = True)
mpl.rc('font', size = 12)
#%% local functions
script_path = ""
import os
try:
script_path = os.path.dirname(__file__)
os.chdir(script_path)
except FileNotFoundError:
script_path = os.getcwd()
else:
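    # after a successful chdir, normalize a possibly relative dirname to an absolute path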
script_path = os.getcwd()
import sys
sys.path.append("..")
from JanosikGraphModel import JanosikGraphModel
import indicators
##############################################################################
############################## BATCH EXECUTION ###############################
##############################################################################
#%% simulation parameters for batch execution
# initial capital
init_capital = 20
# initial capital distribution
#init_capital_dist = "uniform" # init_capital is base capital
# init_capital_dist = "boltzmann" # init_capital is mean capital
init_capital_dist = sys.argv[1]
# bias in the game
default_eps = 0.15
# size of the grid
grid_width = 10
grid_height = 10
# graph used in the experiments
graph_id = "w"+str(grid_width) + "_h"+str(grid_height)
graph_file_path = script_path + '/graphs/grid2d/' + graph_id + ".gz"
# graph generation and saving - can be used only once for the grid
# graph = nx.generators.lattice.grid_2d_graph(grid_width,grid_height,periodic=True)
# nx.readwrite.write_gexf(graph, graph_file_path)
# nx.draw(graph)
# values of the bias used in the experiments
#eps_vals = [0.0]+list(-1*np.array([0.025, 0.05, 0.10, 0.125, 0.15, 0.25, 0.3, 0.5]))
eps_vals = [0.0]+list(-1*np.array([0.025, 0.05, 0.10, 0.15, 0.25, 0.3, 0.5]))
# eps_vals = [-0.5, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.5]
#%% batch execution of the simulations
fixed_params = {
"graph_spec": graph_file_path,
"init_capital": init_capital,
"init_capital_dist": init_capital_dist
}
variable_params = {
"num_agents" : range(20, 141, 20),
"default_boost" : ["matthew", "antimatthew", "strongmatthew" ,"strongantimatthew"],
"default_eps" : eps_vals
}
batch_run = mb.BatchRunnerMP(
JanosikGraphModel,
nr_processes = 14,
variable_parameters=variable_params,
fixed_parameters=fixed_params,
iterations=50,
max_steps=250,
model_reporters={
"Gini index" : indicators.gini_index,
#"Hoover index" : indicators.hoover_index,
# "Total capital": indicators.total_capital,
# "Mean capital": indicators.mean_capital,
# "Median capital": indicators.median_capital,
# "Min capital": indicators.min_capital,
# "Max capital": indicators.max_capital
}
)
# string describing the experiment
exp_desc = 'janosik_'+init_capital_dist+"_"+str(init_capital)+"_grid_"+str(grid_width)+'x'+str(grid_height)+"_"+str(batch_run.iterations)+"runs_"+str(batch_run.max_steps)+"steps"#"_" + str(default_eps)+"eps"
#%% run the experiment
print("[INFO] Executing", np.prod(list(map(len,variable_params.values())))*batch_run.iterations, "iterations.", flush=True)
batch_run.run_all()
#%% results from the batch execution
rd = batch_run.get_model_vars_dataframe()
#%% workaround for the Mesa bug
#rd.columns = ['num_agents', 'default_boost', 'default_eps', 'Run', 'Gini index', 'Hoover index', 'Total capital', 'Mean capital', 'Median capital', 'Min capital', 'Max capital', 'graph_spec', 'init_wealth']
#rd.columns = ['num_agents', 'default_boost', 'default_eps', 'Run', 'Gini index', 'Hoover index', 'graph_spec', 'init_wealth', 'init_capital_dist']
rd.columns = ['num_agents', 'default_boost', 'default_eps', 'Run', 'Gini index', 'graph_spec', 'init_wealth', 'init_capital_dist']
# %% save data
print("[INFO] Saving in", "data/"+exp_desc+".zip")
rd.to_csv("data/"+exp_desc+".zip", index=False, compression=dict(method='zip', archive_name='data.csv'))
# %% plot Gini data
plot_label = {"matthew" : "Matthew", "antimatthew" : "anti-Matthew", "strongmatthew" : "strong Matthew", "strongantimatthew": "strong anti-Matthew"}
plot_marker = {"matthew" : "bx", "antimatthew" : "go", "strongmatthew": "r+", "strongantimatthew": "k^"}
plt_marker_size = {"matthew" : 32}
gini_min = {}
gini_max = {}
hoover_min = {}
hoover_max = {}
poly_app_deg = 2
x_vals = range(20,141,20)
x_vals_dense = range(20,141,10)
gini_data = np.loadtxt(script_path+"/data/gini_index_values-constant.dat")
# hoover_data = np.loadtxt(script_path+"/data/hoover_index_values-constant.dat")
#%% plotting Gini index
# fig = mpl.figure.Figure(figsize=(10,8))
# for i,curr_eps in enumerate(eps_vals ):
# axs = fig.add_subplot(331+i)
# plot_desc = r'$\epsilon=$ '+str(curr_eps)#+r", grid("+str(grid_width)+'x'+str(grid_height)+')'
# axs.grid(alpha=0.5,ls='--')
# axs.plot(x_vals_dense,gini_data[x_vals_dense],'k-.')
# for b in ["matthew", "strongmatthew", "antimatthew", "strongantimatthew" ]:
# gini_max[b] = [rd[(rd.default_eps==curr_eps) & (rd.default_boost == b)][rd.num_agents==r]['Gini index'].max() for r in x_vals]
# gini_min[b] = [rd[(rd.default_eps==curr_eps) & (rd.default_boost == b)][rd.num_agents==r]['Gini index'].min() for r in x_vals]
# axs.plot(x_vals, gini_max[b], plot_marker[b], label=plot_label[b])
# axs.plot(x_vals, np.polyval(np.polyfit(x_vals,gini_max[b],poly_app_deg),x_vals), plot_marker[b][0]+":", linewidth=1)
# axs.plot(x_vals, gini_min[b], plot_marker[b])
# axs.plot(x_vals, np.polyval(np.polyfit(x_vals,gini_min[b],poly_app_deg),x_vals), plot_marker[b][0]+":", linewidth=1)
# # axs.plot(gini_data)
# # axs.plot(x_vals_dense, gini_data[x_vals_dense],"k",linewidth=1, markersize=4)
# #axs.set_xlabel('Number of agents')
# axs.set_xlim((2,x_vals[-1]+15))
# axs.set_ylim((0.0,0.8))
# # axs.set_ylabel('Gini index')
# # axs.legend(ncol=1, columnspacing=0, labelspacing=0.5)
# axs.set_title(plot_desc)
# handles, labels = axs.get_legend_handles_labels()
# lgd = fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.525,1.05), ncol=4)
# display(fig)
# fig.tight_layout()
# fig.savefig("plots/"+ exp_desc +"_gini.pdf", bbox_extra_artists=(lgd,), bbox_inches='tight')
# fig.savefig("plots/png/"+ exp_desc +".png")
# #%% plotting Hoover index
# fig = mpl.figure.Figure(figsize=(10,8))
# for i,curr_eps in enumerate(eps_vals ):
# axs = fig.add_subplot(331+i)
# plot_desc = r'$\epsilon=$ '+str(curr_eps)#+r", grid("+str(grid_width)+'x'+str(grid_height)+')'
# axs.grid(alpha=0.5,ls='--')
# axs.plot(x_vals_dense,hoover_data[x_vals_dense],'k-.')
# for b in ["matthew", "strongmatthew", "antimatthew", "strongantimatthew" ]:
# hoover_max[b] = [rd[(rd.default_eps==curr_eps) & (rd.default_boost == b)][rd.num_agents==r]['Hoover index'].max() for r in x_vals]
# hoover_min[b] = [rd[(rd.default_eps==curr_eps) & (rd.default_boost == b)][rd.num_agents==r]['Hoover index'].min() for r in x_vals]
# axs.plot(x_vals, hoover_max[b], plot_marker[b], label=plot_label[b])
# axs.plot(x_vals, np.polyval(np.polyfit(x_vals,hoover_max[b],poly_app_deg),x_vals), plot_marker[b][0]+":", linewidth=1)
# axs.plot(x_vals, hoover_min[b], plot_marker[b])
# axs.plot(x_vals, np.polyval(np.polyfit(x_vals,hoover_min[b],poly_app_deg),x_vals), plot_marker[b][0]+":", linewidth=1)
# # axs.plot(hoover_data)
# # axs.plot(x_vals_dense, hoover_data[x_vals_dense],"k",linewidth=1, markersize=4)
# #axs.set_xlabel('Number of agents')
# axs.set_xlim((2,x_vals[-1]+15))
# axs.set_ylim((0.0,0.8))
# # axs.set_ylabel('Hoover index')
# # axs.legend(ncol=1, columnspacing=0, labelspacing=0.5)
# axs.set_title(plot_desc)
# handles, labels = axs.get_legend_handles_labels()
# lgd = fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.525,1.05), ncol=4)
# display(fig)
# fig.tight_layout()
# fig.savefig("plots/"+ exp_desc + "_hoover.pdf", bbox_extra_artists=(lgd,), bbox_inches='tight')
# fig.savefig("plots/png/"+ exp_desc +".png")
# %% plot capital data
# #%% plotting
# fig = mpl.figure.Figure(figsize=(6,3/4*6))
# axs = fig.add_subplot()
# plot_desc = ""#curr_policy#+r", grid("+str(grid_width)+'x'+str(grid_height)+')'
# axs.grid(alpha=0.5,ls='--')
# axs.plot(x_vals_dense,gini_data[x_vals_dense],'k-.')
# for b in ["matthew", "strongmatthew", "antimatthew", "strongantimatthew" ]:
# gini_max[b] = [rd[(rd.default_boost == b)][rd.num_agents==r]['Gini index'].max() for r in x_vals]
# gini_min[b] = [rd[(rd.default_boost == b)][rd.num_agents==r]['Gini index'].min() for r in x_vals]
# axs.plot(x_vals, gini_max[b], plot_marker[b], label=plot_label[b])
# axs.plot(x_vals, np.polyval(np.polyfit(x_vals,gini_max[b],poly_app_deg),x_vals), plot_marker[b][0]+":", linewidth=1)
# axs.plot(x_vals, gini_min[b], plot_marker[b])
# axs.plot(x_vals, np.polyval(np.polyfit(x_vals,gini_min[b],poly_app_deg),x_vals), plot_marker[b][0]+":", linewidth=2)
# # axs.plot(gini_data)
# axs.plot(x_vals_dense, gini_data[x_vals_dense],"ok",linewidth=1, markersize=4)
# #axs.set_xlabel('Number of agents')
# axs.set_xlim((12,x_vals[-1]+8))
# axs.set_ylim((-.2,2.1))
# # axs.set_ylabel('Gini index')
# axs.legend(ncol=2, columnspacing=0, labelspacing=0.5)
# axs.set_title(plot_desc)
# display(fig)
# fig.tight_layout()
# fig.savefig("plots/"+ exp_desc +".pdf")
# fig.savefig("plots/png/"+ exp_desc +".png")
|
{"hexsha": "97205eb9dbea4093de18794154e0423a55429b77", "size": 9845, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model/Janosik_grid2d_batch.py", "max_stars_repo_name": "jmiszczak/matthew_reduction_game", "max_stars_repo_head_hexsha": "377c699f4ee908f8f7b84eafaf6a749149c59b81", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/model/Janosik_grid2d_batch.py", "max_issues_repo_name": "jmiszczak/matthew_reduction_game", "max_issues_repo_head_hexsha": "377c699f4ee908f8f7b84eafaf6a749149c59b81", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model/Janosik_grid2d_batch.py", "max_forks_repo_name": "jmiszczak/matthew_reduction_game", "max_forks_repo_head_hexsha": "377c699f4ee908f8f7b84eafaf6a749149c59b81", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3073929961, "max_line_length": 207, "alphanum_fraction": 0.6458100559, "include": true, "reason": "import numpy,import networkx", "num_tokens": 2838}
|
[STATEMENT]
lemma "\<forall>(x::'a::linordered_field) y. x \<noteq> y \<and> 5 * x \<le> y \<longrightarrow> 500 * x \<le> 100 * y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x y. x \<noteq> y \<and> (5::'a) * x \<le> y \<longrightarrow> (500::'a) * x \<le> (100::'a) * y
[PROOF STEP]
by ferrack
|
{"llama_tokens": 146, "file": null, "length": 1}
|
import numpy as np
# The Geometry and Polygon classes are adapted from
# https://github.com/Oktosha/DeepSDF-explained/blob/master/deepSDF-explained.ipynb
class Geometry(object):
EPS = 1e-12
@staticmethod
def distance_from_point_to_segment(a, b, p):
res = min(np.linalg.norm(a - p), np.linalg.norm(b - p))
if (np.linalg.norm(a - b) > Geometry.EPS
and np.dot(p - a, b - a) > Geometry.EPS
and np.dot(p - b, a - b) > Geometry.EPS):
res = abs(np.cross(p - a, b - a) / np.linalg.norm(b - a))
return res
class Polygon(object):
def __init__(self):
self.v = np.array([])
# Number of vertices/edges
self.num = 0
def set_v(self, v):
self.v = v
self.num = len(self.v)
def sdf(self, p):
return -self.distance(p) if self.inside(p) else self.distance(p)
def inside(self, p):
angle_sum = 0
for i in range(self.num):
a = self.v[i]
b = self.v[(i + 1) % self.num]
angle_sum += np.arctan2(np.cross(a - p, b - p), np.dot(a - p, b - p))
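        # the winding angle sums to roughly ±2π for interior points and ~0 outside,
        # so testing |angle_sum| > 1 radian is a robust inside check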
return abs(angle_sum) > 1
def distance(self, p):
res = Geometry.distance_from_point_to_segment(self.v[-1], self.v[0], p)
for i in range(len(self.v) - 1):
res = min(res, Geometry.distance_from_point_to_segment(self.v[i], self.v[i + 1], p))
return res
    def load(self, path, name):
        vertices = []
        with open(f'{path}{name}.txt', 'r') as f:
            for line in f:
                x, y = map(np.double, line.strip('\n').split(' '))
                vertices.append([x, y])
        self.set_v(np.array(vertices, dtype=np.double))
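# Minimal usage sketch (hypothetical square, not part of the original module):
#   square = Polygon()
#   square.set_v(np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=np.double))
#   square.sdf(np.array([0.5, 0.5]))  # negative: point lies inside
#   square.sdf(np.array([2.0, 0.5]))  # positive: point lies outside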
|
{"hexsha": "5abeb5b3cb91dbebbae327440a5eb92944aaa45b", "size": 1782, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/shape.py", "max_stars_repo_name": "mintpancake/2d-sdf-net", "max_stars_repo_head_hexsha": "6170f53f7eb5fc9fe84d1a164c615f9348115958", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-07-30T13:23:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T11:27:36.000Z", "max_issues_repo_path": "code/shape.py", "max_issues_repo_name": "mintpancake/2d-sdf-net", "max_issues_repo_head_hexsha": "6170f53f7eb5fc9fe84d1a164c615f9348115958", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/shape.py", "max_forks_repo_name": "mintpancake/2d-sdf-net", "max_forks_repo_head_hexsha": "6170f53f7eb5fc9fe84d1a164c615f9348115958", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8214285714, "max_line_length": 96, "alphanum_fraction": 0.544332211, "include": true, "reason": "import numpy", "num_tokens": 489}
|
import flask
from flask import Flask, redirect, url_for, request, render_template
import tensorflow as tf
import os
from PIL import Image
import numpy as np
import base64
import io
import tensorflow_hub as hub
MODELS_PATH = './models/'
BASE_MODEL = 'SRWNNbase.h5'
srwnnModelPath = MODELS_PATH + BASE_MODEL
denoise1ModelPath = MODELS_PATH + 'SRWNNdeNoise1.h5'
denoise2ModelPath = MODELS_PATH + 'SRWNNdeNoise2.h5'
denoise3ModelPath = MODELS_PATH + 'SRWNNdeNoise3.h5'
deblur1ModelPath = MODELS_PATH + 'SRWNNdeBlur1.h5'
deblur2ModelPath = MODELS_PATH + 'SRWNNdeBlur1.h5'  # placeholder: reuses the deBlur1 weights
deblur3ModelPath = MODELS_PATH + 'SRWNNdeBlur1.h5'  # placeholder: reuses the deBlur1 weights
app = Flask(__name__)
def validImage(image):
    # accept only uploads up to ~1 megapixel
    image = Image.open(image.stream)
    width, height = image.size
    return width * height <= 1000000
def validateResize(image):
    width, height = image.size
    return width * height <= 1000000
def reshapeImage(image):
image = Image.open(image.stream)
width, height = image.size
newWidth, newHeight = int(round(width*.80)), int(round(height*.80))
newImage = image.resize((newWidth, newHeight))
#print('IMAGE RESHAPED TO', newWidth, newHeight)
return newImage
def generate(imageInput, modelPath):
generator = tf.keras.models.load_model(modelPath)
isValidImage = validImage(imageInput)
if isValidImage:
imageInput = Image.open(imageInput.stream)
    else:
        #print('SRWNN reshapedimage')
        # downscale repeatedly until the pixel count is acceptable; reshapeImage
        # consumes the raw upload stream, so later passes resize the PIL image directly
        imageInput = reshapeImage(imageInput)
        while not validateResize(imageInput):
            width, height = imageInput.size
            imageInput = imageInput.resize((int(round(width*.80)), int(round(height*.80))))
arrayInput = np.array(imageInput)
input = tf.cast(arrayInput, tf.float32)[...,:3]
input = (input/127.5) - 1
image = tf.expand_dims(input, axis = 0)
genOutput = generator(image, training = False)
return genOutput[0, ...]
def esrganGenerator(imageInput):
generator = hub.load(MODELS_PATH + "esrgan-tf2_1")
isValidImage = validImage(imageInput)
if isValidImage:
imageInput = Image.open(imageInput.stream)
else:
#print('ESRGAN reshapedimage')
imageInput = reshapeImage(imageInput)
#imageInput = Image.open(imageInput.stream)
arrayInput = np.array(imageInput)
input = tf.cast(arrayInput, tf.float32)[...,:3]
image = tf.expand_dims(input, axis = 0)
genOutput = generator(image)
genOutput = tf.cast(tf.clip_by_value(genOutput, 0, 255), tf.uint8)
return genOutput[0, ...]
def getModelPath(modelConfig):
    if modelConfig == '0000': return srwnnModelPath
    if modelConfig == '0100': return denoise1ModelPath # change to actual model for images
    if modelConfig == '0010': return denoise1ModelPath
    if modelConfig == '0020': return denoise2ModelPath
    if modelConfig == '0030': return denoise3ModelPath
    if modelConfig == '0001': return deblur1ModelPath
    if modelConfig == '0002': return deblur2ModelPath
    if modelConfig == '0003': return deblur3ModelPath
    else: return srwnnModelPath
@app.route('/')
def index():
return "Super.Resolution.Waifu.Neural.Network"
@app.route('/generate', methods=['POST'])
def gen():
success = '0'
if request.files.get("image"):
image = request.files['image']
payload = request.form.to_dict()
modelConfig = payload['model']
#print('MODEL CONFI: ', modelConfig)
if modelConfig == '1000':
#print('USING ESRGAN')
try:
generatedImageArray = esrganGenerator(image)
generatedImage = Image.fromarray(np.uint8((generatedImageArray)), 'RGB')
buffer = io.BytesIO()
generatedImage.save(buffer,format="png")
imageBuffer = buffer.getvalue()
encodedImage = base64.b64encode(imageBuffer)
success = '1'
        except Exception:
encodedImage = ''
success = '0'
else:
#print('USING SRWNN')
try:
modelPathStr = getModelPath(modelConfig)
generatedImageArray = generate(image, modelPathStr)
generatedImage = Image.fromarray(np.uint8(((generatedImageArray+1)/2)*255), 'RGB')
buffer = io.BytesIO()
generatedImage.save(buffer,format="png")
imageBuffer = buffer.getvalue()
encodedImage = base64.b64encode(imageBuffer)
success = '1'
        except Exception:
encodedImage = ''
success = '0'
img_str = encodedImage
return flask.jsonify({'msg': str(success), 'img': str(img_str) })
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True)
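# Example request against a local run (hypothetical client call):
#   curl -X POST -F "image=@input.png" -F "model=0000" http://localhost:5000/generate
# The JSON response carries 'msg' ('1' on success) and the base64-encoded PNG in 'img'.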
|
{"hexsha": "50195361cb08c4bf8fd1eeaaeefd4ee6c8fab97f", "size": 4881, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "Exusai/srwnn-generator-server", "max_stars_repo_head_hexsha": "2223a0ad8a7caf6b2283073402dda632a9bea547", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "Exusai/srwnn-generator-server", "max_issues_repo_head_hexsha": "2223a0ad8a7caf6b2283073402dda632a9bea547", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "Exusai/srwnn-generator-server", "max_forks_repo_head_hexsha": "2223a0ad8a7caf6b2283073402dda632a9bea547", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4903225806, "max_line_length": 98, "alphanum_fraction": 0.6279450932, "include": true, "reason": "import numpy", "num_tokens": 1226}
|
using Gridap
using Gridap.Io
using GridapGmsh
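# Read the Gmsh mesh, export a VTK copy for visualization, and serialize the model to JSON.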
model = GmshDiscreteModel("elasticFlag_coarse.msh")
writevtk(model,"elasticFlag_coarse")
fn = "elasticFlag_coarse.json"
to_json_file(model,fn)
|
{"hexsha": "a3491586cc4dd1413d228bae88ead5aa44760fb9", "size": 192, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "models/elasticFlag_coarse.jl", "max_stars_repo_name": "gridapapps/GridapFSI.jl", "max_stars_repo_head_hexsha": "7924fccb46b7bbcd04715564559698aabf452190", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-08-05T14:25:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T18:57:24.000Z", "max_issues_repo_path": "models/elasticFlag_coarse.jl", "max_issues_repo_name": "gridapapps/GridapFSI.jl", "max_issues_repo_head_hexsha": "7924fccb46b7bbcd04715564559698aabf452190", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-08-12T18:49:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-05T08:57:27.000Z", "max_forks_repo_path": "models/elasticFlag_coarse.jl", "max_forks_repo_name": "gridapapps/GridapFSI.jl", "max_forks_repo_head_hexsha": "7924fccb46b7bbcd04715564559698aabf452190", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-03T20:01:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T14:27:14.000Z", "avg_line_length": 17.4545454545, "max_line_length": 51, "alphanum_fraction": 0.8125, "num_tokens": 58}
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import numpy as np
import staticchar as ch
from psbutils.misc import find_subrepo_directory
SUBREPO_DIR = find_subrepo_directory()
def test_integral_well_basic() -> None:
"""Assert that the integral method produces known values"""
# Load data
data_name = "S-shape"
data_folder = SUBREPO_DIR / "tests" / "test_data" / data_name
data = ch.Dataset(data_folder)
# Load config
spec_name = "integral_basic.yml"
spec_filename = SUBREPO_DIR / "tests" / "configs" / spec_name
config = ch.config.load(spec_filename, ch.config.CharacterizationConfig)
# Integral method for a single frame
example = data.get_a_frame()
subtracted = ch.subtract_background(
example, columns=config.background_subtract_columns(), strategy=ch.BackgroundChoices.Minimum
)
integral = ch.integrate(data=subtracted, signals=config.signals, interval=config.time_window)
rounded_integral = {k: np.round(v) for k, v in integral.items()}
expected_integral = {"EYFP": 4096, "ECFP": 8415}
assert rounded_integral == expected_integral
|
{"hexsha": "654c9e5518d21bab19782db9bc1650ed69c69877", "size": 1415, "ext": "py", "lang": "Python", "max_stars_repo_path": "PyStationB/libraries/StaticCharacterization/tests/test_integral.py", "max_stars_repo_name": "BrunoKM/station-b-libraries", "max_stars_repo_head_hexsha": "ea3591837e4a33f0bef789d905467754c27913b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-09-29T15:46:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T18:39:51.000Z", "max_issues_repo_path": "PyStationB/libraries/StaticCharacterization/tests/test_integral.py", "max_issues_repo_name": "BrunoKM/station-b-libraries", "max_issues_repo_head_hexsha": "ea3591837e4a33f0bef789d905467754c27913b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PyStationB/libraries/StaticCharacterization/tests/test_integral.py", "max_forks_repo_name": "BrunoKM/station-b-libraries", "max_forks_repo_head_hexsha": "ea3591837e4a33f0bef789d905467754c27913b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-09-27T10:35:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-02T17:53:07.000Z", "avg_line_length": 41.6176470588, "max_line_length": 100, "alphanum_fraction": 0.6367491166, "include": true, "reason": "import numpy", "num_tokens": 293}
|
#!/opt/local/bin/python3
import sys, math, re, time, os
import numpy as np
import numpy.random as rand
import random
import hashlib
from copy import deepcopy
#helpful resources: https://www.youtube.com/c/learnmeabitcoin/videos
#Txn for transaction
#BlkChn for blockchain
##################################################################################
class blockEntry:
_thisHash = None;
_preHash = None;
_timeTime = None;
_transactions = None;
def __init__(self, preHash, transactions):
self._transactions = deepcopy(transactions);
self._preHash = deepcopy(preHash);
self._timeTime = str(int(time.time()));
strToHash="".join(self._transactions) + self._preHash + self._timeTime;
self._thisHash = hashlib.sha1(strToHash.encode()).hexdigest();
def content(self):
return deepcopy({"transactions": self._transactions,\
"timeTime": self._timeTime,\
"thisHash": self._thisHash,\
"preHash": self._preHash});
def secretContentAccess(self):
return {"transactions": self._transactions,\
"timeTime": self._timeTime,\
"thisHash": self._thisHash,\
"preHash": self._preHash};
class userEntry:
__name = None;
__wallet = None;
__mineRate = None;
__txnRate = None;
__mempool = None;
__blockchain = None;
__updateIter = None;
__isMalicious = False;
def __init__(self, name, mineRate, txnRate, blockchain):
self.__name = deepcopy(name);
self.__blockchain = deepcopy(blockchain);
self.__wallet = 10;
self.__mineRate = mineRate;
self.__txnRate = txnRate;
self.__updateIter = 0;
self.__mempool = [];
self.updateTxnFromBlkChn();
def updateMempool(self, mempool, userList, selfPosted=False,\
clearSelfMempool=False, ignoreDoubleSpending=False):
if clearSelfMempool == True:
self.__mempool = [];
memNew = self.__mempool + mempool;
memNew = list(set(memNew));
blockNtoCheck = 3;
warnings = [];
for block in self.__blockchain[-blockNtoCheck:]:
transactions = block.content()["transactions"];
memNew = [m for m in memNew if m not in transactions];
#check wallet amount
walletNums = {"A": 0, "B": 0, "C": 0, "D": 0, "E": 0};
txnNums = {"A": 0, "B": 0, "C": 0, "D": 0, "E": 0};
for user in userList:
uName = user.content()["name"];
uWallet = user.content()["wallet"];
walletNums[uName] += uWallet;
#sort in time
memNewSort = [];
for transaction in memNew:
memNewSort.append([transaction.split(":")[-1], transaction]);
        # sort by timestamp while keeping the (time, transaction) pairs intact;
        # np.array(...).sort(axis=0) would sort the two columns independently and
        # scramble the pairing, so sort the list of pairs instead
        memNewSort.sort(key=lambda item: item[0]);
        memNew = [item[1] for item in memNewSort];
#remove unsolicited
txnWanted = "";
RmIdxs = 0;
for i, transaction in enumerate(memNew):
if "Rm_" in transaction:
txnWanted = transaction[1:].replace("Rm_", "");
if int(transaction[0]) > RmIdxs:
RmIdxs = int(transaction[0]);
RmIdxs += 1;
self.__mempool = [t for t in self.__mempool\
if (t != txnWanted) and ("Rm_" not in t)];
memNew = [t for t in memNew\
if (t != txnWanted) and ("Rm_" not in t)];
if txnWanted != "":
memNew.append(str(RmIdxs) + "Rm_" + txnWanted);
###
for transaction in self.__mempool:
if "Rm_" not in transaction:
sender = transaction.split("=>")[0];
txnNums[sender] += 1;
for transaction in memNew:
if transaction not in self.__mempool:
txn = transaction.split("=>");
sender = txn[0];
receiver = txn[1].split(":")[0];
coin = int(txn[1].split(":")[1]);
if "Rm_" in transaction:
if "9Rm_" not in transaction:
self.__mempool.append(transaction);
elif (selfPosted == False) and (sender == self.__name):
warnings.append(
self.genWarning(self.__name, receiver, transaction,
"unsolicited transaction"));
self.__mempool.append("0Rm_"+transaction);
elif coin > walletNums[sender]:
warnings.append(
self.genWarning(self.__name, sender, transaction,
"not enough coin in wallet"));
else:
txnNums[sender] += 1;
if txnNums[sender] > 1:
warnings.append(
self.genWarning(self.__name, sender, transaction,\
"only one transaction allowed "+\
"in memory pool"));
if ignoreDoubleSpending == True:
self.__mempool.append(transaction);
else:
self.__mempool.append(transaction);
return warnings;
def checkTxnValidityFromBlkChn(self, name, blockchain):
validity = True;
warnings = [];
txnWanted = "";
for transaction in self.__mempool:
if "Rm_" in transaction:
txnWanted = transaction[1:].replace("Rm_", "");
for block in blockchain[max(0, len(self.__blockchain)-3):]:
transactions = block.content()["transactions"];
for transaction in transactions:
txn = transaction.split("=>");
if "genesis" not in transaction:
formatBool = (len(txn) == 2) and\
(len(txn[1].split(":")) == 3) and\
(int(txn[1].split(":")[1]) >= 0);
if (formatBool == False) or\
("Rm_" in transaction):
validity = False;
warnings.append(self.genWarning(self.__name, name,\
transaction, "invalid transaction in block"));
elif transaction in warnings:
validity = False;
warnings.append(self.genWarning(self.__name, name,\
transaction, "unsolicited transaction in block"));
return validity, warnings;
def checkTxnDuplicateFromBlkChn(self, name, blockchain):
blockNtoCheck = 10;
validity = True;
txns = [];
warnings = [];
for block in blockchain[-blockNtoCheck:]:
transactions = block.content()["transactions"];
for transaction in transactions:
if transaction in txns:
validity = False;
warnings.append(self.genWarning(self.__name, name,\
transaction, "repeated transaction"));
txns = txns + transactions;
return validity, warnings;
def genWarning(self, nameFind, nameCulprit, transaction, reason):
warning = "WARNING: "+nameFind+" reject transaction from ";
warning += nameCulprit+" (";
warning += transaction.split(":")[0]+":"+transaction.split(":")[1]+"): ";
warning += reason+".";
return warning;
def checkBlkChnValidity(self, blockchain):
validity = True;
warnings = [];
for i, block in enumerate(blockchain):
if i != 0:
transactions = block.content()["transactions"];
preHash = block.content()["preHash"];
timeTime = block.content()["timeTime"];
strToHash = "".join(transactions) + preHash + timeTime;
thisHash = hashlib.sha1(strToHash.encode()).hexdigest();
if (block.content()["thisHash"] != thisHash) and\
(self.__isMalicious == False):
validity = False;
blockName = "block" + str(i);
firstTxn = block.content()["transactions"][0];
warnings.append(self.genWarning(self.__name,blockName,firstTxn,
"block hash mismatching"));
return validity, warnings;
def checkBlkChnLength(self, blockchain):
if len(blockchain) > len(self.__blockchain):
return True;
return False;
def updateBlkChn(self, blockchain):
self.__updateIter = len(self.__blockchain) - 1;
while self.__blockchain[self.__updateIter].content()["thisHash"] != \
blockchain[self.__updateIter].content()["thisHash"]:
self.__reverseWallet(self.__blockchain[self.__updateIter]);
self.__updateIter -= 1;
if self.__updateIter < 0:
break;
self.__updateIter += 1;
self.__blockchain = deepcopy(blockchain);
def __reverseWallet(self, block):
transactions = block.content()["transactions"];
if "genesis" in transactions:
pass;
else:
for transaction in transactions:
txn = transaction.split("=>");
sender = txn[0];
receiver = txn[1].split(":")[0];
coin = int(txn[1].split(":")[1]);
if sender == self.__name:
self.__wallet += coin;
if receiver == self.__name:
self.__wallet -= coin;
def updateTxnFromBlkChn(self):
for block in self.__blockchain[self.__updateIter:]:
transactions = block.content()["transactions"];
if "genesis" in transactions:
pass;
else:
for transaction in transactions:
txn = transaction.split("=>");
sender = txn[0];
receiver = txn[1].split(":")[0];
coin = int(txn[1].split(":")[1]);
if sender == self.__name:
self.__wallet -= coin;
if receiver == self.__name:
self.__wallet += coin;
self.__mempool = [t for t in self.__mempool if t not in transactions];
self.__updateIter = len(self.__blockchain);
return 0;
def getTxnRate(self):
return deepcopy(self.__txnRate);
def getMineRate(self):
return deepcopy(self.__mineRate);
def mine(self):
blockNtoCheck = 3;
for block in self.__blockchain[-blockNtoCheck:]:
transactions = block.content()["transactions"];
self.__mempool = [m for m in self.__mempool if m not in transactions];
txnStr = "SYS=>" + self.__name + ":1"; #earning 1 coin for mining
txnStr += ":" + str(int(np.ceil(time.time()*pow(10, 3))));
self.__mempool.append(txnStr);
blockPre = self.__blockchain[-1];
block = blockEntry(blockPre.content()["thisHash"], self.__mempool);
self.__blockchain.append(block);
self.__mempool = [];
def content(self):
return deepcopy({"name": self.__name,\
"wallet": self.__wallet,\
"mempool": self.__mempool,\
"blockchain": self.__blockchain});
def setMalicious(self, isMalicious):
self.__isMalicious = isMalicious;
def secretContentAccess(self):
return {"name": self.__name,\
"wallet": self.__wallet,\
"mempool": self.__mempool,\
"blockchain": self.__blockchain};
|
{"hexsha": "bb58913ba987d29cd64a9c2ea2744314253dc281", "size": 11982, "ext": "py", "lang": "Python", "max_stars_repo_path": "bitCoinFuncs.py", "max_stars_repo_name": "Rabbitybunny/zMisc_bitCoinSim", "max_stars_repo_head_hexsha": "9ce7e9fded23778afe38a5fd00a2c13b602554a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bitCoinFuncs.py", "max_issues_repo_name": "Rabbitybunny/zMisc_bitCoinSim", "max_issues_repo_head_hexsha": "9ce7e9fded23778afe38a5fd00a2c13b602554a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bitCoinFuncs.py", "max_forks_repo_name": "Rabbitybunny/zMisc_bitCoinSim", "max_forks_repo_head_hexsha": "9ce7e9fded23778afe38a5fd00a2c13b602554a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0514705882, "max_line_length": 83, "alphanum_fraction": 0.5160240361, "include": true, "reason": "import numpy", "num_tokens": 2587}
|
#!/usr/bin/env python2.7
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Cadishi --- CAlculation of DIStance HIstograms
#
# Copyright (c) Klaus Reuter, Juergen Koefinger
# See the file AUTHORS.rst for the full list of contributors.
#
# Released under the MIT License, see the file LICENSE.txt.
"""
Example on how-to access HDF5 data saved by Cadishi or Capriqorn.
We open a data file, find out the number of frames and the frame
indices, select the last frame, pick two datasets and plot them
using matplotlib.
"""
import numpy as np
import matplotlib.pyplot as plt
from cadishi.io.hdf5 import H5Reader
file_name="histograms.h5"
# open the HDF5 file via the Cadishi HDF5 reader
reader = H5Reader(file=file_name)
# obtain information about the trajectory
ti = reader.get_trajectory_information()
# get the last available frame index
idx = ti.frame_numbers[-1]
# load the frame into memory as a base.Container() object (nested structure of
# dicts and NumPy arrays)
frm = reader.get_frame(idx)
# optional during development: explore the base.Container() object
#print frm.get_keys('/')
#print frm.get_keys('/histograms')
# access the radial distance histogram of the C atoms via a path-like string,
# default locations used by Cadishi and Capriqorn are defined in cadihi.base
radii = frm.get_data('/histograms/radii')
histo_cc = frm.get_data('/histograms/C,C')
# let us plot the histogram via matplotlib
plt.plot(radii, histo_cc)
plt.xlabel('radius')
plt.ylabel('count')
plt.title('Cadishi distance histogram of C,C')
plt.grid(True)
# save the plot to an image file
plt.savefig("histo_cc.svg")
# finally, display the plot on the screen
plt.show()
|
{"hexsha": "b089b4188098edb376c63129ef708d27f8c793c4", "size": 1759, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/scripts/plot_hdf5_data.py", "max_stars_repo_name": "bio-phys/cadishi", "max_stars_repo_head_hexsha": "b44351fcb77737c6a6da5249a0c24ee8e34f72d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2017-08-22T13:00:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T14:07:55.000Z", "max_issues_repo_path": "doc/html/_downloads/plot_hdf5_data.py", "max_issues_repo_name": "bio-phys/cadishi", "max_issues_repo_head_hexsha": "b44351fcb77737c6a6da5249a0c24ee8e34f72d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-19T14:07:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-19T14:07:38.000Z", "max_forks_repo_path": "doc/html/_downloads/plot_hdf5_data.py", "max_forks_repo_name": "bio-phys/cadishi", "max_forks_repo_head_hexsha": "b44351fcb77737c6a6da5249a0c24ee8e34f72d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3275862069, "max_line_length": 78, "alphanum_fraction": 0.7578169414, "include": true, "reason": "import numpy", "num_tokens": 446}
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
cmap = plt.cm.viridis
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def convert_2d_to_3d(u, v, z, K):  # back-project a 2D pixel (u, v) with depth z into 3D space
v0 = K[1][2]
u0 = K[0][2]
fy = K[1][1]
fx = K[0][0]
x = (u-u0)*z/fx
y = (v-v0)*z/fy
return (x, y, z)
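# (K is the 3x3 camera intrinsics matrix [[fx, 0, u0], [0, fy, v0], [0, 0, 1]],
# matching the indexing used above.)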
# feature point matching between the two frames
def feature_match(img1, img2, max_n_features):
r''' Find features on both images and match them pairwise
'''
use_flann = False # better not use flann
detector = cv2.xfeatures2d.SIFT_create(max_n_features)
# find the keypoints and descriptors with SIFT
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
if (des1 is None) or (des2 is None):
return [], []
des1 = des1.astype(np.float32)
des2 = des2.astype(np.float32)
if use_flann:
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
else:
        matcher = cv2.DescriptorMatcher_create('BruteForce')
matches = matcher.knnMatch(des1,des2,k=2)
good = []
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
if m.distance < 0.8*n.distance:
good.append(m)
pts1.append(kp1[m.queryIdx].pt)
pts2.append(kp2[m.trainIdx].pt)
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
return pts1, pts2
def depth_colorize(depth):
depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth))
depth = 255 * cmap(depth)[:, :, :3] # H, W, C
return depth.astype('uint8')
# use the fundamental matrix (RANSAC) to reject outlier matches
def Fundamental(kp1,kp2):
kp1=np.array(kp1)
kp2=np.array(kp2)
if(len(kp1)>=4):
#H,mask=cv2.findHomography(kp1,kp2,cv2.RANSAC)
F, mask = cv2.findFundamentalMat(kp1,kp2,cv2.FM_RANSAC,2,0.99)
if F is None:
print('F matrix is None.')
return [],[]
else:
kp1=kp1[mask.ravel()==1]
kp2=kp2[mask.ravel()==1]
return kp1,kp2
## recover the relative pose with PnP
def get_pose_pnp(rgb_curr, rgb_near, depth_curr, depth_near, K):  # (truncated original note: an oheight argument existed because the image bottom is cropped, hence from 3d->)
gray_curr = rgb2gray(rgb_curr).astype(np.uint8)
gray_near = rgb2gray(rgb_near).astype(np.uint8)
height, width = gray_curr.shape
    max_n_features_pose = 1000
    pts2d_curr, pts2d_near = feature_match(gray_curr, gray_near, max_n_features_pose)  # feature matching
    #pts2d_curr, pts2d_near = Fundamental(pts2d_curr,pts2d_near)
    # dilate the depth maps so features that fall just off a valid pixel still pick up depth
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(4, 4))
    depth_curr_int = depth_curr.astype(np.int16)  # cv2.dilate needs an integer type here
depth_near_int = depth_near.astype(np.int16)
depth_curr_dilated = cv2.dilate(depth_curr_int, kernel)
depth_near_dilated = cv2.dilate(depth_near_int, kernel)
# extract 3d pts
pts3d_curr = []
pts2d_near_filtered = [] # keep only feature points with depth in the current frame
pts3d_near = []
pts2d_curr_filtered = []
sparse_input = np.zeros((height, width))
sparse_input_near = np.zeros((height, width))
    # pixel coordinates: u is the column index and v is the row index of the image array
    for i in range(len(pts2d_curr)):  # loop over the matched feature points and assign depths
        u, v = pts2d_curr[i][0], pts2d_curr[i][1]
        u_n, v_n = pts2d_near[i][0], pts2d_near[i][1]
        z = depth_curr_dilated[v, u]
        z_n = depth_near_dilated[v_n, u_n]
        sparse_input[v, u] = z  # sparse depth map of the current frame: depth only at feature locations
        sparse_input_near[v_n, u_n] = z_n  # sparse depth map of the nearby frame
if z > 0:
xyz_curr = convert_2d_to_3d(u, v, z, K)
pts3d_curr.append(xyz_curr)
pts2d_near_filtered.append(pts2d_near[i])
if z_n > 0:
xyz_near = convert_2d_to_3d(u_n, v_n, z_n, K)
pts3d_near.append(xyz_near)
pts2d_curr_filtered.append(pts2d_curr[i])
pts_for_loss = pts2d_curr
pts_for_loss_near = pts2d_near
# the minimal number of points accepted by solvePnP is 4:
if len(pts3d_curr)>=4 and len(pts2d_near_filtered)>=4 and len(pts3d_near)>=4 and len(pts2d_curr_filtered)>=4:
        ## pose from the current frame to the nearby frame
pts3d_curr = np.expand_dims(np.array(pts3d_curr).astype(np.float32), axis=1)
pts2d_near_filtered = np.expand_dims(np.array(pts2d_near_filtered).astype(np.float32), axis=1)
# ransac
ret = cv2.solvePnPRansac(pts3d_curr, pts2d_near_filtered, K, distCoeffs=None)
success = ret[0]
rotation_vector = ret[1]
translation_vector = ret[2]
        ## pose from the nearby frame back to the current frame
pts3d_near = np.expand_dims(np.array(pts3d_near).astype(np.float32), axis=1)
pts2d_curr_filtered = np.expand_dims(np.array(pts2d_curr_filtered).astype(np.float32), axis=1)
ret_inv = cv2.solvePnPRansac(pts3d_near, pts2d_curr_filtered, K, distCoeffs=None)
success_inv = ret_inv[0]
rotation_vector_inv = ret_inv[1]
translation_vector_inv = ret_inv[2]
return (success and success_inv, rotation_vector, translation_vector, rotation_vector_inv, translation_vector_inv,\
sparse_input, depth_curr_dilated, sparse_input_near, depth_near_dilated, pts_for_loss, pts_for_loss_near)
else:
return (0, None, None, None, None, sparse_input, depth_curr_dilated, sparse_input_near, depth_near_dilated, pts_for_loss, pts_for_loss_near)
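# The rotation vectors returned above are in axis-angle form; if a caller needs a
# 3x3 matrix, OpenCV can convert one (sketch): R_mat, _ = cv2.Rodrigues(rotation_vector)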
|
{"hexsha": "c06c4e452f17a4642b2cc747e9fbbbbdccd4edfd", "size": 5344, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataloaders/pose_estimator.py", "max_stars_repo_name": "Hansry/Semi-supervised-depth-estimation", "max_stars_repo_head_hexsha": "1e8e77d9074ce8e5e2471705d843627a0111b8e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dataloaders/pose_estimator.py", "max_issues_repo_name": "Hansry/Semi-supervised-depth-estimation", "max_issues_repo_head_hexsha": "1e8e77d9074ce8e5e2471705d843627a0111b8e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataloaders/pose_estimator.py", "max_forks_repo_name": "Hansry/Semi-supervised-depth-estimation", "max_forks_repo_head_hexsha": "1e8e77d9074ce8e5e2471705d843627a0111b8e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6100628931, "max_line_length": 146, "alphanum_fraction": 0.6792664671, "include": true, "reason": "import numpy", "num_tokens": 1807}
|
import copy
import numpy as np
import numpy.ma as ma
def find_peak(field, comp=0, max_radius=None, min_radius=None):
"""Find the peak magnitude of a component in the field.
Args:
field ``GraspField``: The field to work on.
comp int: The field component to look at.
max_radius float: Ignore portions of the grid outside this radius from the center of the field.
        min_radius float: Ignore portions of the grid inside this radius from the center of the field.
Returns:
x_peak float:, y_peak float: The x and y values of the peak value."""
x_vals, y_vals = field.positions_1d
f = abs(field.field[:, :, comp])
if max_radius is not None:
rad = field.radius_grid()
rad_max_mask = ma.masked_greater(rad, max_radius)
f = ma.array(f, mask=rad_max_mask.mask)
if min_radius is not None:
rad = field.radius_grid()
rad_min_mask = ma.masked_less(rad, min_radius)
f = ma.array(f, mask=rad_min_mask.mask)
ny, nx = np.unravel_index(np.argmax(abs(f)), f.shape)
x_peak = x_vals[nx]
y_peak = y_vals[ny]
return x_peak, y_peak
# find the center of illumination of the field
def find_center(field, comp=0, trunc_level=0.0, max_radius=None, min_radius=None):
"""Find the center of illumination by finding the "center of mass" of the field.
    Args:
field ``GraspField``: The field to work on.
comp int: The field component to look at.
trunc_level float: Ignore the contributions from portions of the grid below this field level.
max_radius float: Ignore portions of the grid outside this radius from the center of the field.
        min_radius float: Ignore portions of the grid inside this radius from the center of the field.
Returns:
x_cent float, y_cent float: The x and y values of the center of the field."""
xv, yv = field.positions
f = abs(field.field[:, :, comp])
if trunc_level != 0.0:
f = ma.masked_less_equal(f, trunc_level)
xv = ma.array(xv, mask=f.mask)
yv = ma.array(yv, mask=f.mask)
if max_radius is not None:
rad = field.radius_grid()
rad_max_mask = ma.masked_greater(rad, max_radius)
f = ma.array(f, mask=rad_max_mask.mask)
xv = ma.array(xv, mask=rad_max_mask.mask)
yv = ma.array(yv, mask=rad_max_mask.mask)
if min_radius is not None:
rad = field.radius_grid()
rad_min_mask = ma.masked_less(rad, min_radius)
f = ma.array(f, mask=rad_min_mask.mask)
xv = ma.array(xv, mask=rad_min_mask.mask)
yv = ma.array(yv, mask=rad_min_mask.mask)
x_illum = xv * f
y_illum = yv * f
norm = np.sum(f)
x_cent = np.sum(x_illum) / norm
y_cent = np.sum(y_illum) / norm
return x_cent, y_cent
def combine_grids(grids, coherent=True):
"""Sum fields from different grid files.
Assumes that all fields across grids have the same positions, size, etc.
Args:
        grids list of ``GraspGrid``: List of grid objects to combine.
coherent bool: Determines whether to form coherent or incoherent sum. Defaults to coherent as that is the more
likely use case.
Returns:
``GraspGrid``: A grid object containing a field for each of the matched fields in the supplied list of grids."""
    new_grid = copy.deepcopy(grids[0])
    for n, field in enumerate(new_grid.fields):
        new_grid.fields[n].field = np.zeros_like(grids[0].fields[n].field)
        for g, grid in enumerate(grids):
            if coherent:
                new_grid.fields[n].field += grids[g].fields[n].field
            else:
                new_grid.fields[n].field += np.abs(grids[g].fields[n].field)
    return new_grid
|
{"hexsha": "a628abc7a266cab97f4ec4fb372b994afedb76a7", "size": 3740, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/graspfile/analysis/grid.py", "max_stars_repo_name": "Spect4tor/python-graspfile", "max_stars_repo_head_hexsha": "b1d623c018edb5f27714b083e967d924527b7509", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-08T09:46:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T09:46:03.000Z", "max_issues_repo_path": "src/graspfile/analysis/grid.py", "max_issues_repo_name": "Spect4tor/python-graspfile", "max_issues_repo_head_hexsha": "b1d623c018edb5f27714b083e967d924527b7509", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-05-07T18:42:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-02T17:22:34.000Z", "max_forks_repo_path": "src/graspfile/analysis/grid.py", "max_forks_repo_name": "PaulKGrimes/python-graspfile", "max_forks_repo_head_hexsha": "4ab4b652a615344596699eda92b6a744b1321bbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-04T11:02:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T11:02:29.000Z", "avg_line_length": 37.0297029703, "max_line_length": 120, "alphanum_fraction": 0.6518716578, "include": true, "reason": "import numpy", "num_tokens": 932}
|
[STATEMENT]
lemma poly_compose_mult:
assumes "is_poly_tuple m fs"
assumes "length fs = n"
assumes "f \<in> carrier (Q\<^sub>p[\<X>\<^bsub>n\<^esub>])"
assumes "g \<in> carrier (Q\<^sub>p[\<X>\<^bsub>n\<^esub>])"
shows "Qp_poly_comp m fs (f \<otimes>\<^bsub>Q\<^sub>p[\<X>\<^bsub>n\<^esub>]\<^esub> g) = (Qp_poly_comp m fs f) \<otimes>\<^bsub>Q\<^sub>p[\<X>\<^bsub>m\<^esub>]\<^esub> (Qp_poly_comp m fs g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Qp_poly_comp m fs (f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>n\<^esub>]\<^esub> g) = Qp_poly_comp m fs f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>m\<^esub>]\<^esub> Qp_poly_comp m fs g
[PROOF STEP]
using Qp.cring_axioms assms poly_compose_mult
[PROOF STATE]
proof (prove)
using this:
cring Q\<^sub>p
is_poly_tuple m fs
length fs = n
f \<in> carrier (Q\<^sub>p [\<X>\<^bsub>n\<^esub>])
g \<in> carrier (Q\<^sub>p [\<X>\<^bsub>n\<^esub>])
\<lbrakk>is_poly_tuple ?m ?fs; length ?fs = ?n; ?f \<in> carrier (Q\<^sub>p [\<X>\<^bsub>?n\<^esub>]); ?g \<in> carrier (Q\<^sub>p [\<X>\<^bsub>?n\<^esub>])\<rbrakk> \<Longrightarrow> poly_compose ?n ?m ?fs (?f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>?n\<^esub>]\<^esub> ?g) = poly_compose ?n ?m ?fs ?f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>?m\<^esub>]\<^esub> poly_compose ?n ?m ?fs ?g
goal (1 subgoal):
1. Qp_poly_comp m fs (f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>n\<^esub>]\<^esub> g) = Qp_poly_comp m fs f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>m\<^esub>]\<^esub> Qp_poly_comp m fs g
[PROOF STEP]
unfolding is_poly_tuple_def Qp_poly_comp_def
[PROOF STATE]
proof (prove)
using this:
cring Q\<^sub>p
set fs \<subseteq> carrier (Q\<^sub>p [\<X>\<^bsub>m\<^esub>])
length fs = n
f \<in> carrier (Q\<^sub>p [\<X>\<^bsub>n\<^esub>])
g \<in> carrier (Q\<^sub>p [\<X>\<^bsub>n\<^esub>])
\<lbrakk>set ?fs \<subseteq> carrier (Q\<^sub>p [\<X>\<^bsub>?m\<^esub>]); length ?fs = ?n; ?f \<in> carrier (Q\<^sub>p [\<X>\<^bsub>?n\<^esub>]); ?g \<in> carrier (Q\<^sub>p [\<X>\<^bsub>?n\<^esub>])\<rbrakk> \<Longrightarrow> poly_compose ?n ?m ?fs (?f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>?n\<^esub>]\<^esub> ?g) = poly_compose ?n ?m ?fs ?f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>?m\<^esub>]\<^esub> poly_compose ?n ?m ?fs ?g
goal (1 subgoal):
1. poly_compose (length fs) m fs (f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>n\<^esub>]\<^esub> g) = poly_compose (length fs) m fs f \<otimes>\<^bsub>Q\<^sub>p [\<X>\<^bsub>m\<^esub>]\<^esub> poly_compose (length fs) m fs g
[PROOF STEP]
by blast
|
{"llama_tokens": 1144, "file": "Padic_Field_Padic_Field_Powers", "length": 3}
|
from utils.templates.func import dict_to_par
def template_class(name,message,default,className,estimator):
string = """
class {0}(object):
def __init__(self):
print("This is {1} Model")
self.default ={3}
def getClassName(self):
return "{2}"
def getLibraryName(self):
return ""
def train_model(self,train,test,target,parameter,n_estimators = 500):
values={4}
try:
for key in self.default.keys():
                values[key] = parameter[key] if key in parameter else self.default[key]
except:
values = self.default
try:
model = {2}({5})
model.fit(train,target)
except:
model = {2}()
model.fit(train,target)
self.results = model.predict(test)
def prediction(self):
return self.results
""".format(name,message,className,default,"{}",estimator)
print(string)
# template("RTECModel","Random Tree Embedded Classifier",
# "{'warm_start': False, 'n_jobs': 1, 'verbose': 0, 'max_leaf_nodes': None, 'min_samples_leaf': 1, 'n_estimators': 10, 'min_weight_fraction_leaf': 0.0, 'random_state': None, 'sparse_output': True, 'min_samples_split': 2, 'max_depth': 5}",
# "RandomTreesEmbedding",
# "n_estimators=values['n_estimators'],max_depth=values['max_depth'],min_samples_split=values['min_samples_split'],min_samples_leaf=values['min_samples_leaf'],min_weight_fraction_leaf=values['min_weight_fraction_leaf'],max_leaf_nodes=values['max_leaf_nodes'],sparse_output=values['sparse_output'],n_jobs=values['n_jobs'],random_state=values['random_state'],verbose=values['verbose'],warm_start=values['warm_start']",
# )
def template_code_snippet(script_gen_bool,library,class_name,trainPath,testPath,index,target,subPath,estimator,save_at,script_at):
# library=""
# class_name=""
# inPath=""
# outPath=""
# subPath=""
# index=""
# target=""
# estimator=""
# save_at=""
if script_gen_bool:
string = """
import numpy as np
import pandas as pd
from {0} import {1}
def basic_treatment(df): # this is most general basic treatment for most cases and is applied to treat by default
for col in df:
if df[col].dtype == np.dtype('O'):
df[col] = df[col].apply(lambda x : hash(str(x)))
df.fillna(-1, inplace = True)
def output_function_cat(x):
if x < 0.5:
return 0
elif x > 1.5:
return 2
else:
return 1
def trimmer(data,to_trim,target_var):
    features = data.columns.tolist()
    trimmed_features = list(features)
    for feature in features:
        if str(to_trim).__contains__(","):
            trim = str(to_trim).split(",")
            if feature in trim:
                trimmed_features.remove(feature)
        else:
            if feature == to_trim:
                trimmed_features.remove(feature)
    if trimmed_features.__contains__(target_var):
        trimmed_features.remove(target_var)
    return data[trimmed_features]
train = pd.read_csv('{2}')
test = pd.read_csv('{3}')
basic_treatment(train)
basic_treatment(test)
target = train['{5}']
model = {1}({7})
model.fit(trimmer(train,'{5}','{4}'),target)
results = model.predict(trimmer(test,'{5}','{4}'))
results = [output_function_cat(x) for x in results]
sub = pd.read_csv('{6}')
sub['{4}'] = test['{4}']
sub['{5}'] = test['{5}']
sub.to_csv('{8}',index=False)
""".format(library,class_name,trainPath,testPath,index,target,subPath,estimator,save_at)
print(string)
with open(script_at, "w") as text_file:
text_file.write("{}".format(string))
|
{"hexsha": "ad26a94064b81da518447dfd6c84e6f260096fa2", "size": 3785, "ext": "py", "lang": "Python", "max_stars_repo_path": "fatigue/DriverFatigueness/utils/templates/template_Gen.py", "max_stars_repo_name": "jkapila/paper-codebase", "max_stars_repo_head_hexsha": "35198a924b66299cab0bf405d4f5ab54ca504be9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fatigue/DriverFatigueness/utils/templates/template_Gen.py", "max_issues_repo_name": "jkapila/paper-codebase", "max_issues_repo_head_hexsha": "35198a924b66299cab0bf405d4f5ab54ca504be9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fatigue/DriverFatigueness/utils/templates/template_Gen.py", "max_forks_repo_name": "jkapila/paper-codebase", "max_forks_repo_head_hexsha": "35198a924b66299cab0bf405d4f5ab54ca504be9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0245901639, "max_line_length": 425, "alphanum_fraction": 0.6052840159, "include": true, "reason": "import numpy", "num_tokens": 903}
|
import ast
import json
import numpy as np
from methods.utils import isSquared, progressiveSustitution, regresiveSustitution
def doolittle(A, b):
A = ast.literal_eval(A)
b = ast.literal_eval(b)
n = len(A[0])
A = np.array(A).astype(float)
b = np.array(b).astype(float)
U = np.zeros((n,n))
L = np.eye(n, dtype=float)
res = {}
pivots = []
diag=1
#Validates if matrix is squared
if(not isSquared(A)):
res["source"] = 'Not square matrix!'
res["error"] = True
return res
# Determines if det is 0
if(np.linalg.det(A) == 0):
res["source"] = 'Determinant is 0'
res["error"] = True
return res
# L,U = inicializa(n,0)
for k in range(n):
suma1 = 0
for p in range(0,k):
suma1 += L[k][p]*U[p][k]
U[k][k] = A[k][k]-suma1
for i in range(k+1,n):
suma2 = 0
for p in range(k):
suma2 += L[i][p]*U[p][k]
L[i][k] = (A[i][k]-suma2)/float(U[k][k])
for j in range(k+1,n):
suma3 = 0
for p in range(k):
suma3 += L[k][p]*U[p][j]
U[k][j]= (A[k][j]-suma3)/float(L[k][k])
#imprimir L U y k etapa
pivots.append({'step': k, "L": json.dumps(L.tolist()), "U": json.dumps(U.tolist())})
Lb = np.concatenate([L,b.reshape((A.shape[0],1)) ], axis=1)
for i in range(0,n):
diag = diag*U[i][i]
if(diag != 0):
indexes = np.arange(0, n)
z = progressiveSustitution(Lb, n, indexes)
z = np.array(z).astype(float)
Uz = np.concatenate([U, z.reshape((U.shape[0], 1))], axis=1)
results = regresiveSustitution(Uz, n - 1, indexes)
else:
res["source"] = 'Infinite solutions'
res["error"] = True
return res
res["pivots"] = pivots
res["error"] = False
res["results"] = results
return res
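
# A minimal usage sketch (assumes `methods.utils` is importable; arguments are
# string literals because doolittle() parses them with ast.literal_eval):
if __name__ == "__main__":
    out = doolittle("[[4, 1], [1, 3]]", "[1, 2]")
    print(out["error"], out.get("results"))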
|
{"hexsha": "b7255811b806d48ca5f5524b02809a39edc70ca8", "size": 1989, "ext": "py", "lang": "Python", "max_stars_repo_path": "methods/doolittle.py", "max_stars_repo_name": "eechava6/NumericalAnalysis", "max_stars_repo_head_hexsha": "1b44349fe4c5e24413c3d5faeca7d227272814ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "methods/doolittle.py", "max_issues_repo_name": "eechava6/NumericalAnalysis", "max_issues_repo_head_hexsha": "1b44349fe4c5e24413c3d5faeca7d227272814ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "methods/doolittle.py", "max_forks_repo_name": "eechava6/NumericalAnalysis", "max_forks_repo_head_hexsha": "1b44349fe4c5e24413c3d5faeca7d227272814ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6, "max_line_length": 94, "alphanum_fraction": 0.4937154349, "include": true, "reason": "import numpy", "num_tokens": 608}
|
subroutine edgeele(edge,mrng,neface,ne,bcel,n_bcel)
! Find the element index which containing part of edge and return as bcel
! edge is from 1 to 4 fro 2-D case
integer edge ! edge index what to find
integer mrng(neface,ne) ! boundary information
integer ne ! number of element
integer neface ! edge per element
integer bcel(n_bcel) ! returning results
integer n_bcel ! number of elements having part of edge
integer i
integer j
integer k
k=0
do i=1,ne
do j=1,neface
if(mrng(j,i) == edge) then
k=k+1
bcel(k)=i
endif
enddo
enddo
end
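
! A minimal driver, as a sketch (array sizes here are purely illustrative):
program test_edgeele
  integer :: mrng(4,2), bcel(2)
  mrng = 0
  mrng(1,1) = 3   ! element 1 touches edge 3
  mrng(2,2) = 3   ! element 2 touches edge 3
  call edgeele(3, mrng, 4, 2, bcel, 2)
  print *, 'elements on edge 3:', bcel
end program test_edgeele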
|
{"hexsha": "dc2ba949d70680613e8a27028a8ca2db6e3df505", "size": 602, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "edgeele.f90", "max_stars_repo_name": "biofluids/IFEM-archive", "max_stars_repo_head_hexsha": "ed14a3ff980251ba98e519a64fb0549d4c991744", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-15T11:40:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-15T11:40:16.000Z", "max_issues_repo_path": "edgeele.f90", "max_issues_repo_name": "biofluids/IFEM-archive", "max_issues_repo_head_hexsha": "ed14a3ff980251ba98e519a64fb0549d4c991744", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "edgeele.f90", "max_forks_repo_name": "biofluids/IFEM-archive", "max_forks_repo_head_hexsha": "ed14a3ff980251ba98e519a64fb0549d4c991744", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-11T16:50:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T07:24:59.000Z", "avg_line_length": 22.2962962963, "max_line_length": 73, "alphanum_fraction": 0.6760797342, "num_tokens": 180}
|
module TypeDB_tutorial
# Write your package code here.
end
|
{"hexsha": "23c8e6f8c75ccef10a0b329f48b76dd0b15fd2e7", "size": 61, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TypeDB_tutorial.jl", "max_stars_repo_name": "FrankUrbach/TypeDB_tutorial", "max_stars_repo_head_hexsha": "51937c34a8d410861313368ed0d6a4569ef3c369", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-30T02:45:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:45:04.000Z", "max_issues_repo_path": "src/TypeDB_tutorial.jl", "max_issues_repo_name": "FrankUrbach/TypeDB_tutorial", "max_issues_repo_head_hexsha": "51937c34a8d410861313368ed0d6a4569ef3c369", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TypeDB_tutorial.jl", "max_forks_repo_name": "FrankUrbach/TypeDB_tutorial", "max_forks_repo_head_hexsha": "51937c34a8d410861313368ed0d6a4569ef3c369", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.1666666667, "max_line_length": 31, "alphanum_fraction": 0.7868852459, "num_tokens": 15}
|
# -*- coding: utf-8 -*-
import numpy
import torch
class Metric:
def __init__(self, num_scales):
self.num_scales = num_scales
self.sum_metric = [0.0 for i in range(num_scales * 2)]
self.num_update = 0
self.multiply_factor = 10000
def update(self, loss_branch):
for i in range(self.num_scales):
loss_score = loss_branch[i * 2]
loss_bbox = loss_branch[i * 2 + 1]
self.sum_metric[i * 2] += loss_score
self.sum_metric[i * 2 + 1] += loss_bbox
self.num_update += 1
def get(self):
return_string_list = []
for i in range(self.num_scales):
return_string_list.append('cls_loss_score_' + str(i))
return_string_list.append('reg_loss_bbox_' + str(i))
        return return_string_list, [m / self.num_update * self.multiply_factor for m in self.sum_metric]
def reset(self):
self.sum_metric = [0.0 for i in range(self.num_scales * 2)]
self.num_update = 0
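
if __name__ == "__main__":
    # Small usage sketch: accumulate per-scale losses for two scales, then read
    # back the averaged (and x10000-scaled) metrics.
    metric = Metric(num_scales=2)
    metric.update([0.5, 0.2, 0.4, 0.1])  # [cls_0, reg_0, cls_1, reg_1]
    metric.update([0.3, 0.2, 0.2, 0.1])
    names, values = metric.get()
    print(list(zip(names, values)))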
|
{"hexsha": "a9b00606aafc9bd679b07d5ccd5e101b2c4707d9", "size": 1030, "ext": "py", "lang": "Python", "max_stars_repo_path": "face_detection/metric_farm/metric_default.py", "max_stars_repo_name": "CNN-NISER/lffd-pytorch", "max_stars_repo_head_hexsha": "7d6476ece79cf75c6265c89346ddac48929ce8f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 220, "max_stars_repo_stars_event_min_datetime": "2019-10-14T01:08:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T06:42:39.000Z", "max_issues_repo_path": "face_detection/metric_farm/metric_default.py", "max_issues_repo_name": "CNN-NISER/lffd-pytorch", "max_issues_repo_head_hexsha": "7d6476ece79cf75c6265c89346ddac48929ce8f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-10-16T07:40:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T07:46:14.000Z", "max_forks_repo_path": "face_detection/metric_farm/metric_default.py", "max_forks_repo_name": "CNN-NISER/lffd-pytorch", "max_forks_repo_head_hexsha": "7d6476ece79cf75c6265c89346ddac48929ce8f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 37, "max_forks_repo_forks_event_min_datetime": "2019-10-22T01:49:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T13:50:30.000Z", "avg_line_length": 29.4285714286, "max_line_length": 118, "alphanum_fraction": 0.6077669903, "include": true, "reason": "import numpy", "num_tokens": 264}
|
from __future__ import division
import cv2
import numpy as np
import rtmp
import analyzer
import drawer
import processor
##
# Drangonfly - Main video analyzer
# Takes a video and analyze it for features
# Edmund
##
capture = rtmp.captureVideo("rtmp://192.168.1.139:1935/live/edmund live=1 buffer=10")
oldFrame = rtmp.getFrame(capture)#[92:,:]
currentText = np.array([[0,0,0,0]])
frameSkipCount = 0
displaySkipCount = 0
def combineMultiple(rectArray):
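    # heuristically combine the individual confidence scores (alignment,
    # closeness, width/height ratio), then rescale the result towards [0, 1]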
    confidAlignment = processor.alignmentTextConfidence(rectArray,100)
    confidCloseness = processor.closenessTextConfidence(rectArray,250)
    confidRatio = processor.simpleWHRatio(rectArray,1.2,1)
    confidCombined = confidAlignment[:,:1]/(35/100) + confidAlignment[:,1:2]/(35/100) + confidCloseness/(20/100) + confidRatio/(10/100)
# confidCombined = confidAlignment[:,:1]/4 + confidAlignment[:,1:2]/4 + confidCloseness/4 + confidRatio/4
confidCombined = (confidCombined - confidCombined.min())/confidCombined.max()
return confidCombined
while(True):
#** get frame
currentFrame = rtmp.getFrame(capture)#[92:,:] #[68:,:] #[92:,:]
    # frame skipping is currently disabled: the counter never leaves zero
    if frameSkipCount != 0:
        frameSkipCount += 1
        continue
grayFrame = cv2.cvtColor(cv2.cvtColor(currentFrame, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
black = np.zeros(currentFrame.shape)
displayFrame = np.copy(currentFrame)
#** analyze frames
# text = analyzer.gradientText(currentFrame,5,10,100,.4)
# cannyTextRect = analyzer.cannyText(currentFrame,5,10,100,1000,.4)
#** create output frame
# displayFrame = drawer.drawRectangles(displayFrame,cannyTextRect,(255,0,255),2)
# output2 = drawer.drawRectangles(grayFrame,cannyText,(0,255,255),3)
# Alignment testing
# confidAlignment = processor.alignmentTextConfidence(cannyTextRect,100)
# zeroPad = np.zeros((confidAlignment.shape[0],1))
# confidAlignment = np.append(zeroPad,confidAlignment,axis=1)
# confidOutput = drawer.drawRectanglesWithColors(grayFrame,cannyTextRect,confidAlignment,2)
# Alignment testing (COMBINED)
# confidAlignment = processor.alignmentTextConfidence(cannyTextRect,100)
# confidAlignmentCombined = confidAlignment[:,:1]/2 + confidAlignment[:,1:2]/2
# zeroPad = np.zeros((confidAlignment.shape[0],2))
# confidAlignmentCombined = np.append(zeroPad,confidAlignmentCombined,axis=1)
# confidOutput = drawer.drawRectanglesWithColors(grayFrame,cannyTextRect,confidAlignmentCombined,2)
# Closeness testing
# confidCloseness = processor.closenessTextConfidence(cannyTextRect,250)
# zeroPad = np.zeros((confidCloseness.shape[0],2))
# confidCloseness = np.append(zeroPad,confidCloseness,axis=1)
# confidOutput = drawer.drawRectanglesWithColors(grayFrame,cannyTextRect,confidCloseness,2)
# Combine both
# confidCombine = np.append(confidCloseness,confidAlignment,axis=1)
# confidOutput = drawer.drawRectanglesWithColors(currentFrame,cannyTextRect,confidCombine,2)
# w,h ratio
# confidRatio = processor.simpleWHRatio(cannyTextRect,1.2,1)
# zeroPad = np.zeros((confidRatio.shape[0],2))
# confidRatio = np.append(zeroPad,confidRatio,axis=1)
# confidOutput = drawer.drawRectanglesWithColors(grayFrame,cannyTextRect,confidRatio,2)
# combine multiplier
# confid = combineMultiple(cannyTextRect)
# zeroPad = np.zeros((confid.shape[0],2))
# confid = np.append(zeroPad,confid,axis=1)
# confidOutput = drawer.drawRectanglesWithColors(grayFrame,cannyTextRect,confid,2)
#
# newDiff = analyzer.simpleDifference(oldFrame,currentFrame)
# if newDiff.size:
# if currentText.size:
# combinedNewDiff = processor.combineDiff(newDiff,currentFrame,5,150,100)
# currentText = processor.cleanDiff(currentText,combinedNewDiff,currentFrame) # remove old box in changed areas
# imagesForProcess = processor.getDiffImages(combinedNewDiff,currentFrame,10,10) # get image of each newly changed area
#
# # process newly changed area
# for imageObj in imagesForProcess:
# cannyTextRectLocale = analyzer.cannyText(imageObj["image"],5,10,100,1000,.4) #find text
# imageCoordRectArray = processor.convertLocalCoordsToImageCoords(cannyTextRectLocale,imageObj) #conver local coords to image coords
# currentText = np.vstack([currentText,imageCoordRectArray])
#
# displayFrame = drawer.drawRectangles(displayFrame,combinedNewDiff,(255,0,255),2)
#
# else:
# currentText = newDiff
#
# displayFrame = drawer.drawRectangles(displayFrame,currentText,(0,0,255),2)
#
# if displaySkipCount == 19:
# displaySkipCount = 0
# cv2.imshow("video", displayFrame)
# else:
# displaySkipCount += 1
# continue
cv2.imshow("video", displayFrame)
oldFrame = currentFrame
if cv2.waitKey(1) & 0xFF == ord('q'):
break
|
{"hexsha": "5c613fba88c18bc8e796e7fc1234fa01ba4ecfe5", "size": 5020, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "edfungus/Text-Detection-from-RTMP-Stream", "max_stars_repo_head_hexsha": "f84a9185cbf2d49a24f372687b0960fe85a0e195", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-12-28T03:52:42.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-26T03:50:19.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "edfungus/Text-Detection-from-RTMP-Stream", "max_issues_repo_head_hexsha": "f84a9185cbf2d49a24f372687b0960fe85a0e195", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-06-08T10:56:19.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-24T05:12:50.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "edfungus/Text-Detection-from-RTMP-Stream", "max_forks_repo_head_hexsha": "f84a9185cbf2d49a24f372687b0960fe85a0e195", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4838709677, "max_line_length": 148, "alphanum_fraction": 0.7085657371, "include": true, "reason": "import numpy", "num_tokens": 1398}
|
import numpy as np
import pandas as pd
from remodnav.clf import deg_per_pixel, EyegazeClassifier
from neurogaze.analyze import _get_screen_x_y
from neurogaze.gaze import SAMPLING_RATE
def longest_stretch(df, col='left_gaze_point_on_display_area_x'):
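    """Return the longest contiguous block of rows where `col` has no NaNs.

    The NaN mask is padded on both sides and scanned for transitions; each
    pair of transition indices delimits one NaN-free run, and the longest
    run is returned as a slice of the dataframe.
    """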
a = df[col].values
m = np.concatenate(([True], np.isnan(a), [True]))
ss = np.flatnonzero(m[1:] != m[:-1]).reshape(-1, 2)
start, stop = ss[(ss[:, 1] - ss[:, 0]).argmax()]
return df.iloc[start:stop]
def classify_events(df):
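    """Classify gaze samples into eye-movement events with REMoDNaV.

    Viewing distance is estimated from the mean gaze-origin z coordinate
    (scaled by 1/10), gaze points are mapped to 2560x1440 screen pixels, and
    the preprocessed samples are handed to the EyegazeClassifier.
    """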
viewing_distance = (
df.left_gaze_origin_in_user_coordinate_system_z.mean() / 10
)
px2deg = deg_per_pixel(
screen_size=31,
viewing_distance=viewing_distance,
screen_resolution=2560,
)
clf = EyegazeClassifier(
px2deg=px2deg,
sampling_rate=SAMPLING_RATE,
)
tmp_df = pd.DataFrame()
tmp_df['x'], tmp_df['y'] = _get_screen_x_y(df, 2560, 1440)
tmp_df.to_csv('test.csv', index=False, header=False)
data = np.recfromcsv(
'test.csv',
delimiter=',',
names=['x', 'y'],
usecols=[0, 1])
pp = clf.preproc(data, savgol_length=0)
events = clf(pp, classify_isp=True, sort_events=True)
return clf, pp, events, data
|
{"hexsha": "8342b56704decee50ff659a5450e8a5dfcbd1ed0", "size": 1249, "ext": "py", "lang": "Python", "max_stars_repo_path": "neurogaze/analyze/gaze_classification.py", "max_stars_repo_name": "chrizzzlybear/neurogaze_research", "max_stars_repo_head_hexsha": "6d22a1e54b9f941333a935db4795f014fa1efe26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neurogaze/analyze/gaze_classification.py", "max_issues_repo_name": "chrizzzlybear/neurogaze_research", "max_issues_repo_head_hexsha": "6d22a1e54b9f941333a935db4795f014fa1efe26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neurogaze/analyze/gaze_classification.py", "max_forks_repo_name": "chrizzzlybear/neurogaze_research", "max_forks_repo_head_hexsha": "6d22a1e54b9f941333a935db4795f014fa1efe26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0208333333, "max_line_length": 67, "alphanum_fraction": 0.6381104884, "include": true, "reason": "import numpy", "num_tokens": 352}
|
import numpy as np
import pytest
from kiez.neighbors import HNSW, NNG, Annoy, SklearnNN
rng = np.random.RandomState(2)
@pytest.mark.parametrize("algo_cls", [HNSW, SklearnNN, NNG, Annoy])
def test_str_rep(algo_cls, n_samples=20, n_features=5):
source = rng.rand(n_samples, n_features)
algo = algo_cls()
assert "is unfitted" in str(algo._describe_source_target_fitted())
algo.fit(source, source)
assert "is fitted" in str(algo._describe_source_target_fitted())
def test_check_k_value():
with pytest.raises(ValueError) as exc_info:
SklearnNN()._check_k_value(k=-1, needed_space=2)
assert "Expected" in str(exc_info.value)
with pytest.raises(TypeError) as exc_info:
SklearnNN()._check_k_value(k="test", needed_space=2)
assert "integer" in str(exc_info.value)
checked = SklearnNN()._check_k_value(k=3, needed_space=2)
assert checked == 2
|
{"hexsha": "d181a3df22c5539a6556b39c75d244e2cad56453", "size": 900, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/neighbors/test_base.py", "max_stars_repo_name": "cthoyt/kiez", "max_stars_repo_head_hexsha": "25f9f103ed51d4084e10f7ac532bb24183fe3894", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-07-22T12:35:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T04:35:17.000Z", "max_issues_repo_path": "tests/neighbors/test_base.py", "max_issues_repo_name": "cthoyt/kiez", "max_issues_repo_head_hexsha": "25f9f103ed51d4084e10f7ac532bb24183fe3894", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-07-23T11:20:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-06T12:59:06.000Z", "max_forks_repo_path": "tests/neighbors/test_base.py", "max_forks_repo_name": "cthoyt/kiez", "max_forks_repo_head_hexsha": "25f9f103ed51d4084e10f7ac532bb24183fe3894", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-23T10:53:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-01T01:14:37.000Z", "avg_line_length": 32.1428571429, "max_line_length": 70, "alphanum_fraction": 0.7255555556, "include": true, "reason": "import numpy", "num_tokens": 245}
|
import argparse
import datetime
import glob
import os
import pickle
import numpy as np
import time
import sys
from loguru import logger
import torch
from torch import autograd
from utils.load_synth_data import process_loaded_sequences
from train_functions.train_sahp import make_model, train_eval_sahp
DEFAULT_BATCH_SIZE = 32
DEFAULT_HIDDEN_SIZE = 16
DEFAULT_LEARN_RATE = 5e-5
parser = argparse.ArgumentParser(description="Train the models.")
parser.add_argument('-e', '--epochs', type=int, default = 1000,
help='number of epochs.')
parser.add_argument('-b', '--batch', type=int,
dest='batch_size', default=DEFAULT_BATCH_SIZE,
help='batch size. (default: {})'.format(DEFAULT_BATCH_SIZE))
parser.add_argument('--lr', default=DEFAULT_LEARN_RATE, type=float,
help="set the optimizer learning rate. (default {})".format(DEFAULT_LEARN_RATE))
parser.add_argument('--hidden', type=int,
dest='hidden_size', default=DEFAULT_HIDDEN_SIZE,
help='number of hidden units. (default: {})'.format(DEFAULT_HIDDEN_SIZE))
parser.add_argument('--d-model', type=int, default=DEFAULT_HIDDEN_SIZE)
parser.add_argument('--atten-heads', type=int, default=8)
parser.add_argument('--pe', type=str,default='add',help='concat, add')
parser.add_argument('--nLayers', type=int, default=4)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--cuda', type=int, default=0)
parser.add_argument('--train-ratio', type=float, default=0.8,
help='override the size of the training dataset.')
parser.add_argument('--lambda-l2', type=float, default=3e-4,
help='regularization loss.')
parser.add_argument('--dev-ratio', type=float, default=0.1,
help='override the size of the dev dataset.')
parser.add_argument('--early-stop-threshold', type=float, default=1e-2,
help='early_stop_threshold')
parser.add_argument('--log-dir', type=str,
dest='log_dir', default='logs',
help="training logs target directory.")
parser.add_argument('--save_model', default=True,
                    help="save the models state dict and loss history.")
parser.add_argument('--bias', default=False,
help="use bias on the activation (intensity) layer.")
parser.add_argument('--samples', default=10,
help="number of samples in the integral.")
parser.add_argument('-m', '--model', default='sahp',
type=str, choices=['sahp'],
help='choose which models to train.')
parser.add_argument('-t', '--task', type=str, default='retweet',
help = 'task type')
args = parser.parse_args()
if torch.cuda.is_available():
USE_CUDA = True
else:
USE_CUDA = False
# SYNTH_DATA_FILES = glob.glob("../data/simulated/*.pkl")
# TYPE_SIZE_DICT = {'retweet': 3, 'bookorder':8, 'meme':5000, 'mimic':75, 'stackOverflow':22,
# 'synthetic':2}
# REAL_WORLD_TASKS = list(TYPE_SIZE_DICT.keys())[:5]
# SYNTHETIC_TASKS = list(TYPE_SIZE_DICT.keys())[5:]
start_time = time.time()
if __name__ == '__main__':
args.log_dir = os.path.join(args.log_dir, args.task)
os.makedirs(args.log_dir, exist_ok=True)
# sys.stdout = open(os.path.join(args.log_dir, "log.txt"), "w")
logger.add(os.path.join(args.log_dir, "log_{time}.log"))
logger.info(args)
cuda_num = 'cuda:{}'.format(args.cuda)
device = torch.device(cuda_num if USE_CUDA else 'cpu')
logger.info("Training on device {}".format(device))
# process_dim = TYPE_SIZE_DICT[args.task]
# logger.info("Loading {}-dimensional process.".format(process_dim), end=' \n')
# if args.task in SYNTHETIC_TASKS:
# logger.info("Available files:")
# for i, s in enumerate(SYNTH_DATA_FILES):
# logger.info("{:<8}{:<8}".format(i, s))
#
# chosen_file_index = -1
# chosen_file = SYNTH_DATA_FILES[chosen_file_index]
# logger.info('chosen file:%s'+str(chosen_file))
#
# with open(chosen_file, 'rb') as f:
# loaded_hawkes_data = pickle.load(f)
#
# mu = loaded_hawkes_data['mu']
# alpha = loaded_hawkes_data['alpha']
# decay = loaded_hawkes_data['decay']
# tmax = loaded_hawkes_data['tmax']
# logger.info("Simulated Hawkes process parameters:")
# for label, val in [("mu", mu), ("alpha", alpha), ("decay", decay), ("tmax", tmax)]:
# logger.info("{:<20}{}".format(label, val))
#
# seq_times, seq_types, seq_lengths, _ = process_loaded_sequences(loaded_hawkes_data, process_dim)
#
# seq_times = seq_times.to(device)
# seq_types = seq_types.to(device)
# seq_lengths = seq_lengths.to(device)
#
# total_sample_size = seq_times.size(0)
# logger.info("Total sample size: {}".format(total_sample_size))
#
# train_ratio = args.train_ratio
# train_size = int(train_ratio * total_sample_size)
# dev_ratio = args.dev_ratio
# dev_size = int(dev_ratio * total_sample_size)
# logger.info("Train sample size: {:}/{:}".format(train_size, total_sample_size))
# logger.info("Dev sample size: {:}/{:}".format(dev_size, total_sample_size))
#
# # Define training data
# train_times_tensor = seq_times[:train_size]
# train_seq_types = seq_types[:train_size]
# train_seq_lengths = seq_lengths[:train_size]
# logger.info("No. of event tokens in training subset:", train_seq_lengths.sum())
#
# # Define development data
# dev_times_tensor = seq_times[train_size:]#train_size+dev_size
# dev_seq_types = seq_types[train_size:]
# dev_seq_lengths = seq_lengths[train_size:]
# logger.info("No. of event tokens in development subset:", dev_seq_lengths.sum())
#
# test_times_tensor = dev_times_tensor
# test_seq_types = dev_seq_types
# test_seq_lengths = dev_seq_lengths
# logger.info("No. of event tokens in test subset:", test_seq_lengths.sum())
#
# elif args.task in REAL_WORLD_TASKS:
train_path = './data/' + args.task + '/train_manifold_format.pkl'
dev_path = './data/' + args.task + '/dev_manifold_format.pkl'
test_path = './data/' + args.task + '/test_manifold_format.pkl'
chosen_file = args.task
with open(train_path, 'rb') as f:
train_hawkes_data = pickle.load(f)
with open(dev_path, 'rb') as f:
dev_hawkes_data = pickle.load(f)
with open(test_path, 'rb') as f:
test_hawkes_data = pickle.load(f)
with open(os.path.join("data", args.task, "train.pkl"), "rb") as f:
try:
_origin_data = pickle.load(f)
except:
_origin_data = pickle.load(f, encoding='latin-1')
process_dim = _origin_data["dim_process"]
train_seq_times, train_seq_types, train_seq_lengths, train_tmax = \
process_loaded_sequences(train_hawkes_data, process_dim)
dev_seq_times, dev_seq_types, dev_seq_lengths, dev_tmax = \
process_loaded_sequences(dev_hawkes_data, process_dim)
test_seq_times, test_seq_types, test_seq_lengths, test_tmax = \
process_loaded_sequences(test_hawkes_data, process_dim)
tmax = max([train_tmax,dev_tmax,test_tmax])
train_sample_size = train_seq_times.size(0)
logger.info("Train sample size: {}".format(train_sample_size))
dev_sample_size = dev_seq_times.size(0)
logger.info("Dev sample size: {}".format(dev_sample_size))
test_sample_size = test_seq_times.size(0)
logger.info("Test sample size: {}".format(test_sample_size))
# Define training data
train_times_tensor = train_seq_times.to(device)
train_seq_types = train_seq_types.to(device)
train_seq_lengths = train_seq_lengths.to(device)
logger.info("No. of event tokens in training subset:{}".format(train_seq_lengths.sum()))
# Define development data
dev_times_tensor = dev_seq_times.to(device)
dev_seq_types = dev_seq_types.to(device)
dev_seq_lengths = dev_seq_lengths.to(device)
logger.info("No. of event tokens in development subset:{}".format(dev_seq_lengths.sum()))
# Define test data
test_times_tensor = test_seq_times.to(device)
test_seq_types = test_seq_types.to(device)
test_seq_lengths = test_seq_lengths.to(device)
logger.info("No. of event tokens in test subset:{}".format(test_seq_lengths.sum()))
# else:
# exit()
MODEL_TOKEN = args.model
logger.info("Chose models {}".format(MODEL_TOKEN))
hidden_size = args.hidden_size
logger.info("Hidden size: {}".format(hidden_size))
learning_rate = args.lr
# Training parameters
BATCH_SIZE = args.batch_size
EPOCHS = args.epochs
model = None
if MODEL_TOKEN == 'sahp':
# with autograd.detect_anomaly(False):
params = args, process_dim, device, tmax, \
train_times_tensor, train_seq_types, train_seq_lengths, \
dev_times_tensor, dev_seq_types, dev_seq_lengths, \
test_times_tensor, test_seq_types, test_seq_lengths, \
BATCH_SIZE, EPOCHS, USE_CUDA, logger
model = train_eval_sahp(params)
else:
exit()
if args.save_model:
# Model file dump
SAVED_MODELS_PATH = os.path.abspath(os.path.join(args.log_dir, 'saved_models'))
os.makedirs(SAVED_MODELS_PATH, exist_ok=True)
# print("Saved models directory: {}".format(SAVED_MODELS_PATH))
date_format = "%Y%m%d-%H%M%S"
now_timestamp = datetime.datetime.now().strftime(date_format)
extra_tag = "{}".format(args.task)
filename_base = "{}-{}_hidden{}-{}".format(
MODEL_TOKEN, extra_tag,
hidden_size, now_timestamp)
from utils.save_model import save_model
save_model(model, chosen_file, extra_tag,
hidden_size, now_timestamp, SAVED_MODELS_PATH, MODEL_TOKEN)
    logger.info('Done! time elapsed %.2f sec for %d epochs' % (time.time() - start_time, EPOCHS))
    # sys.stdout.close() is only needed if stdout was redirected above
|
{"hexsha": "5ca9e381bb38093fdfbd289d76938189c2a7056d", "size": 10193, "ext": "py", "lang": "Python", "max_stars_repo_path": "sahp/sahp_training/main_func.py", "max_stars_repo_name": "yangalan123/anhp-andtt", "max_stars_repo_head_hexsha": "b907f3808ed2ce1616edb1bc2229993a6742cee9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2022-01-05T15:34:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T02:17:03.000Z", "max_issues_repo_path": "sahp/sahp_training/main_func.py", "max_issues_repo_name": "yangalan123/anhp-andtt", "max_issues_repo_head_hexsha": "b907f3808ed2ce1616edb1bc2229993a6742cee9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-15T07:58:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-16T03:30:42.000Z", "max_forks_repo_path": "sahp/sahp_training/main_func.py", "max_forks_repo_name": "yangalan123/anhp-andtt", "max_forks_repo_head_hexsha": "b907f3808ed2ce1616edb1bc2229993a6742cee9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-04T02:23:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T02:23:48.000Z", "avg_line_length": 41.9465020576, "max_line_length": 106, "alphanum_fraction": 0.65427254, "include": true, "reason": "import numpy", "num_tokens": 2449}
|
#!/usr/bin/env python
# coding=utf-8
"""
Fine-tuning a 🤗 Transformers model on summarization.
"""
import argparse
import logging
import math
import os
import random
from pathlib import Path
import datasets
import numpy as np
import torch
from datasets import load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator, DeepSpeedPlugin
from filelock import FileLock
from huggingface_hub import Repository
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AdamW,
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
SchedulerType,
get_scheduler,
set_seed,
)
from transformers.file_utils import is_offline_mode
from transformers.utils.versions import require_version
from rouge_score import rouge_scorer
from soft_q_loss import SoftQLearningCriterion
from data_utils import get_raw_dataset, process_raw_dataset, postprocess_text
logger = logging.getLogger(__name__)

_has_wandb = False
try:
    import wandb

    _has_wandb = True
except ImportError:
    logger.warning(
        "W&B logger is not installed; "
        "for advanced logging please install it with `pip install wandb`."
    )
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
# You should update this to your particular problem to have better documentation of `model_type`
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a summarization task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--ignore_pad_token_for_loss",
type=bool,
default=True,
help="Whether to ignore the tokens corresponding to " "padded labels in the loss computation or not.",
)
parser.add_argument(
"--max_source_length",
type=int,
default=1024,
help="The maximum total input sequence length after "
"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--source_prefix",
type=str,
default=None,
help="A prefix to add before every source text " "(useful for T5 models).",
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--max_target_length",
type=int,
default=128,
help="The maximum total sequence length for target text after "
"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
"during ``evaluate`` and ``predict``.",
)
parser.add_argument(
"--val_max_target_length",
type=int,
default=None,
help="The maximum total sequence length for validation "
"target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be "
"padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` "
"param of ``model.generate``, which is used during ``evaluate`` and ``predict``.",
)
parser.add_argument(
"--max_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed."
),
)
parser.add_argument(
"--num_beams",
type=int,
default=None,
help="Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.",
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--text_column",
type=str,
default=None,
help="The name of the column in the datasets containing the full texts (for summarization).",
)
parser.add_argument(
"--summary_column",
type=str,
default=None,
help="The name of the column in the datasets containing the summaries (for summarization).",
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--model_type",
type=str,
default=None,
help="Model type to use if training from scratch.",
choices=MODEL_TYPES,
)
parser.add_argument(
"--cache_dir",
type=str,
default="~/.cache/huggingface/datasets",
help="Cache directory for datasets."
)
parser.add_argument(
"--job_name",
type=str,
default="kd_experiment",
help="W&B job name."
)
parser.add_argument(
"--project_name",
type=str,
default="summarization-kd",
help="W&B project name."
)
# soft-Q loss arguments
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--reward-shaping', action='store_true',
help='Whether use reward shaping')
parser.add_argument('--old-r-min', default=0., type=float,
help='Original minimum reward value')
parser.add_argument('--old-r-max', default=1.0, type=float,
help='Original maximum reward value')
parser.add_argument('--new-r-min', default=-0.5, type=float,
help='Minimum reward value after reshaping')
parser.add_argument('--new-r-max', default=0.5, type=float,
help='Maximum reward value after reshaping')
parser.add_argument('--gamma-pcl', default=1.0, type=float,
help='Reward discount factor')
parser.add_argument('--tau-pcl', default=1.0, type=float,
help='Shannon entropy coefficient in PCL')
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
return args
def setup_wandb(args, model, resume_id=None):
if _has_wandb:
if resume_id is not None:
wandb.init(
project=args.project_name,
group=args.job_name,
dir="./",
resume="allow",
id=resume_id,
)
else:
wandb.init(project=args.project_name, group=args.job_name, dir="./")
wandb.config.update(args, allow_val_change=True)
wandb.watch(model)
else:
logger.info("W&B library not installed. Using only CLI logging.")
def report_metrics(lr, loss, step):
current_lr = lr[0] if type(lr) == list else lr
if _has_wandb:
log_info = {
f"train/lr": current_lr,
f"train/train_loss": loss,
}
wandb.log(log_info, step=step)
def load_pretrained_model_and_tokenizer(
model_name_or_path,
config_name,
tokenizer_name,
model_type=None,
use_slow_tokenizer=False
):
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if config_name:
config = AutoConfig.from_pretrained(config_name)
elif model_name_or_path:
config = AutoConfig.from_pretrained(model_name_or_path)
else:
config = CONFIG_MAPPING[model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=not use_slow_tokenizer)
elif model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=not use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_name_or_path,
from_tf=bool(".ckpt" in model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
return config, tokenizer, model
def setup_optimizer(args, model):
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
return optimizer
def eval(args, accelerator, model, tokenizer, eval_dataloader, metric):
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
    gen_kwargs = {
        "max_length": args.val_max_target_length,
        "num_beams": args.num_beams,
    }
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
generated_tokens = accelerator.unwrap_model(model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = accelerator.pad_across_processes(
generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
)
labels = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(
batch["labels"], dim=1, pad_index=tokenizer.pad_token_id
)
generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
labels = accelerator.gather(labels).cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
if isinstance(generated_tokens, tuple):
generated_tokens = generated_tokens[0]
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(predictions=decoded_preds, references=decoded_labels)
def generate(
args,
accelerator,
model,
tokenizer,
batch,
do_sample=False
):
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
    gen_kwargs = {
        "max_length": args.val_max_target_length,
        "num_beams": 1,
    }
with torch.no_grad():
generated_tokens = accelerator.unwrap_model(model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
do_sample=do_sample,
)
return generated_tokens
def decode_ids_to_strs(
args,
labels,
accelerator,
tokenizer
):
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(
labels, dim=1, pad_index=tokenizer.pad_token_id
)
labels = labels.cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
return decoded_labels
def calculate_rouge(args, decoded_preds, decoded_labels):
assert len(decoded_preds) == len(decoded_labels), \
"predicts: {}; references: {}".format(len(decoded_preds), len(decoded_labels))
scorer = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
rouge_scores, rewards, length_rewards = [], [], []
for r, p in zip(decoded_labels, decoded_preds):
rouge = scorer.score(r, p)
rouge_scores.append(rouge)
reward = rouge['rouge1'].fmeasure
# if args.length_reward:
# length_reward = 1.0 if len(p) <= len(r) else 0.0
# reward += length_reward
# length_rewards.append(length_reward)
rewards.append(reward)
return rewards
def main():
args = parse_args()
if args.source_prefix is None and args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=args.gradient_accumulation_steps)
# accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Load pretrained model and tokenizer
config, tokenizer, model = load_pretrained_model_and_tokenizer(
args.model_name_or_path,
args.config_name,
args.tokenizer_name,
model_type=args.model_type,
use_slow_tokenizer=args.use_slow_tokenizer
)
_, _, tgt_model = load_pretrained_model_and_tokenizer(
args.model_name_or_path,
args.config_name,
args.tokenizer_name,
model_type=args.model_type,
use_slow_tokenizer=args.use_slow_tokenizer
)
model.resize_token_embeddings(len(tokenizer))
tgt_model.resize_token_embeddings(len(tokenizer))
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
# setup W&B logging
if accelerator.is_main_process:
setup_wandb(args, model, resume_id=None)
# Get the raw dataset
raw_datasets = get_raw_dataset(args)
# Preprocessing the datasets.
processed_datasets = process_raw_dataset(args, accelerator, raw_datasets, tokenizer)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 1):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
# Prepare loss function
criterion = SoftQLearningCriterion(1, args.label_smoothing)
# Prepare optimizer
optimizer = setup_optimizer(args, model)
# Prepare everything with our `accelerator`.
model, tgt_model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, tgt_model, optimizer, train_dataloader, eval_dataloader
)
# Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Metric
metric = load_metric("rouge")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
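
    # Soft (Polyak) target-network update: theta' <- (1 - tau) * theta' + tau * theta,
    # so the target model tracks a slowly moving average of the online model.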
def polyak_update(model, model_, target_lr=0.001):
for param_, param in zip(model_.parameters(), model.parameters()):
param_.data.copy_((1 - target_lr) * param_ + target_lr * param)
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
"""
batch: {
attention_mask: [batch_size, src_length]
input_ids: [batch_size, src_length]
labels: [batch_size, tgt_length]
decoder_input_ids: [batch_size, tgt_length]
}
"""
if step % 2 == 0:
generated_tokens = generate(
args,
accelerator,
model,
tokenizer,
batch,
do_sample=True
)
# [batch_size, tgt_length]
labels = torch.zeros_like(generated_tokens)
labels[:, 1:] = generated_tokens[:, 1:]
decoder_input_ids = torch.zeros_like(generated_tokens)
decoder_input_ids[:, 1:] = labels[:, :-1]
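                # hard-coded decoder start token; 2 is assumed to match the
                # checkpoint's decoder_start_token_id (true for BART models)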
decoder_input_ids[:, 0] = 2
else: # learning from demonstration
pad_mask = batch.labels.eq(-100) # replace -100 with 1
labels = batch.labels.masked_fill(pad_mask, 1)
decoder_input_ids = batch.decoder_input_ids
outputs = model(
input_ids=batch.input_ids,
attention_mask=batch.attention_mask,
decoder_input_ids=decoder_input_ids,
)
with torch.no_grad():
tgt_outputs = tgt_model(
input_ids=batch.input_ids,
attention_mask=batch.attention_mask,
decoder_input_ids=decoder_input_ids,
)
# reward calculation
decoded_labels = decode_ids_to_strs(args, batch.labels, accelerator, tokenizer)
decoded_preds = decode_ids_to_strs(args, labels, accelerator, tokenizer)
rewards = calculate_rouge(args, decoded_preds, decoded_labels)
rewards = torch.tensor(rewards).to(outputs[0])
sample = {'target': labels, 'rewards': rewards}
loss = criterion(outputs[0], tgt_outputs[0], sample)[0]
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
# update target model
polyak_update(model, tgt_model)
# W&B logging
if accelerator.is_main_process:
report_metrics(
lr_scheduler.get_last_lr(),
loss.item(),
completed_steps
)
if completed_steps >= args.max_train_steps:
break
# Run evaluation
logger.info("***** Running evaluation *****")
eval(args, accelerator, model, tokenizer, eval_dataloader, metric)
# Extract a few results from ROUGE
result = metric.compute(use_stemmer=True)
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
result = {k: round(v, 4) for k, v in result.items()}
logger.info(result)
if accelerator.is_main_process and _has_wandb:
log_info = {"Validation/" + k: v for k, v in result.items()}
wandb.log(log_info, completed_steps)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if __name__ == "__main__":
main()
|
{"hexsha": "1e64159fba23e7b1ba376892c80c2d6931654b5b", "size": 27050, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_summarization_no_trainer.py", "max_stars_repo_name": "mcao516/soft-q-learning-for-text-summarization", "max_stars_repo_head_hexsha": "b06050741de444490b2534fce24f31b7f5258e52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_summarization_no_trainer.py", "max_issues_repo_name": "mcao516/soft-q-learning-for-text-summarization", "max_issues_repo_head_hexsha": "b06050741de444490b2534fce24f31b7f5258e52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_summarization_no_trainer.py", "max_forks_repo_name": "mcao516/soft-q-learning-for-text-summarization", "max_forks_repo_head_hexsha": "b06050741de444490b2534fce24f31b7f5258e52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5047233468, "max_line_length": 119, "alphanum_fraction": 0.6445471349, "include": true, "reason": "import numpy", "num_tokens": 5719}
|
# sys
import os
import sys
import numpy as np
import random
import pickle
# torch
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
# visualization
import time
class Feeder(torch.utils.data.Dataset):
def __init__(self,
data_path,
data_audio_path,
label_path,
window_size=-1,
debug=False):
self.debug = debug
self.data_path = data_path
self.data_audio_path = data_audio_path
self.label_path = label_path
self.window_size = window_size
self.load_data()
def load_data(self):
# data: N C V T M
# load label
if '.pkl' in self.label_path:
try:
with open(self.label_path) as f:
self.sample_name, self.label = pickle.load(f)
except:
# for pickle file from python2
with open(self.label_path, 'rb') as f:
self.sample_name, self.label = pickle.load(
f, encoding='latin1')
# old label format
elif '.npy' in self.label_path:
self.label = list(np.load(self.label_path))
self.sample_name = [str(i) for i in range(len(self.label))]
else:
raise ValueError()
# load data
self.data = np.load(self.data_path)
self.data_audio = np.load(self.data_audio_path)
if self.debug:
self.label = self.label[0:100]
self.data = self.data[0:100]
self.sample_name = self.sample_name[0:100]
self.N, self.C, self.T = self.data.shape
def __len__(self):
return len(self.label)
    def __iter__(self):
        # yield samples via __getitem__ so plain iteration over the dataset works
        return (self[i] for i in range(len(self)))
def __getitem__(self, index):
# get data
data_numpy = self.data[index]
# # fill data_upsampled_numpy
# data_upsampled_numpy = np.zeros((self.C, int(self.AUDIO_LENGTH/2)))
# for i in range(self.C):
# data_upsampled_numpy[i,:] = librosa.resample(data_numpy[i,:],30,11025)
audio_numpy = self.data_audio[index]
label = self.label[index]
return data_numpy, audio_numpy, label
def accuracy(self, score):
rank = score.argsort()
hit = [l in rank[i, -1:] for i, l in enumerate(self.label)]
return sum(hit) * 1.0 / len(hit)
if __name__ == '__main__':
    # Minimal smoke test, as a sketch; the audio path is a hypothetical placeholder.
    data_path = "./data/NTU-RGB-D/xview/val_data.npy"
    data_audio_path = "./data/NTU-RGB-D/xview/val_audio.npy"  # hypothetical
    label_path = "./data/NTU-RGB-D/xview/val_label.pkl"
    dataset = Feeder(data_path, data_audio_path, label_path, debug=True)
    print(len(dataset))
|
{"hexsha": "3bfaf5f25f0b8555871ea914d8a48d1ec0a2530d", "size": 2699, "ext": "py", "lang": "Python", "max_stars_repo_path": "newapproach/feeder.py", "max_stars_repo_name": "chigur/pose", "max_stars_repo_head_hexsha": "3e8ecebbc24ea59a1cb217b15a9b2a1a1de09085", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "newapproach/feeder.py", "max_issues_repo_name": "chigur/pose", "max_issues_repo_head_hexsha": "3e8ecebbc24ea59a1cb217b15a9b2a1a1de09085", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "newapproach/feeder.py", "max_forks_repo_name": "chigur/pose", "max_forks_repo_head_hexsha": "3e8ecebbc24ea59a1cb217b15a9b2a1a1de09085", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.99, "max_line_length": 84, "alphanum_fraction": 0.5861430159, "include": true, "reason": "import numpy", "num_tokens": 635}
|
"""
"""
import unittest
import os.path
import numpy as np
import carribean
from carribean.points_grid import PointsGridGraph, four_points_connectivity, eight_points_connectivity
from carribean.carribean import get_best_island
class PointsGridGraphTest(unittest.TestCase):
"""
Simple test case for the PointsGridGraph and get_best_island
"""
def setUp(self):
self.input = np.genfromtxt(os.path.join(os.path.dirname(carribean.__file__), 'test_data.csv'), delimiter=',')
self.input = self.input.T
def test_four_points_connectivity(self):
"""
tests that four point connectivity provides the right number of components
"""
connectivity = four_points_connectivity
graph = PointsGridGraph(input_map=self.input, connectivity_strategy=connectivity)
components = graph.get_connected_components(min_component_size=1)
self.assertEqual(7, len(components.island.unique()))
def test_eight_points_connectivity(self):
"""
        tests that eight-point connectivity provides the right number of components
"""
connectivity = eight_points_connectivity
graph = PointsGridGraph(input_map=self.input, connectivity_strategy=connectivity)
components = graph.get_connected_components(min_component_size=1)
self.assertEqual(5, len(components.island.unique()))
def test_min_filter(self):
"""
tests the minimal island size filter
"""
connectivity = four_points_connectivity
graph = PointsGridGraph(input_map=self.input, connectivity_strategy=connectivity)
components = graph.get_connected_components(min_component_size=9)
self.assertEqual(1, len(components.island.unique()))
def test_get_best_island(self):
"""
tests that get_best_island returns the right score
"""
connectivity = four_points_connectivity
graph = PointsGridGraph(input_map=self.input, connectivity_strategy=connectivity)
components = graph.get_connected_components(min_component_size=2)
score, _ = get_best_island(components)
self.assertEqual(5.75, score)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "4d7867aa73b1a8cdef1a21cda31b18c782f2d093", "size": 2245, "ext": "py", "lang": "Python", "max_stars_repo_path": "carribean_test.py", "max_stars_repo_name": "areshytko/interview-challenge-2", "max_stars_repo_head_hexsha": "495b999961401d1a50f5c8216f40f3c77342e21c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "carribean_test.py", "max_issues_repo_name": "areshytko/interview-challenge-2", "max_issues_repo_head_hexsha": "495b999961401d1a50f5c8216f40f3c77342e21c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "carribean_test.py", "max_forks_repo_name": "areshytko/interview-challenge-2", "max_forks_repo_head_hexsha": "495b999961401d1a50f5c8216f40f3c77342e21c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5362318841, "max_line_length": 117, "alphanum_fraction": 0.7091314031, "include": true, "reason": "import numpy", "num_tokens": 443}
|
[STATEMENT]
lemma properties_loop:
assumes "mu \<le> i"
shows "seq (i + j * lambda) = seq i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. seq (i + j * lambda) = seq i
[PROOF STEP]
using P assms
[PROOF STATE]
proof (prove)
using this:
local.properties lambda mu
mu \<le> i
goal (1 subgoal):
1. seq (i + j * lambda) = seq i
[PROOF STEP]
by (simp add: properties_def)
|
{"llama_tokens": 154, "file": "TortoiseHare_Basis", "length": 2}
|
\documentclass[letterpaper]{article}
\usepackage{fullpage}
\usepackage{nopageno}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{tikz}
\usepackage[utf8]{luainputenc}
\usepackage{aeguill}
\usepackage{setspace}
\tikzstyle{edge} = [fill,opacity=.5,fill opacity=.5,line cap=round, line join=round, line width=50pt]
\usetikzlibrary{graphs,graphdrawing}
\usegdlibrary{trees}
\pgfdeclarelayer{background}
\pgfsetlayers{background,main}
\allowdisplaybreaks
\newcommand{\abs}[1]{\left\lvert #1 \right\rvert}
\begin{document}
\title{Notes}
\date{9 mars, 2015}
\maketitle
\section*{7.1 genus of a graph}
last chapter we discussed ``how far'' a graph is from planar; there we used the crossing number.
in the same vein, but more useful, is the genus of a graph. we know that $K_{3,3}$ cannot be embedded in the plane. what about on a donut (torus)?
recall that $\text{cr}(K_5)=1$.
Now we can ``mold'' the torus into figure 7.5 on page 271.
so handles can get around crossings.
a sphere with $k$ handles is called a surface with genus $k$. the book calls it $S_k$. think of a $k$-holed torus.
an easier way:
think of the torus as a plane rolled up into a tube, with edges connected. now we associate opposite edges. use arrows or something to show this
with this interpretation we can state:
\subsection*{thrm}
if $G$ is connected with $|G|=n$, $|E(G)|=m$, and $G$ is embedded minimally with $r$ regions, then we have $n-m+r=2-2\gamma(G)$, where $\gamma(G)$ is the minimal genus
like before we get a bound right away:
if $G$ is a connected graph with $|G|\ge 3$ then $\gamma(G)\ge \frac{m}{6}-\frac{n}{2}+1$
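quick sketch of why: each region in a minimal embedding is bounded by at least 3 edges, and each edge borders at most 2 regions, so $3r\le 2m$. plugging $r\le\frac{2m}{3}$ into $n-m+r=2-2\gamma(G)$ gives
\[
2-2\gamma(G)\le n-m+\frac{2m}{3}=n-\frac{m}{3},
\]
which rearranges to $\gamma(G)\ge\frac{m}{6}-\frac{n}{2}+1$.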
\section*{Homework}
2,8,11
\end{document}
|
{"hexsha": "b54fd5a1c425984f653b6458ba32ef63687f77b0", "size": 1629, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "graph/graph-notes-2015-03-09.tex", "max_stars_repo_name": "ylixir/school", "max_stars_repo_head_hexsha": "66d433f2090b6396c8dd2a53a733c25dbe7bc90f", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graph/graph-notes-2015-03-09.tex", "max_issues_repo_name": "ylixir/school", "max_issues_repo_head_hexsha": "66d433f2090b6396c8dd2a53a733c25dbe7bc90f", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph/graph-notes-2015-03-09.tex", "max_forks_repo_name": "ylixir/school", "max_forks_repo_head_hexsha": "66d433f2090b6396c8dd2a53a733c25dbe7bc90f", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0892857143, "max_line_length": 160, "alphanum_fraction": 0.730509515, "num_tokens": 524}
|
[STATEMENT]
lemma ta_union_der_disj_states:
assumes "\<Q> \<A> |\<inter>| \<Q> \<B> = {||}" and "q |\<in>| ta_der (ta_union \<A> \<B>) t"
shows "q |\<in>| ta_der \<A> t \<or> q |\<in>| ta_der \<B> t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. q |\<in>| ta_der \<A> t \<or> q |\<in>| ta_der \<B> t
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
q |\<in>| ta_der (ta_union \<A> \<B>) t
goal (1 subgoal):
1. q |\<in>| ta_der \<A> t \<or> q |\<in>| ta_der \<B> t
[PROOF STEP]
proof (induct rule: ta_der_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>q v. v = q \<or> (v, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+| \<Longrightarrow> q |\<in>| ta_der \<A> (Var v) \<or> q |\<in>| ta_der \<B> (Var v)
2. \<And>f ts ps p q. \<lbrakk>f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>); length ts = length ps; p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|; \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! i); \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)\<rbrakk> \<Longrightarrow> q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
case (Var q v)
[PROOF STATE]
proof (state)
this:
v = q \<or> (v, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|
goal (2 subgoals):
1. \<And>q v. v = q \<or> (v, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+| \<Longrightarrow> q |\<in>| ta_der \<A> (Var v) \<or> q |\<in>| ta_der \<B> (Var v)
2. \<And>f ts ps p q. \<lbrakk>f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>); length ts = length ps; p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|; \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! i); \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)\<rbrakk> \<Longrightarrow> q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
v = q \<or> (v, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
v = q \<or> (v, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|
goal (1 subgoal):
1. q |\<in>| ta_der \<A> (Var v) \<or> q |\<in>| ta_der \<B> (Var v)
[PROOF STEP]
using ta_union_eps_disj_states[OF assms(1)]
[PROOF STATE]
proof (prove)
using this:
v = q \<or> (v, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|
(?p, ?q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+| \<Longrightarrow> (?p, ?q) |\<in>| (eps \<A>)|\<^sup>+| \<or> (?p, ?q) |\<in>| (eps \<B>)|\<^sup>+|
goal (1 subgoal):
1. q |\<in>| ta_der \<A> (Var v) \<or> q |\<in>| ta_der \<B> (Var v)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
q |\<in>| ta_der \<A> (Var v) \<or> q |\<in>| ta_der \<B> (Var v)
goal (1 subgoal):
1. \<And>f ts ps p q. \<lbrakk>f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>); length ts = length ps; p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|; \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! i); \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)\<rbrakk> \<Longrightarrow> q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>f ts ps p q. \<lbrakk>f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>); length ts = length ps; p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|; \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! i); \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)\<rbrakk> \<Longrightarrow> q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
case (Fun f ts ps p q)
[PROOF STATE]
proof (state)
this:
f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>)
length ts = length ps
p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|
?i < length ts \<Longrightarrow> ps ! ?i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! ?i)
?i < length ts \<Longrightarrow> ps ! ?i |\<in>| ta_der \<A> (ts ! ?i) \<or> ps ! ?i |\<in>| ta_der \<B> (ts ! ?i)
goal (1 subgoal):
1. \<And>f ts ps p q. \<lbrakk>f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>); length ts = length ps; p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|; \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! i); \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)\<rbrakk> \<Longrightarrow> q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
have dist: "fset_of_list ps |\<subseteq>| \<Q> \<A> \<Longrightarrow> i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i)"
"fset_of_list ps |\<subseteq>| \<Q> \<B> \<Longrightarrow> i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<B> (ts ! i)" for i
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A>; i < length ts\<rbrakk> \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i)) &&& (\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<B>; i < length ts\<rbrakk> \<Longrightarrow> ps ! i |\<in>| ta_der \<B> (ts ! i))
[PROOF STEP]
using Fun(2) Fun(5)[of i] assms(1)
[PROOF STATE]
proof (prove)
using this:
length ts = length ps
i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)
\<Q> \<A> |\<inter>| \<Q> \<B> = {||}
goal (1 subgoal):
1. (\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A>; i < length ts\<rbrakk> \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i)) &&& (\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<B>; i < length ts\<rbrakk> \<Longrightarrow> ps ! i |\<in>| ta_der \<B> (ts ! i))
[PROOF STEP]
by (auto dest!: ta_der_not_stateD fsubsetD)
[PROOF STATE]
proof (state)
this:
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A>; ?i < length ts\<rbrakk> \<Longrightarrow> ps ! ?i |\<in>| ta_der \<A> (ts ! ?i)
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<B>; ?i < length ts\<rbrakk> \<Longrightarrow> ps ! ?i |\<in>| ta_der \<B> (ts ! ?i)
goal (1 subgoal):
1. \<And>f ts ps p q. \<lbrakk>f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>); length ts = length ps; p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|; \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! i); \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)\<rbrakk> \<Longrightarrow> q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
from Fun(1)
[PROOF STATE]
proof (chain)
picking this:
f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>)
[PROOF STEP]
consider (a) "fset_of_list ps |\<subseteq>| \<Q> \<A>" | (b) "fset_of_list ps |\<subseteq>| \<Q> \<B>"
[PROOF STATE]
proof (prove)
using this:
f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>)
goal (1 subgoal):
1. \<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A> \<Longrightarrow> thesis; fset_of_list ps |\<subseteq>| \<Q> \<B> \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp: ta_union_def dest: rule_statesD)
[PROOF STATE]
proof (state)
this:
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A> \<Longrightarrow> ?thesis; fset_of_list ps |\<subseteq>| \<Q> \<B> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. \<And>f ts ps p q. \<lbrakk>f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>); length ts = length ps; p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|; \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der (ta_union \<A> \<B>) (ts ! i); \<And>i. i < length ts \<Longrightarrow> ps ! i |\<in>| ta_der \<A> (ts ! i) \<or> ps ! i |\<in>| ta_der \<B> (ts ! i)\<rbrakk> \<Longrightarrow> q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A> \<Longrightarrow> ?thesis; fset_of_list ps |\<subseteq>| \<Q> \<B> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A> \<Longrightarrow> ?thesis; fset_of_list ps |\<subseteq>| \<Q> \<B> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
using dist Fun(1, 2) assms(1) ta_union_eps_disj_states[OF assms(1), of p q] Fun(3)
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A> \<Longrightarrow> ?thesis; fset_of_list ps |\<subseteq>| \<Q> \<B> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<A>; ?i < length ts\<rbrakk> \<Longrightarrow> ps ! ?i |\<in>| ta_der \<A> (ts ! ?i)
\<lbrakk>fset_of_list ps |\<subseteq>| \<Q> \<B>; ?i < length ts\<rbrakk> \<Longrightarrow> ps ! ?i |\<in>| ta_der \<B> (ts ! ?i)
f ps \<rightarrow> p |\<in>| rules (ta_union \<A> \<B>)
length ts = length ps
\<Q> \<A> |\<inter>| \<Q> \<B> = {||}
(p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+| \<Longrightarrow> (p, q) |\<in>| (eps \<A>)|\<^sup>+| \<or> (p, q) |\<in>| (eps \<B>)|\<^sup>+|
p = q \<or> (p, q) |\<in>| (eps (ta_union \<A> \<B>))|\<^sup>+|
goal (1 subgoal):
1. q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
[PROOF STEP]
by (cases) (auto simp: fsubsetI rule_statesD ta_union_def intro!: exI[of _ p] exI[of _ ps])
[PROOF STATE]
proof (state)
this:
q |\<in>| ta_der \<A> (Fun f ts) \<or> q |\<in>| ta_der \<B> (Fun f ts)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4560, "file": "Regular_Tree_Relations_Tree_Automata_Tree_Automata", "length": 20}
|
import h5py
import numpy as np
import logging
from . import parse_as
from . import color
logger = logging.getLogger(__name__)
def report_default(key, value):
logger.info("Using default '{}': {}".format(key, value))
def apply_defaults(scene):
# Note: Only set defaults for options the user would expect to have a
# default. For example, the Animation.Crop is set so the full waveform data
# is shown propagating through the domain, but the Animation.Speed has no
# obvious default.
if 'View' not in scene:
scene['View'] = {}
view_config = scene['View']
if 'OrientationAxesVisibility' not in view_config:
view_config['OrientationAxesVisibility'] = False
# WaveformToVolume
# TODO: make this more robust, work with multiple waveform volume renderings
if 'WaveformToVolume' not in scene:
scene['WaveformToVolume'] = {}
waveform_to_volume_config = scene['WaveformToVolume']
if 'VolumeRepresentation' not in scene:
scene['VolumeRepresentation'] = {}
vol_repr = scene['VolumeRepresentation']
if 'Representation' not in vol_repr:
vol_repr['Representation'] = 'Volume'
if 'VolumeRenderingMode' not in vol_repr:
vol_repr['VolumeRenderingMode'] = 'GPU Based'
if 'Shade' not in vol_repr:
vol_repr['Shade'] = True
# Animation
if 'Animation' not in scene:
scene['Animation'] = {}
animation_config = scene['Animation']
# Crop time to full propagation through domain
if ('FreezeTime' not in animation_config and 'Crop' not in animation_config
and 'Size' in scene['WaveformToVolume'] and 'RadialScale' in scene['WaveformToVolume']):
waveform_file_and_subfile = parse_as.file_and_subfile(
scene['Datasources']['Waveform'])
with h5py.File(waveform_file_and_subfile[0], 'r') as waveform_file:
waveform_data = waveform_file[waveform_file_and_subfile[1]]
mode_data = waveform_data['Y_l2_m2.dat']
t0, t1 = mode_data[0, 0], mode_data[-1, 0]
domain_radius = scene['WaveformToVolume']['Size'] * scene[
'WaveformToVolume']['RadialScale']
animation_config['Crop'] = (t0 + domain_radius, t1 + domain_radius)
report_default('Animation.Crop', animation_config['Crop'])
# CameraShots
if 'CameraShots' not in scene:
camera_distance = 2 * scene['WaveformToVolume']['Size']
scene['CameraShots'] = [{
'Position': [-camera_distance, 0., 0.],
'ViewUp': [0., 0., 1.],
'FocalPoint': [0., 0., 0.],
'ViewAngle': 60.
}]
if 'Horizons' in scene['Datasources'] and 'Horizons' not in scene:
scene['Horizons'] = []
for horizon_datasource in scene['Datasources']['Horizons']:
scene['Horizons'].append({
'Name': horizon_datasource,
})
# TransferFunctions
if 'TransferFunctions' not in scene:
scene['TransferFunctions'] = []
tfs_config = scene['TransferFunctions']
needed_tfs = set([
color.extract_color_by(scene['VolumeRepresentation'], delete=False)[1]
])
available_tfs = set([tf['Field'] for tf in tfs_config])
default_tfs = needed_tfs - available_tfs
for tf_field in default_tfs:
tfs_config.append({
'Field': tf_field,
'TransferFunction': {
'Peaks': {
'Colormap': 'Rainbow Uniform'
}
}
})
# Compute default peaks for waveform volume rendering
for tf_config in tfs_config:
tf_field = tf_config['Field']
if tf_field not in ['Plus strain', 'Cross strain']:
continue
if 'Peaks' not in tf_config['TransferFunction']:
continue
peaks_config = tf_config['TransferFunction']['Peaks']
if 'NumPeaks' not in peaks_config:
peaks_config['NumPeaks'] = 10
if 'FirstPeak' not in peaks_config and 'LastPeak' not in peaks_config:
waveform_file_and_subfile = parse_as.file_and_subfile(
scene['Datasources']['Waveform'])
with h5py.File(waveform_file_and_subfile[0], 'r') as waveform_file:
waveform_data = waveform_file[waveform_file_and_subfile[1]]
mode_data = waveform_data['Y_l2_m2.dat']
mode_max = np.max(
np.abs(mode_data[:, 1] + 1j * mode_data[:, 2]))
pos_first_peak, pos_last_peak = 0.01 * mode_max, 0.2 * mode_max
peaks_config['FirstPeak'] = {
'Position': pos_first_peak,
'Opacity': 0.03
}
peaks_config['LastPeak'] = {
'Position': pos_last_peak,
'Opacity': 0.5
}
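
# Usage sketch (illustrative only; the scene schema is inferred from the keys
# this module reads, and the file name is hypothetical):
#
#   import yaml
#   with open('scene.yaml') as f:
#       scene = yaml.safe_load(f)
#   apply_defaults(scene)
#   # scene['View'], scene['VolumeRepresentation'], scene['Animation'], ...
#   # are now filled in with the defaults computed above.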
|
{"hexsha": "7568f6f4b5aab24b6c08935794030141eb66fff7", "size": 4785, "ext": "py", "lang": "Python", "max_stars_repo_path": "gwpv/scene_configuration/defaults.py", "max_stars_repo_name": "damibabayemi/gwpv", "max_stars_repo_head_hexsha": "e6705787fc2e25b72eaef2508357b1f0b9258581", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-04-17T18:25:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-05T00:30:31.000Z", "max_issues_repo_path": "gwpv/scene_configuration/defaults.py", "max_issues_repo_name": "damibabayemi/gwpv", "max_issues_repo_head_hexsha": "e6705787fc2e25b72eaef2508357b1f0b9258581", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-06T11:47:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-06T11:47:48.000Z", "max_forks_repo_path": "gwpv/scene_configuration/defaults.py", "max_forks_repo_name": "nilsleiffischer/gwpv", "max_forks_repo_head_hexsha": "2b65be422ffcfc443547db6e4b1b3c2832a7f621", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-19T02:27:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-19T02:27:15.000Z", "avg_line_length": 39.5454545455, "max_line_length": 92, "alphanum_fraction": 0.6140020899, "include": true, "reason": "import numpy", "num_tokens": 1159}
|
from kapteyn import maputils
import numpy
from service import *
fignum = 37
fig = plt.figure(figsize=figsize)
frame = fig.add_axes((0.1,0.15,0.8,0.75))
title = 'WCS polyconic (PGSBOX fig.1)'
rot = 30.0 *numpy.pi/180.0
header = {'NAXIS' : 2, 'NAXIS1': 512, 'NAXIS2': 512,
'CTYPE1' : 'RA---PCO',
'PC1_1' : numpy.cos(rot), 'PC1_2' : numpy.sin(rot),
'PC2_1' : -numpy.sin(rot), 'PC2_2' : numpy.cos(rot),
'CRVAL1' : 332.0, 'CRPIX1' : 192, 'CUNIT1' : 'deg', 'CDELT1' : -1.0/5.0,
'CTYPE2' : 'DEC--PCO',
'CRVAL2' : 40.0, 'CRPIX2' : 640, 'CUNIT2' : 'deg', 'CDELT2' : 1.0/5.0,
'LONPOLE' : -30.0
}
X = numpy.arange(-180, 180.0, 15.0)
Y = numpy.arange(-75, 90, 15.0)
# Here we demonstrate how to avoid a jump at the right corner boundary
# of the plot by increasing the value of 'gridsamples'.
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y, gridsamples=4000)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
grat.setp_tick(wcsaxis=0, position=15*numpy.array((18,20,22,23)), visible=False)
grat.setp_tick(wcsaxis=0, fmt="Hms")
grat.setp_tick(wcsaxis=1, fmt="Dms")
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
header['LONPOLE'] = 999
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), starty=(-89.5,))
border.setp_gratline((0,1), color='g', lw=2)
border.setp_plotaxis((0,1,2,3), mode='no_ticks', visible=False)
lon_world = list(range(0,360,30))
lat_world = [-dec0, -60, -30, 30, 60, dec0]
labkwargs0 = {'color':'r', 'va':'bottom', 'ha':'right'}
labkwargs1 = {'color':'b', 'va':'bottom', 'ha':'right'}
doplot(frame, fignum, annim, grat, title,
lon_world=lon_world, lat_world=lat_world,
labkwargs0=labkwargs0, labkwargs1=labkwargs1)
|
{"hexsha": "7b622f849f9f912e5b64c20fc75fe2dd11fdacda", "size": 1960, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/source/EXAMPLES/allskyf37.py", "max_stars_repo_name": "kapteyn-astro/kapteyn", "max_stars_repo_head_hexsha": "f12332cfd567c7c0da40628dcfc7b297971ee636", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-04-28T08:55:33.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-23T18:35:58.000Z", "max_issues_repo_path": "doc/source/EXAMPLES/allskyf37.py", "max_issues_repo_name": "kapteyn-astro/kapteyn", "max_issues_repo_head_hexsha": "f12332cfd567c7c0da40628dcfc7b297971ee636", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-07-23T12:28:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-13T18:26:06.000Z", "max_forks_repo_path": "doc/source/EXAMPLES/allskyf37.py", "max_forks_repo_name": "kapteyn-astro/kapteyn", "max_forks_repo_head_hexsha": "f12332cfd567c7c0da40628dcfc7b297971ee636", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-05-03T14:01:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-23T12:23:28.000Z", "avg_line_length": 42.6086956522, "max_line_length": 82, "alphanum_fraction": 0.6239795918, "include": true, "reason": "import numpy", "num_tokens": 752}
|
include("attributes.jl")
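# SMD (Sinha-Malo-Deb) bilevel test problems.
# Convention used below: the upper-level vector xu is split into (xu1, xu2)
# of lengths p and r, and the lower-level vector xl into (xl1, xl2) of
# lengths q and r, with r = floor(length(xu)/2) (SMD6 additionally splits
# xl1 into q + s components). Each function returns
# (functionValue, inequalityConstrVals, equalityConstrVals); inequality
# values are negated before returning, so feasible points satisfy g(x) <= 0.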
function SMD1_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + sum((xl1).^2) + sum((xu2).^2) + sum((xu2 - tan.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
#
# functionValue,equalityConstrVals,inequalityConstrVals
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD2_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) - sum((xl1).^2) + sum((xu2).^2) - sum((xu2 - log.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD3_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + sum((xl1).^2) + sum((xu2).^2) + sum((xu2.^2 - tan.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD4_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) - sum((xl1).^2) + sum((xu2).^2) - sum((abs.(xu2) - log.(1 .+ xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD5_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
term2 = 0
for i=1:q-1
term2 = term2 + (xl1[i+1] - xl1[i].^2).^2 + (xl1[i] - 1).^2
end
    # Rosenbrock-style term, shared with the SMD5 follower objective
functionValue = sum((xu1).^2) - term2 + sum((xu2).^2) - sum((abs.(xu2) - xl2.^2).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD6_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = floor(Int,(length(xl) - r)/2 - eps())
s = ceil(Int, (length(xl) - r)/2 + eps())
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q+s]
xl2 = xl[q+s+1:q+s+r]
functionValue = sum((xu1).^2) - sum(xl1[1:q].^2) + sum(xl1[q+1:q+s].^2) + sum((xu2).^2) - sum((xu2 - xl2).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD7_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
m = 1:p
functionValue = 1+1/400*sum((xu1).^2) - prod(cos.(xu1./sqrt.(m))) - sum((xl1).^2) + sum((xu2).^2) - sum((xu2 - log.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD8_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
term2 = 0
for i=1:q-1
term2 += (xl1[i+1] - xl1[i].^2)^2 + (xl1[i] - 1).^2
end
functionValue = 20+exp(1)-20*exp(-0.2*sqrt.(1/p*sum((xu1).^2))) - exp(1/p*sum(cos.(2*pi*xu1))) - term2 + sum((xu2).^2) - sum((xu2 - xl2.^3).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD9_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) - sum((xl1).^2) + sum((xu2).^2) - sum((xu2 - log.(1 .+xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = [sum(xu1.^2)+sum(xu2.^2) - floor.(sum(xu1.^2)+sum(xu2.^2)+0.5)]
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD10_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
a = 2*ones(size(xu1))
c = 2*ones(size(xu2))
functionValue = sum((xu1 - a).^2) + sum((xl1).^2) + sum((xu2 - c).^2) - sum((xu2 - tan.(xl2)).^2)
inequalityConstrVals = Float64[]
#####################################
#Write the constraints here
for i=1:p
push!(inequalityConstrVals, xu1[i] + xu1[i].^3 - sum(xu1.^3) - sum(xu2.^3))
end
for i=1:r
push!(inequalityConstrVals, xu2[i] + xu2[i].^3 - sum(xu2.^3) - sum(xu1.^3))
end
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD11_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) - sum((xl1).^2) + sum((xu2).^2) - sum((xu2 - log.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = xu2 .- 1/sqrt(r) .- log.(xl2)
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD12_leader(xu, xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
a = 2*ones(size(xu1))
c = 2*ones(size(xu2))
functionValue = sum((xu1 - a).^2) + sum((xl1).^2) + sum((xu2 - c).^2) + sum(tan.(abs.(xl2))) - sum((xu2 - tan.(xl2)).^2)
inequalityConstrVals = Float64[]
#####################################
#Write the constraints here
for i=1:p
push!(inequalityConstrVals, xu1[i] + xu1[i].^3 - sum(xu1.^3) - sum(xu2.^3))
end
for i=1:r
push!(inequalityConstrVals, xu2[i] + xu2[i].^3 - sum(xu2.^3) - sum(xu1.^3))
end
inequalityConstrVals = vcat(inequalityConstrVals, xu2 - tan.(xl2))
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD1_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + sum((xl1).^2) + sum((xu2 - tan.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD2_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + sum((xl1).^2) + sum((xu2 - log.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD3_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + q + sum(xl1.^2 - cos.(2*pi*xl1)) + sum((xu2.^2 - tan.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD4_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + q + sum(xl1.^2 - cos.(2*pi*xl1)) + sum((abs.(xu2) - log.(1 .+xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD5_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
term2 = 0
for i=1:q-1
term2 = term2 + (xl1[i+1] - xl1[i].^2)^2 + (xl1[i] - 1).^2
end
functionValue = sum((xu1).^2) + term2 + sum((abs.(xu2) - xl2.^2).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD6_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = ceil(Int, (length(xl) - r)/2 - eps())
s = floor(Int,(length(xl) - r)/2 + eps())
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q+s]
xl2 = xl[q+s+1:q+s+r]
term2 = sum(xl1[1:q].^2)
for i=q+1:2:q+s-1
term2 = term2 + (xl1[i+1] - xl1[i]).^2
end
functionValue = sum((xu1).^2) + term2 + sum((xu2 - xl2).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD7_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^3) + sum((xl1).^2) + sum((xu2 - log.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD8_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
term2 = 0
for i=1:q-1
term2 = term2 + (xl1[i+1] - xl1[i].^2).^2 + (xl1[i] - 1).^2
end
functionValue = sum(abs.(xu1)) + term2 + sum((xu2 - xl2.^3).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(0)
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD9_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + sum((xl1).^2) + sum((xu2 - log.(1 .+xl2)).^2)
# functionValue = -functionValue
#####################################
#Write the constraints here
inequalityConstrVals = [sum(xl1.^2)+sum(xl2.^2) - floor.(sum(xl1.^2)+sum(xl2.^2)+0.5)]
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD10_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
b = 2*ones(size(xl1))
functionValue = sum((xu1).^2) + sum((xl1 - b).^2) + sum((xu2 - tan.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(q)
for i=1:q
inequalityConstrVals[i] = xl1[i] + xl1[i].^3 - sum(xl1.^3)
end
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD11_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
functionValue = sum((xu1).^2) + sum((xl1).^2) + sum((xu2 - log.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = [sum((xu2 - log.(xl2)).^2) - 1]
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
function SMD12_follower(xu,xl)
r = floor(Int,length(xu)/2)
p = length(xu) - r
q = length(xl) - r
xu1 = xu[1:p]
xu2 = xu[p+1:p+r]
xl1 = xl[1:q]
xl2 = xl[q+1:q+r]
b = 2*ones(size(xl1))
functionValue = sum((xu1).^2) + sum((xl1 - b).^2) + sum((xu2 - tan.(xl2)).^2)
#####################################
#Write the constraints here
inequalityConstrVals = zeros(q+1)
for i=1:q
inequalityConstrVals[i] = xl1[i] + xl1[i].^3 - sum(xl1.^3)
end
inequalityConstrVals[q+1] = sum((xu2 - tan.(xl2)).^2) - 1
inequalityConstrVals = - inequalityConstrVals
equalityConstrVals = zeros(0)
#####################################
functionValue,inequalityConstrVals,equalityConstrVals
end
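
# Usage sketch (hypothetical values, sized so the split r = floor(length(xu)/2),
# p = length(xu) - r, q = length(xl) - r works out to p = 2, q = 2, r = 1):
#
#   xu = [0.5, -0.3, 0.2]            # upper-level variables
#   xl = [0.1, 0.4, 0.2]             # lower-level variables
#   F, G, H = SMD1_leader(xu, xl)    # objective, inequalities, equalities
#   f, g, h = SMD1_follower(xu, xl)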
|
{"hexsha": "57b43845d08a2d577f2a0ec05bfd05617135f9bf", "size": 15164, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Bilevel/SMD/SMD.jl", "max_stars_repo_name": "jmejia8/HardTestProblems.jl", "max_stars_repo_head_hexsha": "cde9e6c654f046fc8b9f01a434f7b213a0fab182", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Bilevel/SMD/SMD.jl", "max_issues_repo_name": "jmejia8/HardTestProblems.jl", "max_issues_repo_head_hexsha": "cde9e6c654f046fc8b9f01a434f7b213a0fab182", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-08T04:09:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T04:09:36.000Z", "max_forks_repo_path": "src/Bilevel/SMD/SMD.jl", "max_forks_repo_name": "jmejia8/HardTestProblems.jl", "max_forks_repo_head_hexsha": "cde9e6c654f046fc8b9f01a434f7b213a0fab182", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.9557661927, "max_line_length": 148, "alphanum_fraction": 0.5099577948, "num_tokens": 5247}
|
\documentclass[a4paper,11pt,leqno,fleqn]{artikel3}
\usepackage[dvips]{color}
%\definecolor{backgray}{gray}{0.925}
%\definecolor{verylightgray}{gray}{0.95}
\usepackage{fullpage, fancyvrb, amssymb, listings, url}
%\usepackage[breqn, inline]{emaxima}
%\usepackage[cmbase]{flexisym}
%% \usepackage{breqn}
%% \setkeys{breqn}{compact}
\lstset{
language={},
keepspaces=true,
xleftmargin=3mm,
xrightmargin=3mm,
basicstyle=\ttfamily,
frame=tb,
framesep=1mm,
framerule=0.5pt,
frameround=tttt,
columns=flexible %,
%backgroundcolor=\color[gray]{0.9}
}
\newcommand{\N}{\noindent}
\newcommand{\D}{\displaystyle}
\newcommand{\bc}{\begin{center}}
\newcommand{\ec}{\end{center}}
\newcommand{\bv}{\begin{verbatim}}
\newcommand{\ev}{\end{verbatim}}
\newcommand{\tr}[1]{\textcolor{red}{#1}}
\newcommand{\tb}[1]{\textcolor{blue}{#1}}
\newcommand{\rb}[1]{\raisebox{2mm}[0mm][1mm]{#1}}
\newcommand{\rbb}[1]{\raisebox{-4mm}[0mm][9mm]{#1}}
\title{Finite Fields Computations in Maxima}
\author{
\begin{tabular}{lr} Fabrizio Caruso & \url{caruso@dm.unipi.it} \\
Jacopo D'Aurizio & \url{elianto84@gmail.com} \\
Alasdair McAndrew & \url{amca01@gmail.com} \\
Volker van Nek & \url{volkervannek@gmail.com}
\end{tabular}
}
\date{April, 2008 - July, 2013}
\begin{document}
\maketitle
This file documents a Maxima package for computations in finite fields.
It is suitable for teaching and exploration.
The first version of the package was based on the paper
``Finite Fields Manipulations in Macsyma'' by Kevin Rowley and Robert
Silverman, SIGSAM 1989, but for which the source code is long gone.
Meanwhile it contains lots of new features
and optimizations implemented by Fabrizio Caruso and Jacopo D'Aurizio.
A full review was done in 2012 and 2013 by Volker van Nek. Most of the functions described below
became core functions and some function names have been modified.
If you use a version of Maxima prior to 5.31 please refer to an appropriate
version of this file or alternatively load the necessary files from current sources.
These are \texttt{src/numth.lisp} (all basic Galois Fields functions)
and \texttt{share/contrib/gf/gf.mac} (square and cubic roots).
If speed matters, compile these two files and load the binaries.
In versions 5.29 and later it is only necessary to load \texttt{gf.mac}
for root computations.
Tests for basic computations in Galois Fields are located in
\texttt{src/rtest\_numth.mac}, tests for root computations in
\texttt{share/contrib/gf/gf\_test.mac}. Tests can be performed by \\
\texttt{batch(<path\_to\_test\_file>, test)}.
\section*{Getting started}
All user commands are prefixed with ``\verb!gf_!''. All you need to start is
to enter the parameters for your field. All fields in this package are of the
form
\[
\mathbb{F}_p[x]/{m(x)}
\]
where $p$ is a prime number and $m(x)$ is a polynomial irreducible over
$\mathbb{F}_p$. If the degree of $m(x)$ is $n$, then the finite
field will contain $p^n$ elements, each element being a polynomial of degree
strictly less than $n$, and all coefficients being in $\{0,1,\ldots,p-1\}$.
Such a field is called a \emph{finite field} or \emph{Galois field} of order
$p^n$, and is denoted $\mathbb{F}_{p^n}$. Note that although there are many different
irreducible polynomials to choose from, if $m(x)$ and $n(x)$ are different
polynomials irreducible over $\mathbb{F}_p$ and of the same degree,
then the fields
\[
\mathbb{F}_p[x]/{m(x)}
\]
and
\[
\mathbb{F}_p[x]/{n(x)}
\]
are isomorphic.
In these fields, addition and subtraction are performed on the coefficients
modulo $p$, and multiplication and division modulo $m(x)$.
Given a prime number $p$ and a polynomial $m(x)$
you can create a field by using the command ``\verb!gf_set_data(p, m(x))!''.
\verb!gf_set_data! checks that $p$ is prime, and it also checks
whether $m(x)$ is irreducible over $\mathbb{F}_p$. If these conditions are met,
a primitive element in this field is computed and some pre-calculations are performed.
Maxima returns a Lisp structure containing the field's data, which is
suitable for later reuse via ``\verb!gf_set_again(gf_data)!'' (see below).
Some of these data can be viewed by ``\verb!gf_info()!'' and ``\verb!gf_infolist()!''.
%\begin{maximasession}
% \maximaoutput*
% \i1. gf_set_data(2, x^4+x+1);\\
% \o1. [x, x^4+x+1]\\
%\end{maximasession}
%
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i1)}# F16 : gf_set_data(2, x^4+x+1);
#\tr{(\%o1)}\bc\rb{$\tb{Structure [GF-DATA]}$}\ec#
#\vspace{2mm}\tr{(\%i2)}# gf_info()$
#\rb{$\tb{characteristic\ =\ 2}$}#
#\rb{$\tb{reduction\ polynomial\ =\ x^4+x+1}$}#
#\rb{$\tb{primitive\ element\ =\ x}$}#
#\rb{$\tb{nr\ of\ elements\ =\ 16}$}#
#\rb{$\tb{nr\ of\ units\ =\ 15}$}#
#\rb{$\tb{nr\ of\ primitive\ elements\ =\ 8}$}#
\end{lstlisting}
In case no irreducible polynomial $m(x)$ is at hand,
it is sufficient to give an exponent instead.
E.g. ``\verb!gf_set_data(2, 4)!'' returns the same as ``\verb!gf_set_data(2, x^4+x+1)!''.
In addition to \verb!gf_set_data! there is a command ``\verb!gf_minimal_set(p, m(x))!''
which allows basic arithmetic without checking irreducibility and
without computing a primitive element.
\bigskip
Having set up the field, we can now perform arithmetic on field elements:
\paragraph{Addition/subtraction.}
These are performed with the commands ``\verb!gf_add!'' and ``\verb!gf_sub!''.
In the particular field entered above, since all arithmetic of coefficients is
performed modulo 2, addition and subtraction are equivalent:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i3)}# a : x^3+x;
#\tr{(\%o3)}\bc\rb{$\tb{x^3+x}$}\ec#
#\tr{(\%i4)}# b : x^3+x^2+1;
#\tr{(\%o4)}\bc\rb{$\tb{x^3+x^2+1}$}\ec#
#\tr{(\%i5)}# gf_add(a, b);
#\tr{(\%o5)}\bc\rb{$\tb{x^2+x+1}$}\ec#
\end{lstlisting}
\paragraph{Multiplication.}
This is performed with the command ``\verb!gf_mult!'':
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i6)}# gf_mult(a, b);
#\tr{(\%o6)}\bc\rb{$\tb{x^3+x+1}$}\ec#
\end{lstlisting}
\paragraph{Inversion and division.}
The inverse of a field element $p(x)$ is the element $q(x)$ for which their
product is equal to 1 (modulo $m(x)$). This is performed by
``\verb!gf_inv!''. In a finite field, division is defined as multiplying by
the inverse; thus
\[
a(x)/b(x)=a(x)(b(x))^{-1}.
\]
These operations are performed with the commands ``\verb!gf_inv!'' and
``\verb!gf_div!'':
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i7)}# gf_inv(b);
#\tr{(\%o7)}\bc\rb{$\tb{x^2}$}\ec#
#\tr{(\%i8)}# gf_div(a, b);
#\tr{(\%o8)}\bc\rb{$\tb{x^3+x^2+x}$}\ec#
#\tr{(\%i9)}# gf_mult(a, gf_inv(b));
#\tr{(\%o9)}\bc\rb{$\tb{x^3+x^2+x}$}\ec#
\end{lstlisting}
\paragraph{Exponentiation.}
To raise a field element to an integer power, use ``\verb!gf_exp!'':
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i10)}# gf_exp(a, 14);
#\tr{(\%o10)}\bc\rb{$\tb{x^3+x^2}$}\ec#
#\tr{(\%i11)}# gf_exp(a, 15);
#\tr{(\%o11)}\bc\rb{$\tb{1}$}\ec#
\end{lstlisting}
\paragraph{Random elements.}
Finally, a random element can be obtained with ``\verb!gf_random()!'':
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i12)}# makelist(gf_random(), i,1,3);
#\tr{(\%o12)}\bc\rb{$\tb{[\,x^2+x+1,\ x^2+x,\ x^3\,]}$}\ec#
\end{lstlisting}
\section*{Primitive elements, powers and logarithms}
The non-zero elements of a finite field form a multiplicative group; a
generator of this group is a \emph{primitive element} in the field. The
command ``\verb!gf_primitive()!'' returns the already computed primitive element:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i13)}# gf_primitive();
#\tr{(\%o13)}\bc\rb{$\tb{x}$}\ec#
\end{lstlisting}
Given that any non-zero element in the field can be expressed as a power of
this primitive element, this power is the \emph{index} of the element; its
value is obtained with ``\verb!gf_index!'':
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i14)}# a : x^3+x#\$#
#\tr{(\%i15)}# gf_index(a);
#\tr{(\%o15)}\bc\rb{$\tb{9}$}\ec#
#\tr{(\%i16)}# is(a = gf_exp(x, 9));
#\tr{(\%o16)}\bc\rb{$\tb{true}$}\ec#
\end{lstlisting}
Since every element of the field can be represented as a polynomial
\[
a_{n-1}x^{n-1}+a_{n-2}x^{n-2}+\cdots+a_2x^2+a_1x+a_0
\]
where every coefficient $a_i$ satisfies $0\le a_i\le p-1$, a field element can
also be considered as a list:
\[
[a_{n-1},a_{n-2},\ldots,a_2,a_1,a_0].
\]
This list can be considered as the ``digits'' of a number in base $p$, in
which the field element is equivalent to the number
\[
a_{n-1}p^{n-1}+a_{n-2}p^{n-2}+\cdots+a_2p^2+a_1p+a_0.
\]
Thus every polynomial is equivalent to a number between 0 and $p^n-1$; this
number is obtained by ``\texttt{gf\_p2n}''.
The reverse direction is given by ``\texttt{gf\_n2p}''.
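For example, in the field $GF(2^4)$ defined earlier, the element $x^3+x$ has
coefficient list $[1,0,1,0]$, i.e.\ $1010_2 = 10$ (a sketch; session prompts
omitted):
\vspace*{2mm}
\begin{lstlisting}
gf_p2n(x^3+x);  /* -> 10 */
gf_n2p(10);     /* -> x^3+x */
\end{lstlisting}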
Since every non-zero field element $a=a(x)$ is both equivalent to a number $A$
and a power $i$ of a primitive element $e$, we can create an array of powers
corresponding to particular numbers. This array, \texttt{gf\_powers},
which is created by \verb!gf_make_logs!,
is defined as follows:
its $i$-th element (starting with zero) is the numerical form of the $i$-th power of the
primitive element. Thus, if
\[
a(x)\equiv A\equiv e^i
\]
where $e$ is the primitive element, then the $i$-th element of \texttt{gf\_powers} is
$A$. By definition we have $e^{p^n-1}=1$.
Since the numbers $A$ run over all integers from 1 to $p^n-1$, and the powers $i$
run over all integers from 0 to $p^n-1$, there is a corresponding
``logarithm'' array, \texttt{gf\_logs}.
The logarithm table may be considered to be
indexed from 0 to $p^n-1$, and its $i$-th element (ignoring the 0-th) is the power
corresponding to that element.
The last array returned by \verb!gf_make_logs! is \verb!gf_zech_logs!,
which enables efficient addition: the Zech logarithm $Z(i)$ is defined by
$e^{Z(i)}=1+e^{i}$, so the sum of two powers of $e$ can be computed by
index arithmetic alone.
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i17)}# map(listarray, gf_make_logs());
#\vspace{-4mm}#
#\tr{(\%o17)}\bc\rb{$\tb{
[\,\,[1,2,4,8,3,6,12,11,5,10,7,14,15,13,9,1],
}$}\ec#
#\bc\hspace*{1mm}\rb{$\tb{
[false,0,1,4,2,8,5,10,3,14,9,7,6,13,11,12],
}$}\ec#
#\bc\hspace*{1mm}\rb{$\tb{
[false,4,8,14,1,10,13,9,2,7,5,12,11,6,3,false]\,\,]
}$}\ec#
#\tr{(\%i18)}# c : gf_exp(x, 4);
#\tr{(\%o18)}\bc\rb{$\tb{x + 1}$}\ec#
#\tr{(\%i19)}# gf_p2n(c);
#\tr{(\%o19)}\bc\rb{$\tb{3}$}\ec#
#\tr{(\%i20)}# gf_index(c);
#\tr{(\%o20)}\bc\rb{$\tb{4}$}\ec#
#\tr{(\%i21)}# gf_logs[3];
#\tr{(\%o21)}\bc\rb{$\tb{4}$}\ec#
#\tr{(\%i22)}# gf_powers[4];
#\tr{(\%o22)}\bc\rb{$\tb{3}$}\ec#
\end{lstlisting}
The creation of the arrays \texttt{gf\_logs} and \texttt{gf\_powers}
only has to be done once.
\paragraph{Logarithms.}
The array \texttt{gf\_logs} contains the logarithm of any non-zero element
with respect to the primitive element \texttt{e} of the field.
The same holds for the function \texttt{gf\_index}. The logarithm
of any element relative to the base of another can be obtained by the
command ``\verb!gf_log!'':
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i23)}# a : x^3+x#\$#
#\tr{(\%i24)}# b : x^3+x^2+1#\$#
#\tr{(\%i25)}# gf_log(a, b);
#\tr{(\%o25)}\vspace*{2mm}\bc\rb{$\tb{3}$}\ec#
#\tr{(\%i26)}# is(a = gf_exp(b, 3));
#\tr{(\%o26)}\bc\rb{$\tb{true}$}\ec#
\end{lstlisting}
We conclude that, in our field, $a=b^{3}$.
\paragraph{Primitive elements.}
A given field will have many primitive elements, and the command \\
``\verb!gf_primitive_p!'' tests whether an element is primitive:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i27)}# gf_primitive_p(a);
#\tr{(\%o27)}\vspace{0mm}\bc\rb{$\tb{false}$}\ec#
#\tr{(\%i28)}# gf_primitive_p(b);
#\tr{(\%o28)}\bc\rb{$\tb{true}$}\ec#
\end{lstlisting}
\paragraph{Order.}
Any non-zero element $a$ of the field satisfies $a^{p^n-1}=1$. The
\emph{order} of $a$ is the \emph{lowest} power $m$ for which $a^m=1$. It will
be a divisor of $p^n-1$, and is obtained with ``\verb!gf_order!'':
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i29)}# gf_order(a);
#\tr{(\%o29)}\bc\rb{$\tb{5}$}\ec#
#\tr{(\%i30)}# gf_order(b);
#\tr{(\%o30)}\bc\rb{$\tb{15}$}\ec#
\end{lstlisting}
\section*{Minimal polynomials}
Associated with every element $a\in GF(p^n)$ is a polynomial $p(x)$ which
satisfies:
\begin{enumerate}
\item $p(a)=0$,
\item the coefficient of the highest power in $p(x)$ is one,
\item for any other polynomial $q(x)$ with $q(a)=0$, $p(x)$ is a divisor of $q(x)$.
\end{enumerate}
The polynomial $p(x)$ is thus, in a very strict sense, the \emph{smallest}
polynomial which has $a$ as a root. It is the \emph{minimal polynomial} of
$a$. The command ``\verb!gf_minimal_poly!'' calculates it:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i31)}# a : x^3+x#\$#
#\tr{(\%i32)}# p : gf_minimal_poly(a);
#\tr{(\%o32)}\bc\rb{$\tb{z^4+z^3+z^2+z+1}$}\ec#
\end{lstlisting}
To check this, substitute $a$ for $z$ in $p$:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i33)}# subst(a, z, p);
#\tr{(\%o33)}\bc\rb{$\tb{(x^3+x)^4+(x^3+x)^3+(x^3+x)^2+x^3+x+1}$}\ec#
#\tr{(\%i34)}# gf_eval(%);
#\tr{(\%o34)}\bc\rb{$\tb{0}$}\ec#
\end{lstlisting}
\section*{An application: the Chor-Rivest knapsack cryptosystem}
The Chor-Rivest knapsack cryptosystem is the only knapsack cryptosystem which
doesn't use modular arithmetic; instead it uses the arithmetic of finite
fields. Although it has been broken, it is still a very good example of
finite field arithmetic.
Assume the two protagonists are Alice and Bob, and that Alice wishes to set up a
public key for Bob to encrypt messages to her. Alice chooses a finite field
$\mathbb{F}_{p^n}=\mathbb{F}_p[x]/m(x)$, and a random primitive element $g(x)$. She
then computes $a_i=\log_{g(x)}(x+i)$ for every $i\in\mathbb{F}_p$. She
selects a random integer $d$ for which $0\le d\le p^n-2$, and computes
$c_i=(a_i+d)\pmod{p^n-1}$. Her public key is the sequence $c_i$, with the
parameters $p$ and $n$.
To encrypt a message to Alice, Bob encodes the message as binary blocks of
length $p$ which contain $n$ ones. Given one such block
$M=(M_0,M_1,\ldots,M_{p-1})$, Bob creates the cipher text
\[
c=\sum_{i=0}^{p-1}M_ic_i\pmod{p^n-1}
\]
which he sends to Alice.
To decrypt $c$, Alice first computes $r=(c-nd)\pmod{p^n-1}$, and then computes
$u(x)=g(x)^r\pmod{m(x)}$. She then computes $s(x)=u(x)+m(x)$ and factors $s$
into linear factors $x+t_i$. The $t_i$ values are the positions of the ones
in the message block $M$.
Actually, the complete cryptosystem also involves a permutation, which is
applied to the sequence $a_i$ to create $c_i$. But for this example we are
just interested in the field arithmetic.
We shall choose the example given in chapter 8 of HAC (the \emph{Handbook of
Applied Cryptography}), but without the permutation. Here the field is
\[
GF(7^4)=\mathbb{F}_7[x]/(x^4+3x^3+5x^2+6x+2)
\]
and the primitive element chosen is $g(x)=3x^3+3x^2+6$ and the random integer
$d$ is 1702.
First, Alice must compute her public key:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i35)}# gf_set_data(7, x^4+3*x^3+5*x^2+6*x+2)#\$#
#\tr{(\%i36)}# g : 3*x^3+3*x^2+6#\$#
#\tr{(\%i37)}# gf_primitive_p(g);
#\tr{(\%o37)}\bc\rb{$\tb{true}$}\ec#
#\tr{(\%i38)}# a : makelist(gf_log(x+i, g), i,0,6);
#\tr{(\%o38)}\bc\rb{$\tb{[\,1028,\,1935,\,2054,\,1008,\,379,\,1780,\,223\,]}$}\ec#
#\tr{(\%i39)}# d : 1702#\$#
#\tr{(\%i40)}# c : makelist(mod(a[i] + d, gf_order()), i,1,7);
#\tr{(\%o40)}\bc\rb{$\tb{[\,330,\,1237,\,1356,\,310,\,2081,\,1082,\,1925\,]}$}\ec#
\end{lstlisting}
Now Bob can encrypt a message to Alice; suppose one such block is
$[1,0,1,1,0,0,1]$, which is a block of length 7 which contains exactly 4 ones.
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i41)}# M : [1,0,1,1,0,0,1];
#\tr{(\%o41)}\bc\rb{$\tb{[\,1,\,0,\,1,\,1,\,0,\,0,\,1\,]}$}\ec#
#\tr{(\%i42)}# c : mod(sum(M[i] * c[i], i,1,7), gf_order());
#\tr{(\%o42)}\bc\rb{$\tb{1521}$}\ec#
\end{lstlisting}
This last value is the ciphertext. Alice now needs to decrypt it:
\vspace*{-4mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i43)}# r : mod(c - gf_exponent() * d, gf_order());
#\tr{(\%o43)}\bc\rb{$\tb{1913}$}\ec#
#\tr{(\%i44)}# u : gf_exp(g, r);
#\tr{(\%o44)}\bc\rb{$\tb{x^3+3\,x^2+2\,x+5}$}\ec#
#\tr{(\%i45)}# s : u + gf_reduction();
#\tr{(\%o45)}\bc\rb{$\tb{x^4+4\,x^3+8\,x^2+8\,x+7}$}\ec#
#\tr{(\%i46)}# gf_factor(s);
#\tr{(\%o46)}\vspace{0mm}\bc\rb{$\tb{x\ (x + 2)\ (x + 3)\ (x + 6)}$}\ec#
\end{lstlisting}
The $t_i$ values are $0,2,3,6$ and these are the positions of the ones in $M$.
\vspace*{-3mm}
\section*{Matrices}
There are commands for dealing with matrices over finite fields. E.g.
``\verb!gf_invert_by_lu!'' for inverting a matrix, and ``\verb!gf_matmult!'' for
multiplying matrices.
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i47)}# gf_set_again(F16)#\$#
#\tr{(\%i48)}# m : matrix([1,x^3+1], [x^2+1,x]);
#\tr{(\%o48)}\bc\rbb{$\tb{\D{\left(\begin{array}[h]{cc}1&x^3+1\\x^2+1&x\end{array}\right)}}$}\vspace{3mm}\ec#
#\tr{(\%i49)}# m_inv : gf_invert_by_lu(m);
#\tr{(\%o49)}\bc\rbb{$\tb{\D{\left(\begin{array}[h]{cc}x^2&1\\x^3+x&x\end{array}\right)}}$}\vspace{3mm}\ec#
#\tr{(\%i50)}# gf_matmult(m, m_inv);
#\tr{(\%o50)}\bc\rbb{$\tb{\D{\left(\begin{array}[h]{cc}1&0\\0&1\end{array}\right)}}$}\vspace{0mm}\ec#
\end{lstlisting}
\vspace*{-3mm}
\section*{Normal bases}
Any field $GF(p^n)$ may be considered as a vector space over
$\mathbb{F}_p$; one basis is the set
\[
\{1,x,x^2,\ldots,x^{n-1}\}
\]
which is called the \emph{polynomial basis}. A \emph{normal element} is a
field element $e$ for which the set
\[
\{e,e^p,e^{p^2},\ldots,e^{p^{n-1}}\}
\]
forms a basis. There are several commands for dealing with normal elements
and bases. The command ``\verb!gf_random_normal()!'' finds a normal element by
simply picking field elements at random and testing each one for normality.
Although this is a probabilistic algorithm, in practice it works very quickly:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i51)}# gf_set_data(2, x^10+x^3+1)#\$#
#\tr{(\%i52)}# p : gf_random_normal();
#\tr{(\%o52)}\vspace{0mm}\bc\rb{$\tb{x^9+x^8+x^7+x^6+x^5+x}$}\ec#
\end{lstlisting}
The command ``\verb!gf_normal()!'' performs a brute-force search through all
field elements; in general it is slower than \verb!gf_random_normal()!.
Having found a normal element, the command ``\verb!gf_normal_basis()!'' produces a
matrix whose rows are the coefficients of the basis elements
$e^{p^k}$. This command takes an optional parameter, a polynomial $p$. If
present, \verb!gf_normal_basis()! checks whether the field element is normal and, if
so, produces the matrix; otherwise it prints an error message. If the parameter
is not given, \verb!gf_normal_basis()! first finds a normal element and then
uses that element to produce the matrix.
With the normal basis, the command ``\verb!gf_normal_basis_rep(p, mat)!'' produces the
normal basis representation of \texttt{p}, with respect to the basis
\texttt{mat}, as a list of coefficients. One attraction of using normal bases
is that much arithmetic can be simplified; for example, in a normal basis
representation, raising to the power of the prime $p$ amounts to a cyclic
shift of the coefficients:
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i53)}# m : gf_normal_basis(p)#\$#
#\tr{(\%i54)}# a : gf_random();
#\tr{(\%o54)}\vspace{0mm}\bc\rb{$\tb{x^9+x^5+x^3+x^2+1}$}\ec#
#\tr{(\%i55)}# gf_normal_basis_rep(a, m);
#\tr{(\%o55)}\bc\rb{$\tb{[\,1,\,0,\,1,\,0,\,0,\,1,\,1,\,0,\,0,\,0\,]}$}\ec#
#\tr{(\%i56)}# gf_normal_basis_rep(gf_exp(a, 2), m);
#\tr{(\%o56)}\bc\rb{$\tb{[\,1,\,1,\,0,\,1,\,0,\,1,\,0,\,1,\,1,\,1\,]}$}\ec#
\end{lstlisting}
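As a further sanity check (a sketch, assuming the first row of the basis matrix
corresponds to the normal element $e$ itself), the representation of the normal
element should be the first unit vector:
\begin{lstlisting}
gf_normal_basis_rep(p, m);
/* expected: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0] */
\end{lstlisting}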
\section*{Large fields}
\texttt{gf\_set\_data} computes and stores the powers $x^{p^k}$.
If \texttt{gf\_set\_data} has been called, \verb!gf_exp! uses the technique
of modular composition; otherwise \verb!gf_exp! performs ``repeated squaring''.
\verb!gf_index! and \verb!gf_log! use a
Pohlig-Hellman reduction together with Brent's version of the Pollard rho algorithm.
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i57)}# gf_set_data(2, x^20+x^3+1);
#\tr{(\%o57)}\bc\rb{$\tb{Structure [GF-DATA]}$}\ec#
#\vspace{2mm}\tr{(\%i58)}# gf_info()$
#\rb{$\tb{characteristic\ =\ 2}$}#
#\rb{$\tb{reduction\ polynomial\ =\ x^{20}+x^3+1}$}#
#\rb{$\tb{primitive\ element\ =\ x}$}#
#\rb{$\tb{nr\ of\ elements\ =\ 1048576}$}#
#\rb{$\tb{nr\ of\ units\ =\ 1048575}$}#
#\rb{$\tb{nr\ of\ primitive\ elements\ =\ 480000}$}#
#\tr{(\%i59)}# a : x^15+x^5+1;
#\tr{(\%o59)}\bc\rb{$\tb{x^{15}+x^5+1}$}\ec#
#\tr{(\%i60)}# index : gf_index(a);
#\tr{(\%o60)}\bc\rb{$\tb{720548}$}\ec#
#\tr{(\%i61)}# gf_exp(gf_primitive(), index);
#\tr{(\%o61)}\bc\rb{$\tb{x^{15}+x^5+1}$}\ec#
#\tr{(\%i62)}# gf_exp(a, 3^12);
#\tr{(\%o62)}\bc\rb{$\tb{x^{17}+x^{16}+x^{13}+x^{12}+x^{11}+x^3+x^2+x}$}\ec#
\end{lstlisting}
\section*{Field extensions - the AES mixed columns operation}
Above we described how an element of a finite field $GF(p^n)$ can be interpreted as a polynomial
$f(x) = c_{n-1}\,x^{n-1} + c_{n-2}\,x^{n-2} + \cdots + c_1\,x + c_0$
of degree $n-1$ with coefficients in the prime field $GF(p)$.
An element of an extension field $GF(q^k)$, where $q$ is a power of $p$, $q = p^n$,
can be interpreted as a polynomial of degree $k-1$ with coefficients in
the base field $GF(p^n)$. This means that the coefficients themselves can be interpreted
as polynomials of degree $n-1$ over $GF(p)$, and all coefficient arithmetic
is carried out in $GF(p^n)$.
The reduction polynomial of degree $k$ used to define $GF(q^k)$
must be irreducible over $GF(p^n)$; otherwise the resulting structure is not a field, and
not every non-zero element is invertible.
For example, the AES mixed columns operation is defined by a multiplication
in $GF(256^4)$, where the non-irreducible polynomial $x^4+1$ is used for reduction.
The element $a = 3\,x^3+x^2+x+2$ used for the mixed columns multiplication is nevertheless invertible,
$a^{-1} = B\,x^3+D\,x^2+9\,x+E$ in base $16$, so the inverse operation is guaranteed.
The following session shows the mixed columns operation applied to one column, represented
by the list $[30, 5D, BF, D4]$. (These four bytes are taken from the example on page 33 of the
AES specification document fips-197.pdf; they are the first four bytes modified
by the mixed columns operation there.)
User commands for field extensions are prefixed with ``\verb!ef_!'' and for nearly every
\texttt{gf}-function there is a corresponding \texttt{ef}-function.
The polynomial \texttt{m(x)} used for reduction can be defined by
``\verb!ef_minimal_set(m(x))!'' or ``\verb!ef_set_data(m(x))!''.
The \texttt{ef}-functions then use the field previously defined by
\texttt{gf\_set\_data} as the base field.
\vspace*{4mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i1)}# gf_set_data(2, x^8+x^4+x^3+x+1)#\$#
#\tr{(\%i2)}# ef_irreducible_p(x^4+1);
#\tr{(\%o2)}\bc\rb{$\tb{false}$}\ec#
#\tr{(\%i3)}# ef_minimal_set(x^4+1);
#\tr{(\%o3)}\bc\rb{$\tb{reduction\ polynomial\ =\ x^4+1}$}\ec#
#\tr{(\%i4)}# ibase : obase : 16#\$#
#\tr{(\%i5)}# p : ef_l2p([30, 5D, 0BF, 0D4]);
#\tr{(\%o5)}\bc\rb{$\tb{30\ x^3+5D\ x^2+0BF\ x+0D4}$}\ec#
#\tr{(\%i6)}# a : 3*x^3+x^2+x+2;
#\tr{(\%o6)}\bc\rb{$\tb{3\ x^3+x^2+x+2}$}\ec#
#\tr{(\%i7)}# ef_p2l(pa : ef_mult(p, a));
#\tr{(\%o7)}\bc\rb{$\tb{[0E5,\ 81,\ 66,\ 4]}$}\ec#
#\tr{(\%i8)}# ai : ef_inv(a);
#\tr{(\%o8)}\bc\rb{$\tb{0B\ x^3+0D\ x^2+9\ x+0E}$}\ec#
#\tr{(\%i9)}# ef_mult(ai, a);
#\tr{(\%o9)}\bc\rb{$\tb{1}$}\ec#
#\tr{(\%i10)}# ef_mult(ai, pa);
#\tr{(\%o10)}\bc\rb{$\tb{30\ x^3+5D\ x^2+0BF\ x+0D4}$}\ec#
#\tr{(\%i11)}# ibase : obase : 10.#\$#
\end{lstlisting}
\section*{Square and cube roots}
Multiple algorithms have been implemented to solve the square and cube root extraction problem over $\mathbb{F}_p$. All of them basically perform an exponentiation in an extension field (i.e.\ $\mathbb{F}_{p^2}=\mathbb{F}_{p}[x]/(x^2+bx+a)$ or $\mathbb{F}_{p^3}=\mathbb{F}_{p}[x]/(x^3-bx-a)$) through a repeated-squaring scheme, thus reaching a complexity of $O(n \log(p))$ multiplications in $\mathbb{F}_p$. However, due to some differences in the representation and multiplication of elements in the extension field, they do not all have exactly the same running time:
\begin{enumerate}
\item \verb!msqrt(a,p)! returns the two square roots of $a$ in $\mathbb{F}_p$ (if they exist) representing every $k$-th power of $x$ in $\mathbb{F}_{p}[x]/(x^2+bx+a)$ as the first column of the matrix $M^k$, where $M$ is the companion matrix associated with the polynomial $x^2+bx+a$ and $b^2-4a$ is a quadratic non-residue in $\mathbb{F}_p^*$. It requires $5 \log_2(p)$ multiplications in $\mathbb{F}_p$.
\item \verb!ssqrt(a,p)! returns the two square roots of $a$ in $\mathbb{F}_p$ (if they exist) using Shanks' algorithm. It requires $5 \log_2(p)$ multiplications in $\mathbb{F}_p$.
\item \verb!gf_sqrt(a,p)! returns the two square roots of $a$ in $\mathbb{F}_p$ (if they exist) using the Muller algorithm (an improved, shifted version of Cipolla-Lehmer's) and should reach the best performance, requiring only $2 \log_2(p)$ multiplications in $\mathbb{F}_p$.
\item \verb!mcbrt(a,p)! returns the cube roots of $a$ in $\mathbb{F}_p$ (if they exist) representing every $k$-th power of $x$ in $\mathbb{F}_{p}[x]/(x^3+bx+a)$ as the vector $(M_{2,2},M_{2,3},M_{3,2})$ in the matrix $M^k$, where $M$ is the companion matrix associated with the polynomial $x^3+bx+a$, irreducible over $\mathbb{F}_p$ (the Stickelberger-Redei irreducibility test for cubic polynomials is used). It requires $10 \log_2(p)$ multiplications in $\mathbb{F}_p$.
\item \verb!scbrt(a,p)! follows the same multiplication steps as \verb!mcbrt(a,p)!, using a simpler polynomial representation for the elements of the field extension. It requires about $11 \log_2(p)$ multiplications in $\mathbb{F}_p$.
\item \verb!gf_cbrt(a,p)! returns the cube roots of $a$ in $\mathbb{F}_p$ (if they exist) using the generalized Shanks algorithm: it is quite fast, requiring about $4 \log_2(p)$ multiplications in $\mathbb{F}_p$, which makes it the candidate of choice for cube root extraction.
\end{enumerate}
Other implemented routines, based on similar ideas, are:
\begin{enumerate}
\item \verb!lucas(n)!, returning the $n$-th Lucas number through a Muller-like scheme; it requires exactly $2$ squarings and $3$ sums for each bit in the binary representation of $n$, and therefore has a bit-complexity bounded by $2\log_2(n)^{3+\varepsilon}$, with $\varepsilon$ depending on the adopted integer squaring algorithm.
\item \verb!qsplit(p)! and \verb!csplit(p)!, splitting a prime $p$ over $\mathbb{Z}[i]$ and $\mathbb{Z}[\omega]$, i.e.\ finding $(a,b)$ such that $p=a^2+b^2$ (possible only when $p$ is of the form $4k+1$) or $p=a^2+ab+b^2$ (possible only when $p$ is of the form $3k+1$), by the reduction of a binary quadratic form of the proper discriminant. They have the same complexity as the computation of a single Jacobi symbol, $O(\log(p)^2)$ bit-operations.
\end{enumerate}
\vspace{1cm}
In Maxima 5.29 and later \verb!lucas! is a core function.
\vspace*{2mm}
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i1)}# lucas(141);
#\tr{(\%o1)}\bc\rb{$\tb{293263001536128903730947142076}$}\ec#
\end{lstlisting}
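Since the Lucas numbers satisfy $L_n = L_{n-1} + L_{n-2}$, the result is easy to
cross-check (a sketch, not part of the original session):
\begin{lstlisting}
is(lucas(141) = lucas(140) + lucas(139));
/* => true */
\end{lstlisting}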
\vspace*{4mm}
All the other functions listed above must first be loaded via ``\verb!load(gf)!'':
\pagebreak
\begin{lstlisting}[escapechar=\#]
#\tr{(\%i2)}# load(gf)#\$#
#\tr{(\%i3)}# msqrt(64, 1789); ssqrt(64, 1789); gf_sqrt(64, 1789);
#\tr{(\%o3)}\bc\rb{$\tb{[\,1781,\,8\,]}$}\ec#
#\tr{(\%o4)}\bc\rb{$\tb{[\,8,\,1781\,]}$}\ec#
#\tr{(\%o5)}\bc\rb{$\tb{[\,1781,\,8\,]}$}\ec#
#\tr{(\%i6)}# mcbrt(64, 1789); scbrt(64, 1789); gf_cbrt(64, 1789);
#\tr{(\%o6)}\bc\rb{$\tb{[\,4,\,608,\,1177\,]}$}\ec#
#\tr{(\%o7)}\bc\rb{$\tb{[\,4,\,608,\,1177\,]}$}\ec#
#\tr{(\%o8)}\bc\rb{$\tb{[\,4,\,1177,\,608\,]}$}\ec#
#\tr{(\%i9)}# gf_factor(x^3-64, 1789);
#\tr{(\%o9)}\bc\rb{$\tb{(x + 612)\ (x + 1181)\ (x + 1785)}$}\ec#
#\tr{(\%i10)}# map(lambda([n], n - 1789), %);
#\tr{(\%o10)}\bc\rb{$\tb{(x - 1177)\ (x - 608)\ (x - 4)}$}\ec#
#\tr{(\%i11)}# qsplit(1789);
#\tr{(\%o11)}\bc\rb{$\tb{[\,5,\,42\,]}$}\ec#
#\tr{(\%i12)}# csplit(1789);
#\tr{(\%o12)}\bc\rb{$\tb{[\,12,\,35\,]}$}\ec#
\end{lstlisting}
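All of these outputs can be double-checked with plain modular arithmetic (a
sketch, not part of the original session):
\begin{lstlisting}
mod(8^2, 1789);          /* => 64 */
mod(1781^2, 1789);       /* => 64 */
mod(4^3, 1789);          /* => 64 */
5^2 + 42^2;              /* => 1789 */
12^2 + 12*35 + 35^2;     /* => 1789 */
\end{lstlisting}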
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "finite_fields"
%%% End:
"""
ModelGrid.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Thu Dec 5 15:49:16 MST 2013
Description: For working with big model grids. Setting them up, running them,
and analyzing them.
"""
from __future__ import print_function
import os
import gc
import re
import copy
import time
import signal
import subprocess
import numpy as np
from .GridND import GridND
from ..util import ProgressBar
from .ModelFit import ModelFit
from ..analysis import ModelSet
from ..simulations import Global21cm
from ..util.ReadData import concatenate
from ..analysis import Global21cm as _AnalyzeGlobal21cm
from ..util.Pickling import read_pickle_file, write_pickle_file
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
class ModelGrid(ModelFit):
"""Create an object for setting up and running model grids."""
@property
def tol(self):
if not hasattr(self, '_tol'):
if self.grid.structured:
self._tol = 1e-6
else:
self._tol = 1e-3
return self._tol
@property
def phenomenological(self):
        if not hasattr(self, '_phenomenological'):
self._phenomenological = False
if 'tanh_model' in self.base_kwargs:
if self.base_kwargs['tanh_model']:
self._phenomenological = True
if 'gaussian_model' in self.base_kwargs:
if self.base_kwargs['gaussian_model']:
self._phenomenological = True
if 'parametric_model' in self.base_kwargs:
if self.base_kwargs['parametric_model']:
self._phenomenological = True
return self._phenomenological
@property
def simulator(self):
if not hasattr(self, '_simulator'):
self._simulator = Global21cm
return self._simulator
def _read_restart(self, prefix, procid=None):
"""
Figure out which models have already been run.
        Note that how this works depends on whether the gridding has been changed.
Parameters
----------
prefix : str
File prefix of files ending in *.pkl to be read in.
"""
# Array of ones/zeros: has this model already been done?
# This is only the *new* grid points.
if self.grid.structured:
done = np.zeros(self.grid.shape)
if procid is None:
procid = rank
if os.path.exists('{0!s}.{1!s}.chain.pkl'.format(prefix, str(procid).zfill(3))):
prefix_by_proc = '{0!s}.{1!s}'.format(prefix, str(procid).zfill(3))
else:
return done
#raise ValueError('This shouldn\'t happen anymore.')
# Need to see if we're running with the same number of processors
# We could probably set things up such that this isn't a problem.
#fn_by_proc = lambda proc: '{0!s}.{1!s}.chain.pkl'.format(prefix, str(proc).zfill(3))
#fn_size_p1 = fn_by_proc(size+1)
#if os.path.exists(fn_size_p1):
# raise IOError('Original grid run with more processors!')
#
# proc_id = size + 1
# while os.path.exists(fn_by_proc(proc_id)):
# proc_id += 1
# continue
# Read in current status of model grid, i.e., the old
# grid points.
chain = concatenate(read_pickle_file('{!s}.chain.pkl'.format(\
prefix_by_proc), nloads=None, verbose=False))
# If we said this is a restart, but there are no elements in the
# chain, just run the thing. It probably means the initial run never
# made it to the first checkpoint.
if chain.size == 0:
if rank == 0:
print(("Pre-existing chain file(s) empty for proc #{}. " +\
"Running from beginning.").format(rank))
if not self.grid.structured:
self.done = np.array([0])
return done
# Read parameter info
(axes_names, is_log) = read_pickle_file(\
'{!s}.pinfo.pkl'.format(prefix), nloads=1, verbose=False)
# Prepare for blobs (optional)
if os.path.exists('{!s}.binfo.pkl'.format(prefix)):
self.pf = read_pickle_file('{!s}.binfo.pkl'.format(prefix),\
nloads=1, verbose=False)
elif os.path.exists('{!s}.setup.pkl'.format(prefix)):
self.pf = read_pickle_file('{!s}.setup.pkl'.format(prefix),\
nloads=1, verbose=False)
if len(axes_names) != chain.shape[1]:
raise ValueError('Cannot change dimensionality on restart!')
if self.grid.structured:
if axes_names != self.grid.axes_names:
raise ValueError('Cannot change axes variables on restart!')
else:
for par in axes_names:
if par in self.grid.axes_names:
continue
raise ValueError('Cannot change axes variables on restart!')
# What follows is irrelevant for unstructured grids.
if (not self.grid.structured):
self.done = done = np.array([chain.shape[0]])
return done
# Loop over chain read-in from disk and compare to grid.
# Which models have already been computed?
for link in chain:
# Parameter set from pre-existing chain
kw = {par:link[i] \
for i, par in enumerate(self.grid.axes_names)}
# Its location in *new* grid
kvec = self.grid.locate_entry(kw, tol=self.tol)
if None in kvec:
continue
done[kvec] = 1
if done.sum() != len(chain):
print("WARNING: Some chain elements not found.")
return done
@property
def axes(self):
return self.grid.axes
@axes.setter
def axes(self, kwargs):
"""
Create GridND instance, construct N-D parameter space.
"""
for kwarg in kwargs:
assert kwargs[kwarg].size == np.unique(kwargs[kwarg]).size, \
"Redundant elements detected for parameter={!s}".format(kwarg)
self.grid = GridND()
# Build parameter space
self.grid.build(**kwargs)
# Save for later access
self.kwargs = kwargs
# Shortcut to parameter names
self.parameters = self.grid.axes_names
@property
def priors(self):
# Need to override priors in ModelFit
return {}
def set_models(self, models):
"""
Set all models by hand.
Parameters
----------
models : list
List of models to run. Each entry in the list should be a
dictionary of parameters that define that model. The
base_kwargs will be updated with those values at run-time.
"""
self.grid = GridND()
# Build parameter space
self.grid.all_kwargs = models
self.grid.axes_names = list(models[0].keys())
self.grid.Nd = len(self.grid.axes_names)
# Shortcut to parameter names
if not hasattr(self, 'parameters'):
self.parameters = self.grid.axes_names
def _reshape_assignments(self, assignments):
assign = []
for h, kwargs in enumerate(self.grid.all_kwargs):
# Where does this model live in the grid?
if self.grid.structured:
kvec = self.grid.locate_entry(kwargs, tol=self.tol)
else:
kvec = h
if self.is_restart:
if hasattr(self, 'done'):
if self.done[kvec]:
continue
assign.append(assignments[kvec])
return np.array(assign, dtype=int)
def prep_output_files(self, restart, clobber):
"""
Stick this in utilities folder?
"""
prefix_by_proc = '{0!s}.{1!s}'.format(self.prefix, str(rank).zfill(3))
# Reshape assignments so it's Nlinks long.
if self.grid.structured:
assignments = self._reshape_assignments(self.assignments)
if restart:
if rank == 0:
write_pickle_file(assignments,\
'{!s}.load.pkl'.format(self.prefix), ndumps=1,\
open_mode='a', safe_mode=False, verbose=False)
return
else:
if restart:
return
if rank > 0:
return
super(ModelGrid, self)._prep_from_scratch(clobber, by_proc=True)
if self.grid.structured:
write_pickle_file(assignments,\
'{!s}.load.pkl'.format(self.prefix), ndumps=1, open_mode='w',\
safe_mode=False, verbose=False)
# ModelFit makes this file by default but grids don't use it.
if os.path.exists('{!s}.logL.pkl'.format(self.prefix)) and (rank == 0):
os.remove('{!s}.logL.pkl'.format(self.prefix))
for par in self.grid.axes_names:
if re.search('Tmin', par):
f = open('{!s}.fcoll.pkl'.format(prefix_by_proc), 'wb')
f.close()
break
@property
def blank_blob(self):
if not hasattr(self, '_blank_blob'):
blob_names = self.base_kwargs['blob_names']
if blob_names is None:
self._blank_blob = []
return []
blob_ivars = self.base_kwargs['blob_ivars']
blob_funcs = self.base_kwargs['blob_funcs']
blob_nd = [len(grp) if grp is not None else 0 \
for grp in blob_ivars]
##
# Need to be a little careful with blob ivars due to
# new-ish (ivar name, ivar values) approach.
##
blob_dims = []
for grp in blob_ivars:
if grp is None:
blob_dims.append(None)
continue
dims = []
for element in grp:
ivarn, ivars = element
dims.append(len(ivars))
blob_dims.append(tuple(dims))
self._blank_blob = []
for i, group in enumerate(blob_names):
if blob_ivars[i] is None:
self._blank_blob.append([np.inf] * len(group))
else:
if blob_nd[i] == 0:
self._blank_blob.append([np.inf] * len(group))
elif blob_nd[i] == 1:
arr = np.ones([len(group), blob_dims[i][0]])
self._blank_blob.append(arr * np.inf)
elif blob_nd[i] == 2:
dims = len(group), blob_dims[i][0], \
blob_dims[i][1]
arr = np.ones(dims)
self._blank_blob.append(arr * np.inf)
return self._blank_blob
@property
def simulator(self):
if not hasattr(self, '_simulator'):
from ..simulations import Global21cm
self._simulator = Global21cm
return self._simulator
@property
def reuse_splines(self):
if not hasattr(self, '_reuse_splines'):
self._reuse_splines = True
if 'feedback_LW' in self.base_kwargs:
if self.base_kwargs['feedback_LW']:
self._reuse_splines = False
return self._reuse_splines
@reuse_splines.setter
def reuse_splines(self, value):
self._reuse_splines = value
@property
def tricks(self):
if not hasattr(self, '_tricks'):
self._tricks = []
return self._tricks
@tricks.setter
def tricks(self, value):
if not hasattr(self, '_tricks'):
assert type(value) is tuple
self._tricks = [value]
else:
self._tricks.append(value)
@property
def trick_names(self):
return list(zip(*self.tricks))[0]
@property
def trick_files(self):
return list(zip(*self.tricks))[1]
#@property
#def trick_data(self):
# if not hasattr(self, '_trick_data'):
# self._trick_data = {}
# return self._trick_data
#
#@trick_data.setter
#def trick_data(self, value):
# if not hasattr(self, '_tricks'):
# assert type(value) is dict
# self._tricks = value
# else:
# self._tricks.update(value)
@property
def is_restart(self):
if not hasattr(self, '_is_restart'):
self._is_restart = False
return self._is_restart
@is_restart.setter
def is_restart(self, value):
self._is_restart = value
def _prep_tricks(self): # pragma: no cover
"""
Super non-general at the moment sorry.
"""
if 'guess_popIII_sfrds' in self.trick_names:
i = self.trick_names.index('guess_popIII_sfrds')
fn = self.trick_files[i]
if fn is not None:
anl = ModelSet(fn)
#if not anl.chain:
# return
print("Ready to cheat!")
# HARD CODING FOR NOW
blob_name = 'popIII_Mmin'
Mmin = anl.ExtractData(blob_name)[blob_name]
zarr = anl.get_ivars(blob_name)[0]
def func(**kw):
# First, figure out where (if anywhere) the parameters
# in hand live in the lookup table.
ind = []
for k, par in enumerate(anl.parameters):
if par not in kw:
ind.append(None)
continue
try:
i = list(anl.parameters).index(kw[par])
except ValueError:
i = np.argmin(np.abs(kw[par] - anl.unique_samples[k]))
ind.append(i)
score = 0.0
for k, par in enumerate(anl.parameters):
if ind[k] is None:
continue
vals = anl.chain[:,ind[k]]
print("{0!s} {1!s} {2!s}".format(k, par, kw[par]))
score += np.abs(vals - kw[par])
best = np.argmin(score)
print("{0!s} {1!s} {2!s}".format(zarr.shape, Mmin.shape,\
best))
f = lambda zz: np.interp(zz, zarr, Mmin[best])
return {'pop_Mmin{2}': f}
#if np.min(score) == 0:
self.trick_funcs['guess_popIII_sfrds'] = func
@property
def trick_funcs(self):
if not hasattr(self, '_trick_funcs'):
self._trick_funcs = {}
return self._trick_funcs
@trick_funcs.setter
def trick_funcs(self, value):
if not hasattr(self, '_trick_funcs'):
self._trick_funcs = {}
assert type(value) is dict
self._trick_funcs.update(value)
@property
def _exit_if_fail_streak(self):
if not hasattr(self, '_exit_if_fail_streak_'):
self._exit_if_fail_streak_ = False
return self._exit_if_fail_streak_
@_exit_if_fail_streak.setter
def _exit_if_fail_streak(self, value):
self._exit_if_fail_streak_ = bool(value)
def _run_sim(self, kw, p):
failct = 0
sim = self.simulator(**p)
if self.debug:
sim.run()
blobs = sim.blobs
try:
if not self.debug:
sim.run()
blobs = copy.deepcopy(sim.blobs)
except RuntimeError:
write_pickle_file(kw, '{0!s}.{1!s}.timeout.pkl'.format(\
self.prefix, str(rank).zfill(3)), ndumps=1, open_mode='a',\
safe_mode=False, verbose=False)
blobs = copy.deepcopy(self.blank_blob)
except MemoryError:
raise MemoryError('This cannot be tolerated!')
except:
# For some reason "except Exception" doesn't catch everything...
# Write to "fail" file
write_pickle_file(kw, '{0!s}.{1!s}.fail.pkl'.format(self.prefix,\
str(rank).zfill(3)), ndumps=1, open_mode='a', safe_mode=False,\
verbose=False)
print("FAILURE: Processor #{}.".format(rank))
failct += 1
blobs = copy.deepcopy(self.blank_blob)
if 'feedback_LW_guess' in self.tricks:
try:
self.trick_data['pop_Mmin{2}'] = \
np.interp(sim.pops[2].halos.tab_z,
sim.medium.field._zarr, sim.medium.field._Mmin_now)
except AttributeError:
del self.trick_data['pop_Mmin{2}']
del sim
return blobs, failct
@property
def debug(self):
if not hasattr(self, '_debug'):
self._debug = False
return self._debug
@debug.setter
def debug(self, value):
assert type(value) in [int, bool]
self._debug = value
def run(self, prefix, clobber=False, restart=False, save_freq=500,
use_pb=True, use_checks=True, long_run=False, exit_after=None):
"""
        Run model grid, for each realization through a given turning point.
Parameters
----------
prefix : str
Prefix for all output files.
save_freq : int
Number of steps to take before writing data to disk. Note that if
you're running in parallel, this is the number of steps *each
processor* will take before writing to disk.
clobber : bool
Overwrite pre-existing files of the same prefix if one exists?
restart : bool
Append to pre-existing files of the same prefix if one exists?
Returns
-------
"""
self.prefix = prefix
self.save_freq = save_freq
prefix_by_proc = '{0!s}.{1!s}'.format(prefix, str(rank).zfill(3))
prefix_next_proc = '{0!s}.{1!s}'.format(prefix, str(rank+1).zfill(3))
if rank == 0:
print("Starting {}-element model grid.".format(self.grid.size))
chain_exists = os.path.exists('{!s}.chain.pkl'.format(prefix_by_proc))
# Kill this thing if we're about to delete files and we haven't
# set clobber=True
if chain_exists and (not clobber):
if not restart:
raise IOError(('{!s}*.pkl exists! Remove manually, set ' +\
'clobber=True, or set restart=True to append.').format(\
prefix_by_proc))
restart_actual = True
_restart_actual = np.zeros(size)
if restart and (not chain_exists):
print(("This can't be a restart (for proc #{0}), {1!s}*.pkl " +\
"not found. Starting from scratch...").format(rank, prefix))
# Note: this can occur if restarting with fewer processors
# than we originally ran with.
else:
_restart_actual[rank] = 1
restart_actual = True
# Figure out if we're running with fewer processors than
# pre-restart
fewer_procs = False
if size > 1:
_restart_np1 = np.zeros(size)
if os.path.exists('{!s}.chain.pkl'.format(prefix_next_proc)):
_restart_np1[rank] = 1
_tmp = np.zeros(size)
MPI.COMM_WORLD.Allreduce(_restart_np1, _tmp)
fewer_procs = sum(_tmp) >= size
else:
pass
# Can't have fewer procs than 1!
# Need to communicate results of restart_actual across all procs
if size > 1:
_all_restart = np.zeros(size)
MPI.COMM_WORLD.Allreduce(_restart_actual, _all_restart)
all_restart = bool(sum(_all_restart) == size)
any_restart = bool(sum(_all_restart) > 0)
else:
all_restart = any_restart = _all_restart = _restart_actual
# If user says it's not a restart, it's not a restart.
any_restart *= restart
self.is_restart = any_restart
# Load previous results if this is a restart
if any_restart:
done = self._read_restart(prefix)
if self.grid.structured:
Ndone = int(done[done >= 0].sum())
else:
Ndone = 0
# Important that this goes second, otherwise this processor
# will count the models already run by other processors, which
# will mess up the 'Nleft' calculation below.
# Figure out what models have been run by *any* processor
# in the old grid.
if size > 1:
if self.grid.structured:
tmp = np.zeros(self.grid.shape)
MPI.COMM_WORLD.Allreduce(done, tmp)
self.done = np.minimum(tmp, 1)
else:
# In this case, self.done is just an integer.
# And apparently, we don't need to know which models are done?
tmp = np.array([0])
MPI.COMM_WORLD.Allreduce(done, tmp)
self.done = tmp[0]
else:
self.done = done
# Find outputs from processors beyond those that we're currently
# using.
if fewer_procs:
if rank == 0:
# Determine what the most number of processors to have
# run this grid (at some point) is
fn_by_proc = lambda proc: '{0!s}.{1!s}.chain.pkl'.format(\
prefix, str(proc).zfill(3))
fn_size_p1 = fn_by_proc(size+1)
_done_extra = np.zeros(self.grid.shape)
if os.path.exists(fn_size_p1):
proc_id = size + 1
while os.path.exists(fn_by_proc(proc_id)):
_done_extra += self._read_restart(prefix, proc_id)
proc_id += 1
continue
print(("This grid has been run with as many as {} " +\
"processors previously. Collectively, these " +\
"processors ran {1} models.").format(proc_id,\
_done_extra.sum()))
_done_all = self.done.copy()
_done_all += _done_extra
for i in range(1, size):
MPI.COMM_WORLD.Send(_done_all, dest=i, tag=10*i)
else:
self.done = np.zeros(self.grid.shape)
MPI.COMM_WORLD.Recv(self.done, source=0, tag=10*rank)
else:
Ndone = 0
if any_restart and self.grid.structured:
mine_and_done = np.logical_and(self.assignments == rank,
self.done == 1)
Nleft = self.load[rank] - np.sum(mine_and_done)
else:
Nleft = self.load[rank]
if Nleft == 0:
print("Processor {} is done already!".format(rank))
# Print out how many models we have (left) to compute
if any_restart and self.grid.structured:
if rank == 0:
Ndone = self.done.sum()
Ntot = self.done.size
print(("Update : {0} models down, {1} to " +\
"go.").format(Ndone, Ntot - Ndone))
if size > 1:
MPI.COMM_WORLD.Barrier()
# Is everybody done?
if np.all(self.done == 1):
return
print(("Update (processor #{0}): Running {1} more " +\
"models.").format(rank, Nleft))
elif rank == 0:
if any_restart:
print('Re-starting pre-existing model set ({} models done already).'.format(self.done))
print('Running {} more models.'.format(self.grid.size))
else:
print('Running {}-element model grid.'.format(self.grid.size))
# Make some blank files for data output
self.prep_output_files(any_restart, clobber)
# Dictionary for hmf tables
fcoll = {}
# Initialize progressbar
pb = ProgressBar(Nleft, 'grid', use_pb)
pb.start()
chain_all = []; blobs_all = []
t1 = time.time()
ct = 0
was_done = 0
failct = 0
# Loop over models, use StellarPopulation.update routine
# to speed-up (don't have to re-load HMF spline as many times)
for h, kwargs in enumerate(self.grid.all_kwargs):
# Where does this model live in the grid?
if self.grid.structured:
kvec = self.grid.locate_entry(kwargs, tol=self.tol)
else:
kvec = h
# Skip if it's a restart and we've already run this model
if any_restart and self.grid.structured:
if self.done[kvec]:
was_done += 1
pb.update(ct)
continue
# Skip if this processor isn't assigned to this model
# This could be moved above the previous check
if self.assignments[kvec] != rank:
pb.update(ct)
continue
# Grab Tmin index
if self.Tmin_in_grid and self.LB == 1:
Tmin_ax = self.grid.axes[self.grid.axisnum(self.Tmin_ax_name)]
i_Tmin = Tmin_ax.locate(kwargs[self.Tmin_ax_name])
else:
i_Tmin = 0
# Copy kwargs - may need updating with pre-existing lookup tables
p = self.base_kwargs.copy()
# Log-ify stuff if necessary
kw = {}
for i, par in enumerate(self.parameters):
if self.is_log[i]:
kw[par] = 10**kwargs[par]
else:
kw[par] = kwargs[par]
p.update(kw)
# Create new splines if we haven't hit this Tmin yet in our model grid.
if self.reuse_splines and \
i_Tmin not in fcoll.keys() and (not self.phenomenological):
#raise NotImplementedError('help')
sim = self.simulator(**p)
pops = sim.pops
if hasattr(self, 'Tmin_ax_popid'):
loc = self.Tmin_ax_popid
suffix = '{{{}}}'.format(loc)
else:
if sim.pf.Npops > 1:
loc = 0
suffix = '{0}'
else:
loc = 0
suffix = ''
hmf_pars = {'pop_Tmin{!s}'.format(suffix): sim.pf['pop_Tmin{!s}'.format(suffix)],
'fcoll{!s}'.format(suffix): copy.deepcopy(pops[loc].fcoll),
'dfcolldz{!s}'.format(suffix): copy.deepcopy(pops[loc].dfcolldz)}
# Save for future iterations
fcoll[i_Tmin] = hmf_pars.copy()
p.update(hmf_pars)
# If we already have matching fcoll splines, use them!
elif self.reuse_splines and (not self.phenomenological):
hmf_pars = {'pop_Tmin{!s}'.format(suffix): fcoll[i_Tmin]['pop_Tmin{!s}'.format(suffix)],
'fcoll{!s}'.format(suffix): fcoll[i_Tmin]['fcoll{!s}'.format(suffix)],
'dfcolldz{!s}'.format(suffix): fcoll[i_Tmin]['dfcolldz{!s}'.format(suffix)]}
p.update(hmf_pars)
else:
pass
# Write this set of parameters to disk before running
# so we can troubleshoot later if the run never finishes.
procid = str(rank).zfill(3)
fn = '{0!s}.{1!s}.checkpt.pkl'.format(self.prefix, procid)
write_pickle_file(kw, fn, ndumps=1, open_mode='w',\
safe_mode=False, verbose=False)
fn = '{0!s}.{1!s}.checkpt.txt'.format(self.prefix, procid)
with open(fn, 'w') as f:
print("Simulation began: {!s}".format(time.ctime()), file=f)
# Kill if model gets stuck
if self.timeout is not None:
signal.signal(signal.SIGALRM, self._handler)
signal.alarm(self.timeout)
##
# Run simulation!
##
blobs, _failct = self._run_sim(kw, p)
failct += _failct
# Disable the alarm
if self.timeout is not None:
signal.alarm(0)
# If this is missing from a file, we'll know where things went south.
fn = '{0!s}.{1!s}.checkpt.txt'.format(self.prefix, procid)
with open(fn, 'a') as f:
print("Simulation finished: {!s}".format(time.ctime()), file=f)
chain = np.array([kwargs[key] for key in self.parameters])
chain_all.append(chain)
blobs_all.append(blobs)
ct += 1
##
# File I/O from here on out
##
pb.update(ct)
# Only record results every save_freq steps
if ct % save_freq != 0:
del p, chain, blobs
gc.collect()
continue
# Not all processors will hit the final checkpoint exactly,
# which can make collective I/O difficult. Hence the existence
# of the will_hit_final_checkpoint and wont_hit_final_checkpoint
# attributes
if rank == 0 and use_checks:
print("Checkpoint #{0}: {1!s}".format(ct // save_freq,\
time.ctime()))
# First assemble data from all processors?
# Analogous to assembling data from all walkers in MCMC
write_pickle_file(chain_all,\
'{!s}.chain.pkl'.format(prefix_by_proc), ndumps=1,\
open_mode='a', safe_mode=False, verbose=False)
self.save_blobs(blobs_all, False, prefix_by_proc)
del p, chain, blobs
del chain_all, blobs_all
gc.collect()
chain_all = []; blobs_all = []
# If, after the first checkpoint, we only have 'failed' models,
# raise an error.
if (ct == failct) and self._exit_if_fail_streak:
raise ValueError('Only failed models up to first checkpoint!')
# This is meant to prevent crashes due to memory fragmentation.
# For it to work (when we run for a really long time), we need
# to write a shell script that calls the .py script that
# executes ModelGrid.run many times, with each call setting
# exit_after to 1 or perhaps a few, depending on the amount
# of memory on hand. This is apparently less of an issue in Python 3.3
if exit_after is not None:
if exit_after == (ct // save_freq):
break
pb.finish()
# Need to make sure we write results to disk if we didn't
# hit the last checkpoint
if chain_all:
write_pickle_file(chain_all,\
'{!s}.chain.pkl'.format(prefix_by_proc), ndumps=1,\
open_mode='a', safe_mode=False, verbose=False)
if blobs_all:
self.save_blobs(blobs_all, False, prefix_by_proc)
print("Processor {0}: Wrote {1!s}.*.pkl ({2!s})".format(rank, prefix,\
time.ctime()))
# You. shall. not. pass.
# Maybe unnecessary?
if size > 1:
MPI.COMM_WORLD.Barrier()
t2 = time.time()
##
# FINAL INFO
##
if rank == 0:
print("Calculation complete: {!s}".format(time.ctime()))
dt = t2 - t1
if dt > 3600:
print("Elapsed time (hr) : {0:.3g}".format(dt / 3600.))
else:
print("Elapsed time (min) : {0:.3g}".format(dt / 60.))
@property
def Tmin_in_grid(self):
"""
Determine if Tmin is an axis in our model grid.
"""
if not hasattr(self, '_Tmin_in_grid'):
ct = 0
name = None
self._Tmin_in_grid = False
for par in self.grid.axes_names:
if re.search('Tmin', par):
ct += 1
self._Tmin_in_grid = True
name = par
self.Tmin_ax_name = name
if ct > 1:
            raise NotImplementedError('Trouble w/ multiple Tmin axes!')
return self._Tmin_in_grid
@property
def nwalkers(self):
# Each processor writes its own data
return 1
@property
def assignments(self):
if not hasattr(self, '_assignments'):
#if hasattr(self, 'grid'):
# if self.grid.structured:
# self._structured_balance(method=0)
# return
self.LoadBalance()
return self._assignments
@assignments.setter
def assignments(self, value):
self._assignments = value
@property
def load(self):
if not hasattr(self, '_load'):
self._load = [np.array(self.assignments == i).sum() \
for i in range(size)]
self._load = np.array(self._load)
return self._load
@property
def LB(self):
if not hasattr(self, '_LB'):
self._LB = 0
return self._LB
@LB.setter
def LB(self, value):
self._LB = value
def _balance_via_grouping(self, par):
pass
def _balance_via_sorting(self, par):
pass
def LoadBalance(self, method=0, par=None):
if self.grid.structured:
self._structured_balance(method=method, par=par)
else:
self._unstructured_balance(method=method, par=par)
def _unstructured_balance(self, method=0, par=None):
if rank == 0:
order = list(np.arange(size))
self._assignments = []
while len(self.assignments) < self.grid.size:
self._assignments.extend(order)
self._assignments = np.array(self._assignments[0:self.grid.size])
if size == 1:
self.LB = 0
return
# Communicate assignments to workers
for i in range(1, size):
MPI.COMM_WORLD.Send(self._assignments, dest=i, tag=10*i)
else:
            self._assignments = np.empty(self.grid.size, dtype=int)
MPI.COMM_WORLD.Recv(self._assignments, source=0,
tag=10*rank)
self.LB = 0
def _structured_balance(self, method=0, par=None):
"""
Determine which processors are to run which models.
Parameters
----------
method : int
0 : OFF
1 : Minimize the number of values of `par' each processor gets.
Good for, e.g., Tmin.
2 : Maximize the number of values of `par' each processor gets.
Useful if increasing `par' slows down the calculation.
Returns
-------
Nothing. Creates "assignments" attribute, which has the same shape
as the grid, with each element the rank of the processor assigned to
that particular model.
"""
self.LB = method
if size == 1:
self._assignments = np.zeros(self.grid.shape, dtype=int)
return
if method in [1, 2]:
assert par in self.grid.axes_names, \
"Supplied load-balancing parameter {!s} not in grid!".format(par)
par_i = self.grid.axes_names.index(par)
par_ax = self.grid.axes[par_i]
par_N = par_ax.size
else:
par_N = np.inf
if method not in [0, 1, 2, 3]:
raise NotImplementedError('Unrecognized load-balancing method {}'.format(method))
# No load balancing. Equal # of models per processor
if method == 0 or (par_N < size):
k = 0
tmp_assignments = np.zeros(self.grid.shape, dtype=int)
for loc, value in np.ndenumerate(tmp_assignments):
if k % size != rank:
k += 1
continue
tmp_assignments[loc] = rank
k += 1
# Communicate results
self._assignments = np.zeros(self.grid.shape, dtype=int)
MPI.COMM_WORLD.Allreduce(tmp_assignments, self._assignments)
# Load balance over expensive axis
elif method in [1, 2]:
self._assignments = np.zeros(self.grid.shape, dtype=int)
slc = [slice(0,None,1) for i in range(self.grid.Nd)]
k = 0 # only used for method 1
# Disclaimer: there's a probably a much more slick way of doing this
# For each value of the input 'par', split up the work.
# If method == 1, make it so that each processor gets only a
# small subset of values for that parameter (e.g., sensible
# for pop_Tmin), or method == 2 make it so that all processors get
# a variety of values of input parameter, which is useful when
# increasing values of this parameter slow down the calculation.
for i in range(par_N):
# Ellipses in all dimensions except that corresponding to a
# particular value of input 'par'
                slc[par_i] = i
                # NumPy requires a tuple (not a list) for multi-dimensional indexing
                sIdx = tuple(slc)
                if method == 1:
                    self._assignments[sIdx] = k \
                        * np.ones_like(self._assignments[sIdx], dtype=int)
                    # Cycle through processor numbers
                    k += 1
                    if k == size:
                        k = 0
                elif method == 2:
                    tmp = np.ones_like(self._assignments[sIdx], dtype=int)
                    leftovers = tmp.size % size
                    assign = np.arange(size)
                    arr = np.array([assign] * int(tmp.size // size)).ravel()
                    if leftovers != 0:
                        # This could be a little more efficient
                        arr = np.concatenate((arr, assign[0:leftovers]))
                    self._assignments[sIdx] = np.reshape(arr, tmp.shape)
else:
raise ValueError('No method={}!'.format(method))
elif method == 3:
# Do it randomly. Need to be careful in parallel.
if rank != 0:
buff = np.zeros(self.grid.dims, dtype=int)
else:
# Could do the assignment 100 times and pick the realization
# with the most even distribution of work (as far as we
# can tell a-priori), but eh.
arr = np.random.randint(low=0, high=size, size=self.grid.size)
buff = np.reshape(arr, self.grid.dims)
self._assignments = np.zeros(self.grid.dims, dtype=int)
nothing = MPI.COMM_WORLD.Allreduce(buff, self._assignments)
else:
raise ValueError('No method={}!'.format(method))
"""
Created on Apr 10, 2014
@author: sstober
"""
import logging
import os
log = logging.getLogger(__name__)
import numpy as np
from pylearn2.utils.timing import log_timing
from deepthought3.experiments.ismir2014.util import load_config
from deepthought3.util.yaml_util import load_yaml_file, save_yaml_file
from deepthought3.experiments.ismir2014.plot import scan_for_best_performance
from deepthought3.experiments.ismir2014.extract_results import extract_results
def train_convnet(config):
train, yaml_str = load_yaml_file(
os.path.join(os.path.dirname(__file__), "train_convnet_template.yaml"),
params=config,
)
save_yaml_file(yaml_str, os.path.join(config.experiment_root, "settings.yaml"))
with log_timing(log, "training network"):
train.main_loop()
def get_default_config_path():
return os.path.join(os.path.dirname(__file__), "train_convnet.cfg")
if __name__ == "__main__":
config = load_config(default_config=get_default_config_path(), reset_logging=False)
if not config.get("only_extract_results", False):
train_convnet(config)
scan_for_best_performance(config.experiment_root, "valid_y_misclass")
scan_for_best_performance(config.experiment_root, "valid_ptrial_misclass_rate")
values = extract_results(config.experiment_root, mode="misclass")
print(
np.multiply(
100,
[
# 1 - values['test_y_misclass'],
# 1 - values['test_wseq_misclass_rate'],
# 1 - values['test_wtrial_misclass_rate']]);
1 - values["frame_misclass"],
1 - values["sequence_misclass"],
1 - values["trial_misclass"],
],
)
)
# coding: utf-8
"""Module for utility functions."""
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def fov_to_cell_size(fov, im_size):
"""Evaluate image pixel size for given FoV and number of pixels.
Args:
fov (float): Field of view, in degrees
im_size (int): Image size (1D) in pixels.
Returns:
float: pixel size, in arcsec.
"""
r_max = math.sin(math.radians(fov) / 2)
inc = r_max / (0.5 * im_size)
return math.degrees(math.asin(inc)) * 3600
def cell_size_to_fov(cell_size, im_size):
"""Obtain image fov from cell size and image size.
Args:
cell_size (float): Cell size, in arcseconds.
im_size (int): Image size, in pixels.
Returns:
float, The image FoV, in degrees.
"""
inc = math.sin(math.radians(cell_size / 3600))
r_max = inc * (0.5 * im_size)
return math.degrees(2.0 * math.asin(r_max))
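# Round-trip sanity check for the two conversions above (illustrative values;
# the functions are exact inverses of each other):
#
#   cs = fov_to_cell_size(5.0, 1024)            # pixel size in arcsec
#   assert abs(cell_size_to_fov(cs, 1024) - 5.0) < 1e-9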
def plot_image(image, title=None, cbar_label=None, filename=None, extent=None,
figsize=(8, 4), xlabel=None, ylabel=None, xlim=None,
ylim=None):
"""Utility function to plot a image.
Args:
image (numpy.array, complex): Complex image / 2d-array to be plotted.
title (str, optional): Plot title.
cbar_label (str, optional): Color bar label
filename (str, optional): If specified, save the plot to this file
instead of displaying the plot.
extent (list, optional): If specified the extent of the plot axis labels
[x_min, x_max, y_min, y_max]
"""
if extent is None:
size = image.shape[0]
extent = [-size // 2 - 0.5, size // 2 - 1 + 0.5,
-size // 2 - 0.5, size // 2 - 1 + 0.5]
fig, ax = plt.subplots(figsize=figsize)
im = ax.imshow(image, interpolation='nearest', extent=extent,
origin='lower')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2%", pad=0.03)
cbar = ax.figure.colorbar(im, cax=cax)
cbar.ax.tick_params(labelsize='small')
ticks = np.linspace(image.min(), image.max(), 5)
    cbar.set_ticks(ticks)  # the update_ticks kwarg was removed in newer Matplotlib
# ax.grid()
if cbar_label:
cbar.set_label(cbar_label)
if title:
ax.set_title(title)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
if filename:
fig.savefig(filename, dpi=200, transparent=True)
else:
plt.show()
def plot_cimage(image, title=None, cbar_label=None, filename=None, extent=None,
figsize=(8, 4)):
"""Utility function to plot a complex image.
Args:
image (numpy.array, complex): Complex image / 2d-array to be plotted.
title (str, optional): Plot title.
cbar_label (str, optional): Color bar label
filename (str, optional): If specified, save the plot to this file
instead of displaying the plot.
extent (list, optional): If specified the extent of the plot axis labels
[x_min, x_max, y_min, y_max]
"""
if extent is None:
size = image.shape[0]
extent = [-size // 2 - 0.5, size // 2 - 1 + 0.5,
-size // 2 - 0.5, size // 2 - 1 + 0.5]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.subplots_adjust(left=0.08, bottom=0.08, right=0.92, top=0.92,
wspace=0.4, hspace=None)
for i, ax in enumerate(axes):
if i == 0:
data = np.real(image)
title_ = 'Real ( %s )' % title
else:
data = np.imag(image)
title_ = 'Imag ( %s )' % title
im = ax.imshow(data, interpolation='nearest', extent=extent,
origin='lower', cmap='jet')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="4%", pad=0.04)
cbar = ax.figure.colorbar(im, cax=cax)
ticks = np.linspace(data.min(), data.max(), 5)
        cbar.set_ticks(ticks)  # the update_ticks kwarg was removed in newer Matplotlib
if cbar_label:
cbar.set_label(cbar_label)
if title:
ax.set_title(title_)
if filename:
fig.savefig(filename)
else:
plt.show()
def plot_line(y, x=None, title=None, filename=None):
"""Utility function to generate a line plot.
Args:
y (numpy.array, 1d): y data array to be plotted.
x (numpy.array, optional): If specified, the x data. Otherwise
The x data values are assumes to be an
integer range from 0 to y.size.
title (str, optional): Plot title.
filename (str, optional): If specified, save the plot to this file
instead of displaying the plot.
"""
fig, ax = plt.subplots(figsize=(6, 5))
if x is None:
x = np.arange(-y.shape[0]//2, y.shape[0]//2)
ax.plot(x, y, 'b.--')
ax.plot([x[0], x[-1]], [0.5, 0.5], 'r--')
ax.grid()
if title:
ax.set_title(title)
if filename:
fig.savefig(filename)
else:
plt.show()
def plot_semilogy(y, x=None, title=None, filename=None, y_lim=None, x1=None):
"""Utility function to generate a line plot with a log y axis.
Args:
y (numpy.array, 1d): y data array to be plotted.
x (numpy.array, optional): If specified, the x data. Otherwise
The x data values are assumes to be an
integer range from 0 to y.size.
title (str, optional): Plot title.
filename (str, optional): If specified, save the plot to this file
instead of displaying the plot.
y_lim (list, optional): y range of the axis [y_min, y_max]
x1 (float, optional): If specified plot a vertical guide line at this
x position.
"""
fig, ax = plt.subplots(figsize=(6, 5))
if x is None:
x = np.arange(-y.shape[0]//2, y.shape[0]//2)
ax.semilogy(x, y, 'b.--')
ax.semilogy([x[0], x[-1]], [0.5, 0.5], 'r--')
if y_lim:
ax.set_ylim(y_lim)
if x1:
ax.plot([x1, x1], ax.get_ylim(), '--', color='0.5')
ax.plot([-x1, -x1], ax.get_ylim(), '--', color='0.5')
ax.grid()
if title:
ax.set_title(title)
if filename:
fig.savefig(filename)
else:
plt.show()
# <editor-fold desc="definition and handling of parameters"
# XXX defines struct for handling parameter data
"""
Type including data and additional information on parameters. Fields relate to what is provided in [Parameter list](@ref) and include:
* `name::Symbol`: name of the parameter
* `dim::Tuple`: potential dimensions of parameter data
* `defVal::Union{Nothing,Float64}`: default value
* `herit::Tuple`: inheritance rules for parameter, see [Parameter overview](@ref) for details
* `data::DataFrame`: specified parameter data
"""
mutable struct ParElement
name::Symbol
dim::Tuple
defVal::Union{Nothing,Float64}
herit::Tuple
data::DataFrame
techPre::NamedTuple{(:preset,:mode),Tuple{Symbol,Tuple{Vararg{Symbol,N} where N}}}
function ParElement(paraData_df::DataFrame,paraDef_ntup::NamedTuple,name::Symbol,report::Array{Tuple,1})
setLongShort_dic = Dict(:Ts => :timestep, :R => :region, :C => :carrier, :Te => :technology, :M => :mode)
if isempty(paraData_df) return new(name,paraDef_ntup.dim,paraDef_ntup.defVal,paraDef_ntup.herit,DataFrame()) end
        # XXX check consistency of rows in input dataframe and definition of set and rename columns according to set definition
        # assigns array of used suffixes according to parameter definition to each set
splitDim_arr = map(x -> map(y -> Symbol(y), split(String(x),"_")),collect(paraDef_ntup.dim))
setSuf_dic = Dict(x => map(y -> length(y) == 1 ? Symbol() : y[end],filter(z -> z[1] == x,splitDim_arr)) for x in unique(map(x -> x[1],splitDim_arr)))
# loops over set columns in input dataframe and assigns them to the sets defined for the parameter
newCol_dic = Dict(:val => :val)
sufNum_dic = Dict(:b => 2, :c => 3, :d => 4, :e => 5, :f => 6, :g => 7)
for colNam in setdiff(namesSym(paraData_df),[:val])
colNam_arr = split(String(colNam),"_")
setNam = Symbol(colNam_arr[1])
if !haskey(setSuf_dic,setNam) # parameter provided for a set not appearing in definition (e.g. demand depending on the technology)
push!(report,(2, "parameter assignment", string(name), "parameter data was specified for $(setLongShort_dic[setNam]) set, but it is not defined to depend on this set"))
continue
            elseif length(setSuf_dic[setNam]) == 1 && length(colNam_arr) > 1 # there are several instances of the set provided, but it only depends on one instance (e.g. two region sets for efficiency)
                push!(report,(2, "parameter assignment", string(name), "parameter data was specified for several instances of $(setLongShort_dic[setNam]) set, but it is defined to depend only on one instance, additional instances were ignored"))
continue
elseif setSuf_dic[setNam][1] == Symbol() # set has only one instance and no suffix => no change when converting from read-in dataframe to parameter element
newCol_dic[colNam] = colNam
            elseif length(setSuf_dic[setNam]) == 1 || length(colNam_arr) == 1 # column name in dataframe has no underscore, but the definition of the parameter element has one
newCol_dic[colNam] = Symbol(setNam, "_", setSuf_dic[setNam][1])
else
cntRep_int = sufNum_dic[Symbol(colNam_arr[2])] # set defined for several instances
newCol_dic[colNam] = Symbol(setNam, "_", setSuf_dic[setNam][cntRep_int])
end
end
        # filters only used columns, renames them accordingly and converts to table
writeData_df = paraData_df[:,collect(keys(newCol_dic))]
DataFrames.rename!(writeData_df,newCol_dic)
new_obj = new(name,paraDef_ntup.dim,paraDef_ntup.defVal,paraDef_ntup.herit,writeData_df)
        # defines on which level the parameter is preset and which capacity restrictions are affected by different modes for all dispatch parameters, where this is specified
if haskey(paraDef_ntup,:techPre) new_obj.techPre = paraDef_ntup.techPre end
return new_obj
end
ParElement() = new()
end
# XXX specific struct for read in process of parameter data
mutable struct parEntry
colSet::Symbol
entry::Array{String,1}
lvl::Array{Int,1}
startLvl::Int
end
# </editor-fold>
# <editor-fold desc="import and extensions of base functions"
# XXX functions to copy parameter structs of parameter data
import Base.copy
function copy(par_obj::ParElement)
out = ParElement()
out.name = par_obj.name
out.dim = par_obj.dim
out.defVal = par_obj.defVal
out.herit = par_obj.herit
out.data = copy(par_obj.data)
if isdefined(par_obj,:techPre) out.techPre = par_obj.techPre end
return out
end
function copy(par_obj::ParElement,data_df::DataFrame)
out = ParElement()
out.name = par_obj.name
out.dim = par_obj.dim
out.defVal = par_obj.defVal
out.herit = par_obj.herit
out.data = data_df
if isdefined(par_obj,:techPre) out.techPre = par_obj.techPre end
return out
end
# XXX usual collect sometimes creates a mysterious error if used on dictionary keys, this command avoids this
import Base._collect
import Base.SizeUnknown
collectKeys(itr) = _collect(Symbol, itr, SizeUnknown())
# </editor-fold>
# </editor-fold>
# <editor-fold desc="struct for individual parts of the model"
# XXX defines parts of the model
abstract type AbstractModelPart end
"""
```julia
TechPart <: AbstractModelPart
```
Type used for technology parts. Parameters, variables, and constraints are assigned as dictionaries via the fields `par`, `var`, and `cns`, respectively. Additional fields include:
* `name::Tuple`: name of technology as a series of nodes from the technology tree
* `carrier::NamedTuple`: energy carriers by index assigned to technology by groups (e.g. generation, use, ...)
* `balLvl::NamedTuple`: temporal and spatial resolution for expansion and balance of the technology
* `capaRestr::DataFrame`: specification of capacity restrictions required for technology
* `actSt::Tuple`: actively stored carriers (by index) although they are not leaves
* `type::Tuple`: type of technology (stock, mature, or evolving)
* `disAgg::Bool`: if true, dispatch is modelled at expansion resolution instead of dispatch resolution
* `modes::Tuple`: different operational modes of technology
"""
mutable struct TechPart <: AbstractModelPart
name::Tuple{Vararg{String,N} where N}
par::Dict{Symbol,ParElement}
var::Dict{Symbol,DataFrame}
cns::Dict{Symbol,DataFrame}
carrier::NamedTuple
balLvl::NamedTuple{(:exp,:ref),Tuple{Tuple{Int,Int},Union{Nothing,Tuple{Int,Int}}}}
capaRestr::DataFrame
actSt::Tuple
type::Symbol
disAgg::Bool
modes::Tuple{Vararg{Int,N} where N}
TechPart(name::Tuple{Vararg{String,N} where N}) = new(name,Dict{Symbol,ParElement}(),Dict{Symbol,DataFrame}(),Dict{Symbol,DataFrame}())
TechPart() = new()
end
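# Usage sketch (hypothetical names): parameters, variables, and constraints of a
# technology part are accessed via its dictionaries, e.g.
#   part = anyM.parts.tech[tInt]      # a TechPart
#   eff_obj = part.par[:effConv]      # a ParElement holding conversion efficiencies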
"""
```julia
OthPart <: AbstractModelPart
```
Type used for 'exchange', 'trade', 'balance', 'limits', and 'objectives' model parts. Parameters, variables, and constraints are assigned as dictionaries via the fields `par`, `var`, and `cns`, respectively.
"""
mutable struct OthPart <: AbstractModelPart
par::Dict{Symbol,ParElement}
var::Dict{Symbol,DataFrame}
cns::Dict{Symbol,DataFrame}
OthPart() = new(Dict{Symbol,ParElement}(),Dict{Symbol,DataFrame}(),Dict{Symbol,DataFrame}())
end
# XXX container to store data defining a constraint (used to separate definition and actual JuMP creation of constraints)
struct cnsCont
data::DataFrame
sign::Symbol
end
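# usage sketch (hypothetical input): cnsCont(cns_df,:equal) keeps the constraint
# data and its sign until the actual JuMP constraint is created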
# </editor-fold>
# <editor-fold desc="structs for nodes that then make up the trees to save set data"
# XXX define nodes for set tree and tree itself
mutable struct Node
idx::Int
val::String
lvl::Int
subIdx::Int
down::Array{Int,1}
end
mutable struct Tree
nodes::Dict{Int,Node}
srcTup::Dict{Tuple,Array{Int,1}}
srcStr::Dict{Tuple{String,Int},Array{Int,1}}
up::Dict{Int,Int}
height::Int
Tree() = new(Dict{Int,Node}(),Dict{Tuple,Array{Int,1}}(),Dict{Tuple{String,Int},Array{Int,1}}(),Dict{Int,Int}(),1)
end
# </editor-fold>
# <editor-fold desc="options for model and model itself"
# create abstract model object to reference before creation (avoid circular type definition)
abstract type AbstractModel end
# XXX defines final model object and its options
struct modOptions
# data in- and output
inDir::Array{String,1}
outDir::String
objName::String
csvDelim::String
outStamp::String
# model generation
decomm::Symbol
interCapa::Symbol
supTsLvl::Int
shortExp::Int
redStep::Float64
# managing numerical issues
emissionLoss::Bool
coefRng::NamedTuple{(:mat,:rhs),Tuple{Tuple{Float64,Float64},Tuple{Vararg{Float64,2}}}}
scaFac::NamedTuple{(:capa,:oprCapa,:dispConv,:dispSt,:dispExc, :dispTrd, :costDisp,:costCapa,:obj),Tuple{Vararg{Float64,9}}}
bound::NamedTuple{(:capa,:disp,:obj),Tuple{Vararg{Float64,3}}}
avaMin::Float64
checkRng::Float64
# reporting related options
reportLvl::Int
errCheckLvl::Int
errWrtLvl::Int
startTime::DateTime
end
# XXX flow graph object that defines relations between technologies and carriers (and among carriers)
mutable struct flowGraph
nodeC::Dict{Int64,Int64}
nodeTe::Dict{Int64,Int64}
edgeC::Array{Pair{Int,Int},1}
edgeTe::Array{Pair{Int,Int},1}
nodePos::Dict{Int,Array{Float64,1}}
function flowGraph(anyM::AbstractModel)
# creates dictionary mapping carrier id to node id
nodeC_dic = Dict(x[2] => x[1] for x in enumerate(sort(filter(x -> x != 0,getfield.(collect(values(anyM.sets[:C].nodes)),:idx)))))
# get all relevant technologies; a technology is not relevant if all children of a parent have the same carriers (in this case only the parent is relevant)
t_tree = anyM.sets[:Te]
allTech_arr = getfield.(collect(values(t_tree.nodes)),:idx)
tleaf_dic = Dict(x => unique(filter(y -> y in keys(anyM.parts.tech), [x,getDescendants(x,t_tree,true)...])) for x in allTech_arr)
relTech_arr = Array{Int,1}()
for t in keys(tleaf_dic)
subCar_arr = map(y -> anyM.parts.tech[y].carrier,tleaf_dic[t])
if length(unique(subCar_arr)) == 1
push!(relTech_arr,t)
else
append!(relTech_arr,collect(tleaf_dic[t]))
end
end
# creates dictionary mapping each relevant id to node id
nodeTe_dic = Dict(x[2] => x[1] + length(nodeC_dic) for x in enumerate(filter(x -> isempty(intersect(getAncestors(x,t_tree,:int),relTech_arr)),unique(relTech_arr))))
# creates edges between technologies
edgeTe_arr = Array{Pair{Int,Int},1}()
for t in keys(nodeTe_dic)
gotTech_boo = false; tItr = t
while !gotTech_boo
if tItr in keys(anyM.parts.tech)
gotTech_boo = true
else
tItr = intersect(getDescendants(t,anyM.sets[:Te],true),keys(anyM.parts.tech))[1]
end
end
car_ntup = anyM.parts.tech[tItr].carrier
for cIn in map(x -> getfield(car_ntup,x),intersect(keys(car_ntup),(:use,:stExtIn))) |> (y -> isempty(y) ? y : union(y...))
push!(edgeTe_arr, nodeC_dic[cIn] => nodeTe_dic[t])
end
for cOut in map(x -> getfield(car_ntup,x),intersect(keys(car_ntup),(:gen,:stExtOut))) |> (y -> isempty(y) ? y : union(y...))
push!(edgeTe_arr, nodeTe_dic[t] => nodeC_dic[cOut])
end
end
# creates edges between carriers
edgeC_arr = Array{Pair{Int,Int},1}()
for c in keys(nodeC_dic)
for cChild in anyM.sets[:C].nodes[c].down
push!(edgeC_arr, nodeC_dic[cChild] => nodeC_dic[c])
end
end
return new(nodeC_dic,nodeTe_dic,edgeC_arr,edgeTe_arr)
end
end
# XXX specific information for graphical evaluation
mutable struct graInfo
graph::flowGraph
names::Dict{String,String}
colors::Dict{String,Tuple{Float64,Float64,Float64}}
function graInfo(anyM::AbstractModel)
# create default options for names and colors
graph_obj = flowGraph(anyM)
# specify some default names and colors used in visualisations
namesDef_arr = ["coalPlant" => "coal plant", "gasPlant" => "gas plant", "districtHeat" => "district heat", "naturalGas" => "natural gas", "synthGas" => "synthetic gas", "fossilGas" => "fossil gas",
"demand" => "final demand", "export" => "export", "import" => "import", "crt" => "curtailment", "lss" => "loss of load", "trdSell" => "trade sell", "trdBuy" => "trade buy"]
# create dictionary assigning internal model names to names used within visualisations
allVal_arr = unique(vcat(map(x -> getfield.(values(anyM.sets[x].nodes),:val) ,collect(keys(anyM.sets)))...))
names_dic = setdiff(allVal_arr,getindex.(namesDef_arr,1)) |> (z -> Dict(vcat(namesDef_arr,Pair.(z,z))))
# define default colors for default energy carriers
colorsCar_arr = ["electricity" => (1.0, 0.9215, 0.2313),"heat" => (0.769,0.176,0.290),"districtHeat" => (0.6,0.0,0.169), "gas" => (1.0,0.416,0.212),
"naturalGas" => (1.0,0.506,0.294),"fossilGas" => (0.898,0.259,0.075), "synthGas" => (0.235,0.506,0.325), "hydrogen" => (0.329,0.447,0.827),
"coal" => (0.459,0.286,0.216),"biomass" => (0.682,0.898,0.443),"bioGas" => (0.682,0.898,0.443)]
colors_dic = setdiff(getfield.(values(anyM.sets[:C].nodes),:val),getindex.(colorsCar_arr,1)) |> (z -> Dict(vcat(colorsCar_arr,Pair.(z,fill((0.85,0.85,0.85),length(z))))))
return new(graph_obj,names_dic,colors_dic)
end
end
# XXX finally, the model object itself
"""
The core model object containing all related data and subordinate objects.
# Constructor and arguments
```julia
anyModel(inDir::Union{String,Array{String,1}},outDir::String; kwargs)
```
* `inDir::Union{String,Array{String,1}}`: directory of input files; multiple directories can be provided via an array
* `outDir::String`: directory output files are written to
# Optional arguments, data handling
* `objName::String`: name of the model object, will be added to the name of output files and printed during reporting, default is an empty string
* `csvDelim::String`: specifies the delimiter used within the read-in csv files, default is a comma `,`
# Optional arguments, model generation
* `decomm::Symbol`: specifies if the model should perform endogenous decommissioning, options are:
- `:decomm`: capacities are decommissioned endogenously, once decommissioned capacities cannot be put into operation again (default)
- `:none`: no endogenous decommissioning, operated capacities equal installed capacities
- `:recomm`: capacities are decommissioned endogenously and can be put back into operation
* `interCapa::Symbol`: capacity expansion can be modelled at a resolution less detailed than yearly; this option determines how capacities are then distributed among the subsequent years, options are:
- `:linear`: expansion is equally distributed among years resulting in a linear increase in capacity (default)
- `:none`: all expansion occurs in the first year
* `supTsLvl::Int`: specifies the depth in the tree of time-steps that provides years, default is `0`
* `shortExp::Int`: interval in years between years of capacity expansion, default is `10`
* `redStep::Float64`: scales down energy quantities within the model, can be relevant when working with reduced time-series, default is `1.0`
# Optional arguments, reporting (see [Data files](@ref) for details)
* `reportLvl::Int`: controls the frequency of writing updates to the console, default is `2`
* `errCheckLvl::Int`: controls the frequency of checking for errors, default is `2`
* `errWrtLvl::Int`: controls the frequency of writing an error report to a csv file, default is `1`
# Optional arguments, numerical issues (see [Performance and stability](@ref) for details)
* `coefRng::NamedTuple`: specifies the maximum range of coefficients in the matrix and right-hand side of the model's underlying optimization problem, default is `(mat = (1e-2,1e5), rhs = (1e-2,1e2))`
* `checkRng::Float64`: if set, reports all equations whose range exceeds the specified value, default is `NaN`
* `scaFac::NamedTuple`: scales different groups of variables within the model, default is `(capa = 1e1, oprCapa = 1e2, dispConv = 1e3, dispSt = 1e4, dispExc = 1e3, dispTrd = 1e3, costDisp = 1e1, costCapa = 1e2, obj = 1e0)`
* `bound::NamedTuple`: sets external bounds for all capacities and dispatch variables (both in GW) and for the objective value (in Mil. €), default is `(capa = NaN, disp = NaN, obj = NaN)`
* `avaMin::Float64`: availabilities smaller than this value are set to zero; since availabilities are inverted in the [Conversion capacity restriction](@ref) and [Storage capacity restriction](@ref), this avoids high coefficients, default is `0.01`
* `emissionLoss::Bool`: determines if losses from exchange and self-discharge of storage are subject to emissions, default is `true`
"""
mutable struct anyModel <: AbstractModel
options::modOptions
report::Array{Tuple,1}
optModel::Model
lock::ReentrantLock
supTs::NamedTuple{(:lvl,:step,:sca),Tuple{Int,Tuple{Vararg{Int,N} where N},Dict{Tuple{Int,Int},Float64}}}
cInfo::Dict{Int,NamedTuple{(:tsDis,:tsExp,:rDis,:rExp,:eq),Tuple{Int,Int,Int,Int,Bool}}}
sets::Dict{Symbol,Tree}
parts::NamedTuple{(:tech,:trd,:exc,:bal,:lim,:obj),Tuple{Dict{Int,TechPart},OthPart,OthPart,OthPart,OthPart,OthPart}}
graInfo::graInfo
function anyModel(inDir::Union{String,Array{String,1}},outDir::String; objName = "", csvDelim = ",", decomm = :decomm, interCapa = :linear, supTsLvl = 0, shortExp = 10, redStep = 1.0, emissionLoss = true,
reportLvl = 2, errCheckLvl = 2, errWrtLvl = 1, coefRng = (mat = (1e-2,1e5), rhs = (1e-2,1e2)),
scaFac = (capa = 1e1, oprCapa = 1e2, dispConv = 1e3, dispSt = 1e4, dispExc = 1e3, dispTrd = 1e3, costDisp = 1e1, costCapa = 1e2, obj = 1e0),
bound = (capa = NaN, disp = NaN, obj = NaN), avaMin = 0.01, checkRng = NaN)
anyM = new()
# <editor-fold desc="initialize report and options"
# XXX creates array to which reporting is written
anyM.report = Array{Tuple,1}()
anyM.optModel = Model()
anyM.lock = ReentrantLock()
# XXX sets whole options object from specified directories TODO work with kwargs later
outStamp_str = string(objName,"_",Dates.format(now(),"yyyymmddHHMM"))
defOpt_ntup = (inDir = typeof(inDir) == String ? [inDir] : inDir, outDir = outDir, objName = objName, csvDelim = csvDelim, outStamp = outStamp_str, decomm = decomm, interCapa = interCapa,
supTsLvl = supTsLvl, shortExp = shortExp, redStep = redStep, emissionLoss = emissionLoss, coefRng = coefRng, scaFac = scaFac, bound = bound,
avaMin = avaMin, checkRng = checkRng, reportLvl = reportLvl, errCheckLvl = errCheckLvl, errWrtLvl = errWrtLvl, startTime = now())
anyM.options = modOptions(defOpt_ntup...)
# </editor-fold>
# <editor-fold desc="read in set and parameter data">
files_dic = readInputFolder(anyM.options.inDir)
# XXX read-in sets and parameters
setData_dic = readSets!(files_dic,anyM)
paraTemp_dic = readParameters!(files_dic,setData_dic,anyM)
produceMessage(anyM.options,anyM.report, 1," - Read-in all set and parameter files")
# </editor-fold>
# <editor-fold desc="create part objects and general mappings"
# assign actual tech to parents
relTech_df = setData_dic[:Te][!,Symbol.(filter(x -> occursin("technology",x) && !isnothing(tryparse(Int16,string(x[end]))), string.(namesSym(setData_dic[:Te]))))]
relTech_df = DataFrame(filter(x -> any(collect(x) .!= ""), eachrow(relTech_df)))
techIdx_arr = filter(z -> isempty(anyM.sets[:Te].nodes[z].down), map(x -> lookupTupleTree(tuple(collect(x)...),anyM.sets[:Te],1)[1], eachrow(relTech_df)))
anyM.parts = (tech = Dict(x => TechPart(getUniName(x,anyM.sets[:Te])) for x in techIdx_arr), trd = OthPart(), exc = OthPart(), bal = OthPart(), lim = OthPart(), obj = OthPart())
createCarrierMapping!(setData_dic,anyM)
createTimestepMapping!(anyM)
# XXX write general info about technologies
for t in techIdx_arr createTechInfo!(t, setData_dic, anyM) end
produceMessage(anyM.options,anyM.report, 2," - Created all mappings among sets")
# XXX assign parameters to model parts
parDef_dic = parameterToParts!(paraTemp_dic, techIdx_arr, anyM)
produceMessage(anyM.options,anyM.report, 2," - Assigned parameter data to model parts")
# XXX create object for data visualization
anyM.graInfo = graInfo(anyM)
produceMessage(anyM.options,anyM.report, 1," - Prepared creation of optimzation model")
# </editor-fold>
return anyM
end
anyModel() = new()
end
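# usage sketch (hypothetical paths and keyword values):
# anyM = anyModel(["demo/inputData"],"demo/results"; objName = "demo", decomm = :none)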
# </editor-fold>
|
{"hexsha": "15b72397db973b8a0465c99f42bb72a2f2ce33cb", "size": 20426, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/objects.jl", "max_stars_repo_name": "wookay/AnyMOD.jl", "max_stars_repo_head_hexsha": "14fdae26d6c8dd88001b2b5e4aadb468a3856b42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/objects.jl", "max_issues_repo_name": "wookay/AnyMOD.jl", "max_issues_repo_head_hexsha": "14fdae26d6c8dd88001b2b5e4aadb468a3856b42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/objects.jl", "max_forks_repo_name": "wookay/AnyMOD.jl", "max_forks_repo_head_hexsha": "14fdae26d6c8dd88001b2b5e4aadb468a3856b42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4227272727, "max_line_length": 248, "alphanum_fraction": 0.7089493782, "num_tokens": 5782}
|
# coding: utf-8
import pandas as pd
import numpy as np
import scipy
import scipy.sparse
import sklearn
import sklearn.svm
import sklearn.datasets
import sklearn.cross_validation
import sklearn.metrics
import warnings
warnings.filterwarnings('ignore')
X, y = sklearn.datasets.load_svmlight_file('data/news20.binary')
instance_ids = np.arange(y.size)
splits = sklearn.cross_validation.StratifiedShuffleSplit(y, n_iter=1, test_size=0.95)
labeled_indices, unlabeled_indices = splits.__iter__().next()
L = X[labeled_indices]
L_ids = instance_ids[labeled_indices]
U = X[unlabeled_indices]
U_ids = instance_ids[unlabeled_indices]
y_l = y[labeled_indices]
y_u = y[unlabeled_indices]
def increment_svm(svm, L_ids, baseline_accuracy):
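# one self-training step: take the 500 lowest and 500 highest decision values
# among the unlabeled points, label them with the current SVM, try two random
# augmentations of the labeled set, and return the one with the best CV score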
L = X[L_ids]
y_l = y[L_ids]
U_ids = np.array(list((set(instance_ids) - set(L_ids))))
U = X[U_ids]
y_u = y[U_ids]
ordered_indices = np.argsort(svm.decision_function(U))
smallest_indices = ordered_indices[:500]
smallest_ids = U_ids[smallest_indices]
largest_indices = ordered_indices[-500:]
largest_ids = U_ids[largest_indices]
high_confidence_unlabeled = scipy.sparse.vstack([U[smallest_indices], U[largest_indices]])
high_confidence_ids = np.concatenate([smallest_ids, largest_ids])
high_confidence_predicted_labels = svm.predict(high_confidence_unlabeled)
high_confidence_true_labels = y[high_confidence_ids]
splits = sklearn.cross_validation.StratifiedShuffleSplit(high_confidence_predicted_labels, n_iter=2, test_size=0.9)
saved_L_primes = []
saved_L_prime_ids = []
saved_cv_accuracies = []
for augment_indices, test_indices in splits:
augment = high_confidence_unlabeled[augment_indices]
test = high_confidence_unlabeled[test_indices]
augment_ids = high_confidence_ids[augment_indices]
test_ids = high_confidence_ids[test_indices]
augment_labels = high_confidence_predicted_labels[augment_indices]
test_labels = high_confidence_predicted_labels[test_indices]
L_prime = scipy.sparse.vstack([L, augment])
y_l_prime = np.concatenate([y_l, augment_labels])
L_prime_ids = np.concatenate([L_ids, augment_ids])
saved_L_primes.append(L_prime)
saved_L_prime_ids.append(L_prime_ids)
svm_prime = sklearn.svm.LinearSVC(penalty='l2', C=10, dual=False)
accuracy = sklearn.cross_validation.cross_val_score(svm_prime, L_prime, y_l_prime, cv=5, n_jobs=7).mean()
saved_cv_accuracies.append(accuracy)
best_index = np.argmax(saved_cv_accuracies)
best_L_prime_ids = saved_L_prime_ids[best_index]
best_accuracy = saved_cv_accuracies[best_index]
return best_L_prime_ids, best_accuracy
svm = sklearn.svm.LinearSVC(penalty='l2', C=10, dual=False)
svm.fit(L, y_l)
cv_accuracy = sklearn.cross_validation.cross_val_score(svm, L, y_l, cv=5, n_jobs=7).mean()
accuracies = [cv_accuracy]
iteration = 0
number_labeled = L.shape[0]
prediction_accuracy = sklearn.metrics.accuracy_score(y_u, svm.predict(U))
print "%d\t%d\t%f\t%f" %(iteration, number_labeled, cv_accuracy, prediction_accuracy)
while True:
iteration += 1
L_ids, cv_accuracy = increment_svm(svm, L_ids, cv_accuracy)
L = X[L_ids]
y_l = y[L_ids]
U_ids = np.array(list((set(instance_ids) - set(L_ids))))
U = X[U_ids]
y_u = y[U_ids]
svm = sklearn.svm.LinearSVC(penalty='l2', C=10, dual=False)
svm.fit(L, y_l)
number_labeled = L.shape[0]
prediction_accuracy = sklearn.metrics.accuracy_score(y_u, svm.predict(U))
print "%d\t%d\t%f\t%f" %(iteration, number_labeled, cv_accuracy, prediction_accuracy)
|
{"hexsha": "30faface74d25b023f5f24cde319c8de26252aa8", "size": 3680, "ext": "py", "lang": "Python", "max_stars_repo_path": "incremental_tsvm_news.py", "max_stars_repo_name": "CalculatedContent/tsvm", "max_stars_repo_head_hexsha": "0b59212c5dd682105dbb4be3be8a64832845cb01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 89, "max_stars_repo_stars_event_min_datetime": "2015-01-11T20:18:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T19:42:38.000Z", "max_issues_repo_path": "incremental_tsvm_news.py", "max_issues_repo_name": "neuroshocked777/tsvm", "max_issues_repo_head_hexsha": "0b59212c5dd682105dbb4be3be8a64832845cb01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "incremental_tsvm_news.py", "max_forks_repo_name": "neuroshocked777/tsvm", "max_forks_repo_head_hexsha": "0b59212c5dd682105dbb4be3be8a64832845cb01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2015-01-08T19:03:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-26T08:07:22.000Z", "avg_line_length": 29.6774193548, "max_line_length": 119, "alphanum_fraction": 0.7263586957, "include": true, "reason": "import numpy,import scipy", "num_tokens": 945}
|
"""
Test cases for functions in general_utils.py
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
import numpy as np
from utils import general_utils
from utils.base_test_class import BaseTestClass, execute_tests
class GeneralUtilsTestCase(BaseTestClass):
"""Unit test class for general utilities. """
def __init__(self, *args, **kwargs):
super(GeneralUtilsTestCase, self).__init__(*args, **kwargs)
def setUp(self):
""" Sets up attributes. """
# For dist squared
self.X1 = np.array([[1, 2, 3], [1, 2, 4], [2, 3, 4.5]])
self.X2 = np.array([[1, 2, 4], [1, 2, 5], [2, 3, 5]])
self.true_dist_sq = np.array([[1, 4, 6], [0, 1, 3], [2.25, 2.25, 0.25]])
def test_dist_squared(self):
""" Tests the squared distance function. """
self.report('dist_squared')
comp_dist_sq = general_utils.dist_squared(self.X1, self.X2)
assert (self.true_dist_sq == comp_dist_sq).all()
def test_mapping_to_cube_and_bound(self):
""" Test map_to_cube and map_to_bounds. """
self.report('map_to_cube and map_to_bounds')
bounds = np.array([[1, 3], [2, 4], [5, 6]])
x = np.array([1.7, 3.1, 5.5])
X = np.array([[1.7, 3.1, 5.5], [2.1, 2.9, 5.0]])
y = np.array([0.35, 0.55, 0.5])
Y = np.array([[0.35, 0.55, 0.5], [0.55, 0.45, 0]])
# Map to cube
y_ = general_utils.map_to_cube(x, bounds)
Y_ = general_utils.map_to_cube(X, bounds)
# Map to Bounds
x_ = general_utils.map_to_bounds(y, bounds)
X_ = general_utils.map_to_bounds(Y, bounds)
# Check if correct.
assert np.linalg.norm(y - y_) < 1e-5
assert np.linalg.norm(Y - Y_) < 1e-5
assert np.linalg.norm(x - x_) < 1e-5
assert np.linalg.norm(X - X_) < 1e-5
def test_compute_average_sq_prediction_error(self):
""" Tests compute_average_sq_prediction_error. """
self.report('compute_average_sq_prediction_error')
Y1 = [0, 1, 2]
Y2 = [2, 0, 1]
res = general_utils.compute_average_sq_prediction_error(Y1, Y2)
assert np.abs(res - 2.0) < 1e-5
def test_stable_cholesky(self):
""" Tests for stable cholesky. """
self.report('Testing stable_cholesky')
M = np.random.normal(size=(5, 5))
M = M.dot(M.T)
L = general_utils.stable_cholesky(M)
assert np.linalg.norm(L.dot(L.T) - M) < 1e-5
def test_project_to_psd_cone(self):
""" Tests projection onto PSD cone. """
self.report('Testing projection to PSD cone.')
M1 = np.random.random((10, 10))
M1 = M1 + M1.T
M2 = M1.dot(M1.T)
M1_proj = general_utils.project_symmetric_to_psd_cone(M1)
M2_proj = general_utils.project_symmetric_to_psd_cone(M2)
eigvals_M1, _ = np.linalg.eigh(M1_proj)
assert np.all(eigvals_M1 > -1e-10)
assert np.linalg.norm(M2_proj - M2) < 1e-5
def test_draw_gaussian_samples(self):
""" Tests for draw gaussian samples. """
self.report('draw_gaussian_samples. Probabilistic test, could fail at times')
num_samples = 10000
num_pts = 3
mu = list(range(num_pts))
K = np.random.normal(size=(num_pts, num_pts))
K = K.dot(K.T)
samples = general_utils.draw_gaussian_samples(num_samples, mu, K)
sample_mean = samples.mean(axis=0)
sample_centralised = samples - sample_mean
sample_covar = sample_centralised.T.dot(sample_centralised) / num_samples
mean_tol = 4 * np.linalg.norm(mu) / np.sqrt(num_samples)
covar_tol = 4 * np.linalg.norm(K) / np.sqrt(num_samples)
mean_err = np.linalg.norm(mu - sample_mean)
covar_err = np.linalg.norm(K - sample_covar)
self.report('Mean error (tol): ' + str(mean_err) + ' (' + str(mean_tol) + ')',
'test_result')
self.report('Cov error (tol): ' + str(covar_err) + ' (' + str(covar_tol) + ')',
'test_result')
assert mean_err < mean_tol
assert covar_err < covar_tol
def test_get_exp_probs(self):
""" Testing get_exp_probs_from_fitness. """
self.report('Testing get_exp_probs_from_fitness class.')
fitness_vals = np.random.normal(size=(20,))
exp_probs = general_utils.get_exp_probs_from_fitness(fitness_vals)
exp_samples = general_utils.sample_according_to_exp_probs(fitness_vals, 10)
assert np.all(fitness_vals.argsort() == exp_probs.argsort())
assert np.abs(exp_probs.sum() - 1) < 1e-5
assert np.all(exp_probs > 0)
assert exp_samples.max() < len(fitness_vals)
# Other tests
fitness_vals_2 = fitness_vals + 1
exp_probs_2 = general_utils.get_exp_probs_from_fitness(fitness_vals_2)
assert np.linalg.norm(exp_probs_2 - exp_probs) < 1e-5
fitness_vals_3 = 2 * fitness_vals + 100.1234
exp_probs_3 = general_utils.get_exp_probs_from_fitness(fitness_vals_3, 2.1)
assert np.all(exp_probs_3.argsort() == exp_probs.argsort())
def test_array_blocking(self):
""" Test array blocking. """
self.report('Testing array blocking.')
dim1, dim2 = (10, 12)
A = np.random.random((dim1, dim2))
B = general_utils.block_augment_array(A[:4, :6], A[:4, 6:], A[4:, :7], A[4:, 7:])
assert np.linalg.norm(A-B) < 1e-5
if __name__ == '__main__':
execute_tests()
|
{"hexsha": "d3b8f76669994d04c782bea160e705706bebec73", "size": 5152, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/unittest_general_utils.py", "max_stars_repo_name": "lengjia/NAS_NPU", "max_stars_repo_head_hexsha": "600c05ed27c9b1ce63a5c1c4f1fc862d510cfcf0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 135, "max_stars_repo_stars_event_min_datetime": "2018-06-09T18:37:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T22:25:38.000Z", "max_issues_repo_path": "utils/unittest_general_utils.py", "max_issues_repo_name": "lengjia/NAS_NPU", "max_issues_repo_head_hexsha": "600c05ed27c9b1ce63a5c1c4f1fc862d510cfcf0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-11-01T09:33:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-09T03:19:07.000Z", "max_forks_repo_path": "utils/unittest_general_utils.py", "max_forks_repo_name": "lengjia/NAS_NPU", "max_forks_repo_head_hexsha": "600c05ed27c9b1ce63a5c1c4f1fc862d510cfcf0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2018-06-22T07:33:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T12:45:09.000Z", "avg_line_length": 37.8823529412, "max_line_length": 85, "alphanum_fraction": 0.6642080745, "include": true, "reason": "import numpy", "num_tokens": 1575}
|
import cv2
import os
import numpy as np
import cPickle
CVCONTOUR_APPROX_LEVEL = 2
CVCLOSE_ITR = 1
def main():
mask = cv2.imread('/Users/asafvaladarsky/Documents/pic3.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
findConnectedComponents(mask)
def findConnectedComponents(mask,
poly1Hull0 = 1,
perimScale = 4,
num = None,
bbs = None,
centers = None):
# morphologyEx returns its result; a None kernel uses the default 3x3 element
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, None, iterations=CVCLOSE_ITR)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, None, iterations=CVCLOSE_ITR)
contours,hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(mask, contours, -1, (255,0,0), 1 )
# the cPickle round-trip works around some strange type errors
tmp = cPickle.dumps(contours)
contours = cPickle.loads(tmp)
for contour in contours:
perimeter = cv2.arcLength(contour, True)
if __name__ == '__main__':
main()
print 'done'
'''
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2)
################# Now finding Contours ###################
contours0, hierarchy = cv2.findContours( im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
def update(levels):
vis = np.zeros((cvImg.height, cvImg.width, 3), np.uint8)
levels = levels - 3
cv2.drawContours( vis, contours, (-1, 3)[levels <= 0], (128,255,255),
3, cv2.CV_AA, hierarchy, abs(levels) )
cv2.imshow('contours', vis)
update(3)
cv2.createTrackbar( "levels+3", "contours", 3, 7, update )
cv2.imshow('image', img)
0xFF & cv2.waitKey()
cv2.destroyAllWindows()
'''
'''
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
samples = np.empty((0,100))
responses = []
keys = [i for i in range(48,58)]
print len(contours)
for cnt in contours:
if cv2.contourArea(cnt)>50:
[x,y,w,h] = cv2.boundingRect(cnt)
if h>28:
cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),2)
roi = thresh[y:y+h,x:x+w]
roismall = cv2.resize(roi,(10,10))
cv2.imshow('norm',im)
key = cv2.waitKey(0)
if key == 27:
sys.exit()
elif key in keys:
responses.append(int(chr(key)))
sample = roismall.reshape((1,100))
samples = np.append(samples,sample,0)
else:
print "boho"
responses = np.array(responses,np.float32)
responses = responses.reshape((responses.size,1))
print("training complete")
np.savetxt('generalsamples.data',samples)
np.savetxt('generalresponses.data',responses)
'''
|
{"hexsha": "7f29fde3522f0e32eff87802469375619ffbd250", "size": 2844, "ext": "py", "lang": "Python", "max_stars_repo_path": "engine/ocr/Test2.py", "max_stars_repo_name": "hasadna/OpenPress", "max_stars_repo_head_hexsha": "7aa99ed92c6aef975f59c0295681f02211fc7ab5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "engine/ocr/Test2.py", "max_issues_repo_name": "hasadna/OpenPress", "max_issues_repo_head_hexsha": "7aa99ed92c6aef975f59c0295681f02211fc7ab5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "engine/ocr/Test2.py", "max_forks_repo_name": "hasadna/OpenPress", "max_forks_repo_head_hexsha": "7aa99ed92c6aef975f59c0295681f02211fc7ab5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1584158416, "max_line_length": 94, "alphanum_fraction": 0.6090014065, "include": true, "reason": "import numpy", "num_tokens": 790}
|
import sys
sys.path.append('deepv2d')
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
import os
import time
import argparse
import glob
import tqdm
import vis
from core import config
from data_stream.scannet_twoview import ScanNet
from deepv2d import DeepV2D
import eval_utils
def write_to_folder(images, intrinsics, test_id):
dest = os.path.join("scannet/%06d" % test_id)
if not os.path.isdir(dest):
os.makedirs(dest)
for i, img in enumerate(images):
cv2.imwrite(os.path.join(dest, '%d.png' % i), img)
np.savetxt(os.path.join(dest, 'intrinsics.txt'), intrinsics)
def make_predictions(args):
cfg = config.cfg_from_file(args.cfg)
deepv2d = DeepV2D(cfg, args.model, use_fcrn=True, mode=args.mode)
with tf.Session() as sess:
deepv2d.set_session(sess)
depth_predictions, pose_predictions = [], []
depth_groundtruth, pose_groundtruth = [], []
db = ScanNet(args.dataset_dir)
for test_id, test_blob in enumerate(tqdm.tqdm(db.test_set_iterator())):
images, intrinsics = test_blob['images'], test_blob['intrinsics']
depth_pred, poses_pred = deepv2d(images, intrinsics)
# use keyframe depth for evaluation
depth_predictions.append(depth_pred[0])
# BA-Net evaluates pose as the relative transformation between two frames
delta_pose = poses_pred[1] @ np.linalg.inv(poses_pred[0])
pose_predictions.append(delta_pose)
depth_groundtruth.append(test_blob['depth'])
pose_groundtruth.append(test_blob['pose'])
predictions = (depth_predictions, pose_predictions)
groundtruth = (depth_groundtruth, pose_groundtruth)
return groundtruth, predictions
def evaluate(groundtruth, predictions):
pose_results = {}
depth_results = {}
depth_groundtruth, pose_groundtruth = groundtruth
depth_predictions, pose_predictions = predictions
num_test = len(depth_groundtruth)
for i in range(num_test):
# match scales using median
scalor = eval_utils.compute_scaling_factor(depth_groundtruth[i], depth_predictions[i])
depth_predictions[i] = scalor * depth_predictions[i]
depth_metrics = eval_utils.compute_depth_errors(depth_groundtruth[i], depth_predictions[i])
pose_metrics = eval_utils.compute_pose_errors(pose_groundtruth[i], pose_predictions[i])
if i == 0:
for pkey in pose_metrics:
pose_results[pkey] = []
for dkey in depth_metrics:
depth_results[dkey] = []
for pkey in pose_metrics:
pose_results[pkey].append(pose_metrics[pkey])
for dkey in depth_metrics:
depth_results[dkey].append(depth_metrics[dkey])
### aggregate metrics
for pkey in pose_results:
pose_results[pkey] = np.mean(pose_results[pkey])
for dkey in depth_results:
depth_results[dkey] = np.mean(depth_results[dkey])
print(("{:>1}, " * len(depth_results)).format(*depth_results.keys()))
print(("{:10.4f}, " * len(depth_results)).format(*depth_results.values()))
print(("{:>16}, " * len(pose_results)).format(*pose_results.keys()))
print(("{:16.4f}, " * len(pose_results)).format(*pose_results.values()))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default='cfgs/scannet.yaml', help='config file used to train the model')
parser.add_argument('--model', default='models/scannet.ckpt', help='path to model checkpoint')
parser.add_argument('--dataset_dir', help='path to scannet dataset')
parser.add_argument('--mode', default='keyframe', help='config file used to train the model')
parser.add_argument('--fcrn', action="store_true", help='use single image depth initializiation')
parser.add_argument('--n_iters', type=int, default=8, help='number of video frames to use for reconstruction')
parser.add_argument('--viz', action="store_true", help='display depth maps during inference')
args = parser.parse_args()
groundtruth, predictions = make_predictions(args)
evaluate(groundtruth, predictions)
|
{"hexsha": "7d67fa4fa2f02bb061b6e28f40ec84eea4731cb9", "size": 4205, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation/eval_scannet_twoview.py", "max_stars_repo_name": "TWJianNuo/Deepv2d", "max_stars_repo_head_hexsha": "e8d9658d974fac9734ebb0a9b9ad56dbfad5c8ee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-29T03:28:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T03:28:57.000Z", "max_issues_repo_path": "evaluation/eval_scannet_twoview.py", "max_issues_repo_name": "TWJianNuo/Deepv2d", "max_issues_repo_head_hexsha": "e8d9658d974fac9734ebb0a9b9ad56dbfad5c8ee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluation/eval_scannet_twoview.py", "max_forks_repo_name": "TWJianNuo/Deepv2d", "max_forks_repo_head_hexsha": "e8d9658d974fac9734ebb0a9b9ad56dbfad5c8ee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4672131148, "max_line_length": 114, "alphanum_fraction": 0.6882282996, "include": true, "reason": "import numpy", "num_tokens": 961}
|
# -*- coding: utf-8 -*-
# The class DB allows the user to create a connection with the database
import calendar
import csv
import datetime as dt
import math
import os
import pickle
import re
import warnings
import dotenv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import psycopg2
import pymysql
import seaborn as sns
from sqlalchemy import create_engine
## Let's check if this works
|
{"hexsha": "d261edd36c22f24da71ba06872fb1d9e82f66714", "size": 411, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/get_datasets.py", "max_stars_repo_name": "ilce-catbug/xbox-games-sales-analysis", "max_stars_repo_head_hexsha": "661a7194c73049e8a07ea3f96eafd9a09c236841", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data/get_datasets.py", "max_issues_repo_name": "ilce-catbug/xbox-games-sales-analysis", "max_issues_repo_head_hexsha": "661a7194c73049e8a07ea3f96eafd9a09c236841", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/get_datasets.py", "max_forks_repo_name": "ilce-catbug/xbox-games-sales-analysis", "max_forks_repo_head_hexsha": "661a7194c73049e8a07ea3f96eafd9a09c236841", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.8695652174, "max_line_length": 70, "alphanum_fraction": 0.795620438, "include": true, "reason": "import numpy", "num_tokens": 93}
|
#include <btcb/node/common.hpp>
#include <btcb/node/wallet.hpp>
#include <btcb/secure/blockstore.hpp>
#include <boost/polymorphic_cast.hpp>
btcb::summation_visitor::summation_visitor (btcb::transaction const & transaction_a, btcb::block_store & store_a) :
transaction (transaction_a),
store (store_a)
{
}
void btcb::summation_visitor::send_block (btcb::send_block const & block_a)
{
assert (current != nullptr && current->type != summation_type::invalid);
if (current->type == summation_type::amount)
{
sum_set (block_a.hashables.balance.number ());
current->balance_hash = block_a.hashables.previous;
current->amount_hash = 0;
}
else
{
sum_add (block_a.hashables.balance.number ());
current->balance_hash = 0;
}
}
void btcb::summation_visitor::state_block (btcb::state_block const & block_a)
{
assert (current != nullptr && current->type != summation_type::invalid);
sum_set (block_a.hashables.balance.number ());
if (current->type == summation_type::amount)
{
current->balance_hash = block_a.hashables.previous;
current->amount_hash = 0;
}
else
{
current->balance_hash = 0;
}
}
void btcb::summation_visitor::receive_block (btcb::receive_block const & block_a)
{
assert (current != nullptr && current->type != summation_type::invalid);
if (current->type == summation_type::amount)
{
current->amount_hash = block_a.hashables.source;
}
else
{
btcb::block_info block_info;
if (!store.block_info_get (transaction, block_a.hash (), block_info))
{
sum_add (block_info.balance.number ());
current->balance_hash = 0;
}
else
{
current->amount_hash = block_a.hashables.source;
current->balance_hash = block_a.hashables.previous;
}
}
}
void btcb::summation_visitor::open_block (btcb::open_block const & block_a)
{
assert (current != nullptr && current->type != summation_type::invalid);
if (current->type == summation_type::amount)
{
if (block_a.hashables.source != btcb::genesis_account)
{
current->amount_hash = block_a.hashables.source;
}
else
{
sum_set (btcb::genesis_amount);
current->amount_hash = 0;
}
}
else
{
current->amount_hash = block_a.hashables.source;
current->balance_hash = 0;
}
}
void btcb::summation_visitor::change_block (btcb::change_block const & block_a)
{
assert (current != nullptr && current->type != summation_type::invalid);
if (current->type == summation_type::amount)
{
sum_set (0);
current->amount_hash = 0;
}
else
{
btcb::block_info block_info;
if (!store.block_info_get (transaction, block_a.hash (), block_info))
{
sum_add (block_info.balance.number ());
current->balance_hash = 0;
}
else
{
current->balance_hash = block_a.hashables.previous;
}
}
}
btcb::summation_visitor::frame btcb::summation_visitor::push (btcb::summation_visitor::summation_type type_a, btcb::block_hash const & hash_a)
{
frames.emplace (type_a, type_a == summation_type::balance ? hash_a : 0, type_a == summation_type::amount ? hash_a : 0);
return frames.top ();
}
void btcb::summation_visitor::sum_add (btcb::uint128_t addend_a)
{
current->sum += addend_a;
result = current->sum;
}
void btcb::summation_visitor::sum_set (btcb::uint128_t value_a)
{
current->sum = value_a;
result = current->sum;
}
btcb::uint128_t btcb::summation_visitor::compute_internal (btcb::summation_visitor::summation_type type_a, btcb::block_hash const & hash_a)
{
push (type_a, hash_a);
/*
Invocation loop representing balance and amount computations calling each other.
This is usually better done by recursion or something like boost::coroutine2, but
segmented stacks are not supported on all platforms so we do it manually to avoid
stack overflow (the mutual calls are not tail-recursive so we cannot rely on the
compiler optimizing that into a loop, though a future alternative is to do a
CPS-style implementation to enforce tail calls.)
*/
while (frames.size () > 0)
{
current = &frames.top ();
assert (current != nullptr && current->type != summation_type::invalid);
if (current->type == summation_type::balance)
{
if (current->awaiting_result)
{
sum_add (current->incoming_result);
current->awaiting_result = false;
}
while (!current->awaiting_result && (!current->balance_hash.is_zero () || !current->amount_hash.is_zero ()))
{
if (!current->amount_hash.is_zero ())
{
// Compute amount
current->awaiting_result = true;
push (summation_type::amount, current->amount_hash);
current->amount_hash = 0;
}
else
{
auto block (store.block_get (transaction, current->balance_hash));
assert (block != nullptr);
block->visit (*this);
}
}
epilogue ();
}
else if (current->type == summation_type::amount)
{
if (current->awaiting_result)
{
sum_set (current->sum < current->incoming_result ? current->incoming_result - current->sum : current->sum - current->incoming_result);
current->awaiting_result = false;
}
while (!current->awaiting_result && (!current->amount_hash.is_zero () || !current->balance_hash.is_zero ()))
{
if (!current->amount_hash.is_zero ())
{
auto block (store.block_get (transaction, current->amount_hash));
if (block != nullptr)
{
block->visit (*this);
}
else
{
if (current->amount_hash == btcb::genesis_account)
{
sum_set (std::numeric_limits<btcb::uint128_t>::max ());
current->amount_hash = 0;
}
else
{
assert (false);
sum_set (0);
current->amount_hash = 0;
}
}
}
else
{
// Compute balance
current->awaiting_result = true;
push (summation_type::balance, current->balance_hash);
current->balance_hash = 0;
}
}
epilogue ();
}
}
return result;
}
void btcb::summation_visitor::epilogue ()
{
if (!current->awaiting_result)
{
frames.pop ();
if (frames.size () > 0)
{
frames.top ().incoming_result = current->sum;
}
}
}
btcb::uint128_t btcb::summation_visitor::compute_amount (btcb::block_hash const & block_hash)
{
return compute_internal (summation_type::amount, block_hash);
}
btcb::uint128_t btcb::summation_visitor::compute_balance (btcb::block_hash const & block_hash)
{
return compute_internal (summation_type::balance, block_hash);
}
btcb::representative_visitor::representative_visitor (btcb::transaction const & transaction_a, btcb::block_store & store_a) :
transaction (transaction_a),
store (store_a),
result (0)
{
}
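// walks back through the chain until reaching a block type that determines
// the representative (an open, change or state block)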
void btcb::representative_visitor::compute (btcb::block_hash const & hash_a)
{
current = hash_a;
while (result.is_zero ())
{
auto block (store.block_get (transaction, current));
assert (block != nullptr);
block->visit (*this);
}
}
void btcb::representative_visitor::send_block (btcb::send_block const & block_a)
{
current = block_a.previous ();
}
void btcb::representative_visitor::receive_block (btcb::receive_block const & block_a)
{
current = block_a.previous ();
}
void btcb::representative_visitor::open_block (btcb::open_block const & block_a)
{
result = block_a.hash ();
}
void btcb::representative_visitor::change_block (btcb::change_block const & block_a)
{
result = block_a.hash ();
}
void btcb::representative_visitor::state_block (btcb::state_block const & block_a)
{
result = block_a.hash ();
}
|
{"hexsha": "bb96606afda4d54b1d38411ebdcfdf95c51eda3b", "size": 7326, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "btcb/secure/blockstore.cpp", "max_stars_repo_name": "melnaquib/btcb", "max_stars_repo_head_hexsha": "f55c9867113d403118c3028d5ba11a0debcd7609", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2019-03-01T13:33:33.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-02T04:42:38.000Z", "max_issues_repo_path": "btcb/secure/blockstore.cpp", "max_issues_repo_name": "melnaquib/btcb", "max_issues_repo_head_hexsha": "f55c9867113d403118c3028d5ba11a0debcd7609", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "btcb/secure/blockstore.cpp", "max_forks_repo_name": "melnaquib/btcb", "max_forks_repo_head_hexsha": "f55c9867113d403118c3028d5ba11a0debcd7609", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-04-03T14:27:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-22T17:14:28.000Z", "avg_line_length": 25.6153846154, "max_line_length": 142, "alphanum_fraction": 0.6935571936, "num_tokens": 1949}
|
// Copyright Abel Sinkovics (abel@sinkovics.hu) 2015.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/metaparse/sequence_apply.hpp>
#include <boost/metaparse/is_error.hpp>
#include <boost/metaparse/start.hpp>
#include <boost/metaparse/get_result.hpp>
#include <boost/metaparse/always.hpp>
#include <boost/metaparse/one_char.hpp>
#include "common.hpp"
#include <boost/mpl/equal_to.hpp>
#include <boost/mpl/list.hpp>
#include <boost/mpl/at.hpp>
#include <boost/mpl/vector_c.hpp>
#include <boost/mpl/vector.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/mpl/char.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/preprocessor/repetition/repeat_from_to.hpp>
#include <boost/preprocessor/repetition/enum_params.hpp>
#include <boost/preprocessor/repetition/enum.hpp>
#include <boost/preprocessor/tuple/eat.hpp>
#include <boost/preprocessor/cat.hpp>
#include "test_case.hpp"
namespace
{
#ifdef BOOST_METAPARSE_C_VALUE
# error BOOST_METAPARSE_C_VALUE already defined
#endif
#define BOOST_METAPARSE_C_VALUE(z, n, unused) BOOST_PP_CAT(C, n)::value
#ifdef BOOST_METAPARSE_TEMPLATE
# error BOOST_METAPARSE_TEMPLATE already defined
#endif
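// for each arity n, the macro below generates template_c<n> (taking n char
// non-type parameters) and template<n> (taking n types and instantiating
// template_c<n> from their ::value members)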
#define BOOST_METAPARSE_TEMPLATE(z, n, unused) \
template <BOOST_PP_ENUM(n, char BOOST_PP_TUPLE_EAT(3), ~)> \
struct BOOST_PP_CAT(template_c, n) \
{ \
typedef BOOST_PP_CAT(template_c, n) type; \
}; \
\
template <BOOST_PP_ENUM_PARAMS(n, class C)> \
struct BOOST_PP_CAT(template, n) \
{ \
typedef \
BOOST_PP_CAT(template_c, n)< \
BOOST_PP_ENUM(n, BOOST_METAPARSE_C_VALUE, ~) \
> \
type; \
};
BOOST_PP_REPEAT_FROM_TO(1, 4, BOOST_METAPARSE_TEMPLATE, ~)
#undef BOOST_METAPARSE_TEMPLATE
#undef BOOST_METAPARSE_C_VALUE
template <class T> struct has_no_type {};
// "is_same<T::type::type, double_eval<T>::type>" - helper tool to avoid
// writing type::type (which is interpreted as the constructor of ::type by
// msvc-7.1)
template <class T> struct double_eval : T::type {};
}
BOOST_METAPARSE_TEST_CASE(sequence_apply)
{
using boost::metaparse::get_result;
using boost::metaparse::sequence_apply1;
using boost::metaparse::sequence_apply2;
using boost::metaparse::sequence_apply3;
using boost::metaparse::start;
using boost::metaparse::is_error;
using boost::metaparse::always;
using boost::metaparse::one_char;
using boost::mpl::list;
using boost::mpl::equal_to;
using boost::mpl::at_c;
using boost::mpl::vector_c;
using boost::mpl::vector;
using boost::mpl::char_;
using boost::is_same;
typedef always<one_char, int> always_int;
// test_one_parser
BOOST_MPL_ASSERT((
is_same<
template_c1<'h'>,
double_eval<
get_result<
sequence_apply1<template1, lit_h>::apply<str_hello, start>
>
>::type
>
));
// test_one_failing_parser
BOOST_MPL_ASSERT((
is_error<sequence_apply1<template1, lit_e>::apply<str_hello, start> >
));
// test_two_chars
BOOST_MPL_ASSERT((
is_same<
template_c2<'h', 'e'>,
double_eval<
get_result<
sequence_apply2<template2, lit_h, lit_e>::apply<str_hello, start>
>
>::type
>
));
// test_first_fails
BOOST_MPL_ASSERT((
is_error<sequence_apply2<template2, lit_x, lit_e>::apply<str_hello, start> >
));
// test_second_fails
BOOST_MPL_ASSERT((
is_error<sequence_apply2<template2, lit_h, lit_x>::apply<str_hello, start> >
));
// test_empty_input
BOOST_MPL_ASSERT((
is_error<sequence_apply2<template2, lit_h, lit_e>::apply<str_,start> >
));
// test_three_chars
BOOST_MPL_ASSERT((
is_same<
template_c3<'h', 'e', 'l'>,
double_eval<
get_result<
sequence_apply3<template3, lit_h, lit_e, lit_l>
::apply<str_hello, start>
>
>::type
>
));
// test_no_extra_evaluation
BOOST_MPL_ASSERT((
is_same<
has_no_type<int>,
get_result<
sequence_apply1<has_no_type, always_int>::apply<str_ca, start>
>::type
>
));
}
|
{"hexsha": "179ba25c2230c46ec8c434cf2981776b06edc377", "size": 4330, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/metaparse/test/sequence_apply.cpp", "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2019-02-27T06:57:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-29T10:56:19.000Z", "max_issues_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/metaparse/test/sequence_apply.cpp", "max_issues_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_issues_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-04-04T18:00:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-04T18:00:00.000Z", "max_forks_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/metaparse/test/sequence_apply.cpp", "max_forks_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_forks_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-08-20T13:45:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T18:23:49.000Z", "avg_line_length": 26.5644171779, "max_line_length": 81, "alphanum_fraction": 0.6681293303, "num_tokens": 1176}
|
struct fixedIncome <: Income end
|
{"hexsha": "e62a9591267dbe6de0b8fe6cfe2ae9b1ab42a9a9", "size": 33, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Accounting/CrAccount/Capital/Income/fixedIncome/fixedIncome.jl", "max_stars_repo_name": "adamwillisXanax/accountingJulia", "max_stars_repo_head_hexsha": "59aea20f42c73ae2e740d3cd279828d5ad52e1fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Accounting/CrAccount/Capital/Income/fixedIncome/fixedIncome.jl", "max_issues_repo_name": "adamwillisXanax/accountingJulia", "max_issues_repo_head_hexsha": "59aea20f42c73ae2e740d3cd279828d5ad52e1fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Accounting/CrAccount/Capital/Income/fixedIncome/fixedIncome.jl", "max_forks_repo_name": "adamwillisXanax/accountingJulia", "max_forks_repo_head_hexsha": "59aea20f42c73ae2e740d3cd279828d5ad52e1fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.5, "max_line_length": 32, "alphanum_fraction": 0.7878787879, "num_tokens": 8}
|
from numpy import prod
from torch import einsum
from torch.nn import Conv1d, Conv2d, Conv3d
from torch.nn.grad import _grad_input_padding
from torch.nn.functional import conv1d, conv2d, conv3d
from torch.nn.functional import conv_transpose1d, conv_transpose2d, conv_transpose3d
from backpack.core.derivatives.basederivatives import BaseParameterDerivatives
from backpack.utils import conv as convUtils
from backpack.utils.ein import eingroup
class ConvNDDerivatives(BaseParameterDerivatives):
def __init__(self, N):
if N == 1:
self.module = Conv1d
self.dim_text = "x"
self.conv_func = conv1d
self.conv_transpose_func = conv_transpose1d
elif N == 2:
self.module = Conv2d
self.dim_text = "x,y"
self.conv_func = conv2d
self.conv_transpose_func = conv_transpose2d
elif N == 3:
self.module = Conv3d
self.dim_text = "x,y,z"
self.conv_func = conv3d
self.conv_transpose_func = conv_transpose3d
else:
raise ValueError("{}-dimensional Conv. is not implemented.".format(N))
self.conv_dims = N
def hessian_is_zero(self):
return True
def get_unfolded_input(self, module):
return convUtils.unfold_by_conv(module.input0, module)
def _jac_mat_prod(self, module, g_inp, g_out, mat):
dims = self.dim_text
mat_as_conv = eingroup("v,n,c,{}->vn,c,{}".format(dims, dims), mat)
jmp_as_conv = self.conv_func(
mat_as_conv,
module.weight.data,
stride=module.stride,
padding=module.padding,
dilation=module.dilation,
groups=module.groups,
)
return self.reshape_like_output(jmp_as_conv, module)
def _jac_t_mat_prod(self, module, g_inp, g_out, mat):
dims = self.dim_text
mat_as_conv = eingroup("v,n,c,{}->vn,c,{}".format(dims, dims), mat)
jmp_as_conv = self.__jac_t(module, mat_as_conv)
return self.reshape_like_input(jmp_as_conv, module)
def __jac_t(self, module, mat):
input_size = list(module.input0.size())
input_size[0] = mat.size(0)
grad_padding = _grad_input_padding(
grad_output=mat,
input_size=input_size,
stride=module.stride,
padding=module.padding,
kernel_size=module.kernel_size,
dilation=module.dilation,
)
jac_t_mat = self.conv_transpose_func(
input=mat,
weight=module.weight,
bias=None,
stride=module.stride,
padding=module.padding,
output_padding=grad_padding,
groups=module.groups,
dilation=module.dilation,
)
return jac_t_mat
def _bias_jac_mat_prod(self, module, g_inp, g_out, mat):
"""mat has shape [V, C_out]"""
# Expand batch dimension
jac_mat = mat.unsqueeze(1)
# Expand data dimensions
for i in range(3, len(module.output_shape) + 1):
jac_mat = jac_mat.unsqueeze(i)
expand_shape = [-1, module.output_shape[0], -1, *module.output_shape[2:]]
return jac_mat.expand(*expand_shape)
def _bias_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch=True):
axes = list(range(3, len(module.output_shape) + 1))
if sum_batch:
axes = [1] + axes
return mat.sum(axes)
def _weight_jac_mat_prod(self, module, g_inp, g_out, mat):
if module.groups != 1:
raise NotImplementedError("Groups greater than 1 are not supported yet")
dims = self.dim_text
dims_joined = dims.replace(",", "")
jac_mat = eingroup("v,o,i,{}->v,o,i{}".format(dims, dims_joined), mat)
X = self.get_unfolded_input(module)
jac_mat = einsum("nij,vki->vnkj", X, jac_mat)
return self.reshape_like_output(jac_mat, module)
def _weight_jac_t_mat_prod(self, module, g_inp, g_out, mat, sum_batch=True):
if module.groups != 1:
raise NotImplementedError("Groups greater than 1 are not supported yet")
V = mat.shape[0]
N, C_out = module.output_shape[0], module.output_shape[1]
C_in = module.input0_shape[1]
C_in_axis = 1
N_axis = 0
dims = self.dim_text
repeat_pattern = [1, C_in] + [1 for _ in range(self.conv_dims)]
mat = eingroup("v,n,c,{}->vn,c,{}".format(dims, dims), mat)
mat = mat.repeat(*repeat_pattern)
mat = eingroup("a,b,{}->ab,{}".format(dims, dims), mat)
mat = mat.unsqueeze(C_in_axis)
repeat_pattern = [1, V] + [1 for _ in range(self.conv_dims)]
input = eingroup("n,c,{}->nc,{}".format(dims, dims), module.input0)
input = input.unsqueeze(N_axis)
input = input.repeat(*repeat_pattern)
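# note: stride and dilation are exchanged in the call below, since the weight
# gradient of a strided convolution corresponds to a dilated convolution of
# the input with the (vectorized) gradient of the output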
grad_weight = self.conv_func(
input,
mat,
bias=None,
stride=module.dilation,
padding=module.padding,
dilation=module.stride,
groups=C_in * N * V,
).squeeze(0)
for dim in range(self.conv_dims):
axis = dim + 1
size = module.weight.shape[2 + dim]
grad_weight = grad_weight.narrow(axis, 0, size)
sum_dim = "" if sum_batch else "n,"
eingroup_eq = "vnio,{}->v,{}o,i,{}".format(dims, sum_dim, dims)
return eingroup(
eingroup_eq, grad_weight, dim={"v": V, "n": N, "i": C_in, "o": C_out}
)
def ea_jac_t_mat_jac_prod(self, module, g_inp, g_out, mat):
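# forms J^T M J for the convolution Jacobian J by applying the transposed
# Jacobian to mat, transposing the result, and applying it once more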
in_features = int(prod(module.input0.size()[1:]))
out_features = int(prod(module.output.size()[1:]))
mat = mat.reshape(out_features, *module.output.size()[1:])
jac_t_mat = self.__jac_t(module, mat).reshape(out_features, in_features)
mat_t_jac = jac_t_mat.t().reshape(in_features, *module.output.size()[1:])
jac_t_mat_t_jac = self.__jac_t(module, mat_t_jac)
jac_t_mat_t_jac = jac_t_mat_t_jac.reshape(in_features, in_features)
return jac_t_mat_t_jac.t()
|
{"hexsha": "0a261b880de5ba8cdf8aedee728c1d1da9c62bfe", "size": 6144, "ext": "py", "lang": "Python", "max_stars_repo_path": "backpack/core/derivatives/convnd.py", "max_stars_repo_name": "maryamhgf/backpack", "max_stars_repo_head_hexsha": "63d2717656df2e0f18b3b6ee50320e82ce7358b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "backpack/core/derivatives/convnd.py", "max_issues_repo_name": "maryamhgf/backpack", "max_issues_repo_head_hexsha": "63d2717656df2e0f18b3b6ee50320e82ce7358b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "backpack/core/derivatives/convnd.py", "max_forks_repo_name": "maryamhgf/backpack", "max_forks_repo_head_hexsha": "63d2717656df2e0f18b3b6ee50320e82ce7358b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-11T14:15:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-16T11:19:11.000Z", "avg_line_length": 36.1411764706, "max_line_length": 84, "alphanum_fraction": 0.6083984375, "include": true, "reason": "from numpy", "num_tokens": 1558}
|
import numpy as np
import matplotlib.pyplot as plt
from skyfield.api import Loader, EarthSatellite, Topos
# We really just want the filedialog from tkinter
import tkinter as tk
from tkinter import filedialog
from datetime import datetime
from os import path
def _calculateGroundTrack(earth, satellite, timeset):
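# rotates each satellite position into an Earth-fixed frame by undoing the
# Earth's rotation (measured via a reference point at lat/long 0,0), then
# plots the resulting ground track in degrees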
topoZero = Topos(latitude_degrees=0.0, longitude_degrees=0.0)
satAbs = earth + satellite
earthPosition = earth.at(timeset).position.km
zeroPosition = topoZero.at(timeset).position.km
satPosition = satAbs.at(timeset).position.km - earthPosition
earthRotation = np.arctan2(zeroPosition[1], zeroPosition[0])  # angle in the equatorial x-y plane
sinRot = np.sin(-earthRotation)
cosRot = np.cos(-earthRotation)
xAdj = satPosition[0] * cosRot - satPosition[1] * sinRot
yAdj = satPosition[0] * sinRot + satPosition[1] * cosRot
rxy = np.sqrt(xAdj**2 + yAdj**2)
satLat = np.arctan2(satPosition[2], rxy)
satLong = np.arctan2(yAdj, xAdj)
plt.plot(np.rad2deg(satLong), np.rad2deg(satLat), 'o')
def main():
# Set up and hide tkinter root window
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
if not path.exists(file_path):
print("Error - no such file")
quit()
root.destroy()
# Load earth position from ephemeris data
sfLoader = Loader('resources')
ephemeris = sfLoader('de421.bsp')
earth = ephemeris['earth']
# Setup timescale for calculating ground track
ts = sfLoader.timescale()
now = datetime.utcnow()
steps = np.arange(0, 180, 1)
time = ts.utc(now.year, now.month, now.day, now.hour, steps)
# Load satellite data from tle file
satellites = sfLoader.tle_file(file_path)
# Set up plot
plt.figure()
img = plt.imread('resources/map.png')
plt.imshow(img, extent=[-180,180,-90,90])
for sat in satellites:
_calculateGroundTrack(earth, sat, time)
plt.show()
if __name__ == "__main__":
main()
|
{"hexsha": "783a7630ce87a030398ce9cab297deb95f9560bf", "size": 1991, "ext": "py", "lang": "Python", "max_stars_repo_path": "TLEPlot.py", "max_stars_repo_name": "Uniliterally/TLEPlot", "max_stars_repo_head_hexsha": "4928eb7db2d5b7c9e37decb2f8f367c16dd02c01", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TLEPlot.py", "max_issues_repo_name": "Uniliterally/TLEPlot", "max_issues_repo_head_hexsha": "4928eb7db2d5b7c9e37decb2f8f367c16dd02c01", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TLEPlot.py", "max_forks_repo_name": "Uniliterally/TLEPlot", "max_forks_repo_head_hexsha": "4928eb7db2d5b7c9e37decb2f8f367c16dd02c01", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2739726027, "max_line_length": 66, "alphanum_fraction": 0.6745354093, "include": true, "reason": "import numpy", "num_tokens": 558}
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import tensorflow as tf
import numpy as np
import copy
from abc import ABC, abstractmethod
from rl.core.function_approximators.normalizers.normalizer import Normalizer, NormalizerStd, NormalizerMax
from rl.core.utils.tf_utils import tf_float
class ClipNormalizer(tf.keras.layers.Layer):
""" A tf.keras.layers.Layer that mimics the behavior of Normalizer. """
    # NOTE: custom layers can be deepcopied and pickled
def __init__(self, shape, thre_shape):
super().__init__()
self._ts_bias = self.add_variable('bias', shape, dtype=tf_float, trainable=False)
self._ts_scale = self.add_variable('scale', shape, dtype=tf_float, trainable=False)
self._ts_unscale = self.add_variable('unscale', dtype=tf.bool, trainable=False)
self._ts_unbias = self.add_variable('unbias', dtype=tf.bool, trainable=False)
self._ts_initialized = self.add_variable('initialized', dtype=tf.bool, trainable=False)
self._ts_clip = self.add_variable('clip', dtype=tf.bool, trainable=False)
self._ts_thre = self.add_variable('thre', thre_shape, dtype=tf_float, trainable=False)
def build(self, input_shape):
pass
def call(self, ts_x):
if tf.logical_not(self._ts_initialized):
return ts_x
# do something
if tf.logical_not(self._ts_clip):
if tf.logical_not(self._ts_unbias):
ts_x = ts_x - self._ts_bias
if tf.logical_not(self._ts_unscale):
ts_x = ts_x / self._ts_scale
else:
# need to first scale it before clipping
ts_x = (ts_x - self._ts_bias) / self._ts_scale
ts_x = tf.clip_by_value(ts_x, self._ts_thre[0], self._ts_thre[1])
# check if we need to scale it back
if self._ts_unscale:
ts_x = ts_x * self._ts_scale
if self._ts_unbias:
ts_x = ts_x + self._ts_bias
else:
if self._ts_unbias:
ts_x = ts_x + self._ts_bias / self._ts_scale
return ts_x
def _tfNormalizerDecorator(cls):
""" A decorator for adding a tf operator equivalent of Normalizer.predict
    It reuses all the functionalities of the original Normalizer and adds
    additional tf.Variables for defining the tf operator.
"""
assert issubclass(cls, Normalizer)
class decorated_cls(cls):
def __init__(self, shape, *args, **kwargs):
super().__init__(shape, *args, **kwargs)
# add additional tf.Variables
thre_shape = (1,1) if self._thre is None else self._thre.shape
self.klayer = ClipNormalizer(shape, thre_shape)
self._update_tf_vars()
def ts_predict(self, ts_x):
return self.klayer(ts_x)
def ts_normalize(self, ts_x):
return self.ts_predict(ts_x)
# make sure the tf.Variables are synchronized
def update(self, x):
super().update(x)
self._update_tf_vars()
def reset(self):
super().reset()
self._update_tf_vars()
def assign(self, other):
super().assign(other)
self._update_tf_vars()
def _update_tf_vars(self):
# synchronize the tf.Variables
            # klayer may not exist yet if this is called from the base __init__
            if getattr(self, 'klayer', None) is not None:
self.klayer._ts_bias.assign(self._bias)
self.klayer._ts_scale.assign(self._scale)
self.klayer._ts_unbias.assign(self._unbias)
self.klayer._ts_unscale.assign(self._unscale)
self.klayer._ts_initialized.assign(self._initialized)
self.klayer._ts_clip.assign(self._thre is not None)
if self.klayer._ts_clip:
self.klayer._ts_thre.assign(self._thre)
# make them look the same as intended
decorated_cls.__name__ = cls.__name__
decorated_cls.__qualname__ = cls.__qualname__
return decorated_cls
@_tfNormalizerDecorator
class tfNormalizerStd(NormalizerStd):
pass
@_tfNormalizerDecorator
class tfNormalizerMax(NormalizerMax):
pass
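# A minimal usage sketch (hypothetical shapes/data, assuming eager execution
# and that the rl.core package is importable):
#   norm = tfNormalizerStd((3,))
#   norm.update(np.random.randn(100, 3))          # fit running statistics
#   ts_y = norm.ts_normalize(tf.constant(np.random.randn(5, 3), dtype=tf_float))
# update()/reset()/assign() keep the ClipNormalizer tf.Variables in sync, so
# ts_y should match what the plain Normalizer.predict would return.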
|
{"hexsha": "52612a3d8eb311eb7831cdac9543f1b065cc2453", "size": 4201, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl/core/function_approximators/normalizers/keras_normalizer.py", "max_stars_repo_name": "gtrll/librl", "max_stars_repo_head_hexsha": "39709c3e485e232865b3e08b7211cd9d871c666a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rl/core/function_approximators/normalizers/keras_normalizer.py", "max_issues_repo_name": "gtrll/librl", "max_issues_repo_head_hexsha": "39709c3e485e232865b3e08b7211cd9d871c666a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl/core/function_approximators/normalizers/keras_normalizer.py", "max_forks_repo_name": "gtrll/librl", "max_forks_repo_head_hexsha": "39709c3e485e232865b3e08b7211cd9d871c666a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1769911504, "max_line_length": 106, "alphanum_fraction": 0.6391335396, "include": true, "reason": "import numpy", "num_tokens": 968}
|
from typing import List, Dict, Any, Optional, Union
from ..core.schemas import ANNOTATION_SCHEMA, SEGMENTATION_SCHEMA
from abc import ABC, abstractmethod
import numpy as np
class Scene(ABC):
def __init__(self):
        self.frames: Optional[Dict] = None
self.cameras = {}
self.lidars = {}
self.radars = {}
self.metadata = {}
self.frame_data = {}
self.loader = None
        self.num_frames: Optional[int] = None
self.include_annotations = None
self.frame_no_to_full_frame_no = {}
self.full_frame_no_to_frame_no = {}
@abstractmethod
def convert(self):
"""Convert from different formats to common format"""
raise NotImplementedError
@abstractmethod
def stack_frames(self, frame_numbers: List[int]):
raise NotImplementedError
def get_frames(self, frame_numbers: List[int], stack: bool = False):
raise NotImplementedError
def load_metadata(self):
raise NotImplementedError
def load_data(self, frame_numbers: Union[List[int], int]):
raise NotImplementedError
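# A minimal subclass sketch (hypothetical dataset, not part of the original
# module) showing how the abstract interface above is meant to be filled in:
#
# class DummyScene(Scene):
#     def convert(self):
#         # translate the source format into the common frame layout
#         self.frames = {0: {"camera": None, "annotations": []}}
#         self.num_frames = 1
#
#     def stack_frames(self, frame_numbers: List[int]):
#         # combine several frames into one aggregate view
#         return [self.frames[n] for n in frame_numbers]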
|
{"hexsha": "0071510c39abf3ec0a3fbd0e9bedc19f781b89b6", "size": 1101, "ext": "py", "lang": "Python", "max_stars_repo_path": "frames/scene.py", "max_stars_repo_name": "jacobbieker/3dml", "max_stars_repo_head_hexsha": "f4b0e49343a18b4935c1502112e7bef0ff448986", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-14T15:11:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T15:11:34.000Z", "max_issues_repo_path": "frames/scene.py", "max_issues_repo_name": "jacobbieker/3dml", "max_issues_repo_head_hexsha": "f4b0e49343a18b4935c1502112e7bef0ff448986", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-07-01T19:28:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-01T19:32:57.000Z", "max_forks_repo_path": "frames/scene.py", "max_forks_repo_name": "jacobbieker/3dml", "max_forks_repo_head_hexsha": "f4b0e49343a18b4935c1502112e7bef0ff448986", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2307692308, "max_line_length": 72, "alphanum_fraction": 0.6630336058, "include": true, "reason": "import numpy", "num_tokens": 227}
|
\documentclass{article}
%%%%%% Include Packages %%%%%%
\usepackage{sectsty}
\usepackage{amsmath,amsfonts,amsthm,amssymb}
\usepackage{fancyhdr}
\usepackage{lastpage}
\usepackage{setspace}
\usepackage{graphicx}
%%%%%% Formatting Modifications %%%%%%
\usepackage[margin=2.5cm]{geometry} %% Set margins
\sectionfont{\sectionrule{0pt}{0pt}{-8pt}{0.8pt}} %% Underscore section headers
\setstretch{1.2} %% Set 1.2 spacing
%%%%%% Set Homework Variables %%%%%%
\newcommand{\hwkNum}{5}
\newcommand{\hwkAuthors}{Ben Drucker}
%%%%%% Set Header/Footer %%%%%%
\pagestyle{fancy}
\lhead{\hwkAuthors}
\rhead{Homework \#\hwkNum}
\rfoot{\textit{\footnotesize{\thepage /\pageref{LastPage}}}}
\cfoot{}
\renewcommand\headrulewidth{0.4pt}
\renewcommand\footrulewidth{0.4pt}
%%%%%% Document %%%%%%
\begin{document}
\title{Homework \#\hwkNum}
\author{\hwkAuthors}
\date{}
\maketitle
%%%%%% Begin Content %%%%%%
\section*{4.2}
\subsection*{20}
\subsubsection*{a)}
$F(Y) = \begin{cases}
0 \leq y \leq 5 &\Rightarrow \int_0^y \frac{y}{25}dy = \frac{y^2}{50} \\
5 \leq y \leq 10 &\Rightarrow \int_0^y f(y)dy = \int_0^5 f(y)dy + \int_5^y f(y)dy \\
&= \frac{1}{2} + \int_5^y\left [ \frac{2}{5} - \frac{y}{25} \right ] dy = \frac{2y}{5} - \frac{y^2}{50} -1
\end{cases}$ \\ \\ \\
\includegraphics[height=2in]{4-2--20a}
\subsubsection*{b)}
$ 0 < p \leq .5 \Rightarrow p=F(y_p) = \frac{y_p^2}{50} \rightarrow y_p = \sqrt{50p} \\
.5 < p \leq 1 \Rightarrow p = \frac{2y_p}{5} - \frac{y_p^2}{50}-1 \rightarrow y_p = 10-5\sqrt{2-2p}$
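(For the second branch, rearranging $p = \frac{2y_p}{5} - \frac{y_p^2}{50} - 1$ gives the quadratic $y_p^2 - 20y_p + 50(1+p) = 0$, so $y_p = 10 \pm 5\sqrt{2-2p}$; the root with the minus sign is taken since $y_p \leq 10$.)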
\subsubsection*{c)}
$E(Y) =\int_0^5 y\frac{y}{25}dy + \int_5^{10}y \left ( \frac{2}{5} - \frac{y}{25} \right )dy =5 \\
V(Y) = \left(\int_0^5 \frac{y^3}{25} \, dy+\int_5^{10} y^2 \left(\frac{2}{5}-\frac{y}{25}\right) \, dy\right)-5^2 = \frac{25}{6} $\\
For a single bus, the values are simply halved. So: $E(X) =2.5, V(X) = \frac{25}{12}$
\section*{4.3}
\subsection*{40}
\subsubsection*{a)}
$P(X \leq 40) = P \left (Z \leq \frac{40-43}{4.5} \right ) \approxeq 0.2546 \\
P(X >60) = 1- P(Z < \frac{60-43}{4.5}) \approxeq 1- 0.9999 = 0.0001 $
\subsubsection*{b)}
$ P(Z < z) = .75 \rightarrow z = .67 \rightarrow .67 = \frac{x-43}{4.5} \Rightarrow x = 46.015$
\subsection*{46)}
\subsubsection*{a)}
$$P(67 \leq X \leq 75) = P \left ( \frac{67-70}{3} < Z < \frac{75-70}{3} \right ) \approxeq .953 - .159 = .794$$
\subsubsection*{b)}
$Z_{.05/2} = Z_{.025} = 1.96; 1.96 * 3 = 5.88. $
\subsubsection*{c)}
$E(RV) = .794 * 10 = 7.94$
\subsubsection*{d)}
$P(X \leq 73.84) = 0.89973\\
P(p = 0.9, n = 10, x = 9) = .387 \\
P(p = 0.9, n = 10, x = 10) = 0.349 \\
p = 1-0.387-.349 = .264$
\subsection*{48}
\subsubsection*{a)}
$p(1.72) - p(.55) = .2485\\
(p(.55)-p(0))+(p(1.72)-p(0))$
%%%%%% End Content %%%%%%
\end{document}
|
{"hexsha": "01d7ed558e1a4951a8e3992c95eb466673883d99", "size": 2829, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Spring 2013/STAT W1211 - Statistics/Homework/Homework 5/Homework 5.tex", "max_stars_repo_name": "bendrucker/columbia", "max_stars_repo_head_hexsha": "0661e729fa0c7cb792fc31a2da77f2b44874d8a1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2017-05-09T03:30:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T03:38:03.000Z", "max_issues_repo_path": "Spring 2013/STAT W1211 - Statistics/Homework/Homework 5/Homework 5.tex", "max_issues_repo_name": "bendrucker/columbia", "max_issues_repo_head_hexsha": "0661e729fa0c7cb792fc31a2da77f2b44874d8a1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Spring 2013/STAT W1211 - Statistics/Homework/Homework 5/Homework 5.tex", "max_forks_repo_name": "bendrucker/columbia", "max_forks_repo_head_hexsha": "0661e729fa0c7cb792fc31a2da77f2b44874d8a1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-01-24T17:48:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-09T01:38:54.000Z", "avg_line_length": 31.7865168539, "max_line_length": 135, "alphanum_fraction": 0.5729939908, "num_tokens": 1263}
|
lemma one_add_le_self (x : mynat) : x ≤ 1 + x :=
begin
-- rw le_iff_exists_add,
use 1,
rw add_comm,
end
|
{"author": "chanha-park", "repo": "naturalNumberGame", "sha": "4e0d7100ce4575e1add92feefa38b1250431b879", "save_path": "github-repos/lean/chanha-park-naturalNumberGame", "path": "github-repos/lean/chanha-park-naturalNumberGame/naturalNumberGame-4e0d7100ce4575e1add92feefa38b1250431b879/Inequality/1.lean"}
|
#include "kmers/dirichlet-sampler.hpp"
#include "kmers/fasta-parser.hpp"
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <locale>
#include <stdexcept>
#include <vector>
int64_t num_kmers(int64_t K) {
int64_t y = 1;
for (int k = 0; k < K; ++k) {
y *= 4;
}
return y;
}
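// num_kmers(K) returns 4^K, the number of distinct DNA K-mers over {A,C,G,T};
// e.g. num_kmers(10) == 1048576, comfortably within int64_t for practical K.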
template <typename T>
void write_val(std::ostream& out, const T& x) {
out.write((char *)(&x), sizeof(T));
}
template <typename T>
void write_array(std::ostream& out, const T* x, int32_t N) {
for (int32_t n = 0; n < N; ++n)
write_val<T>(out, x[n]);
}
struct number_format : public std::numpunct<char> {
char do_thousands_sep() const { return ','; }
std::string do_grouping() const { return "\03"; }
};
bool valid_base(char c) {
switch (c) {
case 'A':
case 'C':
case 'G':
case 'T': return true;
default: return false;
}
}
bool valid_bases(const std::string& seq) {
return std::all_of(seq.begin(), seq.end(),
[](char x) { return valid_base(x); });
}
template <typename F1, typename F2>
struct coupler {
F1& f1_;
F2& f2_;
coupler(F1& f1, F2& f2) : f1_(f1), f2_(f2) { }
void operator()(const std::string& id, const std::string& seq) {
f1_(id, seq);
f2_(id, seq);
}
void report() {
f1_.report();
f2_.report();
}
};
template <typename F>
F couple(F& f) {
return f;
}
template <typename F1, typename F2>
coupler<F1, F2> couple(F1& f1, F2& f2) {
return coupler<F1, F2>(f1, f2);
}
struct shredder {
const int64_t K_;
std::vector<int32_t> count_;
shredder(int64_t K) : K_(K), count_(fasta::kmers(K), 0) { }
void operator()(const std::string& id, const std::string& seq) {
    // start + K_ <= size avoids the unsigned wrap of seq.size() - K_ + 1 when seq is shorter than K
    for (int32_t start = 0; start + K_ <= static_cast<int64_t>(seq.size()); ++start) {
std::string kmer = seq.substr(start, K_);
try {
int32_t id = fasta::kmer_id(kmer);
++count_[id];
} catch (...) {
std::cout << " # illegal kmer = |" << kmer << "|"
<< " in ref id = " << id.substr(0, std::min<size_t>(15U, id.size()))
<< std::endl;
}
}
}
void report() {
std::cout << "Writing histogram to histo.csv" << std::endl;
std::ofstream f("histo.csv");
f << "id,count" << std::endl;
for (size_t i = 0; i < count_.size(); ++i)
f << i << "," << count_[i] << std::endl;
f.close();
}
};
struct validator {
size_t invalid_count_;
validator() : invalid_count_(0) { }
void operator()(const std::string& id, const std::string& seq) {
for (char c : seq) {
if (!valid_base(c)) {
++invalid_count_;
std::cout << "expecting one of {A,C,G,T}, found base = " << c
<< "; seq id = " << id
<< std::endl;
}
}
}
void report() const {
std::cout << invalid_count_ << " invalid sequences"
<< std::endl;
}
};
struct counter {
size_t num_targets_;
size_t num_bases_;
counter() : num_targets_(0), num_bases_(0) { }
  void operator()(const std::string& id, const std::string& seq) {
++num_targets_;
num_bases_ += seq.size();
if ((num_targets_ % 10000) == 0)
std::cout << " # targets = " << num_targets_ << std::endl;
}
void report() const {
number_format nf;
std::cout.imbue({std::locale(), &nf});
std::cout << "counter.report(): " << num_targets_ << " targets"
<< std::endl;
std::cout << "counter.report(): " << num_bases_ << " bases"
<< std::endl;
}
};
struct triplet_counter {
int32_t K_;
int32_t ref_id_;
std::vector<Eigen::Triplet<float, int32_t>> kmer_count_;
triplet_counter(int32_t K) : K_(K), ref_id_(0), kmer_count_() { }
void operator()(const std::string& id, const std::string& seq) {
    int32_t num_kmers = static_cast<int32_t>(seq.size()) - K_ + 1;
    if (num_kmers <= 0) return;  // sequence shorter than K: no k-mers to count
    float prob_kmer = 1.0f / num_kmers;
for (int32_t start = 0; start < num_kmers; ++start) {
std::string kmer = seq.substr(start, K_);
try {
int32_t kmer_id = fasta::kmer_id(kmer);
kmer_count_.emplace_back(kmer_id, ref_id_, prob_kmer);
} catch (...) {
std::cout << " # illegal kmer = |" << kmer << "|"
<< " in ref id = " << id.substr(0, std::min<size_t>(15U, id.size()))
<< std::endl;
}
}
++ref_id_;
}
void write_matrix(const std::string& filename) const {
int32_t M = num_kmers(K_);
int32_t N = ref_id_;
Eigen::SparseMatrix<float, Eigen::RowMajor> x(M, N);
x.setFromTriplets(kmer_count_.begin(), kmer_count_.end());
x.makeCompressed();
std::fstream out(filename, std::ios::binary | std::ios::out);
if (!out) {
throw std::runtime_error("cannot open file = " + filename
+ " for writing");
}
write_val<int>(out, x.rows());
write_val<int>(out, x.cols());
write_val<int>(out, x.nonZeros());
int* outerIndices = x.outerIndexPtr();
write_array<int>(out, outerIndices, x.rows() + 1);
int* innerIndices = x.innerIndexPtr();
write_array<int>(out, innerIndices, x.nonZeros());
float* values = x.valuePtr();
write_array<float>(out, values, x.nonZeros());
out.close();
}
void report() const {
// writing kmer_count_.size(), K_, or ref_id_ causes abnormal termination
std::cout << "triplet_counter.report(): nothing to report"
<< std::endl;
}
};
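// On-disk layout written by triplet_counter::write_matrix (Eigen row-major CSR):
//   int32 rows, int32 cols, int32 nnz,
//   int32 outer_index[rows + 1], int32 inner_index[nnz], float value[nnz].
// A reader must consume the fields in exactly this order.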
int main(int argc, char* argv[]) {
  if (argc < 3) {
    std::cerr << "usage: " << argv[0] << " <fasta file> <binary output file>" << std::endl;
    return 1;
  }
  std::string fastafile = argv[1];
std::cout << "main: fasta file = " << fastafile
<< std::endl;
std::string binoutfile = argv[2];
std::cout << "main: binary output file = " << binoutfile
<< std::endl;
std::size_t K = 10;
std::cout << "main: K = " << K
<< std::endl;
int32_t M = num_kmers(K);
std::cout << "main: num kmers = " << M
<< std::endl;
// shredder shred_handler = shredder(K);
// validator validate_handler = validator();
counter count_handler = counter();
triplet_counter triplet_handler = triplet_counter(K);
coupler<counter, triplet_counter> handler = couple(count_handler, triplet_handler);
fasta::parse_file(fastafile, handler);
handler.report();
triplet_handler.write_matrix(binoutfile);
std::cout << "main: FINI." << std::endl;
return 0;
}
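// Example invocation (hypothetical paths; the binary name depends on how the
// file is built):
//   ./builder transcripts.fasta kmer_matrix.bin
// argv[1] is the input FASTA file, argv[2] the binary CSR output; K is fixed to 10 above.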
|
{"hexsha": "2f312effe76bd1c99058700362795f92344dee7c", "size": 6152, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "kmers/src/builder.cpp", "max_stars_repo_name": "bob-carpenter/case-studies", "max_stars_repo_head_hexsha": "d9ac886989b08629f5fcedf6c9e06f3f1f1faff8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 25.0, "max_stars_repo_stars_event_min_datetime": "2019-04-25T15:24:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T03:18:12.000Z", "max_issues_repo_path": "kmers/src/builder.cpp", "max_issues_repo_name": "bob-carpenter/case-studies", "max_issues_repo_head_hexsha": "d9ac886989b08629f5fcedf6c9e06f3f1f1faff8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kmers/src/builder.cpp", "max_forks_repo_name": "bob-carpenter/case-studies", "max_forks_repo_head_hexsha": "d9ac886989b08629f5fcedf6c9e06f3f1f1faff8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2019-04-27T01:16:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-17T19:55:00.000Z", "avg_line_length": 26.9824561404, "max_line_length": 85, "alphanum_fraction": 0.5879388817, "num_tokens": 1875}
|
example (a b c : ℕ) : a + b + c = a + c + b :=
begin
rw [add_assoc, add_comm b, ←add_assoc]
end
example (a b c : ℕ) : a + b + c = a + c + b :=
begin
rw [add_assoc, add_assoc, add_comm b]
end
example (a b c : ℕ) : a + b + c = a + c + b :=
begin
rw [add_assoc, add_assoc, add_comm _ b]
end
|
{"author": "Ailrun", "repo": "Theorem_Proving_in_Lean", "sha": "2eb1b5caf93c6a5a555c79e9097cf2ba5a66cf68", "save_path": "github-repos/lean/Ailrun-Theorem_Proving_in_Lean", "path": "github-repos/lean/Ailrun-Theorem_Proving_in_Lean/Theorem_Proving_in_Lean-2eb1b5caf93c6a5a555c79e9097cf2ba5a66cf68/src/ch5/ex0605.lean"}
|
using Compat
using Dates
using Infinity
using Infinity.Utils
using Random
using Test
using TimeZones: ZonedDateTime
@testset "Infinity" begin
include("utils.jl")
include("infinite.jl")
include("infextendedreal.jl")
include("infextendedtime.jl")
end
|
{"hexsha": "0b638bbc67fac393e1357d0c4b000abf603f2309", "size": 266, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "cjdoris/Infinity.jl", "max_stars_repo_head_hexsha": "c73a7142ded9bd4bdff35b86fd3891216a47714e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-05-05T07:53:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-02T23:59:04.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "cjdoris/Infinity.jl", "max_issues_repo_head_hexsha": "c73a7142ded9bd4bdff35b86fd3891216a47714e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2019-05-05T03:40:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-24T18:41:34.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "cjdoris/Infinity.jl", "max_forks_repo_head_hexsha": "c73a7142ded9bd4bdff35b86fd3891216a47714e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:27:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T11:55:25.000Z", "avg_line_length": 17.7333333333, "max_line_length": 33, "alphanum_fraction": 0.7556390977, "num_tokens": 70}
|
(* Title: HOL/HOLCF/One.thy
Author: Oscar Slotosch
*)
section {* The unit domain *}
theory One
imports Lift
begin
type_synonym
one = "unit lift"
translations
(type) "one" <= (type) "unit lift"
definition ONE :: "one"
where "ONE == Def ()"
text {* Exhaustion and Elimination for type @{typ one} *}
lemma Exh_one: "t = \<bottom> \<or> t = ONE"
unfolding ONE_def by (induct t) simp_all
lemma oneE [case_names bottom ONE]: "\<lbrakk>p = \<bottom> \<Longrightarrow> Q; p = ONE \<Longrightarrow> Q\<rbrakk> \<Longrightarrow> Q"
unfolding ONE_def by (induct p) simp_all
lemma one_induct [case_names bottom ONE]: "\<lbrakk>P \<bottom>; P ONE\<rbrakk> \<Longrightarrow> P x"
by (cases x rule: oneE) simp_all
lemma dist_below_one [simp]: "ONE \<notsqsubseteq> \<bottom>"
unfolding ONE_def by simp
lemma below_ONE [simp]: "x \<sqsubseteq> ONE"
by (induct x rule: one_induct) simp_all
lemma ONE_below_iff [simp]: "ONE \<sqsubseteq> x \<longleftrightarrow> x = ONE"
by (induct x rule: one_induct) simp_all
lemma ONE_defined [simp]: "ONE \<noteq> \<bottom>"
unfolding ONE_def by simp
lemma one_neq_iffs [simp]:
"x \<noteq> ONE \<longleftrightarrow> x = \<bottom>"
"ONE \<noteq> x \<longleftrightarrow> x = \<bottom>"
"x \<noteq> \<bottom> \<longleftrightarrow> x = ONE"
"\<bottom> \<noteq> x \<longleftrightarrow> x = ONE"
by (induct x rule: one_induct) simp_all
lemma compact_ONE: "compact ONE"
by (rule compact_chfin)
text {* Case analysis function for type @{typ one} *}
definition
one_case :: "'a::pcpo \<rightarrow> one \<rightarrow> 'a" where
"one_case = (\<Lambda> a x. seq\<cdot>x\<cdot>a)"
translations
"case x of XCONST ONE \<Rightarrow> t" == "CONST one_case\<cdot>t\<cdot>x"
"case x of XCONST ONE :: 'a \<Rightarrow> t" => "CONST one_case\<cdot>t\<cdot>x"
"\<Lambda> (XCONST ONE). t" == "CONST one_case\<cdot>t"
lemma one_case1 [simp]: "(case \<bottom> of ONE \<Rightarrow> t) = \<bottom>"
by (simp add: one_case_def)
lemma one_case2 [simp]: "(case ONE of ONE \<Rightarrow> t) = t"
by (simp add: one_case_def)
lemma one_case3 [simp]: "(case x of ONE \<Rightarrow> ONE) = x"
by (induct x rule: one_induct) simp_all
end
|
{"author": "Josh-Tilles", "repo": "isabelle", "sha": "990accf749b8a6e037d25012258ecae20d59ca62", "save_path": "github-repos/isabelle/Josh-Tilles-isabelle", "path": "github-repos/isabelle/Josh-Tilles-isabelle/isabelle-990accf749b8a6e037d25012258ecae20d59ca62/src/HOL/HOLCF/One.thy"}
|
from __future__ import print_function
import os
from numpy import *
'''
NAME
host
PURPOSE
to get properties of host galaxies, given redshift, host galaxy type and i band magnitude of QSO
INPUT:
z (redshift), mQi (i band magnitude of QSO), type ("e"=early type, "l"= late type)
OUTPUT:
magnitude of host in different bands:
mhi, mhR, mhg, mhz, mhr
AUTHORS
Kai & Adri
   based on Adri's relationship among different bands.
HISTORY
2014-07-23 Kai & aanello
'''
####Input:
z=1.
mQi=20.
type="e"
#########
if z<0.5 or z>3.965:
print("warning! out of redshift range.")
# '$OM10_DIR' is not expanded by open(); expand the environment variable explicitly
f=open(os.path.expandvars('$OM10_DIR/data/ETGcols.txt'),'r')
ETG=loadtxt(f)
f.close()
g=open(os.path.expandvars('$OM10_DIR/data/LTGcols.txt'),'r')
LTG=loadtxt(g)
g.close()
k=open(os.path.expandvars('$OM10_DIR/data/QSOcols.txt'),'r')
QSO=loadtxt(k)
k.close()
MRhQ=0.+1.*random.randn()
if type=="e":
G=ETG
else:
G=LTG
diff=list(abs(G[:,0]-z))
select = diff.index(min(diff))
mhi=mQi+G[select,1]-QSO[select,1]+MRhQ+G[select,5]
mhR=mhi-G[select,1]
mhg=mhi+G[select,2]+G[select,3]
mhz=mhi-G[select,4]
mhr=mhi+G[select,3]
print("mhi, mhR, mhg, mhz, mhr")
print(mhi, mhR, mhg, mhz, mhr)
|
{"hexsha": "536ce49e149b6a1bf9344b4d07a84103c2ef0a58", "size": 1143, "ext": "py", "lang": "Python", "max_stars_repo_path": "om10/host.py", "max_stars_repo_name": "drphilmarshall/OM10", "max_stars_repo_head_hexsha": "009c16f0ef4e1c5f8f78c78df3c7711b7be24938", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-02-17T19:43:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-19T09:30:53.000Z", "max_issues_repo_path": "om10/host.py", "max_issues_repo_name": "drphilmarshall/OM10", "max_issues_repo_head_hexsha": "009c16f0ef4e1c5f8f78c78df3c7711b7be24938", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2015-02-06T19:25:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-09T07:57:04.000Z", "max_forks_repo_path": "om10/host.py", "max_forks_repo_name": "drphilmarshall/OM10", "max_forks_repo_head_hexsha": "009c16f0ef4e1c5f8f78c78df3c7711b7be24938", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2015-01-29T23:55:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-16T03:06:38.000Z", "avg_line_length": 18.737704918, "max_line_length": 101, "alphanum_fraction": 0.6482939633, "include": true, "reason": "from numpy", "num_tokens": 416}
|
import spotipy
import os
import spotipy.util as util
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import altair as alt
from sklearn.preprocessing import MinMaxScaler
import plotly.graph_objects as go
from kneed import KneeLocator
import streamlit as st
from math import sqrt
from matplotlib import cm
import SessionState
from spotipy.oauth2 import SpotifyClientCredentials
st.set_page_config(page_title='Playlist Blendr', page_icon=":notes:")
session_state = SessionState.get(checkboxed=False, num=2)
columns = ['name', 'artist', 'track_URI', 'playlist', 'acousticness', 'danceability', 'energy', 'instrumentalness', 'liveness', 'speechiness', 'tempo', 'valence']
def main():
st.markdown("## Welcome to Playlist Blendr :notes::chart_with_upwards_trend:")
st.markdown("This web app uses machine learning techniques to cluster music by similar audio features so that you can cultivate a cohesive vibe to satisfy your listening needs!")
num_playlists = st.sidebar.number_input('How many playlists would you like to cluster?', 1, 5, 2)
if session_state.num != num_playlists:
session_state.num = num_playlists
session_state.checkboxed = False
playlists = playlist_user_input(num_playlists)
if st.sidebar.button("Run Algorithm") or session_state.checkboxed:
session_state.checkboxed = True
print(playlists)
# acquire the data via Spotify API
df = concatenate_playlists(playlists)
if df is None:
st.warning("One of your playlist URIs was not entered properly")
st.stop()
else:
# dataframe for inspection and exploration
st.write(df)
# implement k-means clustering with PCA
clustered_df, n_clusters = kmeans(df)
# make radar chart to help understand the cluster differences
cluster_labels = clustered_df['Cluster']
orig = clustered_df.drop(columns=['Cluster', "Component 1", "Component 2"])
orig.insert(4, "cluster", cluster_labels)
norm_df = make_normalized_df(orig, 5)
fig, maxes = make_radar_chart(norm_df, n_clusters)
st.write(fig)
# interactive visualizations of clusters on 2D plane
range_ = get_color_range(n_clusters)
visualize_clusters(clustered_df, n_clusters, range_)
# within-cluster exploration
explore_df = orig.copy()
keys = sorted(list(explore_df["cluster"].unique()))
cluster = st.selectbox("Choose a cluster to preview", keys, index=0)
preview_df = preview_cluster_playlist(explore_df, cluster)
st.write(preview_df[preview_df.columns[:5]])
x_axis = list(preview_df['name'])
y_axis = st.selectbox("Choose a variable for the y-axis", list(preview_df.columns)[5:], index=maxes[cluster])
visualize_data(preview_df, x_axis, y_axis, n_clusters, range_)
else:
pass
def playlist_user_input(num_playlists):
playlists = []
defaults = ["spotify:playlist:37i9dQZF1DX9UhtB5CtZ7e", "spotify:playlist:37i9dQZF1DWSP55jZj2ES3",
"spotify:playlist:37i9dQZF1DX4OzrY981I1W",
"spotify:playlist:37i9dQZF1DX8dTWjpijlub",
"spotify:playlist:37i9dQZF1DWUE76cNNotSg"
]
st.sidebar.write("To locate a playlist URI, go to the playlist on Spotify, click the '...' button at the top, then go to Share > Copy Spotify URI. Some examples are pre-filled :)")
for i in range(num_playlists):
playlists.append(st.sidebar.text_input("Playlist URI " + str(i+1), defaults[i]))
return playlists
def concatenate_playlists(playlists):
global columns
print("concatenate playlists")
df = pd.DataFrame(columns=columns)
if all(playlists):
for playlist_uri in playlists:
df = pd.concat([df, get_features_for_playlist(os.environ.get('USERNAME'), playlist_uri)], ignore_index=True, axis=0)
return df
else:
return None
# Get Spotipy credentials from config
def load_config():
    import yaml  # yaml is used below but was never imported at module level
    stream = open('config.yaml')
user_config = yaml.load(stream, Loader=yaml.FullLoader)
return user_config
@st.cache(allow_output_mutation=True)
def get_token():
print("generating token")
# token = util.prompt_for_user_token(
# username=os.environ.get('USERNAME'),
# scope='playlist-read-private',
# client_id=os.environ.get('CLIENT_ID'),
# client_secret=os.environ.get('CLIENT_SECRET'),
# redirect_uri=os.environ.get('REDIRECT_URI'))
# sp = spotipy.Spotify(auth=token)
client_credentials_manager = SpotifyClientCredentials(client_id=os.environ.get('CLIENT_ID'), client_secret=os.environ.get('CLIENT_SECRET'))
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
return sp
# A function to extract track names and URIs from a playlist
def get_playlist_info(username, playlist_uri):
# initialize vars
offset = 0
tracks, uris, names, artists = [], [], [], []
# get playlist id and name from URI
playlist_id = playlist_uri.split(':')[2]
playlist_name = sp.user_playlist(username, playlist_id)['name']
# get all tracks in given playlist (max limit is 100 at a time --> use offset)
while True:
results = sp.user_playlist_tracks(username, playlist_id, offset=offset)
tracks += results['items']
if results['next'] is not None:
offset += 100
else:
break
# get track metadata
for track in tracks:
names.append(track['track']['name'])
artists.append(track['track']['artists'][0]['name'])
uris.append(track['track']['uri'])
return playlist_name, names, artists, uris
@st.cache(allow_output_mutation=True)
def get_features_for_playlist(username, uri):
# initialize_df
global columns
temp_df = pd.DataFrame(columns=columns)
# get all track metadata from given playlist
playlist_name, names, artists, uris = get_playlist_info(username, uri)
# iterate through each track to get audio features and save data into dataframe
for name, artist, track_uri in zip(names, artists, uris):
# access audio features for given track URI via spotipy
audio_features = sp.audio_features(track_uri)
# get relevant audio features
feature_subset = [audio_features[0][col] for col in temp_df.columns if col not in ["name", "artist", "track_URI", "playlist"]]
# compose a row of the dataframe by flattening the list of audio features
row = [name, artist, track_uri, playlist_name, *feature_subset]
temp_df.loc[len(temp_df.index)] = row
return temp_df
def optimal_number_of_clusters(wcss):
x1, y1 = 2, wcss[0]
x2, y2 = 20, wcss[len(wcss)-1]
distances = []
for i in range(len(wcss)):
x0 = i+2
y0 = wcss[i]
numerator = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)
denominator = sqrt((y2 - y1)**2 + (x2 - x1)**2)
distances.append(numerator/denominator)
return distances.index(max(distances)) + 1
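# The helper above is the classic elbow heuristic: it measures the perpendicular
# distance from each WCSS point to the straight line through the first and last
# points and picks the k with the largest distance. Note that distances[i]
# corresponds to k = i + 2 while the function returns i + 1; kmeans() below
# relies on KneeLocator (the same idea) rather than this helper.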
def visualize_data(df, x_axis, y_axis, n_clusters, range_):
graph = alt.Chart(df.reset_index()).mark_bar().encode(
x=alt.X('name', sort='y'),
y=alt.Y(str(y_axis)+":Q"),
color=alt.Color('cluster', scale=alt.Scale(domain=[i for i in range(n_clusters)], range=range_)),
tooltip=['name', 'artist']
).interactive()
st.altair_chart(graph, use_container_width=True)
def num_components_graph(ax, num_columns, evr):
ax.plot(range(1, num_columns+1), evr.cumsum(), 'bo-')
ax.set_title('Explained Variance by Components')
ax.set(xlabel='Number of Components', ylabel='Cumulative Explained Variance')
ax.hlines(0.8, xmin=1, xmax=num_columns, linestyles='dashed')
return ax
def num_clusters_graph(ax, max_clusters, wcss):
ax.plot([i for i in range(1, max_clusters)], wcss, 'bo-')
ax.set_title('Optimal Number of Clusters')
ax.set(xlabel='Number of Clusters [k]', ylabel='Within Cluster Sum of Squares (WCSS)')
ax.vlines(KneeLocator([i for i in range(1, max_clusters)], wcss, curve='convex', direction='decreasing').knee, ymin=0, ymax=max(wcss), linestyles='dashed')
return ax
@st.cache(allow_output_mutation=True)
def kmeans(df):
df_X = df.drop(columns=df.columns[:4])
print("Standard scaler and PCA")
scaler = StandardScaler()
X_std = scaler.fit_transform(df_X)
pca = PCA()
pca.fit(X_std)
evr = pca.explained_variance_ratio_
for i, exp_var in enumerate(evr.cumsum()):
if exp_var >= 0.8:
n_comps = i + 1
break
print("Finding optimal number of components", n_comps)
pca = PCA(n_components=n_comps)
pca.fit(X_std)
scores_pca = pca.transform(X_std)
wcss = []
max_clusters = 11
for i in range(1, max_clusters):
kmeans_pca = KMeans(i, init='k-means++', random_state=42)
kmeans_pca.fit(scores_pca)
wcss.append(kmeans_pca.inertia_)
n_clusters = KneeLocator([i for i in range(1, max_clusters)], wcss, curve='convex', direction='decreasing').knee
print("Finding optimal number of clusters", n_clusters)
# fig, (ax1, ax2) = plt.subplots(1, 2)
# ax1 = num_components_graph(ax1, len(df_X.columns), evr)
# ax2 = num_clusters_graph(ax2, max_clusters, wcss)
# fig.tight_layout()
print("Performing KMeans")
kmeans_pca = KMeans(n_clusters=n_clusters, init='k-means++', random_state=42)
kmeans_pca.fit(scores_pca)
df_seg_pca_kmeans = pd.concat([df_X.reset_index(drop=True), pd.DataFrame(scores_pca)], axis=1)
df_seg_pca_kmeans.columns.values[(-1 * n_comps):] = ["Component " + str(i+1) for i in range(n_comps)]
df_seg_pca_kmeans['Cluster'] = kmeans_pca.labels_
df['Cluster'] = df_seg_pca_kmeans['Cluster']
df['Component 1'] = df_seg_pca_kmeans['Component 1']
df['Component 2'] = df_seg_pca_kmeans['Component 2']
return df, n_clusters
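# Pipeline summary for kmeans() above: standardize features -> PCA, keeping the
# smallest number of components whose cumulative explained variance reaches 80%
# -> run KMeans for k = 1..10 and pick k at the WCSS elbow (KneeLocator) ->
# refit and attach cluster labels plus the first two PCA components to df.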
@st.cache(allow_output_mutation=True)
def get_color_range(n_clusters):
cmap = cm.get_cmap('tab20b')
range_ = []
for i in range(n_clusters):
color = 'rgb('
mapped = cmap(i/n_clusters)
for j in range(3):
color += str(int(mapped[j] * 255))
if j != 2:
color += ", "
else:
color += ")"
range_.append(color)
return range_
def visualize_clusters(df, n_clusters, range_):
graph = alt.Chart(df.reset_index()).mark_point(filled=True, size=60).encode(
x=alt.X('Component 2'),
y=alt.Y('Component 1'),
shape=alt.Shape('playlist', scale=alt.Scale(range=["circle", "diamond", "square", "triangle-down", "triangle-up"])),
color=alt.Color('Cluster', scale=alt.Scale(domain=[i for i in range(n_clusters)], range=range_)),
tooltip=['name', 'artist']
).interactive()
st.altair_chart(graph, use_container_width=True)
@st.cache(allow_output_mutation=True)
def make_normalized_df(df, col_sep):
print(len(df))
non_features = df[df.columns[:col_sep]]
features = df[df.columns[col_sep:]]
norm = MinMaxScaler().fit_transform(features)
scaled = pd.DataFrame(norm, index=df.index, columns = df.columns[col_sep:])
return pd.concat([non_features, scaled], axis=1)
@st.cache(allow_output_mutation=True)
def make_radar_chart(norm_df, n_clusters):
fig = go.Figure()
cmap = cm.get_cmap('tab20b')
angles = list(norm_df.columns[5:])
angles.append(angles[0])
layoutdict = dict(
radialaxis=dict(
visible=True,
range=[0, 1]
))
maxes = dict()
for i in range(n_clusters):
subset = norm_df[norm_df['cluster'] == i]
data = [np.mean(subset[col]) for col in angles[:-1]]
maxes[i] = data.index(max(data))
data.append(data[0])
fig.add_trace(go.Scatterpolar(
r=data,
theta=angles,
# fill='toself',
# fillcolor = 'rgba' + str(cmap(i/n_clusters)),
mode='lines',
line_color='rgba' + str(cmap(i/n_clusters)),
name="Cluster " + str(i)))
fig.update_layout(
polar=layoutdict,
showlegend=True
)
fig.update_traces()
return fig, maxes
@st.cache(allow_output_mutation=True)
def preview_cluster_playlist(df, cluster):
df = df[df['cluster'] == cluster]
# if st.button("Export to playlist"):
# result = sp.user_playlist_create(user_config['username'], 'cluster'+str(cluster), public=True, collaborative=False, description='')
# playlist_id = result['id']
# songs = list(df.loc[df['cluster'] == cluster]['track_URI'])
# if len(songs) > 100:
# sp.playlist_add_items(playlist_id, songs[:100])
# sp.playlist_add_items(playlist_id, songs[100:])
# else:
# sp.playlist_add_items(playlist_id, songs)
# else:
# pass
return df
if __name__ == "__main__":
# user_config = load_config()
# Initialize Spotify API token
sp = get_token()
main()
|
{"hexsha": "06114e9fc2eb03f89c24be8bbe8409a39ff44832", "size": 13293, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "sejaldua/spotify-ops", "max_stars_repo_head_hexsha": "a5676668734d409e1706a3bebe072c58658d469e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2020-12-30T04:54:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T02:11:18.000Z", "max_issues_repo_path": "app.py", "max_issues_repo_name": "sejaldua/spotify-ops", "max_issues_repo_head_hexsha": "a5676668734d409e1706a3bebe072c58658d469e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "sejaldua/spotify-ops", "max_forks_repo_head_hexsha": "a5676668734d409e1706a3bebe072c58658d469e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2021-01-03T09:34:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T23:09:48.000Z", "avg_line_length": 40.0391566265, "max_line_length": 184, "alphanum_fraction": 0.6577898142, "include": true, "reason": "import numpy", "num_tokens": 3337}
|
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
analyse results from BluePyOpt checkpoints
author: András Ecker last update: 11.2017
"""
import os
import sys
import pickle
import numpy as np
import sim_evaluator
import matplotlib.pyplot as plt
SWBasePath = os.path.sep.join(os.path.abspath('__file__').split(os.path.sep)[:-2])
sys.path.insert(0, os.path.sep.join([SWBasePath, 'scripts']))
from detect_oscillations import *
from plots import *
# Optimized parameters (the bounds might be different, but it's necessary to have this here...)
optconf = [("J_PyrInh_", 0.01, 0.1),
("J_BasExc_", 4.5, 5.5),
("J_BasInh_", 0.25, 1.),
("WeeMult_", 2.5, 3.5),
("J_PyrMF_", 20., 40.),
("rate_MF_", 10., 25.)]
def load_checkpoints(fName):
"""loads in saved checkpoints from pkl"""
    cp = pickle.load(open(fName, "rb"))  # checkpoints are binary pickles
pop = cp["generation"]
hof = cp["halloffame"]
log = cp["logbook"]
hist = cp["history"]
# summary figure (about optimization)
plot_evolution(log.select('gen'), np.array(log.select('min')), np.array(log.select('avg')),
np.array(log.select('std')), "fittnes_evolution")
return pop, hof, log, hist
def run_simulation(Wee, best_indiv):
"""reruns simulation, using the optimizations (BluePyOpt's) structure"""
evaluator = sim_evaluator.Brian2Evaluator(Wee, optconf)
sme, smi, popre, popri = evaluator.generate_model(best_indiv, verbose=True)
if sme.num_spikes > 0 and smi.num_spikes > 0: # check if there is any activity
# analyse spikes
spikeTimesE, spikingNeuronsE, poprE, ISIhist, bin_edges = preprocess_monitors(sme, popre)
spikeTimesI, spikingNeuronsI, poprI = preprocess_monitors(smi, popri, calc_ISI=False)
# detect replay
avgReplayInterval = replay(ISIhist[3:16]) # bins from 150 to 850 (range of interest)
print "replay: %.3f"%avgReplayInterval
# analyse rates
meanEr, rEAC, maxEAC, tMaxEAC, fE, PxxE = analyse_rate(poprE)
meanIr, rIAC, maxIAC, tMaxIAC, fI, PxxI = analyse_rate(poprI)
maxEACR, tMaxEACR, avgRippleFE, ripplePE = ripple(rEAC, fE, PxxE)
maxIACR, tMaxIACR, avgRippleFI, ripplePI = ripple(rIAC, fI, PxxI)
avgGammaFE, gammaPE = gamma(fE, PxxE)
avgGammaFI, gammaPI = gamma(fI, PxxI)
# print out some info
print "Mean excitatory rate: %.3f"%meanEr
print "Mean inhibitory rate: %.3f"%meanIr
print "Average exc. ripple freq: %.3f"%avgRippleFE
print "Exc. ripple power: %.3f"%ripplePE
print "Average exc. gamma freq: %.3f"%avgGammaFE
print "Exc. gamma power: %.3f"%gammaPE
print "Average inh. ripple freq: %.3f"%avgRippleFI
print "Inh. ripple power: %.3f"%ripplePI
print "Average inh. gamma freq: %.3f"%avgGammaFI
print "Inh. gamma power: %.3f"%gammaPI
print "--------------------------------------------------"
# plot results
plot_raster_ISI(spikeTimesE, spikingNeuronsE, poprE, [ISIhist, bin_edges], "blue", multiplier_=1)
plot_PSD(poprE, rEAC, fE, PxxE, "Pyr_population", "blue", multiplier_=1)
plot_PSD(poprI, rIAC, fI, PxxI, "Bas_population", "green", multiplier_=1)
plot_zoomed(spikeTimesE, spikingNeuronsE, poprE, "Pyr_population", "blue", multiplier_=1)
plot_zoomed(spikeTimesI, spikingNeuronsI, poprI, "Bas_population", "green", multiplier_=1, Pyr_pop=False)
else: # if there is no activity the auto-correlation function will throw an error!
print "No activity !"
print "--------------------------------------------------"
if __name__ == "__main__":
fIn = "wmxR_sym.txt"
cpIn = "checkpoint_sym_4020_v1.pkl"
# load in checkpoints
fName = os.path.join(SWBasePath, "optimization", "checkpoints", cpIn)
_, hof, _, _ = load_checkpoints(fName)
# Get best individual
best = hof[0]
pnames = [name for name, _, _ in optconf]
for pname, value in zip(pnames, best):
        print("%s = %.2f" % (pname, value))
    print("Fitness value: ", best.fitness.values)
# load weight matrix
fName = os.path.join(SWBasePath, "files", fIn)
Wee = load_Wee(fName)
# rerun simulation
run_simulation(Wee, best)
plt.show()
|
{"hexsha": "fe7b5c7d9f40be9f7bbba1cccdf53118b2219578", "size": 4382, "ext": "py", "lang": "Python", "max_stars_repo_path": "optimization/analyse_checkpoints.py", "max_stars_repo_name": "andrisecker/KOKI_sharp_waves", "max_stars_repo_head_hexsha": "e6375a2574559172a7036c2def177064a7b6def8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "optimization/analyse_checkpoints.py", "max_issues_repo_name": "andrisecker/KOKI_sharp_waves", "max_issues_repo_head_hexsha": "e6375a2574559172a7036c2def177064a7b6def8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "optimization/analyse_checkpoints.py", "max_forks_repo_name": "andrisecker/KOKI_sharp_waves", "max_forks_repo_head_hexsha": "e6375a2574559172a7036c2def177064a7b6def8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2148760331, "max_line_length": 113, "alphanum_fraction": 0.6207211319, "include": true, "reason": "import numpy", "num_tokens": 1303}
|
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from six import StringIO
from landlab import (
CLOSED_BOUNDARY,
HexModelGrid,
NetworkModelGrid,
RadialModelGrid,
RasterModelGrid,
VoronoiDelaunayGrid,
)
from landlab.grid.hex import from_dict as hex_from_dict
from landlab.grid.raster import from_dict as raster_from_dict
def test_raster_old_from_dict_deprecated():
params = {"NUM_COLS": 10, "NUM_ROWS": 5, "GRID_SPACING": 4}
with pytest.deprecated_call():
mg = raster_from_dict(params)
assert mg.shape == (5, 10)
assert mg.dx == 4
assert mg.dy == 4
def test_hex_old_from_dict_deprecated():
params = {"NUM_COLS": 10, "NUM_ROWS": 5, "GRID_SPACING": 4}
with pytest.deprecated_call():
mg = hex_from_dict(params)
assert mg.number_of_nodes == 54
def test_raster_from_file():
file_strn = (
"shape:\n"
" - 10\n"
" - 20\n"
"xy_spacing:\n"
" - 25\n"
" - 45\n"
"bc:\n"
" right: 'closed'\n"
" top: 'closed'\n"
" left: 'closed'\n"
" bottom: 'closed'\n"
"xy_of_reference:\n"
" - 12345\n"
" - 678910\n"
"xy_of_lower_left:\n"
" - 35\n"
" - 55\n"
"axis_name:\n"
" - 'spam'\n"
" - 'eggs'\n"
"axis_units:\n"
" - 'smoot'\n"
" - 'parsec'"
)
file_like = StringIO(file_strn)
mg = RasterModelGrid.from_file(file_like)
# assert things.
assert mg.shape == (10, 20)
assert mg.dx == 25
assert mg.dy == 45
assert (mg.x_of_node.min(), mg.y_of_node.min()) == (35, 55)
assert np.all(mg.status_at_node[mg.boundary_nodes] == CLOSED_BOUNDARY)
assert mg.axis_units == ("smoot", "parsec")
assert mg.axis_name == ("spam", "eggs")
assert mg.xy_of_reference == (12345, 678910)
def test_raster_from_dict():
params = {
"shape": (10, 20),
"xy_spacing": (25, 45),
"bc": {
"right": "closed",
"top": "closed",
"left": "closed",
"bottom": "closed",
},
"xy_of_lower_left": (35, 55),
"axis_name": ("spam", "eggs"),
"axis_units": ("smoot", "parsec"),
"xy_of_reference": (12345, 678910),
}
mg = RasterModelGrid.from_dict(params)
# assert things.
assert mg.shape == (10, 20)
assert mg.dx == 25
assert mg.dy == 45
assert (mg.x_of_node.min(), mg.y_of_node.min()) == (35, 55)
assert np.all(mg.status_at_node[mg.boundary_nodes] == CLOSED_BOUNDARY)
assert mg.axis_units == ("smoot", "parsec")
assert mg.axis_name == ("spam", "eggs")
assert mg.xy_of_reference == (12345, 678910)
def test_hex_from_dict():
params = {
"base_num_rows": 5,
"base_num_cols": 4,
"dx": 2.0,
"xy_of_lower_left": (35, 55),
"axis_name": ("spam", "eggs"),
"axis_units": ("smoot", "parsec"),
"xy_of_reference": (12345, 678910),
}
mg = HexModelGrid.from_dict(params)
# assert things.
true_x_node = np.array(
[
37.0,
39.0,
41.0,
43.0,
36.0,
38.0,
40.0,
42.0,
44.0,
35.0,
37.0,
39.0,
41.0,
43.0,
45.0,
36.0,
38.0,
40.0,
42.0,
44.0,
37.0,
39.0,
41.0,
43.0,
]
)
assert_array_equal(true_x_node, mg.x_of_node)
assert (mg.x_of_node.min(), mg.y_of_node.min()) == (35, 55)
assert mg.axis_units == ("smoot", "parsec")
assert mg.axis_name == ("spam", "eggs")
assert mg.xy_of_reference == (12345, 678910)
def test_radial_from_dict():
params = {
"num_shells": 5,
"dr": 2.0,
"xy_of_center": (35, 55),
"axis_name": ("spam", "eggs"),
"axis_units": ("smoot", "parsec"),
"xy_of_reference": (12345, 678910),
}
mg = RadialModelGrid.from_dict(params)
# assert things.
assert mg.number_of_nodes == 95
assert mg.xy_of_center == (35, 55)
assert [35, 55] in mg.xy_of_node
assert mg.axis_units == ("smoot", "parsec")
assert mg.axis_name == ("spam", "eggs")
assert mg.xy_of_reference == (12345, 678910)
def test_network_from_dict():
params = {
"yx_of_node": [(0, 1, 2, 2), (0, 0, -1, 1)],
"links": ((1, 0), (2, 1), (3, 1)),
"axis_name": ("spam", "eggs"),
"axis_units": ("smoot", "parsec"),
"xy_of_reference": (12345, 678910),
}
mg = NetworkModelGrid.from_dict(params)
assert_array_equal(mg.x_of_node, np.array([0.0, 0.0, -1.0, 1.0]))
assert_array_equal(mg.y_of_node, np.array([0.0, 1.0, 2.0, 2.0]))
assert_array_equal(mg.nodes_at_link, np.array([[0, 1], [2, 1], [1, 3]]))
assert mg.axis_units == ("smoot", "parsec")
assert mg.axis_name == ("spam", "eggs")
assert mg.xy_of_reference == (12345, 678910)
def test_network_from_file():
file_strn = (
"yx_of_node:\n"
" - [0, 1, 2, 2]\n"
" - [0, 0, -1, 1]\n"
"links:\n"
" - [1, 0]\n"
" - [2, 1]\n"
" - [3, 1]\n"
"xy_of_reference:\n"
" - 12345\n"
" - 678910\n"
"axis_name:\n"
" - 'spam'\n"
" - 'eggs'\n"
"axis_units:\n"
" - 'smoot'\n"
" - 'parsec'"
)
file_like = StringIO(file_strn)
mg = NetworkModelGrid.from_file(file_like)
assert_array_equal(mg.x_of_node, np.array([0.0, 0.0, -1.0, 1.0]))
assert_array_equal(mg.y_of_node, np.array([0.0, 1.0, 2.0, 2.0]))
assert_array_equal(mg.nodes_at_link, np.array([[0, 1], [2, 1], [1, 3]]))
assert mg.axis_units == ("smoot", "parsec")
assert mg.axis_name == ("spam", "eggs")
assert mg.xy_of_reference == (12345, 678910)
def test_voronoi_from_dict():
x = [0, 0.1, 0.2, 0.3, 1, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 2.3]
y = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
params = {
"x": x,
"y": y,
"axis_name": ("spam", "eggs"),
"axis_units": ("smoot", "parsec"),
"xy_of_reference": (12345, 678910),
}
mg = VoronoiDelaunayGrid.from_dict(params)
assert mg.axis_units == ("smoot", "parsec")
assert mg.axis_name == ("spam", "eggs")
assert mg.xy_of_reference == (12345, 678910)
true_x = np.array([0.0, 1.0, 2.0, 0.1, 1.1, 2.1, 0.2, 1.2, 2.2, 0.3, 1.3, 2.3])
true_y = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0])
true_nodes_at_node = np.array(
[
[1, 3, -1, -1, -1, -1],
[2, 4, 3, 0, -1, -1],
[5, 4, 1, -1, -1, -1],
[4, 6, 0, 1, -1, -1],
[5, 7, 6, 3, 1, 2],
[8, 7, 4, 2, -1, -1],
[7, 9, 3, 4, -1, -1],
[8, 10, 9, 6, 4, 5],
[11, 10, 7, 5, -1, -1],
[10, 6, 7, -1, -1, -1],
[11, 9, 7, 8, -1, -1],
[10, 8, -1, -1, -1, -1],
]
)
assert_array_equal(mg.node_x, true_x)
assert_array_equal(mg.node_y, true_y)
assert_array_equal(mg.adjacent_nodes_at_node, true_nodes_at_node)
|
{"hexsha": "0ba7084200eccd8573752809f95462c6da930aa2", "size": 7332, "ext": "py", "lang": "Python", "max_stars_repo_path": "landlab/grid/tests/test_constructors.py", "max_stars_repo_name": "cctrunz/landlab", "max_stars_repo_head_hexsha": "4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "landlab/grid/tests/test_constructors.py", "max_issues_repo_name": "cctrunz/landlab", "max_issues_repo_head_hexsha": "4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-03-16T02:34:08.000Z", "max_issues_repo_issues_event_max_datetime": "2016-04-20T19:31:30.000Z", "max_forks_repo_path": "landlab/grid/tests/test_constructors.py", "max_forks_repo_name": "cctrunz/landlab", "max_forks_repo_head_hexsha": "4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5291828794, "max_line_length": 83, "alphanum_fraction": 0.504091653, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2556}
|
from warnings import warn
import os
from multiprocessing import Pool
import numpy as np
from tqdm import tqdm
from keras.models import Model
from scipy.stats import gaussian_kde
from coverage.tools.surprise_adequacy.sa_utils import *
from coverage.tools.common_utils import ScoreUtils
from coverage.tools.deepspeech.deepspeech_utils import DSDataUtils
def _aggr_output(x):
return [np.mean(x[..., j]) for j in range(x.shape[-1])]
def _get_saved_path(base_path, dataset, network, train_size: int, dtype, layer_names):
"""Determine saved path of ats and pred
Args:
base_path (str): Base save path.
dataset (str): Name of dataset.
dtype (str): Name of dataset type (e.g., train, test, fgsm, ...).
layer_names (list): List of layer names.
Returns:
ats_path: File path of ats.
pred_path: File path of pred (independent of layers)
"""
joined_layer_names = "_".join(layer_names)
return (
os.path.join(
base_path,
dataset + "_" + network + "_" + dtype + "_" +
str(train_size) + "_" + joined_layer_names + "_ats" + ".npy",
),
os.path.join(base_path, dataset + "_" + network + "_" +
dtype + "_" + str(train_size) + "_pred" + ".npy"),
)
def get_ats(
model,
dataset,
name,
layer_names,
save_path=None,
batch_size=128,
is_classification=True,
num_classes=10,
num_proc=10,
dataset_name=None,
):
"""Extract activation traces of dataset from model.
Args:
model (keras model): Subject model.
dataset (list): Set of inputs fed into the model.
name (str): Name of input set.
layer_names (list): List of selected layer names.
save_path (tuple): Paths of being saved ats and pred.
batch_size (int): Size of batch when serving.
is_classification (bool): Task type, True if classification task or False.
num_classes (int): The number of classes (labels) in the dataset.
num_proc (int): The number of processes for multiprocessing.
Returns:
ats (list): List of (layers, inputs, neuron outputs).
pred (list): List of predicted classes.
"""
temp_model = Model(
inputs=model.input,
outputs=[model.get_layer(
layer_name).output for layer_name in layer_names],
)
prefix = info("[" + name + "] ")
if is_classification:
p = Pool(num_proc)
print(prefix + "Model serving")
# pred = model.predict_classes(dataset, batch_size=batch_size, verbose=1)
predict = model.predict(dataset, batch_size=batch_size, verbose=1)
if dataset_name == "speech-commands":
pred_words = ScoreUtils.speech_commands_prediction(predict)
pred = [DSDataUtils.get_words_idx(s) for s in pred_words]
else:
pred = np.argmax(predict, axis=1)
if len(layer_names) == 1:
layer_outputs = [
temp_model.predict(dataset, batch_size=batch_size, verbose=1)
]
else:
layer_outputs = temp_model.predict(
dataset, batch_size=batch_size, verbose=1
)
print(prefix + "Processing ATs")
ats = None
for layer_name, layer_output in zip(layer_names, layer_outputs):
print("Layer: " + layer_name)
            # (primarily for convolutional layers - note that Kim et al. used ndim == 3)
            # I think the threshold here should be 2: the output shape may be
            # (batch_size, channel1, channel2), and we want to reduce it to
            # (batch_size, channel2).
if layer_output[0].ndim >= 2:
# For convolutional layers
layer_matrix = np.array(
p.map(_aggr_output, [layer_output[i]
for i in range(len(dataset))])
)
else:
layer_matrix = np.array(layer_output)
if ats is None:
ats = layer_matrix
else:
ats = np.append(ats, layer_matrix, axis=1)
layer_matrix = None
else:
p = Pool(num_proc)
pred = []
print(prefix + "Model serving")
if len(layer_names) == 1:
layer_outputs = [
temp_model.predict(dataset, batch_size=batch_size, verbose=1)
]
else:
layer_outputs = temp_model.predict(
dataset, batch_size=batch_size, verbose=1
)
print(prefix + "Processing ATs")
ats = None
for layer_name, layer_output in zip(layer_names, layer_outputs):
print("Layer: " + layer_name)
if layer_output[0].ndim == 3:
# For convolutional layers
layer_matrix = np.array(
p.map(_aggr_output, [layer_output[i]
for i in range(len(dataset))])
)
else:
layer_matrix = np.array(layer_output)
if ats is None:
ats = layer_matrix
else:
ats = np.append(ats, layer_matrix, axis=1)
layer_matrix = None
# if save_path is not None:
# np.save(save_path[0], ats)
# np.save(save_path[1], pred)
return ats, pred
def find_closest_at(at, train_ats):
"""The closest distance between subject AT and training ATs.
Args:
at (list): List of activation traces of an input.
train_ats (list): List of activation traces in training set (filtered)
Returns:
dist (int): The closest distance.
at (list): Training activation trace that has the closest distance.
"""
dist = np.linalg.norm(at - train_ats, axis=1)
return (min(dist), train_ats[np.argmin(dist)])
def _get_train_target_ats(model, x_train, x_target, target_name, layer_names, args):
"""Extract ats of train and target inputs. If there are saved files, then skip it.
Args:
model (keras model): Subject model.
x_train (list): Set of training inputs.
x_target (list): Set of target (test or adversarial) inputs.
target_name (str): Name of target set.
layer_names (list): List of selected layer names.
args: command-line arguments.
Returns:
train_ats (list): ats of train set.
train_pred (list): pred of train set.
target_ats (list): ats of target set.
target_pred (list): pred of target set.
"""
train_size = len(x_train)
saved_train_path = _get_saved_path(
args.save_path, args.dataset, args.network, train_size, "train", layer_names)
if os.path.exists(saved_train_path[0]):
print(infog("Found saved {} ATs, skip serving".format("train")))
# In case train_ats is stored in a disk
train_ats = np.load(saved_train_path[0])
train_pred = np.load(saved_train_path[1])
else:
train_ats, train_pred = get_ats(
model,
x_train,
"train",
layer_names,
num_classes=args.num_classes,
is_classification=args.is_classification,
save_path=saved_train_path,
dataset_name=args.dataset,
)
if saved_train_path is not None:
np.save(saved_train_path[0], train_ats)
np.save(saved_train_path[1], train_pred)
print(infog("train ATs saved at " + saved_train_path[0]))
saved_target_path = _get_saved_path(
args.save_path, args.dataset, args.network, train_size, target_name, layer_names
)
if True:  # target ATs are always recomputed here; the saved-file cache check is skipped
target_ats, target_pred = get_ats(
model,
x_target,
target_name,
layer_names,
num_classes=args.num_classes,
is_classification=args.is_classification,
save_path=saved_target_path,
dataset_name=args.dataset,
)
print(infog(target_name + " ATs saved at " + saved_target_path[0]))
return train_ats, train_pred, target_ats, target_pred
def generate_at(model, x_train, args, layer_names):
train_size = len(x_train)
saved_train_path = _get_saved_path(
args.save_path, args.dataset, args.network, train_size, "train", layer_names)
if os.path.exists(saved_train_path[0]):
print(infog("Found saved {} ATs, skip serving".format("train")))
print("Skip training ats generation")
else:
train_ats, train_pred = get_ats(
model,
x_train,
"train",
layer_names,
num_classes=args.num_classes,
is_classification=args.is_classification,
save_path=saved_train_path,
)
if saved_train_path is not None:
np.save(saved_train_path[0], train_ats)
np.save(saved_train_path[1], train_pred)
print(infog("train ATs saved at " + saved_train_path[0]))
def fetch_dsa(model, x_train, x_target, target_name, layer_names, args):
# """Distance-based SA
# Args:
# model (keras model): Subject model.
# x_train (list): Set of training inputs.
# x_target (list): Set of target (test or adversarial) inputs.
# target_name (str): Name of target set.
# layer_names (list): List of selected layer names.
# args: command-line arguments.
# Returns:
# dsa (list): List of dsa for each target input.
# """
assert args.is_classification
prefix = info("[" + target_name + "] ")
train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(
model, x_train, x_target, target_name, layer_names, args
)
class_matrix = {}
all_idx = []
for i, label in enumerate(train_pred):
if label not in class_matrix:
class_matrix[label] = []
class_matrix[label].append(i)
all_idx.append(i)
dsa = []
print(prefix + "Fetching DSA")
for i, at in enumerate(tqdm(target_ats)):
label = target_pred[i]
a_dist, a_dot = find_closest_at(at, train_ats[class_matrix[label]])
b_dist, _ = find_closest_at(
a_dot, train_ats[list(set(all_idx) - set(class_matrix[label]))]
)
dsa.append(a_dist / b_dist)
return dsa
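# The DSA value computed above is the ratio a_dist / b_dist: the distance
# from a target trace to its nearest same-class training trace, divided by
# the distance from that neighbor to the nearest training trace of any other
# class. A minimal, self-contained sketch of that ratio (illustrative only;
# the arrays below are hypothetical stand-ins and this helper is never called):
def _dsa_example():
    rng = np.random.default_rng(0)
    same_class_ats = rng.random((50, 8))    # traces predicted as the same label
    other_class_ats = rng.random((200, 8))  # traces of all other labels
    target_at = rng.random(8)
    a_dist, a_dot = find_closest_at(target_at, same_class_ats)
    b_dist, _ = find_closest_at(a_dot, other_class_ats)
    return a_dist / b_dist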
def fetch_mdsa(model, x_train, x_target, target_name, layer_names, args):
"""
@param model: Subject model.
@param x_train: Set of training inputs.
@param x_target: Set of target (test or adversarial) inputs.
@param target_name: Name of the target set.
@param layer_names: List of selected layer names.
@param args: command-line arguments.
@return: List of mdsa for each target input.
"""
assert args.is_classification
prefix = info("[" + target_name + "] ")
train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(
model, x_train, x_target, target_name, layer_names, args
)
class_matrix = {}
all_idx = []
for i, label in enumerate(train_pred):
if label not in class_matrix:
class_matrix[label] = []
class_matrix[label].append(i)
all_idx.append(i)
mdsa = []
print(prefix + "Fetching MDSA")
train_size = len(x_train)
mdsa_inter_path = os.path.join(
args.save_path, f"{args.dataset}_{args.network}_{train_size}_mdsa_inter.npz")
if os.path.exists(mdsa_inter_path):
inter_dict = np.load(mdsa_inter_path, allow_pickle=True)
to_keep_dict = inter_dict["to_keep"][()]
mu_dict = inter_dict["mu"][()]
Sinv_dict = inter_dict["Sinv"][()]
else:
# generate the to_keep masks per class;
# here, train_ats has shape (train_size, num_cols)
to_keep_dict = dict()
mu_dict = dict()
Sinv_dict = dict()
for label in range(args.num_classes):
_to_keep = np.ones(train_ats.shape[1], dtype=np.bool_)
# print("INFO",train_ats[class_matrix[label]].shape)
col_vectors = np.transpose(train_ats[class_matrix[label]])
# print("INFO",col_vectors.shape)
for i in range(col_vectors.shape[0]):
# print(np.var(col_vectors[i]))
if np.var(col_vectors[i]) < args.var_threshold:
_to_keep[i] = False
refined_ats = col_vectors[_to_keep, :]
to_keep_dict[label] = _to_keep
_mu = np.mean(refined_ats, axis=1).transpose()
mu_dict[label] = _mu.copy()
_Sinv = np.linalg.inv(np.cov(refined_ats))
Sinv_dict[label] = _Sinv.copy()
np.savez(mdsa_inter_path, to_keep=to_keep_dict,
mu=mu_dict, Sinv=Sinv_dict)
for i, at in enumerate(tqdm(target_ats)):
to_keep = to_keep_dict[target_pred[i]]
col_vector = at.transpose()
refined_col_vector = col_vector[to_keep].transpose()
label = target_pred[i]
mu, Sinv = mu_dict[label], Sinv_dict[label]
tmp = np.dot((refined_col_vector - mu).transpose(), Sinv)
mdsa.append(np.sqrt(np.dot(tmp, (refined_col_vector - mu))).item())
return mdsa
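# The MDSA value appended above is the Mahalanobis distance
#   d(x) = sqrt((x - mu)^T * Sinv * (x - mu))
# between a target trace and the per-class training distribution. A minimal,
# self-contained sketch of that distance (illustrative only; the arrays below
# are hypothetical stand-ins and this helper is never called):
def _mdsa_example():
    rng = np.random.default_rng(0)
    ats = rng.random((100, 8))  # 100 refined training traces, 8 kept columns
    x = rng.random(8)           # one refined target trace
    mu = ats.mean(axis=0)
    Sinv = np.linalg.inv(np.cov(ats, rowvar=False))
    return np.sqrt((x - mu) @ Sinv @ (x - mu))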
def _get_kdes(train_ats, train_pred, class_matrix, args):
"""Kernel density estimation
Args:
train_ats (list): List of activation traces in training set.
train_pred (list): List of predictions of the train set.
class_matrix (dict): Mapping from label to indices of training inputs.
args: command-line arguments.
Returns:
kdes (list): List of kdes per label if classification task.
removed_cols (list): List of removed columns by variance threshold.
"""
removed_cols = []
if args.is_classification:
for label in range(args.num_classes):
col_vectors = np.transpose(train_ats[class_matrix[label]])
for i in range(col_vectors.shape[0]):
if (
np.var(col_vectors[i]) < args.var_threshold
and i not in removed_cols
):
removed_cols.append(i)
print(sorted(removed_cols))
kdes = {}
for label in tqdm(range(args.num_classes), desc="kde"):
refined_ats = np.transpose(train_ats[class_matrix[label]])
refined_ats = np.delete(refined_ats, removed_cols, axis=0)
print(refined_ats.shape)
print(label)
if refined_ats.shape[0] == 0:
print(
warn("all ats were removed by threshold {}".format(
args.var_threshold))
)
break
kdes[label] = gaussian_kde(refined_ats)
else:
if np.isnan(train_ats).any():
print("Found nan in train ats")
col_vectors = np.transpose(train_ats)
for i in range(col_vectors.shape[0]):
if np.var(col_vectors[i]) < args.var_threshold:
removed_cols.append(i)
print(len(removed_cols))
refined_ats = np.transpose(train_ats)
refined_ats = np.delete(refined_ats, removed_cols, axis=0)
if refined_ats.shape[0] == 0:
print(warn("all ats were removed by threshold {}".format(args.var_threshold)))
kdes = [gaussian_kde(refined_ats)]
print(gaussian_kde(refined_ats))
# print(type(kdes[0]))
# if np.isnan(kdes[0]).any():
# raise Exception("Found NaN in kde")
print(infog("The number of removed columns: {}".format(len(removed_cols))))
return kdes, removed_cols
def _get_lsa(kde, at, removed_cols):
refined_at = np.delete(at, removed_cols, axis=0)
# np.asscalar was removed in NumPy 1.23; extract the scalar with float()
res = float(-kde.logpdf(np.transpose(refined_at)))
if np.isnan(res) or np.isinf(res):
raise Exception("LSA is NaN or Inf")
return res
def fetch_lsa(model, x_train, x_target, target_name, layer_names, args):
def check_nan(x):
import math
if isinstance(x, np.ndarray):
if np.isnan(x).any() or np.isinf(x).any():
raise Exception("nan")
if isinstance(x, list):
for xi in x:
if math.isnan(xi) or math.isinf(xi):
raise Exception("nan")
print("No nan found")
# """Likelihood-based SA
# Args:
# model (keras model): Subject model.
# x_train (list): Set of training inputs.
# x_target (list): Set of target (test or adversarial) inputs.
# target_name (str): Name of target set.
# layer_names (list): List of selected layer names.
# args: command-line arguments.
# Returns:
# lsa (list): List of lsa for each target input.
# """
prefix = info("[" + target_name + "] ")
train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(
model, x_train, x_target, target_name, layer_names, args
)
check_nan(train_ats)
check_nan(train_pred)
check_nan(target_ats)
check_nan(target_pred)
class_matrix = {}
if args.is_classification:
for i, label in enumerate(train_pred):
if label not in class_matrix.keys():
class_matrix[label] = []
class_matrix[label].append(i)
kdes, removed_cols = _get_kdes(train_ats, train_pred, class_matrix, args)
lsa = []
print(prefix + "Fetching LSA")
if args.is_classification:
for i, at in enumerate(tqdm(target_ats)):
label = target_pred[i]
kde = kdes[label]
lsa.append(_get_lsa(kde, at, removed_cols))
else:
kde = kdes[0]
for at in tqdm(target_ats):
lsa.append(_get_lsa(kde, at, removed_cols))
return lsa
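# The LSA value computed above is the negative log-density of a target trace
# under a Gaussian KDE fitted to the same-class training traces. A minimal,
# self-contained sketch (illustrative only; the arrays below are hypothetical
# stand-ins and this helper is never called):
def _lsa_example():
    from scipy.stats import gaussian_kde
    rng = np.random.default_rng(0)
    train = rng.random((8, 100))  # gaussian_kde expects (n_dims, n_samples)
    target = rng.random((8, 1))
    kde = gaussian_kde(train)
    return float(-kde.logpdf(target))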
def get_sc(lower, upper, k, sa):
"""Surprise Coverage
Args:
lower (float): Lower bound.
upper (float): Upper bound.
k (int): The number of buckets.
sa (list): List of lsa or dsa.
Returns:
cov (float): Surprise coverage, as a percentage of buckets covered.
"""
buckets = np.digitize(sa, np.linspace(lower, upper, k))
return len(list(set(buckets))) / float(k) * 100
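# Example: get_sc(0, 2, 4, [0.1, 0.8, 1.7]) digitizes the three SA values
# against np.linspace(0, 2, 4) into buckets {1, 2, 3}, so the coverage is
# 3 / 4 * 100 = 75.0.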
# sa_selected_layers = {
# 'alexnet' : [""],
# 'lenet5': ['dense_3'],
# 'vgg16': ['dense_1'],
# 'resnet20': ['activation_19'],
# 'resnet32': ['activation_28'],
# 'vgg19': ['block5_conv4'],
# 'resnet50': ['activation_49'],
# 'deepspeech': ['dense_1'],
# 'dave-orig': ['fc4'],
# }
sa_selected_layers = {
'cifar10_alexnet': ["dense_2"], # -3
# 'cifar10_alexnet': ["dense_1"], # -3
"fashion-mnist_lenet5": ["dense_3"], # -2
'mnist_lenet5': ['dense_3'], # -2
'cifar10_vgg16': ['dense_1'], # -3
'cifar10_resnet20': ['flatten_1'], # -1
'cifar100_resnet32': ['flatten_1'], # -1
'imagenet_vgg19': ['block5_conv4'], # -6
'imagenet_resnet50': ['activation_49'], # -3
'speech-commands_deepspeech': ['dense_1'],
'driving_dave-orig': ['fc4'],
'driving_dave-dropout': ['fc3'],
}
|
{"hexsha": "f7d23664dc2153df5f8262e4d3aadbecbee16388", "size": 19682, "ext": "py", "lang": "Python", "max_stars_repo_path": "coverage/tools/surprise_adequacy/sa.py", "max_stars_repo_name": "anonymousprojs/ISSTA2022-study", "max_stars_repo_head_hexsha": "94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "coverage/tools/surprise_adequacy/sa.py", "max_issues_repo_name": "anonymousprojs/ISSTA2022-study", "max_issues_repo_head_hexsha": "94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coverage/tools/surprise_adequacy/sa.py", "max_forks_repo_name": "anonymousprojs/ISSTA2022-study", "max_forks_repo_head_hexsha": "94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4481481481, "max_line_length": 89, "alphanum_fraction": 0.5816990143, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4639}
|
import copy
import gym
import numpy as np
import rlf.algos.utils as autils
import rlf.rl.utils as rutils
import torch
import torch.nn.functional as F
from rlf.algos.il.base_il import BaseILAlgo
from rlf.args import str2bool
from rlf.storage.base_storage import BaseStorage
from tqdm import tqdm
class BehavioralCloning(BaseILAlgo):
"""
When used as a standalone updater, BC performs a single update per call
to update. The total number of updates is # epochs * # batches in the expert
dataset. num-steps must be 0 and num-processes must be 1, as no experience
should be collected in the environment. To see the performance, you must
evaluate. To evaluate only at the end of training, set eval-interval to a
number larger than the number of updates. There will always be a final
evaluation.
"""
def __init__(self, set_arg_defs=True):
super().__init__()
self.set_arg_defs = set_arg_defs
def init(self, policy, args):
super().init(policy, args)
self.num_epochs = 0
self.action_dim = rutils.get_ac_dim(self.policy.action_space)
if self.args.bc_state_norm:
self.norm_mean = self.expert_stats["state"][0]
self.norm_var = torch.pow(self.expert_stats["state"][1], 2)
else:
self.norm_mean = None
self.norm_var = None
self.num_bc_updates = 0
def get_env_settings(self, args):
settings = super().get_env_settings(args)
if args.bc_state_norm:
print("Setting environment state normalization")
settings.state_fn = self._norm_state
return settings
def _norm_state(self, x):
obs_x = torch.clamp(
(rutils.get_def_obs(x) - self.norm_mean)
/ torch.pow(self.norm_var + 1e-8, 0.5),
-10.0,
10.0,
)
if isinstance(x, dict):
x["observation"] = obs_x
return x
# for plain tensor observations, return the normalized tensor itself
return obs_x
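# _norm_state applies z-score normalization with clamping:
#   obs' = clip((obs - mean) / sqrt(var + 1e-8), -10, 10)
# e.g. (hypothetical tensors; mean/var come from the expert stats in init):
#   obs = torch.randn(4, 8)
#   normed = torch.clamp((obs - mean) / torch.pow(var + 1e-8, 0.5), -10.0, 10.0)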
def get_num_updates(self):
if self.exp_generator is None:
return len(self.expert_train_loader) * self.args.bc_num_epochs
else:
return self.args.exp_gen_num_trans * self.args.bc_num_epochs
def get_completed_update_steps(self, num_updates):
return num_updates * self.args.traj_batch_size
def _reset_data_fetcher(self):
super()._reset_data_fetcher()
self.num_epochs += 1
def full_train(self, update_iter=0):
action_loss = []
prev_num = 0
# First BC
with tqdm(total=self.args.bc_num_epochs) as pbar:
while self.num_epochs < self.args.bc_num_epochs:
super().pre_update(self.num_bc_updates)
log_vals = self._bc_step(False)
action_loss.append(log_vals["_pr_action_loss"])
pbar.update(self.num_epochs - prev_num)
prev_num = self.num_epochs
rutils.plot_line(
action_loss,
f"action_loss_{update_iter}.png",
self.args.vid_dir,
not self.args.no_wb,
self.get_completed_update_steps(self.update_i),
)
self.num_epochs = 0
def pre_update(self, cur_update):
# Override the learning rate decay
pass
def _bc_step(self, decay_lr):
if decay_lr:
super().pre_update(self.num_bc_updates)
expert_batch = self._get_next_data()
if expert_batch is None:
self._reset_data_fetcher()
expert_batch = self._get_next_data()
states, true_actions = self._get_data(expert_batch)
log_dict = {}
pred_actions, _, _ = self.policy(states, None, None)
if rutils.is_discrete(self.policy.action_space):
pred_label = rutils.get_ac_compact(self.policy.action_space, pred_actions)
acc = (pred_label == true_actions.long()).sum().float() / pred_label.shape[0]
log_dict["_pr_acc"] = acc.item()
loss = autils.compute_ac_loss(
pred_actions,
true_actions.view(-1, self.action_dim),
self.policy.action_space,
)
self._standard_step(loss)
self.num_bc_updates += 1
val_loss = self._compute_val_loss()
if val_loss is not None:
log_dict["_pr_val_loss"] = val_loss.item()
log_dict["_pr_action_loss"] = loss.item()
return log_dict
def _get_data(self, batch):
states = batch["state"].to(self.args.device)
if self.args.bc_state_norm:
states = self._norm_state(states)
if self.args.bc_noise is not None:
add_noise = torch.randn(states.shape) * self.args.bc_noise
states += add_noise.to(self.args.device)
states = states.detach()
true_actions = batch["actions"].to(self.args.device)
true_actions = self._adjust_action(true_actions)
return states, true_actions
def _compute_val_loss(self):
if self.update_i % self.args.eval_interval != 0:
return None
if self.val_train_loader is None:
return None
with torch.no_grad():
losses = []
for batch in self.val_train_loader:
states, true_actions = self._get_data(batch)
pred_actions, _, _ = self.policy(states, None, None)
loss = autils.compute_ac_loss(
pred_actions,
true_actions.view(-1, self.action_dim),
self.policy.action_space,
)
losses.append(loss.item())
return np.mean(losses)
def update(self, storage):
top_log_vals = super().update(storage)
log_vals = self._bc_step(True)
log_vals.update(top_log_vals)
return log_vals
def get_storage_buffer(self, policy, envs, args):
return BaseStorage()
def get_add_args(self, parser):
if not self.set_arg_defs:
# This is set when BC is used at the same time as another optimizer
# that also has a learning rate.
self.set_arg_prefix("bc")
super().get_add_args(parser)
#########################################
# Overrides
if self.set_arg_defs:
parser.add_argument("--num-processes", type=int, default=1)
parser.add_argument("--num-steps", type=int, default=0)
ADJUSTED_INTERVAL = 200
parser.add_argument("--log-interval", type=int, default=ADJUSTED_INTERVAL)
parser.add_argument(
"--save-interval", type=int, default=100 * ADJUSTED_INTERVAL
)
parser.add_argument(
"--eval-interval", type=int, default=100 * ADJUSTED_INTERVAL
)
parser.add_argument("--no-wb", default=False, action="store_true")
#########################################
# New args
parser.add_argument("--bc-num-epochs", type=int, default=1)
parser.add_argument("--bc-state-norm", type=str2bool, default=False)
parser.add_argument("--bc-noise", type=float, default=None)
|
{"hexsha": "f81b5de94a7d0bce5cee7b4c30afb75c153428bd", "size": 7142, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl-toolkit/rlf/algos/il/bc.py", "max_stars_repo_name": "clvrai/goal_prox_il", "max_stars_repo_head_hexsha": "7c809b2ee575a69a14997068db06f3c1f3c8bd08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-11-17T20:19:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T04:21:26.000Z", "max_issues_repo_path": "rl-toolkit/rlf/algos/il/bc.py", "max_issues_repo_name": "clvrai/goal_prox_il", "max_issues_repo_head_hexsha": "7c809b2ee575a69a14997068db06f3c1f3c8bd08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl-toolkit/rlf/algos/il/bc.py", "max_forks_repo_name": "clvrai/goal_prox_il", "max_forks_repo_head_hexsha": "7c809b2ee575a69a14997068db06f3c1f3c8bd08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8390243902, "max_line_length": 92, "alphanum_fraction": 0.6033323999, "include": true, "reason": "import numpy", "num_tokens": 1561}
|
import pandas as pd
import numpy as np
import re
from datetime import datetime as dt
from datetime import date, timedelta
# This script cleans the fetched tweets from the previous task "fetching_tweets"
LOCAL_DIR = '/tmp/'
def main():
# Read the csv produced by the "fetching_tweets" task
tweets = pd.read_csv(LOCAL_DIR + 'data_fetched.csv')
# Rename the columns of the dataframe
tweets.rename(columns={'Tweet': 'tweet', 'Time': 'dt', 'Retweet from': 'retweet_from', 'User': 'tweet_user'},
inplace=True)
# Drop the tweet_user column (renamed from "User") since all the tweets are written by Elon Musk
tweets.drop(['tweet_user'], axis=1, inplace=True)
# Add a column before_clean_len to know the size of the tweets before cleaning
tweets['before_clean_len'] = [len(t) for t in tweets.tweet]
# Remove @mention in tweets
tweets['tweet'] = tweets['tweet'].apply(lambda tweet: re.sub(r'@[A-Za-z0-9]+', '', tweet))
# Remove URL in tweets
tweets['tweet'] = tweets['tweet'].apply(lambda tweet: re.sub('https?://[A-Za-z0-9./]+', '', tweet))
# Remove all non-letter characters, including numbers, from the tweets
tweets['tweet'] = tweets['tweet'].apply(lambda tweet: re.sub('[^a-zA-Z]', ' ', tweet))
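# Example of the three substitutions above (illustrative):
#   "@elonmusk check https://t.co/Ab1 42!" -> " check  42!" -> " check     "
# (mention and URL removed, then digits and punctuation replaced by spaces)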
# Lower case all the tweets
tweets['tweet'] = tweets['tweet'].str.lower()
# Add after clean len column
tweets['after_clean_len'] = [len(t) for t in tweets.tweet]
# Set the date column to yesterday's date (note: this rebinding shadows the
# `dt` alias imported from datetime above)
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
tweets['dt'] = dt
# Export cleaned dataframe
tweets.to_csv(LOCAL_DIR + 'data_cleaned.csv', index=False)
if __name__ == '__main__':
main()
|
{"hexsha": "a8f5c44ce603d3c0a99d7fc7972842ba270b6bcd", "size": 1707, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_pipelines/cleaning_tweet.py", "max_stars_repo_name": "rodrigoarenas456/airflow-course", "max_stars_repo_head_hexsha": "8ffda59b8ac4cfa18b4cd614bc0f75ee18324b28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_pipelines/cleaning_tweet.py", "max_issues_repo_name": "rodrigoarenas456/airflow-course", "max_issues_repo_head_hexsha": "8ffda59b8ac4cfa18b4cd614bc0f75ee18324b28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-09-08T21:24:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T22:28:47.000Z", "max_forks_repo_path": "data_pipelines/cleaning_tweet.py", "max_forks_repo_name": "rodrigoarenas456/airflow-course", "max_forks_repo_head_hexsha": "8ffda59b8ac4cfa18b4cd614bc0f75ee18324b28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-06T12:18:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T12:18:39.000Z", "avg_line_length": 32.8269230769, "max_line_length": 113, "alphanum_fraction": 0.6625659051, "include": true, "reason": "import numpy", "num_tokens": 438}
|
import torch
import h5py
import numpy as np
#from cspnet import CSPNet_p3p4p5, ConvBlock
def load_conv_weights(conv, f, layer_name):
w = np.asarray(f[layer_name][layer_name + '_1/kernel:0'], dtype='float32')
b = np.asarray(f[layer_name][layer_name + '_1/bias:0'], dtype='float32')
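# Keras stores conv kernels as (H, W, in_channels, out_channels); PyTorch
# expects (out_channels, in_channels, H, W), hence the permute below.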
conv.weight = torch.nn.Parameter(torch.from_numpy(w).permute(3, 2, 0, 1))
conv.bias = torch.nn.Parameter(torch.from_numpy(b))
def load_bn_weights(bn, f, layer_name):
w = np.asarray(f[layer_name][layer_name + '_1/gamma:0'], dtype='float32')
b = np.asarray(f[layer_name][layer_name + '_1/beta:0'], dtype='float32')
m = np.asarray(f[layer_name][layer_name + '_1/moving_mean:0'], dtype='float32')
v = np.asarray(f[layer_name][layer_name + '_1/moving_variance:0'], dtype='float32')
bn.weight = torch.nn.Parameter(torch.from_numpy(w))
bn.bias = torch.nn.Parameter(torch.from_numpy(b))
bn.running_mean = torch.from_numpy(m)
bn.running_var = torch.from_numpy(v)
def load_conv_block_weights(conv_blk, f, blk_name):
load_conv_weights(conv_blk.conv1, f, 'res' + blk_name + '_branch2a')
load_bn_weights(conv_blk.bn1, f, 'bn' + blk_name + '_branch2a')
load_conv_weights(conv_blk.conv2, f, 'res' + blk_name + '_branch2b')
load_bn_weights(conv_blk.bn2, f, 'bn' + blk_name + '_branch2b')
load_conv_weights(conv_blk.conv3, f, 'res' + blk_name + '_branch2c')
load_bn_weights(conv_blk.bn3, f, 'bn' + blk_name + '_branch2c')
load_conv_weights(conv_blk.conv4, f, 'res' + blk_name + '_branch1')
load_bn_weights(conv_blk.bn4, f, 'bn' + blk_name + '_branch1')
def load_identity_block_weights(identity_blk, f, blk_name):
load_conv_weights(identity_blk.conv1, f, 'res' + blk_name + '_branch2a')
load_bn_weights(identity_blk.bn1, f, 'bn' + blk_name + '_branch2a')
load_conv_weights(identity_blk.conv2, f, 'res' + blk_name + '_branch2b')
load_bn_weights(identity_blk.bn2, f, 'bn' + blk_name + '_branch2b')
load_conv_weights(identity_blk.conv3, f, 'res' + blk_name + '_branch2c')
load_bn_weights(identity_blk.bn3, f, 'bn' + blk_name + '_branch2c')
def load_l2norm_weights(l2norm, f, layer_name):
w = np.asarray(f[layer_name][layer_name + '_1/' + layer_name + '_gamma:0'], dtype='float32')
l2norm.weight = torch.nn.Parameter(torch.from_numpy(w))
def load_keras_weights(model, weights_path):
with h5py.File(weights_path, 'r') as f:
print(f.attrs['layer_names'])
load_conv_weights(model.conv1, f, 'conv1')
load_bn_weights(model.bn1, f, 'bn_conv1')
load_conv_block_weights(model.convblk2a, f, '2a')
load_identity_block_weights(model.identityblk2b, f, '2b')
load_identity_block_weights(model.identityblk2c, f, '2c')
load_conv_block_weights(model.convblk3a, f, '3a')
load_identity_block_weights(model.identityblk3b, f, '3b')
load_identity_block_weights(model.identityblk3c, f, '3c')
load_identity_block_weights(model.identityblk3d, f, '3d')
load_conv_block_weights(model.convblk4a, f, '4a')
load_identity_block_weights(model.identityblk4b, f, '4b')
load_identity_block_weights(model.identityblk4c, f, '4c')
load_identity_block_weights(model.identityblk4d, f, '4d')
load_identity_block_weights(model.identityblk4e, f, '4e')
load_identity_block_weights(model.identityblk4f, f, '4f')
load_conv_block_weights(model.convblk5a, f, '5a')
load_identity_block_weights(model.identityblk5b, f, '5b')
load_identity_block_weights(model.identityblk5c, f, '5c')
load_conv_weights(model.p3, f, 'P3up')
load_conv_weights(model.p4, f, 'P4up')
load_conv_weights(model.p5, f, 'P5up')
load_l2norm_weights(model.p3_l2, f, 'P3norm')
load_l2norm_weights(model.p4_l2, f, 'P4norm')
load_l2norm_weights(model.p5_l2, f, 'P5norm')
load_conv_weights(model.feat, f, 'feat')
load_bn_weights(model.feat_bn, f, 'bn_feat')
load_conv_weights(model.center_conv, f, 'center_cls')
load_conv_weights(model.height_conv, f, 'height_regr')
load_conv_weights(model.offset_conv, f, 'offset_regr')
|
{"hexsha": "44fbdc6017d5bae69394f72f34be34ada35c9a61", "size": 4170, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/keras_weights_loader.py", "max_stars_repo_name": "wang-xinyu/csp.pytorch", "max_stars_repo_head_hexsha": "8d5187358c1eb0fbf7ba5f184b9afed6c2518ad3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-03-29T13:35:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-11T10:45:53.000Z", "max_issues_repo_path": "utils/keras_weights_loader.py", "max_issues_repo_name": "wang-xinyu/csp.pytorch", "max_issues_repo_head_hexsha": "8d5187358c1eb0fbf7ba5f184b9afed6c2518ad3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-08T09:53:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-16T04:18:03.000Z", "max_forks_repo_path": "utils/keras_weights_loader.py", "max_forks_repo_name": "wang-xinyu/csp.pytorch", "max_forks_repo_head_hexsha": "8d5187358c1eb0fbf7ba5f184b9afed6c2518ad3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-05-24T21:49:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T09:14:42.000Z", "avg_line_length": 48.488372093, "max_line_length": 96, "alphanum_fraction": 0.6990407674, "include": true, "reason": "import numpy", "num_tokens": 1192}
|
import numpy as np
import pandas as pd
import os
import librosa
from utilities import compute_time_consumed
import time
import soundfile
import tqdm
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
base_path = os.path.join(os.path.expanduser('~'), 'DCase/data/TUT-urban-acoustic-scenes-2018-development')
## random erasing
def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_h=1 / 0.3, v_l=0, v_h=255, ):
"""
:param p: probability of performing erasing
:param s_l: lower bound of the erased-area ratio
:param s_h: upper bound of the erased-area ratio
:param r_1: lower bound of the aspect ratio
:param r_h: upper bound of the aspect ratio
:param v_l: lower bound of the random fill value
:param v_h: upper bound of the random fill value
:return: eraser func
reference paper: https://arxiv.org/pdf/1708.04896.pdf
"""
def eraser(input_img):
img_c, img_h, img_w = input_img.shape
p_1 = np.random.rand()  # random draw
if p_1 > p:  # with probability 1 - p, skip erasing
return input_img
while True:
s = np.random.uniform(s_l, s_h) * img_h * img_w  # random erased area as a fraction of the image
r = np.random.uniform(r_1, r_h)  # random aspect ratio
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))  # width w and height h of the rectangle to erase
# randomly pick the top-left corner of the rectangle
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
if left + w <= img_w and top + h <= img_h:  # keep the rectangle inside the image
break
c = np.random.uniform(v_l, v_h)
input_img[:, top:top + h, left:left + w] = c  # fill with the random value c
return input_img
return eraser
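# Minimal usage sketch (hypothetical channels-first image; fill values in
# [0, 1] to match float images, rather than the default [0, 255]):
#   eraser = get_random_eraser(p=0.5, v_l=0, v_h=1)
#   img = np.random.rand(3, 64, 64)
#   img = eraser(img)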
def generate_mix_audio():
dev_train_csv = os.path.join(base_path, 'evaluation_setup', 'fold1_train.txt')
data = pd.read_csv(dev_train_csv, sep='\t', names=['file', 'label'])
data = data.groupby('label')
for name, group in data:
print('generate {} audios'.format(name))
for index, row in tqdm.tqdm(group.iterrows(), total=6122):
print(row['file'])
file_path = os.path.join(base_path, row['file'])
print(file_path)
audio_name = os.path.splitext(file_path)[0].split('/')[-1]
print('generate {} audios'.format(audio_name))
y1, sr = librosa.load(file_path, mono=False, sr=48000)
sample_audio_file = group.sample(n=1).iloc[0]['file']
y2, sr = librosa.load(os.path.join(base_path, sample_audio_file), mono=False, sr=48000)
y = 0.5 * y1 + 0.5 * y2
new_audio_name = str(audio_name) + '_mix.wav'
save_path = os.path.join(base_path, 'audio', new_audio_name)
# librosa.output.write_wav was removed in librosa >= 0.8; use the already
# imported soundfile instead (soundfile expects (frames, channels))
soundfile.write(save_path, y.T, sr)
def generate_fold1_train_mix():
dev_train_csv = os.path.join(base_path, 'evaluation_setup', 'fold1_train.txt')
data = pd.read_csv(dev_train_csv, sep='\t', names=['file', 'label'])
file, label = [], []
for index, row in data.iterrows():
file.append(row['file'])
file.append(row['file'].replace('.wav', '_mix.wav'))
label.append(row['label'])
label.append(row['label'])
data = pd.DataFrame({'file': file, 'label': label})
data.to_csv('fold1_train_mix.txt', header=False, sep='\t', index=False)
print(data)
def generate_meta_mix():
dev_train_csv = os.path.join(base_path, 'meta.csv')
data = pd.read_csv(dev_train_csv, sep='\t')
filename, scene_label, identifier, source_label = [], [], [], []
for index, row in data.iterrows():
if index <= 6121:
filename.append(row['filename'].replace('.wav', '_mix.wav'))
scene_label.append(row['scene_label'])
source_label.append(row['source_label'])
identifier.append(row['identifier'])
filename.append(row['filename'])
scene_label.append(row['scene_label'])
identifier.append(row['identifier'])
source_label.append(row['source_label'])
new_data = pd.DataFrame(
data={'filename': filename, 'scene_label': scene_label, 'identifier': identifier, 'source_label': source_label},
columns=['filename', 'scene_label', 'identifier', 'source_label'])
new_data.to_csv('meta_mix.csv', header=True, sep='\t', index=False)
# print(new_data)
if __name__ == '__main__':
# generate_mix_audio()
# generate_fold1_train_mix()
generate_meta_mix()
pass
|
{"hexsha": "026aa8e80e4f3f8910a8231abcfc1969f685b62e", "size": 4246, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/data_augmentation.py", "max_stars_repo_name": "rongxuanhong/task1", "max_stars_repo_head_hexsha": "3e3f8eaa936a73df60a93a410e369f803d302903", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-23T10:16:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-23T10:16:58.000Z", "max_issues_repo_path": "utils/data_augmentation.py", "max_issues_repo_name": "rongxuanhong/task1", "max_issues_repo_head_hexsha": "3e3f8eaa936a73df60a93a410e369f803d302903", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/data_augmentation.py", "max_forks_repo_name": "rongxuanhong/task1", "max_forks_repo_head_hexsha": "3e3f8eaa936a73df60a93a410e369f803d302903", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6984126984, "max_line_length": 120, "alphanum_fraction": 0.6128120584, "include": true, "reason": "import numpy", "num_tokens": 1279}
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into air, mindir models#################
python export.py
"""
import os
import numpy as np
import mindspore as ms
from mindspore import Tensor, export, context
from src.utils import init_net
from model_utils.config import config
from model_utils.device_adapter import get_device_id
from model_utils.moxing_adapter import moxing_wrapper
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
context.set_context(device_id=get_device_id())
MAX_HR_SIZE = 2040
@moxing_wrapper()
def run_export():
"""
run export
"""
print(config)
cfg = config
if cfg.pre_trained is None:
raise RuntimeError('config.pre_trained is None.')
net = init_net(cfg)
max_lr_size = MAX_HR_SIZE // cfg.scale
input_arr = Tensor(np.ones([1, cfg.n_colors, max_lr_size, max_lr_size]), ms.float32)
file_name = os.path.splitext(os.path.basename(cfg.pre_trained))[0]
file_name = file_name + f"_InputSize{max_lr_size}"
file_path = os.path.join(cfg.output_path, file_name)
file_format = 'MINDIR'
num_params = sum([param.size for param in net.parameters_dict().values()])
export(net, input_arr, file_name=file_path, file_format=file_format)
print("export success", flush=True)
print(f"{cfg.pre_trained} -> {file_path}.{file_format.lower()}, net parameters = {num_params/1000000:>0.4}M",
flush=True)
if __name__ == '__main__':
run_export()
|
{"hexsha": "2aadb54c111d2e210d227f1fd406003b8af9a9a2", "size": 2174, "ext": "py", "lang": "Python", "max_stars_repo_path": "research/cv/EDSR/export.py", "max_stars_repo_name": "leelige/mindspore", "max_stars_repo_head_hexsha": "5199e05ba3888963473f2b07da3f7bca5b9ef6dc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 77, "max_stars_repo_stars_event_min_datetime": "2021-10-15T08:32:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T13:09:11.000Z", "max_issues_repo_path": "research/cv/EDSR/export.py", "max_issues_repo_name": "leelige/mindspore", "max_issues_repo_head_hexsha": "5199e05ba3888963473f2b07da3f7bca5b9ef6dc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-10-30T14:44:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T06:57:57.000Z", "max_forks_repo_path": "research/cv/EDSR/export.py", "max_forks_repo_name": "leelige/mindspore", "max_forks_repo_head_hexsha": "5199e05ba3888963473f2b07da3f7bca5b9ef6dc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2021-10-15T08:32:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T18:45:20.000Z", "avg_line_length": 33.4461538462, "max_line_length": 113, "alphanum_fraction": 0.7028518859, "include": true, "reason": "import numpy", "num_tokens": 500}
|
import CompactBasisFunctions: Basis, basis, nodes, nbasis, nnodes, order, degree
@testset "$(rpad("Basis Tests",80))" begin
struct BasisTest{T} <: Basis{T} end
test_basis = BasisTest{Float64}()
@test_throws ErrorException basis(test_basis)
@test_throws ErrorException nodes(test_basis)
@test_throws ErrorException nbasis(test_basis)
@test_throws ErrorException nnodes(test_basis)
@test_throws ErrorException order(test_basis)
@test_throws ErrorException degree(test_basis)
@test_throws ErrorException eachindex(test_basis)
end
|
{"hexsha": "d39594407878d7fc44c15a276e19c81d6dfcfa9a", "size": 542, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/basis_tests.jl", "max_stars_repo_name": "JuliaGNI/CompactBasisFunctions.jl", "max_stars_repo_head_hexsha": "5a76714aca25c399d0856643aff3683d8e0f103a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/basis_tests.jl", "max_issues_repo_name": "JuliaGNI/CompactBasisFunctions.jl", "max_issues_repo_head_hexsha": "5a76714aca25c399d0856643aff3683d8e0f103a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-12-11T18:50:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T16:34:58.000Z", "max_forks_repo_path": "test/basis_tests.jl", "max_forks_repo_name": "JuliaGNI/CompactBasisFunctions.jl", "max_forks_repo_head_hexsha": "5a76714aca25c399d0856643aff3683d8e0f103a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6363636364, "max_line_length": 53, "alphanum_fraction": 0.7601476015, "num_tokens": 133}
|
from __future__ import absolute_import, division, print_function
import numpy as np
import explorers
from .. import tools
from ..tools import chrono
def astd(xs):
m = np.average(xs)
plus = [x-m for x in xs if x >= m]
minus = [x-m for x in xs if x <= m]
sigma_plus = np.std([-1*x for x in plus] + plus)
sigma_minus = np.std([-1*x for x in minus] + minus)
return (sigma_minus, sigma_plus)
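# astd returns asymmetric standard deviations: deviations below the mean are
# mirrored around it to estimate sigma_minus, deviations above to estimate
# sigma_plus. E.g. astd([1, 2, 2, 9]) gives sigma_plus = 5.5 (driven by the
# single high value) and sigma_minus ~= 1.89.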
def quantiles(xs):
return np.percentile(xs, 100.0/4), np.percentile(xs, 3*100.0/4)
def compile_results(cfg):
results = chrono.ChronoHistory(cfg.hardware.datafile, core_keys=['error_avgs', 'error_stds', 'avg', 'std'],
extralog = False)
for i, filename in enumerate(cfg.hardware.testfiles):
test_history = chrono.ChronoHistory(filename, extralog=False, verbose=True)
for tick, entry in enumerate(test_history):
print(tick, entry)
if entry is not None:
results.setdefault(tick, {'error_avgs': [], 'error_stds': [],
'avg': None, 'std': None})
print(len(results.core.entries[tick]['data']['error_avgs']), i)
print(results.core.entries[tick]['data']['error_avgs'])
if 'errors' in entry['data']:
errors = entry['data']['errors']
else:
assert False
errors = []
s_channels = entry['data']['testset']['s_channels']
for s_goal, feedback in zip(entry['data']['testset']['s_goals'], entry['data']['tests']):
s_v_test = explorers.tools.to_vector(s_goal, s_channels)
s_v_actual = explorers.tools.to_vector(feedback['s_signal'], s_channels)
errors.append(tools.dist(s_v_test, s_v_actual))
print(len(results.core.entries[tick]['data']['error_avgs']), i)
assert len(results.core.entries[tick]['data']['error_avgs']) == i
results.core.entries[tick]['data']['error_avgs'].append(np.average(errors))
results.core.entries[tick]['data']['error_stds'].append(np.std(errors))
test_history.close()
for entry in results.core.entries:
if entry is not None:
entry['data']['avg'] = np.average(entry['data']['error_avgs'])
entry['data']['std'] = np.std(entry['data']['error_avgs'])
results.save(verbose=True)
def run():
from . import run_task
run_task.run_task(compile_results)
|
{"hexsha": "29d57357ad2ce12a14d5411d27d26183a7a64799", "size": 2592, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/runs/result.py", "max_stars_repo_name": "humm/experiments", "max_stars_repo_head_hexsha": "44770110e51349cc5e2a225322f57ede4f9fdfa7", "max_stars_repo_licenses": ["OML"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/runs/result.py", "max_issues_repo_name": "humm/experiments", "max_issues_repo_head_hexsha": "44770110e51349cc5e2a225322f57ede4f9fdfa7", "max_issues_repo_licenses": ["OML"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/runs/result.py", "max_forks_repo_name": "humm/experiments", "max_forks_repo_head_hexsha": "44770110e51349cc5e2a225322f57ede4f9fdfa7", "max_forks_repo_licenses": ["OML"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-05-10T21:15:02.000Z", "max_forks_repo_forks_event_max_datetime": "2018-05-10T21:15:02.000Z", "avg_line_length": 37.5652173913, "max_line_length": 111, "alphanum_fraction": 0.5686728395, "include": true, "reason": "import numpy", "num_tokens": 581}
|
#https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
# MNIST dataset parameters.
num_classes = 10 # 0 to 9 digits
num_features = 784 # 28*28
# Training parameters.
learning_rate = 0.01
training_steps = 1000
batch_size = 256
display_step = 50
# Prepare MNIST data.
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Convert to float32.
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Flatten images to 1-D vector of 784 features (28*28).
x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])
# Normalize images value from [0, 255] to [0, 1].
x_train, x_test = x_train / 255., x_test / 255.
# Use tf.data API to shuffle and batch data.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
# Weight of shape [784, 10], the 28*28 image features, and total number of classes.
W = tf.Variable(tf.random.normal([num_features, num_classes]), name="weight")
# Bias of shape [10], the total number of classes.
b = tf.Variable(tf.zeros([num_classes]), name="bias")
# Logistic regression (Wx + b).
def logistic_regression(x):
# Apply softmax to normalize the logits to a probability distribution.
return tf.nn.softmax(tf.matmul(x, W) + b)
# Cross-Entropy loss function.
def cross_entropy(y_pred, y_true):
# Encode label to a one hot vector.
y_true = tf.one_hot(y_true, depth=num_classes)
# Clip prediction values to avoid log(0) error.
y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)
# Compute cross-entropy.
return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred), 1))
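# i.e. loss = mean over the batch of -sum over classes of y_true * log(y_pred).
# (The explicit axis-1 sum restores the per-example cross-entropy; without it,
# reduce_sum would collapse the whole batch into a single scalar.)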
# Accuracy metric.
def accuracy(y_pred, y_true):
# Predicted class is the index of highest score in prediction vector (i.e. argmax).
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Adam optimizer.
optimizer = tf.optimizers.Adam(learning_rate)
# Optimization process.
def run_optimization(x, y):
# Wrap computation inside a GradientTape for automatic differentiation.
with tf.GradientTape() as g:
pred = logistic_regression(x)
loss = cross_entropy(pred, y)
# Compute gradients.
gradients = g.gradient(loss, [W, b])
# Update W and b following gradients.
optimizer.apply_gradients(zip(gradients, [W, b]))
# Run training for the given number of steps.
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
# Run the optimization to update W and b values.
run_optimization(batch_x, batch_y)
if step % display_step == 0:
pred = logistic_regression(batch_x)
loss = cross_entropy(pred, batch_y)
acc = accuracy(pred, batch_y)
print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# Save weights and optimizer variables.
# Create a dict of variables to save.
vars_to_save = {"W": W, "b": b, "optimizer": optimizer}
# TF Checkpoint, pass the dict as **kwargs.
checkpoint = tf.train.Checkpoint(**vars_to_save)
# TF CheckpointManager to manage saving parameters.
saver = tf.train.CheckpointManager(
checkpoint, directory="./tf-example", max_to_keep=5)
# Save variables.
saver.save()
# Check weight value.
np.mean(W.numpy())
# Reset variables to test restore.
W = tf.Variable(tf.random.normal([num_features, num_classes]), name="weight")
b = tf.Variable(tf.zeros([num_classes]), name="bias")
# Check resetted weight value.
np.mean(W.numpy())
# Set checkpoint to load data.
vars_to_load = {"W": W, "b": b, "optimizer": optimizer}
checkpoint = tf.train.Checkpoint(**vars_to_load)
# Restore variables from latest checkpoint.
latest_ckpt = tf.train.latest_checkpoint("./tf-example")
checkpoint.restore(latest_ckpt)
# Confirm that W has been correctly restored.
np.mean(W.numpy())
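# The restored mean should match the value checked right after saving:
# tf.train.Checkpoint matches saved tensors to the variables in vars_to_load
# by the keyword names used at save time ("W", "b", "optimizer").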
|
{"hexsha": "f6c83197c8d0bf69f07f4f4aea25dc57cbb6f6a4", "size": 4116, "ext": "py", "lang": "Python", "max_stars_repo_path": "deep_neural_networks/save_and_restore_tensorflow_models.py", "max_stars_repo_name": "parphane/udacity-self_driving_cars", "max_stars_repo_head_hexsha": "069762a5320a109ebe4f7c23997631a2998a0076", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deep_neural_networks/save_and_restore_tensorflow_models.py", "max_issues_repo_name": "parphane/udacity-self_driving_cars", "max_issues_repo_head_hexsha": "069762a5320a109ebe4f7c23997631a2998a0076", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deep_neural_networks/save_and_restore_tensorflow_models.py", "max_forks_repo_name": "parphane/udacity-self_driving_cars", "max_forks_repo_head_hexsha": "069762a5320a109ebe4f7c23997631a2998a0076", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4827586207, "max_line_length": 122, "alphanum_fraction": 0.7227891156, "include": true, "reason": "import numpy", "num_tokens": 1041}
|
#include <stan/math/rev.hpp>
#include <test/unit/math/test_ad.hpp>
#include <gtest/gtest.h>
#include <boost/math/differentiation/finite_difference.hpp>
TEST(mathMixScalFun, neg_binomial_lpmf_derivatives) {
auto f = [](const int y) {
return [=](const auto& alpha, const auto& beta) {
return stan::math::neg_binomial_lpmf(y, alpha, beta);
};
};
stan::test::expect_ad(f(0), 1.5, 4.1);
stan::test::expect_ad(f(0), std::vector<double>({1.2, 2.0}),
std::vector<double>({1.1, 1.2}));
stan::test::expect_ad(f(6), 1.5, 4.1);
stan::test::expect_ad(f(6), std::vector<double>({1.7, 2.0}),
std::vector<double>({1.1, 2.3}));
stan::test::expect_ad(f(13), 1e11, 1e10);
stan::test::expect_ad(f(13), std::vector<double>({1e11, 1e10}),
std::vector<double>({1e10, 1e9}));
}
|
{"hexsha": "a17998ded933d92b9267b26283d099ae5a55589a", "size": 858, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/unit/math/mix/prob/neg_binomial_test.cpp", "max_stars_repo_name": "LaudateCorpus1/math", "max_stars_repo_head_hexsha": "990a66b3cccd27a5fd48626360bb91093a48278b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-06-14T14:33:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-14T14:33:37.000Z", "max_issues_repo_path": "test/unit/math/mix/prob/neg_binomial_test.cpp", "max_issues_repo_name": "LaudateCorpus1/math", "max_issues_repo_head_hexsha": "990a66b3cccd27a5fd48626360bb91093a48278b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unit/math/mix/prob/neg_binomial_test.cpp", "max_forks_repo_name": "LaudateCorpus1/math", "max_forks_repo_head_hexsha": "990a66b3cccd27a5fd48626360bb91093a48278b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-05-10T12:55:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-10T12:55:07.000Z", "avg_line_length": 37.3043478261, "max_line_length": 65, "alphanum_fraction": 0.5909090909, "num_tokens": 283}
|
* PROGRAM RIBBON
*
* Program to set up input for RENDER (RASTER3D package)
* to draw ribbon diagram. The RIBBON routine itself is simply
* extracted from CCP FRODO. The original invoked a bspline feature
* of the ps300; I have replaced this with a spline equation gotten
* from Larry Andrews. Conversion from ribbon edges to solid rendering
* is my own hacking.
* Ethan Merritt - 8-Nov-1988
* Slightly modified code to guarantee output of triangles with
* vertices in correct order for triangular mesh algorithms EAM Sep 90
*
* Usage: ribbon [-h] [-dn] pdbfile > setup.r3d
* ribbon [-h] -dn - to take pdb records from stdin
*
* Input: pdbfile
* Brookhaven PDB-format file of atomic co-ordinates
* only C-alpha and O atoms are needed
* setup.matrix or setup.angles
* rotation matrix or angles applied to PDB coords
* (see writeup for SETUP/RENDER).
* Output: stdout (new for DS5000 version)
* file suitable for input to RENDER
*
* Interactive parameters:
* WIDTH of ribbon in Angstroms
* NUMBER of interpolated coordinates between successive C-alphas.
* COLOR scheme for ribbon
* 0 or 1: solid color (RGB values from color1 below)
* 2: shade from color1 at 1st res to color2 at last res
* 3: front of ribbon is color1, back of ribbon is color2
* 4: shade front as in scheme 2, back is color 3
* 5: each chain is new color (from successive input
* (COLOR cards at start of input file)
* 6: use prefixed COLOR cards (as in SETUP/RENDER)
* (implemented 4-Aug-1997 EAM)
* COLOR1,COLOR2,COLOR3 RGB components (9f8.0)
*
INCLUDE 'VERSION.incl'
c
INTEGER INPUT, OUTPUT, NOISE
PARAMETER (OUTPUT=6, NOISE=0)
PARAMETER (MAXCOL=5000, MAXATM=10000)
C REAL RGB(3,MAXCOL)
REAL RADIUS(MAXCOL)
REAL RAD
CHARACTER*24 MASK(MAXCOL),TEST
CHARACTER*80 ATOM(MAXATM),CARD
LOGICAL SMATCH
c
C Ethan Merritt Oct 1988
C Modified to read in 3x3 view matrix (e.g. from CCP FRODO view command)
C from file. Matrix is applied before
C finding translation, center, and scale. Afterwards the input matrix
C to RENDER is therefore the identity matrix.
C EAM Aug 1997 - Honor COLOUR requests
C EAM Nov 1999 - remove all (q) formats
C EAM Jan 2010 - declare and initialize RAD
C
c
common /COLORS/ ischeme, cindex, COLOR1(3), COLOR2(3), COLOR3(3)
& ,RGB(3,MAXCOL)
integer cindex
common /SPAM/ natm, SPAM(4,MAXATM), SCAM(MAXATM)
integer SCAM
common /FLAGS/ mflag, hflag, dflag
logical mflag, hflag, dflag
c
character*64 in_file
character*32 flags
character*80 line
common /matrix/ matrix, coords
real matrix(3,3), coords(3)
data matrix / 1.,0.,0.,0.,1.,0.,0.,0.,1. /
c
c -h causes the header records not to be printed
c -m [now obsolete because always in force] uses format
c mixed object types in output file
c -d suppresses interactive input
c
hflag = .FALSE.
dflag = .FALSE.
mflag = .TRUE.
c
narg = iargc()
do i = 1,narg
call getarg( i, flags )
if (flags(1:2) .eq. '-h') then
hflag = .TRUE.
else if (flags(1:2) .eq. '-d') then
dflag = .TRUE.
read (flags(3:4),'(I1)') ischeme
end if
end do
c
call getarg( narg, in_file )
if (in_file(1:1) .eq. '-') then
INPUT = 5
else
INPUT = 1
open( unit=INPUT, file=in_file, status='OLD' )
end if
c
3 format(a,a)
c
call view_matrix
c
NCOL = 0
NATM = 0
ASPECT = 1280./1024.
c
if (hflag) goto 10
c
WRITE(OUTPUT,'(A,A)') 'C-alpha ribbon - Raster3D ',VERSION
WRITE(OUTPUT,'(A)') '80 64 tiles in x,y'
WRITE(OUTPUT,'(A)') ' 8 8 pixels (x,y) per tile'
WRITE(OUTPUT,'(A)') '4 anti-aliasing 3x3 into 2x2 pixels'
WRITE(OUTPUT,'(A)') '0 0 0 black background'
WRITE(OUTPUT,'(A)') 'F no, ribbons cast funny shadows'
WRITE(OUTPUT,'(A)') '25 Phong power'
WRITE(OUTPUT,'(A)') '0.15 secondary light contribution'
WRITE(OUTPUT,'(A)') '0.05 ambient light contribution'
WRITE(OUTPUT,'(A)') '0.25 specular reflection component'
WRITE(OUTPUT,'(A)') '4.0 eye position'
WRITE(OUTPUT,'(A)') '1 1 1 main light source position'
c
10 CONTINUE
READ(INPUT,'(A80)',END=50) CARD
IF (CARD(1:4).EQ.'COLO') THEN
NCOL = NCOL + 1
IF (NCOL.GT.MAXCOL) THEN
WRITE(NOISE,*) 'Colour table overflow. Increase ',
& 'MAXCOL and recompile.'
STOP 10
ENDIF
READ(CARD,'(6X,A24,3F8.3,F6.2)') MASK(NCOL),
& (RGB(I,NCOL),I=1,3),RADIUS(NCOL)
ELSEIF ((CARD(1:4).EQ.'ATOM') .AND.
& ( CARD(14:16).EQ.'CA ' .OR. CARD(14:16).EQ.'O ')) THEN
NATM = NATM + 1
IF (NATM.GT.MAXATM) THEN
WRITE(NOISE,*) 'Atom array overflow. Increase ',
& 'MAXATM and recompile.'
STOP 20
ENDIF
ATOM(NATM) = CARD
ELSEIF (CARD(1:3).EQ.'END') THEN
GO TO 50
ENDIF
GO TO 10
* Come here when EOF or 'END' record is reached
50 CONTINUE
IF (NATM.EQ.0) THEN
WRITE(NOISE,*) 'No atoms in input.'
STOP 30
ELSE
WRITE(NOISE,*) NATM,' atoms accepted from input.'
ENDIF
IF (NCOL.EQ.0) THEN
WRITE(NOISE,*) 'No colours in input.'
c STOP 40
ENDIF
C
XMAX = -1E20
XMIN = 1E20
YMAX = -1E20
YMIN = 1E20
ZMAX = -1E20
ZMIN = 1E20
RAD = 1.7
DO 100 IATM=1,NATM
CARD = ATOM(IATM)
TEST = CARD(7:30)
READ(CARD,82) coords
82 format(30x,3f8.3)
x = coords(1)*matrix(1,1) + coords(2)*matrix(2,1)
1 + coords(3)*matrix(3,1)
y = coords(1)*matrix(1,2) + coords(2)*matrix(2,2)
1 + coords(3)*matrix(3,2)
z = coords(1)*matrix(1,3) + coords(2)*matrix(2,3)
1 + coords(3)*matrix(3,3)
SPAM(1,IATM) = X
SPAM(2,IATM) = Y
SPAM(3,IATM) = Z
C default radius; overwritten below if a COLOUR card matches
SPAM(4,IATM) = RAD
C
C EAM Aug 1997 - finally get around to honoring atom colors
DO 84 ICOL = 1, NCOL
IF (SMATCH(TEST,MASK(ICOL))) THEN
SCAM(IATM) = ICOL
RAD = RADIUS(ICOL)
SPAM(4,IATM) = RAD
GOTO 86
ENDIF
84 CONTINUE
86 CONTINUE
C
XMAX = MAX(XMAX,X+RAD)
XMIN = MIN(XMIN,X-RAD)
YMAX = MAX(YMAX,Y+RAD)
YMIN = MIN(YMIN,Y-RAD)
ZMAX = MAX(ZMAX,Z+RAD)
ZMIN = MIN(ZMIN,Z-RAD)
100 CONTINUE
XMID = (XMAX+XMIN)/2.
YMID = (YMAX+YMIN)/2.
ZMID = (ZMAX+ZMIN)/2.
TX = -XMID
TY = -YMID
TZ = -ZMID
IF (ASPECT.GE.1.) THEN
* The X direction is wider than the Y
XROOM = ASPECT
YROOM = 1.
ZROOM = 2.
ELSE
XROOM = 1.
YROOM = ASPECT
ZROOM = 2.
ENDIF
XSPAN = XMAX-XMIN
YSPAN = YMAX-YMIN
ZSPAN = ZMAX-ZMIN
SCALE = MAX(XSPAN/XROOM,YSPAN/YROOM,ZSPAN/ZROOM)
* Leave a little extra room as a border:
SCALE = SCALE / 0.90
if (hflag) goto 129
WRITE(OUTPUT,120) TX,TY,TZ,SCALE
120 FORMAT('1 0 0 0 input co-ordinate + radius transformation'/
& '0 1 0 0'/
& '0 0 1 0'/
& 4F10.3)
if (mflag) then
WRITE (OUTPUT,'(A)') '3 mixed object types'
WRITE (OUTPUT,'(A)') '(9F8.3,2x,3f5.2)'
WRITE (OUTPUT,'(A)') '(11F8.3)'
WRITE (OUTPUT,'(A)') '(11F8.3)'
else
WRITE (OUTPUT,'(A)') '1 all objects are triangles'
WRITE (OUTPUT,'(A)') '(9F8.3,2x,3f5.2)'
end if
129 continue
write (noise,'(/)')
write (noise,153) 'X min max:', XMIN, XMAX
write (noise,153) 'Y min max:', YMIN, YMAX
write (noise,153) 'Z min max:', ZMIN, ZMAX
write (noise,153) ' scale:', SCALE
153 format(1x,a,3f8.2)
c
c
if (dflag) then
width = 1.5
offset = 1.2
nchord = 5
if (ischeme .le. 0 .or. ischeme .gt. 6) ischeme = 2
call vload( color1, 0.0, 0.0, 0.4 )
call vload( color2, 0.5, 0.0, 0.0 )
call vload( color3, 0.6, 0.6, 0.6 )
else
width = 0
write (noise,3) 'Width of ribbon (default 1.5A): '
read (5,'(A80)') line
read (line,*,end=154,err=154) width
154 continue
if (width.le.0) width = 1.5
c Original RIBBON used bspline smoothing, which requires "offset"
c because smoothed curve doesn't go through guide points.
write (noise,3) 'Offset from CA position (default 1.2A): '
read (5,'(A80)') line
read (line,*,end=156,err=156) offset
156 continue
if (offset.le.0) offset = 1.2
write (noise,3) 'Chords per residue (default = 10): '
read (5,'(A80)') line
read (line,*,end=158,err=158) nchord
158 continue
if (nchord.le.1) nchord = 10
write (noise,160)
160 format(' Coloring schemes available:',
1 /,' 0 or 1: solid color (RGB values from color1 below)',
2 /,' 2: shade from color1 at 1st res to color2 at last res',
3 /,' 3: front of ribbon is color1, back of ribbon is color2',
4 /,' 4: shade front as in scheme 2, back is color 3',
5 /,' 5: new color for each chain (requires COLOUR cards)',
6 /,' 6: use prefixed COLOUR cards (as in SETUP/RENDER)')
write (noise,3) 'Coloring scheme: '
read (5,'(A80)') line
read (line,*,end=161,err=161) ischeme
161 continue
if (ischeme.le.0 .or. ischeme.gt.6) ischeme = 1
if (ischeme .eq. 1) write (noise,3)
1 'COLOR1 (RGB values, 3f8.0): '
if (ischeme .eq. 2) write (noise,3)
1 'COLOR1, COLOR2 (RGB values, 6f8.0): '
if (ischeme .eq. 3) write (noise,3)
1 'COLOR1, COLOR2 (RGB values, 6f8.0): '
if (ischeme .eq. 4) write (noise,3)
1 'COLOR1, COLOR2, COLOR3 (RGB values, 9f8.0): '
if (ischeme .lt. 5) then
read (5,'(A80)') line
if (line.eq.' ') goto 163
read (line,*,end=163,err=163) color1,color2,color3
endif
goto 164
163 continue
call vload( color1, 0.0, 0.0, 0.4 )
call vload( color2, 0.5, 0.0, 0.0 )
call vload( color3, 0.6, 0.6, 0.6 )
164 continue
if (ischeme .eq. 3) then
color3(1) = color2(1)
color3(2) = color2(2)
color3(3) = color2(3)
end if
c end of -d suppression
end if
write (noise,169) ischeme,color1,color2,color3
169 format(' color scheme',i3,/,3(3x,3f6.3))
cindex = 1
c
call ribbon( 2, width, nchord, offset, natm )
c
END
LOGICAL FUNCTION SMATCH (SUBJ, MASK)
CHARACTER*24 SUBJ,MASK
SMATCH = .FALSE.
DO 10 I = 1, 24
IF (SUBJ(I:I).NE.MASK(I:I) .AND. MASK(I:I).NE.'#') RETURN
10 CONTINUE
SMATCH = .TRUE.
RETURN
END
subroutine view_matrix
c
common /matrix/ matrix, coords
real matrix(3,3), coords(3)
c
real phiX, phiY, phiZ
parameter (noise = 0)
parameter (R2D = 180./3.1415927)
open (unit=3, file='setup.matrix', status='OLD', err=100)
write (noise,3) ' View Matrix from file '
read (3,*) ((matrix(i,j),i=1,3),j=1,3)
write (noise,'(1x,3f9.5)') ((matrix(i,j),i=1,3),j=1,3)
close (3)
det = matrix(1,1) * matrix(2,2) * matrix(3,3)
1 + matrix(1,2) * matrix(2,3) * matrix(3,1)
2 + matrix(2,1) * matrix(3,2) * matrix(1,3)
3 - matrix(1,3) * matrix(2,2) * matrix(3,1)
4 - matrix(1,2) * matrix(2,1) * matrix(3,3)
5 - matrix(1,1) * matrix(2,3) * matrix(3,2)
write (noise,'('' determinant ='',f8.3)') det
phiX = atan2( -matrix(3,2), matrix(3,3) )
phiY = atan2( matrix(3,1), matrix(3,3) / cos(phiX) )
phiZ = atan2( -matrix(2,1), matrix(1,1) )
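c The extraction above assumes matrix = Rz(phiZ)*Ry(phiY)*Rx(phiX),
c the same composition built from angles below; it degenerates
c when cos(phiX) = 0.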
write (noise,3) ' View Angles from matrix',' '
write (noise,2) phiZ*R2D, phiY*R2D, phiX*R2D
return
100 continue
open (unit=3, file='setup.angles', status='OLD', err=200)
write (noise,3) ' View Angles from file '
read (3,*) phiZ, phiY, phiX
close (3)
write (noise,2) phiZ, phiY, phiX
cx = cos(phiX/R2D)
sx = sin(phiX/R2D)
cy = cos(phiY/R2D)
sy = sin(phiY/R2D)
cz = cos(phiZ/R2D)
sz = sin(phiZ/R2D)
matrix(1,1) = cz*cy
matrix(1,2) = sz*cx + cz*sy*sx
matrix(1,3) = sz*sx - cz*sy*cx
matrix(2,1) = -sz*cy
matrix(2,2) = cz*cx - sx*sy*sz
matrix(2,3) = cz*sx + sz*sy*cx
matrix(3,1) = sy
matrix(3,2) = -sx*cy
matrix(3,3) = cx*cy
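c This is the composition the extraction above inverts: row 3 is
c ( sin(phiY), -sin(phiX)*cos(phiY), cos(phiX)*cos(phiY) ).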
write (noise,3) ' View Matrix from angles',' '
write (noise,'(1x,3f9.5)') ((matrix(i,j),i=1,3),j=1,3)
return
200 continue
2 format(1x,' phiZ =',f8.2,' phiY =',f8.2,' phiX =',f8.2)
3 format(/a,a)
write (noise,*) ' No view matrix or angles provided'
return
end
C
SUBROUTINE RIBDRW(GUIDE,NRIB,MAXRES,NPT,NCHORD)
integer npt ! number of guide points
real guide(4,MAXRES,NRIB) ! 4 dim because E&S wanted it that way
integer nchord ! how many interpolations per guide pt
parameter (MAXCOL = 5000)
integer OUTPUT
parameter (OUTPUT = 6)
C
C splining from Larry Andrews 7-Nov-1988
C
parameter (nspln = 5000) ! maximum of (npt*nchord)
parameter (ndata = 500) ! maximum # guidepoints
parameter (ndata1 = 501)
c
common /COLORS/ ischeme, cindex, COLOR1(3), COLOR2(3), COLOR3(3)
& ,RGB(3,MAXCOL)
integer cindex
common /FLAGS/ mflag, hflag, dflag
logical mflag, hflag, dflag
c
c real s(ndata1)
c REAL XP(4,NDATA)
real smooth(4,nspln,2) ! npt*nchord points on splined curve
real color(3)
c
if (npt .gt. ndata) stop 'spline - TOO MANY GUIDE POINTS'
if (npt*nchord .gt. nspln) stop 'spline - NPT*NCHORD > 5000'
c
c fill 4th coord with fraction of chain traced
c
color_inc = 1.000 / float(npt)
fraction = 0.0
if (ischeme.le.5) then
do i = 1, npt
guide(4,i,1) = fraction
guide(4,i,2) = fraction
fraction = fraction + color_inc
end do
endif
c
c calculate spline segments
c
tinc = 1./float(nchord)
do 1000 irib = 1, 2
iout = 1
do 900 ipt = 2, npt-1
t = 0.0
do i = 1, nchord
iout = iout + 1
call bspline( guide(1,ipt-1,irib), guide(1,ipt,irib),
1 guide(1,ipt+1,irib), t, smooth(1,iout,irib) )
t = t + tinc
end do
900 continue
1000 continue
c
c Add end segments (splines go midpoint-to-midpoint)
c
iout = iout + 1
do 1100 irib = 1, 2
do i = 1, 4
smooth(i, 1, irib) = guide(i, 1, irib )
smooth(i, iout, irib) = guide(i, npt, irib )
end do
1100 continue
C
2 format(9f8.3,2x,3f5.2)
3 format('1',/,9f8.3,2x,3f5.2)
c
c Start loop over spline segments
c
ires = 1
jres = 1
kres = 2
2000 continue
c do 2100 ires = 1, iout-1
fraction = smooth(4,ires, 1)
c
c Make sure the two sides of the ribbon stay in register
c
inext = ires + 1
55 dist0 = dist(smooth(1,inext,1),smooth(1,kres,2))
dist1 = dist(smooth(1,inext,1),smooth(1,kres+1,2))
if ((dist1 .lt. dist0) .and. (kres .lt. iout)) then
kres = kres + 1
goto 55
end if
56 dist0 = dist(smooth(1,inext,1),smooth(1,kres,2))
dist1 = dist(smooth(1,inext+1,1),smooth(1,kres,2))
if ((dist1 .lt. dist0) .and. (inext .lt. iout)) then
inext = inext + 1
goto 56
end if
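c After the two scans above, inext and kres index the mutually
c nearest points on the two edges, which keeps the triangles
c bridging them from becoming long and skewed.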
c
call colorit( color, fraction,
1 smooth(1,ires,1), smooth(1,jres,2), smooth(1,inext,1))
c
if (mflag) then
write (output,3) (smooth(i,ires, 1),i=1,3),
1 (smooth(i,jres, 2),i=1,3),
2 (smooth(i,inext,1),i=1,3),
3 color
else
write (output,2) (smooth(i,ires, 1),i=1,3),
1 (smooth(i,jres, 2),i=1,3),
2 (smooth(i,inext,1),i=1,3),
3 color
endif
c
if (jres .eq. kres) goto 2100
call colorit( color, fraction,
1 smooth(1,kres,2), smooth(1,inext,1), smooth(1,jres,2))
if (mflag) then
write (output,3) (smooth(i,jres, 2),i=1,3),
1 (smooth(i,inext,1),i=1,3),
2 (smooth(i,kres, 2),i=1,3),
3 color
else
write (output,2) (smooth(i,jres, 2),i=1,3),
1 (smooth(i,inext,1),i=1,3),
2 (smooth(i,kres, 2),i=1,3),
3 color
end if
jres = kres
if (kres .lt. iout) kres = kres + 1
2100 continue
ires = inext
if (ires .lt. iout) goto 2000
c
c End loop over spline segments
c
cindex = cindex + 1
return
end
function dist(v1, v2)
real v1(3), v2(3)
real diff(3)
c returns the SQUARED distance; callers only compare distances,
c so the square root is never needed
call vdif(diff,v1,v2)
dist = dot(diff,diff)
return
end
subroutine vload( v, s1, s2, s3 )
real v(3)
v(1) = s1
v(2) = s2
v(3) = s3
return
end
subroutine colorit( color, fraction, point1, point2, point3 )
real color(3), point1(3), point2(3), point3(3)
c
c scheme 1 solid color (COLOR1)
c scheme 2 shade from COLOR1 at 1st residue to COLOR2 at last
c scheme 3 COLOR1 on front, COLOR3 (=COLOR2) on back
c scheme 4 combination of 2 and 3 above
c scheme 5 color each new chain a new color from RGB
c
PARAMETER (MAXCOL=5000, MAXATM=10000)
common /COLORS/ ischeme, cindex, COLOR1(3), COLOR2(3), COLOR3(3)
& ,RGB(3,MAXCOL)
integer cindex
common /SPAM/ NATM, SPAM(4,MAXATM), SCAM(MAXATM)
integer SCAM
real vec1(3), vec2(3), vec3(3)
c
if ((ischeme .eq. 3) .or. (ischeme .eq. 4)) then
call vdif( vec1, point2, point1 )
call vdif( vec2, point3, point1 )
call cross( vec1, vec2, vec3 )
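c vec3 is the facet normal; the sign of its z component tells
c whether this face of the ribbon points toward the viewer.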
if (vec3(3) .lt. 0) then
color(1) = color3(1)
color(2) = color3(2)
color(3) = color3(3)
else if (ischeme .eq. 4) then
color(1) = fraction*color2(1)
& + (1.-fraction)*color1(1)
color(2) = fraction*color2(2)
& + (1.-fraction)*color1(2)
color(3) = fraction*color2(3)
& + (1.-fraction)*color1(3)
else
color(1) = color1(1)
color(2) = color1(2)
color(3) = color1(3)
end if
else if (ischeme .eq. 2) then
color(1) = fraction*color2(1) + (1.-fraction)*color1(1)
color(2) = fraction*color2(2) + (1.-fraction)*color1(2)
color(3) = fraction*color2(3) + (1.-fraction)*color1(3)
else if (ischeme .eq. 5) then
call vload( color,
& RGB(1,cindex), RGB(2,cindex), RGB(3,cindex))
c else if (ischeme .eq. 6) then
c ICOL = SCAM(fraction)
c color(1) = RGB(1,icol)
c color(2) = RGB(2,icol)
c color(3) = RGB(3,icol)
else
call vload( color, color1(1), color1(2), color1(3) )
end if
return
end
subroutine bspline( v1, v2, v3, t, v4 )
real v1(4), v2(4), v3(4)
real t
real v4(4)
c
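c Quadratic uniform B-spline blend:
c   frac1 = (1-t)**2/2,  frac3 = t**2/2,  frac2 = 1-frac1-frac3.
c At t=0 the point is the midpoint of (v1,v2); at t=1 it is the
c midpoint of (v2,v3) -- hence the midpoint-to-midpoint note in
c RIBDRW and the separately patched chain ends.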
frac3 = 0.5 * t*t
frac1 = 0.5 * (1.-t) * (1.-t)
frac2 = 1. - (frac1 + frac3)
do i = 1, 4
v4(i) = frac1 * v1(i) + frac2 * v2(i) + frac3 * v3(i)
end do
return
end
|
{"hexsha": "154e637ec48020051b1af88eb101073d45df2c0f", "size": 18299, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ribbon1.f", "max_stars_repo_name": "wkpark/raster3d", "max_stars_repo_head_hexsha": "1fc0f8e363c6c5a63a9fe64e7adeb4f2538bacb3", "max_stars_repo_licenses": ["Artistic-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ribbon1.f", "max_issues_repo_name": "wkpark/raster3d", "max_issues_repo_head_hexsha": "1fc0f8e363c6c5a63a9fe64e7adeb4f2538bacb3", "max_issues_repo_licenses": ["Artistic-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ribbon1.f", "max_forks_repo_name": "wkpark/raster3d", "max_forks_repo_head_hexsha": "1fc0f8e363c6c5a63a9fe64e7adeb4f2538bacb3", "max_forks_repo_licenses": ["Artistic-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.514516129, "max_line_length": 74, "alphanum_fraction": 0.5844581671, "num_tokens": 7076}
|
[STATEMENT]
lemma D_mndet1 : "D(mndet {} P) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D (mndet {} P) = {}
[PROOF STEP]
unfolding mndet_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D (if {} = {} then STOP else Abs_process (\<Union>x\<in>{}. F (x \<rightarrow> P x), \<Union>x\<in>{}. D (x \<rightarrow> P x))) = {}
[PROOF STEP]
by(simp add: D_STOP)
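(* mndet over an empty index set collapses to STOP, whose divergence
   set is empty (D_STOP), which closes the goal. *)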
|
{"llama_tokens": 168, "file": "HOL-CSP_Mndet", "length": 2}
|
import os
import numpy as np
import vipy.video
import vipy.videosearch
import vipy.object
from vipy.util import tempjpg, tempdir, Failed, isurl, rmdir
from vipy.geometry import BoundingBox
import pdb
from vipy.data.kinetics import Kinetics400, Kinetics600, Kinetics700
from vipy.data.activitynet import ActivityNet
from vipy.data.lfw import LFW
import warnings
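# Smoke test: download each dataset, load one element, and save a frame.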
def _test_dataset():
rmdir('/tmp/lfw')
d = LFW('/tmp/lfw').download().dataset()
d[0].saveas('lfw.jpg')
print('[test_datasets]: LFW PASSED')
warnings.warn('these datasets are crufty and have many missing youtube videos')
rmdir('/tmp/kinetics')
d = Kinetics400('/tmp/kinetics').download().valset()
v = d[0].load(verbose=True)[0].resize(rows=256).saveas('/tmp/kinetics.jpg')
v = d[1].download(verbose=True).save()
print('[test_datasets]: Kinetics400 PASSED')
d = Kinetics600('/tmp/kinetics').download().testset()
v = d[1].load()[0].resize(rows=256).saveas('kinetics.jpg')
print('[test_datasets]: Kinetics600 PASSED')
d = Kinetics700('/tmp/kinetics').download().trainset()
v = d[2].load()[0].rescale(0.5).saveas('kinetics.jpg')
print('[test_datasets]: Kinetics700 PASSED')
rmdir('/tmp/activitynet')
d = ActivityNet('/tmp/activitynet').download().dataset()
v = d[0].load()
print('[test_datasets]: ActivityNet PASSED')
if __name__ == "__main__":
_test_dataset()
|
{"hexsha": "06905d82b034ea65d424ad564ffe13609073d408", "size": 1451, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_dataset.py", "max_stars_repo_name": "williford/vipy", "max_stars_repo_head_hexsha": "d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-07-23T12:15:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T13:58:31.000Z", "max_issues_repo_path": "test/test_dataset.py", "max_issues_repo_name": "williford/vipy", "max_issues_repo_head_hexsha": "d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-26T00:58:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-26T12:34:41.000Z", "max_forks_repo_path": "test/test_dataset.py", "max_forks_repo_name": "williford/vipy", "max_forks_repo_head_hexsha": "d7ce90cfa3c11363ca9e9fcb1fcea9371aa1b74d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-11T15:31:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-16T14:01:33.000Z", "avg_line_length": 32.2444444444, "max_line_length": 87, "alphanum_fraction": 0.6740179187, "include": true, "reason": "import numpy", "num_tokens": 417}
|