# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os

from mock import call, Mock, patch, PropertyMock
import pytest

from sagemaker_containers import _env, _server

FIRST_PORT = "1111"
LAST_PORT = "2222"
SAFE_PORT_RANGE = "{}-{}".format(FIRST_PORT, LAST_PORT)


@patch.object(_env.ServingEnv, "model_server_workers", PropertyMock(return_value=2))
@patch.object(_env.ServingEnv, "model_server_timeout", PropertyMock(return_value=100))
@patch.object(_env.ServingEnv, "use_nginx", PropertyMock(return_value=False))
@patch("sagemaker_containers._env.num_gpus", lambda: 0)
@patch("os.wait", lambda: (-1, 0))
@patch("subprocess.Popen")
@patch("sys.path", ["/opt/folder", "/lib/another/folder"])
def test_start_no_nginx(popen):
    popen.return_value.pid = -1

    calls = [
        call(
            [
                "gunicorn",
                "--timeout",
                "100",
                "-k",
                "gevent",
                "--pythonpath",
                "/opt/folder,/lib/another/folder,%s" % _env.code_dir,
                "-b",
                "0.0.0.0:8080",
                "--worker-connections",
                "2000",
                "-w",
                "2",
                "--log-level",
                "info",
                "my_module",
            ]
        )
    ]

    _server.start("my_module")

    popen.assert_has_calls(calls)


@patch.object(_env.ServingEnv, "model_server_workers", PropertyMock(return_value=2))
@patch.object(_env.ServingEnv, "model_server_timeout", PropertyMock(return_value=100))
@patch.object(_env.ServingEnv, "use_nginx", PropertyMock(return_value=True))
@patch("sagemaker_containers._env.num_gpus", lambda: 0)
@patch("sagemaker_containers._server.nginx_config_file", "/tmp/nginx.conf")
@patch("sagemaker_containers._server.nginx_config_template_file", "/tmp/nginx.conf.template")
@patch("sagemaker_containers._files.read_file", lambda x: "random_string")
@patch("sagemaker_containers._files.write_file", Mock())
@patch("os.wait", lambda: (-1, 0))
@patch("subprocess.Popen")
@patch("sys.path", ["/opt/folder", "/lib/another/folder"])
def test_start_with_nginx(popen):
    popen.return_value.pid = -1

    calls = [
        call(["nginx", "-c", "/tmp/nginx.conf"]),
        call(
            [
                "gunicorn",
                "--timeout",
                "100",
                "-k",
                "gevent",
                "--pythonpath",
                "/opt/folder,/lib/another/folder,%s" % _env.code_dir,
                "-b",
                "unix:/tmp/gunicorn.sock",
                "--worker-connections",
                "2000",
                "-w",
                "2",
                "--log-level",
                "info",
                "my_module",
            ]
        ),
    ]

    _server.start("my_module")

    popen.assert_has_calls(calls)


def test_next_safe_port_first():
    safe_port = _server.next_safe_port(SAFE_PORT_RANGE)
    assert safe_port == FIRST_PORT


def test_next_safe_port_after():
    safe_port = _server.next_safe_port(SAFE_PORT_RANGE, FIRST_PORT)
    next_safe_port = str(int(FIRST_PORT) + 1)
    assert safe_port == next_safe_port


def test_next_safe_port_greater_than_range_exception():
    current_port = str(int(LAST_PORT) + 1)
    with pytest.raises(ValueError):
        _server.next_safe_port(SAFE_PORT_RANGE, current_port)


def test_next_safe_port_less_than_range_exception():
    current_port = str(int(FIRST_PORT) - 100)
    with pytest.raises(ValueError):
        _server.next_safe_port(SAFE_PORT_RANGE, current_port)


@patch(
    "sagemaker_containers._files.read_file",
    lambda x: "nginx_timeout=%NGINX_PROXY_READ_TIMEOUT%, nginx_port=%NGINX_HTTP_PORT%",
)
@patch("sagemaker_containers._server.nginx_config_template_file", "/tmp/nginx.conf.template")
@patch.object(_env.ServingEnv, "model_server_timeout", PropertyMock(return_value=4567))
@patch.object(_env.ServingEnv, "http_port", PropertyMock(return_value="1234"))
def test_create_nginx_config(tmpdir):
    nginx_config_file = os.path.join(str(tmpdir), "nginx.conf")
    serving_env = _env.ServingEnv()

    with patch("sagemaker_containers._server.nginx_config_file", nginx_config_file):
        _server._create_nginx_config(serving_env)

    assert os.path.exists(nginx_config_file)
    with open(nginx_config_file, "r") as f:
        data = f.readline()
    assert data == "nginx_timeout=4567, nginx_port=1234"
\chapter{Theoretical Background}
\section{Basics in Modelling Light in Computer Graphics}
\subsection{Radiometry}
One purpose of Computer Graphics is to simulate how light interacts with a surface and how a real-world observer, such as a human eye, perceives this interaction. These visual sensations are modelled by means of a virtual camera which captures the light emitted or reflected by the surface. The physical basis for measuring such light is radiometry, which deals with measuring the electromagnetic radiation transferred from a source to a receiver. \\
Fundamentally, light is a form of energy propagation consisting of a large collection of photons, where each photon can be considered a quantum of light that has a position, a direction of propagation and a wavelength $\lambda$. A photon travels at a speed $v = c/n$ that depends only on the speed of light $c$ and the refractive index $n$ of the medium through which it propagates. Its frequency is defined by $f = v/\lambda$ and the amount of energy $q$ it carries, measured in the SI unit Joule, is given by $q = hf = hv/\lambda$ where $h$ is Planck's constant. The total energy of a large collection of photons is hence $Q = \sum_i q_i$.
\subsection{Spectral Energy}
It is important to understand that the human eye is not equally sensitive to all wavelengths of the light spectrum and therefore responds differently to specific wavelengths. Remember that our goal is to model human visual perception. This is why we consider the energy distribution of a light spectrum rather than the total energy of a photon collection: the distribution can then be weighted according to the human visual system. So the question we want to answer is: how is the energy distributed across the wavelengths of light? \\
One idea is to build an energy histogram from a given photon collection. For this we order all photons by their associated wavelength, discretize the wavelength spectrum, sum the energy of all photons falling into the same wavelength interval, and finally normalize each interval by the total energy $Q$. This gives a histogram which tells us the spectral energy $Q_{\lambda}$ for a given discrete $\lambda$ interval and thus models the so-called spectral energy distribution\footnote{Intensive quantities can be thought of as density functions that give the density of an extensive quantity at an infinitesimally small interval or point.}.
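As a small illustration of this procedure, the following Python sketch builds such a histogram with NumPy; the photon collection is randomly generated here and merely stands in for measured data:
\begin{verbatim}
import numpy as np

# Hypothetical photon collection: wavelengths in nm (vacuum, n = 1).
wavelengths = np.random.uniform(380, 780, size=100_000)
h, c = 6.626e-34, 3.0e8
energies = h * c / (wavelengths * 1e-9)      # q = h*f = h*c/lambda

# Discretize the spectrum into 10 nm intervals and sum energies per bin.
bins = np.linspace(380, 780, 41)
Q_lambda, _ = np.histogram(wavelengths, bins=bins, weights=energies)

# Normalize by the total energy Q to get the spectral energy distribution.
Q_lambda /= energies.sum()
\end{verbatim}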
\subsection{Spectral Power}
Rendering an image in Computer Graphics corresponds to capturing the color sensation of an illuminated target scene at a certain point in time. Each color is associated with either a particular wavelength or is composed of a wavelength spectrum\footnote{A wavelength spectrum is a collection of certain wavelengths. For example, brown is a composition of many wavelengths in the region of yellow, orange or red in combination with low luminance.}. Thus a color is directly related to a certain amount of energy. In order to determine the color of a to-be-rendered pixel of an image, we first have to get a sense of how much light (in terms of energy) passes through the area which the pixel corresponds to. We begin by considering the flow of energy $\Phi = \frac{\Delta Q}{\Delta t}$ transferred through this area over a unit period of time. This allows us to measure the energy flow through a pixel during a certain amount of time. In general, power is the rate of energy production of a light source and corresponds to the flux. It is measured in Watts and denoted by $\Phi$. Since power is a rate over time, it is well defined even when energy production varies over time. As with spectral energy, for rendering we are really interested in the spectral power $\Phi_\lambda = \frac{\Delta \Phi}{\Delta \lambda}$, measured in Watts per nanometer.
\subsection{Spectral Irradiance}
Before we can tell how much light is reflected from a given point on a surface towards the viewing direction of an observer, we first have to know how much light arrives at this point. Since a point in general has no length, area or volume associated with it, let us instead consider an infinitesimal area $\Delta A$ around such a point. We can then ask how much light falls onto this small area. Observing this process over a short period of time, the measured quantity gives us the spectral irradiance $E$ as illustrated in figure $\ref{fig:irradiance}$. In summary, this quantity tells us how much spectral power is incident on a surface per unit area and is given by:
\begin{equation}
E_{\lambda} = \frac{\Phi_{\lambda}}{\Delta A}
\end{equation}
\begin{figure}[H]
\centering
\includegraphics[scale=0.5]{background/irradiance.png}
\caption[Irradiance]{Irradiance is the radiance summed up over all directions. The black border represents a surface element.}
\label{fig:irradiance}
\end{figure}
\subsection{Spectral Radiance}
When rendering an image we have to determine the color of each pixel of the image. Although irradiance tells us how much light arrives at a point and gets reflected, it tells us nothing about the power distribution across different directions. The direction matters because the human eye may perceive the brightness of an illuminated object differently when looking at it from different directions.
\begin{figure}[H]
\centering
\subfigure[Radiance is the density of photons per area per solid angle]{
\includegraphics[scale=0.4]{background/radiancehemisphere.png}
\label{fig:radiance}
}
~
\subfigure[Solid angle is the area of a surface patch on a sphere with radius R which is spanned by a set of directions]{
\includegraphics[scale=0.6]{background/solidangle.png}
\label{fig:solidangle}
}
\caption[Concept of Radiance]{Illustration of the concepts of radiance and of solid angle and how they are related.}
\label{fig:radianceBasics}
\end{figure}
\noindent
This concept is described by the radiometric quantity radiance. Basically, radiance is the measure of light energy passing through or emitted from a small area around a point on a surface towards a given direction during a short period of time. More formally, it is the spectral power emerging from an arbitrary point (an infinitesimal area around this point) that falls within a given solid angle\footnote{Modified from a figure in Computer Graphics class 2012 in chapter \emph{Colors}} (see figure $\ref{fig:solidangle}$) in a specific direction (usually towards the observer) as shown in figure $\ref{fig:radiance}$. This leads us to the following mathematical definition:
\begin{equation}
L_{\lambda}(\omega) = \frac{d^2 \Phi_{\lambda}}{\cos(\theta)\, dA\, d\Omega} \approx \frac{\Phi_{\lambda}}{\Omega A \cos(\theta)}
\end{equation}
where $L$ is the observed spectral radiance in $W\,m^{-2}\,sr^{-1}$ in direction $\omega$\footnote{The direction $\omega$ is determined by two angles, $\phi$ and $\theta$, as illustrated in figure $\ref{fig:radianceBasics}$.}, $\Phi_{\lambda}$ is the total power emitted, $\theta$ is the angle between the surface normal and the specified direction, $A$ is the area of the surface and $\Omega$ is the solid angle subtended by the observation or measurement. \\
It is useful to distinguish between radiance incident at a point on a surface and radiance exitant from that point. Terms for these concepts sometimes used in the graphics literature are surface radiance $L_r$ for the radiance \textit{reflected} from a surface and field radiance $L_i$ for the radiance \textit{incident} at a surface.
\subsection{BRDF}
In order to render the colorization of an observed object, a natural question in Computer Graphics is what portion of the incident light a viewer receives after reflection when looking at an illuminated object. For any given surface which is illuminated from a certain direction $\omega_i$, we can ask how much light is reflected from any point on this surface towards a viewing direction $\omega_r$. This is where the Bidirectional Reflectance Distribution Function (BRDF) comes into play, a radiometric quantity telling us how much light is reflected at an opaque surface. Mathematically speaking, the BRDF is the ratio of the reflected radiance pointing in the direction $\omega_r$ to the incident irradiance coming from the inverse direction of $\omega_i$, as illustrated in figure $\ref{fig:brdfillustration}$. Hence the BRDF is a four-dimensional function defined by the four angles $\theta_i$, $\phi_i$, $\theta_r$ and $\phi_r$.
\begin{figure}[ht]
\centering
\includegraphics[scale=0.5]{background/mybrdfmodel.png}
\caption[BRDF Model]{Illustration of the BRDF model, where $\omega_i$ points towards the light source and the viewing direction is denoted by $\omega_r$. Both unit direction vectors are defined w.r.t.\ a surface normal $\mathbf{n}$ for every point on the surface.}
\label{fig:brdfillustration}
\end{figure}
Formally, a BRDF for any given wavelength $\lambda$ is defined as:
\begin{align}
BRDF_{\lambda}(\omega_i, \omega_r)
& = \frac{dL_r(\omega_r)}{dE_i(\omega_i)} \nonumber \\
& = \frac{dL_r(\omega_r)}{L_i(\omega_i)\cos(\theta_i)\,d\omega_i}
\label{eq:defbrdf}
\end{align}
where $L_{r}$ is the reflected spectral radiance, $E_i$ is the incident spectral irradiance, $L_i$ is the incident spectral radiance and $\theta_i$ is the angle between $\omega_i$ and the surface normal $\mathbf n$.
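As a minimal sanity check of this definition (a sketch, not part of any model used later), consider the Lambertian BRDF, which is constant in both directions: $f_r = \rho/\pi$ for an albedo $\rho$. Monte Carlo integration of $f_r\cos(\theta_r)$ over the hemisphere recovers $\rho$, i.e.\ the fraction of incident light that is reflected:
\begin{verbatim}
import numpy as np

rho = 0.8
f_r = rho / np.pi        # Lambertian BRDF, independent of both directions

# Uniform hemisphere sampling: pdf = 1/(2*pi), cos(theta) uniform in [0, 1].
n = 200_000
cos_theta = np.random.rand(n)
estimate = np.mean(f_r * cos_theta) * 2.0 * np.pi
print(estimate)          # ~0.8 = rho: the surface reflects fraction rho
\end{verbatim}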
\subsection{Wavespectrum and Colors}
In order to see how crucial a role human vision plays, let us consider the following definition of color by \textit{Wyszecki and Stiles}, cited in the Fundamentals of Computer Graphics book$\cite{fundcg}$: \textit{"Color is the aspect of visual perception by which an observer may distinguish differences between two structure-free fields of view of the same size and shape, such as may be caused by differences in the spectral composition of the radiant energy concerned in the observation"}. Therefore, much like our perception of smell and taste, color vision is just another individual sense of perception, giving us the ability to distinguish between different frequency distributions of light, which we experience as different colors.
\begin{figure}[H]
\centering
\includegraphics[scale=0.35]{background/lightspec.png}
\caption[Visible Lightspectrum]{Frequency (top) and wavelength (bottom) of colors of the visible light spectrum$\footnotemark$.}
\label{fig:colorspectrum}
\end{figure}
\footnotetext{Similar figure to the one used in the Computer Graphics class 2012 in the chapter on colors}
In general, an eye contains photoreceptor cells which provide the ability to perceive color. A schematic of an eye is illustrated in figure $\ref{fig:humaneye}$. Basically, there are two specialized types of photoreceptor cells: cone cells, which are responsible for color vision, and rod cells, which allow an eye to perceive different brightness levels.
\begin{figure}[H]
\centering
\includegraphics[scale=0.35]{background/humaneye.png}
\caption[Human Eye Schematic]{Schematic$\footnotemark$ of photoreceptor cells, cones and rods, in a human eye}
\label{fig:humaneye}
\end{figure}
\footnotetext{image of illustration has been taken from \\ \texttt{http://en.wikipedia.org/wiki/Bidirectional\textunderscore reflectance\textunderscore distribution\textunderscore function}}
A human eye contains three different types of cone cells, having their peak sensitivity in different wavelength ranges. More precisely, there are cone cells most sensitive to short wavelengths between $420 nm$ and $440 nm$, those most sensitive in the middle range between $530 nm$ and $550 nm$, and those which have their peak in the long range, from $560 nm$ to $580 nm$. Any color sensation in human color perception can therefore be described by just three parameters, corresponding to the levels of stimulus of these three types of cone cells.
\subsection{Colorspace}
\label{sec:colorspace}
In order to render accurate images of how a human observer sees the world, a mathematical model of human color perception is required. Remember that color sensation is due to a visual stimulus processed by the cone cells in an eye, and that a human eye contains three different types of cone cells. Therefore, one possible approach is to describe each kind of cone cell by a function mapping each wavelength to a certain sensitivity. In the late 1920s, a series of experiments led to the so-called CIE RGB color space (see figure $\ref{fig:ciergb}$). This space describes the response of the cone cells of an average human individual, the so-called standard observer. Basically, a statistically sufficiently large number of test subjects were exposed to different target light colors expressed by their wavelength. The task of each subject was to reproduce these target colors by mixing three given primary colors: red, green and blue light. The strength of each primary color could be manually adjusted by setting its relative intensity. These adjustment weights were measured, aggregated and averaged among all subjects for each primary color. This model describes each color as a triple of three real-valued numbers\footnote{Note that negative color weights are possible in the CIE RGB color space. Some humanly perceived color sensations cannot be reconstructed using a purely additive color model (adding three positively weighted primary values). In that case, a subject was allowed to move one of the primary colors to the target color and instead reproduce this new color mix using the two remaining primaries (subtractive model). The value of the selected, moved primary was then interpreted as negatively weighted in an additive color model.}, the so-called tristimulus values. In summary, these experiments provided the weights of the primary colors needed to match a color at a certain wavelength according to average human color perception. However, some of these weights can have a negative value. \\
\begin{figure}[H]
\centering
\includegraphics[scale=0.3]{introduction/ciergb.png}
\caption[CIE RGB Color Matching Functions]{Plots$\footnotemark$ of CIE 1931 RGB Color matching functions showing the amounts of primaries needed to match a certain wavelength.}
\label{fig:ciergb}
\end{figure}
\footnotetext{These plots have been taken from \texttt{http://en.wikipedia.org/wiki/CIE\textunderscore 1931\textunderscore color\textunderscore space}}
The disadvantage of the CIE RGB color space is that some of its color weights are negative. Thus, scientists derived the CIE XYZ color space, which has no negative color matching functions but is still additive\footnote{Remember, the property of an additive color space is that any color can be represented as a weighted sum of the matching functions of that color space.}. Figure $\ref{fig:matchingfunction}$ visualizes the matching functions of the CIE XYZ space. Another property of the CIE XYZ space is that its Y component represents the luminance of the corresponding color. Usually, the CIE XYZ space is used as a reference color space to define color space transformations. \\
Pragmatically speaking, color spaces describe the range of colors a camera can see, a printer can print or a monitor can display. Formally, we can thus define a color space as a mapping from a range of physically produced colors of mixed light to a standard objective description of the color sensations registered in the eye of an observer, in terms of tristimulus values.
\begin{figure}[H]
\centering
\includegraphics[scale=0.7]{background/somatchingfunctions.png}
\caption[Color Matching Functions]{Plots of the CIE XYZ color matching functions used for rendering in this work}
\label{fig:matchingfunction}
\end{figure}
Interpolating all measured tristimulus values gives us three basis functions, the CIE color matching functions $\overline{x}(\lambda)$, $\overline{y}(\lambda)$, $\overline{z}(\lambda)$, shown in figure $\ref{fig:matchingfunction}$. They are the numerical description of the chromatic response of the observer and can be thought of as the spectral sensitivity curves of three linear light detectors yielding the CIE tristimulus values X, Y and Z. \\
The tristimulus values for a color with a spectral power distribution $I(\lambda)$ are given in terms of the standard observer by:
\begin{align}
X &= \int_{\Lambda} I(\lambda)\,\overline{x}(\lambda)\,d\lambda \nonumber \\
Y &= \int_{\Lambda} I(\lambda)\,\overline{y}(\lambda)\,d\lambda \nonumber \\
Z &= \int_{\Lambda} I(\lambda)\,\overline{z}(\lambda)\,d\lambda
\label{eq:tristimulusvalues}
\end{align}
\noindent
where $\lambda$ is the wavelength and $\Lambda = [380nm, 780nm]$ is the spectrum of visible light. Note that it is not possible to build a display that corresponds to the CIE XYZ color space. For this reason it is necessary to design other color spaces which are physically realizable, efficiently encoded, perceptually uniform and have an intuitive color specification. There are simple conversions between the XYZ color space and other color spaces, such as the RGB color space, described by linear transformations.
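The following Python sketch shows how this pipeline looks numerically: equation $\ref{eq:tristimulusvalues}$ is integrated with the rectangle rule and the result is mapped to linear sRGB with the standard XYZ-to-sRGB matrix (IEC 61966-2-1, D65 white point). The Gaussian matching functions and the flat spectrum are placeholders, not the real CIE data:
\begin{verbatim}
import numpy as np

lam = np.arange(380, 781, 5, dtype=float)        # wavelengths in nm
I = np.ones_like(lam)                            # flat test spectrum
x_bar = np.exp(-0.5 * ((lam - 600) / 40) ** 2)   # stand-ins for the
y_bar = np.exp(-0.5 * ((lam - 555) / 45) ** 2)   # tabulated CIE matching
z_bar = np.exp(-0.5 * ((lam - 450) / 30) ** 2)   # functions

d_lam = lam[1] - lam[0]
X = np.sum(I * x_bar) * d_lam                    # rectangle-rule integrals
Y = np.sum(I * y_bar) * d_lam
Z = np.sum(I * z_bar) * d_lam

# One such linear transformation: CIE XYZ -> linear sRGB (D65).
M = np.array([[ 3.2406, -1.5372, -0.4986],
              [-0.9689,  1.8758,  0.0415],
              [ 0.0557, -0.2040,  1.0570]])
rgb = M @ np.array([X, Y, Z])
\end{verbatim}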
\subsection{Spectral Rendering}
When rendering an image, most of the time we use colors described in a certain RGB color space. However, an RGB color space results from a color space transformation of the tristimulus values, which themselves are inherent to the human visual system. Therefore, many physical phenomena are poorly modelled when the rendering relies on RGB colors. Using only RGB colors for rendering is like assuming that a given light source emits light of only three particular wavelengths, which in reality is rarely the case. Spectral rendering refers to using a whole wavelength spectrum, e.g.\ the human visible light spectrum, instead of only the range of RGB values in order to render an illuminated scene. This captures the physical reality of specific light sources far more accurately. Keep in mind that even when we make use of a spectral rendering approach, we have to convert the final spectra to RGB color values when we want to display an image on an actual display.
\subsection{Rendering Equation}
\label{sec:renderingequation}
In Computer Graphics we are interested in rendering realistic images of a given scene. One common choice is to use the rendering equation when the BRDF of the involved scene materials is known. This equation models the amount of radiance emitted from a point on a surface material along a particular viewing direction. Now, let us assume we are given incoming light from a direction with solid angle $\omega_i$, that $\theta_i$ is its angle of incidence and that $\omega_r$ is the solid angle of the viewing direction. Further, let $\lambda$ denote the wavelength\footnote{Notice that, to keep our terms simple, we have dropped all $\lambda$ subscripts for spectral radiance quantities.} and $\Omega$ the hemisphere of integration for the incoming light. Then, using the definition of the $BRDF_\lambda$ in equation $\ref{eq:defbrdf}$, we can derive:
\begin{alignat}{4}
& f_r(\omega_i, \omega_r) &&= \frac{dL_r(\omega_r)}{L_i(\omega_i)\cos(\theta_i)d\omega_i} \nonumber \\
\Rightarrow{} & f_r(\omega_i, \omega_r) L_i(\omega_i)\cos(\theta_i)d\omega_i &&= dL_r(\omega_r) \nonumber \\
\Rightarrow{} & \int_{\Omega}f_r(\omega_i, \omega_r) L_i(\omega_i)\cos(\theta_i)d\omega_i &&= \int_{\Omega}dL_r(\omega_r) \nonumber\\
\Rightarrow{} & L_r(\omega_r) &&= \int_{\Omega}f_r(\omega_i, \omega_r) L_i(\omega_i)\cos(\theta_i)d\omega_i
\label{eq:initialbrdf}
\end{alignat}
The last equation is the so-called rendering equation.\label{sec:dirlighsourceassumption} We assume that our incident light comes from a directional, unpolarized light source like sunlight, and therefore its radiance is given as:
\begin{equation}
L_{\lambda}(\omega)=I(\lambda)\delta(\omega-\omega_i)
\label{eq:radiancedirlightsource}
\end{equation}
where $I(\lambda)$ is the intensity of the relative spectral power for the wavelength $\lambda$. By plugging the identity of equation $\ref{eq:radiancedirlightsource}$ into our current rendering equation $\ref{eq:initialbrdf}$, we get:
\begin{align}
L_{\lambda}(\omega_r)
& = \int_{\Omega} BRDF_{\lambda}(\omega_i, \omega_r) L_{\lambda}(\omega_i) \cos(\theta_i) d\omega_i \nonumber \\
& = BRDF_{\lambda}(\omega_i, \omega_r) I(\lambda) \cos(\theta_i)
\label{eq:deribrdfwithdirsource}
\end{align}
where $L_{\lambda}(\omega_i)$ is the incident radiance and $L_{\lambda}(\omega_r)$ is the radiance reflected by the given surface. Note that the integral in equation $\ref{eq:deribrdfwithdirsource}$ vanishes since $\delta(\omega-\omega_i)$ is nonzero only for $\omega = \omega_i$.
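Since the integral collapses, evaluating the reflected radiance per wavelength reduces to a simple product, as the following sketch (with purely illustrative values) makes explicit:
\begin{verbatim}
import numpy as np

def reflected_radiance(brdf_val, I_lam, cos_theta_i):
    # L_r(lambda) = BRDF_lambda(w_i, w_r) * I(lambda) * cos(theta_i),
    # clamped so that light from below the surface contributes nothing.
    return brdf_val * I_lam * max(cos_theta_i, 0.0)

# Illustrative values: BRDF value 0.25, unit intensity, 30 deg incidence.
print(reflected_radiance(0.25, 1.0, np.cos(np.radians(30.0))))
\end{verbatim}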
\section{Wave Theory for Light and Diffraction}
\subsection{Basics in Wave Theory}
In order to prepare the reader for relevant concepts in physics which are used later for derivations and reasoning within this thesis, I am going to provide a quick introduction to the basics of wave theory and related concepts. In physics, a wave describes a disturbance that travels from one location to another through a certain medium. The disturbance temporarily displaces the particles of the medium from their rest position, which results in an energy transport along the medium during wave propagation. Usually, when talking about waves we are actually referring to a complex-valued function which is a solution of the so-called \emph{wave equation}, modelling how the wave disturbance evolves in space over time. \\
There are two types of waves: (a) mechanical waves, which deform their medium during propagation, like sound waves, and (b) electromagnetic waves, consisting of periodic oscillations of an electromagnetic field, such as light. As illustrated in figure $\ref{fig:wavebasics}$, there are several properties one can use to compare and distinguish different waves:
\begin{figure}[H]
\centering
\includegraphics[scale=0.65]{background/waveschematicimpr.png}
\caption[Sinewave]{Simplified, one-dimensional real-valued wave function$\footnotemark$, giving an idea of some important wave properties. We denote the crest of a wave as the highest point relative to the equilibrium line (zero height along the time axis) and similarly the trough as the lowest point.}
\label{fig:wavebasics}
\end{figure}
\footnotetext{Image source: http://neutrino.ethz.ch/Vorlesung/FS2013/index.php/vorlesungsskript}
\begin{description}
\item[Wavelength:] Usually denoted by $\lambda$, it measures the spatial distance from one point to the next at which the shape of the wave repeats.
\item[Amplitude:] It is denoted by $A$ and there are two possible interpretations. Firstly, it is a measure of the height from the equilibrium point to the highest point of a crest or the lowest point of a trough on the wave. This means that the amplitude can be positive or negative; usually, however, one is only interested in the absolute value of an amplitude, i.e.\ the magnitude of a wave. For light waves it is a relative measure of intensity or brightness compared to other light waves of the same wavelength. Secondly, it can be interpreted as a measure of how much energy a wave carries, where the greater the absolute amplitude value, the bigger the amount of energy being carried.
\item[Frequency:] Denoted by $f$, it is a measure of the number of waves passing through a particular point in the propagation medium during one unit of time.
\item[Phase:] It is denoted by $\varphi$. It describes either the offset of the initial position of a wave or the relative displacement between waves having the same frequency. Two waves with the same frequency are said to be \emph{in phase} if they have the same phase, meaning that they line up everywhere. On the other hand, two waves are said to be \emph{out of phase} if they have the same frequency but different phases. As a remark, we denote by $\omega$ the angular frequency, which is equal to $2\pi f$ (a small numerical sketch of these quantities follows this list).
\end{description}
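To make these quantities concrete, the following sketch samples a one-dimensional harmonic wave $u(x,t) = A\cos(kx - \omega t + \varphi)$; all numbers are purely illustrative:
\begin{verbatim}
import numpy as np

A, lam, f, phi = 1.0, 500e-9, 6e14, 0.0    # amplitude, wavelength (m),
                                           # frequency (Hz), phase offset
k = 2 * np.pi / lam                        # wave number
omega = 2 * np.pi * f                      # angular frequency

x = np.linspace(0, 4 * lam, 1000)          # four wavelengths of space
u = A * np.cos(k * x - omega * 0.0 + phi)  # displacement at time t = 0
\end{verbatim}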
A geometrical property of waves is their wavefront. This is a surface or line along the path of wave propagation on which the disturbance at every point has the same phase. Basically, a wavefront can have any kind of shape, although three prominent types of wavefronts are spherical, cylindrical and plane wavefronts. If a point in an isotropic medium is sending out waves in three dimensions, then the corresponding wavefronts are spheres centered on the source point. Hence a spherical wavefront is the result of a spherical wave, also denoted as a wavelet. Note that for electromagnetic waves, the phase is a position of a point in time on a wavefront cycle (motion of the wave over a whole wavelength), where a complete cycle is defined as being equal to $360\degree$.
\subsection{Wave Interference}
Having seen that a wave is simply a traveling disturbance along a medium with some special properties, one might ask what happens when several waves travel in the same medium. In particular, we are interested in how these waves interact with each other. In physics, the term interference denotes the interaction of waves when they encounter each other at a point along their propagation medium. At each point where two waves superpose, their total displacement is the sum of the displacements of each individual wave at that point. The resulting wave then has a greater or lower amplitude than each separate wave, and we can interpret interference as an addition operation on waves. Two extreme scenarios are illustrated in figure $\ref{fig:interferenceconcept}$ for waves with the same frequency and equal amplitude. There are basically three variants of interference which can occur, depending on how the crests and troughs of the waves match up:
\begin{figure}[H]
\centering
\includegraphics[scale=0.65]{background/interferenceconcept.png}
\caption[interference]{Interference scenarios$\footnotemark$ when two waves meet: constructive interference is illustrated on the left-hand side and destructive interference on the right-hand side.}
\label{fig:interferenceconcept}
\end{figure}
\footnotetext{Image source: \texttt{http://en.wikipedia.org/wiki/Interference\textunderscore(wave\textunderscore propagation)} }
\begin{itemize}
\item A crest of a wave meets a crest of another wave and similarly a trough meets a trough of another wave. This scenario is denoted as constructive interference and occurs at any location along the medium where the two interfering waves have a displacement in the same direction. Equivalently, the phase difference between the waves is a multiple of $2\pi$. The resulting amplitude at that point is then much larger than the amplitude of an individual wave: for two waves with equal amplitude interfering constructively, the resulting amplitude is twice as large as the amplitude of an individual wave (see the sketch after this list).
\item A crest of a wave meets a trough of another wave and vice versa. This scenario is denoted as destructive interference and occurs at any location along the medium where the two interfering waves have a displacement in opposite directions. Equivalently, the phase difference between the waves is an odd multiple of $\pi$. Waves of equal amplitude then completely cancel each other out at any point where they superimpose.
\item If the phase difference between two waves lies between the first two scenarios, then the magnitude of the displacement lies between the minimal value from destructive interference and the maximal value from constructive interference.
\end{itemize}
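The following sketch reproduces the three cases numerically by superposing two equal-amplitude sine waves with phase difference $\Delta\varphi$:
\begin{verbatim}
import numpy as np

x = np.linspace(0, 4 * np.pi, 1000)
for dphi in (0.0, np.pi, np.pi / 2):
    total = np.sin(x) + np.sin(x + dphi)   # superposition of the two waves
    print(dphi, np.abs(total).max())
# dphi = 0    -> amplitude 2.0  (constructive)
# dphi = pi   -> amplitude ~0.0 (destructive)
# dphi = pi/2 -> amplitude ~1.41, between the two extremes
\end{verbatim}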
Keep in mind that when two or more waves interfere with each other, the resulting wave may have a different frequency. This means that interfering waves coming from two light sources with a certain color may produce light of a color different from their own.
\subsection{Wave Coherence}
\label{sec:wavecoherence}
When considering waves which travel in a shared medium along the same direction, we can examine how their phase difference changes over time. Formulating the change in their relative phase as a function of time provides a quantitative measure of the synchronization between two waves, the so-called wave coherence. In order to better understand this concept, let us consider a perfect mathematical sine wave and a second wave which is a phase-shifted replica of the first one. A property of these mathematical waves is that they keep their shape for an infinite amount of time (i.e.\ over infinitely many propagated wavelengths). In our scenario, both waves travel along the same direction in the same medium, as illustrated in figure $\ref{fig:coherencesinsignal}$.
\begin{figure}[H]
\centering
\includegraphics[scale=0.32]{background/coherencesinsignal.png}
\caption[Wave Coherence]{Two mathematical sine waves which are perfectly coherent, meaning that their phase difference is constant at every point in time.}
\label{fig:coherencesinsignal}
\end{figure}
\noindent
Taking the phase difference between these two sine waves always yields a constant number. Therefore, those two waves are said to be coherent and hence perfectly synchronous over time. Notice that this scenario is completely artificial, since in nature there are no perfect mathematical sine waves. In general, the phase difference is a function of time $p(t)$: the more coherent two waves are, the slower this function changes over time.
In fact, two waves are said to be coherent if they have the same frequency and a constant phase difference over time; ideally they are generated at the same time with the same frequency, amplitude and phase. Conversely, waves are considered incoherent, or asynchronous, if they have no stable phase difference, meaning that $p(t)$ varies heavily over time. Coherence describes whether waves will tend to interfere with each other constructively or destructively at a certain point in time and space; it is thus the property of waves that enables stationary interference. The more correlated two waves are, the higher their degree of coherence. In physics, coherence between waves is quantified by the cross-correlation function, which basically predicts the value of a second wave using the value of the first one (a small numerical sketch follows the list below). There are two basic coherence classifications:
\begin{itemize}
\item Spatial coherence deals with the question over what distance between two points in space, across the span of a wave, a significant effect of stationary interference occurs when averaged over time. This is formally answered by considering the correlation between the wave at different points in space. The range of distance with significant coherence is also denoted as the coherence area.
\item Temporal coherence examines how well two waves observed at two different moments in time correlate with each other. It may thus be used to predict how well a wave interferes temporally with itself. Mathematically, this kind of coherence is computed by measuring the correlation between the value of the wave and a delayed version of itself. The coherence time denotes the maximum time delay for which the waves are still coherent. The distance a wave travels during the coherence time is denoted as the coherence length.
\end{itemize}
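As announced above, here is a small numerical sketch of this quantification: two noiseless sine waves of equal frequency keep a stable phase difference and stay strongly correlated, while a random-walk phase destroys the correlation (all parameters are illustrative):
\begin{verbatim}
import numpy as np

t = np.linspace(0.0, 1.0, 10_000)
w1 = np.sin(2 * np.pi * 50 * t)
w2 = np.sin(2 * np.pi * 50 * t + 0.3)   # constant phase shift: coherent
w3 = np.sin(2 * np.pi * 50 * t
            + 0.05 * np.cumsum(np.random.randn(t.size)))  # drifting phase

def corr(a, b):
    # Normalized cross-correlation at zero lag.
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

print(corr(w1, w2))   # close to cos(0.3): stable phase difference
print(corr(w1, w3))   # near 0 on average: no stable phase, incoherent
\end{verbatim}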
\subsection{Huygens' Principle}
\label{sec:huygensprincipledef}
Besides the phases and amplitudes of waves, their propagation directly affects the interaction between different waves and how they can interfere with each other. This is why it makes sense to formulate a model which allows us to predict the position of a moving wavefront and how it moves in space. This is where \emph{Huygens' principle} comes into play. It states that any point of a wavefront may be regarded as a point source that emits spherical wavelets in every direction. Within the same propagation medium, these wavelets travel at the same speed as their source wavefront. The position of the new wavefront results from superimposing all of these emitted wavelets. Geometrically, the surface tangential to the secondary waves can be used to determine the future position of the wavefront; the new wavefront therefore envelops all emitted wavelets. Figure $\ref{fig:huygensprinciple}$ visualizes Huygens' principle for a wavefront reflected off a plane surface.
\begin{figure}[H]
\centering
\includegraphics[scale=0.6]{background/huygensprinciple.png}
\caption[Huygens' Principle]{A moving wavefront (blue) encounters an obstacle (a surface shown in brown colors) and produces a new wavefront (green) as a result of the superposition of all secondary wavelets.}
\label{fig:huygensprinciple}
\end{figure}
\subsection{Wave Diffraction}
Revisiting Huygens' principle, we know that each point on a wavefront can be considered as a source of a spherical wavelet which propagates in every direction. But what exactly happens when a wave's propagation is only partially occluded by an object? What is the outcome of applying Huygens' principle to this case? An example scenario is shown in figure $\ref{fig:wavediffraction}$.
\begin{figure}[H]
\centering
\subfigure[transmissive grating]{
\includegraphics[scale=0.45]{background/diffractiontransmissive2.png}
\label{fig:wavediffractiontransm}
}
~
\subfigure[reflective grating]{
\includegraphics[scale=0.6]{background/reflectivegrating.png}
\label{fig:wavediffractionrefl}
}
\caption[Diffracted Wave]{Illustration$\footnotemark$ of a diffraction scenario in which a plane wavefront passes through an opening of a certain width and is bent, also showing the intensity of the resulting wave along a straight line in its path.}
\label{fig:wavediffraction}
\end{figure}
\footnotetext{Image source:\texttt{http://cronodon.com/images/Single\textunderscore slit\textunderscore diffraction\textunderscore 2b.jpg} }
Whenever a propagating wavefront is partially occluded by an obstacle, the wave does not only move along its propagation direction but is also bent around the edges of the obstacle. In physics, this phenomenon is called diffraction. Waves are diffracted due to the interference which occurs among all wavelets when applying Huygens' principle to the case where a wavefront hits an obstacle. Generally, the effect of diffraction is most pronounced for waves whose wavelength is roughly similar in size to the dimensions of the occluding object. Conversely, if the wavelength is much smaller, then almost no wave diffraction is perceivable at a far-off distance. This relationship between the strength of wave diffraction and the wavelength is conceptually illustrated in figure $\ref{fig:diffractionrelationshipdimension}$ for a wave transmitted through an opening in a surface. A reflective example of diffraction is provided in figure $\ref{fig:huygensprinciple}$.
\begin{figure}[H]
\centering
\subfigure[W $\ll$ $\lambda$]{
\includegraphics[scale=1.0]{background/Aa2l.png}
\label{fig:a1}
}
~
\subfigure[W $\approx$ $2 \lambda$]{
\includegraphics[scale=1.0]{background/Aastl.png}
\label{fig:a2}
}
~
\subfigure[W $\approx$ $6 \lambda$]{
\includegraphics[scale=1.0]{background/Aa6l.png}
\label{fig:a3}
}
\caption[Diffraction for different $\texttt{Wavelength/Slit-Width}$ ratio]{Illustration$\footnotemark$ of how diffraction changes when a wave with wavelength $\lambda$ propagates through a slit of width equal $W$.}
\label{fig:diffractionrelationshipdimension}
\end{figure}
\footnotetext{Image taken from:\texttt{http://neutrino.ethz.ch/Vorlesung/FS2013/index.php/vorlesungsskript}, chapter 9, figure 9.14 }
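Numerically, single-slit patterns like those above can be reproduced directly from Huygens' principle by summing the contributions of point sources distributed across the slit; the far-field intensity approaches the classic $\mathrm{sinc}^2$ profile. All numbers in the following sketch are illustrative:
\begin{verbatim}
import numpy as np

lam = 500e-9                  # wavelength (m)
W = 6 * lam                   # slit width, cf. case (c) in the figure
k = 2 * np.pi / lam

sources = np.linspace(-W / 2, W / 2, 400)         # Huygens point sources
theta = np.linspace(-np.pi / 3, np.pi / 3, 2000)  # observation directions

# Far field: a source at offset d contributes a phase k * d * sin(theta).
field = np.exp(1j * k * np.outer(np.sin(theta), sources)).sum(axis=1)
intensity = np.abs(field) ** 2
intensity /= intensity.max()  # normalized diffraction pattern over theta
\end{verbatim}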
In everyday life, we can see the direct outcome of wave diffraction in the form of structural colors. There are examples from nature, such as the iridescent colors on various snake skins, as well as artificial examples, such as the colorful patterns noticeable when taking a close look at an illuminated compact disc. All these examples comprise a surface made of highly regular nanostructures which diffract incident light significantly. Such a nanostructure exhibiting a certain degree of regularity is also denoted as a diffraction grating. Further information about diffraction gratings can be found in section $\ref{sec:diffractiongrating}$.
\section{Stam's BRDF formulation}
\label{sec:sumstam}
The theoretical foundation of this thesis is based on the pioneering work of J. Stam$\cite{diffstam}$. In this work, Stam derived a BRDF formulation to model the effect of far-field diffraction for various analytical anisotropic surfaces. His model relies on the so-called scalar wave theory of diffraction, for which a wave\footnote{In general, a wave is a complex-valued vector satisfying Maxwell's equations. A scalar-valued wave only satisfies the Helmholtz equation. For further information please visit \texttt{http://en.wikipedia.org/wiki/Maxwells\textunderscore equations} and \texttt{http://en.wikipedia.org/wiki/Helmholtz\textunderscore equation}.} is assumed to be a complex-valued scalar. Thus, Stam's BRDF formulation does not take into account the polarization of the incident light. Fortunately, light sources like sunlight and light bulbs are unpolarized. The principle behind J. Stam's approach is illustrated in figure $\ref{fig:meaningofstamsapproach}$.
\begin{figure}[H]
\centering
\includegraphics[scale=0.6]{background/stamsapproachsmeaning2.png}
\caption[Idea behind Stam's approach]{Illustration of secondary wavelets reflected off a surface. An integration over all secondary sources resulting from an incident wave according to Huygens' principle will give us an identity for the total contribution at a certain point in space.}
\label{fig:meaningofstamsapproach}
\end{figure}
An incident wave $p_i$ from a light source encounters a surface representing a diffraction grating. According to Huygens' principle, at any point $i$ on the grating at which the incident wave meets the grating, a secondary spherical wavelet $p_{r,i}$ is emitted. A viewer, indicated by a gray circle in the figure, perceives the superimposed contribution of all wavelets along the surface $S$ (indicated in the figure by an integration symbol), which directly follows the laws of wave interference. Therefore, the resulting color which an observer sees is the final radiance at that point, which results from the stationary interference of all emitted secondary wavelets due to Huygens' principle. \\
A further assumption in Stam's paper is that the waves emanating from the source are stationary, which implies that the wave is a superposition of independent monochromatic waves. This further implies that each wave is associated with a definite wavelength $\lambda$. Directional light sources such as sunlight fulfill this assumption, and since we are using these kinds of light sources for our simulations, Stam's model can be used for our modelling purposes. \\
The main idea of his model is to formulate a BRDF as a function of the Fourier transform applied to a certain correlation function related to the given height field. His model assumes homogeneity of the height field structure and provides an approximation of far-field diffraction effects. The geometrical setup of his model is illustrated in figure $\ref{fig:geometricsetup}$. The classes of surfaces his model is able to support either exhibit a very regular structure or may be considered as a superposition of bumps forming a periodic-like structure. Therefore, the surfaces he deals with can either be modelled by probabilistic distributions or have a direct analytical representation. Both cases allow him to derive an analytical solution for his BRDF model.
\begin{figure}[H]
\centering
\includegraphics[scale=0.8]{background/stamsinputp.png}
\caption[Stam's geometrical setup]{Illustration$\footnotemark$ of the geometrical setup of Stam's approach, where $\omega_i$ is a direction pointing towards the light source, $\omega_r$ points towards the viewer, $n$ is the surface normal and $(u,v,w)$ are the components of the vector $-\omega_i - \omega_r$.}
\label{fig:geometricsetup}
\end{figure}
\footnotetext{Modified image which originally has been taken from the poster of D.S. Dhillon et al.$\cite{diffourp}$.}
Figure $\ref{fig:geometricsetup}$ illustrates schematically the geometrical setup used for Stam's BRDF formulation. Incident light with direction $\omega_i$ hits the surface of a given height field at the position $p$. The direction vector $\omega_r$ points towards the viewer. After the incident light has hit the surface, a spherical wavelet is reflected off this hit position. The direction vector of the wavelet is computed from the incident and viewing directions as shown in equation $\ref{eq:uvw}$:
\begin{equation}
(u,v,w) = -\omega_i - \omega_r
\label{eq:uvw}
\end{equation}
These coordinates will later be used to compute the total contribution of all secondary sources in Stam's BRDF in equation $\ref{eq:mainstam}$. In Stam's derivation, the phase difference between the incident wave and the wave emitted from the given height field is denoted by the auxiliary function $\Phi$ defined as in equation $\ref{eq:heightfieldphase}$:
\begin{equation}
\Phi(x,y) = \frac{2 \pi}{\lambda} w h(x,y)
\label{eq:heightfieldphase}
\end{equation}
Then, any secondary wavelet $p$ emitted from the given surface is equal to:
\begin{equation}
p(x,y) = e^{i\Phi(x,y)}
\label{eq:px}
\end{equation}
Using the idea presented for figure $\ref{fig:meaningofstamsapproach}$ and performing the further mathematical steps described in Stam's paper leads us to the final BRDF representation. This BRDF models the total contribution of all secondary sources reflected off the provided surface $h$ in the direction $\omega_r$:
\begin{equation}
BRDF_{\lambda}(\omega_i, \omega_r) = \frac{k^2 F^2 G}{4\pi^2 A w^2} \langle \left|P(ku, kv)\right|^2\rangle
\label{eq:mainstam}
\end{equation}
where $F$ denotes the Fresnel coefficient and $G$ is the so-called geometry term\footnote{The geometry term expresses the correction factor for performing an integration over an area instead of over a surface. For further information, please have a look at \texttt{http://en.wikipedia.org/wiki/Surface\textunderscore integral} and read the definition of \emph{surface element}.}, which is equal to:
\begin{equation}
G =\frac{(1 + \omega_i \cdot \omega_r)^2}{\cos(\theta_i)\cos(\theta_r)}
\label{eq:geometricterm}
\end{equation}
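To make the structure of equation $\ref{eq:mainstam}$ explicit, here is a schematic NumPy sketch that evaluates it for a single tabulated height-field patch. This is not Stam's implementation: the Fresnel term defaults to one, the ensemble average $\langle\cdot\rangle$ over patches is dropped, the surface normal is assumed to point along $z$, and all names are illustrative:
\begin{verbatim}
import numpy as np

def stam_brdf(h, dx, lam, w_i, w_r, F=1.0):
    # h: square height field (m) sampled with spacing dx; w_i, w_r: unit
    # vectors towards light and viewer; surface normal assumed along z.
    k = 2 * np.pi / lam
    u, v, w = -w_i - w_r                   # components of -w_i - w_r
    Phi = k * w * h                        # phase shift from the heights
    p = np.exp(1j * Phi)                   # secondary-wavelet field p(x, y)
    P = np.fft.fft2(p) * dx * dx           # Fourier transform of p
    freqs = 2 * np.pi * np.fft.fftfreq(h.shape[0], d=dx)
    iu = np.argmin(np.abs(freqs - k * u))  # nearest sample to (ku, kv)
    iv = np.argmin(np.abs(freqs - k * v))
    A = (h.shape[0] * dx) ** 2             # patch area
    G = (1 + np.dot(w_i, w_r)) ** 2 / (abs(w_i[2]) * abs(w_r[2]))
    return k**2 * F**2 * G / (4 * np.pi**2 * A * w**2) * np.abs(P[iu, iv])**2
\end{verbatim}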
\myparagraph{Fourier Transformation Sign Convention}
\label{sec:electricalengeneeringftconvention}
\noindent
One last word about the Fourier transform terms that Stam uses in his derivation. By the convention commonly used in physics, the transform we are dealing with is the inverse Fourier transform. However, especially in electrical engineering (EE), it is common to define the inverse Fourier transform as what physicists call the Fourier transform and vice versa. To be more precise, there are two definitions of the Fourier transform: the one commonly used by physicists, where the exponent carries a factor $-i$, and the one used in EE, where the integration is over an exponential with a factor $i$. Further information about the sign convention in Fourier transforms can be looked up in the book Quantum Mechanics for Electrical Engineers$\cite{signconvention}$. Note that substituting the sign in the physicists' definition of the Fourier transform gives us the definition used in EE, as shown in equation $\ref{eq:signchangementconvention}$:
\begin{align}
\mathcal{F}_{FT}\{f\}(w)
& = \int_{\mathds{R}^n} f(x)e^{-iwx} dx \nonumber\\
& = \int_{\mathds{R}^n} f(x)e^{i\hat{w}x} dx \nonumber\\
& = \mathcal{F}^{-1}_{FT}\{f\}(\hat{w})
\label{eq:signchangementconvention}
\end{align}
where $\hat{w}$ is equal to $-w$. \\
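This sign relation can be checked numerically with NumPy, whose \texttt{fft} follows the $-i$ convention: up to NumPy's $1/n$ normalization of \texttt{ifft}, the forward transform evaluated at $-w$ equals the inverse transform evaluated at $w$:
\begin{verbatim}
import numpy as np

x = np.random.randn(64) + 1j * np.random.randn(64)
F = np.fft.fft(x)                  # forward transform, exp(-i w t) kernel
Finv = np.fft.ifft(x) * x.size     # inverse transform, 1/n factor removed

m = 5
print(np.allclose(F[-m], Finv[m])) # True: F(-w) equals the inverse at w
\end{verbatim}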
The height fields we are dealing with in this work are, however, natural gratings containing complex-shaped nanostructures and hence far from being regularly aligned. The reason why Stam's approach in its current form is not suitable for our purpose is twofold: first, his statistical approach does not capture the complexity of natural gratings accurately enough, and second, it is far too slow to be usable for interactive rendering, since his BRDF needs the evaluation of a Fourier transform for every change of direction. \\
In the next chapter we are going to adapt Stam's BRDF model so that it can handle the kind of surfaces we are dealing with and achieves a runtime complexity that permits interactive rendering.
classdef NonSeqEvtsState < matlab.mixin.SetGet
    %NonSeqEvtsState Holds the non-sequential events associated with a
    %launch vehicle event.

    properties
        nonSeqEvts LaunchVehicleNonSeqEvents
        event LaunchVehicleEvent
    end

    methods
        function obj = NonSeqEvtsState(event, nonSeqEvts)
            obj.event = event;
            obj.nonSeqEvts = nonSeqEvts;
        end
    end
end
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt

BLOCK_SIZE = 4

### read image ###
img = Image.open('test_B.bmp')  # color image
img = np.array(img).astype(np.float32) / 256
print(img.shape, img.dtype)
row, col = img.shape[:2]

# Cut the image into flattened BLOCK_SIZE x BLOCK_SIZE x 3 blocks.
data = []
for i in range(0, row, BLOCK_SIZE):
    for j in range(0, col, BLOCK_SIZE):
        data.append(img[i:i + BLOCK_SIZE, j:j + BLOCK_SIZE, :].reshape(-1))
data = np.array(data)
### read image ###


def rebuildimage(data1):
    # Reassemble the rows of data1 (one flattened block each) into an image.
    img1 = np.zeros(img.shape)
    for i in range(0, row, BLOCK_SIZE):
        for j in range(0, col, BLOCK_SIZE):
            img1[i:i + BLOCK_SIZE, j:j + BLOCK_SIZE, :] = \
                data1[i // BLOCK_SIZE * (col // BLOCK_SIZE) + j // BLOCK_SIZE] \
                .reshape((BLOCK_SIZE, BLOCK_SIZE, 3))
    return img1


import lbg

plt.subplot(231); plt.imshow(img); plt.title('input')

# Vector quantization via the Linde-Buzo-Gray algorithm at two codebook sizes.
data1 = lbg.lbg(data, 11)
img1 = rebuildimage(data1)
plt.subplot(232); plt.imshow(img1); plt.title('lbg, 1/4')

data1 = lbg.lbg(data, 10)
img1 = rebuildimage(data1)
plt.subplot(233); plt.imshow(img1); plt.title('lbg, 1/8')

import pca

# KLT/PCA compression keeping 1, 4 and 12 eigenvectors per block.
data1 = pca.pca(data.copy(), 1)
img1 = rebuildimage(data1)
plt.subplot(236); plt.imshow(img1); plt.title('klt, 1 eig, 1/48')

data1 = pca.pca(data.copy(), 4)
img1 = rebuildimage(data1)
plt.subplot(235); plt.imshow(img1); plt.title('klt, 4 eig, 1/12')

data1 = pca.pca(data.copy(), 12)
img1 = rebuildimage(data1)
plt.subplot(234); plt.imshow(img1); plt.title('klt, 12 eig, 1/4')

plt.show()
import os
import argparse
import warnings
import numpy as np
import torch
from torch import nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from tqdm import tqdm
class Generator(nn.Module):
"""Generator structure from InfoGAN paper"""
def __init__(self,latent_dim):
super(Generator, self).__init__()
self.latent_dim = latent_dim
self.fc1 = nn.Sequential(
nn.Linear(self.latent_dim,1024),
nn.BatchNorm1d(1024),
nn.ReLU(),
)
self.fc2 = nn.Sequential(
nn.Linear(1024,7*7*128),
nn.BatchNorm1d(7*7*128),
nn.ReLU(),
)
self.upconv1 = nn.Sequential(
nn.ConvTranspose2d(128, 64, kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
)
self.upconv2 = nn.Sequential(
nn.ConvTranspose2d(64, 1, kernel_size=4,stride=2,padding=1),
nn.Sigmoid()
)
def forward(self,x):
x = self.fc1(x)
x = self.fc2(x)
x = x.view(x.shape[0],128,7,7)
x = self.upconv1(x)
x = self.upconv2(x)
return x
class Discriminator(nn.Module):
"""Discriminator structure from InfoGAN paper"""
def __init__(self):
super(Discriminator, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=4, stride=2),
nn.LeakyReLU(0.1),
)
self.conv2 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=4, stride=2),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.1),
)
self.fc1 = nn.Sequential(
nn.Linear(128*5*5,1024),
nn.BatchNorm1d(1024),
nn.LeakyReLU(0.1),
)
self.fc2 = nn.Sequential(
nn.Linear(1024,1),
nn.Sigmoid()
)
def forward(self,x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.shape[0],128*5*5)
fe_out = self.fc1(x)
y = self.fc2(fe_out)
return y,fe_out
class Qnet_Categorical(nn.Module):
"""Q-part of network from InfoGAN paper"""
def __init__(self,n_cat):
super(Qnet_Categorical, self).__init__()
self.n_cat = n_cat
self.fcQ = nn.Sequential(
nn.Linear(1024,128),
nn.BatchNorm1d(128),
nn.LeakyReLU(0.1),
nn.Linear(128,self.n_cat)
)
def forward(self,x):
y_logits = self.fcQ(x)
return y_logits
class InfoGAN_MNIST:
def __init__(self,args,n_latent,n_cat):
self.batch_size = args.batch_size
self.num_epochs = args.epochs
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.n_noise = n_latent
self.n_cat = n_cat
self.n_latent = n_latent+n_cat
self.cat_prob = torch.ones(n_cat)*0.1
self.generator = Generator(self.n_latent).to(self.device)
self.discriminator = Discriminator().to(self.device)
self.qnet = Qnet_Categorical(self.n_cat).to(self.device)
self.dset_train = MNIST("./", train=True, download=True, transform=transforms.ToTensor())
self.train_loader = DataLoader(self.dset_train, batch_size=self.batch_size,shuffle=True,
pin_memory=torch.cuda.is_available(),drop_last=True)
self.dset_test = MNIST("./", train=False, download=True, transform=transforms.ToTensor())
self.test_loader = DataLoader(self.dset_test, batch_size=self.batch_size, shuffle=False,
pin_memory=torch.cuda.is_available(),drop_last=True)
def train(self):
self.generator.train()
self.discriminator.train()
self.qnet.train()
lambda_cat = 1
bce_loss = nn.BCELoss()
CE_loss = nn.CrossEntropyLoss()
optimG = torch.optim.Adam(list(self.generator.parameters())+list(self.qnet.parameters()),lr=1e-3,betas=(0.5, 0.99))
optimD = torch.optim.Adam(self.discriminator.parameters(),lr=2e-4,betas=(0.5, 0.99))
batch_size = self.batch_size
true_label = torch.ones(batch_size, 1).to(self.device)
fake_label = torch.zeros(batch_size, 1).to(self.device)
best_test_acc = 0
for epoch in range(self.num_epochs):
with tqdm(total=len(self.train_loader)) as pbar:
pbar.set_description("Epoch {}".format(epoch))
for batch_nr, (img_true, _) in enumerate(self.train_loader):
optimG.zero_grad()
optimD.zero_grad()
noise_vector = torch.randn(batch_size, self.n_noise).to(self.device)
cat_vector = torch.multinomial(self.cat_prob,batch_size,replacement=True)
cat_one_hot = torch.zeros(batch_size,self.n_cat)
cat_one_hot[range(batch_size),cat_vector] = 1
cat_one_hot = cat_one_hot.to(self.device)
img_fake = self.generator(torch.cat((noise_vector,cat_one_hot),dim=1))
y_fake,_ = self.discriminator(img_fake.detach())
y_true,_ = self.discriminator(img_true.to(self.device))
loss_D = bce_loss(y_true, true_label) + bce_loss(y_fake, fake_label)
loss_D.backward()
optimD.step()
y_fake,fe_out = self.discriminator(img_fake)
y_cat_logits = self.qnet(fe_out)
cat_target = cat_vector.long().to(self.device)
loss_Q = lambda_cat*CE_loss(y_cat_logits,cat_target)
loss_G = bce_loss(y_fake, true_label) + loss_Q
loss_G.backward()
optimG.step()
pbar.update()
test_acc = self.eval()
self.generator.train()
self.discriminator.train()
self.qnet.train()
if test_acc>best_test_acc:
torch.save(self.generator.state_dict(),"generator_weights.pth")
torch.save(self.discriminator.state_dict(),"discriminator_weights.pth")
torch.save(self.qnet.state_dict(),"qnet_weights.pth")
best_test_acc = test_acc
def eval(self):
self.generator.eval()
self.discriminator.eval()
self.qnet.eval()
exemplar_idx = [68, 6, 76, 7, 20, 259, 66, 263, 265, 264]
exemplars = []
for idx in exemplar_idx:
            img, img_lbl = self.dset_train[idx]
exemplars.append(img)
exemplar_batch = torch.cat(exemplars,dim=0).unsqueeze(1).to(self.device)
with torch.no_grad():
_,fe_out = self.discriminator(exemplar_batch)
y_cat = self.qnet(fe_out)
exemplar_cat = torch.argmax(torch.softmax(y_cat,dim=1),dim=1)
exemplar_cat = exemplar_cat.cpu().numpy()
cat2class = {cat:cls for cls,cat in enumerate(exemplar_cat)}
if len(cat2class.keys())!=10:
warn_str = """Only {}/10 classes identified from exemplars. Some classes might be mixed.
Consider retraining the network if this is the final iteration.""".format(len(cat2class.keys()))
warnings.warn(warn_str)
running_sum = 0
with tqdm(total=len(self.test_loader)) as pbar:
pbar.set_description("Test loss")
for (test_imgs,test_labels) in self.test_loader:
with torch.no_grad():
_,fe_out = self.discriminator(test_imgs.to(self.device))
y_cat = self.qnet(fe_out)
test_cat = torch.argmax(torch.softmax(y_cat,dim=1),dim=1)
test_cat = test_cat.cpu().numpy()
test_pred_class = np.array([cat2class[cat] if cat in cat2class.keys() else np.nan for cat in test_cat])
running_sum+=sum(test_pred_class==test_labels.numpy())
pbar.update()
            test_accuracy = running_sum/len(self.dset_test)
            pbar.set_postfix({'test_acc': test_accuracy})
return test_accuracy
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=10, type=int)
args = parser.parse_args()
info_gan = InfoGAN_MNIST(args,n_latent=64,n_cat=10)
info_gan.train()
|
{"hexsha": "dc26338c761280465e0c0f119159be4ec053448f", "size": 8676, "ext": "py", "lang": "Python", "max_stars_repo_path": "main_infogan.py", "max_stars_repo_name": "elingaard/infogan-mnist", "max_stars_repo_head_hexsha": "fc50097b7b416400d00c48286c2de9c3a5190eef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-14T02:51:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-14T02:51:31.000Z", "max_issues_repo_path": "main_infogan.py", "max_issues_repo_name": "elingaard/infogan-mnist", "max_issues_repo_head_hexsha": "fc50097b7b416400d00c48286c2de9c3a5190eef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main_infogan.py", "max_forks_repo_name": "elingaard/infogan-mnist", "max_forks_repo_head_hexsha": "fc50097b7b416400d00c48286c2de9c3a5190eef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-14T09:07:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-14T09:07:39.000Z", "avg_line_length": 41.1184834123, "max_line_length": 124, "alphanum_fraction": 0.5749193177, "include": true, "reason": "import numpy", "num_tokens": 1997}
|
import numpy as np
from keras.layers import Input, Embedding, SpatialDropout1D, Bidirectional, LSTM, Flatten, Concatenate, Dense
from keras.initializers import glorot_normal, orthogonal
from keras.models import Model
from sklearn.model_selection import StratifiedKFold
from sklearn import utils
from common.nn.elements import Capsule, DropConnect, Attention
def classify_based_on_probs(predictions, boundary=0.2):
"""Classifies a prediction as 0 or 1 based on the given probability
Parameters
----------
predictions: np.array
The array of predictions. Each prediction is a float on range [0, 1]
boundary: float
The boundary of the classification. When a prediction is above this boundary
it is classified as 1 else as 0.
Returns
-------
An np.array in which each instance is classified as 0 or 1.
"""
labels = (predictions > boundary)
return np.array(labels, dtype=int)
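# Example (hypothetical values): classify_based_on_probs(np.array([0.1, 0.25, 0.9]))
# returns array([0, 1, 1]) with the default boundary of 0.2.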
def evaluate_classification_boundary(predictions, true_labels, boundary, evaluation_func):
"""Function that calculates the score based on a given boundary and evaluation function.
Parameters
----------
predictions: np.array
An array with predictions. Each prediction is a float on range [0, 1]
true_labels: np.array
The real binary y values.
boundary: float
The boundary of the classification. When a prediction is above this boundary
it is classified as 1 else as 0.
evaluation_func: function
The evaluation function that should decide on the score obtained with the given boundary
Returns
-------
    score: float
The score related to the given boundary
"""
predicted_labels = classify_based_on_probs(predictions, boundary=boundary)
score = evaluation_func(predicted_labels, true_labels)
return score
def get_best_threshold(val_predictions, val_true, low=0.1, high=0.8, steps=0.01):
"""Function that defines the best threshold for classification.
Parameters
----------
val_predictions: np.array
The array with predictions of the validation set. Each prediction is a float on range [0, 1]
val_true: np.array
The binary y values of the validation set.
low: float, default 0.1
The lowest threshold tested
high: float, default 0.8
The highest threshold tested
steps: float, default 0.01
The step size with which all thresholds between low and high are evaluated
Returns
-------
best_b: float
The threshold that gave the best score
    best_score: float
The score related to the best threshold
"""
best_b, best_score = 0, 0
for b in np.arange(low, high, steps):
score = evaluate_classification_boundary(val_predictions, val_true, b, binary_f1score)
if score > best_score:
best_b = b
best_score = score
print("Best F1 score is {} at threshold {}".format(best_score, best_b))
return best_b, best_score
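# Sketch of the sweep (hypothetical values): with predictions [0.2, 0.4, 0.6, 0.9]
# and labels [0, 0, 1, 1], any boundary b in [0.4, 0.6) classifies them as
# [0, 0, 1, 1], so the sweep reports F1 = 1.0 at the first such b it visits.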
def binary_f1score(y_predict, y_true):
"""F1 score for boolean / binary problems.
Parameters
----------
y_predict: np.array
The predicted binary y values.
y_true: np.array
The true binary y values.
Returns
-------
f: float
The F1 score of the prediction.
"""
true_positives = np.sum((y_predict == 1) & (y_true == 1))
false_positives = np.sum((y_predict == 1) & (y_true == 0))
false_negatives = np.sum((y_predict == 0) & (y_true == 1))
recall = true_positives / (true_positives + false_negatives)
precision = true_positives / (true_positives + false_positives)
f = 2 * (precision * recall) / (precision + recall)
return f
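# Worked example (hypothetical values): y_predict = [1, 1, 0, 0], y_true = [1, 0, 1, 0]
# gives TP=1, FP=1, FN=1, so precision = recall = 0.5 and F1 = 2*0.25/1.0 = 0.5.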
def create_sequence_model(embedding_matrix, word_index, max_words):
"""This function creates a deep learning model for the Quora competition.
Parameters
----------
embedding_matrix: np.array
The matrix of embeddings used in the embedding layer
word_index: dict
The word_index obtained by the tokenizer
max_words: int
The maximum number of words in a sequence
Returns
-------
A deep learning model
"""
    input_layer = Input(shape=(max_words, ), name='input_layer')  # use the max_words argument instead of a hard-coded 75
x = Embedding(len(word_index) + 1, embedding_matrix.shape[1], weights=[embedding_matrix], trainable=False, input_shape=(max_words,))(input_layer)
x = SpatialDropout1D(rate=0.24)(x)
x = Bidirectional(layer=LSTM(80, return_sequences=True, kernel_initializer=glorot_normal(seed=1029),
recurrent_initializer=orthogonal(gain=1.0, seed=1029)), name='bidirectional_lstm')(x)
# Capsule layer
capsule = Capsule(num_capsule=10, dim_capsule=10, routings=4, share_weights=True)(x) # noqa
capsule = Flatten()(capsule)
capsule = DropConnect(Dense(32, activation="relu"), prob=0.01)(capsule)
# Attention layer
    atten = Attention(step_dim=max_words, name='attention')(x)
atten = DropConnect(Dense(16, activation="relu"), prob=0.05)(atten)
# Concatenate Capsule and Attention layer
x = Concatenate(axis=-1)([capsule, atten])
output_layer = Dense(units=1, activation='sigmoid', name='output')(x)
model = Model(inputs=input_layer, outputs=output_layer)
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam')
return model
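# A minimal builder sketch (hypothetical vocabulary and embedding sizes) to
# sanity-check that the graph above wires together; not a trained model.
def _build_demo_model(vocab_size=1000, embed_dim=300, max_words=75):
    embedding_matrix = np.random.rand(vocab_size, embed_dim)
    word_index = {"w{}".format(i): i for i in range(1, vocab_size)}  # len(word_index)+1 == vocab_size
    return create_sequence_model(embedding_matrix, word_index, max_words)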
def cross_validate_and_predict(x, y, xtest, embedding_matrix, word_index, max_words, folds=4):
"""This function applies cross validation on the dataset. It predicts the y values of
the test sets based on these folds.
Parameters
----------
x: np.array
The x-values of the training set
y: np.array
The y-values of the training set
xtest: np.array
The x-values of the test set
embedding_matrix: np.array
The matrix of embeddings used in the embedding layer
word_index: dict
The word_index obtained by the tokenizer
max_words: int
The maximum number of words in a sequence
folds: int, default 4
The number of folds used in cross-validation
Returns
-------
predictions: np.array
The binary predictions of the test set
"""
print("Start cross validation..")
kfold = StratifiedKFold(n_splits=folds, random_state=99, shuffle=True)
thresholds, scores = [], []
probas = np.zeros(len(xtest))
for train_index, val_index in kfold.split(x, y):
# Start fold
x_train, x_val, y_train, y_val = x[train_index], x[val_index], y[train_index], y[val_index]
# Create model
model = create_sequence_model(embedding_matrix, word_index, max_words)
# Get weights
        weights_arr = utils.class_weight.compute_class_weight('balanced', classes=np.unique(y_train), y=y_train)
        weights = dict(enumerate(weights_arr))  # Keras expects a {class_index: weight} dict
# Fit model
model.fit(x_train, y_train, batch_size=512, epochs=6,
validation_data=(x_val, y_val), verbose=2, class_weight=weights)
# Predict validation labels
predicted_y_val = model.predict([x_val], batch_size=1024, verbose=2).flatten()
# Get and save best threshold and score
boundary, f1score = get_best_threshold(predicted_y_val, y_val)
thresholds.append(boundary)
scores.append(f1score)
print("Fold completed. Validation score = {} at threshold {}".format(f1score, boundary))
# Predict test set with the model
probas += (model.predict([xtest], batch_size=1024, verbose=2).flatten() / folds)
# Get predictions based on all folds
predictions = classify_based_on_probs(probas, boundary=np.mean(thresholds))
return predictions
|
{"hexsha": "c7d7f9a5c164b8c1385129880be6f55e81a22851", "size": 7653, "ext": "py", "lang": "Python", "max_stars_repo_path": "quora/sequence_models.py", "max_stars_repo_name": "CC0210/jads_kaggle", "max_stars_repo_head_hexsha": "6897a43426b7a54325e5301ced9e714d79541c4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-01-20T08:17:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-04T08:40:11.000Z", "max_issues_repo_path": "quora/sequence_models.py", "max_issues_repo_name": "amChristonasis/jads_kaggle", "max_issues_repo_head_hexsha": "a9f67d6779957d30425de8ff16bfd574a4dcd332", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2018-01-19T15:23:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-01T09:51:21.000Z", "max_forks_repo_path": "quora/sequence_models.py", "max_forks_repo_name": "amChristonasis/jads_kaggle", "max_forks_repo_head_hexsha": "a9f67d6779957d30425de8ff16bfd574a4dcd332", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2018-01-17T16:18:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-25T14:14:49.000Z", "avg_line_length": 34.3183856502, "max_line_length": 149, "alphanum_fraction": 0.6773814191, "include": true, "reason": "import numpy", "num_tokens": 1801}
|
import unittest
import torch
import torch.nn as nn
import torch.optim
import numpy as np
import FrEIA.modules as Fm
import FrEIA.framework as Ff
def F_conv(cin, cout):
'''Simple convolutional subnetwork'''
net = nn.Sequential(nn.Conv2d(cin, 32, 3, padding=1),
nn.ReLU(),
nn.Conv2d(32, cout, 3, padding=1))
net.apply(subnet_initialization)
return net
def F_fully_connected(cin, cout):
'''Simple fully connected subnetwork'''
net = nn.Sequential(nn.Linear(cin, 128),
nn.ReLU(),
nn.Linear(128, cout))
net.apply(subnet_initialization)
return net
# the subnet init is needed because, with uninitialized weights,
# the numerical jacobian check gives inf, nan, etc.
def subnet_initialization(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight.data)
m.bias.data *= 0.1
class ConditioningTest(unittest.TestCase):
def __init__(self, *args):
super().__init__(*args)
self.batch_size = 32
self.inv_tol = 1e-4
torch.manual_seed(self.batch_size)
self.inp_size = (3, 10, 10)
self.c1_size = (1, 10, 10)
self.c2_size = (50,)
self.c3_size = (20,)
self.x = torch.randn(self.batch_size, *self.inp_size)
self.c1 = torch.randn(self.batch_size, *self.c1_size)
self.c2 = torch.randn(self.batch_size, *self.c2_size)
self.c3 = torch.randn(self.batch_size, *self.c3_size)
# this is only used for the cuda variant of the tests.
# if true, all tests are skipped.
self.skip_all = False
inp = Ff.InputNode(*self.inp_size, name='input')
c1 = Ff.ConditionNode(*self.c1_size, name='c1')
c2 = Ff.ConditionNode(*self.c2_size, name='c2')
c3 = Ff.ConditionNode(*self.c3_size, name='c3')
conv = Ff.Node(inp,
Fm.RNVPCouplingBlock,
{'subnet_constructor': F_conv, 'clamp': 1.0},
conditions=c1,
name='conv::c1')
flatten = Ff.Node(conv,
Fm.Flatten,
{},
name='flatten')
linear = Ff.Node(flatten,
Fm.RNVPCouplingBlock,
{'subnet_constructor': F_fully_connected, 'clamp': 1.0},
conditions=[c2, c3],
name='linear::c2|c3')
outp = Ff.OutputNode(linear, name='output')
self.test_net = Ff.GraphINN([inp, c1, conv, flatten, c2, c3, linear, outp])
def test_output_shape(self):
if self.skip_all:
raise unittest.SkipTest("No CUDA-device found, skipping CUDA test.")
y = self.test_net(self.x, c=[self.c1, self.c2, self.c3], jac=False)[0]
self.assertTrue(isinstance(y, type(self.x)), f"{type(y)}")
exp = torch.Size([self.batch_size, self.inp_size[0] * self.inp_size[1] * self.inp_size[2]])
self.assertEqual(y.shape, exp, f"{y.shape}")
# Assert that wrong condition inputs throw exceptions
with self.assertRaises(Exception) as context:
y = self.test_net(self.x, c=[self.c2, self.c1, self.c3])
c2a = torch.randn(self.batch_size, self.c2_size[0] + 4, *self.c2_size[1:]).to(self.c2.device)
with self.assertRaises(Exception) as context:
y = self.test_net(self.x, c=[self.c1, c2a, self.c3])
c1a = torch.randn(self.batch_size, *self.c1_size[:2], self.c1_size[2] + 1).to(self.c1.device)
with self.assertRaises(Exception) as context:
y = self.test_net(self.x, c=[c1a, self.c2, self.c3])
def test_inverse(self):
if self.skip_all:
raise unittest.SkipTest("No CUDA-device found, skipping CUDA test.")
y, j = self.test_net(self.x, c=[self.c1, self.c2, self.c3])
x_re, j_re = self.test_net(y, c=[self.c1, self.c2, self.c3], rev=True)
obs = torch.max(torch.abs(self.x - x_re))
obs_j = torch.max(torch.abs(j + j_re))
self.assertTrue(obs < self.inv_tol, f"Inversion {obs} !< {self.inv_tol}")
        self.assertTrue(obs_j < self.inv_tol, f"Jacobian inversion {obs_j} !< {self.inv_tol}")
def test_jacobian(self):
if self.skip_all:
raise unittest.SkipTest("No CUDA-device found, skipping CUDA test.")
# Compute log det of Jacobian
logdet = self.test_net(self.x, c=[self.c1, self.c2, self.c3])[1]
# Approximate log det of Jacobian numerically
logdet_num = self.test_net.log_jacobian_numerical(self.x, c=[self.c1, self.c2, self.c3], h=1e-3)
# Check that they are the same (within tolerance)
        obs = torch.allclose(logdet, logdet_num, atol=0.0, rtol=0.03)
self.assertTrue(obs, f"Numerical Jacobian check {logdet, logdet_num}")
class ConditioningTestCuda(ConditioningTest):
def __init__(self, *args):
super().__init__(*args)
if torch.cuda.is_available():
self.x = self.x.cuda()
self.c1 = self.c1.cuda()
self.c2 = self.c2.cuda()
self.c3 = self.c3.cuda()
self.test_net.cuda()
else:
self.skip_all = True
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "2cb1542a8a5f2f78712107cd107b1ce838a94454", "size": 5356, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_conditioning.py", "max_stars_repo_name": "RussellALA/FrEIA", "max_stars_repo_head_hexsha": "f7a9fd469741fcab7912425047bf9a3965876512", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 507, "max_stars_repo_stars_event_min_datetime": "2018-09-07T13:57:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T13:07:26.000Z", "max_issues_repo_path": "tests/test_conditioning.py", "max_issues_repo_name": "RussellALA/FrEIA", "max_issues_repo_head_hexsha": "f7a9fd469741fcab7912425047bf9a3965876512", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 100, "max_issues_repo_issues_event_min_datetime": "2018-09-26T13:05:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T05:49:32.000Z", "max_forks_repo_path": "tests/test_conditioning.py", "max_forks_repo_name": "RussellALA/FrEIA", "max_forks_repo_head_hexsha": "f7a9fd469741fcab7912425047bf9a3965876512", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 84, "max_forks_repo_forks_event_min_datetime": "2019-01-15T00:39:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T01:44:54.000Z", "avg_line_length": 35.2368421053, "max_line_length": 104, "alphanum_fraction": 0.5890589993, "include": true, "reason": "import numpy", "num_tokens": 1400}
|
! ******************************************************************************************************************************** !
! cpl_comp_rokocn.f90
! rokgem interface rock compositional integrator
! ******************************************************************************************************************************** !
! NOTE: 'surface' refers to ocean surface for flux passing to biogem
! ******************************************************************************************************************************** !
! COUPLE rokgem rock composition
SUBROUTINE cpl_comp_rokocn()
!SUBROUTINE cpl_comp_rokocn( &
! & dum_n_rok, &
! & dum_n_i_rok,dum_n_j_rok, &
! & dum_n_i_ocn,dum_n_j_ocn, &
! & dum_sfcrok, &
! & dum_sfcrok1 &
! & )
! IMPLICIT NONE
! dummy arguments
! integer,intent(in)::dum_n_rok
! integer,intent(in)::dum_n_i_rok,dum_n_j_rok
! integer,intent(in)::dum_n_i_ocn,dum_n_j_ocn
! real,dimension(dum_n_rok,dum_n_i_rok,dum_n_j_rok),intent(in)::dum_sfcrok ! rock-surface tracer composition; rok grid
! real,dimension(dum_n_rok,dum_n_i_ocn,dum_n_j_ocn),intent(out)::dum_sfcrok1 ! rock-surface tracer composition; ocn grid
!
! ANY DIFFERENCE BETWEEN OCEAN AND rock GRIDS WILL HAVE TO BE TAKEN INTO ACCOUNT HERE
! NOTE: currently no summation done!
! NOTE: do not copy the first 2 tracers (SAT and humidity) as these values are set directly by the EMBM
! dum_sfcrok1(3:dum_n_rok,:,:) = dum_sfcrok(3:dum_n_rok,:,:)
!
end SUBROUTINE cpl_comp_rokocn
! ******************************************************************************************************************************** !
! ******************************************************************************************************************************** !
! COUPLE EMBM TRACERS
!SUBROUTINE cpl_comp_rokEMBM( &
! & dum_n_rok, &
! & dum_n_i_rok,dum_n_j_rok, &
! & dum_n_i_ocn,dum_n_j_ocn, &
! & dum_t, &
! & dum_q, &
! & dum_sfcrok1)
! IMPLICIT NONE
! dummy arguments
! integer,intent(in)::dum_n_rok
! integer,intent(in)::dum_n_i_rok,dum_n_j_rok
! integer,intent(in)::dum_n_i_ocn,dum_n_j_ocn
! real,dimension(dum_n_i_rok,dum_n_j_rok),intent(in)::dum_t
! real,dimension(dum_n_i_rok,dum_n_j_rok),intent(in)::dum_q
! real,dimension(dum_n_rok,dum_n_i_ocn,dum_n_j_ocn),intent(out)::dum_sfcrok1 ! rock-surface tracer composition; ocn grid
!
! ANY DIFFERENCE BETWEEN OCEAN AND rock GRIDS WILL HAVE TO BE TAKEN INTO ACCOUNT HERE
! NOTE: currently no summation done!
! dum_sfcrok1(1,:,:) = dum_t(:,:)
! dum_sfcrok1(2,:,:) = dum_q(:,:)
!
!end SUBROUTINE cpl_comp_rokEMBM
! ******************************************************************************************************************************** !
|
{"hexsha": "e90b334b9e70aea9b5a23d9996ac2274067602bf", "size": 2872, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "genie-rokgem/src/fortran/cpl_comp_rokocn.f90", "max_stars_repo_name": "JUNPENGZ/cgenie.muffin", "max_stars_repo_head_hexsha": "43bc8dc025428a5141866d762129b2cfaf1345ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "genie-rokgem/src/fortran/cpl_comp_rokocn.f90", "max_issues_repo_name": "JUNPENGZ/cgenie.muffin", "max_issues_repo_head_hexsha": "43bc8dc025428a5141866d762129b2cfaf1345ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-01-28T23:13:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:28:15.000Z", "max_forks_repo_path": "genie-rokgem/src/fortran/cpl_comp_rokocn.f90", "max_forks_repo_name": "JUNPENGZ/cgenie.muffin", "max_forks_repo_head_hexsha": "43bc8dc025428a5141866d762129b2cfaf1345ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-10T19:25:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-10T19:25:44.000Z", "avg_line_length": 47.0819672131, "max_line_length": 132, "alphanum_fraction": 0.4961699164, "num_tokens": 764}
|
# This is barely modified from Kivy tutorials:
# https://kivy.org/doc/stable/tutorials/pong.html
# ...to integrate serial input from the MSP430FR5994
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import (
NumericProperty, ReferenceListProperty, ObjectProperty
)
from kivy.vector import Vector
from kivy.clock import Clock
import random
import serial
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import animation
import datetime as dt
from itertools import count
Lightval = 50
Watervalue = 50
reservoir = 50
sensorreading = 0
import csv
import time
x_var =[]
y_var =[]
index = count()
def initialize_serial():
try:
mspserial = serial.Serial('COM10', 9600)
    except serial.SerialException:
print("Failed to connect. Check your port name")
exit()
return mspserial
def serial_read(serial_obj):
data = serial_obj.read()
return data
#read the serial data from the port
#def processdatato_ints():
# Read input from serial port for movement
# value = arduino.readline()
def isaHundredNum(nums):
    """Return True when the three buffered digit strings spell out '100'."""
    return nums == ['1', '0', '0']
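# e.g. isaHundredNum(['1', '0', '0']) -> True, isaHundredNum(['9', '9', '9']) -> False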
def animate(i):
data = pd.read_csv('data.csv')
x = data['timeval']
y = data['light']
plt.cla()
plt.plot(x,y)
#ani = animation.FuncAnimation(plt.gcf(), animate, interval= 1000)
#plt.show()
if __name__ == '__main__':
# Connect to serial port first
# Make sure to replace this with YOUR MSP430s serial port
#PongApp().run()
# Cleanup
#databits = [b'1',b'2',b'3']
#while(True):
x_value = 0#dt.datetime.now().strftime('%H:%M:%S.%f')
fieldnames = ["timeval", "sensoreading"]
with open('newdata.csv', 'w') as csv_file:
csv_writer = csv.DictWriter(csv_file,fieldnames= fieldnames)
csv_writer.writeheader()
serial_obj = initialize_serial()
    count = 0
num = []
numbers = []
empty = " "
bits = [b'1' ,b'2',b'3']
    index = 0
single = False
sensordefined = False
resultvalues = []
variable = input("type the variable you want to observe in real time: ")
if(variable== "water"):
index = 2
elif(variable == "Light"):
index = 0
else:
index = 1
serial_obj.flushInput()
while(True):
serial_obj.flushOutput()
"""
if(count == 300):
time.sleep(1)
serial_obj.close()
serial_obj = initialize_serial()
variable = input("type the variable you want to observe in real time: ")
if(variable== "water"):
index = 2
elif(variable == "Light"):
index = 0
else:
index = 1
count = 0
serial_obj.flushInput()
"""
#time.sleep(1)
#count = 0
serial_obj.write(bits[index])
#print(count)
data = serial_read(serial_obj)
#print(data)
if(data == b'.' and len(num) == 2):
for a in num:
empty += str(a)
if(empty != " "):
sensorreading = (int(empty))
sensordefined = True
empty = " "
num.clear()
print(sensorreading)
#return Lightvalue
elif(data == b'.' and len(num)== 1):
single = True
for a in num:
empty += str(a)
if(empty != " "):
sensorreading = (int(empty))
sensordefined = True
empty = " "
num.clear()
print(sensorreading)
else:
if(not(single)):
if(data != b'.'):
num.append(str(int(data)))
if(len(num)>=3):
if(isaHundredNum(num)):
#index += 1
for a in num:
empty += str(a)
sensorreading = (int(empty))
print(sensorreading)
sensordefined = True
empty = " "
num.clear()
else:
single = False
if(sensordefined):
with open('newdata.csv', 'a') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames = fieldnames)
info = {
"timeval": x_value,
"sensoreading": sensorreading
}
csv_writer.writerow(info)
count += 1
x_value = count
sensordefined = False
#serial_obj.flushInput()
#time.sleep(1)
serial_obj.close()
#write data to the msp 430
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = []
ys = []
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=1000)
plt.show()
#databits = [1,2,3];
length = len(databits)
newdata = []
while(True):
count = 64
for a in databits:
while(count):
-- count
print(data)
newdata.append(data)
#display the data
#print(data)
newdata.clear()
"""
|
{"hexsha": "a69cd2dec9ff226f5a3cf2663f00a9c4e81ec583", "size": 5301, "ext": "py", "lang": "Python", "max_stars_repo_path": "lab4/pong/main.py", "max_stars_repo_name": "EvansTDingwiza/ce346-code", "max_stars_repo_head_hexsha": "b938463d4a6d25e0a017aedbf4f50977adc88f40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab4/pong/main.py", "max_issues_repo_name": "EvansTDingwiza/ce346-code", "max_issues_repo_head_hexsha": "b938463d4a6d25e0a017aedbf4f50977adc88f40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab4/pong/main.py", "max_forks_repo_name": "EvansTDingwiza/ce346-code", "max_forks_repo_head_hexsha": "b938463d4a6d25e0a017aedbf4f50977adc88f40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6558139535, "max_line_length": 84, "alphanum_fraction": 0.5253725712, "include": true, "reason": "import numpy", "num_tokens": 1266}
|
import json
import os
import random
from argparse import ArgumentParser
import cv2
import keras.backend as K
import numpy as np
from keras import Input, Model, metrics
from keras.callbacks import Callback, TensorBoard
from keras.layers import Conv2D, Flatten, Dense, Lambda, Reshape, Conv2DTranspose
from sonicrl.environments import get_environments
class MultiModelCheckpoint(Callback):
def __init__(self, models, filepath):
self._models = models
self._filepath = filepath
def on_epoch_end(self, epoch, logs=None):
for model_name, model in self._models.items():
filepath = self._filepath.format(model=model_name, epoch=epoch + 1, **logs)
model.save(filepath, overwrite=True)
def autoencoder(image_shape, filters=64, kernel_size=3, latent_dims=64, intermediate_dims=128, epsilon_std=1.0):
x = Input(shape=image_shape)
height, width, channels = image_shape
conv_1 = Conv2D(filters,
kernel_size=kernel_size,
padding='same', activation='relu',
strides=(2, 2))(x)
conv_2 = Conv2D(filters,
kernel_size=kernel_size,
padding='same', activation='relu')(conv_1)
conv_3 = Conv2D(filters * 2,
kernel_size=kernel_size,
padding='same', activation='relu',
strides=(2, 2))(conv_2)
flat = Flatten()(conv_3)
hidden = Dense(intermediate_dims, activation='relu')(flat)
z_mean = Dense(latent_dims)(hidden)
z_log_var = Dense(latent_dims)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dims),
mean=0., stddev=epsilon_std)
        return z_mean + K.exp(z_log_var / 2) * epsilon  # std = exp(log_var / 2)
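    # Reparameterization trick: with z_log_var = log(sigma^2), sampling
    # z = mu + exp(z_log_var / 2) * eps with eps ~ N(0, I) keeps the draw
    # differentiable with respect to z_mean and z_log_var.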
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dims,))([z_mean, z_log_var])
encoder = Model(x, [z_mean, z_log_var, z], name='encoder')
# we instantiate these layers separately so as to reuse them later
decoder_hid = Dense(intermediate_dims, activation='relu')
intermediate_height = height // 4
intermediate_width = width // 4
decoder_upsample = Dense(filters * intermediate_height * intermediate_width, activation='relu')
decoder_reshape = Reshape((intermediate_height, intermediate_width, filters))
decoder_deconv_1 = Conv2DTranspose(filters * 2,
kernel_size=kernel_size,
padding='same',
activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters,
kernel_size=kernel_size,
strides=(2, 2),
padding='same',
activation='relu')
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
kernel_size=kernel_size,
strides=(2, 2),
padding='same',
activation='relu')
decoder_mean_squash = Conv2D(channels,
kernel_size=kernel_size,
padding='same',
activation='sigmoid')
decoder_input = Input(shape=(latent_dims,))
decoder_features = decoder_hid(decoder_input)
decoder_features = decoder_upsample(decoder_features)
decoder_features = decoder_reshape(decoder_features)
decoder_features = decoder_deconv_1(decoder_features)
decoder_features = decoder_deconv_2(decoder_features)
decoder_features = decoder_deconv_3_upsamp(decoder_features)
decoded_image = decoder_mean_squash(decoder_features)
decoder = Model(decoder_input, decoded_image, name='decoder')
# instantiate VAE model
_, _, z_sampling = encoder(x)
vae_output = decoder(z_sampling)
vae = Model(x, vae_output, name='vae')
# Compute VAE loss
xent_loss = height * width * metrics.binary_crossentropy(
K.flatten(x),
K.flatten(vae_output))
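    # Closed-form KL between q(z|x) = N(mu, sigma^2 I) and the prior N(0, I):
    # KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), as computed below.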
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()
return vae, encoder, decoder
def chunks(items, chunk_size):
chunk = []
for item in items:
chunk.append(item)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if chunk:
yield chunk
def image_path_generator(image_paths, shuffle=False):
while True:
if shuffle:
random.shuffle(image_paths)
yield from image_paths
def load_image(path):
image = cv2.imread(path, cv2.IMREAD_COLOR)
image = image.astype(np.float32)
image /= 255.
return image
def image_generator(image_paths, shuffle=False, batch_size=32):
paths = image_path_generator(image_paths, shuffle)
images = map(load_image, paths)
batches = chunks(images, batch_size)
for batch in batches:
image_batch = np.array(batch)
yield image_batch, None
def train_val_split(samples, train_envs, image_directory):
train_envs = {(env['game'], env['state']) for env in train_envs}
train_paths = []
val_paths = []
for sample in samples:
path = os.path.join(image_directory, sample['image_id'])
env = (sample['game'], sample['state'])
if env in train_envs:
train_paths.append(path)
else:
val_paths.append(path)
return train_paths, val_paths
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--samples')
parser.add_argument('--image-directory')
parser.add_argument('--train-environments')
parser.add_argument('--checkpoint-directory', default='checkpoints')
parser.add_argument('--batch-size', type=int, default=32)
args = parser.parse_args()
with open(args.samples) as samples_file:
samples = list(map(json.loads, samples_file))
train_environments = get_environments(args.train_environments)
train_paths, val_paths = train_val_split(samples, train_environments, args.image_directory)
train_generator = image_generator(train_paths, shuffle=True, batch_size=args.batch_size)
train_steps = 10000
    val_generator = image_generator(val_paths, batch_size=args.batch_size)
val_steps = 100
vae, encoder, decoder = autoencoder((224, 320, 3))
models = {'vae': vae, 'encoder': encoder, 'decoder': decoder}
filepath = os.path.join(args.checkpoint_directory, '{model}.{epoch:02d}-{val_loss:.6f}.hdf5')
vae.fit_generator(
train_generator,
steps_per_epoch=train_steps,
epochs=100,
validation_data=val_generator,
validation_steps=val_steps,
callbacks=[
TensorBoard(),
MultiModelCheckpoint(models, filepath)
]
)
|
{"hexsha": "24d73cf05ee1988a9e74d8066bf02d61c91a6307", "size": 7210, "ext": "py", "lang": "Python", "max_stars_repo_path": "sonicrl/worldmodel/autoencoder.py", "max_stars_repo_name": "bharris47/sonic-rl", "max_stars_repo_head_hexsha": "5a819e92299f7eeaa9853b4991c9829603752bf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sonicrl/worldmodel/autoencoder.py", "max_issues_repo_name": "bharris47/sonic-rl", "max_issues_repo_head_hexsha": "5a819e92299f7eeaa9853b4991c9829603752bf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sonicrl/worldmodel/autoencoder.py", "max_forks_repo_name": "bharris47/sonic-rl", "max_forks_repo_head_hexsha": "5a819e92299f7eeaa9853b4991c9829603752bf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2311557789, "max_line_length": 112, "alphanum_fraction": 0.6289875173, "include": true, "reason": "import numpy", "num_tokens": 1569}
|
from typing import List, Dict, Tuple, Callable
import numpy as np
import pytest
def static_test(f: Callable, l_tests: List[Dict[str, Tuple]],
key_in: str = 'Input', key_out: str = 'Output'):
"""Validates the 'f' function on the list of tests 'l_tests'
Parameters
--------------------
f: Callable,
Function to be tested
l_tests: List[Dict[str, Tuple]],
List of dictionaries containing inputs for the function 'f' and the expected outputs
key_in:str='Input'
Input key for test dictionaries
    key_out:str='Output'
        Output key for test dictionaries. If the expected output is an
        exception class rather than a tuple, 'f' is asserted to raise it.
    """
    if not all(key_in in test and key_out in test for test in l_tests):
        raise KeyError(f"{key_in} or {key_out} is not a valid key.")
for test in l_tests:
if not isinstance(test[key_out], tuple):
with pytest.raises(test[key_out]):
f(*test[key_in])
else:
result = f(*test[key_in])
assert len(result) == len(test[key_out])
assert all(np.isclose(x, y) for x, y in zip(result, test[key_out]))
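# A minimal usage sketch of static_test (hypothetical function and test values):
def _demo_static_test():
    def divmod_pair(a, b):
        return a // b, a % b
    tests = [
        {'Input': (7, 2), 'Output': (3, 1)},
        {'Input': (1, 0), 'Output': ZeroDivisionError},
    ]
    static_test(divmod_pair, tests)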
|
{"hexsha": "cac0f8b5054dd09652366846e06eda59042eaeae", "size": 1167, "ext": "py", "lang": "Python", "max_stars_repo_path": "covidxpert/utils/test_utils.py", "max_stars_repo_name": "LucaCappelletti94/covidxpert", "max_stars_repo_head_hexsha": "8adda25f3d6fb648607c0f8af7d3ff54b42c59fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-22T12:50:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-12T01:00:17.000Z", "max_issues_repo_path": "covidxpert/utils/test_utils.py", "max_issues_repo_name": "LucaCappelletti94/covidxpert", "max_issues_repo_head_hexsha": "8adda25f3d6fb648607c0f8af7d3ff54b42c59fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-05-27T19:03:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-02T11:12:06.000Z", "max_forks_repo_path": "covidxpert/utils/test_utils.py", "max_forks_repo_name": "LucaCappelletti94/covidxpert", "max_forks_repo_head_hexsha": "8adda25f3d6fb648607c0f8af7d3ff54b42c59fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-27T07:21:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-27T07:21:02.000Z", "avg_line_length": 35.3636363636, "max_line_length": 92, "alphanum_fraction": 0.6109682948, "include": true, "reason": "import numpy", "num_tokens": 282}
|
Require Import rt.util.all.
Require Import rt.model.arrival.basic.task.
From mathcomp Require Import ssreflect ssrbool ssrnat eqtype seq.
Module ConcreteTask.
Import SporadicTaskset.
Section Defs.
(* Definition of a concrete task. *)
Record concrete_task :=
{
task_id: nat; (* for uniqueness *)
task_cost: nat;
task_period: nat;
task_deadline: nat;
task_suspension_bound: nat
}.
(* To make it compatible with ssreflect, we define a decidable
equality for concrete tasks. *)
Definition task_eqdef (t1 t2: concrete_task) :=
(task_id t1 == task_id t2) &&
(task_cost t1 == task_cost t2) &&
(task_period t1 == task_period t2) &&
(task_deadline t1 == task_deadline t2) &&
(task_suspension_bound t1 == task_suspension_bound t2).
(* Next, we prove that task_eqdef is indeed an equality, ... *)
Lemma eqn_task : Equality.axiom task_eqdef.
Proof.
unfold Equality.axiom; intros x y.
destruct (task_eqdef x y) eqn:EQ.
{
apply ReflectT.
unfold task_eqdef in *.
move: EQ => /andP [/andP [/andP [/andP [/eqP ID /eqP COST] /eqP PERIOD] /eqP DL] /eqP SUSP].
by destruct x, y; simpl in *; subst.
}
{
apply ReflectF.
unfold task_eqdef, not in *; intro BUG.
apply negbT in EQ; rewrite negb_and in EQ.
destruct x, y.
rewrite negb_and in EQ.
move: EQ => /orP [/orP [EQ | /eqP DL] | /eqP SUSP]; last by apply SUSP; inversion BUG.
rewrite negb_and in EQ.
move: EQ => /orP [EQ | /eqP DL]; last by apply DL; inversion BUG.
rewrite negb_and in EQ.
move: EQ => /orP [/eqP ID | /eqP PERIOD]; last by apply PERIOD; inversion BUG.
by apply ID; inversion BUG.
by apply DL; inversion BUG.
}
Qed.
(* ..., which allows instantiating the canonical structure. *)
Canonical concrete_task_eqMixin := EqMixin eqn_task.
Canonical concrete_task_eqType := Eval hnf in EqType concrete_task concrete_task_eqMixin.
End Defs.
Section ConcreteTaskset.
Definition concrete_taskset :=
taskset_of concrete_task_eqType.
End ConcreteTaskset.
End ConcreteTask.
|
{"author": "cd-public", "repo": "rt-proofs", "sha": "ebef0b65460fe009c51f638fe2b459f16a6d1dd5", "save_path": "github-repos/coq/cd-public-rt-proofs", "path": "github-repos/coq/cd-public-rt-proofs/rt-proofs-ebef0b65460fe009c51f638fe2b459f16a6d1dd5/implementation/uni/susp/dynamic/task.v"}
|
from sklearn.datasets import fetch_lfw_people
import numpy as np
import pdb
MALENESS_THRESHOLD = 0 # threshold at which the person is classified as a male
MIN_FACES = 5
TRAIN_CUT = 0.75
print("Fetching people with at least " + str(MIN_FACES) + " pictures.")
lfw_people = fetch_lfw_people(color=True, min_faces_per_person=MIN_FACES)
print("Loading maleness attributes")
maleness_lookup = np.load('lfw_maleness.npy', allow_pickle=True).item()
# downloads all faces with more than 5 images
target_names = lfw_people.target_names
target = lfw_people.target
data = lfw_people.data
n,d = lfw_people.data.shape
y = np.zeros(n) # 1 for male 0 for female
print("Labelling maleness")
for i in range(n):
target_name = target_names[target[i]]
avg_maleness = np.mean(maleness_lookup[target_name])
if avg_maleness > MALENESS_THRESHOLD:
y[i] = 1
else:
y[i] = 0
print("Storing data and labels")
data_slice = np.hstack((data, y[:, None]))
# TODO: Randomly shuffle data_slice before
# np.save("lfw_maleness_train", data_slice[0:int(n*TRAIN_CUT)])
# split into num_client parts
num_clients = 10
for i in range(num_clients):
    slice_size = int(n*TRAIN_CUT) // num_clients
left_idx = i*slice_size
right_idx = (i+1)*slice_size
np.save("lfw_maleness_train"+str(i), data_slice[left_idx:right_idx])
np.save("lfw_maleness_test", data_slice[int(n*TRAIN_CUT):])
|
{"hexsha": "7d2228ed17ca5af8e5f4bcfba05b8bb1d578ccb7", "size": 1379, "ext": "py", "lang": "Python", "max_stars_repo_path": "ML/Pytorch/data/lfw/parse_lfw_maleness.py", "max_stars_repo_name": "DistributedML/Biscotti", "max_stars_repo_head_hexsha": "dfba71b3924e1bafd2ab2545881fb741193f224e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2019-01-13T22:07:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T16:53:13.000Z", "max_issues_repo_path": "ML/Pytorch/data/lfw/parse_lfw_maleness.py", "max_issues_repo_name": "cm20210602/Biscotti", "max_issues_repo_head_hexsha": "dfba71b3924e1bafd2ab2545881fb741193f224e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ML/Pytorch/data/lfw/parse_lfw_maleness.py", "max_forks_repo_name": "cm20210602/Biscotti", "max_forks_repo_head_hexsha": "dfba71b3924e1bafd2ab2545881fb741193f224e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2019-05-26T15:11:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T16:10:24.000Z", "avg_line_length": 28.7291666667, "max_line_length": 78, "alphanum_fraction": 0.735315446, "include": true, "reason": "import numpy", "num_tokens": 389}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 22:52:36 2020
@author: Woody
"""
from model import Model
from agent import Agent
from algorithm import PolicyGradient
import gym
import numpy as np
import os
from parl.utils import logger
def run_episode(env, agent):
obs_list, action_list, reward_list = [], [], []
obs = env.reset()
while True:
        obs = preprocess(obs) # from shape (210, 160, 3) to (6400,)
obs_list.append(obs)
        action = agent.sample(obs) # sample an action from the policy
action_list.append(action)
obs, reward, done, info = env.step(action)
reward_list.append(reward)
if done:
break
return obs_list, action_list, reward_list
# evaluate the agent: run 5 episodes and average the reward
def evaluate(env, agent, render=False):
eval_reward = []
for i in range(5):
obs = env.reset()
episode_reward = 0
while True:
            obs = preprocess(obs) # from shape (210, 160, 3) to (6400,)
            action = agent.predict(obs) # pick the greedy (best) action
obs, reward, isOver, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if isOver:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
# Pong frame preprocessing
def preprocess(image):
    """ Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) 1-D float vector """
    image = image[35:195] # crop the playing field
    image = image[::2,::2,0] # downsample by a factor of 2, keep one channel
    image[image == 144] = 0 # erase background (background type 1)
    image[image == 109] = 0 # erase background (background type 2)
    image[image != 0] = 1 # binarize: everything that is not background becomes 1
    return image.astype(float).ravel()
# Given the per-step reward list of an episode, compute the return G_t for every step
def calc_reward_to_go(reward_list, gamma=0.99):
"""calculate discounted reward"""
reward_arr = np.array(reward_list)
for i in range(len(reward_arr) - 2, -1, -1):
# G_t = r_t + γ·r_t+1 + ... = r_t + γ·G_t+1
reward_arr[i] += gamma * reward_arr[i + 1]
# normalize episode rewards
reward_arr -= np.mean(reward_arr)
reward_arr /= np.std(reward_arr)
return reward_arr
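# Worked example (hypothetical rewards): with gamma = 0.5 and rewards [1.0, 0.0, 2.0],
# G_2 = 2.0, G_1 = 0.0 + 0.5*2.0 = 1.0, G_0 = 1.0 + 0.5*1.0 = 1.5, so the array is
# [1.5, 1.0, 2.0] before the mean/std normalization above.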
LEARNING_RATE = 1e-3*5
GAMMA = 0.92
# create the environment
env = gym.make('Pong-v0')
obs_dim = 80 * 80
act_dim = env.action_space.n
logger.info('obs_dim {}, act_dim {}'.format(obs_dim, act_dim))
# build the agent with the PARL framework
model = Model(act_dim=act_dim)
alg = PolicyGradient(model, lr=LEARNING_RATE)
agent = Agent(alg, obs_dim=obs_dim, act_dim=act_dim)
# load a saved model if present
if os.path.exists('./model_pg.ckpt'):
agent.restore('./model_pg.ckpt')
count = 0
for i in range(3000):
obs_list, action_list, reward_list = run_episode(env, agent)
if i % 10 == 0:
logger.info("Train Episode {}, Reward Sum {}, Learning rate {}.".format(i,
sum(reward_list), alg.lr))
batch_obs = np.array(obs_list)
batch_action = np.array(action_list)
batch_reward = calc_reward_to_go(reward_list)
agent.learn(batch_obs, batch_action, batch_reward)
if (i + 1) % 100 == 0:
total_reward = evaluate(env, agent, render=False)
logger.info('Episode {}, Test reward: {}, Learning rate {}.'.format(i + 1, total_reward, alg.lr))
if alg.lr>1e-4:
alg.lr = alg.lr * GAMMA
else:
alg.lr = 1e-4
if (i + 1) % 200 == 0:
# save the parameters to ./model.ckpt
agent.save('./model_pg.ckpt')
count = count + 1
logger.info('Model {} saved'.format(count))
|
{"hexsha": "0c972a37f8cbb8bfc355a5b5d778962b75250753", "size": 3481, "ext": "py", "lang": "Python", "max_stars_repo_path": "results/pg/train_pg.py", "max_stars_repo_name": "star2dust/parl-tutorials", "max_stars_repo_head_hexsha": "a7bae8b9a8968b7cad77a04f104f1c846eb4ddf2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-06-27T05:38:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T05:29:09.000Z", "max_issues_repo_path": "results/pg/train_pg.py", "max_issues_repo_name": "star2dust/parl-tutorials", "max_issues_repo_head_hexsha": "a7bae8b9a8968b7cad77a04f104f1c846eb4ddf2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "results/pg/train_pg.py", "max_forks_repo_name": "star2dust/parl-tutorials", "max_forks_repo_head_hexsha": "a7bae8b9a8968b7cad77a04f104f1c846eb4ddf2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-30T01:12:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T03:03:26.000Z", "avg_line_length": 28.0725806452, "max_line_length": 105, "alphanum_fraction": 0.6075840276, "include": true, "reason": "import numpy", "num_tokens": 1050}
|
include("censored.jl")
include("cross_validate.jl")
include("precision_at_k.jl")
include("simple_glrms.jl")
#include("fit_rdataset.jl")
|
{"hexsha": "f321a716cd48caeec7660660067c526171aab22f", "size": 135, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/runexamples.jl", "max_stars_repo_name": "heyi19931225/Lowrankmodels", "max_stars_repo_head_hexsha": "b87cadec54dd0e431c0b901ae405dcc490bdc79a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/runexamples.jl", "max_issues_repo_name": "heyi19931225/Lowrankmodels", "max_issues_repo_head_hexsha": "b87cadec54dd0e431c0b901ae405dcc490bdc79a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-16T09:18:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-16T10:33:54.000Z", "max_forks_repo_path": "examples/runexamples.jl", "max_forks_repo_name": "heyi19931225/Lowrankmodels", "max_forks_repo_head_hexsha": "b87cadec54dd0e431c0b901ae405dcc490bdc79a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-16T07:48:24.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-16T07:48:24.000Z", "avg_line_length": 27.0, "max_line_length": 28, "alphanum_fraction": 0.7777777778, "num_tokens": 39}
|
! the initialization module, used to initialize the phase field data
! Created by Z. Guo on Jan 17, 2014
! Last modified on Jan 22, 2014
module init_phi_module
use multifab_module
use ml_layout_module
use define_bc_module
use multifab_physbc_module
use multifab_fill_ghost_module
!use ml_restriction_module
use pf_utility_module
use ml_restrict_fill_module
implicit none
private
public :: init_phi_on_level, init_phi,trans_result_load
public :: init_flw_on_level, init_flw,trans_result_load_flw
contains
subroutine init_phi_on_level(phi,ad,dx,seed_pos,prob_lo,prob_hi,the_bc_level,pfpara)
type(multifab) , intent(inout) :: phi,ad
type(pf_para), intent(in ) :: pfpara
real(kind=dp_t), intent(in ) :: dx
real(kind=dp_t), intent(inout) :: seed_pos(:,:)
real(kind=dp_t), intent(in ) :: prob_lo(phi%dim)
real(kind=dp_t), intent(in ) :: prob_hi(phi%dim)
type(bc_level) , intent(in ) :: the_bc_level
! local
integer i,ng,dm
integer :: lo(phi%dim), hi(phi%dim)
real(kind=dp_t), pointer :: dp(:,:,:,:)
real(kind=dp_t), pointer :: da(:,:,:,:)
ng = phi%ng
dm = phi%dim
do i=1,nfabs(phi)
dp => dataptr(phi,i)
da => dataptr(ad,i)
lo = lwb(get_box(phi,i))
hi = upb(get_box(phi,i))
select case(dm)
case (2)
call init_phi_2d(da(:,:,1,1),dp(:,:,1,1),dp(:,:,1,2),dp(:,:,1,3),ng,lo,hi,prob_lo,prob_hi,dx, &
pfpara%ori_def,pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,pfpara%seed_radius,seed_pos,&
pfpara%seed_num, pfpara%seed_type)
case (3)
call init_phi_3d(da(:,:,:,1),dp(:,:,:,1),dp(:,:,:,2),dp(:,:,:,3),ng,lo,hi,prob_lo,prob_hi,dx, &
pfpara%ori_def,pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,pfpara%seed_radius,seed_pos,&
pfpara%seed_num, pfpara%seed_type)
end select
end do
call multifab_fill_boundary(phi)
call multifab_fill_boundary(ad)
call multifab_physbc(phi,1,1,3,the_bc_level)
call multifab_physbc(ad,1,1,1,the_bc_level)
end subroutine init_phi_on_level
subroutine init_phi(mla,phi,ad,dx,seed_pos,prob_lo,prob_hi,the_bc_tower,pfpara)
type(ml_layout), intent(in ) :: mla
type(multifab) , intent(inout) :: phi(:),ad(:)
real(kind=dp_t), intent(in ) :: dx(:)
real(kind=dp_t), intent(inout) :: seed_pos(:,:)
real(kind=dp_t), intent(in ) :: prob_lo(mla%dim)
real(kind=dp_t), intent(in ) :: prob_hi(mla%dim)
type(bc_tower) , intent(in ) :: the_bc_tower
type(pf_para), intent(in ) :: pfpara
! local variables
integer :: lo(mla%dim), hi(mla%dim)
integer :: nlevs, dm, ng, i, n
real(kind=dp_t), pointer :: dp(:,:,:,:)
real(kind=dp_t), pointer :: da(:,:,:,:)
ng = phi(1)%ng
dm = mla%dim
nlevs = mla%nlevel
do n=1,nlevs
do i=1,nfabs(phi(n))
dp => dataptr(phi(n),i)
da => dataptr(ad(n),i)
lo = lwb(get_box(phi(n),i))
hi = upb(get_box(phi(n),i))
select case(dm)
case (2)
call init_phi_2d(da(:,:,1,1),dp(:,:,1,1),dp(:,:,1,2),dp(:,:,1,3),ng,lo,hi,prob_lo,prob_hi,dx(n), &
pfpara%ori_def,pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,pfpara%seed_radius,seed_pos,&
pfpara%seed_num, pfpara%seed_type)
case (3)
call init_phi_3d(da(:,:,:,1),dp(:,:,:,1),dp(:,:,:,2),dp(:,:,:,3),ng,lo,hi,prob_lo,prob_hi,dx(n), &
pfpara%ori_def,pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,pfpara%seed_radius,seed_pos,&
pfpara%seed_num, pfpara%seed_type)
end select
end do
end do
call ml_restrict_and_fill(nlevs, phi, mla%mba%rr, the_bc_tower%bc_tower_array,1,1,3)
call ml_restrict_and_fill(nlevs, ad, mla%mba%rr, the_bc_tower%bc_tower_array,1,1,1)
end subroutine init_phi
subroutine init_phi_2d (phi_ad,phi_pf,phi_uc,phi_th,ng,lo,hi,prob_lo,prob_hi,dx, &
ori,t_h_l,t_h_r,t_l,s_r,seed_pos,seed_num,seed_type)
integer :: lo(2), hi(2), ng,seed_num,seed_type
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: prob_lo(2),prob_hi(2)
double precision :: dx,ori,t_h_l,t_h_r,t_l,s_r
real(kind=dp_t) :: seed_pos(seed_num,4)
! local variables
integer :: i,j,s_i
double precision :: x,x_l,y,r2,y_l,t_h,d_c= sqrt(2.d0)
!double precision :: dis_sep(seed_num, 2), dis_unit
! The domain top to bottom distance
y_l = prob_hi(2) - prob_lo(2)
x_l = prob_hi(1) - prob_lo(1)
select case (seed_type)
case(1)
! columnar dendrite growth, started from the bottom of y direction
!$omp parallel do private(i,j,y)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_pf(i,j) = -1.d0
phi_uc(i,j) = -1.d0 ! initialized to -1, model A, no need for further change
phi_ad(i,j) = -1.d0 ! initialized to -1, i.e. no orientation assigned yet
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j) = t_l + (t_h - t_l) * y / y_l ! setup a temperature gradient
if (y < s_r) then
phi_pf(i,j) = 1.d0
phi_ad(i,j) = atan(ori)
end if
end do
end do
!$omp end parallel do
case(2)
! columnar dendrite growth, started from the bottom of y direction
!$omp parallel do private(i,j,y)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_pf(i,j) = -1.d0
phi_uc(i,j) = -1.d0 ! initialized to -1, model A, no need for further change
phi_ad(i,j) = -1.d0 ! initialized to -1, i.e. no orientation assigned yet
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j) = t_l + (t_h - t_l) * y / y_l ! setup a temperature gradient
if (x <= s_r .or. y <= s_r .or. x >= (x_l-s_r) .or. y>= (y_l-s_r) ) then
phi_pf(i,j) = 1.d0
phi_ad(i,j) = atan(ori)
end if
end do
end do
!$omp end parallel do
case default
! equiaxed dendrite growth
!$omp parallel do private(i,j,y)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_pf(i,j) = -1.d0
phi_uc(i,j) = -1.d0 ! initialized to -1, model A, no need for further change
phi_ad(i,j) = -1.d0 ! initialized to -1, i.e. no orientation assigned yet
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j) = t_l + (t_h - t_l) * y / y_l ! setup a temperature gradient
do s_i = 1,seed_num
r2 = sqrt( (x - seed_pos(s_i,1))**2 + (y - seed_pos(s_i,2))**2)
!phi_pf(i,j) = -tanh(r2-20.d0)
if (r2 < s_r) then
phi_pf(i,j) = 1.d0
phi_ad(i,j) = dble(s_i)
end if
end do
end do
end do
!$omp end parallel do
end select
end subroutine init_phi_2d
subroutine init_phi_3d (phi_ad,phi_pf,phi_uc,phi_th,ng,lo,hi,prob_lo,prob_hi,dx,&
ori,t_h_l,t_h_r,t_l,s_r,seed_pos,seed_num,seed_type)
integer :: lo(3), hi(3), ng,seed_num,seed_type
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: prob_lo(3),prob_hi(3)
double precision :: dx, ori,t_h_l,t_h_r,t_l,s_r
real(kind=dp_t) :: seed_pos(seed_num,4)
! local variables
integer :: i,j,k,s_i
double precision :: x,x_l,y,z,r2,y_l,t_h,d_c=sqrt(2.d0)
double precision :: x_left, x_right, y_top, y_bottom
! The domain top to bottom distance
y_l = prob_hi(2) - prob_lo(2)
x_l = prob_hi(1) - prob_lo(1)
select case (seed_type)
case(1)
!$omp parallel do private(i,j,k,y)
do k = lo(3), hi(3)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_pf(i,j,k) = -1.d0
phi_uc(i,j,k) = -1.d0 ! initialized to -1, model A, no need for further change
phi_ad(i,j,k) = -1.d0 ! initialized to -1, i.e. no orientation assigned yet
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j,k) = t_l + (t_h - t_l) * y / y_l ! setup a temperature gradient
if (y < s_r) then
phi_pf(i,j,k) = 1.d0
phi_ad(i,j,k) = atan( ori )
end if
end do !i
end do !j
end do !k
!$omp end parallel do
case (2)
    !$omp parallel do private(i,j,k,z,y,x,t_h,s_i,r2)
do k = lo(3), hi(3)
!z = prob_lo(3) + (dble(k)+0.5d0) * dx
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_pf(i,j,k) = -1.d0
        phi_uc(i,j,k) = -1.d0 ! initialized to -1 (model A); no further change needed
        phi_ad(i,j,k) = -1.d0 ! initialized to -1, i.e. aligned with the axis
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j,k) = t_l + (t_h - t_l) * y / y_l ! setup a temperature gradient
do s_i = 1,seed_num
r2 = sqrt( (x - seed_pos(s_i,1))**2 + (y - seed_pos(s_i,2))**2 )
!phi_pf(i,j,k) = -tanh(r2-20.d0)
if (r2 < s_r) then
phi_pf(i,j,k) = 1.d0
phi_ad(i,j,k) = dble(s_i)
end if
end do
end do !i
end do !j
end do !k
!$omp end parallel do
case (3)
    !$omp parallel do private(i,j,k,z,y,x,t_h,s_i,r2,x_left,x_right,y_bottom,y_top)
do k = lo(3), hi(3)
!z = prob_lo(3) + (dble(k)+0.5d0) * dx
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_pf(i,j,k) = -1.d0
        phi_uc(i,j,k) = -1.d0 ! initialized to -1 (model A); no further change needed
        phi_ad(i,j,k) = -1.d0 ! initialized to -1, i.e. aligned with the axis
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j,k) = t_l + (t_h - t_l) * y / y_l ! setup a temperature gradient
do s_i = 1,seed_num
x_left = seed_pos(s_i,1) - s_r; x_right = seed_pos(s_i,1) + s_r
y_bottom = seed_pos(s_i,2) - s_r; y_top = seed_pos(s_i,2) + s_r
if ( (x >= x_left .and. x <= x_right) .and. &
(y >= y_bottom .and. y <= y_top ) ) then
phi_pf(i,j,k) = 1.d0
phi_ad(i,j,k) = dble(s_i)
end if
end do
end do !i
end do !j
end do !k
!$omp end parallel do
case default
    !$omp parallel do private(i,j,k,z,y,x,t_h,s_i,r2)
do k = lo(3), hi(3)
z = prob_lo(3) + (dble(k)+0.5d0) * dx
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_pf(i,j,k) = -1.d0
        phi_uc(i,j,k) = -1.d0 ! initialized to -1 (model A); no further change needed
        phi_ad(i,j,k) = -1.d0 ! initialized to -1, i.e. aligned with the axis
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j,k) = t_l + (t_h - t_l) * y / y_l ! setup a temperature gradient
do s_i = 1,seed_num
r2 = sqrt( (x - seed_pos(s_i,1))**2 + (y - seed_pos(s_i,2))**2 + (z - seed_pos(s_i,3))**2 )
!phi_pf(i,j,k) = -tanh(r2-20.d0)
if (r2 < s_r) then
phi_pf(i,j,k) = 1.d0
phi_ad(i,j,k) = dble(s_i)
end if
end do
end do !i
end do !j
end do !k
!$omp end parallel do
end select
end subroutine init_phi_3d
subroutine trans_result_load(mla,phi_old,ad_ori,phi_load,dx,the_bc_tower,time,&
prob_lo,prob_hi,pfpara)
type(ml_layout), intent(in ) :: mla
type(multifab) , intent(inout) :: phi_old(:),ad_ori(:)
type(multifab) , pointer :: phi_load(:)
real(kind=dp_t), intent(in ) :: dx(:),time
type(bc_tower) , intent(in ) :: the_bc_tower
type(pf_para), intent(in ) :: pfpara
real(kind=dp_t), intent(in ) :: prob_lo(mla%dim)
real(kind=dp_t), intent(in ) :: prob_hi(mla%dim)
! local variables
integer :: lo(mla%dim), hi(mla%dim)
integer :: nlevs, dm, ng, i, n
real(kind=dp_t), pointer :: dp(:,:,:,:)
real(kind=dp_t), pointer :: da(:,:,:,:)
real(kind=dp_t), pointer :: dl(:,:,:,:)
ng = phi_old(1)%ng
dm = mla%dim
nlevs = mla%nlevel
if(pfpara%plot_mode .eq. 3) then
do n=1,nlevs
do i=1,nfabs(phi_old(n))
dp => dataptr(phi_old(n),i)
da => dataptr(ad_ori(n),i)
dl => dataptr(phi_load(n),i)
lo = lwb(get_box(phi_old(n),i))
hi = upb(get_box(phi_old(n),i))
select case(dm)
case (2)
call load_phi_2d(da(:,:,1,1),dp(:,:,1,1),dp(:,:,1,2),dp(:,:,1,3),&
dl(:,:,1,1),dl(:,:,1,2),dl(:,:,1,3), time, &
ng,lo,hi,prob_lo,prob_hi,dx(n),pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,&
pfpara%sk,pfpara%Rate_Cooling,pfpara%temp_lowcut)
case (3)
call load_phi_3d(da(:,:,:,1),dp(:,:,:,1),dp(:,:,:,2),dp(:,:,:,3),&
dl(:,:,:,1),dl(:,:,:,2),dl(:,:,:,3), time, &
ng,lo,hi,prob_lo,prob_hi,dx(n),pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,&
pfpara%sk,pfpara%Rate_Cooling,pfpara%temp_lowcut)
end select
end do
end do
else if(pfpara%plot_mode .eq. 0) then
do n=1,nlevs
do i=1,nfabs(phi_old(n))
dp => dataptr(phi_old(n),i)
da => dataptr(ad_ori(n),i)
dl => dataptr(phi_load(n),i)
lo = lwb(get_box(phi_old(n),i))
hi = upb(get_box(phi_old(n),i))
select case(dm)
case (2)
call load_phi_2d_0(da(:,:,1,1),dp(:,:,1,1),dp(:,:,1,2),dp(:,:,1,3),&
dl(:,:,1,1),dl(:,:,1,2),dl(:,:,1,3),dl(:,:,1,4), time, &
ng,lo,hi,prob_lo,prob_hi,dx(n),pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,&
pfpara%sk,pfpara%Rate_Cooling,pfpara%temp_lowcut,pfpara%coupled_mode,&
pfpara%cal_tem_mode)
case (3)
call load_phi_3d_0(da(:,:,:,1),dp(:,:,:,1),dp(:,:,:,2),dp(:,:,:,3),&
                 dl(:,:,:,1),dl(:,:,:,2),dl(:,:,:,3),dl(:,:,:,4), time, &
ng,lo,hi,prob_lo,prob_hi,dx(n),pfpara%temp_h_l,pfpara%temp_h_r,pfpara%temp_l,&
pfpara%sk,pfpara%Rate_Cooling,pfpara%temp_lowcut,pfpara%coupled_mode,&
pfpara%cal_tem_mode)
end select
end do
end do
end if
call ml_restrict_and_fill(nlevs, phi_old, mla%mba%rr, the_bc_tower%bc_tower_array,1,1,3)
call ml_restrict_and_fill(nlevs, ad_ori, mla%mba%rr, the_bc_tower%bc_tower_array,1,1,1)
end subroutine trans_result_load
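! Note on the U reconstruction in load_phi_2d/3d below (inferred from the
! formula, not stated elsewhere in this file): lod_trc appears to hold a
! solute-like field c, and with partition coefficient k = p_k the code
! inverts c = (1 + (1-k)*U) * (1 + k - (1-k)*phi) / (2*k) to recover
!   U = ( 2*k*c / (1 + k - (1-k)*phi) - 1 ) / (1 - k),
! computed in two steps inside the loops below.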
subroutine load_phi_2d (phi_ad,phi_pf,phi_uc,phi_th,lod_ad,lod_pf,lod_trc,time,&
ng,lo,hi,prob_lo,prob_hi,dx,t_h_l,t_h_r,t_l,p_k,r_cool,t_cut)
integer :: lo(2), hi(2), ng
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: lod_ad(lo(1):hi(1),lo(2):hi(2))
double precision :: lod_pf(lo(1):hi(1),lo(2):hi(2))
double precision :: lod_trc(lo(1):hi(1),lo(2):hi(2))
double precision :: prob_lo(2),prob_hi(2)
double precision :: dx,t_h_l,t_h_r,t_l,p_k,time,r_cool,t_cut
! local variables
integer :: i,j
double precision :: y,y_l,x,x_l,t_h
  ! Domain extents: y_l (top to bottom) and x_l (left to right)
y_l = prob_hi(2) - prob_lo(2)
x_l = prob_hi(1) - prob_lo(1)
  ! reload the saved fields and rebuild the temperature gradient
  !$omp parallel do private(i,j,x,y,t_h)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_ad(i,j) = lod_ad(i,j)
phi_pf(i,j) = lod_pf(i,j)
phi_uc(i,j) = 2.d0*p_k*lod_trc(i,j)/(1.d0+p_k-(1.d0-p_k)*lod_pf(i,j)) - 1.d0
phi_uc(i,j) = phi_uc(i,j) / (1.d0-p_k)
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j) = t_l + (t_h - t_l) * y / y_l
if(phi_th(i,j) < t_cut) then
phi_th(i,j) = t_cut
end if
end do
end do
!$omp end parallel do
end subroutine load_phi_2d
subroutine load_phi_3d (phi_ad,phi_pf,phi_uc,phi_th,lod_ad,lod_pf,lod_trc,time,&
ng,lo,hi,prob_lo,prob_hi,dx,t_h_l,t_h_r,t_l,p_k,r_cool,t_cut)
integer :: lo(3), hi(3), ng
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: lod_ad(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3))
double precision :: lod_pf(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3))
double precision :: lod_trc(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3))
double precision :: prob_lo(3),prob_hi(3)
double precision :: dx, t_h_l,t_h_r,t_l,p_k,time,r_cool,t_cut
! local variables
integer :: i,j,k
double precision :: y,y_l,x,x_l,t_h
  ! Domain extents: y_l (top to bottom) and x_l (left to right)
y_l = prob_hi(2) - prob_lo(2)
x_l = prob_hi(1) - prob_lo(1)
  !$omp parallel do private(i,j,k,x,y,t_h)
do k = lo(3), hi(3)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_ad(i,j,k) = lod_ad(i,j,k)
phi_pf(i,j,k) = lod_pf(i,j,k)
phi_uc(i,j,k) = 2.d0*p_k*lod_trc(i,j,k)/(1.d0+p_k-(1.d0-p_k)*lod_pf(i,j,k)) - 1.d0
phi_uc(i,j,k) = phi_uc(i,j,k) / (1.d0-p_k)
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j,k) = t_l + (t_h - t_l) * y / y_l
if(phi_th(i,j,k) < t_cut) then
phi_th(i,j,k) = t_cut
end if
end do !i
end do !j
end do !k
!$omp end parallel do
end subroutine load_phi_3d
subroutine load_phi_2d_0 (phi_ad,phi_pf,phi_uc,phi_th,lod_ad,lod_pf,lod_uc,lod_th,time,&
ng,lo,hi,prob_lo,prob_hi,dx,t_h_l,t_h_r,t_l,p_k,r_cool,t_cut,&
coupled_mode,cal_tem_mode)
integer :: lo(2), hi(2), ng, coupled_mode,cal_tem_mode
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: lod_ad(lo(1):hi(1),lo(2):hi(2))
double precision :: lod_pf(lo(1):hi(1),lo(2):hi(2))
double precision :: lod_uc(lo(1):hi(1),lo(2):hi(2))
double precision :: lod_th(lo(1):hi(1),lo(2):hi(2))
double precision :: prob_lo(2),prob_hi(2)
double precision :: dx,t_h_l,t_h_r,t_l,p_k,time,r_cool,t_cut
! local variables
integer :: i,j
double precision :: y,y_l,x,x_l,t_h
  ! Domain extents: y_l (top to bottom) and x_l (left to right)
y_l = prob_hi(2) - prob_lo(2)
x_l = prob_hi(1) - prob_lo(1)
  ! reload the saved fields; rebuild the temperature gradient unless it was stored (coupled mode)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_ad(i,j) = lod_ad(i,j)
phi_pf(i,j) = lod_pf(i,j)
phi_uc(i,j) = lod_uc(i,j)
if(coupled_mode==1 .and. cal_tem_mode==1) then
phi_th(i,j) = lod_th(i,j)
else
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j) = t_l + (t_h - t_l) * y / y_l
if(phi_th(i,j) < t_cut) then
phi_th(i,j) = t_cut
end if
end if
end do
end do
end subroutine load_phi_2d_0
subroutine load_phi_3d_0 (phi_ad,phi_pf,phi_uc,phi_th,lod_ad,lod_pf,lod_uc,lod_th,time,&
ng,lo,hi,prob_lo,prob_hi,dx,t_h_l,t_h_r,t_l,p_k,r_cool,t_cut, &
coupled_mode,cal_tem_mode)
integer :: lo(3), hi(3), ng, coupled_mode,cal_tem_mode
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: lod_ad(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3))
double precision :: lod_pf(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3))
double precision :: lod_uc(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3))
double precision :: lod_th(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3))
double precision :: prob_lo(3),prob_hi(3)
double precision :: dx, t_h_l,t_h_r,t_l,p_k,time,r_cool,t_cut
! local variables
integer :: i,j,k
double precision :: y,y_l,x,x_l,t_h
  ! Domain extents: y_l (top to bottom) and x_l (left to right)
y_l = prob_hi(2) - prob_lo(2)
x_l = prob_hi(1) - prob_lo(1)
do k = lo(3), hi(3)
do j = lo(2), hi(2)
y = prob_lo(2) + (dble(j)+0.5d0) * dx
do i = lo(1), hi(1)
x = prob_lo(1) + (dble(i)+0.5d0) * dx
phi_ad(i,j,k) = lod_ad(i,j,k)
phi_pf(i,j,k) = lod_pf(i,j,k)
phi_uc(i,j,k) = lod_uc(i,j,k)
if(coupled_mode==1 .and. cal_tem_mode==1) then
phi_th(i,j,k) = lod_th(i,j,k)
else
t_h = t_h_l + (t_h_r - t_h_l) * x / x_l
phi_th(i,j,k) = t_l + (t_h - t_l) * y / y_l
if(phi_th(i,j,k) < t_cut) then
phi_th(i,j,k) = t_cut
end if
end if
end do !i
end do !j
end do !k
end subroutine load_phi_3d_0
subroutine init_flw_on_level(flw,phi,the_bc_level,pfpara)
type(multifab) , intent(inout) :: flw
type(multifab) , intent(inout) :: phi
type(pf_para), intent(in ) :: pfpara
type(bc_level) , intent(in ) :: the_bc_level
! local
integer i,ng,dm
integer :: lo(phi%dim), hi(phi%dim)
integer :: iter_st(3),iter_end(3),icom_start,ncomponent
real(kind=dp_t), pointer :: pfo(:,:,:,:)
real(kind=dp_t), pointer :: u_vec(:,:,:,:)
real(kind=dp_t), pointer :: f_mat(:,:,:,:)
real(kind=dp_t), pointer :: k_mat(:,:,:,:)
ng = phi%ng
dm = phi%dim
select case(dm)
case (2)
icom_start = 1
ncomponent = 21
iter_st = (/1,4,13/)
iter_end = (/3,9,9/)
case (3)
icom_start = 1
ncomponent = 42
iter_st = (/1,5,24/)
iter_end = (/4,19,19/)
end select
do i=1,nfabs(phi)
pfo => dataptr(phi,i)
u_vec => dataptr(flw,i,iter_st(1),iter_end(1))
f_mat => dataptr(flw,i,iter_st(2),iter_end(2))
k_mat => dataptr(flw,i,iter_st(3),iter_end(3))
lo = lwb(get_box(phi,i))
hi = upb(get_box(phi,i))
select case(dm)
case (2)
call init_flw_2d(pfo(:,:,1,1),u_vec(:,:,1,:),f_mat(:,:,1,:),k_mat(:,:,1,:),ng,lo,hi,&
pfpara%flw_rho)
case (3)
call init_flw_3d(pfo(:,:,:,1),u_vec(:,:,:,:),f_mat(:,:,:,:),k_mat(:,:,:,:),ng,lo,hi,&
pfpara%flw_rho)
end select
end do
call multifab_fill_boundary_c(flw,icom_start,ncomponent,ng)
call multifab_physbc(flw,icom_start,1,ncomponent,the_bc_level)
if(pfpara%kill_phi_flag .eq. 1) call multifab_setval_c(phi,-1.d0,1,1)
end subroutine init_flw_on_level
subroutine init_flw(mla,flw,phi,the_bc_tower,pfpara)
type(ml_layout), intent(in ) :: mla
type(multifab) , intent(inout) :: flw(:)
type(multifab) , intent(inout) :: phi(:)
type(bc_tower) , intent(in ) :: the_bc_tower
type(pf_para), intent(in ) :: pfpara
! local variables
integer :: lo(mla%dim), hi(mla%dim)
integer :: nlevs, dm, ng, i, n
integer :: iter_st(3),iter_end(3),icom_start,ncomponent
real(kind=dp_t), pointer :: pfo(:,:,:,:)
real(kind=dp_t), pointer :: u_vec(:,:,:,:)
real(kind=dp_t), pointer :: f_mat(:,:,:,:)
real(kind=dp_t), pointer :: k_mat(:,:,:,:)
ng = phi(1)%ng
dm = mla%dim
nlevs = mla%nlevel
select case(dm)
case (2)
icom_start = 1
ncomponent = 21
iter_st = (/1,4,13/)
iter_end = (/3,9,9/)
case (3)
icom_start = 1
ncomponent = 42
iter_st = (/1,5,24/)
iter_end = (/4,19,19/)
end select
do n=1,nlevs
do i=1,nfabs(phi(n))
pfo => dataptr(phi(n),i)
u_vec => dataptr(flw(n),i,iter_st(1),iter_end(1))
f_mat => dataptr(flw(n),i,iter_st(2),iter_end(2))
k_mat => dataptr(flw(n),i,iter_st(3),iter_end(3))
lo = lwb(get_box(phi(n),i))
hi = upb(get_box(phi(n),i))
select case(dm)
case (2)
call init_flw_2d(pfo(:,:,1,1),u_vec(:,:,1,:),f_mat(:,:,1,:),k_mat(:,:,1,:),ng,lo,hi,&
pfpara%flw_rho)
case (3)
call init_flw_3d(pfo(:,:,:,1),u_vec(:,:,:,:),f_mat(:,:,:,:),k_mat(:,:,:,:),ng,lo,hi,&
pfpara%flw_rho)
end select
end do
if(pfpara%kill_phi_flag .eq. 1) call multifab_setval_c(phi(n),-1.d0,1,1)
end do
! restrict the multi-level data, and
! fill all boundaries: same-level, coarse-fine, periodic, and domain boundaries
call ml_restrict_and_fill(nlevs, flw, mla%mba%rr, the_bc_tower%bc_tower_array,icom_start,1,ncomponent)
end subroutine init_flw
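! Component layout of the flw multifab, as implied by the iter_st/iter_end
! offsets above and by the array bounds in load_flw_2d/3d (an inference from
! this file):
!   2D, 21 components: 1-3  velocity u,v and density rho;
!                      4-12 D2Q9 distributions f_1..f_9;
!                      13-21 auxiliary matrix k_1..k_9.
!   3D, 42 components: 1-4  velocity u,v,w and density rho;
!                      5-23 D3Q19 distributions f_1..f_19;
!                      24-42 auxiliary matrix k_1..k_19.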
subroutine init_flw_2d (phi,u_vec,f_mat,k_mat,ng,lo,hi,rho)
  integer :: lo(2), hi(2), ng
double precision :: phi(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: u_vec(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,1:3)
double precision :: f_mat(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,1:9)
double precision :: k_mat(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,1:9)
double precision :: rho
! local variables
integer :: i,j,iter
  integer, parameter :: q = 9
double precision, parameter :: p0_2d = 4.d0/9.d0, p1_2d = 1.d0/9.d0, p2_2d=1.d0/36.d0
integer :: ex(q)=(/ 0,1,0,-1,0,1,-1,-1,1 /),ey(q)=(/ 0,0,1,0,-1,1,1,-1,-1 /)
double precision :: w(q)=(/ p0_2d,p1_2d,p1_2d,p1_2d,p1_2d,p2_2d,p2_2d,p2_2d,p2_2d /)
! equiaxed dendrite growth
do j = lo(2), hi(2)
do i = lo(1), hi(1)
u_vec(i,j,1) =0.d0
u_vec(i,j,2) =0.d0
u_vec(i,j,3) =rho
do iter=1,q
f_mat(i,j,iter)=w(iter)*u_vec(i,j,3)
end do
k_mat(i,j,:)=0.d0
end do
end do
end subroutine init_flw_2d
subroutine init_flw_3d (phi,u_vec,f_mat,k_mat,ng,lo,hi,rho)
integer :: lo(3), hi(3), ng
double precision :: phi(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: u_vec(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng,1:4)
double precision :: f_mat(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng,1:19)
double precision :: k_mat(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng,1:19)
double precision :: rho
! local variables
integer :: i,j,k,iter
integer,parameter:: q=19
double precision, parameter :: p0_3d = 1.d0/3.d0, p1_3d = 1.d0/18.d0, p2_3d=1.d0/36.d0
integer :: ex(q)=(/ 0,1,-1,0,0,0,0,1,1,-1,-1,1,-1,1,-1,0,0,0,0 /)
integer :: ey(q)=(/ 0,0,0,1,-1,0,0,1,-1,1,-1,0,0,0,0,1,1,-1,-1 /)
integer :: ez(q)=(/ 0,0,0,0,0,1,-1,0,0,0,0,1,1,-1,-1,1,-1,1,-1 /)
double precision :: w(q)=(/ p0_3d,p1_3d,p1_3d,p1_3d,p1_3d,p1_3d,p1_3d,p2_3d,p2_3d,p2_3d,&
p2_3d,p2_3d,p2_3d,p2_3d,p2_3d,p2_3d,p2_3d,p2_3d,p2_3d /)
do k = lo(3), hi(3)
do j = lo(2), hi(2)
do i = lo(1), hi(1)
u_vec(i,j,k,1:3) =0.d0
u_vec(i,j,k,4) =rho
do iter=1,q
f_mat(i,j,k,iter)=w(iter)*u_vec(i,j,k,4)
end do
k_mat(i,j,k,:) = 0.d0
end do !i
end do !j
end do !k
end subroutine init_flw_3d
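! init_flw_2d/3d above start the lattice-Boltzmann field from a fluid at
! rest: zero velocity, density rho, and each distribution set to its
! zero-velocity equilibrium f_i = w_i * rho, with the standard D2Q9 weights
! (4/9, 1/9, 1/36) and D3Q19 weights (1/3, 1/18, 1/36). The ex/ey/ez stencil
! arrays are unused here and appear to be kept for reference.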
subroutine trans_result_load_flw(mla,flw,phi_old,ad_ori,phi_load,the_bc_tower)
type(ml_layout), intent(in ) :: mla
type(multifab) , intent(inout) :: flw(:),phi_old(:),ad_ori(:)
type(multifab) , pointer :: phi_load(:)
type(bc_tower) , intent(in ) :: the_bc_tower
! local variables
integer :: lo(mla%dim), hi(mla%dim)
integer :: nlevs, dm, ng, i, n,nc_flw_0
real(kind=dp_t), pointer :: dp(:,:,:,:)
real(kind=dp_t), pointer :: da(:,:,:,:)
real(kind=dp_t), pointer :: dl(:,:,:,:)
real(kind=dp_t), pointer :: df(:,:,:,:)
real(kind=dp_t), pointer :: dll(:,:,:,:)
ng = phi_old(1)%ng
dm = mla%dim
nlevs = mla%nlevel
if(dm .eq. 2) then
nc_flw_0 = 12
else if(dm .eq. 3) then
nc_flw_0 = 23
end if
do n=1,nlevs
do i=1,nfabs(phi_old(n))
dp => dataptr(phi_old(n),i)
da => dataptr(ad_ori(n),i)
dl => dataptr(phi_load(n),i,1,4)
dll => dataptr(phi_load(n),i,5,nc_flw_0)
df => dataptr(flw(n),i)
lo = lwb(get_box(phi_old(n),i))
hi = upb(get_box(phi_old(n),i))
select case(dm)
case (2)
call load_flw_2d(da(:,:,1,1),dp(:,:,1,1),dp(:,:,1,2),dp(:,:,1,3),df(:,:,1,:),&
dl(:,:,1,:),dll(:,:,1,:),ng,lo,hi)
case (3)
call load_flw_3d(da(:,:,:,1),dp(:,:,:,1),dp(:,:,:,2),dp(:,:,:,3),df(:,:,:,:),&
dl(:,:,:,:),dll(:,:,:,:),ng,lo,hi)
end select
end do
end do
!call ml_restrict_and_fill(nlevs, phi_old, mla%mba%rr, the_bc_tower%bc_tower_array,1,1,3)
!call ml_restrict_and_fill(nlevs, ad_ori, mla%mba%rr, the_bc_tower%bc_tower_array,1,1,1)
!call ml_restrict_and_fill(nlevs, flw, mla%mba%rr, the_bc_tower%bc_tower_array,1,1,nc_flw_0)
do n=1,nlevs
call multifab_fill_boundary_c(ad_ori(n), 1, 1, ng)
call multifab_fill_boundary_c(phi_old(n), 1, 3, ng)
call multifab_fill_boundary_c(flw(n), 1, nc_flw_0, ng)
call multifab_physbc(ad_ori(n),1,1,1,the_bc_tower%bc_tower_array(n))
call multifab_physbc(phi_old(n),1,1,3,the_bc_tower%bc_tower_array(n))
call multifab_physbc(flw(n),1,1,nc_flw_0,the_bc_tower%bc_tower_array(n))
end do
end subroutine trans_result_load_flw
subroutine load_flw_2d (phi_ad,phi_pf,phi_uc,phi_th,flw,phi_load,flw_load,ng,lo,hi)
integer :: lo(2), hi(2), ng
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng)
double precision :: flw(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,1:21)
double precision :: phi_load(lo(1):hi(1),lo(2):hi(2),1:4)
double precision :: flw_load(lo(1):hi(1),lo(2):hi(2),1:12)
! local variables
  integer :: i,j
  ! copy the loaded phase and flow fields into the padded arrays
  !$omp parallel do private(i,j)
do j = lo(2), hi(2)
do i = lo(1), hi(1)
phi_ad(i,j) = phi_load(i,j,1)
phi_pf(i,j) = phi_load(i,j,2)
phi_uc(i,j) = phi_load(i,j,3)
phi_th(i,j) = phi_load(i,j,4)
flw(i,j,1:12) = flw_load(i,j,1:12)
flw(i,j,13:21) = 0.d0
end do
end do
!$omp end parallel do
end subroutine load_flw_2d
subroutine load_flw_3d (phi_ad,phi_pf,phi_uc,phi_th,flw,phi_load,flw_load,ng,lo,hi)
integer :: lo(3), hi(3), ng
double precision :: phi_ad(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_pf(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_uc(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: phi_th(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng)
double precision :: flw(lo(1)-ng:hi(1)+ng,lo(2)-ng:hi(2)+ng,lo(3)-ng:hi(3)+ng,1:42)
double precision :: phi_load(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3),1:4)
double precision :: flw_load(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3),1:23)
! local variables
  integer :: i,j,k
  !$omp parallel do private(i,j,k)
do k = lo(3), hi(3)
do j = lo(2), hi(2)
do i = lo(1), hi(1)
phi_ad(i,j,k) = phi_load(i,j,k,1)
phi_pf(i,j,k) = phi_load(i,j,k,2)
phi_uc(i,j,k) = phi_load(i,j,k,3)
phi_th(i,j,k) = phi_load(i,j,k,4)
flw(i,j,k,1:23) = flw_load(i,j,k,1:23)
flw(i,j,k,24:42) = 0.d0
end do !i
end do !j
end do !k
!$omp end parallel do
end subroutine load_flw_3d
end module init_phi_module
|
{"hexsha": "4e8f957d82796804e4438530cdedc42d3077d9c3", "size": 33877, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Work/FLOW_PHASEFIELD/init_phi.f90", "max_stars_repo_name": "Marsfish1981/HPC-Phase_Field", "max_stars_repo_head_hexsha": "80a6b367cb6395541150ea68ab9dd3802f37850b", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL", "BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2018-08-26T07:39:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T10:25:32.000Z", "max_issues_repo_path": "Work/FLOW_PHASEFIELD/init_phi.f90", "max_issues_repo_name": "Marsfish1981/HPC-Phase_Field", "max_issues_repo_head_hexsha": "80a6b367cb6395541150ea68ab9dd3802f37850b", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Work/FLOW_PHASEFIELD/init_phi.f90", "max_forks_repo_name": "Marsfish1981/HPC-Phase_Field", "max_forks_repo_head_hexsha": "80a6b367cb6395541150ea68ab9dd3802f37850b", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL", "BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-05-12T11:49:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-04T20:48:04.000Z", "avg_line_length": 33.6081349206, "max_line_length": 120, "alphanum_fraction": 0.5458570712, "num_tokens": 12541}
|
# stdlib
from functools import lru_cache
from typing import Dict
# third party
from autodp import dp_bank
from autodp import fdp_bank
from autodp.autodp_core import Mechanism
import numpy as np
@lru_cache(maxsize=None)
def _individual_RDP_gaussian(
sigma: float, value: float, L: float, alpha: float
) -> float:
return (alpha * (L ** 2) * (value ** 2)) / (2 * (sigma ** 2))
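# The cached helper above is the standard Gaussian-mechanism RDP curve with
# the L2 sensitivity rescaled per individual to L * value:
#     epsilon(alpha) = alpha * (L * value)**2 / (2 * sigma**2).
# (This reading is inferred directly from the formula.)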
def individual_RDP_gaussian(params: Dict, alpha: float) -> np.float64:
"""
:param params:
'sigma' --- is the normalized noise level: std divided by global L2 sensitivity
:param alpha: The order of the Renyi Divergence
:return: Evaluation of the RDP's epsilon
"""
sigma = params["sigma"]
value = params["value"]
L = params["L"]
if sigma <= 0:
raise Exception("Sigma should be above 0")
if alpha < 0:
raise Exception("Sigma should not be below 0")
return _individual_RDP_gaussian(sigma=sigma, alpha=alpha, value=value, L=L)
# Example of a specific mechanism that inherits the Mechanism class
class iDPGaussianMechanism(Mechanism):
def __init__(
self,
sigma: float,
value: float,
        L: float,
entity: str,
name: str = "Gaussian",
RDP_off: bool = False,
approxDP_off: bool = False,
fdp_off: bool = True,
use_basic_rdp_to_approx_dp_conversion: bool = False,
use_fdp_based_rdp_to_approx_dp_conversion: bool = False,
):
        # the sigma parameter is the std of the noise divided by the L2 sensitivity
Mechanism.__init__(self)
self.name = name # When composing
self.params = {
"sigma": sigma,
"value": value,
"L": L,
} # This will be useful for the Calibrator
self.entity = entity
# TODO: should a generic unspecified mechanism have a name and a param dictionary?
self.delta0 = 0
if not RDP_off:
# Tudor: i'll fix these
new_rdp = lambda x: individual_RDP_gaussian(self.params, x) # noqa: E731
if use_fdp_based_rdp_to_approx_dp_conversion:
# This setting is slightly more complex, which involves converting RDP to fDP,
# then to eps-delta-DP via the duality
self.propagate_updates(new_rdp, "RDP", fDP_based_conversion=True)
elif use_basic_rdp_to_approx_dp_conversion:
self.propagate_updates(new_rdp, "RDP", BBGHS_conversion=False)
else:
# This is the default setting with fast computation of RDP to approx-DP
self.propagate_updates(new_rdp, "RDP")
if not approxDP_off: # Direct implementation of approxDP
new_approxdp = lambda x: dp_bank.get_eps_ana_gaussian( # noqa: E731
sigma, x
)
self.propagate_updates(new_approxdp, "approxDP_func")
if not fdp_off: # Direct implementation of fDP
# Tudor: i'll fix these
fun1 = lambda x: fdp_bank.log_one_minus_fdp_gaussian( # noqa: E731
{"sigma": sigma}, x
)
fun2 = lambda x: fdp_bank.log_neg_fdp_grad_gaussian( # noqa: E731
{"sigma": sigma}, x
)
self.propagate_updates([fun1, fun2], "fDP_and_grad_log")
# overwrite the fdp computation with the direct computation
self.fdp = lambda x: fdp_bank.fDP_gaussian(
{"sigma": sigma}, x
) # noqa: E731
# the fDP of gaussian mechanism is equivalent to analytical calibration of approxdp,
# so it should have been automatically handled numerically above
# Discussion: Sometimes delta as a function of eps has a closed-form solution
# while eps as a function of delta does not
# Shall we represent delta as a function of eps instead?
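# Minimal usage sketch (not part of the original module; parameter values are
# illustrative only). It relies on the `get_RDP`/`get_approxDP` accessors of
# autodp's `Mechanism` base class -- verify against your installed autodp
# version before relying on them.
if __name__ == "__main__":
    mech = iDPGaussianMechanism(sigma=2.0, value=0.5, L=1.0, entity="alice")
    # Renyi-DP epsilon at order alpha=2 and approx-DP epsilon at delta=1e-6
    print("RDP(alpha=2):", mech.get_RDP(2))
    print("approxDP eps(delta=1e-6):", mech.get_approxDP(1e-6))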
|
{"hexsha": "e87011da3841abc315e22906bde74485e41a1fa8", "size": 3894, "ext": "py", "lang": "Python", "max_stars_repo_path": "packages/syft/src/syft/core/adp/idp_gaussian_mechanism.py", "max_stars_repo_name": "callezenwaka/PySyft", "max_stars_repo_head_hexsha": "2545c302441cfe727ec095c4f9aa136bff02be32", "max_stars_repo_licenses": ["Apache-1.1"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-14T10:56:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T10:56:43.000Z", "max_issues_repo_path": "packages/syft/src/syft/core/adp/idp_gaussian_mechanism.py", "max_issues_repo_name": "callezenwaka/PySyft", "max_issues_repo_head_hexsha": "2545c302441cfe727ec095c4f9aa136bff02be32", "max_issues_repo_licenses": ["Apache-1.1"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-02T10:12:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-02T10:12:50.000Z", "max_forks_repo_path": "packages/syft/src/syft/core/adp/idp_gaussian_mechanism.py", "max_forks_repo_name": "callezenwaka/PySyft", "max_forks_repo_head_hexsha": "2545c302441cfe727ec095c4f9aa136bff02be32", "max_forks_repo_licenses": ["Apache-1.1"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-19T12:23:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-19T12:23:01.000Z", "avg_line_length": 37.4423076923, "max_line_length": 94, "alphanum_fraction": 0.62403698, "include": true, "reason": "import numpy", "num_tokens": 962}
|
# Main waveform class location
# Copyright (C) 2020 Michael L. Katz, Alvin J.K. Chua, Niels Warburton, Scott A. Hughes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import sys
import os
from abc import ABC
import numpy as np
from tqdm import tqdm
# check if cupy is available / GPU is available
try:
import cupy as xp
except (ImportError, ModuleNotFoundError):
import numpy as xp
from few.utils.baseclasses import SchwarzschildEccentric, Pn5AAK
from few.trajectory.pn5 import RunKerrGenericPn5Inspiral
from few.trajectory.flux import RunSchwarzEccFluxInspiral
from few.amplitude.interp2dcubicspline import Interp2DAmplitude
from few.utils.utility import get_mismatch
from few.amplitude.romannet import RomanAmplitude
from few.utils.modeselector import ModeSelector
from few.utils.ylm import GetYlms
from few.summation.directmodesum import DirectModeSum
from few.summation.aakwave import AAKSummation
from few.utils.constants import *
from few.utils.citations import *
from few.summation.interpolatedmodesum import InterpolatedModeSum
class SchwarzschildEccentricWaveformBase(SchwarzschildEccentric, ABC):
"""Base class for the actual Schwarzschild eccentric waveforms.
This class carries information and methods that are common to any
implementation of Schwarzschild eccentric waveforms. These include
initialization and the actual base code for building a waveform. This base
code calls the various modules chosen by the user or according to the
predefined waveform classes available. See
    :class:`few.utils.baseclasses.SchwarzschildEccentric` for high-level
    information on these waveform models.
args:
inspiral_module (obj): Class object representing the module
for creating the inspiral. This returns the phases and orbital
parameters. See :ref:`trajectory-label`.
amplitude_module (obj): Class object representing the module for
generating amplitudes. See :ref:`amplitude-label` for more
information.
sum_module (obj): Class object representing the module for summing the
final waveform from the amplitude and phase information. See
:ref:`summation-label`.
inspiral_kwargs (dict, optional): Optional kwargs to pass to the
inspiral generator. **Important Note**: These kwargs are passed
online, not during instantiation like other kwargs here. Default is
{}. This is stored as an attribute.
amplitude_kwargs (dict, optional): Optional kwargs to pass to the
amplitude generator during instantiation. Default is {}.
sum_kwargs (dict, optional): Optional kwargs to pass to the
sum module during instantiation. Default is {}.
Ylm_kwargs (dict, optional): Optional kwargs to pass to the
Ylm generator during instantiation. Default is {}.
use_gpu (bool, optional): If True, use GPU resources. Default is False.
normalize_amps (bool, optional): If True, it will normalize amplitudes
to flux information output from the trajectory modules. Default
is True. This is stored as an attribute.
"""
def attributes_SchwarzschildEccentricWaveformBase(self):
"""
attributes:
inspiral_generator (obj): instantiated trajectory module.
amplitude_generator (obj): instantiated amplitude module.
ylm_gen (obj): instantiated ylm module.
create_waveform (obj): instantiated summation module.
ylm_gen (obj): instantiated Ylm module.
mode_selector (obj): instantiated mode selection module.
num_teuk_modes (int): number of Teukolsky modes in the model.
ls, ms, ns (1D int xp.ndarray): Arrays of mode indices :math:`(l,m,n)`
after filtering operation. If no filtering, these are equivalent
to l_arr, m_arr, n_arr.
xp (obj): numpy or cupy based on gpu usage.
num_modes_kept (int): Number of modes for final waveform after mode
selection.
"""
pass
def __init__(
self,
inspiral_module,
amplitude_module,
sum_module,
inspiral_kwargs={},
amplitude_kwargs={},
sum_kwargs={},
Ylm_kwargs={},
use_gpu=False,
normalize_amps=True,
):
SchwarzschildEccentric.__init__(self, use_gpu)
amplitude_kwargs, sum_kwargs = self.adjust_gpu_usage(
use_gpu, [amplitude_kwargs, sum_kwargs]
)
# normalize amplitudes to flux at each step from trajectory
self.normalize_amps = normalize_amps
# kwargs that are passed to the inspiral call function
self.inspiral_kwargs = inspiral_kwargs
        # function for generating the inspiral
self.inspiral_generator = inspiral_module()
# function for generating the amplitude
self.amplitude_generator = amplitude_module(**amplitude_kwargs)
# summation generator
self.create_waveform = sum_module(**sum_kwargs)
# angular harmonics generation
self.ylm_gen = GetYlms(use_gpu=use_gpu, **Ylm_kwargs)
# selecting modes that contribute at threshold to the waveform
self.mode_selector = ModeSelector(self.m0mask, use_gpu=use_gpu)
@property
def citation(self):
"""Return citations related to this module"""
return few_citation + few_software_citation + romannet_citation
def __call__(
self,
M,
mu,
p0,
e0,
theta,
phi,
dist,
dt=10.0,
T=1.0,
eps=1e-5,
show_progress=False,
batch_size=-1,
mode_selection=None,
):
"""Call function for SchwarzschildEccentric models.
This function will take input parameters and produce Schwarzschild
eccentric waveforms. It will use all of the modules preloaded to
compute desired outputs.
args:
M (double): Mass of larger black hole in solar masses.
mu (double): Mass of compact object in solar masses.
p0 (double): Initial semilatus rectum (:math:`10\leq p_0\leq16 + e_0`).
See documentation for more information on :math:`p_0<10`.
e0 (double): Initial eccentricity (:math:`0.0\leq e_0\leq0.7`).
theta (double): Polar viewing angle (:math:`-\pi/2\leq\Theta\leq\pi/2`).
phi (double): Azimuthal viewing angle.
dist (double): Luminosity distance in Gpc.
dt (double, optional): Time between samples in seconds (inverse of
sampling frequency). Default is 10.0.
T (double, optional): Total observation time in years.
Default is 1.0.
eps (double, optional): Controls the fractional accuracy during mode
filtering. Raising this parameter will remove modes. Lowering
                this parameter will add modes. The default of 1e-5 gives a
                good overlap.
show_progress (bool, optional): If True, show progress through
amplitude/waveform batches using
`tqdm <https://tqdm.github.io/>`_. Default is False.
batch_size (int, optional): If less than 0, create the waveform
without batching. If greater than zero, create the waveform
batching in sizes of batch_size. Default is -1.
mode_selection (str or list or None): Determines the type of mode
filtering to perform. If None, perform our base mode filtering
with eps as the fractional accuracy on the total power.
If 'all', it will run all modes without filtering. If a list of
tuples (or lists) of mode indices
(e.g. [(:math:`l_1,m_1,n_1`), (:math:`l_2,m_2,n_2`)]) is
provided, it will return those modes combined into a
single waveform.
Returns:
1D complex128 xp.ndarray: The output waveform.
Raises:
ValueError: user selections are not allowed.
"""
# makes sure viewing angles are allowable
theta, phi = self.sanity_check_viewing_angles(theta, phi)
self.sanity_check_init(M, mu, p0, e0)
# get trajectory
(t, p, e, Phi_phi, Phi_r, amp_norm) = self.inspiral_generator(
M, mu, p0, e0, T=T, dt=dt, **self.inspiral_kwargs
)
# makes sure p and e are generally within the model
self.sanity_check_traj(p, e)
self.end_time = t[-1]
# convert for gpu
t = self.xp.asarray(t)
p = self.xp.asarray(p)
e = self.xp.asarray(e)
Phi_phi = self.xp.asarray(Phi_phi)
Phi_r = self.xp.asarray(Phi_r)
amp_norm = self.xp.asarray(amp_norm)
# get ylms only for unique (l,m) pairs
        # then expand to all (l,m,n) via self.inverse_lm
ylms = self.ylm_gen(self.unique_l, self.unique_m, theta, phi).copy()[
self.inverse_lm
]
# split into batches
if batch_size == -1 or self.allow_batching is False:
inds_split_all = [self.xp.arange(len(t))]
else:
split_inds = []
i = 0
while i < len(t):
i += batch_size
if i >= len(t):
break
split_inds.append(i)
inds_split_all = self.xp.split(self.xp.arange(len(t)), split_inds)
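            # Worked example of the batching above (illustrative numbers):
            # with len(t) == 10 and batch_size == 4, split_inds becomes
            # [4, 8], so xp.split yields index batches of sizes 4, 4 and 2.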
# select tqdm if user wants to see progress
iterator = enumerate(inds_split_all)
iterator = tqdm(iterator, desc="time batch") if show_progress else iterator
if show_progress:
print("total:", len(inds_split_all))
for i, inds_in in iterator:
# get subsections of the arrays for each batch
t_temp = t[inds_in]
p_temp = p[inds_in]
e_temp = e[inds_in]
Phi_phi_temp = Phi_phi[inds_in]
Phi_r_temp = Phi_r[inds_in]
amp_norm_temp = amp_norm[inds_in]
# amplitudes
teuk_modes = self.amplitude_generator(p_temp, e_temp)
# normalize by flux produced in trajectory
if self.normalize_amps:
amp_for_norm = self.xp.sum(
self.xp.abs(
self.xp.concatenate(
[teuk_modes, self.xp.conj(teuk_modes[:, self.m0mask])],
axis=1,
)
)
** 2,
axis=1,
) ** (1 / 2)
# normalize
factor = amp_norm_temp / amp_for_norm
teuk_modes = teuk_modes * factor[:, np.newaxis]
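                # The factor above rescales the mode amplitudes so their total
                # power matches the flux-derived normalization from the
                # trajectory: amp_for_norm is sqrt(sum_lmn |A_lmn|^2), with the
                # m<0 modes restored via complex conjugation of the m>0 ones.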
# different types of mode selection
# sets up ylm and teuk_modes properly for summation
if isinstance(mode_selection, str):
# use all modes
if mode_selection == "all":
self.ls = self.l_arr[: teuk_modes.shape[1]]
self.ms = self.m_arr[: teuk_modes.shape[1]]
self.ns = self.n_arr[: teuk_modes.shape[1]]
keep_modes = self.xp.arange(teuk_modes.shape[1])
temp2 = keep_modes * (keep_modes < self.num_m0) + (
keep_modes + self.num_m_1_up
) * (keep_modes >= self.num_m0)
ylmkeep = self.xp.concatenate([keep_modes, temp2])
ylms_in = ylms[ylmkeep]
teuk_modes_in = teuk_modes
else:
raise ValueError("If mode selection is a string, must be `all`.")
# get a specific subset of modes
elif isinstance(mode_selection, list):
if mode_selection == []:
raise ValueError("If mode selection is a list, cannot be empty.")
keep_modes = self.xp.zeros(len(mode_selection), dtype=self.xp.int32)
for jj, lmn in enumerate(mode_selection):
keep_modes[jj] = self.xp.int32(self.lmn_indices[tuple(lmn)])
self.ls = self.l_arr[keep_modes]
self.ms = self.m_arr[keep_modes]
self.ns = self.n_arr[keep_modes]
temp2 = keep_modes * (keep_modes < self.num_m0) + (
keep_modes + self.num_m_1_up
) * (keep_modes >= self.num_m0)
ylmkeep = self.xp.concatenate([keep_modes, temp2])
ylms_in = ylms[ylmkeep]
teuk_modes_in = teuk_modes[:, keep_modes]
# mode selection based on input module
else:
modeinds = [self.l_arr, self.m_arr, self.n_arr]
(
teuk_modes_in,
ylms_in,
self.ls,
self.ms,
self.ns,
) = self.mode_selector(teuk_modes, ylms, modeinds, eps=eps)
# store number of modes for external information
self.num_modes_kept = teuk_modes_in.shape[1]
# create waveform
waveform_temp = self.create_waveform(
t_temp,
teuk_modes_in,
ylms_in,
Phi_phi_temp,
Phi_r_temp,
self.ms,
self.ns,
dt=dt,
T=T,
)
# if batching, need to add the waveform
if i > 0:
waveform = self.xp.concatenate([waveform, waveform_temp])
# return entire waveform
else:
waveform = waveform_temp
dist_dimensionless = (dist * Gpc) / (mu * MRSUN_SI)
return waveform / dist_dimensionless
class FastSchwarzschildEccentricFlux(SchwarzschildEccentricWaveformBase):
"""Prebuilt model for fast Schwarzschild eccentric flux-based waveforms.
This model combines the most efficient modules to produce the fastest
accurate EMRI waveforms. It leverages GPU hardware for maximal acceleration,
    but is also available for CPUs. Please see
:class:`few.utils.baseclasses.SchwarzschildEccentric` for general
information on this class of models.
The trajectory module used here is :class:`few.trajectory.flux` for a
flux-based, sparse trajectory. This returns approximately 100 points.
The amplitudes are then determined with
:class:`few.amplitude.romannet.RomanAmplitude` along these sparse
trajectories. This gives complex amplitudes for all modes in this model at
each point in the trajectory. These are then filtered with
:class:`few.utils.modeselector.ModeSelector`.
The modes that make it through the filter are then summed by
:class:`few.summation.interpolatedmodesum.InterpolatedModeSum`.
See :class:`few.waveform.SchwarzschildEccentricWaveformBase` for information
on inputs. See examples as well.
args:
inspiral_kwargs (dict, optional): Optional kwargs to pass to the
inspiral generator. **Important Note**: These kwargs are passed
online, not during instantiation like other kwargs here. Default is
{}.
amplitude_kwargs (dict, optional): Optional kwargs to pass to the
amplitude generator during instantiation. Default is {}.
sum_kwargs (dict, optional): Optional kwargs to pass to the
sum module during instantiation. Default is {}.
Ylm_kwargs (dict, optional): Optional kwargs to pass to the
Ylm generator during instantiation. Default is {}.
use_gpu (bool, optional): If True, use GPU resources. Default is False.
*args (list, placeholder): args for waveform model.
**kwargs (dict, placeholder): kwargs for waveform model.
"""
def __init__(
self,
inspiral_kwargs={},
amplitude_kwargs={},
sum_kwargs={},
Ylm_kwargs={},
use_gpu=False,
*args,
**kwargs
):
SchwarzschildEccentricWaveformBase.__init__(
self,
RunSchwarzEccFluxInspiral,
RomanAmplitude,
InterpolatedModeSum,
inspiral_kwargs=inspiral_kwargs,
amplitude_kwargs=amplitude_kwargs,
sum_kwargs=sum_kwargs,
Ylm_kwargs=Ylm_kwargs,
use_gpu=use_gpu,
*args,
**kwargs
)
def attributes_FastSchwarzschildEccentricFlux(self):
"""
Attributes:
            gpu_capability (bool): If True, this waveform can leverage GPU
resources. For this class it is True.
allow_batching (bool): If True, this waveform can use the batch_size
kwarg. For this class it is False.
"""
pass
@property
def gpu_capability(self):
return True
@property
def allow_batching(self):
return False
class SlowSchwarzschildEccentricFlux(SchwarzschildEccentricWaveformBase):
"""Prebuilt model for slow Schwarzschild eccentric flux-based waveforms.
    This model combines the various modules to produce a reference waveform
against which we test our fast models. Please see
:class:`few.utils.baseclasses.SchwarzschildEccentric` for general
information on this class of models.
The trajectory module used here is :class:`few.trajectory.flux` for a
    flux-based trajectory. For this slow waveform, the DENSE_STEPPING parameter
from :class:`few.utils.baseclasses.TrajectoryBase` is fixed to 1 to create
a densely sampled trajectory.
The amplitudes are then determined with
:class:`few.amplitude.interp2dcubicspline.Interp2DAmplitude`
along a densely sampled trajectory. This gives complex amplitudes
    for all modes in this model at each point in the trajectory. These can be
    chosen to be filtered, but for reference waveforms they should not be.
The modes that make it through the filter are then summed by
:class:`few.summation.directmodesum.DirectModeSum`.
See :class:`few.waveform.SchwarzschildEccentricWaveformBase` for information
on inputs. See examples as well.
args:
inspiral_kwargs (dict, optional): Optional kwargs to pass to the
inspiral generator. **Important Note**: These kwargs are passed
online, not during instantiation like other kwargs here. Default is
{}.
amplitude_kwargs (dict, optional): Optional kwargs to pass to the
amplitude generator during instantiation. Default is {}.
sum_kwargs (dict, optional): Optional kwargs to pass to the
sum module during instantiation. Default is {}.
Ylm_kwargs (dict, optional): Optional kwargs to pass to the
Ylm generator during instantiation. Default is {}.
use_gpu (bool, optional): If True, use GPU resources. Default is False.
*args (list, placeholder): args for waveform model.
**kwargs (dict, placeholder): kwargs for waveform model.
"""
@property
def gpu_capability(self):
return False
@property
def allow_batching(self):
return True
def attributes_SlowSchwarzschildEccentricFlux(self):
"""
attributes:
            gpu_capability (bool): If True, this waveform can leverage GPU
resources. For this class it is False.
allow_batching (bool): If True, this waveform can use the batch_size
kwarg. For this class it is True.
"""
pass
def __init__(
self,
inspiral_kwargs={},
amplitude_kwargs={},
sum_kwargs={},
Ylm_kwargs={},
use_gpu=False,
*args,
**kwargs
):
# declare specific properties
inspiral_kwargs["DENSE_STEPPING"] = 1
SchwarzschildEccentricWaveformBase.__init__(
self,
RunSchwarzEccFluxInspiral,
Interp2DAmplitude,
DirectModeSum,
inspiral_kwargs=inspiral_kwargs,
amplitude_kwargs=amplitude_kwargs,
sum_kwargs=sum_kwargs,
Ylm_kwargs=Ylm_kwargs,
use_gpu=use_gpu,
*args,
**kwargs
)
class Pn5AAKWaveform(Pn5AAK, ABC):
"""Waveform generation class for AAK with 5PN trajectory.
This class generates waveforms based on the Augmented Analytic Kludge
given in the
`EMRI Kludge Suite <https://github.com/alvincjk/EMRI_Kludge_Suite/>`_.
However, here the trajectory is vastly improved by employing the 5PN
fluxes for generic Kerr orbits from
`Fujita & Shibata 2020<https://arxiv.org/abs/2008.13554>`_.
The 5PN trajectory produces orbital and phase trajectories.
The trajectory is calculated until the orbit reaches
within 0.2 of the separatrix, determined from
`arXiv:1912.07609 <https://arxiv.org/abs/1912.07609/>`_. The
fundamental frequencies along the trajectory at each point are then
calculated from the orbital parameters and the spin value given by (`Schmidt 2002 <https://arxiv.org/abs/gr-qc/0202090>`_).
These frequencies along the trajectory are then used to map to the
frequency basis of the `Analytic Kludge <https://arxiv.org/abs/gr-qc/0310125>`_. This mapping
takes the form of time evolving large mass and spin parameters, as
well as the use of phases and frequencies in
    :math:`(\alpha, \Phi, \gamma)`:
    .. math:: \Phi = \Phi_\phi,
    .. math:: \gamma = \Phi_\phi + \Phi_\Theta,
    .. math:: \alpha = \Phi_\phi + \Phi_\Theta + \Phi_r.
The frequencies in that basis are found by taking the time derivatives
of each equation above.
This class has GPU capabilities and works from the sparse trajectory
    methodology with cubic spline interpolation of the smoothly varying
waveform quantities. This waveform does not have the freedom in terms
    of user-chosen quantities that
:class:`few.waveform.SchwarzschildEccentricWaveformBase` contains.
This is mainly due to the specific waveform constructions particular
to the AAK/AK.
args:
inspiral_kwargs (dict, optional): Optional kwargs to pass to the
inspiral generator. **Important Note**: These kwargs are passed
online, not during instantiation like other kwargs here. Default is
{}. This is stored as an attribute.
sum_kwargs (dict, optional): Optional kwargs to pass to the
sum module during instantiation. Default is {}.
use_gpu (bool, optional): If True, use GPU resources. Default is False.
"""
def attributes_Pn5AAKWaveform(self):
"""
attributes:
inspiral_generator (obj): instantiated trajectory module.
create_waveform (obj): instantiated summation module.
inspiral_kwargs (dict): Kwargs related to the inspiral class:
:class:`few.trajectory.pn5.RunKerrGenericPn5Inspiral`.
xp (obj): numpy or cupy based on gpu usage.
num_modes_kept/nmodes (int): Number of modes for final waveform.
For this model, it is solely determined from the
eccentricity.
"""
pass
def __init__(self, inspiral_kwargs={}, sum_kwargs={}, use_gpu=False):
Pn5AAK.__init__(self, use_gpu)
sum_kwargs = self.adjust_gpu_usage(use_gpu, sum_kwargs)
# kwargs that are passed to the inspiral call function
self.inspiral_kwargs = inspiral_kwargs
        # function for generating the inspiral
self.inspiral_generator = RunKerrGenericPn5Inspiral(**inspiral_kwargs)
# summation generator
self.create_waveform = AAKSummation(**sum_kwargs)
@property
def citation(self):
"""Return citations related to this module"""
return (
few_citation
+ few_software_citation
+ AAK_citation_1
+ AAK_citation_2
+ AK_citation
+ Pn5_citation
+ kerr_separatrix_citation
)
@property
def gpu_capability(self):
return True
@property
def allow_batching(self):
return False
def __call__(
self,
M,
mu,
a,
p0,
e0,
Y0,
qS,
phiS,
qK,
phiK,
dist,
Phi_phi0=0.0,
Phi_theta0=0.0,
Phi_r0=0.0,
mich=False,
dt=10.0,
T=1.0,
):
"""Call function for AAK + 5PN model.
This function will take input parameters and produce AAK waveforms with 5PN trajectories in generic Kerr.
args:
M (double): Mass of larger black hole in solar masses.
mu (double): Mass of compact object in solar masses.
p0 (double): Initial semilatus rectum (Must be greater than
                the separatrix at the given e0 and Y0).
See documentation for more information on :math:`p_0<10`.
e0 (double): Initial eccentricity.
Y0 (double): Initial cosine of the inclination angle
(:math:`\cos{\iota}`).
qS (double): Sky location polar angle in ecliptic
coordinates.
phiS (double): Sky location azimuthal angle in
ecliptic coordinates.
qK (double): Initial BH spin polar angle in ecliptic
coordinates.
phiK (double): Initial BH spin azimuthal angle in
ecliptic coordinates.
dist (double): Luminosity distance in Gpc.
Phi_phi0 (double, optional): Initial phase for :math:`\Phi_\phi`.
Default is 0.0.
Phi_theta0 (double, optional): Initial phase for :math:`\Phi_\Theta`.
Default is 0.0.
Phi_r0 (double, optional): Initial phase for :math:`\Phi_r`.
Default is 0.0.
mich (bool, optional): If True, produce waveform with
long-wavelength response approximation (hI, hII). Please
note this is not TDI. If False, return hplus and hcross.
Default is False.
dt (double, optional): Time between samples in seconds
(inverse of sampling frequency). Default is 10.0.
T (double, optional): Total observation time in years.
Default is 1.0.
Returns:
1D complex128 xp.ndarray: The output waveform.
Raises:
ValueError: user selections are not allowed.
"""
# makes sure angular extrinsic parameters are allowable
qS, phiS, qK, phiK = self.sanity_check_angles(qS, phiS, qK, phiK)
self.sanity_check_init(M, mu, a, p0, e0, Y0)
# get trajectory
t, p, e, Y, Phi_phi, Phi_theta, Phi_r = self.inspiral_generator(
M,
mu,
a,
p0,
e0,
Y0,
Phi_phi0=Phi_phi0,
Phi_theta0=Phi_theta0,
Phi_r0=Phi_r0,
T=T,
dt=dt,
**self.inspiral_kwargs
)
# makes sure p, Y, and e are generally within the model
self.sanity_check_traj(p, e, Y)
self.end_time = t[-1]
# number of modes to use (from original AAK model)
self.num_modes_kept = self.nmodes = int(30 * e0)
waveform = self.create_waveform(
t,
M,
a,
p,
e,
Y,
Phi_phi,
Phi_theta,
Phi_r,
mu,
qS,
phiS,
qK,
phiK,
dist,
self.nmodes,
mich=mich,
dt=dt,
T=T,
)
return waveform
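# Minimal usage sketch (not part of the original module; parameter values are
# illustrative only). Masses are in solar masses, dist in Gpc, T in years;
# see the __call__ docstring above.
if __name__ == "__main__":
    aak = Pn5AAKWaveform()
    h = aak(
        1e6, 1e1, 0.5, 12.0, 0.3, 0.9,  # M, mu, a, p0, e0, Y0
        0.2, 0.2, 0.8, 0.8,  # qS, phiS, qK, phiK
        1.0,  # dist (Gpc)
        dt=15.0,
        T=0.01,
    )
    print("number of samples:", len(h))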
|
{"hexsha": "be7b77f6e62a95a80db9fac8e2ae6aab92a0b4ad", "size": 28545, "ext": "py", "lang": "Python", "max_stars_repo_path": "FastEMRIWaveforms/few/waveform.py", "max_stars_repo_name": "basuparth/ICERM_Workshop", "max_stars_repo_head_hexsha": "ebabce680fc87e90ff1de30246dcda9beb384bb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FastEMRIWaveforms/few/waveform.py", "max_issues_repo_name": "basuparth/ICERM_Workshop", "max_issues_repo_head_hexsha": "ebabce680fc87e90ff1de30246dcda9beb384bb4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FastEMRIWaveforms/few/waveform.py", "max_forks_repo_name": "basuparth/ICERM_Workshop", "max_forks_repo_head_hexsha": "ebabce680fc87e90ff1de30246dcda9beb384bb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.16796875, "max_line_length": 127, "alphanum_fraction": 0.6148187073, "include": true, "reason": "import numpy,import cupy", "num_tokens": 6470}
|
import io
import math
import pdb
import timeit
from quantities import mV, ms, s, V
import sciunit
from neo import AnalogSignal
import neuronunit.capabilities as cap
import numpy as np
from .base import *
import quantities as qt
import matplotlib as mpl
# try:
# import asciiplotlib as apl
# except:
# pass
import numpy
voltage_units = mV
from sciunit.utils import redirect_stdout
from elephant.spike_train_generation import threshold_detection
from neuronunit.optimisation.model_parameters import path_params
import time
from numba import jit
# @jit(nopython=True)
@jit
def inner_core(fPots, vIndex, vTime, fTime, duration, vTimes, vPots, fDt, iter_):
    for _ in iter_:
if fTime == vTime:
fPots.append(vPots[vIndex])
# Interpolate between the two nearest vdt times
else:
# Increment vdt time until it surpases the fdt time
while fTime > vTime and vIndex < len(vTimes):
vIndex += 1
vTime = vTimes[vIndex]
# Once surpassed, use the new vdt time and t-1
# for interpolation
vIndexMinus1 = max(0, vIndex - 1)
vTimeMinus1 = vTimes[vIndexMinus1]
# def linearInterpolate(tStart, tEnd, vStart, vEnd, tTarget):
tStart, tEnd, vStart, vEnd, tTarget = (
vTimeMinus1,
vTime,
vPots[vIndexMinus1],
vPots[vIndex],
fTime,
)
tRange = tEnd - tStart
tFractionAlong = (tTarget - tStart) / tRange
vRange = vEnd - vStart
fPot = vRange * tFractionAlong + vStart
fPots.append(fPot)
# Go to the next fdt time step
fTime += fDt
return fPots
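# inner_core above walks the fixed-dt grid through the variable-dt samples;
# when a grid time falls between two recorded times it applies standard
# linear interpolation,
#     v(t) = vStart + (vEnd - vStart) * (t - tStart) / (tEnd - tStart),
# which is exactly the tFractionAlong arithmetic in the loop.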
@jit
def get_fixed_step_analog_signal(
desired_fixedTimeStep, varied_Pots, varied_Times
): # ,iterator):
"""Convert variable dt array values to fixed dt array.
Uses linear interpolation.
"""
# Fixed dt potential
fPots = []
fDt = desired_fixedTimeStep
# Variable dt potential
vPots = varied_Pots
# self.vVector.to_python()
# Variable dt times
vTimes = varied_Times # self.tVector.to_python()
duration = vTimes[len(vTimes) - 1]
# Fixed and Variable dt times
fTime = vTimes[0]
vTime = vTimes[0]
# Index of variable dt time array
vIndex = 0
# Advance the fixed dt position
#
iter_ = range(0, int(duration / fDt) - 1)
fPots = inner_core(
fPots,
int(vIndex),
float(vTime),
float(fTime),
float(duration),
list(vTimes),
list(vPots),
float(fDt),
iter_,
)
return fPots, iter_
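# Illustrative call (a sketch; the values below are made up): resample a
# variable-dt voltage trace onto a fixed 0.5 ms grid.
# >>> vtimes = [0.0, 0.4, 1.1, 2.0]         # recorded times (ms)
# >>> vpots = [-65.0, -64.0, -60.0, -55.0]  # potentials (mV)
# >>> pots, _ = get_fixed_step_analog_signal(0.5, vpots, vtimes)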
@jit
def get_int(tvec):
list_of_intervals = []
for i, t in enumerate(tvec):
if i > 0:
list_of_intervals.append(t - tvec[i - 1])
return np.min(list_of_intervals)
class NEURONHHBackend(Backend):
"""Use for simulation with NEURON, a popular simulator.
http://www.neuron.yale.edu/neuron/
    Units used by NEURON are sometimes different from quantities/neo
(note nA versus pA)
http://neurosimlab.org/ramcd/pyhelp/modelspec/programmatic/mechanisms/mech.html#IClamp
NEURON's units:
del -- ms
dur -- ms
amp -- nA
i -- nA
"""
name = "NEURONHH"
def init_backend(self, attrs=None, DTC=None):
"""Initialize the NEURON backend for neuronunit.
Arguments should be consistent with an underlying model files.
Args:
attrs (dict): a dictionary of items used to update NEURON
model attributes.
            cell_name (string): A string that represents the cell model's name
in the NEURON HOC space.
            current_src_name (string): A string that represents the current
                                       source model's name in the NEURON HOC
                                       space.
            DTC (DataTransportContainer): The data transport container holding
                                          a dictionary of model attributes.
                                          When the DTC object is provided,
                                          its attribute dictionary can be used
                                          to update the NEURONBackend's model
                                          attribute dictionary.
"""
if not NEURON_SUPPORT:
msg = "The neuron module was not successfully imported"
raise BackendException(msg)
self.stdout = io.StringIO()
self.neuron = None
self.model_path = None
self.h = h
self.h.load_file("stdlib.hoc")
self.h.load_file("stdgui.hoc")
        self.default_attrs = {
            "gnabar": 0.12,
            "gkbar": 0.036,
            "Ra": 100,
            "L": 12.6157,
            "diam": 12.6157,
            "el": -54.3,
            "gl": 0.0003,
            "ena": 50.0,
            "ek": -77.0,
            "vr": -65,
            "cm": 1.0,
        }
super(NEURONHHBackend, self).init_backend()
self.model._backend.use_memory_cache = False
self.model.unpicklable += ["h", "ns", "_backend"]
self.load_model()
if type(DTC) is not type(None):
if type(DTC.attrs) is not type(None):
self.set_attrs(DTC.attrs)
if len(DTC.attrs):
assert len(self.model.attrs) > 0
if hasattr(DTC, "current_src_name"):
self._current_src_name = DTC.current_src_name
if hasattr(DTC, "cell_name"):
self._cell_name = DTC.cell_name
def reset_neuron(self, neuronVar):
"""Reset the neuron simulation.
        Refreshes the HOC module, purging its variable namespace.
        Sets and resets the NEURON h variable. The NEURON h variable may
        benefit from being reset between simulation runs as a way of ensuring
        that each simulation is freshly initialized. The reset_neuron method
        prevents a situation where a new model's initial conditions are
        erroneously updated from a stale model's final state.
Args:
neuronVar (module): a reference to the neuron module
"""
self.h = neuronVar.h
self.neuron = neuronVar
# h = neuron.h
# self.h.load_file("stdlib.hoc")
# self.h.load_file("stdgui.hoc")
self.load_model()
def set_run_params(self, **run_params):
pass
def set_stop_time(self, stop_time=1200 * pq.ms):
"""Set the simulation duration
stopTimeMs: duration in milliseconds
"""
self.h.tstop = float(stop_time.rescale(pq.ms))
def get_spike_count(self):
        thresh = threshold_detection(self.vM, threshold=0.0 * pq.mV, sign="above")
return len(thresh)
'''
def set_time_step(self, integrationTimeStep=(pq.ms/128.0)):
"""Set the simulation itegration fixed time step
integrationTimeStepMs: time step in milliseconds.
Powers of two preferred. Defaults to 1/128.0
Args:
integrationTimeStep (float): time step in milliseconds.
Powers of two preferred. Defaults to 1/128.0
"""
dt = integrationTimeStep
self.h.dt = 0.1#float(dt)0.01
'''
'''
def set_tolerance(self, tolerance=0.001):
"""Set the variable time step integration method absolute tolerance.
Args:
tolerance (float): absolute tolerance value
"""
self.h.cvode.atol(tolerance)
'''
def set_integration_method(self, method="variable"):
"""Set the simulation itegration method.
cvode is used when method is "variable"
Args:
            method (string): either "fixed" or "variable". Defaults to "variable".
"""
# This line is compatible with the above cvodes statements.
self.h.cvode.active(1 if method == "variable" else 0)
try:
assert self.h.cvode.active()
except AssertionError:
self.h.cvode = self.h.CVode()
self.h.cvode.active(1 if method == "variable" else 0)
'''
def get_variable_step_analog_signal(self):
"""Convert variable dt array values to fixed dt array.
Uses linear interpolation.
"""
# Fixed dt potential
fPots = []
fDt = self.fixedTimeStep
# Variable dt potential
vPots = self.vVector.to_python()
# Variable dt times
vTimes = self.tVector.to_python()
duration = vTimes[len(vTimes)-1]
# Fixed and Variable dt times
fTime = vTime = vTimes[0]
# Index of variable dt time array
vIndex = 0
# Advance the fixed dt position
while fTime <= duration:
# If v and f times are exact, no interpolation needed
if fTime == vTime:
fPots.append(vPots[vIndex])
# Interpolate between the two nearest vdt times
else:
# Increment vdt time until it surpasses the fdt time
while fTime > vTime and vIndex < len(vTimes):
vIndex += 1
vTime = vTimes[vIndex]
# Once surpassed, use the new vdt time and t-1
# for interpolation
vIndexMinus1 = max(0, vIndex-1)
vTimeMinus1 = vTimes[vIndexMinus1]
fPot = self.linearInterpolate(vTimeMinus1, vTime,
vPots[vIndexMinus1],
vPots[vIndex], fTime)
fPots.append(fPot)
# Go to the next fdt time step
fTime += fDt
return fPots
'''
def get_membrane_potential(self):
    """Get the membrane potential trace from the simulation.
    Must destroy the hoc vectors that comprise it.
    Returns:
        neo.core.AnalogSignal: the membrane potential trace
    """
"""
#if self.h.cvode.active() == 0:
dt = float(self.h.dt)
fixed_signal = self.vVector.to_python()
#else:
#dt = float(self.fixedTimeStep)
# dt = self.h.dt
# self.fixedTimeStep = float(dt)
#fixed_signal = self.get_variable_step_analog_signal()
#self.h.dt = dt
fixed_signal = [ v for v in fixed_signal ]
self.vM = AnalogSignal(fixed_signal,
units=pq.mV,
sampling_period=self.h.dt*pq.ms)
"""
return self.vM
'''
def linearInterpolate(self, tStart, tEnd, vStart, vEnd, tTarget):
"""Perform linear interpolation."""
tRange = float(tEnd - tStart)
tFractionAlong = (tTarget - tStart)/tRange
vRange = vEnd - vStart
vTarget = vRange*tFractionAlong + vStart
return vTarget
def load(self, tstop=650*pq.ms):
#nrn_path = (os.path.splitext(self.model.orig_lems_file_path)[0] +
# '_nrn.py')
#nrn = import_module_from_path(nrn_path)
self.reset_neuron(nrn.neuron)
self.h.tstop = tstop
self.set_stop_time(tstop) # previously 500ms add on 150ms of recovery
#with redirect_stdout(self.stdout):
self.ns = nrn.NeuronSimulation(self.h.tstop, dt=0.1)
def load_mechanisms(self):
with redirect_stdout(self.stdout):
neuron.load_mechanisms(self.neuron_model_dir)
'''
def load_model(self, verbose=True):
    """Load a NEURON model.
    Side effects: substantially mutates the neuronal model stored in self.
    Description: builds a single-compartment soma section and inserts
    the Hodgkin-Huxley ("hh") and passive ("pas") mechanisms into it,
    dragging the NEURON variables into the Python namespace. Since this
    only happens once, outside of the optimization loop, it is a
    tolerable performance hit.
    """
soma = h.Section(name="soma")
soma.nseg = 1
soma.insert("hh")
soma.insert("pas")
self.soma = soma
return self
def set_attrs(self, attrs):
    # Make sure all attributes are accounted for.
    # If the assignment is incomplete, assume the user does not want to
    # explicitly specify everything and is satisfied by the defaults.
self.default_attrs.update(attrs)
attrs = self.default_attrs
if not hasattr(self.model, "attrs"):
    self.model.attrs = {}
self.model.attrs.update(attrs)
self.soma(0.5).hh.gl = attrs["gl"]
self.soma(0.5).hh.gnabar = attrs["gnabar"]
self.soma(0.5).hh.gkbar = attrs["gkbar"]
self.soma(0.5).cm = attrs["cm"]
self.soma.L = attrs["L"]
self.soma.diam = attrs["diam"] # 12.6157 # Makes a soma of 500 microns squared.
for sec in self.h.allsec():
sec.Ra = attrs["Ra"] # Axial resistance in Ohm * cm
sec.cm = attrs["cm"] # Membrane capacitance in micro Farads / cm^2
self.soma(0.5).hh.el = attrs["el"]
# self.soma(0.5).k_ion.ek = attrs['ek']
# self.soma(0.5).na_ion.ena = attrs['ena']
self.vVector = self.h.Vector() # Membrane potential vector
self.tVector = self.h.Vector() # Time stamp vector
self.vVector.record(self.soma(0.5)._ref_v)
self.tVector.record(self.h._ref_t)
return self
def inject_square_current(self, current, section=None, debug=False):
    """Apply a square-pulse current injection into the soma or a specific compartment.
    Example: current = {'amplitude': float * pq.pA,
                        'delay': float * pq.ms,
                        'duration': float * pq.ms}
    where 'pq' is a physical unit representation, implemented by casting
    float values to the quantities type.
    Currently only single-section neuronal models are supported; the
    neurite section is understood to be simply the soma.
    Args:
        current (dict): a dictionary with exactly three items,
            whose keys are: 'amplitude', 'delay', 'duration'
    Implementation:
        1. Purge the HOC space by calling reset_neuron().
        2. Redefine the neuronal model in the freshly cleared HOC namespace.
        3. Strip away the quantities representation of physical units.
        4. Translate the dictionary of current injection parameters into
           executable HOC code.
    """
try:
    assert len(self.model.attrs)
except AssertionError:
    print(
        "A model must be instantiated and given parameters before injecting current."
    )
temp_attrs = self.model.attrs
assert len(temp_attrs)
self.init_backend()
self.h.cvode.active(1)
# self.set_integration_method(method="variable")
if len(temp_attrs):
self.set_attrs(temp_attrs)
self.last_current = current
if "injected_square_current" in current.keys():
c = current["injected_square_current"]
else:
c = current
##
# critical code:
##
stop_time = (
float(c["delay"]) * pq.ms + float(c["duration"]) * pq.ms + 200.0 * pq.ms
)
# Translate picoamps to nanoamps:
# NEURON's default unit for current injection values is nanoamps, so to
# ensure that picoamp values are not erroneously interpreted as (1000x
# larger) nanoamp values, the amplitude is divided by 1000.
stim = self.h.IClamp(self.soma(0.5))
amp = float(c["amplitude"]) / 1000.0  # pA -> nA
dur = float(c["duration"])  # ms
delay = float(c["delay"])  # ms
stim.amp = amp
stim.dur = dur
stim.delay = delay
tMax = delay + dur + 200.0  # add 200 ms of recovery time
self.h.tstop = tMax
self.h.dt = 1
self.set_stop_time(stop_time)
with redirect_stdout(self.stdout):
    self.h("run()")
tvec = [float(x) for x in self.tVector]
vm = [float(x) for x in self.vVector]
vm_fast, vm_times = get_fixed_step_analog_signal(tvec[1], vm, tvec)
"""
self.vM = AnalogSignal(vm,
units=pq.mV,
sampling_period=tvec[1]*pq.ms)
t = [float(f) for f in self.vM.times]
v = [float(f) for f in self.vM.magnitude]
v_fast = [float(f) for f in self.vM_other]
t_fast = [float(f) for f in self.vM_other.times]
fig = apl.figure()
fig.plot(t, v, width=100, height=20)
fig.plot(t_fast, v_fast, width=100, height=20)
fig.show()
"""
self.vM = AnalogSignal(vm_fast, units=pq.mV, sampling_period=tvec[1] * pq.ms)
# Flag whether the trace contains NaNs (currently informational only).
is_nan_in_vm = any(np.isnan(float(v)) for v in self.vM)
return self.vM
def local_run(self):
# with redirect_stdout(self.stdout):
self.h("run()")
results = {}
results["vm"] = AnalogSignal(
[float(x) for x in self.vVector],
units=pq.mV,
sampling_period=self.h.dt * pq.ms,
)
results["t"] = results["vm"].times
results["run_number"] = results.get("run_number", 0) + 1
return results
def _backend_run(self):
self.inject_square_current(self.last_current)
return self.local_run()
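# --- Usage sketch (illustrative, not part of the original backend) ---
# Assuming a model compatible with this backend has already been built,
# a square-pulse injection could look like (names are hypothetical):
#
#     backend = model._backend            # a NEURONHHBackend instance
#     current = {"amplitude": 100.0 * pq.pA,
#                "delay": 100.0 * pq.ms,
#                "duration": 500.0 * pq.ms}
#     vm = backend.inject_square_current(current)   # neo AnalogSignal
#     n_spikes = backend.get_spike_count()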
|
{"hexsha": "92e8f3ce05be7c7693ee18f892651c5172b269ff", "size": 18687, "ext": "py", "lang": "Python", "max_stars_repo_path": "jithub/models/backends/neuron_hh.py", "max_stars_repo_name": "russelljjarvis/numba_reduced_neuronal_models", "max_stars_repo_head_hexsha": "bc500aefab267a1a1eaf2a1d8dac83da676d7ee6", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-17T07:39:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T07:39:19.000Z", "max_issues_repo_path": "jithub/models/backends/neuron_hh.py", "max_issues_repo_name": "russelljjarvis/numba_reduced_neuronal_models", "max_issues_repo_head_hexsha": "bc500aefab267a1a1eaf2a1d8dac83da676d7ee6", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-25T06:36:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-17T21:12:24.000Z", "max_forks_repo_path": "jithub/models/backends/neuron_hh.py", "max_forks_repo_name": "russelljjarvis/numba_reduced_neuronal_models", "max_forks_repo_head_hexsha": "bc500aefab267a1a1eaf2a1d8dac83da676d7ee6", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-01T01:40:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-01T01:40:39.000Z", "avg_line_length": 33.1329787234, "max_line_length": 119, "alphanum_fraction": 0.5723230053, "include": true, "reason": "import numpy,from numba", "num_tokens": 4561}
|
'''Collection of terms that form loss functionals
Author: Hwan Goh, Oden Institute, Austin, Texas 2020
'''
import numpy as np
import tensorflow as tf
import pdb #Equivalent of keyboard in MATLAB, just add "pdb.set_trace()"
###############################################################################
# General Loss #
###############################################################################
def loss_penalized_difference(true, pred, penalty):
'''penalized squared error of the true and predicted values'''
return penalty*true.shape[1]*tf.keras.losses.mean_squared_error(true, pred)
def loss_diagonal_weighted_penalized_difference(true, pred, weight_matrix, penalty):
'''weighted penalized squared error of the true and predicted values
for the special case where the weight matrix is a diagonal stored as rows'''
if len(pred.shape) == 1:
pred = tf.expand_dims(pred, axis=1)
return penalty*true.shape[1]*tf.keras.losses.mean_squared_error(
tf.multiply(weight_matrix, true),
tf.multiply(weight_matrix, pred))
###############################################################################
# Loss Diagonal Posterior Covariance #
###############################################################################
def loss_kld(post_mean, log_post_var,
prior_mean, prior_cov_inv,
penalty):
'''Kullback-Leibler divergence between the model posterior and the prior
model for the case where the model posterior possesses a diagonal covariance
matrix
'''
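    # A minimal reference, assuming Gaussians q = N(m_q, S_q) (posterior)
    # and p = N(m_p, S_p) (prior):
    #   KL(q || p) = 1/2 * [ tr(S_p^{-1} S_q)
    #                        + (m_p - m_q)^T S_p^{-1} (m_p - m_q)
    #                        - k + ln det S_p - ln det S_q ]
    # The expression below drops the additive constants (-k + ln det S_p)
    # and absorbs the overall 1/2 into `penalty`; for a diagonal S_q,
    # ln det S_q = sum(log_post_var).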
trace_prior_cov_inv_times_cov_post = tf.reduce_sum(
tf.multiply(tf.linalg.diag_part(prior_cov_inv), tf.math.exp(log_post_var)),
axis=1)
prior_weighted_prior_mean_minus_post_mean = tf.reduce_sum(
tf.multiply(tf.transpose(prior_mean - post_mean),
tf.linalg.matmul(prior_cov_inv, tf.transpose(prior_mean - post_mean))),
axis = 0)
return penalty*(trace_prior_cov_inv_times_cov_post
+ prior_weighted_prior_mean_minus_post_mean
- tf.math.reduce_sum(log_post_var, axis=1))
###############################################################################
# Loss Full Posterior Covariance #
###############################################################################
def loss_weighted_post_cov_full_penalized_difference(true, pred,
post_cov_chol,
penalty):
'''Monte-Carlo estimate of the Kullback-Leibler divergence
between the true posterior and the model posterior for the
case where the model posterior possesses a full covariance
matrix
'''
batched_value = weighted_inner_product_chol_solve(
tf.transpose(tf.reshape(
post_cov_chol[0,:], (true.shape[1], true.shape[1]))),
tf.expand_dims(true[0,:] - pred[0,:], axis=1))
for m in range(1, true.shape[0]):
batched_value = tf.concat(
[batched_value,
weighted_inner_product_chol_solve(
tf.transpose(tf.reshape(
post_cov_chol[m,:], (true.shape[1], true.shape[1]))),
tf.expand_dims(true[m,:] - pred[m,:], axis=1))], axis=0)
return penalty*tf.squeeze(batched_value)
def weighted_inner_product_chol_solve(weight_matrix, vector):
'''Evaluates data-misfit term weighted by the inverse of the full model
posterior covariance
'''
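    # For any invertible weight matrix W, the two solves below evaluate
    # v^T (W W^T)^{-1} v without ever forming the inverse explicitly:
    # solve(W, v) = W^{-1} v, then solve(W^T, W^{-1} v) = (W W^T)^{-1} v.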
return tf.linalg.matmul(tf.transpose(vector),
tf.linalg.solve(tf.transpose(weight_matrix),
tf.linalg.solve(weight_matrix, vector)))
def loss_trace_likelihood(post_cov_chol,
identity_otimes_likelihood_matrix,
penalty):
'''For the case where the parameter-to-observable map is linear, the
expectation of the likelihood does not require a Monte-Carlo approximation
and so there is an extra trace term which is computed by this function
'''
return penalty*tf.reduce_sum(
tf.multiply(tf.transpose(post_cov_chol),
identity_otimes_likelihood_matrix.matmul(tf.transpose(post_cov_chol))),
axis=0)
def loss_kld_full(post_mean, log_post_std, post_cov_chol,
prior_mean, prior_cov_inv, identity_otimes_prior_cov_inv,
penalty):
'''Kullback-Leibler divergence between the model posterior and the prior
model for the case where the model posterior possesses a full covariance
matrix
'''
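    # Same KL expression as in loss_kld above, but with a full covariance
    # carried as a factor in post_cov_chol: tr(S_p^{-1} S_q) is evaluated
    # through the Kronecker-structured operator identity_otimes_prior_cov_inv,
    # and ln det S_q = 2 * sum(log_post_std).
    # Additive constants are again dropped and 1/2 is absorbed into `penalty`.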
trace_prior_cov_inv_times_cov_post = tf.reduce_sum(
tf.multiply(tf.transpose(post_cov_chol),
identity_otimes_prior_cov_inv.matmul(tf.transpose(post_cov_chol))),
axis=0)
prior_weighted_prior_mean_minus_post_mean = tf.reduce_sum(
tf.multiply(tf.transpose(prior_mean - post_mean),
tf.linalg.matmul(prior_cov_inv, tf.transpose(prior_mean - post_mean))),
axis = 0)
return penalty*(trace_prior_cov_inv_times_cov_post
+ prior_weighted_prior_mean_minus_post_mean
- 2*tf.math.reduce_sum(log_post_std, axis=1))
###############################################################################
# Loss Forward Model #
###############################################################################
def loss_forward_model(hyperp, options,
forward_model,
state_obs_true, parameter_pred,
penalty):
'''Computes the expectation of the likelihood using the modelled
parameter-to-observable map
'''
forward_model_state_pred = forward_model(parameter_pred)
forward_model_state_pred = tf.cast(forward_model_state_pred, dtype=tf.float32)
return penalty*state_obs_true.shape[1]*tf.keras.losses.mean_squared_error(state_obs_true,
forward_model_state_pred)
###############################################################################
# Relative Error #
###############################################################################
def relative_error(true, pred):
'''relative error between testing data and prediction'''
return tf.keras.losses.mean_absolute_percentage_error(true, pred)
|
{"hexsha": "f31c2caa4c3fb72373b0df5131eb8cb21f222a02", "size": 6600, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/src/utils_training/functionals.py", "max_stars_repo_name": "hwangoh/uq-vae", "max_stars_repo_head_hexsha": "382548e6f6dd7f9d72feff0e0752beec871db348", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-28T16:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T00:53:58.000Z", "max_issues_repo_path": "codes/src/utils_training/functionals.py", "max_issues_repo_name": "HwanGoh/uq-vae", "max_issues_repo_head_hexsha": "24a3d26987e2ec807d57601b14c68b22f3652a18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/src/utils_training/functionals.py", "max_forks_repo_name": "HwanGoh/uq-vae", "max_forks_repo_head_hexsha": "24a3d26987e2ec807d57601b14c68b22f3652a18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-29T08:31:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-07T10:26:45.000Z", "avg_line_length": 50.7692307692, "max_line_length": 93, "alphanum_fraction": 0.5593939394, "include": true, "reason": "import numpy", "num_tokens": 1166}
|
import os
import torch
import numpy as np
import librosa as la
from utils import listDir, tick, tock
FRAMES_PER_SAMPLE: int = 336 # number of frames per sample
HOP_LENGTH: int = 42 # number of frames to hop, to get to next sample
# number of samples to extract from a performance
SAMPLES_PER_PERFORMANCE: int = 120
# CQT Filtering Params
CQT_TOP_DROP_BINS: int = 36
CQT_PRESERVED_PEAK_COUNT: int = 1
# Audio data Cache limits
CACHE_LIMIT = 80
class Covers80DatasetPerformanceChunks(torch.utils.data.Dataset):
def __init__(self, root_dir: str, excluded_transforms: list = [], validation: bool = False, isolated_performance_index=None):
performances = getPerformancesList(
root_dir=root_dir, excluded_transforms=excluded_transforms)
self.performances = []
self.cache = {}
self.cache_size = 0
if isolated_performance_index is not None:
self.performances.append(performances[isolated_performance_index]['path'])
print("Using only {}".format(performances[isolated_performance_index]['name']))
else:
for performance in performances:
self.performances.append(performance['path'])
def __len__(self):
return len(self.performances) * SAMPLES_PER_PERFORMANCE
def __getitem__(self, index):
tick("GET ITEM {}".format(index))
performance_index = index // SAMPLES_PER_PERFORMANCE
if self.performances[performance_index] in self.cache:
cqt = self.cache[self.performances[performance_index]]
else:
    # Evict one item from the cache if the cache limit has been reached
    if self.cache_size > CACHE_LIMIT:
        cache_keys = list(self.cache.keys())
        del self.cache[cache_keys[0]]
        self.cache_size -= 1
    cqt = np.load(self.performances[performance_index])
    self.cache[self.performances[performance_index]] = cqt
    self.cache_size += 1
frame_offset = (index % SAMPLES_PER_PERFORMANCE) * HOP_LENGTH
# [feature_size, sequence_size]
frames = cqt[:, frame_offset:(frame_offset+FRAMES_PER_SAMPLE)]
frames = frames.transpose() # [sequence_size, feature_size]
frames[:, -CQT_TOP_DROP_BINS:] = 0.0
maxIndices = np.argmax(frames, axis=1)
filteredFrames = np.zeros(frames.shape, dtype=bool)  # np.bool is deprecated
for (step, max_index) in enumerate(maxIndices):  # avoid shadowing `index`, logged below
    filteredFrames[step, max_index] = 1.0
filteredFrames = filteredFrames[:, :-CQT_TOP_DROP_BINS]
# [sequence_size,feature_size]
X = torch.from_numpy(filteredFrames[:-1, :]).type(torch.float32)
# [sequence_size]
Y = torch.as_tensor(np.argmax(filteredFrames[-1, :]))
tock("GET ITEM {}".format(index))
return X, Y
def getPerformancesList(root_dir: str, excluded_transforms: list = []):
songs = listDir(path=root_dir, directoriesOnly=True)
all_performances = []
for song in songs:
song_dir = os.path.join(root_dir, song)
performances = listDir(song_dir, filesOnly=True)
for performance in performances:
name = '.'.join(performance.split('.')[:-1])
# Check if excluded
excluded = False
for suffix in excluded_transforms:
    if name.endswith(suffix):
        excluded = True
if excluded:
continue
data = {
"song": song,
"name": name,
"path": os.path.join(song_dir, performance)
}
all_performances.append(data)
return all_performances
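# --- Usage sketch (illustrative; the root_dir path is an assumption) ---
# dataset = Covers80DatasetPerformanceChunks(root_dir="/data/covers80_cqt")
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
# X, Y = next(iter(loader))
# # X: [32, FRAMES_PER_SAMPLE - 1, n_bins] one-hot peak frames
# # Y: [32] index of the peak bin in the frame to be predicted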
|
{"hexsha": "6f6515c0ca8544426648f1f4dcae3b9af008fedc", "size": 3610, "ext": "py", "lang": "Python", "max_stars_repo_path": "comparing-rnn-params/model/dataset.py", "max_stars_repo_name": "pasinducw/scs-4224-fyp", "max_stars_repo_head_hexsha": "753dd2cc6db84bcb9823a24ce5f495d94f55b162", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-14T06:29:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-14T06:29:25.000Z", "max_issues_repo_path": "comparing-rnn-params/model/dataset.py", "max_issues_repo_name": "pasinducw/university-work-scs-4224", "max_issues_repo_head_hexsha": "9ff208e31ddd17146dac2226ac5474bd7fe98ab2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "comparing-rnn-params/model/dataset.py", "max_forks_repo_name": "pasinducw/university-work-scs-4224", "max_forks_repo_head_hexsha": "9ff208e31ddd17146dac2226ac5474bd7fe98ab2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3921568627, "max_line_length": 129, "alphanum_fraction": 0.6379501385, "include": true, "reason": "import numpy", "num_tokens": 807}
|
#pragma once
#include <memory>
#include <iterator>
#include <cstddef>
#include <gsl/gsl>
namespace dr {
/// \brief round `s` up to the nearest multiple of n
template<typename T>
T round_up(T s, unsigned int n) { return ((s + n - 1) / n) * n; }
template<typename T, typename Allocator = std::allocator<T>>
struct gap_buffer {
using value_type = T;
using allocator_type = Allocator;
using size_type = std::size_t;
using difference_type = ptrdiff_t;
using reference = value_type&;
using const_reference = const value_type&;
using pointer = typename std::allocator_traits<Allocator>::pointer;
using const_pointer = typename std::allocator_traits<Allocator>::const_pointer;
struct const_iterator;
struct iterator {
using self_type = iterator;
using container_type = gap_buffer;
using value_type = container_type::value_type;
using difference_type = container_type::difference_type;
using reference = container_type::reference;
using pointer = container_type::pointer;
using iterator_category = std::random_access_iterator_tag;
explicit iterator(gap_buffer* container = nullptr, difference_type offset = 0)
: container(container), offset(offset) { }
operator const_iterator() const {
return const_iterator(container, offset);
}
reference operator [](difference_type i) const {
return (*container)[offset + i];
}
reference operator *() const {
return (*container)[offset];
}
pointer operator ->() const {
return &(*container)[offset];
}
self_type& operator ++() {
offset++;
return *this;
}
self_type operator ++(int) {
self_type retval = *this;
this->operator ++();
return retval;
}
self_type& operator --() {
offset--;
return *this;
}
self_type operator --(int) {
self_type retval = *this;
this->operator --();
return retval;
}
bool operator ==(const self_type& other) const {
return container == other.container && offset == other.offset;
}
bool operator !=(const self_type& other) const {
return !(*this == other);
}
bool operator <(const self_type& other) const {
Expects(container == other.container);
return offset < other.offset;
}
bool operator >(const self_type& other) const {
return other < *this;
}
bool operator <=(const self_type& other) const {
return !(other < *this);
}
bool operator >=(const self_type& other) const {
return !(*this < other);
}
self_type& operator +=(difference_type n) {
offset += n;
return *this;
}
friend
self_type operator +(self_type it, difference_type n) {
it.offset += n;
return it;
}
friend
self_type operator +(difference_type n, self_type it) {
it.offset += n;
return it;
}
self_type& operator -=(difference_type n) {
offset -= n;
return *this;
}
self_type operator -(difference_type n) const {
return self_type(container, offset - n);
}
difference_type operator -(const self_type& other) const {
Expects(container == other.container);
return offset - other.offset;
}
friend class gap_buffer;
private:
gap_buffer* container;
difference_type offset;
};
// Here we repeat ourselves; the DRY principle is violated.
// We could remove the repetition with CRTP, but we keep it
// as is.
struct const_iterator {
using self_type = const_iterator;
using container_type = gap_buffer;
using value_type = container_type::value_type;
using difference_type = container_type::difference_type;
using reference = container_type::const_reference;
using pointer = container_type::const_pointer;
using iterator_category = std::random_access_iterator_tag;
explicit const_iterator(const gap_buffer* container = nullptr, difference_type offset = 0)
: container(container),
offset(offset) { }
reference operator [](difference_type i) const {
return (*container)[offset + i];
}
reference operator *() const {
return (*container)[offset];
}
pointer operator ->() const {
return &(*container)[offset];
}
self_type& operator ++() {
offset++;
return *this;
}
self_type operator ++(int) {
self_type retval = *this;
this->operator ++();
return retval;
}
self_type& operator --() {
offset--;
return *this;
}
self_type operator --(int) {
self_type retval = *this;
this->operator --();
return retval;
}
bool operator ==(const self_type& other) const {
return container == other.container && offset == other.offset;
}
bool operator !=(const self_type& other) const {
return !(*this == other);
}
bool operator <(const self_type& other) const {
Expects(container == other.container);
return offset < other.offset;
}
bool operator >(const self_type& other) const {
return other < *this;
}
bool operator <=(const self_type& other) const {
return !(other < *this);
}
bool operator >=(const self_type& other) const {
return !(*this < other);
}
self_type& operator +=(difference_type n) {
offset += n;
return *this;
}
friend
self_type operator +(self_type it, difference_type n) {
it.offset += n;
return it;
}
friend
self_type operator +(difference_type n, self_type it) {
it.offset += n;
return it;
}
self_type& operator -=(difference_type n) {
offset -= n;
return *this;
}
self_type operator -(difference_type n) const {
return self_type(container, offset - n);
}
difference_type operator -(const self_type& other) const {
Expects(container == other.container);
return offset - other.offset;
}
friend class gap_buffer;
private:
const gap_buffer* container;
difference_type offset;
};
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
private:
static constexpr float incremental_factor = 0.2;
static constexpr size_type default_size = 8;
static constexpr size_type alignment = 8;
public:
explicit gap_buffer(size_type count = default_size) {
if (count == 0) {
start = finish = gap_start = nullptr;
gap_size = 0;
}
else {
count = round_up(count, alignment);
start = allocate_and_construct(count);
finish = start + count;
gap_start = start;
gap_size = count;
}
}
gap_buffer(size_type count, const T& value)
: gap_buffer(count) {
for (size_type i = 0; i < count; ++i) push_back(value);
}
template<typename InputIt>
gap_buffer(InputIt first, InputIt last) {
difference_type n = std::distance(first, last);
size_type len = round_up(std::max(default_size, size_type(n)), alignment);
start = data_allocator.allocate(len);
finish = start + len;
int except_flag = 0;
try {
gap_start = std::uninitialized_copy(first, last, start);
except_flag = 1;
std::uninitialized_default_construct(gap_start, finish);
}
catch (...) {
switch (except_flag) {
case 1:
std::destroy(start, start + n);
// FALL THROUGH
case 0:
data_allocator.deallocate(start, finish - start);
default:;// DO NOTHING
}
throw;
}
gap_size = len - n;
}
gap_buffer(const gap_buffer& rhs)
: gap_buffer(rhs.begin(), rhs.end()) { }
gap_buffer(gap_buffer&& rhs) noexcept
: gap_buffer(0) { swap(rhs); }
gap_buffer(std::initializer_list<T> ilist)
: gap_buffer(ilist.begin(), ilist.end()) { }
gap_buffer& operator =(const gap_buffer& rhs) {
gap_buffer temp(rhs);
swap(temp);
return *this;
}
gap_buffer& operator =(gap_buffer&& rhs) noexcept {
swap(rhs);
return *this;
}
~gap_buffer() {
destroy_and_deallocate(start, finish);
start = finish = gap_start = nullptr;
gap_size = 0;
}
void swap(gap_buffer& rhs) {
using std::swap;
swap(start, rhs.start);
swap(finish, rhs.finish);
swap(gap_start, rhs.gap_start);
swap(gap_size, rhs.gap_size);
}
void assign(size_type count, const T& value) { *this = gap_buffer(count, value); }
template<typename InputIt>
void assign(InputIt first, InputIt last) { *this = gap_buffer(first, last); }
void assign(std::initializer_list<T> ilist) { *this = gap_buffer(ilist); }
allocator_type get_allocator() const { return data_allocator; }
// ------ basis START HERE ------
const_reference operator [](size_type pos) const {
if (start + pos < gap_start) return *(start + pos);
else return *(start + gap_size + pos);
}
iterator erase(const_iterator first, const_iterator last) {
Expects(first.container == this && last.container == this);
difference_type num_to_erase = std::distance(first, last);
relocate_gap(first.offset);
std::fill_n(gap_start + gap_size, num_to_erase, T{});
gap_size += num_to_erase;
return iterator(this, first.offset);
}
/// \return iterator to the first inserted element
template<class InputIt>
iterator insert(const_iterator pos, InputIt first, InputIt last) {
Expects(this == pos.container && pos <= end());
difference_type num_to_insert = std::distance(first, last);
if (gap_size >= num_to_insert) {
relocate_gap(pos.offset);
std::copy(first, last, gap_start);
gap_start += num_to_insert;
gap_size -= num_to_insert;
return iterator(this, pos.offset);
}
else {
size_type old_size = size();
size_type old_capacity = capacity();
auto default_delta = size_type(old_capacity * incremental_factor);
size_type delta = round_up(std::max(default_delta, num_to_insert - gap_size), alignment);
size_type new_capacity = std::max(old_capacity + delta, default_size);
gap_buffer temp(new_capacity);
relocate_gap(pos.offset);
auto cursor = std::copy(start, gap_start, temp.start);
cursor = std::copy(first, last, cursor);
std::copy(gap_start + gap_size, finish, cursor);
swap(temp);
gap_start = start + old_size + num_to_insert;
gap_size = finish - gap_start;
return iterator(this, pos.offset);
}
}
void reserve(size_type new_cap = 0) {
if (capacity() >= new_cap) return;
if (new_cap > max_size()) throw std::length_error("new_cap should be less than max_size()");
size_type old_size = size();
size_type new_capacity = round_up(new_cap, alignment);
gap_buffer temp(new_capacity);
auto cursor = std::copy(start, gap_start, temp.start);
std::copy(gap_start + gap_size, finish, cursor);
swap(temp);
gap_start = start + old_size;
gap_size = finish - gap_start;
}
size_type size() const noexcept { return finish - start - gap_size; }
size_type max_size() const noexcept { return size_type(1) << 31; }  // avoid signed overflow of 1 << 31
size_type capacity() const noexcept { return finish - start; }
// ------ basis END HERE ------
reference operator [](size_type pos) {
return const_cast<reference>(
static_cast<const gap_buffer&>(*this)[pos]
);
}
const_reference at(size_type pos) const {
if (pos >= size()) throw std::out_of_range("index out of range");
return (*this)[pos];
}
reference at(size_type pos) {
return const_cast<reference>(
static_cast<const gap_buffer&>(*this).at(pos)
);
}
const_reference front() const { return (*this)[0]; }
reference front() {
return const_cast<reference>(
static_cast<const gap_buffer&>(*this).front()
);
}
const_reference back() const { return (*this)[size() - 1]; }
reference back() {
return const_cast<reference>(
static_cast<const gap_buffer&>(*this).back()
);
}
T* data() noexcept = delete;
const T* data() const noexcept = delete;
[[nodiscard]] bool empty() const noexcept { return size() == 0; }
void shrink_to_fit() {
gap_buffer temp(begin(), end());
swap(temp);
}
void clear() { erase(begin(), end()); }
void resize(size_type count, const value_type& value = value_type{}) {
if (count < size())
erase(begin() + count, end());
else
insert(end(), count - size(), value);
}
iterator insert(const_iterator pos, const T& value) {
return insert(pos, &value, &value + 1);
}
iterator insert(const_iterator pos, T&& value) {
return insert(pos, std::make_move_iterator(&value), std::make_move_iterator(&value + 1));
}
iterator insert(const_iterator pos, size_type count, const T& value) {
for (size_type i = 0; i < count; ++i)
pos = insert(pos, value);
return iterator(this, pos.offset);
}
iterator insert(const_iterator pos, std::initializer_list<T> ilist) {
return insert(pos, ilist.begin(), ilist.end());
}
void erase(const_iterator pos) { erase(pos, pos + 1); }
template<typename ... Args>
iterator emplace(const_iterator pos, Args&& ... args) {
return insert(pos, T(std::forward<Args>(args)...));
}
template<typename ... Args>
reference emplace_back(Args&& ... args) {
return *emplace(end(), std::forward<Args>(args)...);
}
void push_back(const T& value) { insert(end(), &value, &value + 1); }
void push_back(T&& value) { insert(end(), std::make_move_iterator(&value), std::make_move_iterator(&value + 1)); }
void pop_back() { erase(end() - 1); }
const_iterator begin() const noexcept { return const_iterator(this); }
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const noexcept { return const_iterator(this, size()); }
const_iterator cend() const noexcept { return end(); }
iterator begin() noexcept { return iterator(this); }
iterator end() noexcept { return iterator(this, size()); }
const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); }
const_reverse_iterator crbegin() const noexcept { return rbegin(); }
const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); }
const_reverse_iterator crend() const noexcept { return rend(); }
reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
friend
bool operator ==(const gap_buffer& lhs, const gap_buffer& rhs) {
return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend
bool operator !=(const gap_buffer& lhs, const gap_buffer& rhs) {
return !(lhs == rhs);
}
friend
bool operator <(const gap_buffer& lhs, const gap_buffer& rhs) {
auto[left, right] = std::mismatch(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
if (right == rhs.end()) return false;
else if (left == lhs.end()) return true;
else return *left < *right;
}
friend
bool operator >(const gap_buffer& lhs, const gap_buffer& rhs) {
return rhs < lhs;
}
friend
bool operator <=(const gap_buffer& lhs, const gap_buffer& rhs) {
return !(rhs < lhs);
}
friend
bool operator >=(const gap_buffer& lhs, const gap_buffer& rhs) {
return !(lhs < rhs);
}
// additional
template<typename InputIt>
void append(InputIt first, InputIt last) {
insert(end(), first, last);
}
void append(const T& value) { push_back(value); }
void append(T&& value) { push_back(std::move(value)); }
template<typename InputIt>
void replace(const_iterator f1, const_iterator l1, InputIt f2, InputIt l2) {
auto cursor = erase(f1, l1);
insert(cursor, f2, l2);
}
template<typename InputIt>
void replace(const_iterator pos, InputIt first, InputIt last) {
replace(pos, pos + 1, first, last);
}
gap_buffer substr(const_iterator first, const_iterator last) const {
return substr_impl<gap_buffer>(first, last);
}
protected:
template<typename U>
U substr_impl(const_iterator first, const_iterator last) const {
return U(first, last);
}
void relocate_gap(difference_type offset) {
if (gap_start != start + offset) {
if (gap_start < start + offset)
std::move(gap_start + gap_size,
          start + offset + gap_size,
          gap_start);
else
std::move_backward(start + offset,
gap_start,
gap_start + gap_size);
gap_start = start + offset;
}
}
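    // Illustrative layout sketch for relocate_gap (not normative):
    //
    //   before, gap after "ab":  [a][b][_][_][c][d]   gap_start = start + 2
    //   relocate_gap(1):         [a][_][_][b][c][d]   gap_start = start + 1
    //
    // Moving the gap left shifts the elements between the target offset and
    // the old gap start to the right (move_backward); moving it right shifts
    // the elements just past the gap to the left (move).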
pointer allocate_and_construct(size_type n) {
pointer result = data_allocator.allocate(n);
std::uninitialized_default_construct_n(result, n);
return result;
}
void destroy_and_deallocate(pointer start, pointer finish) {
std::destroy(start, finish);
if (start)
data_allocator.deallocate(start, finish - start);
}
private:
Allocator data_allocator;
pointer start;
pointer finish;
pointer gap_start;
size_type gap_size;
};
template<typename T, typename Allocator>
void swap(gap_buffer<T, Allocator>& lhs, gap_buffer<T, Allocator>& rhs) {
lhs.swap(rhs);
}
}
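// --- Usage sketch (illustrative, not part of the header) ---
// #include "gap_buffer.h"
// #include <iostream>
//
// int main() {
//     dr::gap_buffer<char> buf{'h', 'e', 'l', 'l', 'o'};
//     buf.insert(buf.begin() + 5, {',', ' '});  // edits near the gap are cheap
//     for (char c : {'w', 'o', 'r', 'l', 'd'}) buf.append(c);
//     for (char c : buf) std::cout << c;        // prints "hello, world"
//     std::cout << '\n';
// }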
|
{"hexsha": "30128633a412da2b11f1833ec52d0b0e2c142bc9", "size": 17324, "ext": "h", "lang": "C", "max_stars_repo_path": "include/gap_buffer.h", "max_stars_repo_name": "lie-yan/gapbuffer", "max_stars_repo_head_hexsha": "b6b3d621430029d989ebed8a672eee769c214ea5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-02-28T12:41:19.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-02T09:33:21.000Z", "max_issues_repo_path": "include/gap_buffer.h", "max_issues_repo_name": "lie-yan/gapbuffer", "max_issues_repo_head_hexsha": "b6b3d621430029d989ebed8a672eee769c214ea5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/gap_buffer.h", "max_forks_repo_name": "lie-yan/gapbuffer", "max_forks_repo_head_hexsha": "b6b3d621430029d989ebed8a672eee769c214ea5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9424572317, "max_line_length": 116, "alphanum_fraction": 0.6386515816, "num_tokens": 4130}
|
"""
inputfile
Abstract type for all kinds of input files
"""
abstract type inputfile end
"""
    Inputconstants(; lx, ly, maxruntime, dumping, τ, gravity, γ, δ, μ, kbt)
Struct containing input parameters.
Contains `.lx` lattice points in x-direction and `.ly` lattice points in y-direction.
Other fields are `.maxruntime` for the maximal number of time steps and `.dumping` to limit the number of output files.
On top of these there are the physical quantities `.gravity`, `.γ`, `.δ` and `.kbt`,
i.e. the gravitational acceleration, the fluid's surface tension, the slip length and the thermal energy.
The example below uses the default values: a square lattice of 512 by 512 lattice units,
run for 100000 lattice Boltzmann time steps, writing output only every 1000 time steps,
with no gravity, a surface tension of 0.01 and a slip length of 1.
# Example
```jldoctest firsttest
julia> using JuSwalbe
julia> new_input = JuSwalbe.Inputconstants()
JuSwalbe.Inputconstants
lx: Int64 512
ly: Int64 512
maxruntime: Int64 100000
dumping: Int64 1000
τ: Float64 1.0
gravity: Float64 0.0
γ: Float64 0.01
δ: Float64 1.0
μ: Float64 0.16666666666666666
kbt: Float64 0.0
julia> new_input.γ
0.01
```
# References
See also: [`readinput`](@ref), [`findargument`](@ref), [`computeslip`](@ref)
"""
@with_kw struct Inputconstants <: inputfile
lx = 512
ly = 512
maxruntime = 100000
dumping = 1000
τ = 1.0
gravity = 0.0
γ = 0.01
δ = 1.0
μ = 1 / 3 * (2 - τ) / 2 * τ
kbt = 0.0
end
"""
readinput(file)
Reads input parameters from a `file`.
The expected amount of parameters can be addressed with [`Inputconstants`](@ref).
For now it expects eight values for different runtime constants.
# Example
```jldoctest secondtest
julia> using JuSwalbe, DelimitedFiles
julia> args = ["Lattice_points_x" 10; "Lattice_points_y" 5; "Max_run_time" 1000; "Output_dump" 100; "Relaxation_rate" 1.0; "gravity" 0.0; "surface_tension" 0.01; "slippage" 1.0] # Generate a text file with input
8×2 Array{Any,2}:
"Lattice_points_x" 10
"Lattice_points_y" 5
"Max_run_time" 1000
"Output_dump" 100
"Relaxation_rate" 1.0
"gravity" 0.0
"surface_tension" 0.01
"slippage" 1.0
julia> writedlm("test.txt", args)
julia> test = readinput("test.txt")
JuSwalbe.Inputconstants
lx: Int64 10
ly: Int64 5
maxruntime: Int64 1000
dumping: Int64 100
τ: Float64 1.0
gravity: Float64 0.0
γ: Float64 0.01
δ: Float64 1.0
μ: Float64 0.16666666666666666
kbt: Float64 0.0
julia> test.lx
10
julia> test.γ
0.01
julia> test.γ + test.δ
1.01
julia> isa(test.lx + test.gravity, Int32)
false
julia> rm("test.txt")
```
"""
function readinput(file)
    # Read the file and save it to an array
    input = readdlm(file)
    # Extract the numerical values
lx = findargument(input, "Lattice_points_x")
ly = findargument(input, "Lattice_points_y")
maxruntime = findargument(input, "Max_run_time")
dumping = findargument(input, "Output_dump")
τ = findargument(input, "Relaxation_rate")
gravity = findargument(input, "gravity")
γ = findargument(input, "surface_tension")
δ = findargument(input, "slippage")
kbt = findargument(input, "k_BT")
if kbt === nothing  # k_BT is optional; default to zero when absent
    kbt = 0.0
end
runtimeconstants = Inputconstants(lx=lx, ly=ly, maxruntime=maxruntime, dumping=dumping, τ=τ, gravity=gravity, γ=γ, δ=δ, kbt=kbt)
return runtimeconstants
end
"""
findargument(arr, str)
Searches for a numerical value based on a str input and returns the value.
# Example
```jldoctest
julia> using JuSwalbe
julia> arr = ["hmm" 1; "yeah" 0.01; "world" 1090]
3×2 Array{Any,2}:
"hmm" 1
"yeah" 0.01
"world" 1090
julia> world = findargument(arr, "world")
1090
```
# References
See also: [`readinput`](@ref)
"""
function findargument(arr::Array{Any,2}, argument::String)
for (i,arg) in enumerate(arr[:,1])
if arr[i,1] == argument
value = arr[i,2]
return value
end
end
end
|
{"hexsha": "3404f78641c7fb9a981a3eff91f25d5f79cd2762", "size": 4538, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/readinput.jl", "max_stars_repo_name": "Zitzeronion/JuSwalbe", "max_stars_repo_head_hexsha": "eb0aca0eabe327d4f9ca5756b4fc5b0e4fb2876b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-24T13:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-24T13:28:56.000Z", "max_issues_repo_path": "src/readinput.jl", "max_issues_repo_name": "Zitzeronion/JuSwalbe", "max_issues_repo_head_hexsha": "eb0aca0eabe327d4f9ca5756b4fc5b0e4fb2876b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-05-08T00:38:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-09T01:31:47.000Z", "max_forks_repo_path": "src/readinput.jl", "max_forks_repo_name": "Zitzeronion/JuSwalbe.jl", "max_forks_repo_head_hexsha": "eb0aca0eabe327d4f9ca5756b4fc5b0e4fb2876b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9340659341, "max_line_length": 211, "alphanum_fraction": 0.6507271926, "num_tokens": 1432}
|
import os
import json
import numpy as np
try:
    import cv2
except ImportError:
    pass
from copy import deepcopy
from tqdm import tqdm
from transformers import BertTokenizer, LayoutLMTokenizer
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from public.data_provider.doc2dial import load_tocr, load_doc
from config_file import *
def tokenize_bounding_box(bounding_box, tokenizer=None, bbox_type='token-level'):
"""
Layout type: original.
:param bounding_box: dict, {"text": ...,
                            "polygon": [x1, y1, x2, y2, x3, y3, x4, y4]}
    -> w = (x2 - x1 + x3 - x4) / 2
       h = (y4 - y1 + y3 - y2) / 2
:return: (tokens, tokenized_polygons)
"""
text = bounding_box["text"]
p = bounding_box["polygon"]
tokenizer_name = tokenizer.__class__.__name__
# print(text)
# print(p)
# exit(1)
tokens = tokenizer.tokenize(text)
tokenized_polygons = []
# string_length = len(' '.join([t.lstrip('##') for t in tokens]))
string_length = len(text)
left = min([p[0], p[2], p[4], p[6]])
right = max([p[0], p[2], p[4], p[6]])
top = min([p[1], p[3], p[5], p[7]])
bottom = max([p[1], p[3], p[5], p[7]])
width = right - left
height = bottom - top
width_per_char = width / string_length
# x_begin = (p[0] + p[6]) / 2 # - len(tokens[0]) * weight_per_char
# y_begin = (p[1] + p[7]) / 2
x_left = left
if bbox_type == 'token-level':
for i, token in enumerate(tokens):
tokenized_polygon = [left, top, right, top, right, bottom, left, bottom]
if tokenizer_name == 'BertTokenizer':
if token.startswith('##'):
token = token.lstrip('##')
x_left -= width_per_char  # step back: the '##' prefix means no preceding space
elif tokenizer_name == 'LongformerTokenizer':
if token.startswith('\u0120'):
token = token.lstrip('\u0120')
else:
if i != 0:
x_left -= width_per_char
else:
if i != 0:
x_left -= width_per_char
x_right = x_left + len(token) * width_per_char
tokenized_polygon[0] = tokenized_polygon[6] = int(x_left)
tokenized_polygon[2] = tokenized_polygon[4] = int(x_right)
x_left = x_right
x_left += width_per_char  # advance past the space between words
tokenized_polygons.append(tokenized_polygon)
elif bbox_type == 'cell-level':
tokenized_polygons = [[left, top, right, top, right, bottom, left, bottom]] * len(tokens)
# tokenized_polygons = np.clip(np.array(tokenized_polygons), 0, MAX_2D_POS - 1).tolist()
assert (0 <= np.min(tokenized_polygons) <= np.max(tokenized_polygons) <= MAX_2D_POS - 1)
assert (len(tokens) == len(tokenized_polygons))
# print(tokens)
# print(tokenized_polygons)
return tokens, tokenized_polygons
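# Worked example (a sketch; numbers are illustrative):
# For the text "it will" in a box spanning x = 0..70, y = 0..10, the string
# has 7 characters, so width_per_char = 10. With a BERT-style tokenizer the
# token-level boxes become roughly:
#   "it"   -> [0, 0, 20, 0, 20, 10, 0, 10]
#   "will" -> [30, 0, 70, 0, 70, 10, 30, 10]
# i.e. each token's horizontal extent is proportional to its character
# count, with one character's width reserved for the separating space.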
def bbox_is_valid(bbox):
return len(bbox) == 8 and \
np.min(bbox) >= 0 and \
np.max(bbox) <= (MAX_2D_POS - 1) and \
bbox[4] > bbox[0] and \
bbox[5] > bbox[1]
def disturb(bboxs, p, shift=int(MAX_2D_POS * 0.001)):
"""
:param bboxs: list of [x0, y0, x1, y1, x2, y2, x3, y3]
:param p: the proportion of bboxes to disturb
:return:
"""
bboxs = np.array(bboxs)
n_success = 0
for i in range(len(bboxs)):
if np.random.rand(1) < p:
left = np.clip(bboxs[i][0] + np.random.randint(-1, 2, 1) * shift, 0, MAX_2D_POS - 1)
right = np.clip(bboxs[i][2] + np.random.randint(-1, 2, 1) * shift, 0, MAX_2D_POS - 1)
upper = np.clip(bboxs[i][1] + np.random.randint(-1, 2, 1) * shift, 0, MAX_2D_POS - 1)
bottom = np.clip(bboxs[i][5] + np.random.randint(-1, 2, 1) * shift, 0, MAX_2D_POS - 1)
bbox = [left, upper, right, upper, right, bottom, left, bottom]
if bbox_is_valid(bbox):
bboxs[i] = bbox
n_success += 1
bboxs = np.clip(bboxs, 0, MAX_2D_POS - 1)
bboxs = bboxs.astype('int').tolist()
# print('Success rate: %s' % (n_success / len(bboxs)))
return bboxs
def align_ocr_text(tocr_filename, document, tokenizer=None, return_coefficient=False):
DetectedTexts, Polygons, normalized_coefficient = load_tocr(tocr_filename, normalize=True, mode='original')
_, filename = os.path.split(tocr_filename)
domain, index, _ = filename.split('.')[0].split('-')
document = document['doc_text'].replace('\n', '')
# print(document)
# print(DetectedTexts)
# print(Polygons)
words_aligned = []
# Align the sentence based ONLY on the lengths of the words.
# The result is not an exact alignment, but it is at most off by
# one or two words at the beginning and the end of each region.
words_from_doc = document.split(' ')
start = 0
pre_text = ''
for i in range(len(DetectedTexts) - 1):
detectedtext = DetectedTexts[i]
words_from_tocr = detectedtext.split(' ')
if ' '.join(words_from_doc[start:start+len(words_from_tocr)]) == '':
words_aligned.append(pre_text)
else:
pre_text = ' '.join(words_from_doc[start:start+len(words_from_tocr)])
words_aligned.append(pre_text)
start += len(words_from_tocr)
if ' '.join(words_from_doc[start:]) == '':
words_aligned.append(pre_text)
else:
words_aligned.append(' '.join(words_from_doc[start:]))
# print(words_aligned)
# print(DetectedTexts)
assert (len(words_aligned) == len(DetectedTexts) == len(Polygons))
all_tokens, all_polygons = [], []
for words, polygon in zip(words_aligned, Polygons):
tokens, tokenized_polygons = tokenize_bounding_box(bounding_box={"text": words,
"polygon": polygon},
tokenizer=tokenizer)
all_tokens.extend(tokens)
all_polygons.extend(tokenized_polygons)
if return_coefficient:
return all_tokens, all_polygons, normalized_coefficient
else:
return all_tokens, all_polygons
def debug_align_ocr_text():
data_root_path = '/dataset/doc2dial/v2'
tocr_dir = os.path.join(data_root_path, 'document_ocr')
tocr_filename = os.path.join(tocr_dir, os.listdir(tocr_dir)[0])
tocr_domain, tocr_index, _ = os.path.split(tocr_filename)[-1].split('.')[0].split('-')
doc_filename = os.path.join(data_root_path, 'doc2dial_doc_with_index.json')
doc_list = load_doc(doc_filename)
index2docid, docid2index = {}, {}
for doc in tqdm(doc_list):
doc_id, index = doc['doc_id'], doc['index']
index2docid[index] = doc_id
docid2index[doc_id] = index
with open(doc_filename) as f:
doc_json_data = json.loads(f.read())
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
all_tokens, all_polygons = align_ocr_text(tocr_filename=tocr_filename,
document=doc_json_data["doc_data"][tocr_domain][index2docid[int(tocr_index)]],
tokenizer=tokenizer)
print(all_tokens)
print(all_polygons)
def align_polygon2document(detectedtexts, polygons, doc_tokens, tokenizer):
words_aligned = []
start = 0
pre_text = ''
for i in range(len(detectedtexts) - 1):
detectedtext = detectedtexts[i]
words_from_tocr = detectedtext.split(' ')
if ' '.join(doc_tokens[start:start+len(words_from_tocr)]) == '':
words_aligned.append(pre_text)
else:
pre_text = ' '.join(doc_tokens[start:start+len(words_from_tocr)])
words_aligned.append(pre_text)
start += len(words_from_tocr)
if ' '.join(doc_tokens[start:]) == '':
words_aligned.append(pre_text)
else:
words_aligned.append(' '.join(doc_tokens[start:]))
# print(words_aligned)
# print(detectedtexts)
# print(polygons)
assert (len(words_aligned) == len(detectedtexts) == len(polygons))
new_doc_tokens, new_polygons = [], []
for words, polygon in zip(words_aligned, polygons):
tokens, tokenized_polygons = tokenize_bounding_box(bounding_box={"text": words,
"polygon": polygon},
tokenizer=tokenizer)
new_doc_tokens.extend(tokens)
new_polygons.extend(tokenized_polygons)
return new_doc_tokens, new_polygons
def add_bounding_box(image, bounding_box, normalized_coefficient, mode='xywh'):
fig, ax = plt.subplots(1, 1, dpi=500)
plt.imshow(image)
currentAxis = fig.gca()
for box in bounding_box:
if mode == 'xywh':
x, y, w, h = [v / normalized_coefficient for v in box[:4]]
elif mode == 'original':
x = box[0]
y = box[1]
w = box[2] - box[0]
h = box[7] - box[1]
x, y, w, h = [v / normalized_coefficient for v in [x, y, w, h]]
else:
raise ValueError('Unknown mode: [%s]' % mode)
currentAxis.add_patch(patches.Rectangle((x, y),
width=w,
height=h,
linewidth=0.5,
fill=False))
return fig, ax
def visualize_ocr(ocr_filename, image_filename):
DetectedTexts, Polygons, normalized_coefficient = load_tocr(tocr_filename=ocr_filename, normalize=True, mode='xywh')
image = cv2.imread(image_filename)
fig = plt.figure(dpi=300)
plt.imshow(image)
plt.show()
fig, ax = add_bounding_box(image, Polygons, normalized_coefficient=normalized_coefficient, mode='xywh')
plt.axis('off')
plt.show()
def visualize_ocr_by_token_and_bbox(tokens, bboxs, normalized_coefficient, image=None):
image = np.ones(shape=(int(1024 / normalized_coefficient), int(1024 / normalized_coefficient), 3)) * 0.8 if image is None else image
fig = plt.figure(dpi=300)
plt.imshow(image)
plt.show()
fig, ax = add_bounding_box(image,
bboxs,
normalized_coefficient=normalized_coefficient,
mode='xywh')
plt.axis('off')
plt.show()
def visualize_tokenized_tocr(tocr_filename, image_filename):
DetectedTexts, Polygons, normalized_coefficient = load_tocr(tocr_filename=tocr_filename, normalize=True, mode='original')
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer = 'split'
all_tokens, all_polygons = [], []
for words, polygon in zip(DetectedTexts, Polygons):
# words = [word for word in words.split(' ')]
tokens, tokenized_polygons = tokenize_bounding_box(bounding_box={"text": words,
"polygon": polygon},
tokenizer=tokenizer)
all_tokens.extend(tokens)
all_polygons.extend(tokenized_polygons)
image = cv2.imread(image_filename)
plt.imshow(image)
plt.show()
fig, ax = add_bounding_box(image, all_polygons, normalized_coefficient=normalized_coefficient, mode='original')
plt.axis('off')
plt.show()
def visualize_tocr_alignment(tocr_filename, image_filename, doc_filename):
# select the corresponding document
tocr_domain, tocr_index, _ = os.path.split(tocr_filename)[-1].split('.')[0].split('-')
doc_list = load_doc(doc_filename)
index2docid, docid2index = {}, {}
for doc in tqdm(doc_list):
doc_id, index = doc['doc_id'], doc['index']
index2docid[index] = doc_id
docid2index[doc_id] = index
with open(doc_filename) as f:
doc_json_data = json.loads(f.read())
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
all_tokens, all_polygons, normalized_coefficient = align_ocr_text(tocr_filename=tocr_filename,
document=doc_json_data["doc_data"][tocr_domain][
index2docid[int(tocr_index)]],
tokenizer=tokenizer,
return_coefficient=True)
image = cv2.imread(image_filename)
plt.imshow(image)
plt.show()
fig, ax = add_bounding_box(image, all_polygons, normalized_coefficient=normalized_coefficient, mode='original')
plt.axis('off')
plt.show()
if __name__ == '__main__':
visualize_tokenized_tocr(tocr_filename='/Users/jiangjunfeng/Downloads/dataset/doc2dial/v3/document_tocr/ssa-42.tocr',
image_filename='/Users/jiangjunfeng/Downloads/dataset/doc2dial/v3/document_screenshot/ssa-42.png')
# tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')
# polygon = (np.array([1246, 2739, 1325, 2730, 1332, 2784, 1252, 2793]) / 7279 * (MAX_2D_POS - 1)).astype('int')
# bbox = {"text": "It will",
# "polygon": polygon}
# tokens, tokenized_polygons = tokenize_bounding_box(bounding_box=bbox, tokenizer=tokenizer)
# print(tokens)
# print(polygon)
# print(tokenized_polygons)
|
{"hexsha": "e62ab32977f188ca54fb1a9d8f4b42e8aa026cc1", "size": 13450, "ext": "py", "lang": "Python", "max_stars_repo_path": "public/data_provider/ocr.py", "max_stars_repo_name": "Coldog2333/DGDS", "max_stars_repo_head_hexsha": "7c9b6904ab1d86fe2b430f01a3b583609bc095e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "public/data_provider/ocr.py", "max_issues_repo_name": "Coldog2333/DGDS", "max_issues_repo_head_hexsha": "7c9b6904ab1d86fe2b430f01a3b583609bc095e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "public/data_provider/ocr.py", "max_forks_repo_name": "Coldog2333/DGDS", "max_forks_repo_head_hexsha": "7c9b6904ab1d86fe2b430f01a3b583609bc095e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9943502825, "max_line_length": 136, "alphanum_fraction": 0.5931598513, "include": true, "reason": "import numpy", "num_tokens": 3356}
|
clf reset;
echo on
% This script demonstrates the use of the RSOM.
clc;
% load the example dissimilarity data
load exampleDissimilarity.mat;
% display the eigenvalue spectrum
[V, D] = eig(Dissim);
eVals = diag(D);
figure; bar(eVals);
pause % Strike any key to continue...
clc;
% init RSOM
sMap = rsom_lininit(Dissim, [10 10]);
% train the RSOM
sMap = rsom_batchtrain(sMap, Dissim);
% Display the U-Matrix
figure;
rsom_show(sMap, Dissim);
% The U-Matrix shows clearly the structure of the data, i.e. that two
% clusters are available
pause % Strike any key to continue...
clc;
% do linear embedding of the distance matrix into a 3 dimensional space
x = cmdscale(Dissim.^(1/2));
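% (cmdscale performs classical multidimensional scaling; taking the square
% root assumes Dissim holds squared dissimilarities -- an assumption here.)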
x = x(:,1:3);
% plot the resulting data
h = figure; hold on;
plot3(x(1:100, 1), x(1:100, 2), x(1:100, 3), '.b');
plot3(x(101:200, 1), x(101:200, 2), x(101:200, 3), '.r');
% Since approximated vectorial data are available now, we can compute
% the (approximated) neuron positions and plot them
Neurons = sMap.cCodebook * x;
figure(h);
% create a som struct
sMapSOM = som_map_struct(3, sMap.topol);
som_grid(sMapSOM,'Coord',Neurons);
echo off;
|
{"author": "ilarinieminen", "repo": "SOM-Toolbox", "sha": "f2597abc1ae33c2060e0443d49e854011ff21831", "save_path": "github-repos/MATLAB/ilarinieminen-SOM-Toolbox", "path": "github-repos/MATLAB/ilarinieminen-SOM-Toolbox/SOM-Toolbox-f2597abc1ae33c2060e0443d49e854011ff21831/contrib/rsom/rsom_demo.m"}
|
import numpy as np
from sklearn import datasets
# Set up the datasets
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
X, y = datasets.make_blobs(n_samples=n_samples, random_state=170)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
varied = datasets.make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=170)
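# --- Usage sketch (illustrative, not part of the original module) ---
# Quick visual check of one of the generated datasets; assumes matplotlib
# is available.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    X_varied, y_varied = varied
    plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_varied, s=5)
    plt.title('make_blobs with per-cluster standard deviations')
    plt.show()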
|
{"hexsha": "4a9d2b400b46b60ed4abaeb38252abe352f4c7f0", "size": 605, "ext": "py", "lang": "Python", "max_stars_repo_path": "Dataset/toy_dataset/cluster_toy_dataset.py", "max_stars_repo_name": "pengchenyu111/PaperCodeReplication", "max_stars_repo_head_hexsha": "7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Dataset/toy_dataset/cluster_toy_dataset.py", "max_issues_repo_name": "pengchenyu111/PaperCodeReplication", "max_issues_repo_head_hexsha": "7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Dataset/toy_dataset/cluster_toy_dataset.py", "max_forks_repo_name": "pengchenyu111/PaperCodeReplication", "max_forks_repo_head_hexsha": "7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5882352941, "max_line_length": 96, "alphanum_fraction": 0.7619834711, "include": true, "reason": "import numpy", "num_tokens": 194}
|
import sys
sys.path.insert(0, 'data')
import pandas as pd
import numpy as np
from matplotlib import pyplot
import collections
from sklearn.model_selection import train_test_split, cross_val_score, RepeatedStratifiedKFold
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve, f1_score, auc, accuracy_score, precision_score, recall_score, balanced_accuracy_score, plot_confusion_matrix
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, RandomOverSampler
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import TomekLinks
from clover.over_sampling import ClusterOverSampler
from sklearn.svm import SVC
from numpy import mean,where
from sklearn.decomposition import PCA
from imblearn.under_sampling import NearMiss
from xgboost import XGBClassifier
from imblearn.under_sampling import ClusterCentroids
from numpy import isnan
from sklearn.linear_model import LogisticRegression
#CURVES
fig, ((ax1, ax2, axBar), (ax3, ax4,axBar2), (ax5, ax6,axBar3), (ax7, ax8,axBar4), (ax9, ax10,axBar5), (ax11, ax12, axBar6),(ax13, ax14,axBar7)) = pyplot.subplots(7,3)
fig.suptitle('ROC AND AUC CURVES')
fig.tight_layout(pad=0.5)
def pca(X_train_pca, X_test_pca):
    pca = PCA(n_components=3)  # adjust the number of components as needed
pca.fit(X_train_pca)
X_train_pca = pca.transform(X_train_pca)
X_test_pca = pca.transform(X_test_pca)
return X_train_pca, X_test_pca
def plotTargetClassValues(X,y,numberOfPlot):
fig2, ((axx1,axx2),(axx3,axx4),(axx5,axx6),(axx7,axx8)) = pyplot.subplots(4,2)
fig2.suptitle ('Number Of target values')
if (numberOfPlot == 1):
axx1.set_title('Imbalanced Data')
for label, _ in counter.items():
row_ix = where(y == label)[0]
axx1.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
elif (numberOfPlot == 2):
axx2.set_title('SMOTE')
for label, _ in counter.items():
row_ix = where(y == label)[0]
axx2.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
elif (numberOfPlot == 3):
axx3.set_title('Borderline SMOTE')
for label, _ in counter.items():
row_ix = where(y == label)[0]
axx3.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
elif (numberOfPlot == 4):
axx4.set_title('RandomOverSampler')
for label, _ in counter.items():
row_ix = where(y == label)[0]
axx4.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
elif (numberOfPlot == 5):
axx5.set_title('ClusterOverSampler')
for label, _ in counter.items():
row_ix = where(y == label)[0]
axx5.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
elif (numberOfPlot == 6):
axx6.set_title('UnderSampling')
for label, _ in counter.items():
row_ix = where(y == label)[0]
axx6.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
elif (numberOfPlot == 7):
axx7.set_title('ClusterCentroids')
for label, _ in counter.items():
row_ix = where(y == label)[0]
axx7.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
else:
fig2.show()
def makeClassificationLogisticRegression(X_train, y_train, X_test, y_test):
global y_predicted
global lr_probs
global model
#model = RandomForestClassifier(n_estimators=10, random_state=12,class_weight='balanced_subsample',criterion='entropy')
model = LogisticRegression(random_state=0)
model.fit(X_train,y_train)
y_predicted = model.predict(X_test)
    # Note: plain accuracy-style metrics can be misleading on imbalanced data
print('-------------------')
print('Accuracy Score : %f'%accuracy_score(y_test,y_predicted))
print('Balanced Accuracy Score : %f'%balanced_accuracy_score(y_test, y_predicted))
print('Precision Score : %f'%precision_score(y_test,y_predicted,average='macro'))
print('Recall Score : %f' %recall_score(y_test,y_predicted,average='macro'))
print('F1 Score : %f'%f1_score(y_test,y_predicted,average='macro'))
print('-------------------')
lr_probs = model.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
def makeClassificationCostSensitive(X_train, y_train):
model = SVC(gamma='scale', class_weight='balanced')
# define evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
scores = cross_val_score(model, X_train, y_train, scoring='roc_auc', cv=cv, n_jobs=-1)
# summarize performance
print('-------------------')
print('Mean ROC AUC: %.3f' % mean(scores))
print('-------------------')
def printCurvesWithClassImbalance(lr_probs, y_test, y_predicted, X_test):
lr_auc = roc_auc_score(y_test, lr_probs)
plot_auc_score = lr_auc
# summarize scores
print('-------------------')
print('LogisticRegression: ROC AUC=%.3f' % (lr_auc))
print('-------------------')
# calculate roc curves
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
# plot the roc curve for the model
ax1.plot(lr_fpr, lr_tpr, marker='.', label='LogisticRegression')
ax1.set_xlabel('False Positive Rate')
ax1.set_ylabel('True Positive Rate')
ax1.set_title('ROC CURVE with class imbalance')
# predict class values
lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
lr_f1, lr_auc = f1_score(y_test, y_predicted), auc(lr_recall, lr_precision)
# summarize scores
print('-------------------')
print('LogisticRegression Unbalanced: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
print('-------------------')
# plot the precision-recall curves
ax2.plot(lr_recall, lr_precision, marker='.', label='LogisticRegression')
ax2.set_xlabel('Recall')
ax2.set_ylabel('Precision')
ax2.set_title('AUC CURVE with class imbalance')
plot_confusion_matrix(model, X_test, y_test, ax=axBar)
return plot_auc_score
def printCurvesWithSMOTE(lr_probs, y_test, y_predicted, X_test):
# calculate scores
lr_auc = roc_auc_score(y_test, lr_probs)
plot_auc_score = lr_auc
print('-------------------')
print('LogisticRegression with SMOTE: ROC AUC=%.3f' % (lr_auc))
print('-------------------')
# calculate roc curves
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
# plot the roc curve for the model
ax3.plot(lr_fpr, lr_tpr, marker='.', label='LogisticRegression')
ax3.set_xlabel('False Positive Rate')
ax3.set_ylabel('True Positive Rate')
ax3.set_title('ROC CURVE with SMOTE')
# predict class values
lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
lr_f1, lr_auc = f1_score(y_test, y_predicted,average='macro'), auc(lr_recall, lr_precision)
print('-------------------')
print('LogisticRegression with SMOTE: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
print('-------------------')
ax4.plot(lr_recall, lr_precision, marker='.', label='LogisticRegression')
ax4.set_xlabel('Recall')
ax4.set_ylabel('Precision')
ax4.set_title('AUC CURVE with SMOTE')
plot_confusion_matrix(model, X_test, y_test, ax=axBar2)
return plot_auc_score
def printCurvesWithBorderLineSMOTE(lr_probs, y_test, y_predicted, X_test):
# calculate scores
lr_auc = roc_auc_score(y_test, lr_probs)
plot_auc_score = lr_auc
print('-------------------')
print('LogisticRegression with Borderline SMOTE: ROC AUC=%.3f' % (lr_auc))
print('-------------------')
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
ax5.plot(lr_fpr, lr_tpr, marker='.', label='LogisticRegression')
# axis labels
ax5.set_xlabel('False Positive Rate')
ax5.set_ylabel('True Positive Rate')
ax5.set_title('ROC CURVE with BorderLine SMOTE')
# predict class values
lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
lr_f1, lr_auc = f1_score(y_test, y_predicted,average='macro'), auc(lr_recall, lr_precision)
print('-------------------')
print('LogisticRegression with Borderline SMOTE: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
print('-------------------')
ax6.plot(lr_recall, lr_precision, marker='.', label='LogisticRegression')
ax6.set_xlabel('Recall')
ax6.set_ylabel('Precision')
ax6.set_title('AUC CURVE with BorderLine SMOTE')
plot_confusion_matrix(model, X_test, y_test, ax=axBar3)
return plot_auc_score
def printCurvesWithRandomOverSampler(lr_probs, y_test, y_predicted, X_test):
lr_auc = roc_auc_score(y_test, lr_probs)
plot_auc_score = lr_auc
print('-------------------')
print('LogisticRegression with RandomOverSampling: ROC AUC=%.3f' % (lr_auc))
print('-------------------')
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
ax7.plot(lr_fpr, lr_tpr, marker='.', label='LogisticRegression')
ax7.set_xlabel('False Positive Rate')
ax7.set_ylabel('True Positive Rate')
    ax7.set_title('ROC CURVE with RandomOverSampler')
lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
lr_f1, lr_auc = f1_score(y_test, y_predicted,average='macro'), auc(lr_recall, lr_precision)
print('-------------------')
print('LogisticRegression with RandomOverSampling: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
print('-------------------')
ax8.plot(lr_recall, lr_precision, marker='.', label='LogisticRegression')
ax8.set_xlabel('Recall')
ax8.set_ylabel('Precision')
    ax8.set_title('AUC CURVE with RandomOverSampler')
plot_confusion_matrix(model, X_test, y_test, ax=axBar4)
return plot_auc_score
def printCurvesWithClusterOverSampler(lr_probs, y_test, y_predicted, X_test):
lr_auc = roc_auc_score(y_test, lr_probs)
plot_auc_score = lr_auc
print('-------------------')
print('LogisticRegression with Cluster OverSampling: ROC AUC=%.3f' % (lr_auc))
print('-------------------')
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
ax9.plot(lr_fpr, lr_tpr, marker='.', label='LogisticRegression')
ax9.set_xlabel('False Positive Rate')
ax9.set_ylabel('True Positive Rate')
ax9.set_title('ROC CURVE with ClusterOverSampler')
lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
lr_f1, lr_auc = f1_score(y_test, y_predicted,average='macro'), auc(lr_recall, lr_precision)
print('-------------------')
print('LogisticRegression with Cluster OverSampling: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
print('-------------------')
ax10.plot(lr_recall, lr_precision, marker='.', label='LogisticRegression')
ax10.set_xlabel('Recall')
ax10.set_ylabel('Precision')
ax10.set_title('AUC CURVE with ClusterOverSampler')
plot_confusion_matrix(model, X_test, y_test, ax=axBar5)
return plot_auc_score
def printCurvesWithUnderSampling(lr_probs, y_test, y_predicted, X_test):
lr_auc = roc_auc_score(y_test, lr_probs)
plot_auc_score = lr_auc
print('-------------------')
print('LogisticRegression with UnderSampling: ROC AUC=%.3f' % (lr_auc))
print('-------------------')
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
ax11.plot(lr_fpr, lr_tpr, marker='.', label='LogisticRegression')
ax11.set_xlabel('False Positive Rate')
ax11.set_ylabel('True Positive Rate')
ax11.set_title('ROC CURVE with UnderSampling')
lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
lr_f1, lr_auc = f1_score(y_test, y_predicted,average='macro'), auc(lr_recall, lr_precision)
print('-------------------')
print('LogisticRegression with UnderSampling: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
print('-------------------')
ax12.plot(lr_recall, lr_precision, marker='.', label='LogisticRegression')
ax12.set_xlabel('Recall')
ax12.set_ylabel('Precision')
ax12.set_title('AUC CURVE with UnderSampling')
plot_confusion_matrix(model, X_test, y_test, ax=axBar6)
return plot_auc_score
def printCurvesWithClusterCentroids(lr_probs, y_test, y_predicted, X_test):
lr_auc = roc_auc_score(y_test, lr_probs)
plot_auc_score = lr_auc
print('-------------------')
print('LogisticRegression with ClusterCentroids: ROC AUC=%.3f' % (lr_auc))
print('-------------------')
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
ax13.plot(lr_fpr, lr_tpr, marker='.', label='LogisticRegression')
ax13.set_xlabel('False Positive Rate')
ax13.set_ylabel('True Positive Rate')
ax13.set_title('ROC CURVE with ClusterCentroids')
lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
lr_f1, lr_auc = f1_score(y_test, y_predicted,average='macro'), auc(lr_recall, lr_precision)
print('-------------------')
print('LogisticRegression with ClusterCentroids: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
print('-------------------')
ax14.plot(lr_recall, lr_precision, marker='.', label='LogisticRegression')
ax14.set_xlabel('Recall')
ax14.set_ylabel('Precision')
ax14.set_title('AUC CURVE with ClusterCentroids')
plot_confusion_matrix(model, X_test, y_test, ax=axBar7)
return plot_auc_score
def plotCurves():
# show the plot
fig.show()
#------------------------
data = pd.read_csv('./data/Myocardial infarction complications Database.csv')
print('-------------------')
print (data)
print('-------------------')
data.replace("?", np.nan, inplace = True)
#------------------------
#------------------------
#print list of columns and number of NaN values
missing_data = data.isnull()
print('-------------------')
print(data.isnull().sum())
print('-------------------')
#plot for these columns
data.isnull().sum().reset_index(name="names").plot.bar(x='index', y='names', rot=90)
#------------------------
#------------------------
print('-------------------')
print(data.describe())
print(data.head(10))
print('-------------------')
data = pd.DataFrame(data)
X = data.iloc[:, 1:112]
y = data.iloc[:, 114]
#drop columns with many NaN values - got it from plot
del X["IBS_NASL"]
del X["KFK_BLOOD"]
del X["S_AD_KBRIG"]
del X["D_AD_KBRIG"]
del X["R_AB_3_n"]
del X["R_AB_2_n"]
#del X["NA_R_2_n"]
#del X["NA_R_3_n"]
#del X["NOT_NA_2_n"]
#del X["NOT_NA_3_n"]
print('-------------------')
print(X.shape)
print(y.shape)
#------------------------
#------------------------
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=0, stratify=y)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print('-------------------')
#------------------------
#------------------------
#preprocessing
imputer = KNNImputer(weights='uniform',n_neighbors=50)
X_train = imputer.fit_transform(X_train)
X_test = imputer.transform(X_test)
print('-------------------')
print('Missing Values Train: %d' % isnan(X_train).sum())
print('Missing Values Test: %d' % isnan(X_test).sum())
print('-------------------')
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
#------------------------
#------------------------
X_train_pca,X_test_pca = pca(X_train,X_test)
makeClassificationLogisticRegression(X_train_pca, y_train, X_test_pca, y_test)
lr_auc1 = printCurvesWithClassImbalance(lr_probs, y_test, y_predicted, X_test_pca)
#plotTargetClassValues(X_train,y_train,1)
#------------------------
#------------------------
counter = collections.Counter(y_train)
print('-------------------')
print('Before SMOTE',counter)
smote = SMOTE(random_state=12)
X_train_sm,y_train_sm = smote.fit_resample(X_train, y_train)
counter = collections.Counter(y_train_sm)
print('After SMOTE',counter)
print('-------------------')
X_train_sm,X_test_pca = pca(X_train_sm,X_test)
makeClassificationLogisticRegression (X_train_sm, y_train_sm, X_test_pca, y_test)
lr_auc2 = printCurvesWithSMOTE(lr_probs, y_test, y_predicted, X_test_pca)
#plotTargetClassValues(X_train_sm,y_train_sm,2)
#------------------------
#------------------------
counter = collections.Counter(y_train)
print('-------------------')
print('Before SMOTE Borderline',counter)
borderLineSMOTE = BorderlineSMOTE(kind='borderline-2', random_state=0)
X_train_sm_borderline,y_train_sm_borderline = borderLineSMOTE.fit_resample(X_train, y_train)
counter = collections.Counter(y_train_sm_borderline)
print('After SMOTE Borderline',counter)
print('-------------------')
X_train_sm_borderline,X_test_pca = pca(X_train_sm_borderline,X_test)
makeClassificationLogisticRegression(X_train_sm_borderline, y_train_sm_borderline, X_test_pca, y_test)
lr_auc3 = printCurvesWithBorderLineSMOTE(lr_probs, y_test, y_predicted, X_test_pca)
#plotTargetClassValues(X_train_sm_borderline,y_train_sm_borderline,3)
#------------------------
#------------------------
counter = collections.Counter(y_train)
print('-------------------')
print('Before RandomOverSampler',counter)
oversample = RandomOverSampler(sampling_strategy='minority')
#oversample = RandomOverSampler(sampling_strategy=0.5)
X_over, y_over = oversample.fit_resample(X_train, y_train)
counter = collections.Counter(y_over)
print('After RandomOverSampler',counter)
print('-------------------')
X_over,X_test_pca = pca(X_over,X_test)
makeClassificationLogisticRegression(X_over, y_over, X_test_pca, y_test)
lr_auc4 = printCurvesWithRandomOverSampler(lr_probs, y_test, y_predicted, X_test_pca)
#plotTargetClassValues(X_over,y_over,4)
#------------------------
#------------------------
counter = collections.Counter(y_train)
print('-------------------')
print('Before KMeans',counter)
smote = SMOTE(random_state= 12)
kmeans = KMeans(n_clusters=2, random_state=17)
kmeans_smote = ClusterOverSampler(oversampler=smote, clusterer=kmeans)
# Fit and resample imbalanced data
X_res, y_res = kmeans_smote.fit_resample(X_train, y_train)
counter = collections.Counter(y_res)
print('After KMeans',counter)
print('-------------------')
X_res,X_test_pca = pca(X_res,X_test)
makeClassificationLogisticRegression(X_res, y_res, X_test_pca, y_test)
lr_auc5 = printCurvesWithClusterOverSampler(lr_probs, y_test, y_predicted, X_test_pca)
#plotTargetClassValues(X_res,y_res,5)
#------------------------
#------------------------
makeClassificationCostSensitive(X_train, y_train)
#------------------------
#------------------------
counter = collections.Counter(y_train)
print('-------------------')
print('Before UnderSampling',counter)
undersample = NearMiss(version=2, n_neighbors=5)
X_under, y_under = undersample.fit_resample(X_train, y_train)
counter = collections.Counter(y_under)
print('After UnderSampling',counter)
print('-------------------')
X_under,X_test_pca = pca(X_under,X_test)
makeClassificationLogisticRegression(X_under, y_under, X_test_pca, y_test)
lr_auc6 = printCurvesWithUnderSampling(lr_probs, y_test, y_predicted, X_test_pca)
#plotTargetClassValues(X_under,y_under,6)
#------------------------
#------------------------
boostingmodel = XGBClassifier(scale_pos_weight=100)
# define evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
scores = cross_val_score(boostingmodel, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
# summarize performance
print('-------------------')
print('Mean ROC AUC for XGBClassifier: %.5f' % mean(scores))
print('-------------------')
#------------------------
#------------------------
counter = collections.Counter(y_train)
print('-------------------')
print('Before ClusterCentroids',counter)
trans = ClusterCentroids(random_state=0)
X_resampled, y_resampled = trans.fit_resample(X_train, y_train)
counter = collections.Counter(y_resampled)
print('After ClusterCentroids',counter)
print('-------------------')
X_resampled,X_test_pca = pca(X_resampled,X_test)
makeClassificationLogisticRegression(X_resampled, y_resampled, X_test_pca, y_test)
lr_auc7 = printCurvesWithClusterCentroids(lr_probs, y_test, y_predicted, X_test_pca)
#plotTargetClassValues(X_resampled,y_resampled,7)
#------------------------
#plotTargetClassValues(X,y,8)
fig3,axRoc = pyplot.subplots()
axRoc.bar(['Unbalanced' , 'SMOTE' , 'BorderLine SMOTE' , 'RandomOverSampler', 'ClusterOverSampler', 'UnderSampling', 'ClusterCentroids'],[lr_auc1,lr_auc2,lr_auc3,lr_auc4,lr_auc5,lr_auc6,lr_auc7])
fig3.show()
plotCurves()
#------------------------
|
{"hexsha": "2e371fe7480f7424a8e659d1c39521a5472d37ee", "size": 19594, "ext": "py", "lang": "Python", "max_stars_repo_path": "advanced-ml.py", "max_stars_repo_name": "skouras-io/advanced-ml", "max_stars_repo_head_hexsha": "68cfaa3edb9592b8183f607f9ff61628fdb0c2cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "advanced-ml.py", "max_issues_repo_name": "skouras-io/advanced-ml", "max_issues_repo_head_hexsha": "68cfaa3edb9592b8183f607f9ff61628fdb0c2cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "advanced-ml.py", "max_forks_repo_name": "skouras-io/advanced-ml", "max_forks_repo_head_hexsha": "68cfaa3edb9592b8183f607f9ff61628fdb0c2cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0396975425, "max_line_length": 195, "alphanum_fraction": 0.6919465142, "include": true, "reason": "import numpy,from numpy", "num_tokens": 5153}
|
theory SimplyTypedLambdaCalculus
imports Main
begin
type_synonym var = string
no_notation Set.member ("(_/ \<in> _)" [51, 51] 50)
datatype type = TUnit
| TApp type type ("_ \<rightarrow> _")
datatype expr =
Unit
| Var var
| Abs var type expr
| App expr expr
(* it is important to choose a list here, since this defines how the type lookup is made
when augmenting the context *)
type_synonym ctx = "(var \<times> type) list"
fun has_type_in_ctx :: "var \<times> type \<Rightarrow> ctx \<Rightarrow> bool" ("(_ \<in> _)" [51, 51] 50) where
"has_type_in_ctx _ [] = False" |
"has_type_in_ctx (x,\<tau>) ((y,\<tau>')#\<Gamma>) = (if x = y then \<tau> = \<tau>' else has_type_in_ctx (x,\<tau>) \<Gamma>)"
inductive "value" :: "expr \<Rightarrow> bool" where
ValueUnit: "value Unit"
| ValueFn: "value (Abs x t e1)"
declare value.intros[simp,intro]
fun fvs :: "expr \<Rightarrow> var set" where
"fvs Unit = {}" |
"fvs (Var v) = {v}" |
"fvs (Abs x ty e) = fvs e - {x}" |
"fvs (App e1 e2) = fvs e1 \<union> fvs e2"
fun subst :: "var \<Rightarrow> expr \<Rightarrow> expr \<Rightarrow> expr" where
"subst x t Unit = Unit" |
"subst x t (Var v) = (if x = v then t else Var v)" |
"subst x t (Abs v ty e) = Abs v ty (if x = v then e else subst x t e)" |
"subst x t (App e1 e2) = App (subst x t e1) (subst x t e2)"
inductive reduce :: "expr \<Rightarrow> expr \<Rightarrow> bool" where
CApp1: "reduce e1 e1' \<Longrightarrow> reduce (App e1 e2) (App e1' e2)"
| CApp2: "value e1 \<Longrightarrow> reduce e2 e2' \<Longrightarrow> reduce (App e1 e2) (App e1 e2')"
| RApp: "value e2 \<Longrightarrow> reduce (App (Abs x ty e1) e2) (subst x e2 e1)"
declare reduce.intros[simp,intro]
inductive_cases ReduceAppE[elim]: "reduce (App e1 e2) e"
inductive has_type :: "ctx \<Rightarrow> expr \<Rightarrow> type \<Rightarrow> bool" ("_ \<turnstile> _ : _" [40,40,40]) where
TypeUnit: "\<Gamma> \<turnstile> Unit : TUnit" |
TypeVar: "(x, \<tau>) \<in> \<Gamma> \<Longrightarrow> \<Gamma> \<turnstile> (Var x) : \<tau>" |
TypeFn: "\<lbrakk> ((x, \<tau>)#\<Gamma>) \<turnstile> e : \<tau>' \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (Abs x \<tau> e) : (\<tau> \<rightarrow> \<tau>')" |
TypeApp: "\<lbrakk> \<Gamma> \<turnstile> e1 : (\<tau>' \<rightarrow> \<tau>); \<Gamma> \<turnstile> e2 : \<tau>' \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (App e1 e2) : \<tau>"
declare has_type.intros[simp,intro]
inductive_cases TypeUnitE[elim!]: "\<Gamma> \<turnstile> Unit : \<tau>"
inductive_cases TypeVarE[elim!]: "\<Gamma> \<turnstile> (Var x) : \<tau>"
inductive_cases TypeFnE[elim]: "\<Gamma> \<turnstile> (Abs x \<tau> e) : \<tau>"
inductive_cases TypeAppE[elim]: "\<Gamma> \<turnstile> (App e1 e2) : \<tau>"
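(* Small sanity-check lemma (added illustration): the identity function
   on TUnit is well-typed in the empty context. *)
lemma identity_well_typed:
  "[] \<turnstile> (Abs x TUnit (Var x)) : (TUnit \<rightarrow> TUnit)"
  by auto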
inductive step_or_value where
IsValue: "value e \<Longrightarrow> step_or_value e" |
IsStep: "\<exists>e'. reduce e e' \<Longrightarrow> step_or_value e"
declare step_or_value.intros[simp,intro]
inductive_cases StepOrValueE[elim!]: "step_or_value e"
lemma subst_identity[simp]:
"subst x (Var x) e = e"
by (induct e, auto)
lemma identity_exists:
"value e \<Longrightarrow> reduce (App (Abs x \<tau> (Var x)) e) e"
(* found by sledgehammer *)
by (metis RApp subst.simps(2))
theorem progress:
"[] \<turnstile> e : \<tau> \<Longrightarrow> step_or_value e"
proof (induction "[] :: ctx" "e" "\<tau>" rule:has_type.induct)
case (TypeApp e1 \<tau>' \<tau> e2)
then show ?case
(* found by sledgehammer *)
using step_or_value.simps value.simps by auto
qed auto
lemma context_strengthening:
"\<Gamma> \<turnstile> e : \<tau> \<Longrightarrow> \<forall>x \<tau>'. Set.member x (fvs e) \<longrightarrow> (x, \<tau>') \<in> \<Gamma> \<longrightarrow> (x, \<tau>') \<in> \<Gamma>' \<Longrightarrow> \<Gamma>' \<turnstile> e : \<tau>"
proof (induction arbitrary: \<Gamma>' rule: has_type.induct)
case (TypeApp \<Gamma> e1 \<tau>' \<tau> e2)
then show ?case
(* found by sledgehammer *)
by (metis Un_iff fvs.simps(4) has_type.TypeApp)
qed auto
lemma well_typed_fvs:
"\<Gamma> \<turnstile> e : \<tau> \<Longrightarrow> \<forall>v \<in> fvs e. (\<exists>\<tau>. (v, \<tau>) \<in> \<Gamma>)"
apply (induction rule:has_type.induct, auto)
by metis+
corollary well_typed_no_fvs:
"[] \<turnstile> e : \<tau> \<Longrightarrow> fvs e = {}"
(* sledgehammer *)
by (meson ex_in_conv has_type_in_ctx.simps(1) well_typed_fvs)
lemma subst_preservation:
"\<lbrakk> ((x, \<tau>')#\<Gamma>) \<turnstile> e2 : \<tau>; [] \<turnstile> e1 : \<tau>' \<rbrakk>
\<Longrightarrow> \<Gamma> \<turnstile> subst x e1 e2 : \<tau>"
proof (induct e2 arbitrary: \<Gamma> \<tau> \<tau>' x)
case (Var v)
then show ?case
(* found by sledgehammer *)
by (metis TypeVar TypeVarE context_strengthening has_type_in_ctx.simps(1) has_type_in_ctx.simps(2) subst.simps(2))
next
case (Abs y \<sigma> e)
then show ?case
proof (cases "x = y")
case False
with Abs(2) obtain \<tau>\<^sub>2 where "\<tau> = (\<sigma> \<rightarrow> \<tau>\<^sub>2)" "((y, \<sigma>)#(x, \<tau>')#\<Gamma>) \<turnstile> e : \<tau>\<^sub>2"
by (induct "(x, \<tau>') # \<Gamma>" "Abs y \<sigma> e" "\<tau>" rule:has_type.induct, blast)
then have "((x, \<tau>')#(y, \<sigma>)#\<Gamma>) \<turnstile> e : \<tau>\<^sub>2"
(* found by sledgehammer *)
using False context_strengthening has_type_in_ctx.simps(2) by presburger
then show ?thesis
(* found by sledgehammer *)
by (simp add: Abs.hyps Abs.prems(2) False \<open>\<tau> = \<sigma> \<rightarrow> \<tau>\<^sub>2\<close>)
qed (auto simp: context_strengthening)
qed fastforce+
theorem preservation:
"reduce e e' \<Longrightarrow> [] \<turnstile> e : \<tau> \<Longrightarrow> [] \<turnstile> e' : \<tau>"
proof (induct arbitrary: \<tau> rule:reduce.induct)
case (RApp e2 x \<tau>' e)
from RApp.prems show ?case
by (induct "[] :: ctx" "App (Abs x \<tau>' e) e2" "\<tau>")
(* found by sledgehammer *)
(metis expr.inject(2) expr.simps(11) expr.simps(15) expr.simps(7) has_type.simps subst_preservation type.inject)
qed blast+
end
|
{"author": "ThreeFx", "repo": "toy-examples", "sha": "b687b0c48742d01a1839f71068f51fcc026aad34", "save_path": "github-repos/isabelle/ThreeFx-toy-examples", "path": "github-repos/isabelle/ThreeFx-toy-examples/toy-examples-b687b0c48742d01a1839f71068f51fcc026aad34/SimplyTypedLambdaCalculus.thy"}
|
"""
MultiComplexMat implements a wrapper for any objects (reals, np.arrays
and sparse matrices) which can have multiple imaginary-like units.
E.g. i*i = -1 and j*j = -1, but a mixed product i*j = j*i does not
simplify further. MultiComplexMat overloads all common
numerical operations: +, -, *, @ etc. such that these rules are preserved.
For example
x = a + bi + cj
y = d + ei
x*y = (a*d - b*e) + i*(a*e + d*b) + j*(c*d) + ij*(c*e)
x + y = a+d + (b+e)*i + cj
Here a,b,c,d,e can be whatever objects implementing the common numerical
operations, e.g. numpy arrays or scipy sparse matrices.
One can use whatever characters as indices and one can query specific
components from the matrix with A["i"] or A["ij"]. Missing, or zero,
components are indicated with "None" and they will translate to zeros
in numerical operations.
Warning: The objects inside MultiComplexMat objects are aggressively
recycled, i.e. in a sum C = A + B, if A["i"] == None, C["i"] will be the
exact object which was stored in B["i"] and thus any mutations done for
C["i"] will be visible in B["i"] also.
"""
import numpy as np
import itertools
from collections import defaultdict
import time
from . import dict_tools
import scipy.sparse as sps
def get_linear_system(A, b, **kwargs):
""" Get a linear system from multicomplex matrices A and b such that
A x = b is decomposed for each component and stacked in a sparse CSC
matrix which can be given e.g. to spsolve.
The equation system is, e.g. for "ij" component string:
A x = b
=> (A*x)[""] = b[""]
(A*x)["i"] = b["i"]
(A*x)["j"] = b["j"]
(A*x)["ij"] = b["ij"]
Example:
# components '',i,j,ij
A = 1 + 1*i - 1*j
b = 10 + 10*i*j
C, d = get_linear_system(A,b)
=> C = array([[ 1, 1, -1, 0],
[-1, 1, 0, -1],
[ 1, 0, 1, 1],
[ 0, 1, -1, 1]])
d = array([[10],
[0],
[0],
[10]])
x = scipy.sparse.linalg.spsolve(C,d)
=> x = array([ 2., 4., -4., 2.])
"""
order = list(A.components())
order_lookup = dict(map(reversed, enumerate(order)))
sysmat = {}
bcol = len(order)
shapes = []
# assemble the system as single matrix and slice the last column off
# to utilize the shape information of the matrix blocks also in b
for c1, val in A.data.items():
for c2, col in order_lookup.items():
sign, comp = simplify(c1+c2)
row = order_lookup[comp]
old = sysmat.get((row,col), None)
if old is None:
sysmat[(row,col)] = sign*val
else:
sysmat[(row,col)] = old + sign*val
for c, v in b.data.items():
row = order_lookup[c]
sysmat[(row, bcol)] = v
shapes.append(np.shape(v))
lst = dict_tools.tolist(sysmat)
M = sps.bmat(lst,format='csc')
if kwargs.get('get_shapes', False):
return (M[:,:-1], M[:,-1]), shapes
else:
return (M[:,:-1], M[:,-1])
def to_mcmat(cstr, arr, shapes):
start = 0
components = []
for i in range(0,len(shapes)):
stop = start + shapes[i]
if start == stop:
components.append(None)
else:
vals = arr[start:stop]
if stop-start == 1:
components.append(vals[0])
else:
components.append(vals)
start = stop
data = dict(zip(all_components(cstr), components))
return mcmat(cstr, data)
def all_components(compstr):
return itertools.chain([""], combinations(compstr))
def combinations(components):
""" Return all possible 1..n combinations, order doesn't matter
e.g. "ij" -> "i", "j", "ij" """
for i in range(1,len(components)+1):
for x in itertools.combinations(components,i):
yield "".join(x)
def unified_component_string(*mcmats):
concat = "".join([m.component_str for m in mcmats])
out = list(set(concat))
out.sort()
return "".join(out)
def simplify(lst):
""" Given a component string 'lst' use simplification rules
(e.g. i*i = -1) to simplify it. Return the sign and the simplified
string.
Example: simplify('ijki') = (-1,'jk') """
n = len(lst)
# premature optimization
if(n == 1):
return 1, lst
elif(n == 2):
if lst[0] == lst[1]:
return -1, ''
else:
return 1, "".join(sorted(lst))
    # general, slower algorithm for n > 2
d = defaultdict(lambda: 0)
for t in lst:
d[t] = d[t]+1
terms_left = []
sign = 1
for t,v in d.items():
if v % 2 == 0:
sign = sign*(-1)**(int(v/2))
else:
terms_left.append(t)
sign = sign*(-1)**int((v-1)/2)
# keep in alphabetical order
terms_left.sort()
return sign, "".join(terms_left)
def mcmat(components, values, ident=None):
"""Construct a MultiComplexMat object from values.
Components is a string indicating which characters act as the components.
Values is a dict: {component_string: object} where component_string
are the components strings which can be formed using 'components'.
The empty string "" represents the real component.
Example:
mcmat("abc", {"": 1, "ab": 5, "ac": 15, "abc": 50}) // "bc" component
// is zero
"""
data = {}
for k in itertools.chain([""], combinations(components)):
item = values.pop(k, None)
if item is not None:
data[k] = item
if values:
components = list(itertools.chain([""], combinations(components)))
raise ValueError(f"Extra components {list(values.keys())} given "
"which is not allowed. Only components "
f"{components} are needed.")
return MultiComplexMat(components, data, ident, None)
def realmcmat(value, ident=None):
""" Construct a MultiComplexMat object with only "" component """
return MultiComplexMat("", {"": value}, ident, None)
def sub_with_none(v1,v2):
""" Substract treating None as zero """
p1 = v1 is None
p2 = v2 is None
if p1 and p2:
return None
elif p1:
return -v2
elif p2:
return v1
else:
return v1 - v2
def sum_with_none(v1,v2):
""" Sum treating None as zero """
p1 = v1 is None
p2 = v2 is None
if p1 and p2:
return None
elif p1:
return v2
elif p2:
return v1
else:
return v1 + v2
def matmul_with_none(v1,v2):
""" Matmul treating None as zero """
if v1 is None:
return None
elif v2 is None:
return None
else:
return v1 @ v2
def mul_with_none(v1,v2):
""" Standard mul treating None with zero """
if v1 is None:
return None
elif v2 is None:
return None
else:
return v1 * v2
def dictzip(*dicts):
""" Iterate over multiple dicts 'zipping' their elements with
matching keys. If some of the dicts are missing the entries,
they will be None."""
keyset = set(itertools.chain(*dicts))
return ((k, *[d.get(k,None) for d in dicts]) for k in keyset)
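# Illustrative example (hypothetical inputs):
#   sorted(dictzip({"a": 1}, {"a": 10, "b": 2}))
#   -> [('a', 1, 10), ('b', None, 2)]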
#%%
class MultiComplexMat():
def __init__(self, component_str, data, ident, default_zero_constructor):
self.component_str = component_str
self.data = data
self.ident = ident
self.default_zero_constructor = default_zero_constructor
def _shallow_copy(self, *objs, **kwargs):
""" Create shallow copy of the object, inheriting all possible data
from 'self' and overriding the stuff passed in with 'kwargs' """
cstr = kwargs.pop('component_str', None)
        if cstr is None:
cstr = unified_component_string(self, *objs)
data = kwargs.pop('data', self.data)
ident = kwargs.pop('ident', self.ident)
default_zero_constructor = kwargs.pop('default_zero_constructor',
self.default_zero_constructor)
# assert not (re is None or im is None), "re and im must be specified"
return MultiComplexMat(cstr, data, ident, default_zero_constructor)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
ufname = ufunc.__name__
if ufname == 'matmul':
# matmul f @ B where f is a numpy array
# easy way, elevate inputs to mcmats and then multiply
B = inputs[1]
A = realmcmat(inputs[0])
return A @ B
elif ufname == 'multiply':
B = inputs[1]
A = realmcmat(inputs[0])
return A * B
# elif ufname == 'absolute':
# return inputs[0].abs()
elif ufname == 'subtract':
B = inputs[1]
A = realmcmat(inputs[0])
return A - B
elif ufname == 'add':
B = inputs[1]
A = realmcmat(inputs[0])
return A + B
# elif ufname == 'conjugate':
# return inputs[0].conj()
# elif ufname == 'sqrt':
# raise NotImplementedError()
        else:
            # unsupported ufunc: defer so numpy can raise a sensible error
            return NotImplemented
def _mul_generic(self, obj_in, op):
""" Generic multiplication machinery, * and @ are implemented using
this
"""
if not isinstance(obj_in, MultiComplexMat):
            # Wrap whatever obj is into a MultiComplexMat
            obj = realmcmat(obj_in)
else:
obj = obj_in
d = dict()
for k1, v1 in self.data.items():
for k2, v2 in obj.data.items():
newind = "".join([k1,k2])
sign, left = simplify(newind)
old = d.get(left, None)
result = op(v1,v2)
if old is None:
d[left] = sign*result if result is not None else result
else:
                    d[left] = old + sign*result if result is not None else old
return self._shallow_copy(obj, data=d)
def __matmul__(self, obj):
return self._mul_generic(obj, matmul_with_none)
def __mul__(self, obj):
return self._mul_generic(obj, mul_with_none)
def set_default_constructor(self, fun):
""" The default shape of the contained matrices, this affects
the value returned by the indexing [] operator. If default shape is
None, indexing returns None for an empty component.
Otherwise it calls 'fun' and returns whatever it returns. """
self.default_zero_constructor = fun
def __rmatmul__(self, obj):
# is called if f @ A where f is not a MultiComplexMat object
# except when f is np.array, then an ufunc is called
A = realmcmat(obj)
return A.__matmul__(self)
def components(self):
""" Return all possible components of this MultiComplexMat object """
#return [""] + list(combinations(self.component_str))
return all_components(self.component_str)
def __repr__(self):
elems = {}
for k in self.components():
v = self.data.get(k,None)
if v is None:
continue
elif np.shape(v) == ():
elems[k] = v
else:
elems[k] = np.shape(v)
return f"MultiComplexMat({elems})"
def __delitem__(self, key):
        del self.data[key]
def __getitem__(self, key):
""" The [] indexing. If the component is 'None' check if
default constructor is set. This helps with arithmetic when using
components explicitly, e.g. A["i"] @ A["j"] will produce an error
if either one is None. """
item = self.data.get(key,None)
        if item is None and self.default_zero_constructor is not None:
return self.default_zero_constructor()
return item
def __setitem__(self, key, value):
self.data[key] = value
# def toarray(self):
# return self.re + 1j*self.im
# def __mul__(self, obj):
# # is called when A @ B where B is whatever
# if isinstance(obj, MultiComplexMat):
# re = self.re * obj.re - self.im * obj.im
# im = self.re * obj.im + self.im * obj.re
# else:
# # multiplication with a numpy array or constant
# ore, oim = csplit(obj)
# re = self.re * ore - self.im * oim
# im = self.re * oim + self.im * ore
# return self._shallow_copy(re=re, im=im)
def __rmul__(self, obj):
        # is called for f * A where f is not a MultiComplexMat object
        # except when f is an np.array, then a ufunc is called
A = realmcmat(obj)
return A.__mul__(self)
def __radd__(self, obj):
        # is called for f + A where f is not a MultiComplexMat object
        # except when f is an np.array, then a ufunc is called
A = realmcmat(obj)
return A.__add__(self)
def _sum_generic(self, obj, op):
""" Implements + and - operations """
if not isinstance(obj, MultiComplexMat):
            # Wrap whatever obj is into a MultiComplexMat
obj = realmcmat(obj)
data = {k: op(v1,v2)
for k,v1,v2 in dictzip(self.data, obj.data)}
return self._shallow_copy(obj, data=data)
def __add__(self, obj):
return self._sum_generic(obj, sum_with_none)
def __sub__(self, obj):
return self._sum_generic(obj, sub_with_none)
def __rsub__(self, obj):
        # is called for f - A where f is not a MultiComplexMat object
        # except when f is an np.array, then a ufunc is called
A = realmcmat(obj)
return A.__sub__(self)
# def elementwise_inv(self):
# return self.conj()*(1/self.abs()**2)
    # NOTE: abs() referenced the old two-component (re/im) layout and is
    # disabled until it is ported to the multicomponent data dict.
    # def abs(self):
    #     return np.sqrt(self.re**2 + self.im**2)
# def angle(self):
# return np.arctan2(self.im, self.re)
# def conj(self):
# return self._shallow_copy(im=-self.im)
# @property
# def real(self):
# return self.re
# @property
# def imag(self):
# return self.im
# def __pow__(self,n):
# magn = self.abs()**n
# ang = self.angle()
# re = magn*np.cos(ang*n)
# im = magn*np.sin(ang*n)
# return self._shallow_copy(re=re, im=im)
# def __truediv__(self, B):
# # is called when A @ B where B is whatever
# if not isinstance(B, MultiComplexMat):
# B = mcmat(B)
# return self*B.elementwise_inv()
#%%
"""
For convenience, define few arrays which can be easily used in expressions
"""
i = mcmat("i", {'i':1})
j = mcmat("j", {'j':1})
k = mcmat("k", {'k':1})
|
{"hexsha": "5ebc854b1e159abedaf0bda958a78d32355fd444", "size": 15380, "ext": "py", "lang": "Python", "max_stars_repo_path": "multicomplexmat.py", "max_stars_repo_name": "Ehtycs/modeling-spork", "max_stars_repo_head_hexsha": "9aeb8ce5f69f7456feb872744c07341c2966de33", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "multicomplexmat.py", "max_issues_repo_name": "Ehtycs/modeling-spork", "max_issues_repo_head_hexsha": "9aeb8ce5f69f7456feb872744c07341c2966de33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "multicomplexmat.py", "max_forks_repo_name": "Ehtycs/modeling-spork", "max_forks_repo_head_hexsha": "9aeb8ce5f69f7456feb872744c07341c2966de33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5811088296, "max_line_length": 78, "alphanum_fraction": 0.5468140442, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3904}
|
import tensorflow as tf
from Transformer import MHA
from Transformer.TransformerEncoder import TransformerEncoder
from Transformer.TransformerDecoder import TransformerDecoder
# EncoderLayer / DecoderLayer are used in the tests below but were not
# imported in the original file; the module paths here are assumed.
from Transformer.TransformerEncoder import EncoderLayer
from Transformer.TransformerDecoder import DecoderLayer
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from Transformer.TransformerCore import Get_Custom_Adam_Optimizer
import numpy as np
def initSession():
conf = tf.ConfigProto()
conf.gpu_options.allow_growth= True
sess = tf.Session(config=conf)
tf.keras.backend.set_session(sess)
def Get_Transformer_Model(transformer_encoder_layers, transformer_decoder_layers, model_depth, ff_depth, num_heads, SOURCE_SIZE, TARGET_SIZE,
POS_ENC_INPUT, POS_ENC_TARGET,
MAX_SEQ_LEN_INPUT, MAX_SEQ_LEN_TARGET):
initSession()
input_encoder = Input(shape=(None,))
input_decoder = Input(shape=(None,))
encoder_padding_mask = Input(shape=(None, 1, MAX_SEQ_LEN_INPUT))
decoder_padding_mask = Input(shape=(None, 1, MAX_SEQ_LEN_INPUT))
look_ahead_mask = Input(shape=(None, MAX_SEQ_LEN_TARGET, MAX_SEQ_LEN_TARGET))
transformer_encoder = TransformerEncoder(num_layers=transformer_encoder_layers,
model_depth= model_depth,
num_heads= num_heads,
feed_forward_depth= ff_depth,
input_vocab_size= SOURCE_SIZE,
maximum_pos_encoding= POS_ENC_INPUT
)(input_encoder, mask=encoder_padding_mask)
transformer_decoder, attn = TransformerDecoder(num_layers= transformer_decoder_layers,
model_depth= model_depth,
num_heads = num_heads,
feed_forward_depth= ff_depth,
target_vocab_size= TARGET_SIZE,
maximum_position_encoding= POS_ENC_TARGET)(input_decoder,
encoder_output=transformer_encoder,
look_ahead_mask = look_ahead_mask,
padding_mask = decoder_padding_mask)
output = Dense(TARGET_SIZE, activation='softmax')(transformer_decoder)
transformer_optimizer = Get_Custom_Adam_Optimizer(model_depth=model_depth)
model = Model([input_encoder, input_decoder, encoder_padding_mask, decoder_padding_mask, look_ahead_mask], output)
model.compile(optimizer=transformer_optimizer, loss='categorical_crossentropy')
model.summary()
return model
def create_padding_mask(seq):
seq = np.equal(seq, 0)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, np.newaxis, np.newaxis, :] # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
    # mask future tokens: 1 strictly above the diagonal, 0 elsewhere
    mask = np.triu(np.ones((size, size)), k=1)
    return mask  # (seq_len, seq_len)
def create_masks(inp, tar):
# Encoder padding mask
enc_padding_mask = create_padding_mask(inp)
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
dec_padding_mask = create_padding_mask(inp)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by
# the decoder.
look_ahead_mask = create_look_ahead_mask(tar.shape[1])
dec_target_padding_mask = create_padding_mask(tar)
combined_mask = np.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, combined_mask, dec_padding_mask
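# Shape sketch (illustrative): with inp of shape (64, 38) and tar of shape
# (64, 36), create_masks returns enc_padding_mask (64, 1, 1, 38),
# combined_mask (64, 1, 36, 36) and dec_padding_mask (64, 1, 1, 38).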
if __name__ == '__main__':
multiHeadAttention = MHA.MHA(model_depth=512, num_heads=8)
y = tf.random.uniform((1,60,512)) #(batch_size, encoder_sequence, model_depth)
out, attn = multiHeadAttention(y, k=y, q=y, mask=None)
print(f'MHA output shape => {out.shape}')
print(f'MHA attention shape => {attn.shape}')
encoderLayer = EncoderLayer(model_depth=512, num_heads=8, feed_forward_depth=2048)
output_layer_encoder = encoderLayer(tf.random.uniform((64,43,512)), mask=None)
print(f'Encoder layer output shape => {output_layer_encoder.shape}')
decoder_layer = DecoderLayer(512, 8, 2048)
decoder_layer_output, _, _ = decoder_layer(tf.random.uniform((64,50,512)), encoder_output=output_layer_encoder, look_ahead_mask=None, padding_mask=None)
print(f'Decoder layer output shape => {decoder_layer_output.shape}')
encoder = TransformerEncoder(num_layers=2, model_depth=512, num_heads=8, feed_forward_depth=2048, input_vocab_size=8500, maximum_pos_encoding=10000)
encoder_input = tf.random.uniform((64,62), dtype=tf.int64, minval=0, maxval=200)
encoder_output = encoder(encoder_input, mask=None)
print(f'Encoder output => {encoder_output.shape}')
decoder = TransformerDecoder(num_layers=2, model_depth=512, num_heads=8, feed_forward_depth=2048, target_vocab_size=8000,
maximum_position_encoding=5000)
decoder_input = tf.random.uniform((64,26), dtype=tf.int64, minval=0, maxval=200)
output, attn = decoder(decoder_input,
encoder_output=encoder_output,
look_ahead_mask = None,
padding_mask = None)
print(f'Decoder output => {output.shape}')
model = Get_Transformer_Model(2, 2, 512, 2048, 8, 8500, 8000, 10000, 6000, 38,36)
temp_input = np.random.uniform(0,200, size=(64,38))
temp_target = np.random.uniform(0,200, size=(64,36))
input_padding_mask, combined_mask, target_padding_mask = create_masks(temp_input, temp_target)
prediction = model.predict([temp_input, temp_target, input_padding_mask, target_padding_mask, combined_mask])
print(f'Transformer prediction shape => {prediction.shape}')
|
{"hexsha": "d7fd0567a8d6c8dec65c9507876f3cc93d6e2236", "size": 6103, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests.py", "max_stars_repo_name": "antoniorv6/Transformer-Keras", "max_stars_repo_head_hexsha": "9566f4211f92922a668977e72dbb72b722d4de5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests.py", "max_issues_repo_name": "antoniorv6/Transformer-Keras", "max_issues_repo_head_hexsha": "9566f4211f92922a668977e72dbb72b722d4de5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests.py", "max_forks_repo_name": "antoniorv6/Transformer-Keras", "max_forks_repo_head_hexsha": "9566f4211f92922a668977e72dbb72b722d4de5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.5447761194, "max_line_length": 156, "alphanum_fraction": 0.6541045388, "include": true, "reason": "import numpy", "num_tokens": 1274}
|
# -*- coding: utf-8 -*-
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import itertools
import re
def data_load_home_credit(path):
"""
    Load the home credit dataset from 'path' and return train and test DataFrames.
    """
    data = pd.read_csv(path)  # e.g. 'dataset/home_with_missing.csv'
data = data.dropna(axis=0, subset=['TARGET'])
total = data.isnull().sum().sort_values(ascending = False)
percent = (data.isnull().sum()/data.isnull().count()*100).sort_values(ascending = False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
feature_name = missing_data[missing_data['Percent'] > 10]
data.drop(list(feature_name.index), axis=1, inplace=True)
total = data.isnull().sum().sort_values(ascending = False)
percent = (data.isnull().sum()/data.isnull().count()*100).sort_values(ascending = False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
feature_name = missing_data[missing_data['Percent'] != 0]
data = data.dropna(axis=0, subset=list(feature_name.index))
data = data.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))
Y = data['TARGET']
X = data
train, test, __, __ = train_test_split(X, Y, test_size=0.10, random_state=42)
return train, test
def plot_confusion_matrix(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
plt.show()
def find_optimal_cutoff(target, predicted):
""" Find the optimal probability cutoff point for a classification model related to event rate
Parameters
----------
target : Matrix with dependent or target data, where rows are observations
predicted : Matrix with predicted data, where rows are observations
Returns
-------
list type, with optimal cutoff value
"""
fpr, tpr, threshold = roc_curve(target, predicted)
i = np.arange(len(tpr))
roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i)})
roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]]
return list(roc_t['threshold'])
def plot_auc(labels, pred):
"""
Plot auc plot against the labels and pred
"""
ns_probs = [0 for _ in range(len(labels))]
ns_auc = roc_auc_score(labels, ns_probs)
lr_auc = roc_auc_score(labels, pred)
print('Binary: ROC AUC=%.3f' % (lr_auc))
plt.figure(figsize=(5, 5))
ns_fpr, ns_tpr, th1 = roc_curve(labels, ns_probs)
lr_fpr, lr_tpr, th2 = roc_curve(labels, pred)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
plt.plot(lr_fpr, lr_tpr, marker='.', label='Binary Classification')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
    plt.show()
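if __name__ == '__main__':
    # Quick self-test on synthetic scores (illustrative only; the real
    # pipeline feeds model probabilities from the thesis dataset).
    rng = np.random.RandomState(42)
    demo_labels = rng.randint(0, 2, size=200)
    demo_scores = np.clip(0.6 * demo_labels + 0.5 * rng.rand(200), 0, 1)
    cutoff = find_optimal_cutoff(demo_labels, demo_scores)[0]
    print('Optimal probability cutoff: %.3f' % cutoff)
    plot_auc(demo_labels, demo_scores)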
|
{"hexsha": "73e021f7572832485684ffd424767ae891476b53", "size": 4229, "ext": "py", "lang": "Python", "max_stars_repo_path": "home_credit/common.py", "max_stars_repo_name": "ismaeelnawaz/msds19029_thesis", "max_stars_repo_head_hexsha": "83eac1aa22105404d0551bfa49be5b3e960a6aa2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "home_credit/common.py", "max_issues_repo_name": "ismaeelnawaz/msds19029_thesis", "max_issues_repo_head_hexsha": "83eac1aa22105404d0551bfa49be5b3e960a6aa2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "home_credit/common.py", "max_forks_repo_name": "ismaeelnawaz/msds19029_thesis", "max_forks_repo_head_hexsha": "83eac1aa22105404d0551bfa49be5b3e960a6aa2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7589285714, "max_line_length": 109, "alphanum_fraction": 0.6315913928, "include": true, "reason": "import numpy", "num_tokens": 1072}
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from brightics.common.datasets import load_iris
from brightics.function.regression import random_forest_regression_train, random_forest_regression_predict
import HtmlTestRunner
import os
class TestRandomForestRegression(unittest.TestCase):
def test_default(self):
df_iris = load_iris()
model_train = random_forest_regression_train(table=df_iris,
feature_cols=['sepal_length', 'sepal_width'],
label_col='petal_length',
n_estimators=10, criterion="mse",
max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0, max_features="None",
max_leaf_nodes=None, min_impurity_decrease=0, random_state=12345)['model']
df_feature_importance = model_train['feature_importance_table']
np.testing.assert_array_almost_equal([0.8419393152, 0.1580606848], [df_feature_importance.values[i][1] for i in range(2)], 10, 'incorrect feature_importance')
df_res = random_forest_regression_predict(table=df_iris, model=model_train, prediction_col='prediction')['out_table']
np.testing.assert_array_almost_equal([1.3975, 1.4200000000000002, 1.446, 1.45, 1.41],
df_res['prediction'].values[:5], 10, 'incorrect prediction')
def test_optional(self):
df_iris = load_iris()
model_train = random_forest_regression_train(table=df_iris,
feature_cols=['sepal_length', 'sepal_width', 'petal_length'],
label_col='petal_width',
n_estimators=20, criterion="mse",
max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0, max_features="None",
max_leaf_nodes=None, min_impurity_decrease=0, random_state=12345)['model']
df_feature_importance = model_train['feature_importance_table']
np.testing.assert_array_almost_equal([0.0201313834, 0.0233862213, 0.9564823953], [df_feature_importance.values[i][1] for i in range(3)], 10, 'incorrect feature_importance')
df_res = random_forest_regression_predict(table=df_iris, model=model_train, prediction_col='prediction')['out_table']
np.testing.assert_array_almost_equal([0.24708333333333332, 0.19000000000000009, 0.20000000000000004, 0.19166666666666674, 0.23875000000000002],
df_res['prediction'].values[:5], 10, 'incorrect prediction')
if __name__ == '__main__':
filepath = os.path.dirname(os.path.abspath(__file__))
    reportFolder = filepath + "/../../../../../../../reports"
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True, output=reportFolder))
|
{"hexsha": "e5846c27474b5bc85798194321dd09efcdbd7f52", "size": 4053, "ext": "py", "lang": "Python", "max_stars_repo_path": "function/python/brightics/function/regression/test/random_forest_regression_test.py", "max_stars_repo_name": "parkjh80/studio", "max_stars_repo_head_hexsha": "6d8d8384272e5e1b2838b12e5557272a19408e89", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 202, "max_stars_repo_stars_event_min_datetime": "2018-10-23T04:37:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T05:51:10.000Z", "max_issues_repo_path": "function/python/brightics/function/regression/test/random_forest_regression_test.py", "max_issues_repo_name": "data-weirdo/studio", "max_issues_repo_head_hexsha": "48852c4f097f773ce3d408b59f79fda2e2d60470", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 444, "max_issues_repo_issues_event_min_datetime": "2018-11-07T08:41:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T06:48:57.000Z", "max_forks_repo_path": "function/python/brightics/function/regression/test/random_forest_regression_test.py", "max_forks_repo_name": "data-weirdo/studio", "max_forks_repo_head_hexsha": "48852c4f097f773ce3d408b59f79fda2e2d60470", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 99, "max_forks_repo_forks_event_min_datetime": "2018-11-08T04:12:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T05:36:27.000Z", "avg_line_length": 58.7391304348, "max_line_length": 180, "alphanum_fraction": 0.5825314582, "include": true, "reason": "import numpy", "num_tokens": 790}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 16:07:58 2019
@author: Yaqiong Su
"""
import numpy as np

# Split POSCAR into a header ('head', coordinate mode forced to 'Cartesian')
# and the fractional coordinate block ('direct').
with open('POSCAR', 'r') as f1, open('head', 'w') as f2, open('direct', 'w') as f3:
    for i, line in enumerate(f1, start=1):
        if i < 7:
            f2.write(line)
        if i == 7:
            f2.write('Cartesian \n')
        if i > 8:
            f3.write(line)

# Extract the three lattice vectors (header lines 3-5).
with open('head', 'r') as f22, open('lattice', 'w') as f4:
    for i, line in enumerate(f22, start=1):
        if 2 < i < 6:
            f4.write(line)

direct = np.loadtxt('direct')
print("direct coordinate")
print(direct)
lattice = np.loadtxt('lattice')
print("lattice")
print(lattice)
# Fractional -> Cartesian: each row of 'direct' holds fractional coordinates
# and each row of 'lattice' holds a lattice vector, so cartesian = direct . lattice.
cartesian = np.dot(direct, lattice)
print("cartesian coordinate")
print(cartesian)
np.savetxt('cartesian', cartesian)

# Assemble the Cartesian POSCAR from the header and the converted coordinates.
with open('Cartesian_POSCAR', 'a') as f5:
    with open('head', 'r') as f:
        for lines in f.readlines():
            f5.write(lines)
    with open('cartesian', 'r') as f0:
        for lines in f0.readlines():
            f5.write(lines)

print("Cartesian_POSCAR")
with open('Cartesian_POSCAR', 'r') as f:
    for lines in f.readlines():
        print(lines)
|
{"hexsha": "42cebeaede80a4cf61d0ca0345c7374f090ccc8b", "size": 1600, "ext": "py", "lang": "Python", "max_stars_repo_path": "coordinate.py", "max_stars_repo_name": "YaqiongSu/transformation-between-direct-and-cartesian-coordinate-VASP", "max_stars_repo_head_hexsha": "30109019681af8ffa7f6d2ddaef909cc4c7c854b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-15T16:35:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-14T17:11:25.000Z", "max_issues_repo_path": "coordinate.py", "max_issues_repo_name": "YaqiongSu/transformation-between-direct-and-cartesian-coordinate-VASP", "max_issues_repo_head_hexsha": "30109019681af8ffa7f6d2ddaef909cc4c7c854b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coordinate.py", "max_forks_repo_name": "YaqiongSu/transformation-between-direct-and-cartesian-coordinate-VASP", "max_forks_repo_head_hexsha": "30109019681af8ffa7f6d2ddaef909cc4c7c854b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-03T18:25:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-03T18:25:02.000Z", "avg_line_length": 21.0526315789, "max_line_length": 41, "alphanum_fraction": 0.5475, "include": true, "reason": "import numpy", "num_tokens": 464}
|
! Last change: HO 27 May 2000 11:47 pm
MODULE mpeg
IMPLICIT NONE
PUBLIC
TYPE:: mpeg_parameters
INTEGER :: mtype
INTEGER :: layer
INTEGER :: ibit_rate
INTEGER :: isample_rate
INTEGER :: ipsychoacoustic
INTEGER :: iemphasis
INTEGER :: ipadding
INTEGER :: icrc
INTEGER :: mode
INTEGER :: iextension
INTEGER :: mode_extension
INTEGER :: icopyright
INTEGER :: ioriginal
END TYPE mpeg_parameters
!MPEG1 / audio
INTEGER, PARAMETER :: mpeg_frame_size(3) = (/1152, 1152, 384/)
INTEGER, PARAMETER :: mpeg_sample_rates(0:3) = (/44100, 48000, 32000, 0/)
INTEGER, PARAMETER :: mpeg_bit_rates(0:14, 3) = &
RESHAPE( (/ 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, &
0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, &
0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 /), &
(/15, 3/) )
CHARACTER (LEN = 8) :: mpeg_mode_names(4) = (/'stereo ', 'j-stereo', 'dual-ch ', 'mono '/)
CHARACTER (LEN = 3) :: mpeg_layer_names(3) = (/'III', 'II ', 'I '/)
CHARACTER (LEN = 7) :: mpeg_version_names(0:3) = (/'MPEG2.5', ' ', 'MPEG-II', 'MPEG-I '/)
CHARACTER (LEN = 6) :: mpeg_psy_names(4) = (/' ', 'Enoken', ' ', ' '/)
CHARACTER (LEN = 7) :: mpeg_demp_names(4) = (/'none   ', '50/15us', '       ', 'CCITT  '/)
!--------------------------------------------------------------------
END MODULE mpeg
|
{"hexsha": "ce8c2609fbff2aa6bee56dd702cbe13da70ea371", "size": 1447, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "mpeg.f90", "max_stars_repo_name": "cure-honey/uzura2", "max_stars_repo_head_hexsha": "2ab63fa966daa7e5331fcd3f33be4417bd6222ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-23T17:50:56.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-23T17:50:56.000Z", "max_issues_repo_path": "mpeg.f90", "max_issues_repo_name": "cure-honey/uzura2", "max_issues_repo_head_hexsha": "2ab63fa966daa7e5331fcd3f33be4417bd6222ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mpeg.f90", "max_forks_repo_name": "cure-honey/uzura2", "max_forks_repo_head_hexsha": "2ab63fa966daa7e5331fcd3f33be4417bd6222ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1944444444, "max_line_length": 99, "alphanum_fraction": 0.5459571527, "num_tokens": 550}
|
"""
routines to compute derivatives of spherical functions
"""
import numpy as np
def d_r_dx(x, y):
    """
    derivative of r with respect to x
    :param x:
    :param y:
    :return:
    """
    return x / np.sqrt(x**2 + y**2)


def d_r_dy(x, y):
    """
    derivative of r with respect to y
    :param x:
    :param y:
    :return:
    """
    return y / np.sqrt(x**2 + y**2)


def d_x_diffr_dx(x, y):
    """
    derivative of x/r with respect to x
    :param x:
    :param y:
    :return:
    """
    return y**2 / (x**2 + y**2)**(3/2.)


def d_y_diffr_dy(x, y):
    """
    derivative of y/r with respect to y
    :param x:
    :param y:
    :return:
    """
    return x**2 / (x**2 + y**2)**(3/2.)


def d_y_diffr_dx(x, y):
    """
    derivative of y/r with respect to x
    :param x:
    :param y:
    :return:
    """
    return -x*y / (x**2 + y**2)**(3/2.)


def d_x_diffr_dy(x, y):
    """
    derivative of x/r with respect to y
    :param x:
    :param y:
    :return:
    """
    return -x*y / (x**2 + y**2)**(3/2.)
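
# Illustrative sanity check (added for exposition; the test point and
# tolerance are arbitrary): compare d_r_dx with a central finite difference.
if __name__ == "__main__":
    x0, y0, h = 1.0, 2.0, 1e-6
    r = lambda a, b: np.sqrt(a**2 + b**2)
    fd = (r(x0 + h, y0) - r(x0 - h, y0)) / (2 * h)
    assert abs(d_r_dx(x0, y0) - fd) < 1e-8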
|
{"hexsha": "9d4f232e2f067c59df83436b583a50c8d541175b", "size": 951, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrofunc/LensingProfiles/calc_util.py", "max_stars_repo_name": "LBJ-Wade/astrofunc_lensing_profile", "max_stars_repo_head_hexsha": "d2223705bc44d07575a5e93291375ab8e69ebfa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-08T12:33:26.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-08T12:33:26.000Z", "max_issues_repo_path": "astrofunc/LensingProfiles/calc_util.py", "max_issues_repo_name": "LBJ-Wade/astrofunc_lensing_profile", "max_issues_repo_head_hexsha": "d2223705bc44d07575a5e93291375ab8e69ebfa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "astrofunc/LensingProfiles/calc_util.py", "max_forks_repo_name": "LBJ-Wade/astrofunc_lensing_profile", "max_forks_repo_head_hexsha": "d2223705bc44d07575a5e93291375ab8e69ebfa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-25T18:55:26.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-25T18:55:26.000Z", "avg_line_length": 14.6307692308, "max_line_length": 54, "alphanum_fraction": 0.468980021, "include": true, "reason": "import numpy", "num_tokens": 340}
|
\section{Privacy Preserving Voting}
\todo{
Consider the liquid-democracy requirement that individual voters' votes remain private while delegates' votes are public. This can be achieved by blinding the individual votes while still allowing a final tally to be computed.
}
|
{"hexsha": "e8c4582913cf11594d15d0546d074579e7657b28", "size": 244, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "papers/working-document/privacy.tex", "max_stars_repo_name": "MitchellTesla/decentralized-software-updates", "max_stars_repo_head_hexsha": "89f5873f82c0ff438e2cd3fff83cc030a46e29da", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-01-25T19:38:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T02:08:38.000Z", "max_issues_repo_path": "papers/working-document/privacy.tex", "max_issues_repo_name": "MitchellTesla/decentralized-software-updates", "max_issues_repo_head_hexsha": "89f5873f82c0ff438e2cd3fff83cc030a46e29da", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 120, "max_issues_repo_issues_event_min_datetime": "2019-03-06T18:29:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-24T10:20:09.000Z", "max_forks_repo_path": "papers/working-document/privacy.tex", "max_forks_repo_name": "MitchellTesla/decentralized-software-updates", "max_forks_repo_head_hexsha": "89f5873f82c0ff438e2cd3fff83cc030a46e29da", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-07-18T13:38:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-16T10:39:00.000Z", "avg_line_length": 61.0, "max_line_length": 199, "alphanum_fraction": 0.8155737705, "num_tokens": 48}
|
A popular snack to munch on while watching movies.
|
{"hexsha": "90dbd3b81388c99694558415ab008926279ad162", "size": 50, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Popcorn.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Popcorn.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Popcorn.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.5, "max_line_length": 47, "alphanum_fraction": 0.78, "num_tokens": 10}
|
#include <boost/mpl/aux_/preprocessed/no_ttp/quote.hpp>
|
{"hexsha": "165ad44d5f598b55102ff9a60f3a25e222a372f3", "size": 56, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_mpl_aux__preprocessed_no_ttp_quote.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_mpl_aux__preprocessed_no_ttp_quote.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_mpl_aux__preprocessed_no_ttp_quote.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 28.0, "max_line_length": 55, "alphanum_fraction": 0.8035714286, "num_tokens": 17}
|
## GeomUtils.jl --- constructs for geometry

## imports needed when this file is used standalone
using LinearAlgebra: norm, ⋅, ×
using StaticArrays: StaticVector

export distance, anglespan, dihedral
export centroid, RMSD
"""
distance(a, b) -> Real
Calculate Euclidean distance from `a` to `b`.
"""
distance(a, b) = norm(a-b)
"""
anglespan(a, b, c) ∈ [0, π]
Calculate smaller angle between two vectors ``AB`` and ``BC`` meeting at ``B``.
"""
function anglespan(a, b, c)
    Δ1 = a - b; n1 = norm(Δ1)
    Δ2 = c - b; n2 = norm(Δ2)
    (n1 == 0 || n2 == 0) && error("two positions overlap")
    cos_ = Δ1⋅Δ2 / (n1 * n2)
    cos_ >= 1 && return 0    ## clamp values pushed outside [-1, 1] by roundoff
    cos_ <= -1 && return π
    acos(cos_)
end
"""
dihedral(a, b, c, d) ∈ [-π, π]
Calculate right handed dihedral (torsion) angle between the plane ``ABC`` and
``BCD`` intersecting along ``BC``. ``ABC`` is determined by the first three
arguments as points, while ``BCD`` is determined by the last three.
"""
function dihedral(a, b, c, d)
    Δ1 = a - b
    Δ2 = b - c; n2 = norm(Δ2)
    Δ3 = d - c
    n2 == 0 && error("torque axis is undefined; `b`, `c` overlap")
    Δ23 = Δ2×Δ3
    Δ21 = Δ2×Δ1
    (norm(Δ23) == 0 || norm(Δ21) == 0) && error("some points overlap or are collinear")
    atan(Δ23⋅(Δ21×Δ2) / n2, Δ23⋅Δ21)
end
"""
centroid(collection) -> eltype(collection)
Get the centroid of a collection of points. The preferred point representation
is a subtype of `StaticVector`. This centroid is an _unweighted_ average of
positions.
"""
function centroid(collection::Vector{<:StaticVector})
    sum(collection) / length(collection)
end
"""
centered(collection) -> centered positions
Get a copy of the centered positions in collection. The preferred point
representation is a subtype of `StaticVector`.
Equivalent to `collection .- centroid(collection)`.
"""
centered(collection) = collection .- centroid(collection)
"""
RMSD(cA, cB) -> Real
Calculate root-mean-square deviation of points in `cA` from those in `cB`.
Preferred point representation is a subtype of `StaticVector`.
"""
function RMSD(cA::Vector{M}, cB::Vector{M}) where M <: StaticVector
    len = length(cA)
    @assert length(cB) == len "arguments don't have the same length"
    ## mean of the squared deviations, then the square root
    msd = sum(zip(cA, cB); init=zero(eltype(M))) do (a, b)
        abs2(distance(a, b))
    end / len
    sqrt(msd)
end
|
{"hexsha": "b9239d18d4c5d5b8d73e21c6af3bb55e7adc0613", "size": 2295, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/GeomUtils.jl", "max_stars_repo_name": "bldamalla/ProtStructs.jl", "max_stars_repo_head_hexsha": "98f73e61d123b8f319c597368f00b4b83a416d62", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/GeomUtils.jl", "max_issues_repo_name": "bldamalla/ProtStructs.jl", "max_issues_repo_head_hexsha": "98f73e61d123b8f319c597368f00b4b83a416d62", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-27T10:43:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T10:43:37.000Z", "max_forks_repo_path": "src/GeomUtils.jl", "max_forks_repo_name": "bldamalla/ProtStructs.jl", "max_forks_repo_head_hexsha": "98f73e61d123b8f319c597368f00b4b83a416d62", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6860465116, "max_line_length": 80, "alphanum_fraction": 0.6379084967, "num_tokens": 720}
|
#!/usr/bin/env python
import cv2
import numpy as np
import rospy
import math
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import CompressedImage, Image
def crop_line_image(image_raw):
    OK = 20
    CK = 20
    hough_thresh = 220
    hough_min_line = 100
    hough_max_gap = 100
    col = 100
    row = 500
    image = image_raw[col:col + 600, row:row + 400]
    # blur the image to get rid of some of that noise
    blur = cv2.GaussianBlur(image, (5, 5), 10)
    # break image into blue, green, red
    b, g, r = cv2.split(blur)
    # increase the amount of green relative to red and blue
    I = 2 * g - r - b
    # define the kernels for opening and closing
    open_k = np.ones((OK, OK), np.uint8)
    close_k = np.ones((CK, CK), np.uint8)
    # close the image to get rid of the small noise
    closed = cv2.morphologyEx(I, cv2.MORPH_CLOSE, close_k)
    # open to fill in any gaps
    opened = cv2.morphologyEx(closed, cv2.MORPH_OPEN, open_k)
    # threshold to binary with Otsu
    rt, th = cv2.threshold(opened, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # invert because Hough looks for 255
    th = 255 - th
    # copy the image so the Hough lines are not drawn on the original
    hough_image = image.copy()
    # find the Hough lines using the PPHT method
    lines = cv2.HoughLinesP(image=th, rho=1, theta=np.pi / 100, threshold=hough_thresh,
                            minLineLength=hough_min_line, maxLineGap=hough_max_gap)
    if lines is not None:
        for x in range(0, len(lines)):
            count = 0
            for x1, y1, x2, y2 in lines[x]:
                angle = math.atan2(y1 - y2, x1 - x2) * (180 / math.pi)
                # keep only the roughly vertical lines (the crop rows)
                if 135 > angle > 45:
                    cv2.line(hough_image, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    count += 1
        rospy.loginfo('Lines %d', len(lines))
    else:
        rospy.logwarn('No lines detected')
    return hough_image


class ImgProc(object):
    def __init__(self):
        self.img_pub = rospy.Publisher('crop_rows', Image, queue_size=1)
        self.img_sub = rospy.Subscriber('camera/image_color/compressed', CompressedImage, self.crop_image_cb)
        self.bridge = CvBridge()

    def crop_image_cb(self, data):
        try:
            img = self.bridge.compressed_imgmsg_to_cv2(data)
            lines_img = crop_line_image(img)
            if lines_img is not None:
                self.img_pub.publish(self.bridge.cv2_to_imgmsg(lines_img, 'bgr8'))
            else:
                rospy.loginfo('No Image')
        except CvBridgeError as e:
            print(e)


def driver():
    rospy.init_node('crop_row_follow', anonymous=True)
    ImgProc()
    while not rospy.is_shutdown():
        rospy.spin()


if __name__ == '__main__':
    try:
        driver()
    except rospy.ROSInterruptException:
        pass
|
{"hexsha": "690ab22d21b21af36e00cf2f29db39b1878daf86", "size": 2851, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/crop_row_follow.py", "max_stars_repo_name": "sonyccd/crop_row_follow", "max_stars_repo_head_hexsha": "26ee58804d0b569608da4b825c3b14c227cfd245", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-12-18T23:57:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-17T19:31:43.000Z", "max_issues_repo_path": "scripts/crop_row_follow.py", "max_issues_repo_name": "UGA-AgRobotics/crop_row_follow", "max_issues_repo_head_hexsha": "26ee58804d0b569608da4b825c3b14c227cfd245", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-03-21T18:20:07.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-25T15:06:26.000Z", "max_forks_repo_path": "scripts/crop_row_follow.py", "max_forks_repo_name": "UGA-AgRobotics/crop_row_follow", "max_forks_repo_head_hexsha": "26ee58804d0b569608da4b825c3b14c227cfd245", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-07-14T16:22:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-24T01:26:21.000Z", "avg_line_length": 31.3296703297, "max_line_length": 117, "alphanum_fraction": 0.6110136794, "include": true, "reason": "import numpy", "num_tokens": 795}
|
"Split a subspace into two"
split(ss::UInt64) = hash((ss, 0)), hash((ss, 1))
|
{"hexsha": "b8150b2996449861ec57bacc2f51d1cb1d5bfc6d", "size": 76, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "OmegaCore/src/space/subspace.jl", "max_stars_repo_name": "zenna/expect", "max_stars_repo_head_hexsha": "48bd661df410777eeb8940876a5cc8817eed2ac5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OmegaCore/src/space/subspace.jl", "max_issues_repo_name": "zenna/expect", "max_issues_repo_head_hexsha": "48bd661df410777eeb8940876a5cc8817eed2ac5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OmegaCore/src/space/subspace.jl", "max_forks_repo_name": "zenna/expect", "max_forks_repo_head_hexsha": "48bd661df410777eeb8940876a5cc8817eed2ac5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0, "max_line_length": 48, "alphanum_fraction": 0.6315789474, "num_tokens": 29}
|
\chapter{Abstract}
Here goes the abstract, a summary of your thesis work. You may add some keywords at the end that clearly identify the research field of the thesis. The abstract should not contain any reference to related works.
\paragraph{Keywords} keyword1; keyword2; keyword3
|
{"hexsha": "b6922dacf4724bc1a551a989ac732888642af9ec", "size": 282, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapters/Abstract.tex", "max_stars_repo_name": "ste7en/master-thesis-template-polimi", "max_stars_repo_head_hexsha": "b56d8dd68cf80061d92e9cf783248b31ecbae0d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-11T13:32:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T22:54:07.000Z", "max_issues_repo_path": "Chapters/Abstract.tex", "max_issues_repo_name": "ste7en/master-thesis-template-polimi", "max_issues_repo_head_hexsha": "b56d8dd68cf80061d92e9cf783248b31ecbae0d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-29T21:08:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-29T21:08:58.000Z", "max_forks_repo_path": "Chapters/Abstract.tex", "max_forks_repo_name": "ste7en/master-thesis-template-polimi", "max_forks_repo_head_hexsha": "b56d8dd68cf80061d92e9cf783248b31ecbae0d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-29T21:09:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-29T21:09:36.000Z", "avg_line_length": 56.4, "max_line_length": 210, "alphanum_fraction": 0.8014184397, "num_tokens": 60}
|
\section*{Publication Data}
\textcopyright Copyright The Rexx Language Association, 2011-\splice{java TexYear}\\
All original material in this publication is published under the Creative Commons - Share Alike 3.0 License as stated at \url{http://creativecommons.org/licenses/by-nc-sa/3.0/us/legalcode}.\\[0.5cm]
The responsible publisher of this edition is identified as \emph{IBizz IT Services and Consultancy}, Amsteldijk 14, 1074 HR Amsterdam, a registered company governed by the laws of the Kingdom of The Netherlands.\\[1cm]
This edition is registered under ISBN \isbn \\[1cm]
\psset{unit=1in}
\begin{pspicture}(3.5,1in)
\psbarcode{\isbn}{includetext guardwhitespace}{isbn}
\end{pspicture}
\newpage
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
|
{"hexsha": "166bce9afbbc3866201228c2d0bf6079ce923928", "size": 775, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "documentation/boilerplate/bookmeta.tex", "max_stars_repo_name": "RexxLA/NetRexx", "max_stars_repo_head_hexsha": "ec27b6e3f908fbc50cb6dc54696daea68ae59103", "max_stars_repo_licenses": ["ICU"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "documentation/boilerplate/bookmeta.tex", "max_issues_repo_name": "RexxLA/NetRexx", "max_issues_repo_head_hexsha": "ec27b6e3f908fbc50cb6dc54696daea68ae59103", "max_issues_repo_licenses": ["ICU"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2022-01-24T12:13:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T16:14:50.000Z", "max_forks_repo_path": "documentation/boilerplate/bookmeta.tex", "max_forks_repo_name": "RexxLA/NetRexx", "max_forks_repo_head_hexsha": "ec27b6e3f908fbc50cb6dc54696daea68ae59103", "max_forks_repo_licenses": ["ICU"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.4375, "max_line_length": 218, "alphanum_fraction": 0.7612903226, "num_tokens": 217}
|
import numpy as np
from hw2 import Transform, Translation, Rotation, Base
from hw3 import Quaternion, link, symLink
import matplotlib.pyplot as plt
from simulation import Simulation
import sympy
from pprint import pprint
from gradient import gradientVariable, forwardVariable
print("Q1")
t01 = Transform(rot=Rotation(180, 0), loc=Translation(-8, 6, 2))
tan68 = np.arctan(6 / 8) * 180 / np.pi
t13 = Transform(rot=Rotation(90 + tan68, 0) * Rotation(-90, 1),
loc=Translation(0, 6, -8))
t03 = t01 * t13
print("T01", t01)
print("T13", t13)
print("T03", t03)
t31 = t13.T()
print("T31", t31)
q = Quaternion.fromRotationMat(t31.rot)
print("Quaternion", q)
angle, raxis = q.getRotationParam()
print("Rotation axis", raxis)
print("Rotation angle", angle)
print("Q2")
t1, t2, d3, t4, t5, t6, l1 = sympy.symbols("t1, t2, d3, t4, t5, t6, l1")
linkparam = [( 0, 0, t1, 0),
(-90, 0, t2, l1),
( 90, 0, 0, d3),
( 0, 0, t4, 0),
(-90, 0, t5, 0),
( 90, 0, t6, 0)]
T = sympy.Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
for p in linkparam:
    T = T * symLink(*p)
pprint(T)
print("Q3")
tan68 = np.arctan(6 / 8) * 180 / np.pi
t_fp = Transform(rot=Rotation(90 - tan68, 1)) * \
Transform(loc=Translation(50, 35, 0))
print("t_fp", t_fp)
t_cf = Transform(rot=Rotation(90, 0), loc=Translation(0, 0, 65))
print("t_cf", t_cf)
t_bc = Transform(rot=Rotation(90, 0), loc=Translation(0, 100, 100))
t_bp = t_bc * t_cf * t_fp
print("t_bp", t_bp)
linkparam = [(90, 0, 90, 0),
( 0, 20, None, 0),
( 0, 100, None, 0),
( 0, 100, None, 0),
( 0, 60, 90, 0),
(90, 0, 0, 0)]
init_angle = np.array([44, 283, 264]) / 180 * np.pi
th = gradientVariable(t_bp.mat, linkparam, init_angle)
t = forwardVariable(th, linkparam)
print("Angle", th)
print("Real", t_bp)
print("Calculated", t)
if True:
    # test by simulation
    sim = Simulation(linkparam, 200)
    angle = [90, 0, 0, 0, 90, 0]
    sim.sim(angle)
    angle[1:4] = th
    sim.sim(angle)
    sim.runAnimation(repeat=False)
print("Q4")
r, a, d = sympy.symbols("r, a, d")
pi = sympy.pi
linkparam = [( 0, 0, r, 0),
( 0, a, 0, 0),
(-90, 0, 0, d)]
T01 = symLink(*linkparam[0])
T12 = symLink(*linkparam[1]) * symLink(*linkparam[2])
T02 = T01 * T12
print("T01")
pprint(T01)
print("T12")
pprint(T12)
print("T02")
pprint(T02)
|
{"hexsha": "16441702e9b3fe3112f4bf9f5ae6866d423023ba", "size": 2482, "ext": "py", "lang": "Python", "max_stars_repo_path": "midterm.py", "max_stars_repo_name": "linnil1/2020Robotics", "max_stars_repo_head_hexsha": "fdb18abe020f7123b5112dac9b49deb51850f054", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "midterm.py", "max_issues_repo_name": "linnil1/2020Robotics", "max_issues_repo_head_hexsha": "fdb18abe020f7123b5112dac9b49deb51850f054", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "midterm.py", "max_forks_repo_name": "linnil1/2020Robotics", "max_forks_repo_head_hexsha": "fdb18abe020f7123b5112dac9b49deb51850f054", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3333333333, "max_line_length": 72, "alphanum_fraction": 0.5733279613, "include": true, "reason": "import numpy,import sympy", "num_tokens": 954}
|
# Copyright 2018-2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module contains the :func:`about` function to display all the details of the PennyLane installation,
e.g., OS, version, `Numpy` and `Scipy` versions, installation method.
"""
import platform
import importlib
import sys
from pkg_resources import iter_entry_points
import numpy
import scipy
# The following if/else block enables support for pip versions 19.3.x
_parent_module = importlib.util.find_spec("pip._internal.main") or importlib.util.find_spec(
"pip._internal"
)
_internal_main = importlib.util.module_from_spec(_parent_module)
_parent_module.loader.exec_module(_internal_main)
def about():
    """
    Prints the information about the PennyLane installation.
    """
    plugin_devices = iter_entry_points("pennylane.plugins")
    _internal_main.main(["show", "pennylane"])
    print("Platform info: {}".format(platform.platform(aliased=True)))
    print("Python version: {0}.{1}.{2}".format(*sys.version_info[0:3]))
    print("Numpy version: {}".format(numpy.__version__))
    print("Scipy version: {}".format(scipy.__version__))
    print("Installed devices:")
    for d in plugin_devices:
        print("- {} ({}-{})".format(d.name, d.dist.project_name, d.dist.version))


if __name__ == "__main__":
    about()
|
{"hexsha": "1d05117bc5e3068029200857ca3898be613dc137", "size": 1873, "ext": "py", "lang": "Python", "max_stars_repo_path": "pennylane/about.py", "max_stars_repo_name": "InduManimaran/pennylane", "max_stars_repo_head_hexsha": "375d25acc7bd2e6d5243b5273958b26513c33189", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-21T01:56:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T01:56:37.000Z", "max_issues_repo_path": "pennylane/about.py", "max_issues_repo_name": "InduManimaran/pennylane", "max_issues_repo_head_hexsha": "375d25acc7bd2e6d5243b5273958b26513c33189", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-09-25T21:04:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:39:53.000Z", "max_forks_repo_path": "pennylane/about.py", "max_forks_repo_name": "InduManimaran/pennylane", "max_forks_repo_head_hexsha": "375d25acc7bd2e6d5243b5273958b26513c33189", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-21T18:49:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-21T18:49:07.000Z", "avg_line_length": 36.0192307692, "max_line_length": 105, "alphanum_fraction": 0.7229044314, "include": true, "reason": "import numpy,import scipy", "num_tokens": 432}
|
<!-- dom:TITLE: Demo - 3D Poisson's equation -->
# Demo - 3D Poisson's equation
<!-- dom:AUTHOR: Mikael Mortensen Email:mikaem@math.uio.no at Department of Mathematics, University of Oslo. -->
<!-- Author: -->
**Mikael Mortensen** (email: `mikaem@math.uio.no`), Department of Mathematics, University of Oslo.
Date: **April 13, 2018**
**Summary.** This is a demonstration of how the Python module [shenfun](https://github.com/spectralDNS/shenfun) can be used to solve a 3D Poisson
equation in a 3D tensor product domain that has homogeneous Dirichlet boundary
conditions in one direction and periodicity in the
remaining two. The solver described runs with MPI without any further
considerations required from the user. Spectral convergence, as shown in [Figure](#fig:3d:ct0), is demonstrated.
The demo is implemented in
a single Python file [dirichlet_poisson3D.py](https://github.com/spectralDNS/shenfun/blob/master/demo/dirichlet_poisson3D.py), and the numerical method is described in more detail by J. Shen [[shen1]](#shen1) and [[shen95]](#shen95).
<!-- dom:FIGURE: [https://rawgit.com/spectralDNS/spectralutilities/master/figures/poisson3D_errornorm.png] Convergence of 3D Poisson solvers for both Legendre and Chebyshev modified basis function. <a id="fig:3d:ct0"></a> -->
<!-- begin figure -->
<a id="fig:3d:ct0"></a>
<p>Convergence of 3D Poisson solvers for both Legendre and Chebyshev modified basis functions.</p>
<!-- end figure -->
## Poisson's equation
<a id="demo:poisson3d"></a>
Poisson's equation is given as
<!-- Equation labels as ordinary links -->
<a id="eq:3d:poisson"></a>
$$
\begin{equation}
\nabla^2 u(\boldsymbol{x}) = f(\boldsymbol{x}) \quad \text{for }\, \boldsymbol{x}=(x, y, z) \in \Omega, \label{eq:3d:poisson} \tag{1}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto1"></a>
$$
\begin{equation}
u(\pm 1 ,y, z) =0,
\label{_auto1} \tag{2}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto2"></a>
$$
\begin{equation}
u(x, 2\pi, z) = u(x, 0, z),
\label{_auto2} \tag{3}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto3"></a>
$$
\begin{equation}
u(x, y, 2\pi) = u(x, y, 0),
\label{_auto3} \tag{4}
\end{equation}
$$
where $u(\boldsymbol{x})$ is the solution and $f(\boldsymbol{x})$ is a function. The domain
$\Omega = [-1, 1]\times [0, 2\pi]^2$.
To solve Eq. ([1](#eq:3d:poisson)) with the Galerkin method we need smooth basis
functions, $v(\boldsymbol{x})$, that live
in the Hilbert space $H^1(\Omega)$ and that satisfy the given boundary
conditions. To this end we will use one basis function for the $x$-direction,
$\mathcal{X}(x)$,
one for the $y$-direction, $\mathcal{Y}(y)$, and one for the $z$-direction,
$\mathcal{Z}(z)$. And
then we create three-dimensional basis functions like
$$
v(x, y, z) = \mathcal{X}(x) \mathcal{Y}(y) \mathcal{Z} (z).
$$
The basis functions $\mathcal{Y}(y)$ and $\mathcal{Z}(z)$ are chosen as Fourier exponentials, since these
functions are periodic. Likewise, the basis functions $\mathcal{X}(x)$ are chosen as
modified Legendre or Chebyshev polynomials, using $\phi_l(x)$ to refer to either
one
<!-- Equation labels as ordinary links -->
<a id="_auto4"></a>
$$
\begin{equation}
\mathcal{X}_l(x) = \phi_l(x) - \phi_{l+2}(x), \forall \, l \in \boldsymbol{l}^{N_0},
\label{_auto4} \tag{5}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto5"></a>
$$
\begin{equation}
\mathcal{Y}_m(y) = e^{\imath m y}, \forall \, m \in \boldsymbol{m}^{N_1},
\label{_auto5} \tag{6}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto6"></a>
$$
\begin{equation}
\mathcal{Z}_n(z) = e^{\imath n z}, \forall \, n \in \boldsymbol{n}^{N_2},
\label{_auto6} \tag{7}
\end{equation}
$$
where the size of the discretized problem is $\boldsymbol{N} = (N_0, N_1, N_2)$,
$\boldsymbol{l}^{N_0} = (0, 1, \ldots, N_0-3)$, $\boldsymbol{m}^{N_1} =
(-N_1/2, -N_1/2+1, \ldots, N_1/2-1)$ and $\boldsymbol{n}^{N_2} = (-N_2/2, -N_2/2+1,
\ldots, N_2/2-1)$. However, due to [Hermitian symmetry](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.fft.rfft.html#numpy.fft.rfft), we only store $N_2/2+1$
wavenumbers in the $z$-direction, such that $\boldsymbol{n}^{N_2} = (0, 1, \ldots,
N_2/2)$. We refer to the Cartesian wavenumber mesh on vector form as $\boldsymbol{k}$:
$$
\boldsymbol{k} = \{(l, m, n)\, | \,(l, m, n) \in \boldsymbol{l}^{N_0} \times \boldsymbol{m}^{N_1} \times \boldsymbol{n}^{N_2}\}.
$$
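For concreteness, these index sets can be sketched with NumPy (illustrative only;
`shenfun` constructs them internally, and the periodic sets follow the FFT ordering):
```Python
import numpy as np
N0, N1, N2 = 14, 15, 16
l = np.arange(N0 - 2)                         # (0, 1, ..., N0-3)
m = np.fft.fftfreq(N1, 1.0/N1).astype(int)    # (-N1/2, ..., N1/2-1) in FFT order
n = np.fft.rfftfreq(N2, 1.0/N2).astype(int)   # (0, 1, ..., N2/2) by Hermitian symmetry
```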
We have the one-dimensional spaces
<!-- Equation labels as ordinary links -->
<a id="_auto7"></a>
$$
\begin{equation}
V^{N_0} = \text{span}\{ \mathcal{X}_l \}_{l\in\boldsymbol{l}^{N_0}},
\label{_auto7} \tag{8}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto8"></a>
$$
\begin{equation}
V^{N_1} = \text{span}\{ \mathcal{Y}_m \}_{m\in\boldsymbol{m}^{N_1}},
\label{_auto8} \tag{9}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto9"></a>
$$
\begin{equation}
V^{N_2} = \text{span}\{ \mathcal{Z}_n \}_{n\in\boldsymbol{n}^{N_2}},
\label{_auto9} \tag{10}
\end{equation}
$$
and from these we create a tensor product space $W^{\boldsymbol{N}}(\boldsymbol{x})$
<!-- Equation labels as ordinary links -->
<a id="_auto10"></a>
$$
\begin{equation}
W^{\boldsymbol{N}}(\boldsymbol{x}) = V^{N_0}(x) \otimes V^{N_1}(y) \otimes V^{N_2}(z).
\label{_auto10} \tag{11}
\end{equation}
$$
And then we look for discrete solutions $u \in W^{\boldsymbol{N}}$ like
<!-- Equation labels as ordinary links -->
<a id="eq:3d:u"></a>
$$
\begin{equation}
u(\boldsymbol{x}) = \sum_{l\in \boldsymbol{l}^{N_0}} \sum_{m\in \boldsymbol{m}^{N_1}}\sum_{n\in
\boldsymbol{n}^{N_2}}\hat{u}_{lmn} \mathcal{X}_l(x) \mathcal{Y}_m(y) \mathcal{Z}_n(z), \label{eq:3d:u} \tag{12}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="_auto11"></a>
$$
\begin{equation}
= \sum_{\boldsymbol{\textsf{k}} \in \boldsymbol{k}}\hat{u}_{\boldsymbol{\textsf{k}}} v_{\boldsymbol{\textsf{k}}}(\boldsymbol{x}),
\label{_auto11} \tag{13}
\end{equation}
$$
where $\hat{u}_{lmn}$ are components of the expansion coefficients for $u$ and
the second form, $\{\hat{u}_{\boldsymbol{\textsf{k}}}\}_{\boldsymbol{\textsf{k}}\in\boldsymbol{k}}$, is a shorter,
simplified notation, with sans-serif $\boldsymbol{\textsf{k}}=(l, m, n)$.
The expansion coefficients are the unknowns in the spectral Galerkin method.
We now formulate a variational problem using the Galerkin method: Find $u \in
W^{\boldsymbol{N}}$ such that
<!-- Equation labels as ordinary links -->
<a id="eq:3d:varform"></a>
$$
\begin{equation}
\int_{\Omega} \nabla^2 u \, \overline{v} \, w\, \boldsymbol{dx} = \int_{\Omega} f \,
\overline{v}\, w\, \boldsymbol{dx} \quad
\forall v \, \in \, W^{\boldsymbol{N}}. \label{eq:3d:varform} \tag{14}
\end{equation}
$$
Here $\boldsymbol{dx}=dxdydz$, and the overline represents a complex conjugate, which is needed here because
the Fourier exponentials are complex functions.
The weighted integrals, weighted by $w(\boldsymbol{x})$, are called inner products, and a common notation is
<!-- Equation labels as ordinary links -->
<a id="_auto12"></a>
$$
\begin{equation}
\int_{\Omega} u \, \overline{v} \, w\, \boldsymbol{dx} = \langle u, v\rangle _w.
\label{_auto12} \tag{15}
\end{equation}
$$
The integral can either be computed exactly, or with quadrature. The advantage
of the latter is that it is generally faster, and that non-linear terms may be
computed just as quickly as linear. For a linear problem, it does not make much of a difference, if any at all. Approximating the integral with quadrature, we obtain
$$
\begin{align*}
\int_{\Omega} u \, \overline{v} \, w\, \boldsymbol{dx} &\approx \langle u, v
\rangle_w^{\boldsymbol{N}}, \\
&\approx \sum_{i=0}^{N_0-1} \sum_{j=0}^{N_1-1}\sum_{k=0}^{N_2-1} u(x_i, y_j, z_k) \overline{v}(x_i, y_j, z_k) w(x_i, y_j, z_k),
\end{align*}
$$
where $w(\boldsymbol{x})$ now are the quadrature weights. The quadrature points
$\{x_i\}_{i=0}^{N_0-1}$ are specific to the chosen basis, and even within a basis there
are two different choices depending on which quadrature rule is selected, either
Gauss or Gauss-Lobatto. The quadrature points for the Fourier bases are the
uniform $\{y_j\}_{j=0}^{N_1-1}=2\pi j / N_1$ and $\{z_k\}_{k=0}^{N_2-1} = 2 \pi
k/N_2$.
Inserting the trial function ([12](#eq:3d:u)) and the test function
$v_{pqr} = \mathcal{X}_{p} \mathcal{Y}_q \mathcal{Z}_r$ on the
left hand side of ([14](#eq:3d:varform)), we get
$$
\begin{align*}
\langle \nabla^2u, v \rangle_w^{\boldsymbol{N}} &= \left\langle \nabla^2\sum_{l\in \boldsymbol{l}^{N_0}}
\sum_{m\in \boldsymbol{m}^{N_1}}\sum_{n\in \boldsymbol{n}^{N_2}}\hat{u}_{lmn}
\mathcal{X}_{l} \mathcal{Y}_m \mathcal{Z}_n,
\mathcal{X}_{p} \mathcal{Y}_q \mathcal{Z}_r \right\rangle_w^{\boldsymbol{N}}, \\
&= \left[\left(\mathcal{X}_l^{''}, \mathcal{X}_p \right)_w^N - (m^2+n^2)\left(\mathcal{X}_l, \mathcal{X}_p \right)_w^N \right]\delta_{mq} \delta_{nr} \hat{u}_{lmn}, \\
&= \left( a_{pl} - (m^2 + n^2)b_{pl}\right) \hat{u}_{lqr},
\end{align*}
$$
where the notation $(\cdot, \cdot)_w^{N_0}$
<!-- Equation labels as ordinary links -->
<a id="_auto13"></a>
$$
\begin{equation}
b_{pl} = \left( \mathcal{X}_l, \mathcal{X}_p \right)_w^{N_0} = \sum_{i=0}^{N_0-1} \mathcal{X}_l(x_i)
\mathcal{X}_p(x_i) w(x_i),
\label{_auto13} \tag{16}
\end{equation}
$$
is used to represent an $L_2$ inner product along only the first, nonperiodic,
direction. The delta functions above come from integrating over the two periodic
directions, where we use constant weight functions $w=1/(2\pi)$ in the
inner products
<!-- Equation labels as ordinary links -->
<a id="eq:delta0"></a>
$$
\begin{equation}
\int_0^{2\pi} \mathcal{Y}_m(y) \overline{\mathcal{Y}}_q(y) \frac{1}{2\pi} dy = \delta_{mq}, \label{eq:delta0} \tag{17}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<a id="eq:delta1"></a>
$$
\begin{equation}
\int_0^{2\pi} \mathcal{Z}_n(z) \overline{\mathcal{Z}}_r(z) \frac{1}{2\pi} dz = \delta_{nr}. \label{eq:delta1} \tag{18}
\end{equation}
$$
The Kronecker delta-function $\delta_{ij}$ is one for $i=j$ and
zero otherwise.
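On the uniform points $y_j = 2\pi j/N_1$ the weighted integral reduces to a plain
average, so the orthogonality relation ([17](#eq:delta0)) is easy to verify
numerically (a quick check, valid for wavenumbers below the Nyquist limit):
```Python
import numpy as np
N1 = 8
y = 2*np.pi*np.arange(N1)/N1
m, q = 3, 5
ip = np.mean(np.exp(1j*m*y) * np.conj(np.exp(1j*q*y)))
print(abs(ip))   # ~0 for m != q, and 1 when m == q
```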
The right hand side of Eq. ([14](#eq:3d:varform)) is computed as
<!-- Equation labels as ordinary links -->
<a id="_auto14"></a>
$$
\begin{equation}
\tilde{f}_{pqr} = \left\langle f, \mathcal{X}_{p}
\mathcal{Y}_q \mathcal{Z}_r \right \rangle_w^{\boldsymbol{N}},
\label{_auto14} \tag{19}
\end{equation}
$$
where a tilde is used because this is not a complete transform of the function
$f$, but only an inner product.
The linear system of equations to solve for the expansion coefficients can now
be found as follows
<!-- Equation labels as ordinary links -->
<a id="eq:AB"></a>
$$
\begin{equation}
\left(a_{lj} - (m^2+n^2)b_{lj}\right) \hat{u}_{jmn} =
\tilde{f}_{lmn}\quad \forall \, (l,m,n) \in \boldsymbol{k}. \label{eq:AB} \tag{20}
\end{equation}
$$
Now, when $\hat{\boldsymbol{u}} = \{\hat{u}_{\boldsymbol{\textsf{k}}}\}_{\boldsymbol{\textsf{k}} \in \boldsymbol{k}}$ is
found by solving this linear system over the
entire computational mesh, it may be
transformed to real space $u(\boldsymbol{x})$ using ([12](#eq:3d:u)). Note that the matrices
$A \in \mathbb{R}^{N_0-3 \times N_0-3}$ and $B \in \mathbb{R}^{N_0-3 \times N_0-3}$
differ for Legendre or Chebyshev bases, but
for either case they have a
special structure that allows for a solution to be found very efficiently
in the order of $\mathcal{O}(N_0-3)$ operations given $m$ and $n$, see
[[shen1]](#shen1) and [[shen95]](#shen95). Fast solvers for ([20](#eq:AB)) are implemented in `shenfun` for both bases.
### Method of manufactured solutions
In this demo we will use the method of manufactured
solutions to demonstrate spectral accuracy of the `shenfun` bases. To
this end we choose a smooth analytical function that satisfies the given boundary
conditions:
<!-- Equation labels as ordinary links -->
<a id="eq:3d:u_e"></a>
$$
\begin{equation}
u_e(x, y, z) = \left(\cos(4x) + \sin(2y) + \sin(4z)\right)(1-x^2). \label{eq:3d:u_e} \tag{21}
\end{equation}
$$
Sending $u_e$ through the Laplace operator, we obtain the right hand side
<!-- Equation labels as ordinary links -->
<a id="eq:3d:solution"></a>
$$
\begin{equation}
\nabla^2 u_e(x,y,z) = -16(1 - x^2) \cos(4 x) + 16 x \sin(4 x) - 2 \cos(4 x)
- 2 \sin(2y) - 2 \sin(4z)
- (1-x^2)(4 \sin(2y) + 16\sin(4z)). \label{eq:3d:solution} \tag{22}
\end{equation}
$$
Now, setting $f_e(\boldsymbol{x}) = \nabla^2 u_e(\boldsymbol{x})$ and solving for $\nabla^2
u(\boldsymbol{x}) = f_e(\boldsymbol{x})$, we can compare the numerical solution $u(\boldsymbol{x})$ with
the analytical solution $u_e(\boldsymbol{x})$ and compute error norms.
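The right hand side ([22](#eq:3d:solution)) can be verified symbolically, e.g. with
`Sympy` (the same expressions are created in the implementation below):
```Python
import sympy as sp
x, y, z = sp.symbols('x y z')
ue = (sp.cos(4*x) + sp.sin(2*y) + sp.sin(4*z))*(1 - x**2)
fe = sp.expand(ue.diff(x, 2) + ue.diff(y, 2) + ue.diff(z, 2))
print(fe)   # matches Eq. (22)
```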
## Implementation
### Preamble
We will solve the Poisson problem using the [shenfun](https://github.com/spectralDNS/shenfun) Python module. The first thing needed
is then to import some of this module's functionality
plus some other helper modules, like [Numpy](https://numpy.org) and [Sympy](https://sympy.org):
```
from sympy import symbols, cos, sin, exp, lambdify
import numpy as np
from shenfun.tensorproductspace import TensorProductSpace
from shenfun import inner, div, grad, TestFunction, TrialFunction, Function, \
project, Dx, FunctionSpace, comm, Array, chebyshev, dx, la
```
We use `Sympy` for the manufactured solution and `Numpy` for testing. MPI for
Python (`mpi4py`) is required for running the solver with MPI.
### Manufactured solution
The exact solution $u_e(x, y, z)$ and the right hand side $f_e(x, y, z)$ are created using `Sympy` as follows
```
x, y, z = symbols("x,y,z")
ue = (cos(4*x) + sin(2*y) + sin(4*z))*(1-x**2)
fe = ue.diff(x, 2) + ue.diff(y, 2) + ue.diff(z, 2)
```
These solutions are now valid for a continuous domain. The next step is thus to
discretize, using the computational mesh
$$
(x_i, y_j, z_k)\, \forall \, (i, j, k) \in [0, 1,\ldots, N_0-1] \times [0, 1, \ldots, N_1-1] \times [0, 1, \ldots, N_2-1]
$$
and a finite number of basis functions.
Note that it is not mandatory to use `Sympy` for the manufactured solution. Since the
solution is known ([22](#eq:3d:solution)), we could just as well simply use `Numpy`
to compute $f_e$. However, with `Sympy` it is much
easier to experiment and quickly change the solution.
### Discretization and MPI
We create three function spaces with given size, one for each dimension of the problem.
From these three spaces a [TensorProductSpace](https://shenfun.readthedocs.io/en/latest/shenfun.html#shenfun.tensorproductspace.TensorProductSpace) is created.
```
# Size of discretization
N = [14, 15, 16]
SD = FunctionSpace(N[0], 'Chebyshev', bc=(0, 0))
#SD = FunctionSpace(N[0], 'Legendre', bc=(0, 0))
K1 = FunctionSpace(N[1], 'Fourier', dtype='D')
K2 = FunctionSpace(N[2], 'Fourier', dtype='d')
T = TensorProductSpace(comm, (SD, K1, K2), axes=(0, 1, 2))
X = T.local_mesh()
```
Note that we can either choose a Legendre or a Chebyshev basis for the
nonperiodic direction. The
[TensorProductSpace](https://shenfun.readthedocs.io/en/latest/shenfun.html#shenfun.tensorproductspace.TensorProductSpace) class takes an MPI communicator as first argument and the
computational mesh is distributed internally using the `pencil` method. The
`T.local_mesh` method returns the mesh local to each processor. The `axes`
keyword determines the order of transforms going back and forth between real and
spectral space. With `axes=(0, 1, 2)` and a forward transform (from real space
to spectral, i.e., from $u$ to $\hat{u}$) axis 2 is transformed first and then 1
and 0, respectively.
The manufactured solution is created with Dirichlet boundary conditions in the
$x$-direction, and for this reason `SD` is the first space in `T`. We could just
as well have put the nonperiodic direction along either $y$- or $z$-direction,
though, but this would then require that the order of the transformed axes be
changed as well. For example, putting the Dirichlet direction along $y$, we
would need to create the tensorproductspace as
```Python
T = TensorProductSpace(comm, (K1, SD, K2), axes=(1, 0, 2))
```
such that the Dirichlet direction is the last to be transformed. The reason for
this is that only the Dirichlet direction leads to matrices that need to be
inverted (or solved). And for this we need the entire data array along the Dirichlet
direction to be local to the processor. If the `SD` basis is the last to be
transformed, then the data will be aligned in this direction, whereas the other
two directions may both, or just one of them, be distributed.
Note that `X` is a list containing local values of the arrays $\{x_i\}_{i=0}^{N_0-1}$,
$\{y_j\}_{j=0}^{N_1-1}$ and $\{z_k\}_{k=0}^{N_2-1}$.
Now, it's not possible to run a jupyter notebook with more than one process,
but we can imagine running [the complete solver](https://github.com/spectralDNS/shenfun/blob/master/demo/dirichlet_poisson3D.py)
with 4 processors and a processor mesh of shape $2\times 2$.
We would then get the following local slices for
each processor in spectral space
```Python
print(comm.Get_rank(), T.local_slice())
3 [slice(0, 14, None), slice(8, 15, None), slice(5, 9, None)]
1 [slice(0, 14, None), slice(0, 8, None), slice(5, 9, None)]
2 [slice(0, 14, None), slice(8, 15, None), slice(0, 5, None)]
0 [slice(0, 14, None), slice(0, 8, None), slice(0, 5, None)]
```
where the global shape is $\boldsymbol{N}=(14, 15, 9)$ after taking advantage of
Hermitian symmetry in the $z$-direction. So, all processors have the complete first dimension available locally, as they
should. Furthermore, processor three owns the slices from $8:15$ and $5:9$ along
axes $y$ and $z$, respectively. Processor 2 owns slices $8:15$ and $0:5$, etc. In
real space the mesh is distributed differently. First of all the global mesh
shape is $\boldsymbol{N}=(14, 15, 16)$, and it is distributed along the first two
dimensions. The local slices can be inspected as
```Python
print(comm.Get_rank(), T.local_slice(False))
0 [slice(0, 7, None), slice(0, 8, None), slice(0, 16, None)]
1 [slice(0, 7, None), slice(8, 15, None), slice(0, 16, None)]
2 [slice(7, 14, None), slice(0, 8, None), slice(0, 16, None)]
3 [slice(7, 14, None), slice(8, 15, None), slice(0, 16, None)]
```
Since two directions are distributed, both in spectral and real space, we say
that we have a two-dimensional decomposition (here a $2\times 2$ shaped
processor mesh) and the
MPI distribution is of type *pencil*. It is also possible to choose a *slab*
decomposition, where only one dimension of the array is distributed. This choice
needs to be made when creating the tensorproductspace as
```Python
T = TensorProductSpace(comm, (SD, K1, K2), axes=(0, 1, 2), slab=True)
```
which would lead to a mesh that is distributed along $x$-direction in real space
and $y$-direction in spectral space. The local slices would then be
```Python
print(comm.Get_rank(), T.local_slice())  # spectral space
1 [slice(0, 14, None), slice(4, 8, None), slice(0, 9, None)]
2 [slice(0, 14, None), slice(8, 12, None), slice(0, 9, None)]
0 [slice(0, 14, None), slice(0, 4, None), slice(0, 9, None)]
3 [slice(0, 14, None), slice(12, 15, None), slice(0, 9, None)]

print(comm.Get_rank(), T.local_slice(False))  # real space
3 [slice(11, 14, None), slice(0, 15, None), slice(0, 16, None)]
0 [slice(0, 4, None), slice(0, 15, None), slice(0, 16, None)]
2 [slice(8, 11, None), slice(0, 15, None), slice(0, 16, None)]
1 [slice(4, 8, None), slice(0, 15, None), slice(0, 16, None)]
```
Note that the *slab* decomposition is usually the fastest choice. However, the maximum
number of processors with *slab* is $\min \{N_0, N_1\}$, whereas a *pencil*
approach can be used with up to $\min \{N_1(N_2/2+1), N_0 N_1\}$ processors.
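For the mesh used in this demo, $\boldsymbol{N}=(14, 15, 16)$, these bounds work out
as follows (a quick check):
```Python
N0, N1, N2 = 14, 15, 16
print(min(N0, N1))                   # slab:   at most 14 processors
print(min(N1*(N2//2 + 1), N0*N1))    # pencil: min(135, 210) = 135 processors
```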
### Variational formulation
The variational problem ([14](#eq:3d:varform)) can be assembled using `shenfun`'s
form language, which is perhaps surprisingly similar to FEniCS.
```
u = TrialFunction(T)
v = TestFunction(T)
# Get f on quad points
fj = Array(T, buffer=fe)
# Compute right hand side of Poisson equation
f_hat = inner(v, fj)
# Get left hand side of Poisson equation
matrices = inner(v, div(grad(u)))
```
The Laplacian operator is recognized as `div(grad)`. The `matrices` object is a
list of two tensor product matrices, stored as instances of the class [TPMatrix](https://shenfun.readthedocs.io/en/latest/shenfun.html#shenfun.matrixbase.TPMatrix).
The two tensor product matrices represents
$$
a_{pl} \delta_{mq} \delta_{nr}\, \text{ and }\, -(m^2 + n^2)b_{pl} \delta_{mq} \delta_{nr}
$$
from Eqs. ([20](#eq:AB)), ([17](#eq:delta0)) and ([18](#eq:delta1)).
The second matrix ($-(m^2 + n^2)b_{pl} \delta_{mq} \delta_{nr}$) has an
attribute `scale` that is equal to $-(m^2+n^2)$.
This `scale` is stored as a numpy array of shape $(1, 15, 9)$, representing the set
$\{-(m^2+n^2): (m, n) \in \boldsymbol{m}^{N_1} \times \boldsymbol{n}^{N_2}\}$. Note that $\boldsymbol{n}^{N_2}$ is stored
simply as an array of length $N_2/2+1$ (here 9), since the transform in direction $z$
takes a real signal and transforms it taking advantage of Hermitian symmetry,
see [rfft](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.fft.rfft.html).
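A small NumPy sketch of how such a `scale` array can be formed from the wavenumbers
(illustrative only; shenfun assembles it for you):
```Python
import numpy as np
N1, N2 = 15, 16
m = np.fft.fftfreq(N1, 1.0/N1)    # wavenumbers along y
n = np.fft.rfftfreq(N2, 1.0/N2)   # wavenumbers along z (real transform)
scale = -(m[None, :, None]**2 + n[None, None, :]**2)
print(scale.shape)                # (1, 15, 9)
```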
### Solve linear equations
Finally, we solve the linear system of equations, transform the solution from the spectral
vector $\hat{u}_{\boldsymbol{\textsf{k}}}$ back to real space $u(\boldsymbol{x})$, and check how well the solution agrees with the exact solution $u_e$.
```
# Create Helmholtz linear algebra solver
Solver = chebyshev.la.Helmholtz
#Solver = la.SolverGeneric1ND # For Legendre
H = Solver(matrices)
# Solve and transform to real space
u_hat = Function(T) # Solution spectral space
u_hat = H(u_hat, f_hat) # Solve
uq = T.backward(u_hat)
# Compare with analytical solution
uj = Array(T, buffer=ue)
error = comm.reduce(np.linalg.norm(uj-uq)**2)
if comm.Get_rank() == 0:
    print("Error=%2.16e" %(np.sqrt(error)))
```
### Convergence test
To do a convergence test we will now create a function `main`, that takes the
number of quadrature points as a parameter and returns the error.
```
def main(N, family='Chebyshev'):
    Solver = chebyshev.la.Helmholtz if family.lower() == 'chebyshev' else la.SolverGeneric1ND

    SD = FunctionSpace(N, family=family, bc=(0, 0))
    K1 = FunctionSpace(N, family='F', dtype='D')
    K2 = FunctionSpace(N, family='F', dtype='d')
    T = TensorProductSpace(comm, (SD, K1, K2), axes=(0, 1, 2))

    u = TrialFunction(T)
    v = TestFunction(T)

    # Get f on quad points
    fj = Array(T, buffer=fe)

    # Compute right hand side of Poisson's equation
    f_hat = Function(T)
    f_hat = inner(v, fj, output_array=f_hat)
    if family.lower() == 'legendre':
        f_hat *= -1.

    # Get left hand side of Poisson's equation
    if family.lower() == 'chebyshev':
        matrices = inner(v, div(grad(u)))
    else:
        matrices = inner(grad(v), grad(u))

    # Create Helmholtz linear algebra solver
    H = Solver(matrices)

    # Solve and transform to real space
    u_hat = Function(T)        # solution in spectral space
    u_hat = H(f_hat, u_hat)    # solve
    uj = Array(T)
    uj = u_hat.backward(uj)

    # Compare with analytical solution
    ua = Array(T, buffer=ue)
    L2_error = np.sqrt(dx((uj-ua)**2))
    return L2_error
```
For example, we find the error of a Chebyshev discretization
using 12 quadrature points as
```
main(12, 'Chebyshev')
```
To study the convergence we call `main` for a list
of $N=[12, 16, \ldots, 48]$ and collect the error norms in
arrays to be plotted. The error can be plotted using
[matplotlib](https://matplotlib.org), and the generated
figure is also shown in this demo's summary.
```
%matplotlib inline
import matplotlib.pyplot as plt
N = range(12, 50, 4)
error = {}
for basis in ('legendre', 'chebyshev'):
    error[basis] = []
    for i in range(len(N)):
        errN = main(N[i], basis)
        error[basis].append(errN)

plt.figure(figsize=(6, 4))
for basis, col in zip(('legendre', 'chebyshev'), ('r', 'b')):
    plt.semilogy(N, error[basis], col, linewidth=2)
plt.title('Convergence of Poisson solvers 1D')
plt.xlabel('N')
plt.ylabel('Error norm')
plt.legend(('Legendre', 'Chebyshev'))
plt.show()
```
The spectral convergence is evident, and we can see that
after $N=24$ roundoff errors dominate as the error norm levels off around $10^{-14}$.
## Complete solver
<a id="sec:complete"></a>
A complete solver, that can use either Legendre or Chebyshev bases, chosen as a
command-line argument, can also be found [here](https://github.com/spectralDNS/shenfun/blob/master/demo/dirichlet_poisson3D.py).
<!-- ======= Bibliography ======= -->
1. <a id="shen1"></a> **J. Shen**.
Efficient Spectral-Galerkin Method I. Direct Solvers of Second- and Fourth-Order Equations Using Legendre Polynomials,
*SIAM Journal on Scientific Computing*,
15(6),
pp. 1489-1505,
[doi: 10.1137/0915089](http://dx.doi.org/10.1137/0915089),
1994.
2. <a id="shen95"></a> **J. Shen**.
Efficient Spectral-Galerkin Method II. Direct Solvers of Second- and Fourth-Order Equations Using Chebyshev Polynomials,
*SIAM Journal on Scientific Computing*,
16(1),
pp. 74-87,
[doi: 10.1137/0916006](http://dx.doi.org/10.1137/0916006),
1995.
|
{"hexsha": "d49b96fa656fc77c983bbd0174c000bb6fdb73c0", "size": 38890, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "content/poisson3d.ipynb", "max_stars_repo_name": "mikaem/shenfun-demos", "max_stars_repo_head_hexsha": "c2ad13d62866e0812068673fdb6a7ef68ecfb7f2", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "content/poisson3d.ipynb", "max_issues_repo_name": "mikaem/shenfun-demos", "max_issues_repo_head_hexsha": "c2ad13d62866e0812068673fdb6a7ef68ecfb7f2", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-21T16:10:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-21T16:10:01.000Z", "max_forks_repo_path": "content/poisson3d.ipynb", "max_forks_repo_name": "mikaem/shenfun-demos", "max_forks_repo_head_hexsha": "c2ad13d62866e0812068673fdb6a7ef68ecfb7f2", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4969853575, "max_line_length": 245, "alphanum_fraction": 0.5444073026, "converted": true, "num_tokens": 8449}
|
$\newcommand{\xv}{\mathbf{x}}
\newcommand{\wv}{\mathbf{w}}
\newcommand{\vv}{\mathbf{v}}
\newcommand{\yv}{\mathbf{y}}
\newcommand{\zv}{\mathbf{z}}
\newcommand{\av}{\mathbf{a}}
\newcommand{\Chi}{\mathcal{X}}
\newcommand{\R}{\rm I\!R}
\newcommand{\sign}{\text{sign}}
\newcommand{\Tm}{\mathbf{T}}
\newcommand{\Xm}{\mathbf{X}}
\newcommand{\Xlm}{\mathbf{X1}}
\newcommand{\Wm}{\mathbf{W}}
\newcommand{\Vm}{\mathbf{V}}
\newcommand{\Ym}{\mathbf{Y}}
\newcommand{\Zm}{\mathbf{Z}}
\newcommand{\Gm}{\mathbf{G}}
\newcommand{\Zlm}{\mathbf{Z1}}
\newcommand{\I}{\mathbf{I}}
\newcommand{\muv}{\boldsymbol\mu}
\newcommand{\Sigmav}{\boldsymbol\Sigma}
\newcommand{\Phiv}{\boldsymbol\Phi}
$
# Nonlinear Logistic Regression
Previously, we studied linear logistic regression, which uses a softmax layer for classification on top of a linear model.
$$
g_k(\xv) = P(T=k \mid \xv) = \frac{e^{\kappa_k}}{\sum_{c=1}^K e^{\kappa_c}}
$$
By using this softmax function, we were able to generate probabilistic outputs for all classes. To handle multiple classes, we train with indicator (one-hot) target labels when updating the weights of the linear model.
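As a quick numerical illustration of the softmax (the numbers here are made up):
```python
import numpy as np

kappa = np.array([2.0, 1.0, 0.1])        # linear outputs for K = 3 classes
g = np.exp(kappa) / np.exp(kappa).sum()  # softmax probabilities
print(g.round(3))                        # [0.659 0.242 0.099], summing to 1
```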
Following the derivation, we arrived at the following update rule:
$$
\wv_j \leftarrow \wv_j + \alpha \sum_{n=1}^{N} \Big( t_{n,j} - g_j(\xv_n)\Big) \xv_n.
$$
To update the weights with a batch of samples, we can write this update rule in matrix form as follows:
$$
\wv \leftarrow \wv + \alpha \Xm^\top \Big( \Tm - g(\Xm)\Big).
$$
Recall that we started the derivation from the error function below:
$$
E(\wv) = - \ln P(\Tm \mid \wv) = - \sum_{n=1}^{N} \sum_{k=1}^{K} t_{n,k} \ln y_{n,k}.
$$
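For a toy batch this error is simple to evaluate (the targets and predictions below are made up):
```python
import numpy as np

T = np.array([[1, 0, 0],
              [0, 0, 1]])          # indicator targets, N = 2, K = 3
Y = np.array([[0.7, 0.2, 0.1],
              [0.3, 0.3, 0.4]])    # predicted probabilities y_{n,k}
E = -np.sum(T * np.log(Y))         # only the target-class terms survive
print(E)                           # -(ln 0.7 + ln 0.4) ~ 1.273
```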
# Nonlinear Extension with Neural Networks
Now, we extend this to two-layer neural networks. As in the derivation of neural networks for regression, we can derive the gradient by replacing the squared error with the negative log-likelihood function above.
From the error function $E(\wv)$, we can derive the gradient to update the weights for each layer.
$$
\begin{align}
v_{dg} &\leftarrow v_{dg} - \alpha_h \frac{\partial{E(\Wm, \Vm)}} {\partial{v_{dg}}} \\
\\
w_{gk} &\leftarrow w_{gk} - \alpha_o \frac{\partial{E(\Wm, \Vm)}} {\partial{w_{gk}}},
\end{align}
$$
where $\alpha_h$ and $\alpha_o$ are the learning rates for the hidden and output layers, respectively. Here, we denote the output of the neural network as $\kappa$.
$$
\begin{align}
\frac{\partial{E}}{\partial{w_{gk}}} &= -\frac{\partial \Big( \sum_{n=1}^{N} \sum_{l=1}^{K} t_{nl} \ln g_{nl}(\xv_n) \Big)}{\partial{w_{gk}}} \\
\\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} \frac{1}{g_{n,l}(\xv_n)} \frac{\partial g_{n,l}(\xv_n)}{\partial {w_{gk}}}\\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} \frac{1}{g_{n,l}(\xv_n)} \frac{\partial g_{n,l}(\xv_n)}{\partial \kappa_{nk}} \frac{\partial \kappa_{nk} }{\partial {w_{gk}}}\\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} \frac{1}{g_{n,l}(\xv_n)} g_{nl}(\xv_n) (I_{lk} - g_{nk}(\xv_n)) \frac{\partial \kappa_{nk} }{\partial {w_{gk}}}\\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} (I_{lk} - g_{nk}(\xv_n)) \frac{\partial \sum_{g=0}^{G} z1_{ng} w_{gk} }{\partial {w_{gk}}}\\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} (I_{lk} - g_{nk}(\xv_n)) z1_{ng}\\
&= -\sum_{n=1}^{N} \Big( \sum_{l=1}^{K} t_{n,l} I_{lk} - g_{nk}(\xv_n) \sum_{l=1}^{K} t_{n,l} \Big) z1_{ng}\\
&= -\sum_{n=1}^{N} \Big( t_{n,k} - g_{nk}(\xv_n) \Big) z1_{ng},
\end{align}
$$

where the last step uses $\sum_{l=1}^{K} t_{n,l} I_{lk} = t_{n,k}$ and $\sum_{l=1}^{K} t_{n,l} = 1$ for indicator targets.
Converting this gradient to matrix form (the last step above uses $\sum_{l=1}^{K} t_{n,l} = 1$ for indicator targets) and plugging it into the weight update,
$$
\Wm \leftarrow \Wm + \alpha_o \Zlm^\top \Big( \Tm - g(\Xm)\Big).
$$
Now let us derive the update for the hidden-layer weights $\vv$, repeating the same steps:
$$
\begin{align}
\frac{\partial{E}}{\partial{v_{dg}}} &= -\frac{\partial \Big( \sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} \ln g_{n,l}(\xv_n) \Big)}{\partial{v_{dg}}} \\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} \frac{1}{g_{n,l}(\xv_n)} \frac{\partial g_{n,l}(\xv_n)}{\partial {v_{dg}}}\\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} t_{n,l} \frac{1}{g_{n,l}(\xv_n)} \sum_{k=1}^{K} \frac{\partial g_{n,l}(\xv_n)} {\partial \kappa_{nk}} \frac{\partial \kappa_{nk}} {\partial {v_{dg}}}\\
&= -\sum_{n=1}^{N} \sum_{l=1}^{K} \sum_{k=1}^{K} t_{n,l} (I_{lk} - g_{nk}(\xv_n)) w_{gk} \frac{\partial{h(a_{ng})}}{\partial{a_{ng}}} \frac{\partial{a_{ng}}}{\partial{v_{dg}}}\\
&= -\sum_{n=1}^{N} \sum_{k=1}^{K} \Big( t_{n,k} - g_{nk}(\xv_n) \Big) w_{gk} \frac{\partial{h(a_{ng})}}{\partial{a_{ng}}} x1_{nd}\\
&= -\sum_{n=1}^{N} \sum_{k=1}^{K} \Big( t_{n,k} - g_{nk}(\xv_n) \Big) w_{gk} (1 - z_{ng}^2) x1_{nd}.
\end{align}
$$
Again, converting to matrix form for the hidden weight update (with $h = \tanh$, so $\partial h(a_{ng})/\partial a_{ng} = 1 - z_{ng}^2$),
$$
\Vm \leftarrow \Vm + \alpha_h \Xlm^\top \Big( (\Tm - g(\Xm)) \Wm^\top \odot (1 - \Zm^2) \Big).
$$
Here, $\odot$ denotes the element-wise multiplication.
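Putting both matrix updates together, one gradient step can be written directly from the formulas above. This is a minimal sketch: `add_ones` appends the bias column, matching the $\Xlm$ and $\Zlm$ notation, `softmax` is the helper defined earlier, and dropping the bias row of $\Wm$ in the hidden-layer update makes the shapes match (a detail left implicit in the matrix formula):
```python
def add_ones(M):
    """Prepend a column of ones (the bias input) to M."""
    return np.hstack((np.ones((M.shape[0], 1)), M))

def gradient_step(X, T, V, W, alpha_h, alpha_o):
    # forward pass
    X1 = add_ones(X)             # (N, D+1)
    Z = np.tanh(X1 @ V)          # (N, G) hidden-unit outputs
    Z1 = add_ones(Z)             # (N, G+1)
    G = softmax(Z1 @ W)          # (N, K) class probabilities
    # backward pass: both updates use the pre-update W
    delta = T - G
    V = V + alpha_h * X1.T @ ((delta @ W[1:, :].T) * (1 - Z**2))
    W = W + alpha_o * Z1.T @ delta
    return V, W
```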
## Summary (Regression vs Classification)
<table>
<tr>
<th></th>
<th width=45%> Regression </th>
<th width=45%> Classification </th>
</tr>
<tr>
<td>
Forward Pass
</td>
<td>
$$
\begin{align}
\Zm &= h(\Xlm \cdot \Vm) \\
\\
\Ym & = \Zlm \cdot \Wm
\end{align}
$$
</td>
<td>
$$
\begin{align}
\Zm &= h(\Xlm \cdot \Vm) \\
\\
\Ym & = \Zlm \cdot \Wm \\
\Gm & = \text{softmax}(\Ym)
\end{align}
$$
</td>
</tr>
<tr>
<td>
Backward Pass
</td>
<td>
$$
\begin{align}
\Vm &\leftarrow \Vm + \alpha_h \frac{1}{N} \frac{1}{K} \Xlm^\top \Big( (\Tm - \Ym) \Wm^\top \odot (1 - \Zm^2) \Big) \\
\Wm &\leftarrow \Wm + \alpha_o \frac{1}{N} \frac{1}{K} \Zlm^\top \Big( \Tm - \Ym \Big)
\end{align}
$$
</td>
<td>
$$
\begin{align}
\Vm &\leftarrow \Vm + \alpha_h \Xlm^\top \Big( (\Tm - \Gm) \Wm^\top \odot (1 - \Zm^2) \Big)\\
\Wm &\leftarrow \Wm + \alpha_o \Zlm^\top \Big( \Tm - \Gm\Big)
\end{align}
$$
</td>
</tr>
<tr>
<td></td>
<td></td>
<td>
Note: Here $\Tm$ is a matrix with indicator variable outputs, <br/>
and $\Gm$ is the output matrix after the softmax layer.</td>
</tr>
</table>
# Practice
Now, inherit from the previous `NeuralNetwork` class to implement neural-network classification.
```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import nn
```
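The `nn` module imported above is not included in these notes. For reference, here is a self-contained sketch of the `train`/`use` interface the following cells rely on, built from the `softmax`, `add_ones`, and `gradient_step` helpers defined earlier; it is an illustration, not the actual `nn.NeuralNetLogReg`:
```python
class SimpleNeuralNetLogReg:
    """Two-layer softmax classifier with a train/use interface."""
    def __init__(self, shape):
        ni, nh, no = shape
        rng = np.random.default_rng(0)
        self.V = rng.uniform(-0.1, 0.1, (ni + 1, nh))
        self.W = rng.uniform(-0.1, 0.1, (nh + 1, no))
        self.classes_ = None

    def train(self, X, T, n_iter=2000, alpha_h=1e-3, alpha_o=1e-3):
        T = np.asarray(T).reshape(-1)
        self.classes_ = np.unique(T)
        Tind = (T[:, None] == self.classes_[None, :]).astype(float)  # indicator targets
        for _ in range(n_iter):
            self.V, self.W = gradient_step(X, Tind, self.V, self.W,
                                           alpha_h, alpha_o)

    def use(self, X):
        Z1 = add_ones(np.tanh(add_ones(X) @ self.V))
        Y = softmax(Z1 @ self.W)
        return self.classes_[np.argmax(Y, axis=1)], Y
```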
Let us repeat the previous classification example, this time with the nonlinear classifier.
```python
# Data for testing
N1 = 50
N2 = 50
N = N1 + N2
D = 2
K = 2
mu1 = [-1, -1]
cov1 = np.eye(2)
mu2 = [2,3]
cov2 = np.eye(2) * 3
#
# Train Data
#
C1 = np.random.multivariate_normal(mu1, cov1, N1)
C2 = np.random.multivariate_normal(mu2, cov2, N2)
plt.plot(C1[:, 0], C1[:, 1], 'or')
plt.plot(C2[:, 0], C2[:, 1], 'xb')
plt.xlim([-3, 6])
plt.ylim([-3, 7])
plt.title("training data set")
Xtrain = np.vstack((C1, C2))
Ttrain = np.zeros((N, 1))
Ttrain[50:, :] = 1 # labels are zero or one
means, stds = np.mean(Xtrain, 0), np.std(Xtrain, 0)
# normalize inputs
Xtrains = (Xtrain - means) / stds
#
# Test Data
#
Ct1 = np.random.multivariate_normal(mu1, cov1, 20)
Ct2 = np.random.multivariate_normal(mu2, cov2, 20)
Xtest = np.vstack((Ct1, Ct2))
Ttest = np.zeros((40, 1))
Ttest[20:, :] = 1
# normalize inputs
Xtests = (Xtest - means) / stds
plt.figure()
plt.plot(Ct1[:, 0], Ct1[:, 1], 'or')
plt.plot(Ct2[:, 0], Ct2[:, 1], 'xb')
plt.xlim([-3, 6])
plt.ylim([-3, 7])
plt.title("test data set")
```
```python
# Apply Nonlinear Logistic Regression
from importlib import reload
reload(nn)
#import warnings
#warnings.filterwarnings('ignore')
clsf = nn.NeuralNetLogReg([2, 4, 2])
clsf.train(Xtrain, Ttrain)
classes, Y = clsf.use(Xtest)
```
```python
classes
```
array([ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.,
0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.])
```python
Y
```
array([[ 1.00000000e+00, 3.62429239e-24],
[ 1.00000000e+00, 4.49791656e-10],
[ 9.99999998e-01, 2.08859210e-09],
[ 1.00000000e+00, 3.62429232e-24],
[ 2.99307336e-01, 7.00692664e-01],
[ 1.00000000e+00, 3.62429229e-24],
[ 1.00000000e+00, 3.62429243e-24],
[ 1.00000000e+00, 3.62926450e-24],
[ 1.00000000e+00, 6.36987730e-24],
[ 1.00000000e+00, 3.62429240e-24],
[ 5.99996498e-01, 4.00003502e-01],
[ 1.00000000e+00, 3.62429229e-24],
[ 1.00000000e+00, 4.48284191e-24],
[ 1.00000000e+00, 3.64360621e-24],
[ 1.00000000e+00, 3.95678934e-24],
[ 1.00000000e+00, 3.67347448e-24],
[ 1.00000000e+00, 3.62429229e-24],
[ 1.00000000e+00, 3.62429233e-24],
[ 1.00000000e+00, 5.59588219e-18],
[ 1.00000000e+00, 3.62429229e-24],
[ 1.71288265e-23, 1.00000000e+00],
[ 2.19935378e-14, 1.00000000e+00],
[ 1.71288265e-23, 1.00000000e+00],
[ 1.71290892e-23, 1.00000000e+00],
[ 1.70788912e-14, 1.00000000e+00],
[ 4.48420430e-14, 1.00000000e+00],
[ 6.00020101e-01, 3.99979899e-01],
[ 1.71594780e-23, 1.00000000e+00],
[ 1.75541816e-23, 1.00000000e+00],
[ 6.04943745e-15, 1.00000000e+00],
[ 4.48184489e-14, 1.00000000e+00],
[ 4.48415004e-14, 1.00000000e+00],
[ 4.37481476e-14, 1.00000000e+00],
[ 1.71532100e-23, 1.00000000e+00],
[ 1.41251835e-13, 1.00000000e+00],
[ 4.28267187e-14, 1.00000000e+00],
[ 4.48414747e-14, 1.00000000e+00],
[ 1.71288265e-23, 1.00000000e+00],
[ 1.71288265e-23, 1.00000000e+00],
[ 5.00047240e-01, 4.99952760e-01]])
```python
# retrieve labels and plot
plt.plot(Ttest)
plt.plot(classes)
print("Accuracy: ", 100 - np.mean(np.abs(Tl - Yl)) * 100, "%")
```
```python
# show me the boundary
x = np.linspace(-3, 6, 1000)
y = np.linspace(-3, 7, 1000)
xs, ys = np.meshgrid(x, y)
X = np.vstack((xs.flat, ys.flat)).T
classes, _ = clsf.use(X)
zs = classes.reshape(xs.shape)
plt.figure(figsize=(6,6))
plt.contourf(xs, ys, zs)  # zs is already reshaped above
plt.title("Decision Boundary")
plt.plot(Ct1[:, 0], Ct1[:, 1], 'or')
plt.plot(Ct2[:, 0], Ct2[:, 1], 'xb')
```
```python
from sklearn.datasets import make_circles
X, T = make_circles(n_samples=800, noise=0.07, factor=0.4)
plt.figure(figsize=(10, 8))
plt.scatter(X[:, 0], X[:, 1], marker='o', c=T)
plt.title("Circles")
```
```python
clsf = nn.NeuralNetLogReg([2, 1, 2])
clsf.train(X, T)
# checking the training error only
classes, Y = clsf.use(X)
```
```python
# retrieve labels and plot
plt.plot(T)
plt.plot(classes)
print("Accuracy: ", 100 - np.mean(np.abs(T - classes)) * 100, "%")
```
```python
# show me the boundary
x = np.linspace(-1.5, 1.5, 1000)
y = np.linspace(-1.5, 1.5, 1000)
xs, ys = np.meshgrid(x, y)
Xt = np.vstack((xs.flat, ys.flat)).T
classes, _ = clsf.use(Xt)
zs = classes.reshape(xs.shape)
plt.figure(figsize=(6,6))
plt.contourf(xs, ys, zs, alpha=0.3)  # zs is already reshaped above
plt.title("Decision Boundary")
plt.scatter(X[:, 0], X[:, 1], marker='o', c=T+3)
```
```python
from sklearn.datasets import load_iris
data = load_iris()
```
```python
data.keys()
```
dict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names'])
```python
data.target
```
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
```python
data.data.shape
```
(150, 4)
```python
clsf = nn.NeuralNetLogReg([4, 1, 3])
clsf.train(data.data, data.target)
# checking the training error only
classes, Y = clsf.use(data.data)
```
```python
# retrieve labels and plot
plt.plot(data.target)
plt.plot(classes)
print("Accuracy: ", 100 - np.mean(np.abs(data.target - classes)) * 100, "%")
```
```python
Y
```
array([[ 9.99991385e-01, 8.61541898e-06, 2.16892851e-27],
[ 9.99978241e-01, 2.17593859e-05, 1.73875252e-26],
[ 9.99986146e-01, 1.38544096e-05, 6.30606216e-27],
[ 9.99971064e-01, 2.89362302e-05, 3.29887992e-26],
[ 9.99992127e-01, 7.87333680e-06, 1.77158220e-27],
[ 9.99982124e-01, 1.78759977e-05, 1.11795632e-26],
[ 9.99978766e-01, 2.12335059e-05, 1.64576183e-26],
[ 9.99986703e-01, 1.32968101e-05, 5.75011385e-27],
[ 9.99962956e-01, 3.70441567e-05, 5.74627718e-26],
[ 9.99985191e-01, 1.48093898e-05, 7.32482714e-27],
[ 9.99993245e-01, 6.75457274e-06, 1.25551382e-27],
[ 9.99980827e-01, 1.91733484e-05, 1.30853666e-26],
[ 9.99985342e-01, 1.46575442e-05, 7.15717099e-27],
[ 9.99989996e-01, 1.00044561e-05, 3.03453396e-27],
[ 9.99997699e-01, 2.30076932e-06, 1.11687549e-28],
[ 9.99995299e-01, 4.70117860e-06, 5.56182688e-28],
[ 9.99992709e-01, 7.29105849e-06, 1.49071353e-27],
[ 9.99986934e-01, 1.30657238e-05, 5.52802932e-27],
[ 9.99988758e-01, 1.12416319e-05, 3.94324546e-27],
[ 9.99989750e-01, 1.02495403e-05, 3.20410301e-27],
[ 9.99983378e-01, 1.66221586e-05, 9.49442080e-27],
[ 9.99981331e-01, 1.86685265e-05, 1.23239976e-26],
[ 9.99995566e-01, 4.43440123e-06, 4.87770831e-28],
[ 9.99875830e-01, 1.24170250e-04, 8.70150917e-25],
[ 9.99957625e-01, 4.23750801e-05, 7.77272389e-26],
[ 9.99965660e-01, 3.43395579e-05, 4.84634245e-26],
[ 9.99954736e-01, 4.52641786e-05, 9.01421936e-26],
[ 9.99989901e-01, 1.00986885e-05, 3.09912634e-27],
[ 9.99990556e-01, 9.44389535e-06, 2.66581715e-27],
[ 9.99970726e-01, 2.92737336e-05, 3.38595511e-26],
[ 9.99967160e-01, 3.28396227e-05, 4.38365163e-26],
[ 9.99974144e-01, 2.58558431e-05, 2.56177660e-26],
[ 9.99996831e-01, 3.16910051e-06, 2.29314277e-28],
[ 9.99997130e-01, 2.86951379e-06, 1.83458591e-28],
[ 9.99985191e-01, 1.48093898e-05, 7.32482714e-27],
[ 9.99990733e-01, 9.26748328e-06, 2.55523967e-27],
[ 9.99994322e-01, 5.67817825e-06, 8.50057818e-28],
[ 9.99985191e-01, 1.48093898e-05, 7.32482714e-27],
[ 9.99976392e-01, 2.36084880e-05, 2.08842366e-26],
[ 9.99987487e-01, 1.25126585e-05, 5.01613497e-27],
[ 9.99988945e-01, 1.10546501e-05, 3.79741708e-27],
[ 9.99841005e-01, 1.58994538e-04, 1.51643682e-24],
[ 9.99983278e-01, 1.67216522e-05, 9.62257539e-27],
[ 9.99885143e-01, 1.14856806e-04, 7.30324880e-25],
[ 9.99955163e-01, 4.48367166e-05, 8.82408733e-26],
[ 9.99961576e-01, 3.84240227e-05, 6.23837010e-26],
[ 9.99991524e-01, 8.47589867e-06, 2.09081186e-27],
[ 9.99981236e-01, 1.87640738e-05, 1.24661598e-26],
[ 9.99992874e-01, 7.12563295e-06, 1.41579845e-27],
[ 9.99987612e-01, 1.23883729e-05, 4.90488913e-27],
[ 9.92514386e-09, 9.99995068e-01, 4.92238946e-06],
[ 2.81161744e-09, 9.99976281e-01, 2.37163997e-05],
[ 1.58608201e-10, 9.99147203e-01, 8.52797328e-04],
[ 3.20936206e-09, 9.99979887e-01, 2.01102108e-05],
[ 1.41769171e-10, 9.99019410e-01, 9.80590261e-04],
[ 1.34843027e-09, 9.99940726e-01, 5.92727292e-05],
[ 1.48275804e-10, 9.99072649e-01, 9.27351189e-04],
[ 7.01402577e-05, 9.99929860e-01, 7.82466725e-11],
[ 7.98274948e-09, 9.99993534e-01, 6.45785196e-06],
[ 8.25410487e-09, 9.99993797e-01, 6.19426970e-06],
[ 1.63145958e-06, 9.99998360e-01, 8.50748157e-09],
[ 3.62438865e-09, 9.99982715e-01, 1.72812922e-05],
[ 7.10123000e-07, 9.99999266e-01, 2.39963651e-08],
[ 2.19376410e-10, 9.99430473e-01, 5.69526372e-04],
[ 4.40842296e-06, 9.99995589e-01, 2.46382864e-09],
[ 3.67207674e-08, 9.99999000e-01, 9.63527356e-07],
[ 1.42809879e-10, 9.99028291e-01, 9.71709222e-04],
[ 3.67635175e-06, 9.99996321e-01, 3.08979613e-09],
[ 5.15195942e-12, 9.45936370e-01, 5.40636300e-02],
[ 8.00753494e-07, 9.99999179e-01, 2.06592004e-08],
[ 4.41072836e-13, 5.93580187e-01, 4.06419813e-01],
[ 2.48716706e-07, 9.99999663e-01, 8.87474025e-08],
[ 1.11553496e-12, 7.70387256e-01, 2.29612744e-01],
[ 3.01218163e-09, 9.99978233e-01, 2.17643013e-05],
[ 6.74092428e-08, 9.99999481e-01, 4.51843906e-07],
[ 1.60820690e-08, 9.99997287e-01, 2.69695532e-06],
[ 2.50279695e-10, 9.99516668e-01, 4.83331455e-04],
[ 8.67286890e-13, 7.25443946e-01, 2.74556054e-01],
[ 1.94136395e-10, 9.99336870e-01, 6.63129541e-04],
[ 2.35046257e-04, 9.99764954e-01, 1.73213370e-11],
[ 8.31184015e-07, 9.99999149e-01, 1.97205855e-08],
[ 9.51617694e-06, 9.99990483e-01, 9.44062340e-10],
[ 8.23075824e-07, 9.99999157e-01, 1.99630652e-08],
[ 1.45909043e-14, 1.25902978e-01, 8.74097022e-01],
[ 9.15653663e-11, 9.98311607e-01, 1.68839247e-03],
[ 7.43966704e-10, 9.99875614e-01, 1.24385605e-04],
[ 5.52689872e-10, 9.99819854e-01, 1.80145500e-04],
[ 6.36834105e-10, 9.99849017e-01, 1.50982632e-04],
[ 1.11186266e-07, 9.99999647e-01, 2.42131810e-07],
[ 1.05800317e-08, 9.99995444e-01, 4.54549912e-06],
[ 3.09433480e-09, 9.99978951e-01, 2.10463547e-05],
[ 9.25529647e-10, 9.99905251e-01, 9.47481251e-05],
[ 1.92789944e-07, 9.99999685e-01, 1.21916057e-07],
[ 4.94115602e-05, 9.99950588e-01, 1.21101648e-10],
[ 7.81447411e-09, 9.99993361e-01, 6.63166922e-06],
[ 2.97695285e-07, 9.99999631e-01, 7.09305506e-08],
[ 3.24226072e-08, 9.99998842e-01, 1.12528516e-06],
[ 4.27392158e-08, 9.99999160e-01, 7.97427967e-07],
[ 4.59452664e-04, 9.99540547e-01, 7.50720002e-12],
[ 4.22858293e-08, 9.99999150e-01, 8.08100849e-07],
[ 2.70847094e-26, 4.13427816e-08, 9.99999959e-01],
[ 1.03345356e-18, 6.65624607e-04, 9.99334375e-01],
[ 2.37218813e-22, 6.36828511e-06, 9.99993632e-01],
[ 5.66073971e-19, 4.76658147e-04, 9.99523342e-01],
[ 1.31560301e-23, 1.27957104e-06, 9.99998720e-01],
[ 4.25521605e-25, 1.90616835e-07, 9.99999809e-01],
[ 1.11980477e-14, 1.09604382e-01, 8.90395618e-01],
[ 1.50074234e-21, 1.77246114e-05, 9.99982275e-01],
[ 3.45504953e-21, 2.81532268e-05, 9.99971847e-01],
[ 7.65161563e-25, 2.63976626e-07, 9.99999736e-01],
[ 1.89107898e-16, 1.19244800e-02, 9.88075520e-01],
[ 5.65090581e-19, 4.76198585e-04, 9.99523801e-01],
[ 1.44992009e-20, 6.23962870e-05, 9.99937604e-01],
[ 2.91038759e-20, 9.18475187e-05, 9.99908152e-01],
[ 1.32492366e-23, 1.28459344e-06, 9.99998715e-01],
[ 2.20083178e-21, 2.19201699e-05, 9.99978080e-01],
[ 1.52744732e-17, 2.96366398e-03, 9.97036336e-01],
[ 9.46967965e-24, 1.06618240e-06, 9.99998934e-01],
[ 1.59595014e-28, 2.39415393e-09, 9.99999998e-01],
[ 5.84505693e-15, 7.76200612e-02, 9.22379939e-01],
[ 7.99106985e-23, 3.48186224e-06, 9.99996518e-01],
[ 1.56590712e-18, 8.38185840e-04, 9.99161814e-01],
[ 3.34349936e-25, 1.66744938e-07, 9.99999833e-01],
[ 3.15474552e-15, 5.57056298e-02, 9.44294370e-01],
[ 1.12856928e-20, 5.42974161e-05, 9.99945703e-01],
[ 1.09282260e-18, 6.86572337e-04, 9.99313428e-01],
[ 2.96124617e-14, 1.81129636e-01, 8.18870364e-01],
[ 3.55492752e-14, 1.98548812e-01, 8.01451188e-01],
[ 1.78754800e-22, 5.44290923e-06, 9.99994557e-01],
[ 9.00937884e-16, 2.81480525e-02, 9.71851948e-01],
[ 1.01341406e-21, 1.42546802e-05, 9.99985745e-01],
[ 8.87413796e-20, 1.70497390e-04, 9.99829503e-01],
[ 1.95908602e-23, 1.59595718e-06, 9.99998404e-01],
[ 1.20569901e-12, 7.83523300e-01, 2.16476700e-01],
[ 1.03591996e-15, 3.03841422e-02, 9.69615858e-01],
[ 2.01777153e-24, 4.52107113e-07, 9.99999548e-01],
[ 2.27027959e-23, 1.73200324e-06, 9.99998268e-01],
[ 2.94263762e-17, 4.26181204e-03, 9.95738188e-01],
[ 1.03943413e-13, 3.32051564e-01, 6.67948436e-01],
[ 2.16147268e-19, 2.79403213e-04, 9.99720597e-01],
[ 7.19304204e-24, 9.15302655e-07, 9.99999085e-01],
[ 7.42616434e-20, 1.54451957e-04, 9.99845548e-01],
[ 1.03345356e-18, 6.65624607e-04, 9.99334375e-01],
[ 6.00800204e-24, 8.28288824e-07, 9.99999172e-01],
[ 1.43258291e-24, 3.73852067e-07, 9.99999626e-01],
[ 3.73190193e-21, 2.93834989e-05, 9.99970617e-01],
[ 4.11023391e-18, 1.43146740e-03, 9.98568533e-01],
[ 4.83922300e-18, 1.56711967e-03, 9.98432880e-01],
[ 1.83042506e-21, 1.97894410e-05, 9.99980211e-01],
[ 6.62613124e-16, 2.37833230e-02, 9.76216677e-01]])
```python
x = np.linspace(3.5, 8, 100)
y = np.linspace(1.5, 5, 100)
xs, ys = np.meshgrid(x, y)
Xt = np.vstack((xs.flat, ys.flat)).T
Xt = np.hstack((Xt, np.random.rand(*Xt.shape) * 0.001)) # fill random noise for other columns
classes, Y = clsf.use(Xt)
for k in range(3):
zs = Y[:, k].reshape(xs.shape)
plt.figure(figsize=(6,6))
    plt.imshow(zs, origin='lower', extent=(3.5, 8, 1.5, 5))  # match the linspace ranges above
#plt.contourf(xs, ys, zs.reshape(xs.shape), alpha=0.3)
plt.title("class: " + data.target_names[k])
plt.scatter(data.data[data.target==k, 0], data.data[data.target==k, 1], marker='o')
```
|
{"hexsha": "6f81f00dced9be42e450c3bc18375b2726e81e57", "size": 356271, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "reading_assignments/questions/7_Note-NonlinearLogReg.ipynb", "max_stars_repo_name": "biqar/Fall-2020-ITCS-8156-MachineLearning", "max_stars_repo_head_hexsha": "ce14609327e5fa13f7af7b904a69da3aa3606f37", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reading_assignments/questions/7_Note-NonlinearLogReg.ipynb", "max_issues_repo_name": "biqar/Fall-2020-ITCS-8156-MachineLearning", "max_issues_repo_head_hexsha": "ce14609327e5fa13f7af7b904a69da3aa3606f37", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reading_assignments/questions/7_Note-NonlinearLogReg.ipynb", "max_forks_repo_name": "biqar/Fall-2020-ITCS-8156-MachineLearning", "max_forks_repo_head_hexsha": "ce14609327e5fa13f7af7b904a69da3aa3606f37", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 350.6604330709, "max_line_length": 121600, "alphanum_fraction": 0.9048056115, "converted": true, "num_tokens": 10976}
|
*DECK D9ATN1
DOUBLE PRECISION FUNCTION D9ATN1 (X)
C***BEGIN PROLOGUE D9ATN1
C***SUBSIDIARY
C***PURPOSE Evaluate DATAN(X) from first order relative accuracy so
C that DATAN(X) = X + X**3*D9ATN1(X).
C***LIBRARY SLATEC (FNLIB)
C***CATEGORY C4A
C***TYPE DOUBLE PRECISION (R9ATN1-S, D9ATN1-D)
C***KEYWORDS ARC TANGENT, ELEMENTARY FUNCTIONS, FIRST ORDER, FNLIB,
C TRIGONOMETRIC
C***AUTHOR Fullerton, W., (LANL)
C***DESCRIPTION
C
C Evaluate DATAN(X) from first order, that is, evaluate
C (DATAN(X)-X)/X**3 with relative error accuracy so that
C DATAN(X) = X + X**3*D9ATN1(X).
C
C Series for ATN1 on the interval 0. to 1.00000E+00
C with weighted error 3.39E-32
C log weighted error 31.47
C significant figures required 30.26
C decimal places required 32.27
C
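C
C     Worked example of why the small-argument branch below exists: at
C     X = 1.0D-8, ATAN(X) and X agree to about machine precision, so the
C     direct quotient (ATAN(X)-X)/X**3 is dominated by rounding noise,
C     while the Taylor series ATAN(X) = X - X**3/3 + X**5/5 - ... gives
C     the limiting value -1/3.  Hence D9ATN1 returns -1.0D0/3.0D0 for
C     Y .LE. XSML.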
C***REFERENCES (NONE)
C***ROUTINES CALLED D1MACH, DCSEVL, INITDS, XERMSG
C***REVISION HISTORY (YYMMDD)
C 780401 DATE WRITTEN
C 890531 Changed all specific intrinsics to generic. (WRB)
C 891115 Corrected third argument in reference to INITDS. (WRB)
C 891115 REVISION DATE from Version 3.2
C 891214 Prologue converted to Version 4.0 format. (BAB)
C 900315 CALLs to XERROR changed to CALLs to XERMSG. (THJ)
C 900720 Routine changed from user-callable to subsidiary. (WRB)
C***END PROLOGUE D9ATN1
DOUBLE PRECISION X, XBIG, XMAX, XSML, Y, ATN1CS(40), EPS,
1 DCSEVL, D1MACH
LOGICAL FIRST
SAVE ATN1CS, NTATN1, XSML, XBIG, XMAX, FIRST
DATA ATN1CS( 1) / -.3283997535 3552023569 0793992299 0 D-1 /
DATA ATN1CS( 2) / +.5833432343 1724124499 5166991490 7 D-1 /
DATA ATN1CS( 3) / -.7400369696 7196464638 0901155141 3 D-2 /
DATA ATN1CS( 4) / +.1009784199 3372880835 9035751163 9 D-2 /
DATA ATN1CS( 5) / -.1439787163 5652056214 7130369770 0 D-3 /
DATA ATN1CS( 6) / +.2114512648 9921075720 7211224343 9 D-4 /
DATA ATN1CS( 7) / -.3172321074 2546671674 0256499675 7 D-5 /
DATA ATN1CS( 8) / +.4836620365 4607108253 7785938480 0 D-6 /
DATA ATN1CS( 9) / -.7467746546 8141126704 3761432277 6 D-7 /
DATA ATN1CS( 10) / +.1164800896 8244298306 2099864134 2 D-7 /
DATA ATN1CS( 11) / -.1832088370 8472013926 9995624245 2 D-8 /
DATA ATN1CS( 12) / +.2901908277 9660633131 7535123045 5 D-9 /
DATA ATN1CS( 13) / -.4623885312 1063267383 5180572151 2 D-10 /
DATA ATN1CS( 14) / +.7405528668 7757369179 9219704828 6 D-11 /
DATA ATN1CS( 15) / -.1191354457 8451366823 7082037341 7 D-11 /
DATA ATN1CS( 16) / +.1924090144 3917725998 6785569251 8 D-12 /
DATA ATN1CS( 17) / -.3118271051 0761942722 5447615532 7 D-13 /
DATA ATN1CS( 18) / +.5069240036 5677317896 9452059303 2 D-14 /
DATA ATN1CS( 19) / -.8263694719 8028660538 1828440596 4 D-15 /
DATA ATN1CS( 20) / +.1350486709 8170794205 2650612302 9 D-15 /
DATA ATN1CS( 21) / -.2212023650 4817460458 4013782319 1 D-16 /
DATA ATN1CS( 22) / +.3630654747 3813567838 2904764770 9 D-17 /
DATA ATN1CS( 23) / -.5970345328 8471540524 5121585916 5 D-18 /
DATA ATN1CS( 24) / +.9834816050 0771331194 4832900573 8 D-19 /
DATA ATN1CS( 25) / -.1622655075 8550623361 4438760448 0 D-19 /
DATA ATN1CS( 26) / +.2681186176 9454367963 0132030122 6 D-20 /
DATA ATN1CS( 27) / -.4436309706 7852554796 3624368810 6 D-21 /
DATA ATN1CS( 28) / +.7349691897 6524969450 7246551040 0 D-22 /
DATA ATN1CS( 29) / -.1219077508 3500525882 8940137813 3 D-22 /
DATA ATN1CS( 30) / +.2024298836 8052154031 8454087679 9 D-23 /
DATA ATN1CS( 31) / -.3364871555 7973545799 2557636266 6 D-24 /
DATA ATN1CS( 32) / +.5598673968 3469887494 9293397333 3 D-25 /
DATA ATN1CS( 33) / -.9323939267 2723202296 2853205333 3 D-26 /
DATA ATN1CS( 34) / +.1554133116 9959702229 3480789333 3 D-26 /
DATA ATN1CS( 35) / -.2592569534 1797459227 5742719999 9 D-27 /
DATA ATN1CS( 36) / +.4328193466 2457346850 3790933333 3 D-28 /
DATA ATN1CS( 37) / -.7231013125 5954374711 9240533333 3 D-29 /
DATA ATN1CS( 38) / +.1208902859 8304947729 4216533333 3 D-29 /
DATA ATN1CS( 39) / -.2022404543 4498975793 1519999999 9 D-30 /
DATA ATN1CS( 40) / +.3385428713 0464938430 7370666666 6 D-31 /
DATA FIRST /.TRUE./
C***FIRST EXECUTABLE STATEMENT D9ATN1
IF (FIRST) THEN
EPS = D1MACH(3)
NTATN1 = INITDS (ATN1CS, 40, 0.1*REAL(EPS))
C
XSML = SQRT (0.1D0*EPS)
XBIG = 1.571D0/SQRT(EPS)
XMAX = 1.571D0/EPS
ENDIF
FIRST = .FALSE.
C
Y = ABS(X)
IF (Y.GT.1.0D0) GO TO 20
C
IF (Y.LE.XSML) D9ATN1 = -1.0D0/3.0D0
IF (Y.LE.XSML) RETURN
C
D9ATN1 = -0.25D0 + DCSEVL (2.D0*Y*Y-1.D0, ATN1CS, NTATN1)
RETURN
C
20 IF (Y .GT. XMAX) CALL XERMSG ('SLATEC', 'D9ATN1',
+ 'NO PRECISION IN ANSWER BECAUSE X IS TOO BIG', 2, 2)
IF (Y .GT. XBIG) CALL XERMSG ('SLATEC', 'D9ATN1',
+ 'ANSWER LT HALF PRECISION BECAUSE X IS TOO BIG', 1, 1)
C
D9ATN1 = (ATAN(X) - X) / X**3
RETURN
C
END
|
{"hexsha": "8f64e14b7a7a9280df8f0dd49b419822efc203ba", "size": 5371, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "slatec/src/d9atn1.f", "max_stars_repo_name": "andremirt/v_cond", "max_stars_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slatec/src/d9atn1.f", "max_issues_repo_name": "andremirt/v_cond", "max_issues_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slatec/src/d9atn1.f", "max_forks_repo_name": "andremirt/v_cond", "max_forks_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.8272727273, "max_line_length": 72, "alphanum_fraction": 0.610873208, "num_tokens": 2271}
|
from math import log, sqrt, floor
from typing import List
import numpy as np
from scipy import integrate
from scipy.signal import argrelextrema
from cell_models import protocols, trace
from cell_models.current_models import ExperimentalArtefactsThesis
from cell_models.protocols import VoltageClampProtocol
class CellModel:
"""An implementation a general cell model
Attributes:
default_parameters: A dict containing tunable parameters
updated_parameters: A dict containing all parameters that are being
tuned.
"""
def __init__(self, concentration_indices, y_initial=[],
default_parameters=None, updated_parameters=None,
no_ion_selective_dict=None, default_time_unit='s',
default_voltage_unit='V', default_voltage_position=0,
y_ss=None, is_exp_artefact=False, exp_artefact_params=None):
self.y_initial = y_initial
self.default_parameters = default_parameters
self.no_ion_selective = {}
self.is_no_ion_selective = False
self.default_voltage_position = default_voltage_position
self.y_ss = y_ss
self.concentration_indices = concentration_indices
self.i_stimulation = 0
self.is_exp_artefact = is_exp_artefact
if updated_parameters:
self.default_parameters.update(updated_parameters)
if no_ion_selective_dict:
self.no_ion_selective = no_ion_selective_dict
self.is_no_ion_selective = True
if default_time_unit == 's':
self.time_conversion = 1.0
self.default_unit = 'standard'
else:
self.time_conversion = 1000.0
self.default_unit = 'milli'
if default_voltage_unit == 'V':
self.voltage_conversion = 1
else:
self.voltage_conversion = 1000
self.t = []
self.y_voltage = []
self.d_y_voltage = []
self.current_response_info = None
self.full_y = []
self.exp_artefacts = ExperimentalArtefactsThesis()
if exp_artefact_params is not None:
for k, v in exp_artefact_params.items():
setattr(self.exp_artefacts, k, v)
self.exp_artefacts.g_leak *= default_parameters['G_seal_leak']
v_off_shift = np.log10(default_parameters['V_off']) * 2
v_off = -2.8 + v_off_shift #mV
self.exp_artefacts.v_off += v_off_shift
default_parameters['R_access'] = 1 + .5 * (
default_parameters['R_access']-1)
self.exp_artefacts.r_access *= default_parameters['R_access']
v_cmd_initial = -80 #mV
if is_exp_artefact:
"""
differential equations for Kernik iPSC-CM model
solved by ODE15s in main_ipsc.m
# State variable definitions:
# 0: Vm (millivolt)
# Ionic Flux: ---------------------------------------------------------
# 1: Ca_SR (millimolar)
# 2: Cai (millimolar)
# 3: Nai (millimolar)
# 4: Ki (millimolar)
# Current Gating (dimensionless):--------------------------------------
# 5: y1 (I_K1 Ishihara)
# 6: d (activation in i_CaL)
# 7: f1 (inactivation in i_CaL)
# 8: fCa (calcium-dependent inactivation in i_CaL)
# 9: Xr1 (activation in i_Kr)
# 10: Xr2 (inactivation in i_Kr
# 11: Xs (activation in i_Ks)
# 12: h (inactivation in i_Na)
# 13: j (slow inactivation in i_Na)
# 14: m (activation in i_Na)
# 15: Xf (inactivation in i_f)
# 16: s (inactivation in i_to)
# 17: r (activation in i_to)
# 18: dCaT (activation in i_CaT)
# 19: fCaT (inactivation in i_CaT)
# 20: R (in Irel)
# 21: O (in Irel)
# 22: I (in Irel)
# With experimental artefact --------------------------------------
# 23: Vp (millivolt)
# 24: Vclamp (millivolt)
# 25: Iout (nA)
# 26: Vcmd (millivolt)
# 27: Vest (millivolt)
"""
if default_voltage_unit == 'V':
conversion = 1000
else:
conversion = 1
self.y_initial = np.append(self.y_initial, 0)
self.y_initial = np.append(self.y_initial, 0)
self.y_initial = np.append(self.y_initial, 0)
self.y_initial = np.append(self.y_initial, 0)
self.cmd_index = len(self.y_initial) - 1
v_est = v_cmd_initial/conversion
self.y_initial = np.append(self.y_initial, 0)
@property
def no_ion_selective(self):
return self.__no_ion_selective
@no_ion_selective.setter
def no_ion_selective(self, no_ion_selective):
self.__no_ion_selective = no_ion_selective
def calc_currents(self, exp_target=None):
self.current_response_info = trace.CurrentResponseInfo()
if exp_target is not None:
i_stim = [exp_target.get_current_at_time(t) for t in self.t * 1000 / self.time_conversion]
if len(self.y) < 200:
list(map(self.action_potential_diff_eq, self.t, self.y.transpose()))
else:
list(map(self.action_potential_diff_eq, self.t, self.y))
if exp_target is not None:
            for i, current_timestep in enumerate(self.current_response_info.currents):
for c in current_timestep:
if c.name == 'I_stim':
c.value = i_stim[i]
def generate_response(self, protocol, is_no_ion_selective):
"""Returns a trace based on the specified target objective.
Args:
protocol: A Protocol Object or a TargetObjective Object.
Returns:
A Trace object representing the change in membrane potential over
time.
"""
# Reset instance variables when model is run again.
self.t = []
self.y_voltage = []
self.d_y_voltage = []
self.full_y = []
self.current_response_info = None
self.is_no_ion_selective = is_no_ion_selective
from cell_models.ga.target_objective import TargetObjective
if isinstance(protocol, protocols.SpontaneousProtocol):
return self.generate_spontaneous_response(protocol)
elif isinstance(protocol, protocols.IrregularPacingProtocol):
return self.generate_irregular_pacing_response(protocol)
elif isinstance(protocol, protocols.VoltageClampProtocol):
return self.generate_VC_protocol_response(protocol)
elif isinstance(protocol, protocols.PacedProtocol):
return self.generate_pacing_response(protocol)
elif isinstance(protocol, protocols.AperiodicPacingProtocol):
return self.generate_aperiodic_pacing_response(protocol)
#This means, the input is a Target Objective
elif isinstance(protocol, TargetObjective):
#This is if the input Target Objective is a protocol
if protocol.target_protocol is not None:
if protocol.g_ishi is not None:
self.no_ion_selective = {'I_K1_Ishi': protocol.g_ishi}
is_no_ion_selective = True
return self.generate_response(protocol.target_protocol,
is_no_ion_selective)
#These are if the input Target Objective is exp data
elif protocol.protocol_type == "Voltage Clamp":
return self.generate_exp_voltage_clamp(protocol)
elif protocol.protocol_type == "Dynamic Clamp":
return self.generate_exp_current_clamp(protocol)
def find_steady_state(self, ss_type=None, from_peak=False, tol=1E-3,
max_iters=140):
"""
Finds the steady state conditions for a spontaneous or stimulated
(in the case of OR) AP
"""
if self.y_ss is not None:
return
if (ss_type is None):
protocol = protocols.VoltageClampProtocol(
[protocols.VoltageClampStep(voltage=-80.0, duration=10000)])
concentration_indices = list(self.concentration_indices.values())
is_err = True
i = 0
y_values = []
import time
outer_time = time.time()
while is_err:
init_t = time.time()
tr = self.generate_response(protocol, is_no_ion_selective=False)
if isinstance(protocol, protocols.VoltageClampProtocol):
y_val = self.y[:, -1]
else:
y_val = self.get_last_min_max(from_peak)
self.y_initial = self.y[:, -1]
self.t = []
y_values.append(y_val)
y_percent = []
if len(y_values) > 2:
y_percent = np.abs((y_values[i][concentration_indices] -
y_values[i - 1][concentration_indices]) / (
y_values[i][concentration_indices]))
is_below_tol = (y_percent < tol)
is_err = not is_below_tol.all()
if i >= max_iters:
#print("Did not reach steady state. Setting y_ss to last iter.")
self.y_ss = y_val
return
i = i + 1
if i > 10:
print(
f'Iteration {i}; {time.time() - init_t} seconds; {y_percent}')
self.y_ss = y_values[-1]
print(f'Total Time: {time.time() - outer_time}')
return [tr, i]
def get_last_min_max(self, from_peak):
if from_peak:
inds = argrelextrema(self.y_voltage, np.less)
last_peak_time = self.t[inds[0][-2]]
ss_time = last_peak_time - .04*self.time_conversion
y_val_idx = np.abs(self.t - ss_time).argmin()
else:
inds = argrelextrema(self.y_voltage, np.less)
y_val_idx = inds[0][-2]
try:
y_val = self.y[:,y_val_idx]
except:
y_val = self.y[y_val_idx,:]
return y_val
def generate_spontaneous_function(self):
def spontaneous(t, y):
return self.action_potential_diff_eq(t, y)
return spontaneous
def generate_spontaneous_response(self, protocol):
"""
Args:
protocol: An object of a specified protocol.
Returns:
A single action potential trace
"""
if self.y_ss is not None:
y_init = self.y_ss
else:
y_init = self.y_initial
solution = integrate.solve_ivp(
self.generate_spontaneous_function(),
[0, protocol.duration * self.time_conversion * 1e-3],
y_init,
method='BDF',
max_step=1e-3*self.time_conversion)
self.t = solution.t
self.y = solution.y.transpose()
self.y_initial = self.y[-1]
self.y_voltage = solution.y[self.default_voltage_position,:]
self.calc_currents()
return trace.Trace(protocol,
self.default_parameters,
self.t,
self.y_voltage,
current_response_info=self.current_response_info,
default_unit=self.default_unit)
def generate_irregular_pacing_response(self, protocol):
"""
Args:
protocol: An irregular pacing protocol
Returns:
A irregular pacing trace
"""
if self.y_ss is not None:
y_init = self.y_ss
else:
y_init = self.y_initial
pacing_info = trace.IrregularPacingInfo()
try:
solution = integrate.solve_ivp(self.generate_irregular_pacing_function(
protocol, pacing_info), [0, protocol.duration],
y_init,
method='BDF',
max_step=1e-3*self.time_conversion)
self.t = solution.t
self.y = solution.y
self.y_initial = self.y[-1]
self.y_voltage = solution.y[self.default_voltage_position,:]
self.calc_currents()
except ValueError:
return None
        return trace.Trace(protocol, self.default_parameters, self.t,
self.y_voltage, pacing_info=pacing_info,
default_unit=self.default_unit)
def generate_irregular_pacing_function(self, protocol, pacing_info):
offset_times = protocol.make_offset_generator()
def irregular_pacing(t, y):
d_y = self.action_potential_diff_eq(t, y)
if pacing_info.detect_peak(self.t, y[0], self.d_y_voltage):
pacing_info.peaks.append(t)
voltage_diff = abs(pacing_info.AVG_AP_START_VOLTAGE - y[0])
pacing_info.apd_90_end_voltage = y[0] - voltage_diff * 0.9
if pacing_info.detect_apd_90(y[0]):
try:
pacing_info.add_apd_90(t)
pacing_info.stimulations.append(t + next(offset_times))
except StopIteration:
pass
if pacing_info.should_stimulate(t):
i_stimulation = protocol.STIM_AMPLITUDE_AMPS / self.cm_farad
else:
i_stimulation = 0.0
d_y[0] += i_stimulation
return d_y
return irregular_pacing
def generate_VC_protocol_response(self, protocol):
"""
Args:
protocol: A voltage clamp protocol
Returns:
A Trace object for a voltage clamp protocol
"""
if self.y_ss is not None:
y_init = self.y_ss
else:
y_init = self.y_initial
self.current_response_info = trace.CurrentResponseInfo(
protocol=protocol)
solution = integrate.solve_ivp(
self.generate_voltage_clamp_function(protocol),
[0, protocol.get_voltage_change_endpoints()[-1] /
1E3 * self.time_conversion],
y_init,
method='BDF',
max_step=1E-3*self.time_conversion,
atol=1E-2, rtol=1E-4)
self.t = solution.t
self.y = solution.y
command_voltages = [protocol.get_voltage_at_time(t *
1E3 / self.time_conversion) / 1E3 * self.time_conversion
for t in self.t]
self.command_voltages = command_voltages
if self.is_exp_artefact:
self.y_voltages = self.y[0, :]
else:
self.y_voltages = command_voltages
self.calc_currents()
return trace.Trace(protocol,
self.default_parameters,
self.t,
command_voltages=self.command_voltages,
y=self.y_voltages,
current_response_info=self.current_response_info,
default_unit=self.default_unit)
def generate_voltage_clamp_function(self, protocol):
def voltage_clamp(t, y):
if self.is_exp_artefact:
try:
y[self.cmd_index] = protocol.get_voltage_at_time(t * 1e3 / self.time_conversion)
# Breaks if Vcmd = 0
if y[self.cmd_index] == 0:
y[self.cmd_index] = .1
except:
y[self.cmd_index] = 2000
y[self.cmd_index] /= (1E3 / self.time_conversion)
else:
try:
y[self.default_voltage_position] = protocol.get_voltage_at_time(t * 1E3 / self.time_conversion)
#Can't handle Vcmd = 0
if y[self.default_voltage_position] == 0:
y[self.default_voltage_position] = .1
except:
y[self.default_voltage_position] = 2000
y[self.default_voltage_position] /= (1E3 / self.time_conversion)
return self.action_potential_diff_eq(t, y)
return voltage_clamp
def generate_pacing_response(self, protocol):
"""
Args:
protocol: A pacing protocol
Returns:
A pacing trace
"""
if self.y_ss is not None:
y_init = self.y_ss
else:
y_init = self.y_initial
pacing_info = trace.IrregularPacingInfo()
solution = integrate.solve_ivp(self.generate_pacing_function(
protocol), [0, protocol.stim_end * self.time_conversion * 1e-3],
y_init,
method='LSODA',
max_step=8e-4*self.time_conversion)
self.t = solution.t
self.y = solution.y
self.y_initial = self.y[:,-1]
self.y_voltage = solution.y[self.default_voltage_position,:]
self.calc_currents()
return trace.Trace(protocol,
self.default_parameters,
self.t, self.y_voltage, pacing_info=pacing_info,
current_response_info=self.current_response_info,
default_unit=self.default_unit)
def generate_pacing_function(self, protocol):
stim_amplitude = protocol.stim_amplitude * 1E-3 * self.time_conversion
stim_start = protocol.stim_start * 1E-3 * self.time_conversion
stim_duration = protocol.stim_duration * 1E-3 * self.time_conversion
stim_end = protocol.stim_end * 1E-3 * self.time_conversion
i_stim_period = self.time_conversion / protocol.pace
if self.time_conversion == 1:
denom = 1E9
else:
denom = 1
def pacing(t, y):
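            # deliver the stimulus amplitude whenever the elapsed time since
            # stim_start, reduced modulo the pacing period, falls inside the
            # stimulus window and t itself lies within [stim_start, stim_end]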
self.i_stimulation = (stim_amplitude if t - stim_start -\
i_stim_period*floor((t - stim_start)/i_stim_period) <=\
stim_duration and t <= stim_end and t >= stim_start else\
0) / self.cm_farad / denom
d_y = self.action_potential_diff_eq(t, y)
return d_y
return pacing
def generate_aperiodic_pacing_response(self, protocol):
"""
Args:
protocol: A pacing protocol
Returns:
A pacing trace
"""
if self.y_ss is not None:
y_init = self.y_ss
else:
y_init = self.y_initial
solution = integrate.solve_ivp(self.generate_aperiodic_pacing_function(
protocol), [0, protocol.duration / 1E3 * self.time_conversion],
y_init,
method='BDF',
max_step=1e-3*self.time_conversion)
self.t = solution.t
self.y = solution.y
self.y_initial = self.y[:,-1]
self.y_voltage = solution.y[self.default_voltage_position,:]
self.calc_currents()
return trace.Trace(protocol, self.default_parameters, self.t,
self.y_voltage, current_response_info=self.current_response_info,
default_unit=self.default_unit)
def generate_aperiodic_pacing_function(self, protocol):
def pacing(t, y):
for t_start in protocol.stim_starts:
t_start = t_start / 1000 * self.time_conversion
t_end = t_start + (protocol.stim_duration /
1000 * self.time_conversion)
if (t > t_start) and (t < t_end):
self.i_stimulation = protocol.stim_amplitude
break
else:
self.i_stimulation = 0
d_y = self.action_potential_diff_eq(t, y)
return d_y
return pacing
def generate_exp_voltage_clamp(self, exp_target):
"""
Args:
protocol: A voltage clamp protocol
Returns:
A Trace object for a voltage clamp protocol
"""
if self.y_ss is not None:
y_init = self.y_ss
else:
y_init = self.y_initial
self.current_response_info = trace.CurrentResponseInfo(
protocol=exp_target)
solution = integrate.solve_ivp(
self.generate_exp_voltage_clamp_function(exp_target),
[0, floor(exp_target.time.max()) /
1E3 * self.time_conversion],
y_init,
method='BDF',
max_step=1e-3*self.time_conversion,
atol=1E-2, rtol=1E-4)
self.t = solution.t
self.y = solution.y
command_voltages = [exp_target.get_voltage_at_time(t *
1E3 / self.time_conversion) / 1E3 * self.time_conversion
for t in self.t]
self.command_voltages = command_voltages
if self.is_exp_artefact:
self.y_voltages = self.y[0, :]
else:
self.y_voltages = command_voltages
self.calc_currents()
#import matplotlib.pyplot as plt
#plt.plot(self.t, self.command_voltages)
#plt.plot(self.t, self.y_voltages)
#plt.show()
return trace.Trace(exp_target, self.default_parameters, self.t,
command_voltages=self.command_voltages,
y=self.y_voltages,
current_response_info=self.current_response_info,
default_unit=self.default_unit)
def generate_exp_voltage_clamp_function(self, exp_target):
def voltage_clamp(t, y):
if self.is_exp_artefact:
try:
                    y[self.cmd_index] = exp_target.get_voltage_at_time(t * 1e3 / self.time_conversion)
                    # mirror generate_voltage_clamp_function: Vcmd = 0 breaks the solver
                    if y[self.cmd_index] == 0:
                        y[self.cmd_index] = .1
                except:
                    y[self.cmd_index] = 2000
                y[self.cmd_index] /= (1E3 / self.time_conversion)
else:
try:
y[self.default_voltage_position] = exp_target.get_voltage_at_time(t * 1E3 / self.time_conversion)
if y[self.default_voltage_position] == 0:
y[self.default_voltage_position] = .1
except:
y[self.default_voltage_position] = 2000
y[self.default_voltage_position] /= (1E3 / self.time_conversion)
return self.action_potential_diff_eq(t, y)
return voltage_clamp
def generate_exp_current_clamp(self, exp_target):
"""
Args:
protocol: A voltage clamp protocol
Returns:
A Trace object for a voltage clamp protocol
"""
if self.y_ss is not None:
y_init = self.y_ss
else:
y_init = self.y_initial
self.current_response_info = trace.CurrentResponseInfo(
protocol=exp_target)
solution = integrate.solve_ivp(
self.generate_exp_dynamic_clamp_function(exp_target),
[0, floor(exp_target.time.max()) /
1E3 * self.time_conversion],
y_init,
method='BDF',
max_step=1e-3*self.time_conversion)
self.t = solution.t
self.y = solution.y
self.y_voltages = self.y[0, :]
self.calc_currents(exp_target)
        voltages_offset_added = (self.y_voltages +
            self.exp_artefacts.v_off / 1000 * self.time_conversion)
return trace.Trace(exp_target, self.t,
y=voltages_offset_added,
current_response_info=self.current_response_info,
voltages_with_offset=self.y_voltages,
default_unit=self.default_unit)
def generate_exp_dynamic_clamp_function(self, exp_target):
def dynamic_clamp(t, y):
            r_access = self.exp_artefacts.r_access
            r_seal = 1 / self.exp_artefacts.g_leak
i_access_proportion = r_seal / (r_seal + r_access)
self.i_stimulation = (-exp_target.get_current_at_time(t * 1000 /
self.time_conversion) *
i_access_proportion)
d_y = self.action_potential_diff_eq(t, y)
return d_y
return dynamic_clamp
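# Illustrative usage sketch (hypothetical: CellModel needs a concrete
# subclass that implements `action_potential_diff_eq`; the names below are
# placeholders, not part of this module):
#
#     model = MyConcreteCellModel(...)
#     protocol = VoltageClampProtocol(
#         [protocols.VoltageClampStep(voltage=-80.0, duration=1000.0)])
#     tr = model.generate_response(protocol, is_no_ion_selective=False)
#
# `generate_response` dispatches on the protocol type (spontaneous,
# irregular pacing, voltage clamp, paced, aperiodic pacing, or a
# TargetObjective) and returns a `trace.Trace`.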
|
{"hexsha": "2946b50c5b6bea7c4c3629d161fe3b46c991fd9e", "size": 24351, "ext": "py", "lang": "Python", "max_stars_repo_path": "cell_models/cell_model.py", "max_stars_repo_name": "Christini-Lab/cell-models", "max_stars_repo_head_hexsha": "a2a4b2b8678e3846b77b48c93eb1df080281490e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cell_models/cell_model.py", "max_issues_repo_name": "Christini-Lab/cell-models", "max_issues_repo_head_hexsha": "a2a4b2b8678e3846b77b48c93eb1df080281490e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2022-02-15T00:07:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T02:42:50.000Z", "max_forks_repo_path": "Lib/cell_models/cell_model.py", "max_forks_repo_name": "hannania7/Electrophysiology", "max_forks_repo_head_hexsha": "b5a8643e21f4204a96e82d81671a6d52948fab48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2276295133, "max_line_length": 117, "alphanum_fraction": 0.566547575, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5285}
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import os
from datetime import datetime
from IPython.core.display import ProgressBar
import numpy as np
import pandas as pd
from gql import gql, Client, AIOHTTPTransport
import asyncio
TOP = 100
TOKEN = "bearer " + os.getenv("GH_GQL_API_TOKEN")
# Select your transport with a GitHub url endpoint
transport = AIOHTTPTransport(
url="https://api.github.com/graphql", headers={"Authorization": TOKEN}
)
client = Client(transport=transport, fetch_schema_from_transport=True,)
def progress(percent=0, width=30):
"Print simple progress bar"
left = int(width * percent // 100)
right = width - left
print(
"\r[",
"=" * left,
" " * right,
"]",
f" {percent:.0f}%",
sep="",
end="",
flush=True,
)
async def main():
# Using `async with` on the client will start a connection on the transport
# and provide a `session` variable to execute queries on this connection
async with client as session:
# Provide a GraphQL query
query = gql(
"""
query search($query: String!, $type: SearchType!, $numOfResults: Int!, $nextPageCursor: String) {
search(type: $type, query: $query, first: $numOfResults, after: $nextPageCursor) {
pageInfo {
hasNextPage
endCursor
}
userCount
nodes {
... on User {
name
login
location
bio
avatarUrl
followers {
totalCount
}
contributionsCollection {
contributionCalendar{
totalContributions
}
totalCommitContributions
totalPullRequestContributions
restrictedContributionsCount
}
}
}
}
}
"""
)
params = {"query": "location:Cuba", "type": "USER", "numOfResults": 20}
print("Getting users...")
result_users = []
result_count = 0
result_next = True
first = True
pb = ProgressBar(result_count)
while result_next:
try:
result = await session.execute(query, variable_values=params)
except asyncio.exceptions.TimeoutError:
continue
result_users += result.get("search").get("nodes", [])
result_count = result.get("search").get("userCount", 0)
if first:
pb.total = result_count
first = False
pb._progress = len(result_users)
result_next = result.get("search").get("pageInfo").get("hasNextPage", False)
params["nextPageCursor"] = (
result.get("search").get("pageInfo").get("endCursor")
)
progress(len(result_users) * 100 / result_count)
await asyncio.sleep(1)
users = result_users
print("\nTotal GitHub Users from Cuba: %s" % len(users))
try:
assert len(users) > 0
except AssertionError:
raise Exception("ERROR: Unavailable users!")
for user in users:
if user:
user["followers"] = user.get("followers").get("totalCount", 0)
user["contributions"] = user.get("contributionsCollection").get(
"contributionCalendar"
).get("totalContributions", 0) - user.get(
"contributionsCollection"
).get(
"restrictedContributionsCount", 0
)
user["commits"] = user.get("contributionsCollection").get(
"totalCommitContributions", 0
) + user.get("contributionsCollection").get(
"totalPullRequestContributions", 0
)
del user["contributionsCollection"]
df = pd.DataFrame(users)
df.dropna(how="all", inplace=True)
# df = df.reset_index(drop=True)
df = df.sort_values(by="commits", ascending=False)
# Top Ten Cuba
new_dtypes = {
"followers": np.int64,
"contributions": np.int64,
"commits": np.int64,
}
position = ["\U0001F947", "\U0001F948", "\U0001F949"] + list(range(4, TOP + 1))
df_top_ten = df[:TOP]
df_top_ten = df_top_ten.astype(new_dtypes)
# Clean
df_top_ten.fillna("", inplace=True)
# Re-Order Columns
df_top_ten = df_top_ten[
[
"name",
"login",
"location",
"commits",
"contributions",
"followers",
"bio",
"avatarUrl",
]
]
df_top_ten.insert(0, "#", position)
print("Top %s of %s GitHub Users from Cuba" % (TOP, len(users)))
print("Generated at: %s UTC" % datetime.utcnow())
table_name = "table_cuba_contributions.html"
df_top_ten.to_html(buf=table_name, index=False)
print("Saved table: %s" % table_name)
asyncio.run(main())
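# Illustrative invocation (the token scope is an assumption; any personal
# access token able to query public user data should work):
#
#     GH_GQL_API_TOKEN=<your-token> python contributions.py
#
# The search is paginated 20 users at a time via the `hasNextPage` /
# `endCursor` cursor fields returned by the GraphQL search connection.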
|
{"hexsha": "9a884625895381c9903a18d692d86f46caa92fdb", "size": 5502, "ext": "py", "lang": "Python", "max_stars_repo_path": "contributions.py", "max_stars_repo_name": "oleksis/github-cuba", "max_stars_repo_head_hexsha": "bbb2aa4b52cdcaa74bff0532a627dc715f65ead5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "contributions.py", "max_issues_repo_name": "oleksis/github-cuba", "max_issues_repo_head_hexsha": "bbb2aa4b52cdcaa74bff0532a627dc715f65ead5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "contributions.py", "max_forks_repo_name": "oleksis/github-cuba", "max_forks_repo_head_hexsha": "bbb2aa4b52cdcaa74bff0532a627dc715f65ead5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9101123596, "max_line_length": 109, "alphanum_fraction": 0.5119956379, "include": true, "reason": "import numpy", "num_tokens": 1153}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from regions import PixCoord
from gammapy.datasets import SpectrumDatasetOnOff
from gammapy.maps import RegionGeom, RegionNDMap, WcsNDMap
from gammapy.utils.regions import list_to_compound_region
from ..core import Maker
__all__ = ["ReflectedRegionsFinder", "ReflectedRegionsBackgroundMaker"]
log = logging.getLogger(__name__)
class ReflectedRegionsFinder:
"""Find reflected regions.
This class is responsible for placing :ref:`region_reflected` for a given
input region and pointing position. It converts to pixel coordinates
internally assuming a tangent projection at center position.
If the center lies inside the input region, no reflected regions
can be found.
If you want to make a
background estimate for an IACT observation using the reflected regions
method, see also `~gammapy.makers.ReflectedRegionsBackgroundMaker`
Parameters
----------
region : `~regions.SkyRegion`
Region to rotate
center : `~astropy.coordinates.SkyCoord`
Rotation point
angle_increment : `~astropy.coordinates.Angle`, optional
Rotation angle applied when a region falls in an excluded region.
min_distance : `~astropy.coordinates.Angle`, optional
Minimal distance between two consecutive reflected regions
min_distance_input : `~astropy.coordinates.Angle`, optional
Minimal distance from input region
max_region_number : int, optional
Maximum number of regions to use
exclusion_mask : `~gammapy.maps.WcsNDMap`, optional
Exclusion mask
binsz : `~astropy.coordinates.Angle`
Bin size of the reference map used for region finding.
Examples
--------
>>> from astropy.coordinates import SkyCoord, Angle
>>> from regions import CircleSkyRegion
>>> from gammapy.makers import ReflectedRegionsFinder
>>> pointing = SkyCoord(83.2, 22.7, unit='deg', frame='icrs')
>>> target_position = SkyCoord(80.2, 23.5, unit='deg', frame='icrs')
>>> theta = Angle(0.4, 'deg')
>>> on_region = CircleSkyRegion(target_position, theta)
>>> finder = ReflectedRegionsFinder(min_distance_input='1 rad', region=on_region, center=pointing)
>>> finder.run()
>>> print(finder.reflected_regions[0])
Region: CircleSkyRegion
center: <SkyCoord (ICRS): (ra, dec) in deg
(83.19879005, 25.57300957)>
radius: 0.39953342830756855 deg
"""
def __init__(
self,
region,
center,
angle_increment="0.1 rad",
min_distance="0 rad",
min_distance_input="0.1 rad",
max_region_number=10000,
exclusion_mask=None,
binsz="0.01 deg",
):
self.region = region
self.center = center
self.angle_increment = Angle(angle_increment)
if self.angle_increment <= Angle(0, "deg"):
raise ValueError("angle_increment is too small")
self.min_distance = Angle(min_distance)
self.min_distance_input = Angle(min_distance_input)
self.exclusion_mask = exclusion_mask
self.max_region_number = max_region_number
self.reflected_regions = None
self.reference_map = None
self.binsz = Angle(binsz)
def run(self):
"""Run all steps.
"""
self.reference_map = self.make_reference_map(
self.region, self.center, self.binsz
)
if self.exclusion_mask:
coords = self.reference_map.geom.get_coord()
vals = self.exclusion_mask.get_by_coord(coords)
self.reference_map.data += vals
else:
self.reference_map.data += 1
# Check if center is contained in region
if self.region.contains(self.center, self.reference_map.geom.wcs):
self.reflected_regions = []
else:
self.setup()
self.find_regions()
@staticmethod
def make_reference_map(region, center, binsz="0.01 deg", min_width="0.3 deg"):
"""Create empty reference map.
The size of the map is chosen such that all reflected regions are
contained on the image.
To do so, the reference map width is taken to be 4 times the distance between
the target region center and the rotation point. This distance is larger than
the typical dimension of the region itself (otherwise the rotation point would
lie inside the region). A minimal width value is added by default in case the
region center and the rotation center are too close.
The WCS of the map is the TAN projection at the `center` in the coordinate
system used by the `region` center.
Parameters
----------
region : `~regions.SkyRegion`
Region to rotate
center : `~astropy.coordinates.SkyCoord`
Rotation point
binsz : `~astropy.coordinates.Angle`
Reference map bin size.
min_width : `~astropy.coordinates.Angle`
Minimal map width.
Returns
-------
reference_map : `~gammapy.maps.WcsNDMap`
Map containing the region
"""
frame = region.center.frame.name
# width is the full width of an image (not the radius)
width = 4 * region.center.separation(center) + Angle(min_width)
return WcsNDMap.create(
skydir=center, binsz=binsz, width=width, frame=frame, proj="TAN"
)
@staticmethod
def _region_angular_size(pixels, center):
"""Compute maximum angular size of a group of pixels as seen from center.
This assumes that the center lies outside the group of pixel
Parameters
----------
pixels : `~astropy.regions.PixCoord`
the pixels coordinates
center : `~astropy.regions.PixCoord`
the center coordinate in pixels
Returns
-------
angular_size : `~astropy.coordinates.Angle`
the maximum angular size
"""
newX, newY = center.x - pixels.x, center.y - pixels.y
angles = Angle(np.arctan2(newX, newY), "rad")
angular_size = np.max(angles) - np.min(angles)
if angular_size.value > np.pi:
angular_size = np.max(angles.wrap_at(0 * u.rad)) - np.min(
angles.wrap_at(0 * u.rad)
)
return angular_size
def setup(self):
"""Compute parameters for reflected regions algorithm."""
geom = self.reference_map.geom
self._pix_region = self.region.to_pixel(geom.wcs)
self._pix_center = PixCoord.from_sky(self.center, geom.wcs)
# Make the ON reference map
mask = geom.region_mask([self.region], inside=True).data
# on_reference_map = WcsNDMap(geom=geom, data=mask)
# Extract all pixcoords in the geom
X, Y = geom.get_pix()
ONpixels = PixCoord(X[mask], Y[mask])
# find excluded PixCoords
mask = self.reference_map.data == 0
self.excluded_pixcoords = PixCoord(X[mask], Y[mask])
# Minimum angle a region has to be moved to not overlap with previous one
min_ang = self._region_angular_size(ONpixels, self._pix_center)
# Add required minimal distance between two off regions
self._min_ang = min_ang + self.min_distance
# Maximum possible angle before regions is reached again
self._max_angle = Angle("360deg") - self._min_ang - self.min_distance_input
def find_regions(self):
"""Find reflected regions."""
curr_angle = self._min_ang + self.min_distance_input
reflected_regions = []
while curr_angle < self._max_angle:
test_reg = self._pix_region.rotate(self._pix_center, curr_angle)
if not np.any(test_reg.contains(self.excluded_pixcoords)):
region = test_reg.to_sky(self.reference_map.geom.wcs)
reflected_regions.append(region)
curr_angle += self._min_ang
if self.max_region_number <= len(reflected_regions):
break
else:
curr_angle = curr_angle + self.angle_increment
self.reflected_regions = reflected_regions
def plot(self, fig=None, ax=None):
"""Standard debug plot.
        See example here: :ref:`regions_reflected`.
"""
fig, ax, cbar = self.reference_map.plot(
fig=fig, ax=ax, cmap="gray", vmin=0, vmax=1
)
wcs = self.reference_map.geom.wcs
on_patch = self.region.to_pixel(wcs=wcs).as_artist(edgecolor="red", alpha=0.6)
ax.add_patch(on_patch)
for off in self.reflected_regions:
tmp = off.to_pixel(wcs=wcs)
off_patch = tmp.as_artist(edgecolor="blue", alpha=0.6)
ax.add_patch(off_patch)
xx, yy = self.center.to_pixel(wcs)
ax.plot(xx, yy, marker="+", color="green", markersize=20, linewidth=5)
return fig, ax
class ReflectedRegionsBackgroundMaker(Maker):
"""Reflected regions background maker.
Parameters
----------
angle_increment : `~astropy.coordinates.Angle`, optional
Rotation angle applied when a region falls in an excluded region.
min_distance : `~astropy.coordinates.Angle`, optional
Minimal distance between two consecutive reflected regions
min_distance_input : `~astropy.coordinates.Angle`, optional
Minimal distance from input region
max_region_number : int, optional
Maximum number of regions to use
exclusion_mask : `~gammapy.maps.WcsNDMap`, optional
Exclusion mask
binsz : `~astropy.coordinates.Angle`
Bin size of the reference map used for region finding.
"""
tag = "ReflectedRegionsBackgroundMaker"
def __init__(
self,
angle_increment="0.1 rad",
min_distance="0 rad",
min_distance_input="0.1 rad",
max_region_number=10000,
exclusion_mask=None,
binsz="0.01 deg",
):
self.binsz = binsz
self.exclusion_mask = exclusion_mask
self.angle_increment = Angle(angle_increment)
self.min_distance = Angle(min_distance)
self.min_distance_input = Angle(min_distance_input)
self.max_region_number = max_region_number
def _get_finder(self, dataset, observation):
return ReflectedRegionsFinder(
binsz=self.binsz,
exclusion_mask=self.exclusion_mask,
center=observation.pointing_radec,
region=dataset.counts.geom.region,
min_distance=self.min_distance,
min_distance_input=self.min_distance_input,
max_region_number=self.max_region_number,
angle_increment=self.angle_increment,
)
def make_counts_off(self, dataset, observation):
"""Make off counts.
Parameters
----------
dataset : `SpectrumDataset`
Spectrum dataset.
observation : `DatastoreObservation`
Data store observation.
Returns
-------
counts_off : `RegionNDMap`
Off counts.
"""
finder = self._get_finder(dataset, observation)
finder.run()
energy_axis = dataset.counts.geom.axes["energy"]
if len(finder.reflected_regions) > 0:
region_union = list_to_compound_region(finder.reflected_regions)
wcs = finder.reference_map.geom.wcs
geom = RegionGeom.create(region=region_union, axes=[energy_axis], wcs=wcs)
counts_off = RegionNDMap.from_geom(geom=geom)
counts_off.fill_events(observation.events)
acceptance_off = len(finder.reflected_regions)
else:
# if no OFF regions are found, off is set to None and acceptance_off to zero
log.warning(
f"ReflectedRegionsBackgroundMaker failed. No OFF region found outside exclusion mask for {dataset.name}."
)
counts_off = None
acceptance_off = 0
return counts_off, acceptance_off
def run(self, dataset, observation):
"""Run reflected regions background maker
Parameters
----------
dataset : `SpectrumDataset`
Spectrum dataset.
observation : `DatastoreObservation`
Data store observation.
Returns
-------
dataset_on_off : `SpectrumDatasetOnOff`
On off dataset.
"""
counts_off, acceptance_off = self.make_counts_off(dataset, observation)
return SpectrumDatasetOnOff.from_spectrum_dataset(
dataset=dataset,
acceptance=1,
acceptance_off=acceptance_off,
counts_off=counts_off,
)
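# A minimal usage sketch, assuming a `SpectrumDataset` and observation prepared
# elsewhere with the standard gammapy objects named in the docstrings above;
# the variable names are placeholders, not part of this module:
#
#     maker = ReflectedRegionsBackgroundMaker(exclusion_mask=exclusion_mask)
#     dataset_on_off = maker.run(dataset, observation)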
|
{"hexsha": "425772fbbd1c9b1b3fd0dbf9f290487b5c9d2560", "size": 12881, "ext": "py", "lang": "Python", "max_stars_repo_path": "gammapy/makers/background/reflected.py", "max_stars_repo_name": "kabartay/gammapy", "max_stars_repo_head_hexsha": "015206d2418b1d254f1c9d3ea819ab0c5ece99e9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-02T21:35:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-02T21:35:27.000Z", "max_issues_repo_path": "gammapy/makers/background/reflected.py", "max_issues_repo_name": "kabartay/gammapy", "max_issues_repo_head_hexsha": "015206d2418b1d254f1c9d3ea819ab0c5ece99e9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gammapy/makers/background/reflected.py", "max_forks_repo_name": "kabartay/gammapy", "max_forks_repo_head_hexsha": "015206d2418b1d254f1c9d3ea819ab0c5ece99e9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4848484848, "max_line_length": 121, "alphanum_fraction": 0.6383821132, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2798}
|
import cv2
import numpy as np
import time
import autopy
import mediapipe as mp
import math
import wx
import threading
thread = None
# noinspection PyAttributeOutsideInit,PyShadowingNames
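# Kill-able thread: start() swaps run() for a wrapper that installs a trace
# function via sys.settrace; once `killed` is set, the next traced line inside
# the thread raises SystemExit, ending it cooperatively.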
class TraceThread(threading.Thread):
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
self._run = self.run
self.run = self.settrace_and_run
threading.Thread.start(self)
def settrace_and_run(self):
import sys
sys.settrace(self.globaltrace)
self._run()
def globaltrace(self, frame, event, arg):
return self.localtrace if event == 'call' else None
def localtrace(self, frame, event, arg):
if self.killed and event == 'line':
raise SystemExit()
return self.localtrace
class handDetector:
def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands,
self.detectionCon, self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
self.tipIds = [4, 8, 12, 16, 20]
def findHands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
# print(results.multi_hand_landmarks)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms,
self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo=0, draw=True):
xList = []
yList = []
bbox = []
self.lmList = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
# print(id, lm)
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
xList.append(cx)
yList.append(cy)
# print(id, cx, cy)
self.lmList.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
xmin, xmax = min(xList), max(xList)
ymin, ymax = min(yList), max(yList)
bbox = xmin, ymin, xmax, ymax
if draw:
cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
(0, 255, 0), 2)
return self.lmList, bbox
def fingersUp(self):
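        # A finger counts as "up" when its tip landmark has a smaller y than
        # the joint two landmarks below it; the thumb is judged on x instead,
        # which assumes a roughly upright hand facing the camera.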
fingers = []
# Thumb
try:
if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
# Fingers
for id in range(1, 5):
if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
fingers.append(1)
else:
fingers.append(0)
except IndexError:
pass
# totalFingers = fingers.count(1)
return fingers
def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
x1, y1 = self.lmList[p1][1:]
x2, y2 = self.lmList[p2][1:]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
length = math.hypot(x2 - x1, y2 - y1)
return length, img, [x1, y1, x2, y2, cx, cy]
class Mouse:
def __init__(self):
self.right_down = False
self.left_down = False
self.middle_down = False
def stop_all(self):
if self.left_down:
autopy.mouse.toggle(autopy.mouse.Button.LEFT, False)
self.left_down = False
if self.right_down:
autopy.mouse.toggle(autopy.mouse.Button.RIGHT, False)
self.right_down = False
if self.middle_down:
autopy.mouse.toggle(autopy.mouse.Button.MIDDLE, False)
self.middle_down = False
def left_click(self, command):
autopy.mouse.toggle(autopy.mouse.Button.RIGHT, False)
if command == 1:
self.stop_all()
autopy.mouse.toggle(autopy.mouse.Button.LEFT, True)
self.left_down = True
return
if command == 3:
autopy.mouse.toggle(autopy.mouse.Button.LEFT, True)
self.left_down = True
self.right_down = True
def right_click(self, len1):
if len1 >= 40:
print("Right button down")
self.stop_all()
autopy.mouse.toggle(autopy.mouse.Button.RIGHT, True)
self.right_down = True
return
def middle_click(self):
autopy.mouse.toggle(autopy.mouse.Button.RIGHT, False)
self.stop_all()
if not self.middle_down:
autopy.mouse.toggle(autopy.mouse.Button.MIDDLE, True)
self.middle_down = True
return
def run(self, command, len1):
if command == 0:
self.stop_all()
elif command == 1:
if not self.left_down:
self.left_click(command)
elif command == 2:
if not self.right_down:
self.right_click(len1)
elif command == 3:
if not self.right_down:
return
self.left_click(command)
elif command == 4:
if not self.middle_down:
self.middle_click()
def MouseCTRL(cam_on=False):
wCam, hCam = 640, 480
frameR = 100
smoothening = 7
pTime = 0
plocX, plocY = 0, 0
clocX, clocY = 0, 0
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(3, wCam)
cap.set(4, hCam)
detector = handDetector(maxHands=1)
wScr, hScr = autopy.screen.size()
mouse = Mouse()
while True:
        success, img = cap.read()
        if not success:
            continue
img = detector.findHands(img)
lmList, bbox = detector.findPosition(img)
if len(lmList) != 0:
x1, y1 = lmList[8][1:]
fingers = detector.fingersUp()
num = 0
for i in fingers:
if i == 1:
num += 1
cv2.rectangle(img, (frameR, frameR), (wCam - frameR, hCam - frameR),
(255, 0, 255), 2)
try:
length, img, lineInfo = detector.findDistance(8, 12, img)
mouse.run(num, length)
if fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
length, img, lineInfo = detector.findDistance(8, 12, img)
if length < 40:
try:
x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
clocX = plocX + (x3 - plocX) / smoothening
clocY = plocY + (y3 - plocY) / smoothening
autopy.mouse.move(wScr - clocX, clocY * 1.26)
cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
plocX, plocY = clocX, clocY
except Exception:
pass
except IndexError:
pass
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(num), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 0), 3)
if cam_on:
cv2.imshow("Image", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
try:
global thread
thread.killed = True
except Exception:
pass
class User_Info(wx.Frame):
def __init__(self, *args, **kw):
super(User_Info, self).__init__(*args, **kw)
self.InitUI()
def InitUI(self):
self.SetTitle('MouseCTRL')
icon = wx.Icon()
icon.CopyFromBitmap(wx.Bitmap("src.ico", wx.BITMAP_TYPE_ANY))
self.SetIcon(icon)
panel = wx.Panel(self)
hbox = wx.BoxSizer(wx.HORIZONTAL)
nm = wx.StaticBox(panel, -1, 'Settings')
nm1 = wx.StaticBox(panel, -1, 'About Us')
nmSizer = wx.StaticBoxSizer(nm, wx.VERTICAL)
nm1Sizer = wx.StaticBoxSizer(nm1, wx.VERTICAL)
nmbox = wx.BoxSizer(wx.VERTICAL)
nm1box = wx.BoxSizer(wx.VERTICAL)
font = wx.Font(16, family=wx.SCRIPT, weight=wx.BOLD, style=wx.ITALIC)
st1 = wx.StaticText(panel, label="MouseCTRL", style=wx.ALIGN_LEFT)
st1.SetFont(font)
text = """MouseCTRL [version 1.2.2.7]
People actively involved in this project were:
- Akarsh
- Aradhya
- Siddharth
__________________________________
© 2021: MouseCTRL & co.
"""
self.newBtn = wx.Button(panel, wx.ID_ANY, 'Start MouseCTRL', size=(150, 30))
self.newBtn1 = wx.Button(panel, wx.ID_ANY, 'Kill MouseCTRL process', size=(150, 30))
self.newBtn2 = wx.Button(panel, wx.ID_ANY, 'How to Use MouseCTRL', size=(150, 30))
self.nm4 = wx.TextCtrl(panel, -1, style=wx.TE_MULTILINE | wx.ALIGN_LEFT | wx.TE_NO_VSCROLL | wx.TE_READONLY, size=(500, 150))
self.nm4.AppendText(text)
nmbox.Add(st1, 0, wx.ALL, 5)
nmbox.Add(self.newBtn, 0, wx.ALL | wx.CENTER, 5)
nmbox.Add(self.newBtn1, 0, wx.ALL | wx.CENTER, 5)
nmbox.Add(self.newBtn2, 0, wx.ALL | wx.CENTER, 5)
nm1box.Add(self.nm4, 0, wx.ALL | wx.CENTER, 5)
nmSizer.Add(nmbox, 0, wx.ALL | wx.CENTER, 10)
nm1Sizer.Add(nm1box, 0, wx.ALL | wx.CENTER, 10)
hbox.Add(nmSizer, 0, wx.ALL, 5)
hbox.Add(nm1Sizer, 0, wx.ALL, 5)
self.newBtn.Bind(wx.EVT_BUTTON, self.runit)
self.newBtn1.Bind(wx.EVT_BUTTON, self.runit2)
self.newBtn2.Bind(wx.EVT_BUTTON, self.runit3)
panel.SetSizer(hbox)
self.Centre()
self.SetSize(wx.Size(450, 280))
self.SetBackgroundColour('white')
def runit(self, event):
global thread
thread = TraceThread(target=runmain)
        thread.start()  # start(), not run(): installs the kill trace hook and keeps the GUI responsive
def runit2(self, event):
try:
global thread
thread.killed = True
except Exception:
pass
wx.Exit()
import sys
sys.exit(0)
def runit3(self, event):
import webbrowser
webbrowser.open("https://mousectrl.herokuapp.com/")
def runmain():
MouseCTRL(True)
if __name__ == '__main__':
app = wx.App()
frame = User_Info(None, style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))
frame.Show()
app.MainLoop()
|
{"hexsha": "a643643c8e2ae4cea1d63084d87f425fb8fc3a45", "size": 11116, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/MouseCTRL.py", "max_stars_repo_name": "MouseCTRL/MouseCTRL", "max_stars_repo_head_hexsha": "920c6b99c6729b41a34af77d47936c8893bf46f3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-22T18:37:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-22T18:37:02.000Z", "max_issues_repo_path": "src/MouseCTRL.py", "max_issues_repo_name": "MouseCTRL/MouseCTRL", "max_issues_repo_head_hexsha": "920c6b99c6729b41a34af77d47936c8893bf46f3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MouseCTRL.py", "max_forks_repo_name": "MouseCTRL/MouseCTRL", "max_forks_repo_head_hexsha": "920c6b99c6729b41a34af77d47936c8893bf46f3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3139534884, "max_line_length": 133, "alphanum_fraction": 0.5461496941, "include": true, "reason": "import numpy", "num_tokens": 3002}
|
import os
import shutil
import sys
import numpy as np
import cv2
def parse_csv_annotations(filepath, num_classes=60):
"""
Extract annotations from the csv file from one epoch produced in main.py
:param filepath: path of the csv file
:param num_classes: number of classes used
    :return: list of records (dicts), each containing predictions with bbox and scores, plus the time in seconds
"""
records = []
with open(filepath, "r") as f:
line = f.readline()
cur_record = {"predictions": []}
cur_time = -1
cur_class = 1
cur_bbox = None
cur_video = None
cur_scores = {}
while len(line) > 0:
line = line.strip().split(",")
if cur_time == -1:
cur_time = float(line[1])
if cur_time != float(line[1]):
cur_record["video"] = cur_video
cur_record["time"] = cur_time
records.append(cur_record)
cur_record = {"predictions": []}
cur_class = 1
cur_scores = {}
cur_time = float(line[1])
if cur_class == 1:
cur_video = line[0]
cur_bbox = np.array([float(line[2]), float(line[3]), float(line[4]), float(line[5])])
cur_scores[int(line[6])] = float(line[7])
cur_class += 1
if cur_class > num_classes:
cur_record["predictions"].append({"bbox": cur_bbox, "scores": cur_scores})
cur_class = 1
cur_scores = {}
line = f.readline()
    # Flush the final record: the time-change check above never fires for it.
    if cur_record["predictions"]:
        cur_record["video"] = cur_video
        cur_record["time"] = cur_time
        records.append(cur_record)
    return records
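# Expected CSV layout, inferred from the parser above (one row per class per
# bbox; the values below are a hypothetical example, not a real file):
#
#     video_0001,12.5,0.1,0.2,0.4,0.8,1,0.93
#     (video_id, time_s, x1, y1, x2, y2, class_id, score)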
def get_bboxes_scores_from_record(record, width, height, idx_2_class, threshold):
bboxes = np.stack([p["bbox"] for p in record["predictions"]], axis=0)
bboxes[:, 0] *= width
bboxes[:, 1] *= height
bboxes[:, 2] *= width
bboxes[:, 3] *= height
bboxes = bboxes.astype(np.int32)
def get_label_name(cls_id):
for idx, l in enumerate(idx_2_class):
if cls_id == l["id"]:
return l["name"]
raise ValueError(f"could not find label for {cls_id}")
scores = [list(p["scores"].items()) for p in record["predictions"]]
final_scores = []
for score in scores:
bbox_score = []
score.sort(key=lambda elt: elt[0])
start_idx = 0
if len(score) > 13:
posture_idx = np.argmax([s[1] for s in score[:13]])
            # map the class id to its label name (indexing the list by id would be off by position)
            bbox_score.append((get_label_name(score[posture_idx][0]), score[posture_idx][1]))
start_idx = 13
for i in range(start_idx, len(score)):
if score[i][1] > threshold:
l_name = get_label_name(score[i][0])
bbox_score.append((l_name, score[i][1]))
final_scores.append(bbox_score)
return bboxes, final_scores
def apply_annotations(root_path, records, idx_2_class, frame_rate=30, threshold=0.5,
interpolate=True, image_format="images_%06d.jpg",
line_width=2, font_scale=1.5):
num_record = len(records)
print(f"num records {num_record}")
frame_per_clip = round(3 * frame_rate) + 1
videos = []
for idx, record in enumerate(records):
print(f"applying records - {idx + 1} out of {num_record}", end='\r')
mid_frame = round(record["time"] * frame_rate)
start_frame = mid_frame - (frame_per_clip - 1) // 2
mid_frame_low_bound = mid_frame - frame_rate // 2
mid_frame_high_bound = mid_frame + frame_rate // 2
is_last = idx == num_record - 1
is_first = idx == 0
video_path = os.path.join(root_path, record["video"])
videos.append(record["video"])
try:
height, width = cv2.imread(os.path.join(video_path, image_format % start_frame)).shape[:2]
except Exception as e:
print(os.path.join(video_path, image_format % start_frame))
raise e
bboxes, scores = get_bboxes_scores_from_record(record, width, height, idx_2_class, threshold)
frames = list(range(start_frame, start_frame + frame_per_clip))
for idx_frame, cur_frame in enumerate(frames):
if not is_first and cur_frame < mid_frame_low_bound:
continue
if not is_last and cur_frame >= mid_frame_high_bound:
continue
image_path = os.path.join(video_path, image_format % cur_frame)
img = cv2.imread(image_path)
for idx_bbox in range(bboxes.shape[0]):
img = cv2.rectangle(img, tuple(bboxes[idx_bbox, :2]), tuple(bboxes[idx_bbox, 2:]), (0, 0, 255),
thickness=line_width)
start_origin = bboxes[idx_bbox, :2] + np.array([10, 20], dtype=np.int32)
for score in scores[idx_bbox]:
img = cv2.putText(img, f"{score[0]} - {score[1]}", tuple(start_origin), cv2.FONT_HERSHEY_PLAIN,
fontScale=font_scale, color=(255, 120, 0))
start_origin[1] += 20
cv2.imwrite(image_path[:-4] + "_annotated.jpg", img)
videos = set(videos)
print("copying images without annotations")
for video in videos:
video_path = os.path.join(root_path, video)
for entry in os.scandir(video_path):
if entry.is_file() and entry.name.endswith(".jpg") and not entry.name.endswith("annotated.jpg"):
annot_path = entry.path[:-4] + "_annotated.jpg"
if not os.path.isfile(annot_path):
                    # copyfile raises on failure, replacing the os.system/assert pair
                    shutil.copyfile(entry.path, annot_path)
if __name__ == "__main__":
idx_class = [{'name': 'bend/bow (at the waist)', 'id': 1}, {'name': 'crouch/kneel', 'id': 3},
{'name': 'dance', 'id': 4}, {'name': 'fall down', 'id': 5}, {'name': 'get up', 'id': 6},
{'name': 'jump/leap', 'id': 7}, {'name': 'lie/sleep', 'id': 8}, {'name': 'martial art', 'id': 9},
{'name': 'run/jog', 'id': 10}, {'name': 'sit', 'id': 11}, {'name': 'stand', 'id': 12},
{'name': 'swim', 'id': 13}, {'name': 'walk', 'id': 14}, {'name': 'answer phone', 'id': 15},
{'name': 'carry/hold (an object)', 'id': 17}, {'name': 'climb (e.g., a mountain)', 'id': 20},
{'name': 'close (e.g., a door, a box)', 'id': 22}, {'name': 'cut', 'id': 24},
{'name': 'dress/put on clothing', 'id': 26},
{'name': 'drink', 'id': 27}, {'name': 'drive (e.g., a car, a truck)', 'id': 28},
{'name': 'eat', 'id': 29},
{'name': 'enter', 'id': 30}, {'name': 'hit (an object)', 'id': 34},
{'name': 'lift/pick up', 'id': 36},
{'name': 'listen (e.g., to music)', 'id': 37},
{'name': 'open (e.g., a window, a car door)', 'id': 38},
{'name': 'play musical instrument', 'id': 41}, {'name': 'point to (an object)', 'id': 43},
{'name': 'pull (an object)', 'id': 45},
{'name': 'push (an object)', 'id': 46}, {'name': 'put down', 'id': 47}, {'name': 'read', 'id': 48},
{'name': 'ride (e.g., a bike, a car, a horse)', 'id': 49}, {'name': 'sail boat', 'id': 51},
{'name': 'shoot', 'id': 52}, {'name': 'smoke', 'id': 54}, {'name': 'take a photo', 'id': 56},
{'name': 'text on/look at a cellphone', 'id': 57}, {'name': 'throw', 'id': 58},
{'name': 'touch (an object)', 'id': 59},
{'name': 'turn (e.g., a screwdriver)', 'id': 60}, {'name': 'watch (e.g., TV)', 'id': 61},
{'name': 'work on a computer', 'id': 62},
{'name': 'write', 'id': 63}, {'name': 'fight/hit (a person)', 'id': 64},
{'name': 'give/serve (an object) to (a person)', 'id': 65},
{'name': 'grab (a person)', 'id': 66}, {'name': 'hand clap', 'id': 67},
{'name': 'hand shake', 'id': 68}, {'name': 'hand wave', 'id': 69},
{'name': 'hug (a person)', 'id': 70}, {'name': 'kiss (a person)', 'id': 72},
{'name': 'lift (a person)', 'id': 73},
{'name': 'listen to (a person)', 'id': 74}, {'name': 'push (another person)', 'id': 76},
{'name': 'sing to (e.g., self, a person, a group)', 'id': 77},
{'name': 'take (an object) from (a person)', 'id': 78},
{'name': 'talk to (e.g., self, a person, a group)', 'id': 79},
{'name': 'watch (a person)', 'id': 80}]
records = parse_csv_annotations(sys.argv[1])
print("applying annotations")
apply_annotations("data", records, idx_class, frame_rate=int(sys.argv[2]))
|
{"hexsha": "573fb427365bf7981c7d389afc31dec9b2e7f5d0", "size": 8903, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/visualize.py", "max_stars_repo_name": "AlexandreDh/ACAR-Net", "max_stars_repo_head_hexsha": "db28009388512e31cb6ff8e86725dc9b026886b6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/visualize.py", "max_issues_repo_name": "AlexandreDh/ACAR-Net", "max_issues_repo_head_hexsha": "db28009388512e31cb6ff8e86725dc9b026886b6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/visualize.py", "max_forks_repo_name": "AlexandreDh/ACAR-Net", "max_forks_repo_head_hexsha": "db28009388512e31cb6ff8e86725dc9b026886b6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4292682927, "max_line_length": 116, "alphanum_fraction": 0.5174660227, "include": true, "reason": "import numpy", "num_tokens": 2401}
|
! { dg-do compile }
! PR fortran/41940
integer, allocatable :: a
TYPE :: x
integer, allocatable :: a
END TYPE
TYPE (x) :: y
allocate(a(4)) ! { dg-error "Shape specification for allocatable scalar" }
allocate(y%a(4)) ! { dg-error "Shape specification for allocatable scalar" }
end
|
{"hexsha": "0fa9ce1fce9c2d8ea2e4b2bbf956c80ace473e15", "size": 291, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/allocate_scalar_with_shape.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/allocate_scalar_with_shape.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/allocate_scalar_with_shape.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 20.7857142857, "max_line_length": 78, "alphanum_fraction": 0.6701030928, "num_tokens": 85}
|
// embeddedprolog.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include <stdexcept>
#include <utility>
#include <boost/any.hpp>
#include <vector>
#include <boost/regex.hpp>
#include <boost/variant.hpp>
#include <boost/variant/recursive_variant.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <boost/flyweight.hpp>
#include <iostream>
#include <string>
#include <functional>
#include <map>
#include <stdint.h>
//#define OWN_MEMORY_MANAGEMENT
using std::ostream;
using std::cout;
using std::endl;
using std::string;
using boost::any_cast;
using boost::intrusive_ref_counter;
using boost::intrusive_ptr;
using std::function;
class LogicalVariant;
class LCons;
class LVar;
enum UninstanciatedType { UNINSTANCIATED };
enum NilType { NIL };
#define CapturedLambda(...) CapturedVar<std::function<Trampoline (__VA_ARGS__) >>
#define UncountedLambda(...) UncountedVar<std::function<Trampoline (__VA_ARGS__) >>
inline ostream & operator<<(ostream & os, const NilType &)
{
os << "Nil";
return os;
}
inline ostream & operator<<(ostream & os, const UninstanciatedType &)
{
os << "Uninstanciated";
return os;
}
//Much to my surprise, Search in C++ is only 1/3 more lines than the Lua version.
class Search;
//Note: the search parameter only seems necessary in the final continuation that
//reports when the search has failed, the rest of the time it could be captured... but
//it's simpler to pass than capture, especially since other values are captured by value
//and this one would have to be captured by reference.
//The other times it has to be passed are in direct calls, not continuations.
typedef std::function<void(Search &)> Continuation;
//typedef std::initializer_list<boost::any> Params;
//typedef std::function<void(Search&, Params)> TailWithParams;
template<typename T>
class CapturedVar;
#ifdef OWN_MEMORY_MANAGEMENT
struct FreeList
{
FreeList * next;
};
const int FREE_LIST_BLOCK_SIZE = 32768;
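//Fixed-size freelist allocator: for each type T, carve a FREE_LIST_BLOCK_SIZE
//byte block into aligned T::blocksize cells, thread the cells onto
//T::free_list, and pop/push cells on allocate/free. Note that blocks are
//never handed back to malloc.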
template<typename T> void *allocate_from_freelist()
{
if (T::free_list == nullptr) {
void *block = malloc(FREE_LIST_BLOCK_SIZE);
intptr_t align;
if (T::blocksize > 16) align = 16;
else align = T::blocksize;
intptr_t offset = ((reinterpret_cast<intptr_t>(block) + align - 1)& ~(align - 1)) - reinterpret_cast<intptr_t>(block);
FreeList* p = nullptr;
for (intptr_t i = offset;i <= FREE_LIST_BLOCK_SIZE - T::blocksize; i += T::blocksize) {
FreeList* const f = reinterpret_cast<FreeList *>(i + reinterpret_cast<intptr_t>(block));
f->next = p;
p = f;
}
T::free_list = p;
}
FreeList* r = T::free_list;
T::free_list = r->next;
return static_cast<void *>(r);
}
template <typename T>
void free_to_freelist(void *v)
{
if (v == nullptr) return;
FreeList* r = static_cast<FreeList*>(v);
r->next = T::free_list;
T::free_list = r;
}
#endif
#define DECLARE_MEM_MANAGEMENT(name) \
static intptr_t blocksize; \
static FreeList *free_list; \
void * operator new (size_t size) \
{ \
assert(size == sizeof(name)); \
return allocate_from_freelist<name>(); \
} \
void * operator new (size_t, void *place) \
{ \
return place; \
} \
void operator delete (void *, void *) {} \
\
void operator delete (void * mem) \
{ \
free_to_freelist<name>(mem); \
}
#define DEFINE_MEM_MANAGEMENT(name) \
intptr_t name::blocksize = intptr_t(((sizeof(name) + 15)>>4)<<4); \
FreeList *name::free_list = nullptr;
#define DEFINE_MEM_MANAGEMENT_T(name,...) \
template <__VA_ARGS__>\
intptr_t name::blocksize = intptr_t(((sizeof(name) + 15)>>4)<<4); \
template <__VA_ARGS__>\
FreeList *name::free_list = nullptr;
//Very simple, non-thread-safe reference count compatible with intrusive_ptr.
//The point of this class is that you can define both counted and eternal/singleton
//objects, which the built-in counter class doesn't offer.
class SimpleRefCount
{
protected:
static const int SINGLETON = -1000000;
public:
int _ref;
int use_count() const { return _ref; }
void _inc() { if (_ref != SINGLETON) ++_ref; }
SimpleRefCount * _dec() {
if (_ref != SINGLETON) if (0 == --_ref) return this;
return nullptr;
}
SimpleRefCount() :_ref(0) {}
SimpleRefCount(int initial) :_ref(initial) {}
virtual ~SimpleRefCount()
{}
};
void intrusive_ptr_add_ref(SimpleRefCount *p)
{
p->_inc();
}
void intrusive_ptr_release(SimpleRefCount *p)
{
SimpleRefCount * d = p->_dec();
if (d) delete d;
}
class Trampoline;
class TrampolineLetter :public SimpleRefCount
{
public:
TrampolineLetter() {}
TrampolineLetter(int i) :SimpleRefCount(i) {}
virtual ~TrampolineLetter() {}
virtual Trampoline execute() = 0;
virtual bool isNull() const { return false; }
virtual void _for_retargetting(void *n) { }
};
class CombinableRefCount
{
public:
int _ref;
	CombinableRefCount *_next; //in union head, otherwise next member of union
	CombinableRefCount *_forward; //in union end of list, otherwise not used
int use_count() const { if (_forward) return _forward->_ref; else return _ref; }
void _inc() { if (_forward) ++_forward->_ref; else ++_ref; }
CombinableRefCount * _dec() {
if (_forward) {
if (0 == --_forward->_ref) return _forward;
}
else {
if (0 == --_ref) return this;
}
return nullptr;
}
CombinableRefCount() :_ref(0), _next(nullptr), _forward(nullptr) {}
virtual ~CombinableRefCount()
{
if (_next) delete _next;
}
template<typename T>
void add_ref(T &one_more)
{
if (_forward == nullptr) {
_forward = new CombinableRefCount;
_forward->_ref = _ref;
_forward->_next = this;
}
CombinableRefCount *o = one_more.get();
_forward->_ref += o->_ref;
o->_forward = _forward;
o->_next = _forward->_next;
_forward->_next = o;
}
#ifdef OWN_MEMORY_MANAGEMENT
static intptr_t blocksize;
static FreeList *free_list;
void * operator new (size_t size)
{
assert(size == sizeof(CombinableRefCount));
return allocate_from_freelist<CombinableRefCount>();
}
void * operator new (size_t, void *place)
{
return place;
}
void operator delete (void *, void *) {}
void operator delete (void * mem)
{
free_to_freelist<CombinableRefCount>(mem);
}
#endif
};
template <typename T>
class CapturedVarLetter;
template<typename T>
class CapturedVar;
class Trampoline : public intrusive_ptr<TrampolineLetter>
{
public:
Trampoline(const CapturedVar<std::function<Trampoline() >> &);
Trampoline(TrampolineLetter * v) :intrusive_ptr(v) {}
Trampoline() :intrusive_ptr() {}
Trampoline(const Trampoline &o) :intrusive_ptr(o.get()) {}
};
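//The TrampolineN templates below wrap a callable plus N eagerly captured
//arguments; execute() applies them and returns the next Trampoline, so the
//caller can step continuation-passing code in a loop instead of recursing
//and growing the C++ stack.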
template <typename T>
class Trampoline0 : public TrampolineLetter
{
T fn;
public:
Trampoline0(const T &f) :fn(f) { }
virtual Trampoline execute() { return fn(); }
};
class NullTrampoline : public TrampolineLetter
{
public:
NullTrampoline() :TrampolineLetter(SimpleRefCount::SINGLETON) { }
virtual bool isNull() const { return true; }
virtual Trampoline execute() { return this; }
};
template <typename T, typename P1>
class Trampoline1 : public TrampolineLetter
{
T fn;
P1 p1;
public:
Trampoline1(const T &f, const P1 &_p1) :fn(f), p1(_p1) {}
virtual Trampoline execute() { return fn(p1); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
template <typename T, typename P1, typename P2>
class Trampoline2 : public TrampolineLetter
{
T fn;
P1 p1;
P2 p2;
public:
Trampoline2(const T &f, const P1 &_p1, const P2 &_p2) :fn(f), p1(_p1), p2(_p2) {}
virtual Trampoline execute() { return fn(p1, p2); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
template <typename T, typename P1, typename P2, typename P3>
class Trampoline3 : public TrampolineLetter
{
T fn;
P1 p1;
P2 p2;
P3 p3;
public:
Trampoline3(const T &f, const P1 &_p1, const P2 &_p2, const P3 &_p3) :fn(f), p1(_p1), p2(_p2), p3(_p3) {}
virtual Trampoline execute() { return fn(p1, p2, p3); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
template <typename T, typename P1, typename P2, typename P3, typename P4>
class Trampoline4 : public TrampolineLetter
{
T fn;
P1 p1;
P2 p2;
P3 p3;
P4 p4;
public:
Trampoline4(const T &f, const P1 &_p1, const P2 &_p2, const P3 &_p3, const P4 &_p4) :fn(f), p1(_p1), p2(_p2), p3(_p3), p4(_p4) {}
virtual Trampoline execute() { return fn(p1, p2, p3, p4); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5>
class Trampoline5 : public TrampolineLetter
{
T fn;
P1 p1;
P2 p2;
P3 p3;
P4 p4;
P5 p5;
public:
Trampoline5(const T &f, const P1 &_p1, const P2 &_p2, const P3 &_p3, const P4 &_p4, const P5 &_p5) :fn(f), p1(_p1), p2(_p2), p3(_p3), p4(_p4), p5(_p5) {}
virtual Trampoline execute() { return fn(p1, p2, p3, p4, p5); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
class Trampoline6 : public TrampolineLetter
{
T fn;
P1 p1;
P2 p2;
P3 p3;
P4 p4;
P5 p5;
P6 p6;
public:
Trampoline6(const T &f, const P1 &_p1, const P2 &_p2, const P3 &_p3, const P4 &_p4, const P5 &_p5, const P6 &_p6) :fn(f), p1(_p1), p2(_p2), p3(_p3), p4(_p4), p5(_p5), p6(_p6) {}
virtual Trampoline execute() { return fn(p1, p2, p3, p4, p5, p6); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
class Trampoline7 : public TrampolineLetter
{
T fn;
P1 p1;
P2 p2;
P3 p3;
P4 p4;
P5 p5;
P6 p6;
P7 p7;
public:
Trampoline7(const T &f, const P1 &_p1, const P2 &_p2, const P3 &_p3, const P4 &_p4, const P5 &_p5, const P6 &_p6, const P7 &_p7) :fn(f), p1(_p1), p2(_p2), p3(_p3), p4(_p4), p5(_p5), p6(_p6), p7(_p7) {}
virtual Trampoline execute() { return fn(p1, p2, p3, p4, p5, p6, p7); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8>
class Trampoline8 : public TrampolineLetter
{
T fn;
P1 p1;
P2 p2;
P3 p3;
P4 p4;
P5 p5;
P6 p6;
P7 p7;
P8 p8;
public:
Trampoline8(const T &f, const P1 &_p1, const P2 &_p2, const P3 &_p3, const P4 &_p4, const P5 &_p5, const P6 &_p6, const P7 &_p7, const P8 &_p8) :fn(f), p1(_p1), p2(_p2), p3(_p3), p4(_p4), p5(_p5), p6(_p6), p7(_p7), p8(_p8) {}
virtual Trampoline execute() { return fn(p1, p2, p3, p4, p5, p6, p7, p8); }
virtual void _for_retargetting(void *n) { *static_cast<T **>(n) = &fn; }
};
/* CombinableRefCount is a replacement for intrusive_ref_counter<_, boost::thread_unsafe_counter >
* with the difference that you can combine a bunch of them to share a reference counter.
* The point of that is to handle the case where a bunch of objects create a cycle of references.
* Note it is assumed that the organization of this cycle is immutable.
* Then by combining the counts, a reference to one object is considered a reference to all for the
* sake of reference counting. Only when there are no external links to all of the objects will they
* be collected.
* Note it is assumed that the cycle of references either is special cased to not cause the counter
* to increment, or that all of those increments have been manually decremented out.
* The class is hard to understand because CombinableRefCount is used in two separate ways by the
* algorithm and it's just punning that the same code works for both.
* The main way is that CombinableRefCount is subclassed. These subclasses can be used just like
* subclasses of boost::intrusive_ref_counter.
* However, if combine_refs is called on a list of CombinableRefCount* (or on a list of CapturedVar<T> holding
* CapturedVarLetter<T> derived from CombinableRefCount) then a single CombinableRefCount is allocated
* to hold the combined reference count for all those objects. Note that this CombinableRefCount is
* just the raw type, not a subclass.
* For the first kind, the subclassed version, _forward points at the shared count if there is one
* and _next makes a list to facilitate deleting the whole set at once.
* For the second kind, the shared count, _next points to the head of the list of shared objects
* and _forward isn't used.
*/
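//A minimal sketch of the scheme above. `Node` is a hypothetical subclass used
//only for illustration; the raw `peer` pointers form the deliberately
//uncounted cycle the note requires:
//
//    struct Node : CombinableRefCount { Node *peer = nullptr; };
//    intrusive_ptr<Node> a(new Node), b(new Node);
//    a->peer = b.get(); b->peer = a.get(); // cycle, not counted
//    combine_refs(a.get(), b.get());       // one shared count for the pair;
//                                          // releasing both a and b frees both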
#ifdef OWN_MEMORY_MANAGEMENT
intptr_t CombinableRefCount::blocksize = intptr_t((sizeof(CombinableRefCount) + sizeof(CombinableRefCount) - 1)&~((sizeof(CombinableRefCount) + sizeof(CombinableRefCount) - 1) >> 1));
FreeList *CombinableRefCount::free_list = nullptr;
#endif
void intrusive_ptr_add_ref(CombinableRefCount *p)
{
p->_inc();
}
void intrusive_ptr_release(CombinableRefCount *p)
{
CombinableRefCount * d = p->_dec();
if (d) delete d;
// delete(p->_dec());
}
CombinableRefCount * combine_refs()
{
return new CombinableRefCount;
}
template<typename ... Types>
CombinableRefCount * combine_refs(CombinableRefCount *first, Types ... rest)
{
CombinableRefCount *u = combine_refs(rest...);
first->_forward = u;
u->_ref += first->_ref;
first->_next = u->_next;
u->_next = first;
return u;
}
template <typename T>
class CapturedVarLetter :public CombinableRefCount
{
public:
typedef T type;
T value;
CapturedVarLetter(const T& a) :value(a) {}
CapturedVarLetter() {}
T& operator *() { return value; }
T* operator ->() { return &value; }
#ifdef OWN_MEMORY_MANAGEMENT
static intptr_t blocksize;
static FreeList *free_list;
void * operator new (size_t size)
{
assert(size == sizeof(CapturedVarLetter<T>));
return allocate_from_freelist<CapturedVarLetter<T>>();
}
void * operator new (size_t, void *place)
{
return place;
}
void operator delete (void *, void *) {}
void operator delete (void * mem)
{
free_to_freelist<CapturedVarLetter<T>>(mem);
}
#endif
};
#ifdef OWN_MEMORY_MANAGEMENT
template <typename T>
intptr_t CapturedVarLetter<T>::blocksize = intptr_t((sizeof(CapturedVarLetter<T>) + sizeof(CapturedVarLetter<T>) - 1)&~((sizeof(CapturedVarLetter<T>) + sizeof(CapturedVarLetter<T>) - 1) >> 1));
template <typename T>
FreeList *CapturedVarLetter<T>::free_list = nullptr;
#endif
/* CapturedVar has two uses
* it can be used inside of lambdas to give the variable capture semantics of other language ie:
* 1) variables are captured by reference but
* 2) the lifetime of captured variables is controlled by garbage collection - even if the original variable goes out of scope
* the variable exists as long as there is a lambda that references it.
* 3) note that this garbage collection is not thread safe - I decided that speed is more important, do not share lambdas that hold
* CapturedVar across threads
*
* The other use (for CapturedCont) is to speed up copying std::function objects that would otherwise require copying a heap object
* on each copy. Incrementing and decrementing the reference counter is faster than calling new and delete.
*
* A weirdness with CapturedVar<T> where T is a std::function type such as CapturedCont: while lambdas can be stored in
* std::function<> types, you can't deduce the type of a lambda to convert it to std::function automatically. As a result I couldn't give
* CapturedCont a shortcut assignment such as "foo=[=](){...};"; instead you have to use * to expose the std::function inside and assign
* to that, i.e. it's "CapturedCont foo; *foo=[=](){...};". The difference is the "*".
* Avoid assigning to CapturedVar<T> without the *.
*
* Note, there's a bug/complication with the use of reference counters here.
* Lambdas that are held in CapturedConts that can form cycles of ownership will never be collected.
* In normal programs that would rarely come up, but I'm afraid that in logic style programming it will come up quite often, so
* there's a memory leak unless a somewhat complicated fix is used.
* The fix is:
* Any CapturedCont that's part of a cycle (even a self reference) needs a shadow ptr variable thus:
* UncountedCont foo_uncounted = foo;
* and inside the lambdas use foo_uncounted everywhere you would have used foo. foo_uncounted can convert to CapturedCont as needed, say to pass
* as a parameter.
*
* For all cycles of more than one CapturedCont you have to call combine_refs on the set like so:
* CapturedCont foo,bar,baz;
* UncountedCont foo_uncounted = foo,bar_uncounted=bar,baz_uncounted=baz;
* combine_refs(foo,bar,baz);
* and inside of the lambdas that foo,bar and baz are set to use foo_uncounted, bar_uncounted and baz_uncounted instead of foo,bar&baz
* Having done that incantation, the problem of circular references of CapturedConts is solved.
*
* What combine_refs does is combine the reference counts of the objects it refers to so that, for the sake of garbage
* collection, the whole set is managed as a single object. A reference to one is a reference to all.
*/
template <typename T>
class UncountedVar;
class TrampolineLetter;
enum CombineRefType { CombineRef };
template<typename T>
class CapturedVar : public intrusive_ptr< CapturedVarLetter<T> >
{
public:
CapturedVar(const CapturedVarLetter<T>& v) :intrusive_ptr(&const_cast<CapturedVarLetter<T>&>(v))
{
}
CapturedVar(const T& v) :intrusive_ptr(new CapturedVarLetter<T>(v)) {}
template<typename U>
CapturedVar(CombineRefType, const CapturedVar<U> &c) : intrusive_ptr(new CapturedVarLetter<T>()) { c.get()->add_ref(*this); }
template<typename U>
CapturedVar(CombineRefType, const UncountedVar<U> &c) : intrusive_ptr(new CapturedVarLetter<T>()) { c.get()->add_ref(*this); }
CapturedVar() :intrusive_ptr(new CapturedVarLetter<T>()) {}
CapturedVar(const CapturedVar<T> &o) :intrusive_ptr(static_cast<const intrusive_ptr< CapturedVarLetter<T> > &>(o)) {}
CapturedVar(const UncountedVar<T> &o);
void clear()
{
*static_cast<intrusive_ptr< CapturedVarLetter<T> > *>(this) = nullptr;
}
auto operator()() { return (this->operator *())(); }
template <typename P1>
auto operator()(P1 &&p1) { return (this->operator *())(p1); }
template <typename P1, typename P2>
auto operator()(P1 &&p1, P2 &&p2) { return (this->operator *())(p1, p2); }
template <typename P1, typename P2, typename P3>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3) { return (this->operator *())(p1, p2, p3); }
template <typename P1, typename P2, typename P3, typename P4>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4) { return (this->operator *())(p1, p2, p3, p4); }
template <typename P1, typename P2, typename P3, typename P4, typename P5>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5) { return (this->operator *())(p1, p2, p3, p4, p5); }
template <typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6) { return (this->operator *())(p1, p2, p3, p4, p5, p6); }
template <typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7) { return (this->operator *())(p1, p2, p3, p4, p5, p6, p7); }
template <typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7, P8 &&p8) { return (this->operator *())(p1, p2, p3, p4, p5, p6, p7, p8); }
T * operator->() { return &get()->value; }
T * operator->() const { return &get()->value; }
T& operator *() { return get()->value; }
T& operator *() const { return get()->value; }
};
//typedef CapturedVarLetter<Continuation> *UncountedCont;
template <typename T>
class UncountedVar
{
public:
CapturedVarLetter<T> * value;
UncountedVar(const CapturedVar<T> &c) :value(c.get()) {}
template<typename U>
UncountedVar(CombineRefType, const CapturedVar<U> &c) : value(new CapturedVarLetter<T>()) { c.get()->add_ref(*this); }
template<typename U>
UncountedVar(CombineRefType, const UncountedVar<U> &c) : value(new CapturedVarLetter<T>()) { c.get()->add_ref(*this); }
CapturedVarLetter<T> * get() const {
return const_cast<CapturedVarLetter<T> *>(value);
}
T * operator->() { return &get()->value; }
T * operator->() const { return &get()->value; }
T& operator *() { return value->value; }
T& operator *() const { return const_cast<T&>(value->value); }
auto operator()() { return (this->operator *())(); }
template <typename P1>
auto operator()(P1 &&p1) { return (this->operator *())(p1); }
template <typename P1, typename P2>
auto operator()(P1 &&p1, P2 &&p2) { return (this->operator *())(p1, p2); }
template <typename P1, typename P2, typename P3>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3) { return (this->operator *())(p1, p2, p3); }
template <typename P1, typename P2, typename P3, typename P4>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4) { return (this->operator *())(p1, p2, p3, p4); }
template <typename P1, typename P2, typename P3, typename P4, typename P5>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5) { return (this->operator *())(p1, p2, p3, p4, p5); }
template <typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6) { return (this->operator *())(p1, p2, p3, p4, p5, p6); }
template <typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7) { return (this->operator *())(p1, p2, p3, p4, p5, p6, p7); }
template <typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8>
auto operator()(P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7, P8 &&p8) { return (this->operator *())(p1, p2, p3, p4, p5, p6, p7, p8); }
};
template<typename T>
CapturedVar<T>::CapturedVar(const UncountedVar<T> &o) :intrusive_ptr(o.value) {}
template<typename T, typename ... Types>
CombinableRefCount * combine_refs(const CapturedVar<T> &_first, Types ... rest)
{
CombinableRefCount *first = _first.get();
CombinableRefCount *u = combine_refs(rest...);
first->_forward = u;
u->_ref += first->_ref;
first->_next = u->_next;
u->_next = first;
return u;
}
template <typename T>
auto make_counted(T && o) {
return o;
}
template <typename T>
CapturedVar<T> make_counted(const UncountedVar<T> &o) {
return *o.get();
}
template <typename T>
CapturedVar<T> make_counted(UncountedVar<T> &o) {
return *o.get();
}
std::reference_wrapper<Search> make_counted(Search &o) {
return std::ref(o);
}
template <typename T>
inline TrampolineLetter* new_trampoline(T &&f)
{
return new Trampoline0<T>(f);
}
template <typename T, typename P1>
inline TrampolineLetter* new_trampoline(T &&f, P1 &&p1)
{
return new Trampoline1<T, P1>(f, p1);
}
template <typename T, typename P1, typename P2>
inline TrampolineLetter * new_trampoline(T &&f, P1 &&p1, P2 &&p2)
{
return new Trampoline2<T, P1, P2>(f, p1, p2);
}
template <typename T, typename P1, typename P2, typename P3>
inline TrampolineLetter * new_trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3)
{
return new Trampoline3<T, P1, P2, P3>(f, p1, p2, p3);
}
template <typename T, typename P1, typename P2, typename P3, typename P4>
inline TrampolineLetter * new_trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4)
{
return new Trampoline4<T, P1, P2, P3, P4>(f, p1, p2, p3, p4);
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5>
inline TrampolineLetter * new_trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5)
{
return new Trampoline5<T, P1, P2, P3, P4, P5>(f, p1, p2, p3, p4, p5);
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
inline TrampolineLetter * new_trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6)
{
return new Trampoline6<T, P1, P2, P3, P4, P5, P6>(f, p1, p2, p3, p4, p5, p6);
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
inline TrampolineLetter * new_trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7)
{
return new Trampoline7<T, P1, P2, P3, P4, P5, P6, P7>(f, p1, p2, p3, p4, p5, p6, p7);
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8>
inline TrampolineLetter * new_trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7, P8 &&p8)
{
return new Trampoline8<T, P1, P2, P3, P4, P5, P6, P7, P8>(f, p1, p2, p3, p4, p5, p6, p7, p8);
}
template <typename T>
Trampoline trampoline(T &&f)
{
return new_trampoline(make_counted(f));
}
template <typename T, typename P1>
Trampoline trampoline(T &&f, P1 &&p1)
{
return new_trampoline(make_counted(f), make_counted(p1));
}
template <typename T, typename P1, typename P2>
Trampoline trampoline(T &&f, P1 &&p1, P2 &&p2)
{
return new_trampoline(make_counted(f), make_counted(p1), make_counted(p2));
}
template <typename T, typename P1, typename P2, typename P3>
Trampoline trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3)
{
return new_trampoline(make_counted(f), make_counted(p1), make_counted(p2), make_counted(p3));
}
template <typename T, typename P1, typename P2, typename P3, typename P4>
Trampoline trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4)
{
return new_trampoline(make_counted(f), make_counted(p1), make_counted(p2), make_counted(p3), make_counted(p4));
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5>
Trampoline trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5)
{
return new_trampoline(make_counted(f), make_counted(p1), make_counted(p2), make_counted(p3), make_counted(p4), make_counted(p5));
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
Trampoline trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6)
{
return new_trampoline(make_counted(f), make_counted(p1), make_counted(p2), make_counted(p3), make_counted(p4), make_counted(p5), make_counted(p6));
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
Trampoline trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7)
{
return new_trampoline(make_counted(f), make_counted(p1), make_counted(p2), make_counted(p3), make_counted(p4), make_counted(p5), make_counted(p6), make_counted(p7));
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8>
Trampoline trampoline(T &&f, P1 &&p1, P2 &&p2, P3 &&p3, P4 &&p4, P5 &&p5, P6 &&p6, P7 &&p7, P8 &&p8)
{
return new_trampoline(make_counted(f), make_counted(p1), make_counted(p2), make_counted(p3), make_counted(p4), make_counted(p5), make_counted(p6), make_counted(p7), make_counted(p8));
}
typedef CapturedLambda(Search &) CapturedCont;
typedef UncountedLambda(Search &) UncountedCont;
typedef CapturedLambda() Subclause;
typedef UncountedLambda() UncountedSubclause;
Trampoline::Trampoline(const CapturedVar< std::function< Trampoline() > > &c) :intrusive_ptr(new Trampoline0<CapturedVar< std::function< Trampoline() > > >(c))
{
}
Trampoline end_search = new NullTrampoline();
//for testing with .which()
enum LVType {
LV_NIL, LV_UNINSTANCIATED, LV_DOUBLE, LV_STRING, LV_LVAR, LV_LIST, LV_CUSTOM, LV_DATA1, LV_DATA2, LV_DATA3, LV_DATA4
};
const char * const TypeNames[] = { "nil","variable","double","string","var","list","custom","extra data type 1","extra data type 2","extra data type 3","extra data type 4" };
//to allow custom logical types
class LogicalData :public intrusive_ref_counter<LogicalData, boost::thread_unsafe_counter >
{
public:
LVType class_type;
void *data;
LogicalData(LVType t, void *d) :class_type(t), data(d) {}
};
typedef boost::flyweight<string> InternedString;
/*
Note: difference from Lua version
boost::variant treats its values as value types, it's not possible to get a reference to the stored value, only a copy
so when you instanciate an LVar it's not the LVar that can be shared, it's the LVariant inside it. You can't instanciate by
setting anLVar = aValue; you have to do it as anLVar->value = aValue!
Anyway the point of LVar in the C++ version is to allow reference counting and simple initialization.
All the extra levels of indirection are just ways of dealing with C++ limitations consider an LVar to just be a variable and
the LVariant/LValue to be the logical variable inside it.
*/
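//Sketch of the point above, using only the types in this file: given
//LVar x(LInit()); writing x->value = 3.14; fills in the shared
//LogicalVariant, so LVar y(x); (which shares that same LogicalVariant)
//also sees 3.14, whereas y = x; merely copies x's current value into
//y's own LogicalVariant.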
typedef boost::variant <
NilType
, UninstanciatedType
, double
, InternedString
, LVar
, intrusive_ptr<LCons>
, intrusive_ptr<LogicalData>
> LValue;
//Note, in this program LVars are never to hold NULL
//rather they can hold a LogicalVariant with the type LV_NIL
//The usual initialization is to be: LVar aVariable(LInit())
//that initializes the variable to UNINSTANCIATED
class LVar : public boost::intrusive_ptr<LogicalVariant>
{
public:
LVar();
LVar(NilType);
LVar(UninstanciatedType);
LVar(const char * c);
LVar(double d);
//copy constructor, not chaining
LVar(const LVar &v);
LVar(LogicalVariant *v);
LVar(LValue v);
LVar(InternedString s);
LVar(LCons *);
LVar(intrusive_ptr<LCons>&);
void chain(LVar&o);
LVar& get_target();
LVType target_type();
bool nullp() { return target_type() == LV_NIL; }
bool listp() { LVType t = target_type(); return t == LV_NIL || t == LV_LIST; }
bool pairp() { return target_type() == LV_LIST; }
void set_car(LVar &t);
void set_cdr(LVar &t);
// LV_NIL,LV_UNINSTANCIATED,LV_DOUBLE,LV_STRING,LV_LVAR,LV_LIST,LV_CUSTOM,LV_DATA1,LV_DATA2,LV_DATA3,LV_DATA4
LVType type() const;
bool uninstanciatedp() { return type() == LV_UNINSTANCIATED; }
bool doublep() { return type() == LV_DOUBLE; }
bool stringp() { return type() == LV_STRING; }
bool lvarp() { return type() == LV_LVAR; }
void operator = (const LVar &o);
bool ground() {
LVType t = type();
if (t == LV_LIST) return car().ground() && cdr().ground();
return t != LV_UNINSTANCIATED;
}
intrusive_ptr<LCons> as_LCons();
double as_double();
InternedString as_IString();
intrusive_ptr<LogicalData> as_LogicalValue();
LVar car();
LVar cdr();
// bool operator ==(LVar &);
#ifdef OWN_MEMORY_MANAGEMENT
static intptr_t blocksize;
static FreeList *free_list;
void * operator new (size_t size)
{
assert(size == sizeof(LVar));
return allocate_from_freelist<LVar>();
}
void * operator new (size_t, void *place)
{
return place;
}
void operator delete (void *, void *) {}
void operator delete (void * mem)
{
free_to_freelist<LVar>(mem);
}
#endif
};
#ifdef OWN_MEMORY_MANAGEMENT
intptr_t LVar::blocksize = intptr_t((sizeof(LVar) + sizeof(LVar) - 1)&~((sizeof(LVar) + sizeof(LVar) - 1) >> 1));
FreeList *LVar::free_list = nullptr;
#endif
class LogicalVariant :public intrusive_ref_counter<LogicalVariant, boost::thread_unsafe_counter>
{
public:
LogicalVariant() :value(UNINSTANCIATED) {}
LogicalVariant(LValue v) :value(v) {}
LogicalVariant(LogicalVariant &v) :value(v.value) {}
LogicalVariant(LVar &v) :value(v) {}
LogicalVariant(LCons *c) :value(intrusive_ptr<LCons>(c)) {}
LogicalVariant(boost::intrusive_ptr<LCons>&c) :value(c) {}
LValue value;
#ifdef OWN_MEMORY_MANAGEMENT
static intptr_t blocksize;
static FreeList *free_list;
void * operator new (size_t size)
{
assert(size == sizeof(LogicalVariant));
return allocate_from_freelist<LogicalVariant>();
}
void * operator new (size_t, void *place)
{
return place;
}
void operator delete (void *, void *) {}
void operator delete (void * mem)
{
free_to_freelist<LogicalVariant>(mem);
}
#endif
};
#ifdef OWN_MEMORY_MANAGEMENT
intptr_t LogicalVariant::blocksize = intptr_t((sizeof(LogicalVariant) + sizeof(LogicalVariant) - 1)&~((sizeof(LogicalVariant) + sizeof(LogicalVariant) - 1) >> 1));
FreeList *LogicalVariant::free_list = nullptr;
#endif
inline LVar::LVar() : intrusive_ptr<LogicalVariant>(new LogicalVariant(UNINSTANCIATED)) { }
inline LVar::LVar(NilType) : intrusive_ptr<LogicalVariant>(new LogicalVariant(NIL)) { }
inline LVar::LVar(LValue v) : intrusive_ptr<LogicalVariant>(new LogicalVariant(v)) { }
inline LVar::LVar(UninstanciatedType) : intrusive_ptr<LogicalVariant>(new LogicalVariant(UNINSTANCIATED)) { }
inline LVar::LVar(const char * c) : intrusive_ptr<LogicalVariant>(new LogicalVariant(InternedString(c))) {}
inline LVar::LVar(double d) : intrusive_ptr<LogicalVariant>(new LogicalVariant(d)) {}
//Note it's not safe to make this chain because it's used as a copy constructor
inline LVar::LVar(const LVar &v) : intrusive_ptr<LogicalVariant>(v) {}
inline LVar::LVar(LogicalVariant *v) : intrusive_ptr<LogicalVariant>(v) {}
inline LVar::LVar(InternedString s) : intrusive_ptr<LogicalVariant>(new LogicalVariant(s)) {}
inline void LVar::chain(LVar &o) { (*this)->value = o; }
inline LVType LVar::target_type() { return get_target().type(); }
inline void LVar::operator = (const LVar &o) { (*this)->value = o->value; }
LVType LVar::type() const
{
LVType t = (LVType)(*this)->value.which();
if (t == LV_CUSTOM) return boost::get<intrusive_ptr<LogicalData> >((*this)->value)->class_type;
return t;
}
struct GetAddress : public boost::static_visitor<>
{
	void *address;
	template <typename T>
	void operator()(T &t) { address = &t; }
};
LVar& LVar::get_target()
{
LVar *t = this;
// GetAddress get_address;
while ((*t).type() == LV_LVAR) t = &boost::get<LVar>((*t)->value);
return *t;
}
inline LogicalVariant * LInit()
{
return new LogicalVariant();
}
//ostream & operator<<(ostream & os, const LogicalVariant &v);
ostream & operator<<(ostream & os, const LVar &v);
//typedef CapturedVar<LVar> CLVar;
//typedef UncountedVar<LVar> ULVar;
struct DotHolder
{
LVar cdr;
};
class LCons :public intrusive_ref_counter<LCons, boost::thread_unsafe_counter>
{
public:
static const char *open_paren;
static const char *close_paren;
static const char *display_dot;
static const char *display_nil;
ostream & _out_rest(ostream & os)
{
/*
if (nullp(logical_get(self[2]))) then return ' ' .. tostring(logical_get(self[1])) .. close_paren
elseif (listp(logical_get(self[2]))) then return ' ' .. tostring(logical_get(self[1])) .. logical_get(self[2]):rest_tostring()
else return ' ' .. tostring(logical_get(self[1])) .. display_dot .. tostring(self[2]) ..close_paren
end
*/
if (cdr.nullp()) os << " " << car.get_target() << close_paren;
else if (cdr.listp()) {
os << " "
<< car.get_target();
return cdr.as_LCons()->_out_rest(os);
}
else os << " " << car.get_target() << display_dot << cdr.get_target() << close_paren;
return os;
}
//allocating a new LogicalVariant for NIL allows the cons to be mutable
//Maybe we'd prefer immutable
LCons(LValue first) :car(new LogicalVariant(first)), cdr(new LogicalVariant(NIL)) {}
LCons(LValue first, LValue rest) :car(new LogicalVariant(first)), cdr(new LogicalVariant(rest)) {}
//LCons(LVar& first, DotHolder rest) :car(first), cdr(rest.cdr) {}
LCons(LVar && first, DotHolder && rest) :car(first), cdr(rest.cdr.car()) {}
LCons(LVar& first) :car(first), cdr(new LogicalVariant(NIL)) {}
LCons(LVar& first, LVar& rest) :car(first), cdr(rest) {}
LCons(LValue first, LVar& rest) :car(new LogicalVariant(first)), cdr(rest) {}
LCons(LVar& first, LValue rest) :car(first), cdr(new LogicalVariant(rest)) {}
//Note there is an extra level of indirection here so that there doesn't have to be
//some complicated plumbing for the garbage collection. And remember LVars should never be NULL
LVar car;
LVar cdr;
#ifdef OWN_MEMORY_MANAGEMENT
static intptr_t blocksize;
static FreeList *free_list;
void * operator new (size_t size)
{
assert(size == sizeof(LCons));
return allocate_from_freelist<LCons>();
}
void * operator new (size_t, void *place)
{
return place;
}
void operator delete (void *, void *) {}
void operator delete (void * mem)
{
free_to_freelist<LCons>(mem);
}
#endif
};
#ifdef OWN_MEMORY_MANAGEMENT
intptr_t LCons::blocksize = intptr_t((sizeof(LCons) + sizeof(LCons) - 1)&~((sizeof(LCons) + sizeof(LCons) - 1) >> 1));
FreeList *LCons::free_list = nullptr;
#endif
inline LVar::LVar(LCons *c) :intrusive_ptr<LogicalVariant>(new LogicalVariant(c)) {}
inline LVar::LVar(intrusive_ptr<LCons> &c) : intrusive_ptr<LogicalVariant>(new LogicalVariant(c)) {}
intrusive_ptr<LCons> LVar::as_LCons() {
return boost::get<intrusive_ptr<LCons> >(get_target()->value);
}
double LVar::as_double() {
return boost::get<double>(get_target()->value);
}
InternedString LVar::as_IString() {
return boost::get<InternedString>(get_target()->value);
}
intrusive_ptr<LogicalData> LVar::as_LogicalValue() {
return boost::get<intrusive_ptr<LogicalData> >(get_target()->value);
}
LVar LVar::car() { return as_LCons()->car.get_target(); }
LVar LVar::cdr() { return as_LCons()->cdr.get_target(); }
void LVar::set_car(LVar &t) { as_LCons()->car.get_target() = t; }
void LVar::set_cdr(LVar &t) { as_LCons()->cdr.get_target() = t; }
const char *LCons::open_paren = "( ";
const char *LCons::close_paren = " )";
const char *LCons::display_dot = " | ";
const char *LCons::display_nil = "()";
ostream & operator<<(ostream & os, const LVar &v)
//ostream & operator<<(ostream & os, const LogicalVariant &v)
{
switch (v.type()) {
case LV_UNINSTANCIATED:
os << "Var" << &v;
break;
case LV_LVAR:
#ifdef DISPLAY_LVAR_LINKS
os << "->(" << &v->value << ')' << v->value;
#else
os << v->value;
#endif
break;
case LV_NIL:
os << LCons::display_nil;
break;
case LV_LIST:
//boost::get<intrusive_ptr<LCons> >(cdr.get_target()->value)->_out_rest(os);
/*
__tostring=function (self)
if nullp(self) then return display_nil
elseif nullp(logical_get(self[2])) then return open_paren .. tostring(logical_get(self[1])) .. close_paren
elseif listp(logical_get(self[2])) then return open_paren .. tostring(logical_get(self[1])) .. logical_get(self[2]):rest_tostring()
else return open_paren .. tostring(logical_get(self[1])) .. display_dot .. tostring(self[2]) ..close_paren
end
end
*/
{
intrusive_ptr<LCons> l = boost::get<intrusive_ptr<LCons> >(v->value);
if (l->cdr.nullp()) os << LCons::open_paren << l->car.get_target() << LCons::close_paren;
else if (l->cdr.listp()) {
os << LCons::open_paren << l->car.get_target();
return boost::get<intrusive_ptr<LCons> >(l->cdr.get_target()->value)->_out_rest(os);
}
else os << LCons::open_paren << l->car.get_target() << LCons::display_dot << l->cdr.get_target() << LCons::close_paren;
}
break;
default:
#ifdef DISPLAY_LVAR_LINKS
os << "[" << &v->value << ']' << v->value;
#else
os << v->value;
#endif
}
return os;
}
//typedef std::vector<boost::any> AnyValues;
enum DotType { DOT };
LogicalVariant NilVariant(NIL);
template<typename ... TYPES>
LVar L()
{
return NIL;
}
template<typename T, typename ... TYPES>
LVar L(T a, TYPES ... rest)
{
return LVar(intrusive_ptr<LCons>(new LCons(LVar(a), L(rest ...))));
}
template<typename ... TYPES>
DotHolder L(DotType, TYPES ... rest)
{
return DotHolder{ L(rest ...) };
}
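//Illustrative examples: L(1, 2, 3) builds the proper list ( 1 2 3 ),
//L(1, DOT, 2) builds the dotted pair ( 1 | 2 ), and L() yields NIL.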
//emulate lua's ==
//the types that are primitive in lua are compared by value
//the other types are compared by address
//used for a quick equality test in unify
class are_strict_equals
: public boost::static_visitor<bool>
{
public:
template <typename T, typename U>
bool operator()(const T &, const U &) const
{
return false; // cannot compare different types
}
template <typename T>
bool operator()(const T & lhs, const T & rhs) const
{
return lhs == rhs;
}
	//Plain overloads rather than in-class explicit specializations, which are
	//a non-standard compiler extension; non-template overloads are preferred anyway.
	bool operator()(const UninstanciatedType & lhs, const UninstanciatedType & rhs) const
	{
		return &lhs == &rhs;
	}
	bool operator()(const LVar & lhs, const LVar & rhs) const
	{
		return &lhs->value == &rhs->value;
	}
	bool operator()(const intrusive_ptr<LCons> & lhs, const intrusive_ptr<LCons> & rhs) const
	{
		return &(*lhs) == &(*rhs);
	}
	bool operator()(const intrusive_ptr<LogicalData> & lhs, const intrusive_ptr<LogicalData> & rhs) const
	{
		return &(*lhs) == &(*rhs);
	}
};
bool strict_equals(LVar &a, LVar &b)
{
return boost::apply_visitor(are_strict_equals(), a->value, b->value);
}
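//Illustration: two LVars holding the same interned string compare equal by
//value, while two distinct variables chained to the same target compare equal
//through the LVar overload; see the "equals tests" in main() below.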
bool _unify(Search &s, LVar &a, LVar&b);
bool _identical(LVar &a, LVar&b);
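//Search drives the nondeterministic search. amblist is a backtracking stack
//mixing AMB_ALT records (untried alternatives) with AMB_UNDO records (binding
//restorers); fail() resumes the most recent record, and undo records restore
//a binding and fail again until an alternative is reached.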
class Search
{
enum AmbTag { AMB_UNDO, AMB_ALT };
struct AmbRecord {
AmbTag tag;
Trampoline cont;
AmbRecord(AmbTag t, Trampoline c) :tag(t), cont(c) {}
};
std::vector<AmbRecord> amblist;
Trampoline captured_fail;
static Trampoline fail_fn(Search &s)
{
s.failed = true;
s.save_undo(s.captured_fail);
return end_search;
}
void new_amblist()
{
failed = false;
started = false;
amblist.clear();
amblist.push_back(AmbRecord(AMB_UNDO, captured_fail));
}
bool started;
Trampoline cont;
bool failed;
Trampoline initial;
public:
std::map<const char *, boost::any> results;
bool running() { return !failed; }
void save_undo(const Trampoline &c) { amblist.push_back(AmbRecord(AMB_UNDO, c)); }
void alt(Trampoline(*c)(Search &)) { amblist.push_back(AmbRecord(AMB_ALT, trampoline(c, *this))); }
void alt(const CapturedCont &c) {
amblist.push_back(AmbRecord(AMB_ALT, trampoline(c, *this)));
}
void alt(const UncountedCont &c) {
amblist.push_back(AmbRecord(AMB_ALT, trampoline(c, *this)));
}
void alt(const Subclause &c) {
amblist.push_back(AmbRecord(AMB_ALT, trampoline(c)));
}
void alt(const UncountedSubclause &c) {
amblist.push_back(AmbRecord(AMB_ALT, trampoline(c)));
}
void alt(const Trampoline &c) {
amblist.push_back(AmbRecord(AMB_ALT, c));
}
	//Note this can throw a CleanStackException, so only call it in tail
	//position; alternatively, you can use the fail() function defined below
	//the class as a continuation.
Trampoline fail() {
Trampoline c = amblist.back().cont;
amblist.pop_back();
return c; //tail call
}
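	//snip_start()/snip() implement a Prolog-style cut: snip() discards the
	//AMB_ALT choice points pushed since snip_start(), but keeps the AMB_UNDO
	//records so bindings still unwind correctly on later failure.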
int snip_start() { return (int)amblist.size() - 1; }
void snip(int pos) {
std::vector<AmbRecord> temp;
		for (int i = (int)amblist.size() - 1; i > pos; --i) {
if (amblist.back().tag == AMB_UNDO) {
temp.push_back(amblist.back());
}
amblist.pop_back();
}
while (!temp.empty()) {
amblist.push_back(temp.back());
temp.pop_back();
}
}
void reset()
{
new_amblist();
}
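	//The constructor overloads below emulate variadic argument forwarding for
	//up to seven extra parameters; each packages the goal f into the initial
	//trampoline and resets the amblist.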
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7>
Search(T &&f, P1 && p1, P2 && p2, P3 && p3, P4 && p4, P5 && p5, P6 && p6, P7 && p7) :initial(trampoline(f, *this, p1, p2, p3, p4, p5, p6, p7)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6>
Search(T &&f, P1 && p1, P2 && p2, P3 && p3, P4 && p4, P5 && p5, P6 && p6) : initial(trampoline(f, *this, p1, p2, p3, p4, p5, p6)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
template <typename T, typename P1, typename P2, typename P3, typename P4, typename P5>
Search(T &&f, P1 && p1, P2 && p2, P3 && p3, P4 && p4, P5 && p5) : initial(trampoline(f, *this, p1, p2, p3, p4, p5)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
template <typename T, typename P1, typename P2, typename P3, typename P4>
Search(T &&f, P1 && p1, P2 && p2, P3 && p3, P4 && p4) : initial(trampoline(f, *this, p1, p2, p3, p4)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
template <typename T, typename P1, typename P2, typename P3>
Search(T &&f, P1 && p1, P2 && p2, P3 && p3) : initial(trampoline(f, *this, p1, p2, p3)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
template <typename T, typename P1, typename P2>
Search(T &&f, P1 && p1, P2 && p2) : initial(trampoline(f, *this, p1, p2)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
template <typename T, typename P1>
Search(T &&f, P1 && p1) : initial(trampoline(f, *this, p1)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
template <typename T>
Search(T &&f) : initial(trampoline(f, *this)), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
Search(const Trampoline i) :initial(i), captured_fail(trampoline(fail_fn, *this))
{
new_amblist();
}
~Search() {}
bool operator() ()
{
Trampoline c;
if (!started) {
started = true;
failed = false;
c = initial;
}
else c = fail();
do { c = c->execute(); } while (!c->isNull());
return !failed;
}
Trampoline unify(LVar a, LVar b, Trampoline c)
{
if (!_unify(*this, a, b)) return fail();
return c;
}
Trampoline identical(LVar a, LVar b, Trampoline c)
{
if (!_identical(a, b)) return fail();
return c;
}
Trampoline not_identical(LVar a, LVar b, Trampoline c)
{
if (_identical(a, b)) return fail();
return c;
}
};
Trampoline _restore_unified(Search &s, LVar a_save, LValue restore_a)
{
a_save->value = restore_a;
return s.fail();
}
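//_unify binds uninstanciated variables and registers a _restore_unified undo
//record on the search's amblist, so backtracking restores the pre-unification
//values.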
bool _unify(Search &s, LVar &a, LVar&b)
{
if (strict_equals(a, b)) return true;
LVar a_target = a.get_target();
LVar b_target = b.get_target();
if (strict_equals(a_target, b_target)) return true; //test strict equals on uninstanciated {}{}{}
if (a_target.uninstanciatedp() && b_target.uninstanciatedp())
{
LValue restore_a = a_target.get()->value;
a_target.chain(b_target);
s.save_undo(trampoline(_restore_unified, s, a_target, restore_a));
return true;
}
else if (a_target.uninstanciatedp()) {
LValue restore_a = a_target.get()->value;
a_target->value = b_target.get()->value;
s.save_undo(trampoline(_restore_unified, s, a_target, restore_a));
return true;
}
else if (b_target.uninstanciatedp()) {
LValue restore_b = b_target.get()->value;
b_target->value = a_target.get()->value;
s.save_undo(trampoline(_restore_unified, s, b_target, restore_b));
return true;
}
if (a_target.pairp() && b_target.pairp())
{
if (!_unify(s, a_target.car(), b_target.car())) return false;
return _unify(s, a_target.cdr(), b_target.cdr());
}
return false;
}
bool _identical(LVar &a, LVar&b)
{
if (strict_equals(a, b)) return true;
LVar a_target = a.get_target();
LVar b_target = b.get_target();
if (strict_equals(a_target, b_target)) return true; //test strict equals on uninstanciated {}{}{}
if (a_target.pairp() && b_target.pairp())
{
if (!_identical(a_target.car(), b_target.car())) return false;
return _identical(a_target.cdr(), b_target.cdr());
}
return false;
}
//rolling my own so that I can be sure that iterators are preserved across deletes.
enum ClauseRootType { ClauseRoot };
enum InsertHeadType { ClauseHead };
enum InsertTailType { ClauseTail };
//While it's not strictly necessary, this structure is properly garbage collected, so the
//programmer is freed from designing around lifetime issues. If you delete a
//DynamicPredicate while its clauses are running, nothing will break.
struct DynamicClauseBase : public SimpleRefCount
{
intrusive_ptr<DynamicClauseBase> next;
DynamicClauseBase* prev;
virtual bool is_root() const { return true; }
bool empty() const { return next == this; }
DynamicClauseBase(ClauseRootType) : SimpleRefCount(SimpleRefCount::SINGLETON)
{
cout << "creating root " << this << endl;
next = prev = this;
}
DynamicClauseBase(InsertHeadType, DynamicClauseBase *root) : next(root->next), prev(root)
{
cout << "creating at head " << this << endl;
root->next = this; next->prev = this;
}
DynamicClauseBase(InsertTailType, DynamicClauseBase *root) : next(root), prev(root->prev)
{
cout << "creating at tail " << this << endl;
root->prev = this; prev->next = this;
}
void unlink()
{
if (next != nullptr) next->prev = prev;
if (prev != nullptr) prev->next = next;
next = nullptr;
prev = nullptr;
}
virtual ~DynamicClauseBase()
{
cout << "deleting " << this << endl;
unlink();
}
};
template <typename T>
struct DynamicClauseT :public DynamicClauseBase
{
T value;
virtual bool is_root() const { return false; }
DynamicClauseT(InsertHeadType i, DynamicClauseBase &root, T f) : DynamicClauseBase(i, &root), value(f) { }
DynamicClauseT(InsertTailType i, DynamicClauseBase &root, T f) : DynamicClauseBase(i, &root), value(f) { }
};
typedef intrusive_ptr<DynamicClauseBase> DynamicClause;
//typedef std::function<void(Search&,)> DynamicCont;
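//gimme<> peels the template wrapper off a captured-lambda type to recover the
//underlying function signature R(Args...), letting asserta/assertz below
//accept plain functions as clauses.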
template <typename T> struct gimme;
template <template <typename> typename TT, typename R, typename...Args>
struct gimme<TT<R(Args...)>> {
using type = R(Args...);
};
// T should be CapturedLambda(function...)
template <typename T>
class DynamicPredicate {
protected:
DynamicClauseBase root;
static Trampoline next_clause(Search &s, Trampoline last_template, Trampoline final_continuation, intrusive_ptr<DynamicClauseT<T>> last_ptr, T *stored_fn)
{
if (last_ptr->next->is_root()) return final_continuation;
last_ptr = static_cast<DynamicClauseT<T> *>(last_ptr->next.get());
*stored_fn = last_ptr->value;
s.alt(trampoline(next_clause, s, last_template, final_continuation, last_ptr, stored_fn));
return last_template;
}
Trampoline _apply(Search&s, Trampoline initial, Trampoline final_continuation)
{
intrusive_ptr<DynamicClauseT<T>> first = static_cast<DynamicClauseT<T>*>(root.next.get());
T * stored_fn = nullptr;
initial->_for_retargetting(&stored_fn);
assert(stored_fn != nullptr);
s.alt(trampoline(next_clause, s, initial, final_continuation, first, stored_fn));
return initial;
}
	DynamicPredicate(const DynamicPredicate &) = delete; //non-copyable
public:
DynamicPredicate() :root(ClauseRoot) {}
~DynamicPredicate() {
root.next->prev = nullptr;
root.next = nullptr; //break chain
}
DynamicClause asserta(T f)
{
return new DynamicClauseT<T>(ClauseHead, root, f);
}
DynamicClause asserta(typename T::element_type f)
{
return asserta(T(f));
}
DynamicClause asserta(typename gimme<typename T::element_type::type>::type f)
{
return asserta(T(f));
}
DynamicClause assertz(T f)
{
return new DynamicClauseT<T>(ClauseTail, root, f);
}
DynamicClause assertz(typename T::element_type f)
{
return assertz(T(f));
}
DynamicClause assertz(typename gimme<typename T::element_type::type>::type f)
{
return assertz(T(f));
}
void retract(DynamicClause c)
{
c->unlink();
}
void retract_all()
{
root.next->prev = nullptr;
root.next = &root; //break chain
root.prev = &root;
}
Trampoline operator () (Search &s, Trampoline c)
{
if (root.empty()) return s.fail();
int snip = s.snip_start();
return _apply(s, trampoline(root.next->value, s, snip, c ), c);
}
template <typename P1>
Trampoline operator () (Search &s, Trampoline c, P1 && p1)
{
if (root.empty()) return s.fail();
int snip = s.snip_start();
return _apply(s, trampoline(static_cast<DynamicClauseT<T>&>(*root.next).value, s, snip, c, p1), c);
}
template <typename P1, typename P2>
Trampoline operator () (Search &s, Trampoline c, P1 && p1, P2 && p2)
{
if (root.empty()) return s.fail();
int snip = s.snip_start();
return _apply(s, trampoline(static_cast<DynamicClauseT<T>&>(*root.next).value, s, snip, c, p1, p2), c);
}
template <typename P1, typename P2, typename P3>
Trampoline operator () (Search &s, Trampoline c, P1 && p1, P2 && p2, P3 && p3)
{
if (root.empty()) return s.fail();
int snip = s.snip_start();
return _apply(s, trampoline(static_cast<DynamicClauseT<T>&>(*root.next).value, s, snip, c, p1, p2, p3), c);
}
template <typename P1, typename P2, typename P3, typename P4>
Trampoline operator () (Search &s, Trampoline c, P1 && p1, P2 && p2, P3 && p3, P4 && p4)
{
if (root.empty()) return s.fail();
int snip = s.snip_start();
return _apply(s, trampoline(static_cast<DynamicClauseT<T>&>(*root.next).value, s, snip, c, p1, p2, p3, p4), c);
}
template <typename P1, typename P2, typename P3, typename P4, typename P5>
Trampoline operator () (Search &s, Trampoline c, P1 && p1, P2 && p2, P3 && p3, P4 && p4, P5 && p5)
{
if (root.empty()) return s.fail();
int snip = s.snip_start();
return _apply(s, trampoline(static_cast<DynamicClauseT<T>&>(*root.next).value, s, snip,c, p1, p2, p3, p4, p5), c);
}
};
Trampoline unify_tests(Search &s)
{
LVar A, B, C, D, E, F, G;
LVar hello("hello"), one(1), willBeHello, willBeOne, l1(L(A, "hello", B, L(one, C, hello), F));
CapturedCont c, d, e, f, g, h, i, j, k, l;
*c = [=](Search &s)
{
cout << hello << "?=" << willBeHello << endl;
return s.identical(1, one, trampoline(d, s));
};
*d = [=](Search &s) {
cout << one << "?=" << willBeOne << endl;
s.alt(f);
return s.identical(hello, "hello", trampoline(e, s));
};
*e = [=](Search &s) {
cout << "compare with string succeeded" << endl;
s.alt(g);
return s.identical(F, G, trampoline(h, s));
};
*f = [=](Search &s) { cout << "compare with string failed" << endl; return end_search; };
*g = [=](Search &s)
{
cout << "unlike compare with vars did the right thing" << endl;
s.alt(i);
return s.unify(l1, L("Say", D, "there", L(E, 2, "hello"), G), trampoline(j, s));
};
*h = [=](Search &s) { cout << "unlike compare with vars did the wrong thing" << endl; return end_search; };
*i = [=](Search &s) { cout << "list unify failed" << A << " " << D << " " << B << " " << E << " " << C << endl; return end_search; };
*j = [=](Search &s) { s.alt(l); return s.identical(F, G, trampoline(k, s));};
*k = [=](Search &s) { cout << "list unify: " << A << " " << D << " " << B << " " << E << " " << C << " " << F << " " << G << endl; return end_search; };
*l = [=](Search &s) { cout << "var unify failed" << endl; return end_search; };
return s.unify(hello, willBeHello, trampoline(c, s));
}
//oops, the return value could be nixed by a clean-stack exception,
//but it worked when I made it always throw... {}{}{} WHY DOES IT WORK?
//OH, it works because it doesn't use the search until AFTER it returns the value
Trampoline stream1(Search &s, CapturedVar<int> m, Trampoline c)
{
CapturedLambda(Search &, int) rest;
UncountedLambda(Search &, int) rest_uncounted = rest;
*rest = [=](Search &s, int n)
{
n = n + 1;
if (n == 10) {
return s.fail();
}
else {
s.alt(trampoline(rest_uncounted, s, n));
*m = n;
// cout << "n is " << *n << endl;
return c;
}
};
cout << rest.get()->use_count() << endl;
return trampoline(rest, s, 0);
}
//Note it's probably cheaper to pass a CapturedCont than a Continuation
Trampoline stream2(Search &s, CapturedVar<int> m, Trampoline c)
{
CapturedLambda(Search &, int) rest;
UncountedLambda(Search &, int) rest_uncounted = rest;
*rest = [=](Search &s, int n)
{
n += 1;
if (n == 4) {
return s.fail();
}
else {
s.alt(trampoline(rest_uncounted, s, n));
// cout << "m is " << *n * *n << endl;
*m = n * n;
return c;
}
};
return trampoline(rest, s, 0);
}
Trampoline AmbTest(Search &s)
{
CapturedVar<int> n, m;
CapturedCont c1, c2, c3;
UncountedCont c1_u = c1, c2_u = c2, c3_u = c3;
combine_refs(c1, c2, c3);
//note it can't safely use Search inside of functions that return a value
*c1 = [=](Search &s) { return stream1(s, n, trampoline(c2_u, s)); };
*c2 = [=](Search &s) { return stream2(s, m, trampoline(c3_u, s)); };
*c3 = [=](Search &s)
{
if (*n != *m) return s.fail();
else {
s.results.insert_or_assign("n", *n);
s.results.insert_or_assign("m", *m);
return end_search;
}
};
cout << c1.get()->use_count() << endl;
cout << c2.get()->use_count() << endl;
cout << c3.get()->use_count() << endl;
return trampoline(c1, s);
}
#define OUT_OS_TYPE(TYPE) if (v.type() == typeid(TYPE)) { os << any_cast<TYPE>(v); } else
inline std::ostream & operator<<(std::ostream & os, const boost::any &v)
{
OUT_OS_TYPE(int)
OUT_OS_TYPE(double)
OUT_OS_TYPE(std::string)
OUT_OS_TYPE(const char *)
OUT_OS_TYPE(LVar)
// OUT_OS_TYPE(LogicalVariant)
// OUT_OS_TYPE(LValue)
{
os << "[unhandled type]";
}
return os;
}
#undef OUT_OS_TYPE
char Hello[] = "Hello";
#define QUEENS 20
int rowsx[QUEENS];
bool distinct(int x1, int y1, int x2, int y2)
{
return x1 != x2 && y1 != y2 && x1 + y1 != x2 + y2 && x1 - y1 != x2 - y2;
}
bool distinct_from_row(int x, int y, int r)
{
return distinct(x, y, rowsx[r - 1], r);
}
bool distinct_from_all(int x, int y)
{
	for (int i = 1; i <= y - 1; ++i) {
if (!distinct_from_row(x, y, i)) return false;
}
rowsx[y - 1] = x;
return true;
}
/*
local function queen_row(C,row)
local function loop(C,n)
local function loop_rest()
return loop(C,n+1)
end
if n<=queens then
amb_next(loop_rest)
-- print ('try',n,row)
if not distinct_from_all(n,row) then return amb() end
if row<queens then return queen_row(C,row+1) end
return C()
end
return amb()
end
return loop(C,1)
end
*/
Trampoline QueenRow(Search &s, int ru)// {int row}
{
//CapturedVar<int> r = ru;
CapturedCont c;
CapturedLambda(Search &, int, int) loop;
UncountedLambda(Search &, int, int) loopu = loop;
// cout << "r = " << *r << endl;
*c = [=](Search &s)
{
cout << "Solution: ";
		for (int y = 0; y < QUEENS; ++y) cout << rowsx[y] << ' ';
cout << endl;
return end_search;
};
*loop = [=](Search &s, int n, int r)
{
//CapturedVar<int> nu = n;
// UncountedLambda(Search &, int, int) loop_restu(CombineRef,loopu);
// *loop_restu = [=](Search &s, int n,int r) { return trampoline(loopu,s, n + 1, r ); };
if (n <= QUEENS) {
s.alt(trampoline(loopu, s, n + 1, r));
if (!distinct_from_all(n, r)) return s.fail();
else {
if (r < QUEENS) return trampoline(QueenRow, s, r + 1);
else return trampoline(c, s);
}
}
else return s.fail();
};
return trampoline(loop, s, 1, ru);
}
InternedString eats("eats"), plays("plays"), with("with"), bat("bat"), cat("cat"), the("the"), IS_v("v"), IS_d("d"), IS_np("np"), IS_n("n"), IS_a("a");
//verb([eats | O], O, v(eats)).
//verb([plays with | O], O, v(plays with)).
Trampoline verb(Search &s, Trampoline c, LVar X, LVar Y, LVar Z)
{
LVar O;
Subclause rest;
*rest = [=, &s]() { return s.unify(L(X, Y, Z), L(L(plays, with, DOT, O), O, L(IS_v, plays, with)), c); };
s.alt(trampoline(rest));
return s.unify(L(X, Y, Z), L(L(eats, DOT, O), O, L(IS_v, eats)), c);
}
//noun([bat | O], O, n(bat)).
//noun([cat | O], O, n(cat)).
Trampoline noun(Search &s, Trampoline c, LVar X, LVar Y, LVar Z)
{
LVar O;
Subclause rest;
*rest = [=, &s]() { return s.unify(L(X, Y, Z), L(L(cat, DOT, O), O, L(IS_n, cat)), c); };
s.alt(rest);
return s.unify(L(X, Y, Z), L(L(bat, DOT, O), O, L(IS_n, bat)), c);
}
//det([the | O], O, d(the)).
//det([a | O], O, d(a)).
Trampoline det(Search &s, Trampoline c, LVar X, LVar Y, LVar Z)
{
LVar O;
Subclause rest;
*rest = [=, &s]() { return s.unify(L(X, Y, Z), L(L(IS_a, DOT, O), O, L(IS_d, IS_a)), c); };
s.alt(rest);
return s.unify(L(X, Y, Z), L(L(the, DOT, O), O, L(IS_d, the)), c);
}
//noun_phrase(A,B,np(D,N)) :- det(A,C,D), noun(C,B,N).
Trampoline noun_phrase(Search &s, Trampoline c, LVar X, LVar Y, LVar Z)
{
LVar A, B, C, D, N;
Subclause r1, r2;
*r1 = [=, &s]() { return det(s, r2, A, C, D); };
*r2 = [=, &s]() { return noun(s, c, C, B, N); };
return s.unify(L(X, Y, Z), L(A, B, L(IS_np, D, N)), r1);
}
//verb_phrase(A,B,vp(V,NP)):- verb(A,C,V), noun_phrase(C,B,NP).
Trampoline verb_phrase(Search &s, Trampoline c, LVar X, LVar Y, LVar Z)
{
LVar A, B, C, V, NP;
Subclause r1, r2;
*r1 = [=, &s]() { return verb(s, r2, A, C, V); };
*r2 = [=, &s]() { return noun_phrase(s, c, C, B, NP); };
return s.unify(L(X, Y, Z), L(A, B, L("vp", V, NP)), r1);
}
//sentence(A, B, s(NP, VP)) :-noun_phrase(A, C, NP), verb_phrase(C, B, VP).
Trampoline sentence(Search &s, Trampoline c, LVar X, LVar Y, LVar Z)
{
LVar A, B, C, VP, NP;
Subclause r1, r2;
*r1 = [=, &s]() { return noun_phrase(s, r2, A, C, NP); };
*r2 = [=, &s]() { return verb_phrase(s, c, C, B, VP); };
return s.unify(L(X, Y, Z), L(A, B, L("s", NP, VP)), r1);
}
Trampoline gen_sentences(Search &s)
{
LVar T, _, S;
Subclause display;
*display = [=, &s]() { cout << "sentence: " << T << endl << "parse: " << S << endl; return end_search; };
return sentence(s, display, T, _, S);
}
int main()
{
LVar A(NIL);
LVar B(UNINSTANCIATED);
LVar C("hello");
LVar D;
	LVar E; // note that E(D) would share the value, not chain it
	LVar F; // likewise F(E); the chain() calls below link them instead
F.chain(E);
E.chain(D);
F.get_target() = C.get_target();
std::cout << (LValue(InternedString("Hello")) == LValue(InternedString(Hello))) << std::endl;
std::cout << (LValue(InternedString("Hello")) == LValue(55)) << std::endl;
std::cout << (LValue(55) == LValue(55)) << std::endl;
std::cout << TypeNames[D.type()] << std::endl;
std::cout << A << ' ' << B << ' ' << C << ' ' << D << ' ' << E << ' ' << F << ' ' << &F.get_target() << std::endl;
LVar M = L(1, DOT, 2);
std::cout << L("hello", 1, "Laurie") << L(1, L(2, 3), 4) << M << std::endl;
Search s(AmbTest);
while (s()) {
std::cout << "n = " << s.results["n"] << " m = " << s.results["m"] << std::endl;
}
s.reset();
cout << "run a second time" << endl;
while (s()) {
std::cout << "n = " << s.results["n"] << " m = " << s.results["m"] << std::endl;
}
Search g(gen_sentences);
while (g());
Search q(QueenRow, 1);
q();
Search u(unify_tests);
u();
LVar A1;
LVar B1("hello"), B2("hello");
LVar C1(1.0), C2(1.0);
LVar D1, D2;
D1.chain(A1);D2.chain(A1);
cout << "equals tests " << strict_equals(A1, A1) << " " << strict_equals(B1, B2) << " " << strict_equals(C1, C2) << " " << strict_equals(D1, D2) << " " << endl;
LVar A2;
LVar B3("There");
LVar C3(2.0);
	LVar D3; D3.chain(A2);
cout << "equals tests " << strict_equals(A1, A2) << " " << strict_equals(B1, B3) << " " << strict_equals(C1, C3) << " " << strict_equals(D1, D3) << " " << endl;
DynamicPredicate<CapturedLambda(Search &, int, Trampoline, LVar)> dynamic_test;
DynamicClause dog, cat, person;
dog = dynamic_test.asserta([](Search &s, int cut, Trampoline c, LVar Animal) { return s.unify(Animal, "dog", c); });
LVar Animal;
Search animals(std::function<Trampoline(Search&,Trampoline,LVar)>(std::ref(dynamic_test)),end_search, Animal);
cout << "should be just dog" << endl;
while (animals()) cout << Animal << endl;
cat = dynamic_test.assertz([](Search &s, int cut, Trampoline c, LVar Animal) { return s.unify(Animal, "cat", c); });
cout << "should be dog, cat" << endl;
animals.reset();
while (animals()) cout << Animal << endl;
person = dynamic_test.asserta([](Search &s, int cut, Trampoline c, LVar Animal) { return s.unify(Animal, "person", c); });
cout << "should be person, dog, cat" << endl;
animals.reset();
while (animals()) cout << Animal << endl;
dynamic_test.retract(dog);
cout << "should be person, cat" << endl;
animals.reset();
while (animals()) cout << Animal << endl;
dynamic_test.retract(person);
cout << "should be cat" << endl;
animals.reset();
while (animals()) cout << Animal << endl;
dynamic_test.retract(cat);
cout << "should be empty" << endl;
animals.reset();
while (animals()) cout << Animal << endl;
}
|
{"hexsha": "460488a3572d776eb49dc07376a823d20a335f91", "size": 64754, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "embeddedprolog.cpp", "max_stars_repo_name": "differentprogramming/logic_in_cpp", "max_stars_repo_head_hexsha": "f0ccf8d9a5108ff358c5cec189357205534b23ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2017-03-19T19:47:27.000Z", "max_stars_repo_stars_event_max_datetime": "2017-03-19T19:55:35.000Z", "max_issues_repo_path": "embeddedprolog.cpp", "max_issues_repo_name": "differentprogramming/logic_in_cpp", "max_issues_repo_head_hexsha": "f0ccf8d9a5108ff358c5cec189357205534b23ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "embeddedprolog.cpp", "max_forks_repo_name": "differentprogramming/logic_in_cpp", "max_forks_repo_head_hexsha": "f0ccf8d9a5108ff358c5cec189357205534b23ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0546197039, "max_line_length": 227, "alphanum_fraction": 0.6550483368, "num_tokens": 19657}
|
#!/usr/bin/env python
from __future__ import division
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_svmlight_file
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import log_loss
import argparse
import logging
import numpy as np
import os
import time
import xgboost_colsub as xgb
def train_predict(train_file, test_file, predict_valid_file, predict_test_file,
n_fold=5):
feature_name = os.path.basename(train_file)[:-10]
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
filename='esb_xg_grid_colsub_{}.log'.format(feature_name))
logging.info('Loading training and test data...')
X, y = load_svmlight_file(train_file)
X_tst, _ = load_svmlight_file(test_file)
xg = xgb.XGBClassifier()
param = {'learning_rate': [.01, .03, .05], 'max_depth': [4, 5, 6],
'n_estimators': [400, 600]}
cv = StratifiedKFold(y, n_folds=n_fold, shuffle=True, random_state=2015)
clf = GridSearchCV(xg, param, scoring='log_loss', verbose=1, cv=cv)
logging.info('Cross validation for grid search...')
clf.fit(X, y)
p = clf.predict_proba(X)[:, 1]
logging.info('best model = {}'.format(clf.best_estimator_))
logging.info('best score = {:.4f}'.format(clf.best_score_))
logging.info('Retraining with 100% data...')
clf.best_estimator_.fit(X, y)
p_tst = clf.best_estimator_.predict_proba(X_tst)[:, 1]
logging.info('Saving predictions...')
np.savetxt(predict_valid_file, p, fmt='%.6f')
np.savetxt(predict_test_file, p_tst, fmt='%.6f')
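# Example invocation (hypothetical file names; the script derives a feature
# name by stripping the last 10 characters of the training file's basename):
#   python train_predict_esb_xg_grid_colsub.py \
#       --train-file feature.trn.sps --test-file feature.tst.sps \
#       --predict-valid-file feature.val.yht --predict-test-file feature.tst.yht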
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-file', required=True, dest='train_file')
parser.add_argument('--test-file', required=True, dest='test_file')
parser.add_argument('--predict-valid-file', required=True,
dest='predict_valid_file')
parser.add_argument('--predict-test-file', required=True,
dest='predict_test_file')
args = parser.parse_args()
start = time.time()
train_predict(train_file=args.train_file,
test_file=args.test_file,
predict_valid_file=args.predict_valid_file,
predict_test_file=args.predict_test_file)
    logging.info('finished ({:.2f} min elapsed)'.format((time.time() - start) /
                                                        60))
|
{"hexsha": "72e863a2f3b5ad6ff90852fde12c27baeca5e926", "size": 2522, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train_predict_esb_xg_grid_colsub.py", "max_stars_repo_name": "drivendata/countable-care-3rd-place", "max_stars_repo_head_hexsha": "d1bba2f09ba0196cc3f35d2a41ea93bfbc4086a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-26T12:00:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-20T19:28:24.000Z", "max_issues_repo_path": "src/train_predict_esb_xg_grid_colsub.py", "max_issues_repo_name": "drivendata/countable-care-3rd-place", "max_issues_repo_head_hexsha": "d1bba2f09ba0196cc3f35d2a41ea93bfbc4086a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/train_predict_esb_xg_grid_colsub.py", "max_forks_repo_name": "drivendata/countable-care-3rd-place", "max_forks_repo_head_hexsha": "d1bba2f09ba0196cc3f35d2a41ea93bfbc4086a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-05-16T17:40:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-20T19:28:25.000Z", "avg_line_length": 36.0285714286, "max_line_length": 82, "alphanum_fraction": 0.655035686, "include": true, "reason": "import numpy", "num_tokens": 590}
|
!! SRN 04502674
!!
!! WRONG RESULT FROM POLYMORPHIC POINTER ASSIGNMENT
!!
!! This example shows that the polymorphic pointer assignment
!! is being done incorrectly, giving either wrong results or
!! a segfault. This occurs for version 19.1 and is a regression
!! from 18 and 19.0.
!!
!! $ ifort --version
!! ifort (IFORT) 19.1.0.166 20191121
!! $ ifort -g -traceback intel-20200125.f90
!! $ ./a.out
!! loop
!! forrtl: severe (174): SIGSEGV, segmentation fault occurred
!! Image PC Routine Line Source
!! a.out 0000000000404BFA Unknown Unknown Unknown
!! libpthread-2.28.s 00007FA0760F3E70 Unknown Unknown Unknown
!! a.out 0000000000403A01 mod_mp_next_ 27 intel-20200125.f90
!! a.out 0000000000403D5D MAIN__ 44 intel-20200125.f90
!! a.out 00000000004037E2 Unknown Unknown Unknown
!! libc-2.28.so 00007FA075F39413 __libc_start_main Unknown Unknown
!! a.out 00000000004036EE Unknown Unknown Unknown
module mod
type :: list_item
type(list_item), pointer :: next => null()
end type
type :: iterator
class(list_item), pointer :: item => null()
contains
procedure :: next
end type
contains
subroutine next(this)
class(iterator), intent(inout) :: this
logical :: flag
if(associated(this%item)) flag = associated(this%item%next)
if(associated(this%item)) this%item => this%item%next ! SEGFAULT HERE OR WRONG RESULT
if (associated(this%item) .neqv. flag) stop 1 ! THESE SHOULD BE THE SAME
end subroutine
end module
use mod
type(iterator) :: iter
type(list_item), pointer :: head
! One item in the list ==> segfault
! Two items in the list ==> premature exit from loop
allocate(head)
!allocate(head%next) ! add a second item
iter%item => head
do while (associated(iter%item))
print *, 'loop'
call iter%next
end do
end
|
{"hexsha": "d40dc463aba4c3f3616d7c87e10c60253067f4c4", "size": 2009, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "intel-bugs/intel-20200125.f90", "max_stars_repo_name": "dhnza/fortran-compiler-tests", "max_stars_repo_head_hexsha": "f60d1c2a6e67153fa92f5653e0a3df771d5ed5bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2017-11-20T16:35:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T16:58:15.000Z", "max_issues_repo_path": "intel-bugs/intel-20200125.f90", "max_issues_repo_name": "dhnza/fortran-compiler-tests", "max_issues_repo_head_hexsha": "f60d1c2a6e67153fa92f5653e0a3df771d5ed5bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-05-27T21:45:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T16:01:55.000Z", "max_forks_repo_path": "intel-bugs/intel-20200125.f90", "max_forks_repo_name": "dhnza/fortran-compiler-tests", "max_forks_repo_head_hexsha": "f60d1c2a6e67153fa92f5653e0a3df771d5ed5bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-01-27T11:03:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-14T00:13:33.000Z", "avg_line_length": 31.390625, "max_line_length": 90, "alphanum_fraction": 0.6356396217, "num_tokens": 555}
|
"""
The algorithms based on centered alignment proposed in
C. Cortes, M. Mohri, and A. Rostamizadeh, "Algorithms for Learning Kernels Based on Centered Alignment," J. Mach. Learn. Res., vol. 13, pp. 795-828, Mar. 2012.
Given :math:`p` kernel matrices :math:`\mathbf{K}_1, \mathbf{K}_2, ..., \mathbf{K}_p`, centered kernel alignment learns a linear combination of kernels resulting in a combined kernel matrix.
.. math::
\mathbf{K}_{c\mu} = \sum_{q=1}^p \mu_q \mathbf{K}_{cq}
where :math:`\mathbf{K}_{cq}` is the centered kernel matrix.
.. math::
\mathbf{K}_{cq} = (\mathbf{I} - \dfrac{\mathbf{11}^T}{n})\mathbf{K}_q (\mathbf{I} - \dfrac{\mathbf{11}^T}{n})
The ``Alignf`` method optimizes with respect to :math:`\mathbf{\mu}` to maximize centered alignment.
.. math::
    \max_{\mathbf{\mu}} \dfrac{\langle \mathbf{K}_{c\mu}, \mathbf{y}\mathbf{y}^T \rangle_F}{n \sqrt{\langle \mathbf{K}_{c\mu}, \mathbf{K}_{c\mu} \rangle_F}}
such that (``typ=linear``):
.. math::
\sum \mu_q = 1
or constraining the sum of weights to a convex combination (``typ=convex``):
.. math::
\sum \mu_q = 1, \mu_q > 0, q = 1...p
"""
from ..util.la import fro_prod, fro_prod_low_rank
from ..kernel.kernel import center_kernel, center_kernel_low_rank
from numpy import zeros, eye, array, ndarray
from numpy.linalg import inv, norm
from itertools import combinations
from cvxopt.solvers import qp as QP, options
from cvxopt import matrix
options['show_progress'] = False
class Alignf:
def __init__(self, typ="linear"):
"""
:param typ: (``str``) "linear" or "convex" combination of kernels.
"""
assert typ in ["linear", "convex"]
self.typ = typ
self.trained = False
self.low_rank = False
def fit(self, Ks, y, holdout=None):
"""
Learn weights for kernel matrices or Kinterfaces.
:param Ks: (``list``) of (``numpy.ndarray``) or of (``Kinterface``) to be aligned.
        :param y: (``numpy.ndarray``) Class labels :math:`y_i \in \{-1, 1\}` or regression targets.
        :param holdout: (``list``) List of indices to exclude from alignment.
"""
m = len(y)
p = len(Ks)
y = y.reshape((m, 1))
# Generalization to Kinterfaces
Ks = [K[:, :] for K in Ks]
# Filter out hold out values
if not self.low_rank:
if not isinstance(holdout, type(None)):
holdin = sorted(list(set(range(m)) - set(holdout)))
y = y[holdin]
                Ksa = [k[holdin, :][:, holdin] for k in Ks]  # a list, not a one-shot iterator
en = enumerate(Ksa)
Ky = y.dot(y.T)
else:
Ksa = Ks
en = enumerate(Ksa)
Ky = y.dot(y.T)
else:
if not isinstance(holdout, type(None)):
holdin = sorted(list(set(range(m)) - set(holdout)))
y = y[holdin]
                Ksa = [k[holdin, :] for k in Ks]  # a list, not a one-shot iterator
en = enumerate(Ksa)
else:
Ksa = Ks
en = enumerate(Ksa)
a = zeros((p, 1))
M = zeros((p, p))
if not self.low_rank:
for (i, K), (j, L) in combinations(list(en), 2):
M[i, j] = M[j, i] = fro_prod(center_kernel(K), center_kernel(L))
if a[i] == 0:
M[i, i] = fro_prod(center_kernel(K), center_kernel(K))
a[i] = fro_prod(center_kernel(K), Ky)
if a[j] == 0:
M[j, j] = fro_prod(center_kernel(L), center_kernel(L))
a[j] = fro_prod(center_kernel(L), Ky)
else:
for (i, K), (j, L) in combinations(list(en), 2):
M[i, j] = M[j, i] = fro_prod_low_rank(center_kernel_low_rank(K),
center_kernel_low_rank(L))
if a[i] == 0:
M[i, i] = fro_prod_low_rank(center_kernel_low_rank(K),
center_kernel_low_rank(K))
a[i] = fro_prod_low_rank(center_kernel_low_rank(K), y)
if a[j] == 0:
M[j, j] = fro_prod_low_rank(center_kernel_low_rank(L),
center_kernel_low_rank(L))
a[j] = fro_prod_low_rank(center_kernel_low_rank(L), y)
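        # Solve for the weights: the "linear" variant uses the closed-form
        # solution (mu proportional to M^{-1} a, normalized), while the
        # "convex" variant solves the QP  min_mu (1/2) mu'M mu - 2 a'mu
        # s.t. mu >= 0, then normalizes to unit L1 norm.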
if self.typ == "linear":
Mi = inv(M)
mu = Mi.dot(a) / norm(Mi.dot(a), ord=2)
elif self.typ == "convex":
Q = matrix(M)
r = matrix(-2 * a.ravel())
G = -1 * matrix(eye(p, p))
h = matrix(0.0, (p, 1))
sol = QP(Q, r, G, h)
mu = array(sol["x"]).ravel()
mu = mu / norm(mu, ord=1)
if not self.low_rank:
Kappa = sum([mu_i * center_kernel(k_i) for mu_i, k_i in zip(mu, Ks)])
self.Kappa = Kappa
else:
            self.Gs = [center_kernel_low_rank(g) for g in Ks]  # materialize so repeated indexing works
mu = mu.ravel()
self.mu = mu
self.trained = True
def __call__(self, i, j):
"""
Access portions of the combined kernel matrix at indices i, j.
:param i: (``int``) or (``numpy.ndarray``) Index/indices of data points(s).
:param j: (``int``) or (``numpy.ndarray``) Index/indices of data points(s).
:return: (``numpy.ndarray``) Value of the kernel matrix for i, j.
"""
assert self.trained
if isinstance(i, ndarray):
i = i.astype(int).ravel()
if isinstance(j, ndarray):
j = j.astype(int).ravel()
if isinstance(i, int) and isinstance(j, int):
return self.Kappa[i, j]
else:
return self.Kappa[i, :][:, j]
def __getitem__(self, item):
"""
Access portions of the kernel matrix generated by ``kernel``.
:param item: (``tuple``) pair of: indices or list of indices or (``numpy.ndarray``) or (``slice``) to address portions of the kernel matrix.
:return: (``numpy.ndarray``) Value of the kernel matrix for item.
"""
assert self.trained
return self.Kappa[item]
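# Minimal usage sketch (hypothetical data; assumes K1, K2 are precomputed
# kernel matrices over the same samples and y holds +/-1 labels):
#   model = Alignf(typ="convex")
#   model.fit([K1, K2], y)
#   weights = model.mu        # learned kernel weights
#   K_combined = model[:, :]  # combined centered kernel matrix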
class AlignfLowRank(Alignf):
"""
Use the align method using low-rank kernels.
Useful for computing alignment of low-rank representations.
"""
def __init__(self, typ="linear"):
"""
:param typ: (``str``) "linear" or "convex" combination of kernels.
"""
assert typ in ["linear", "convex"]
self.typ = typ
self.trained = False
self.low_rank = True
def __call__(self, i, j):
"""
Access portions of the combined kernel matrix at indices i, j.
:param i: (``int``) or (``numpy.ndarray``) Index/indices of data points(s).
:param j: (``int``) or (``numpy.ndarray``) Index/indices of data points(s).
:return: (``numpy.ndarray``) Value of the kernel matrix for i, j.
"""
assert self.trained
if isinstance(i, ndarray):
i = i.astype(int).ravel()
if isinstance(j, ndarray):
j = j.astype(int).ravel()
return sum([m * (G[i, :].dot(G[j, :].T))
for m, G in zip(self.mu, self.Gs)])
def __getitem__(self, item):
"""
Access portions of the kernel matrix generated by ``kernel``.
:param item: (``tuple``) pair of: indices or list of indices or (``numpy.ndarray``) or (``slice``) to address portions of the kernel matrix.
:return: (``numpy.ndarray``) Value of the kernel matrix for item.
"""
assert self.trained
return sum([m * (G[item[0]].dot(G[item[1]].T))
for m, G in zip(self.mu, self.Gs)])
|
{"hexsha": "88465c2608963df3004cc629e9554548399a2562", "size": 7739, "ext": "py", "lang": "Python", "max_stars_repo_path": "Project_Health/src/mklaren/mkl/alignf.py", "max_stars_repo_name": "Anonymous633671/STABILIZER", "max_stars_repo_head_hexsha": "5a1ab8099a2d75ace7e053afc78055f1f4d359c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-07-27T10:32:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T11:51:51.000Z", "max_issues_repo_path": "Project_Health/src/mklaren/mkl/alignf.py", "max_issues_repo_name": "Anonymous633671/STABILIZER", "max_issues_repo_head_hexsha": "5a1ab8099a2d75ace7e053afc78055f1f4d359c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2016-03-15T16:27:47.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-05T02:25:08.000Z", "max_forks_repo_path": "src/mklaren/mkl/alignf.py", "max_forks_repo_name": "ai-se/GENERAL", "max_forks_repo_head_hexsha": "5a4bef2a80526524e3e18139b561fc0e2bb8888d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-01-28T22:45:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-04T13:15:10.000Z", "avg_line_length": 34.3955555556, "max_line_length": 190, "alphanum_fraction": 0.5283628376, "include": true, "reason": "from numpy", "num_tokens": 2084}
|
# Copyright 2014, Jerome Fung, Rebecca W. Perry, Thomas G. Dimiduk
#
# flyvbjerg_petersen_std_err is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with flyvbjerg_petersen_std_err. If not, see
# <http://www.gnu.org/licenses/>.
'''
Apply Flyvbjerg-Petersen block decorrelation method for estimating the
standard error on the mean of a (possibly correlated) time series of
values.
.. moduleauthor:: Jerome Fung <jerome.fung@gmail.com>
.. moduleauthor:: Rebecca W. Perry <rperry@seas.harvard.edu>
.. moduleauthor:: Tom Dimiduk <tom@dimiduk.net>
Reference: H. Flyvbjerg and H. G. Petersen, "Error estimates on correlated
data", J. Chem. Phys. 91, 461--466 (1989).
'''
import numpy as np
import warnings
def block_transformation(series):
"""
Do a single step of fp block averaging.
Parameters
----------
series : ndarray
Things we want to average: e.g. squared displacements to calculate the
mean squared displacement of a Brownian particle.
Returns
-------
blocked_series : ndarray
an array of half the length of series with adjacent terms averaged
Notes
-----
Flyvbjerg & Peterson 1989, equation 20
"""
n_steps = series.size
    n_steps_p = n_steps // 2  # integer so it can be used as a slice index
output = 0.5 * (series[::2][:n_steps_p] + series[1::2][:n_steps_p])
return output
def calculate_blocked_variances(series, npmin = 15):
"""
Compute a series of blocks and variances.
Parameters
----------
series : ndarray
the thing we want to average: e.g. squared
displacements for a Brownian random walk.
npmin : int
cutoff number of points to stop blocking
Returns
-------
output_var, var_stderr : ndarray
The variance and stderr of the variance at each blocking level
Notes
-----
Flyvbjerg & Peterson suggest continuing blocking down to 2 points, but the
last few blocks are very noisy, so we default to cutting off before that.
"""
n_steps = series.size
def var(d, n):
# see eq. 27 of FP paper
return d.var()/(n-1)
def stderr_var(n):
# see eq. 27 of FP paper
return np.sqrt(2./(n-1))
output_var = np.array([var(series, n_steps)]) # initialize
var_stderr = np.array([stderr_var(n_steps)])
while n_steps > npmin:
series = block_transformation(series)
n_steps = series.size
# TODO: precompute size of output_var and var_stderr from n_steps
# rather than appending
output_var = np.append(output_var, var(series, n_steps))
var_stderr = np.append(var_stderr, stderr_var(n_steps))
return output_var, var_stderr
def detect_fixed_point(fp_var, fp_sev, full_output = False):
"""
Find whether the block averages decorrelate the data series to a fixed
point.
Parameters
----------
fp_var: ndarray
FP blocked variance
fp_sev: ndarray
FP standard error of the variance.
Returns
-------
best_var : float
best estimate of the variance
converged : bool
did the series converge to a fixed point?
bounds : (int, int) only if full_output is True
range of fp_var averaged to compute best_var
Notes
-----
Expects both fp_var and fp_sev will have been
truncated to cut off points with an overly small n_p and
correspondingly large standard error of the variance.
"""
n_trans = fp_var.size # number of block transformations and index
    # Initialize to -1 so an edge that is never found is detected below
    left_index = -1
    right_index = -1
    # Detect left edge (stop one short so i + 1 stays in bounds)
    for i in np.arange(n_trans - 1):
        # ith point inside error bars of next point
        if np.abs(fp_var[i + 1] - fp_var[i]) < fp_var[i + 1] * fp_sev[i + 1]:
            left_index = i
            break
    # Check right edge
    for i in np.arange(1, n_trans)[::-1]:
        if np.abs(fp_var[i] - fp_var[i - 1]) < fp_var[i - 1] * fp_sev[i - 1]:
            right_index = i
            break
# if search succeeds
if (left_index >= 0) and (right_index >= 0) and \
(right_index >= left_index):
best_var = np.average(fp_var[left_index:right_index + 1],
weights = 1./fp_sev[left_index:right_index + 1])
converged = True
else:
best_var = fp_var.max()
converged = False
if full_output is True:
return best_var, converged, (left_index, right_index)
else:
return best_var, converged
def fp_stderr(data):
'''
Compute standard error using Flyvbjerg-Petersen blocking.
Computes the standard error on the mean of a possibly correlated timeseries
of measurements.
Parameters
----------
data: ndarray
data whose mean is to be calculated, and for which we need
a standard error on the mean
Returns
-------
stderr : float
Standard error on the mean of data
Notes
-----
Uses the technique described in H. Flyvbjerg and H. G. Petersen,
"Error estimates on correlated data", J. Chem. Phys. 91, 461--466 (1989).
section 3.
'''
block_trans_var, block_trans_sev = calculate_blocked_variances(data)
var_mean, conv, bounds = detect_fixed_point(block_trans_var,
block_trans_sev, True)
if not conv:
warnings.warn("Fixed point not found, returned value is a lower bound on the standard error")
return np.sqrt(var_mean)
# LocalWords: Flyvbjerg
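# A minimal demo sketch (added for illustration, not part of the library):
# for a correlated AR(1) series the FP estimate should noticeably exceed
# the naive stderr, which ignores correlations.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = np.zeros(2**14)
    for t in range(1, x.size):
        x[t] = 0.9 * x[t - 1] + rng.randn()
    print('naive stderr:', x.std() / np.sqrt(x.size))
    print('FP stderr:   ', fp_stderr(x))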
|
{"hexsha": "3001ecd252c6cc31aa3cc86326266420171eace6", "size": 5967, "ext": "py", "lang": "Python", "max_stars_repo_path": "US/diffusivity/2_fblockavg/flyvbjerg_petersen_std_err.py", "max_stars_repo_name": "vtlim/permeability", "max_stars_repo_head_hexsha": "56c9379b82ab586e76224937b72397011fd6acc0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-08-18T16:15:02.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-18T16:15:02.000Z", "max_issues_repo_path": "US/diffusivity/2_fblockavg/flyvbjerg_petersen_std_err.py", "max_issues_repo_name": "vtlim/permeability", "max_issues_repo_head_hexsha": "56c9379b82ab586e76224937b72397011fd6acc0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-01T18:04:59.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-01T18:04:59.000Z", "max_forks_repo_path": "US/diffusivity/2_fblockavg/flyvbjerg_petersen_std_err.py", "max_forks_repo_name": "vtlim/permeability", "max_forks_repo_head_hexsha": "56c9379b82ab586e76224937b72397011fd6acc0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.835, "max_line_length": 101, "alphanum_fraction": 0.6499078264, "include": true, "reason": "import numpy", "num_tokens": 1453}
|
#!/usr/bin/env python
from funlib.show.neuroglancer import add_layer
import argparse
import daisy
import glob
import neuroglancer
import os
import webbrowser
import numpy as np
import zarr
parser = argparse.ArgumentParser()
parser.add_argument(
'--file',
'-f',
type=str,
action='append',
help="The path to the container to show")
parser.add_argument(
'--datasets',
'-d',
type=str,
nargs='+',
action='append',
help="The datasets in the container to show")
parser.add_argument(
'--graphs',
'-g',
type=str,
nargs='+',
action='append',
help="The graphs in the container to show")
parser.add_argument(
'--no-browser',
'-n',
type=bool,
nargs='?',
default=False,
const=True,
help="If set, do not open a browser, just print a URL")
args = parser.parse_args()
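# Example invocation (hypothetical container and dataset names):
#   python visualize.py -f sample.zarr -d volumes/raw volumes/labels -g points/nodes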
neuroglancer.set_server_bind_address('0.0.0.0')
viewer = neuroglancer.Viewer()
def to_slice(slice_str):
values = [int(x) for x in slice_str.split(':')]
if len(values) == 1:
return values[0]
return slice(*values)
def parse_ds_name(ds):
tokens = ds.split('[')
if len(tokens) == 1:
return ds, None
ds, slices = tokens
slices = list(map(to_slice, slices.rstrip(']').split(',')))
return ds, slices
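# Project wraps an array and pins one dimension to a fixed index, presenting a
# lazily sliced, lower-dimensional view without copying the underlying data.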
class Project:
def __init__(self, array, dim, value):
self.array = array
self.dim = dim
self.value = value
self.shape = array.shape[:self.dim] + array.shape[self.dim + 1:]
self.dtype = array.dtype
def __getitem__(self, key):
slices = key[:self.dim] + (self.value,) + key[self.dim:]
ret = self.array[slices]
return ret
def slice_dataset(a, slices):
dims = a.roi.dims
for d, s in list(enumerate(slices))[::-1]:
if isinstance(s, slice):
raise NotImplementedError("Slicing not yet implemented!")
else:
index = (s - a.roi.get_begin()[d])//a.voxel_size[d]
a.data = Project(a.data, d, index)
a.roi = daisy.Roi(
a.roi.get_begin()[:d] + a.roi.get_begin()[d + 1:],
a.roi.get_shape()[:d] + a.roi.get_shape()[d + 1:])
a.voxel_size = a.voxel_size[:d] + a.voxel_size[d + 1:]
return a
def open_dataset(f, ds):
original_ds = ds
ds, slices = parse_ds_name(ds)
slices_str = original_ds[len(ds):]
try:
dataset_as = []
if all(key.startswith("s") for key in zarr.open(f)[ds].keys()):
raise AttributeError("This group is a multiscale array!")
for key in zarr.open(f)[ds].keys():
dataset_as.extend(open_dataset(f, f"{ds}/{key}{slices_str}"))
return dataset_as
except AttributeError as e:
# dataset is an array, not a group
pass
print("ds :", ds)
print("slices:", slices)
try:
zarr.open(f)[ds].keys()
is_multiscale = True
except:
is_multiscale = False
if not is_multiscale:
a = daisy.open_ds(f, ds)
if slices is not None:
a = slice_dataset(a, slices)
if a.roi.dims == 2:
print("ROI is 2D, recruiting next channel to z dimension")
a.roi = daisy.Roi((0,) + a.roi.get_begin(), (a.shape[-3],) + a.roi.get_shape())
a.voxel_size = daisy.Coordinate((1,) + a.voxel_size)
if a.roi.dims == 4:
print("ROI is 4D, stripping first dimension and treat as channels")
a.roi = daisy.Roi(a.roi.get_begin()[1:], a.roi.get_shape()[1:])
a.voxel_size = daisy.Coordinate(a.voxel_size[1:])
if a.data.dtype == np.int64 or a.data.dtype == np.int16:
print("Converting dtype in memory...")
a.data = a.data[:].astype(np.uint64)
return [(a, ds)]
else:
return [([daisy.open_ds(f, f"{ds}/{key}") for key in zarr.open(f)[ds].keys()], ds)]
for f, datasets in zip(args.file, args.datasets):
arrays = []
for ds in datasets:
try:
print("Adding %s, %s" % (f, ds))
dataset_as = open_dataset(f, ds)
except Exception as e:
print(type(e), e)
print("Didn't work, checking if this is multi-res...")
scales = glob.glob(os.path.join(f, ds, 's*'))
if len(scales) == 0:
print(f"Couldn't read {ds}, skipping...")
raise e
print("Found scales %s" % ([
os.path.relpath(s, f)
for s in scales
],))
            # this branch raised before dataset_as was set, so build it here
            dataset_as = [
                t
                for scale_ds in scales
                for t in open_dataset(f, os.path.relpath(scale_ds, f))
            ]
for a in dataset_as:
arrays.append(a)
with viewer.txn() as s:
for array, dataset in arrays:
add_layer(s, array, dataset)
if args.graphs:
for f, graphs in zip(args.file, args.graphs):
for graph in graphs:
graph_annotations = []
try:
ids = daisy.open_ds(f, graph + '-ids').data
loc = daisy.open_ds(f, graph + '-locations').data
except:
loc = daisy.open_ds(f, graph).data
ids = None
dims = loc.shape[-1]
loc = loc[:].reshape((-1, dims))
if ids is None:
ids = range(len(loc))
for i, l in zip(ids, loc):
if dims == 2:
l = np.concatenate([[0], l])
graph_annotations.append(
neuroglancer.EllipsoidAnnotation(
center=l[::-1],
radii=(5, 5, 5),
id=i))
graph_layer = neuroglancer.AnnotationLayer(
annotations=graph_annotations,
voxel_size=(1, 1, 1))
with viewer.txn() as s:
s.layers.append(name='graph', layer=graph_layer)
url = str(viewer)
print(url)
if os.environ.get("DISPLAY") and not args.no_browser:
webbrowser.open_new(url)
print("Press ENTER to quit")
input()
|
{"hexsha": "f782ab579af857f8f3720eea3f0a98c731407db6", "size": 6061, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/visualize.py", "max_stars_repo_name": "rhoadesScholar/daisy", "max_stars_repo_head_hexsha": "78cdd2ed0d67647a6602fb53cc952214450f3753", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/visualize.py", "max_issues_repo_name": "rhoadesScholar/daisy", "max_issues_repo_head_hexsha": "78cdd2ed0d67647a6602fb53cc952214450f3753", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/visualize.py", "max_forks_repo_name": "rhoadesScholar/daisy", "max_forks_repo_head_hexsha": "78cdd2ed0d67647a6602fb53cc952214450f3753", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8027522936, "max_line_length": 91, "alphanum_fraction": 0.5408348457, "include": true, "reason": "import numpy", "num_tokens": 1528}
|
#-------------------------------------------------------------------------------
# Author: Lukasz Janyst <lukasz@jany.st>
# Date: 14.06.2017
#-------------------------------------------------------------------------------
import random
import cv2
import argparse
import sys,os
import os.path as ops
sys.path.append(os.getcwd())
sys.path.append("..")
from shutil import copyfile
import numpy as np
import glob
def init_args():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=str, help='The training dataset dir path',
default='D:/Personal_Research/Dataset/BreaKHis_v1/histology_slides/breast')
    parser.add_argument('--output_dir', type=str, help='The output dir of the split dataset',
                        default='data')
    parser.add_argument('--split_ratio', type=float, help='The fraction of images held out for the test set',
                        default=0.3)
return parser.parse_args()
#---------------------------------------------------------------------------
def split_data(inp_dir, out_dir, split_ratio, data_type):
"""
Load the data and make the generators
:param data_dir: the directory where the dataset's file are stored
:param valid_fraction: what franction of the dataset should be used
as a validation sample
"""
print("Start to split the data type {:s}...".format(data_type))
image_paths = glob.glob('{:s}/benign/SOB/**/**/{:s}/*.png'.format(inp_dir, data_type), recursive=True) + \
glob.glob('{:s}/malignant/SOB/**/**/{:s}/*.png'.format(inp_dir, data_type), recursive=True)
num_images = len(image_paths)
if num_images == 0:
        raise RuntimeError('No data files found in ' + inp_dir)
random.shuffle(image_paths)
test_paths = image_paths[:int(split_ratio * num_images)]
train_paths = image_paths[int(split_ratio * num_images):]
test_out_dir = ops.join(out_dir, 'test')
test_out_dir = ops.join(test_out_dir, data_type)
if not ops.exists(test_out_dir):
os.makedirs(test_out_dir)
train_out_dir = ops.join(out_dir, 'train')
train_out_dir = ops.join(train_out_dir, data_type)
if not ops.exists(train_out_dir):
os.makedirs(train_out_dir)
    for path in train_paths:
        # ops.basename handles both '/' and '\\' separators, unlike the
        # previous manual split, which broke on Windows paths.
        file_name = ops.basename(path)
        copyfile(path, ops.join(train_out_dir, file_name))
    for path in test_paths:
        file_name = ops.basename(path)
        copyfile(path, ops.join(test_out_dir, file_name))
print("Finish splitting the data type {:s}...".format(data_type))
#-------------------------------------------------------------------------------
if __name__ == '__main__':
# init args
args = init_args()
# split the data to train and test set
split_data(inp_dir=args.dataset_dir, out_dir=args.output_dir,
split_ratio=args.split_ratio, data_type='40X')
split_data(inp_dir=args.dataset_dir, out_dir=args.output_dir,
split_ratio=args.split_ratio, data_type='100X')
split_data(inp_dir=args.dataset_dir, out_dir=args.output_dir,
split_ratio=args.split_ratio, data_type='200X')
split_data(inp_dir=args.dataset_dir, out_dir=args.output_dir,
split_ratio=args.split_ratio, data_type='400X')
|
{"hexsha": "16163245957ed80e038d66fae7a617d41e12981d", "size": 3629, "ext": "py", "lang": "Python", "max_stars_repo_path": "split_train_test_data_ubuntu.py", "max_stars_repo_name": "ivo-gilles/breast-cancer-classification", "max_stars_repo_head_hexsha": "eb79275ac281d0340ede5d21e3d4a1116d612ddd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "split_train_test_data_ubuntu.py", "max_issues_repo_name": "ivo-gilles/breast-cancer-classification", "max_issues_repo_head_hexsha": "eb79275ac281d0340ede5d21e3d4a1116d612ddd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "split_train_test_data_ubuntu.py", "max_forks_repo_name": "ivo-gilles/breast-cancer-classification", "max_forks_repo_head_hexsha": "eb79275ac281d0340ede5d21e3d4a1116d612ddd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2358490566, "max_line_length": 113, "alphanum_fraction": 0.5902452466, "include": true, "reason": "import numpy", "num_tokens": 791}
|
from preprocess.removeOutliers import remove_outliers
from preprocess.scale import scale
from preprocess.pca import pca
import numpy as np
def pre_process(arr, test, y=None):
    """Convert the train/test frames to numpy, drop outliers from train, then scale both."""
    # Avoid a mutable default argument.
    if y is None:
        y = []
    test = test.to_numpy()
    arr = arr.to_numpy()
    arr, y = remove_outliers(arr, y)
    # PCA is currently disabled.
    #arr, test = pca(arr, test)
    arr, test = scale(arr, test)
    return arr, test, y
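# Minimal usage sketch (hypothetical data; assumes the inputs are pandas
# DataFrames, since .to_numpy() is called on them):
#   import pandas as pd
#   train_df = pd.DataFrame({"a": [1.0, 2.0, 100.0], "b": [3.0, 4.0, 5.0]})
#   test_df = pd.DataFrame({"a": [1.5], "b": [3.5]})
#   X_train, X_test, y = pre_process(train_df, test_df, y=[0, 1, 1])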
|
{"hexsha": "ddfd4866b689364311d5e0ce7c2036a2fed6a34a", "size": 357, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess/index.py", "max_stars_repo_name": "ahmedgaafer/pattern-project", "max_stars_repo_head_hexsha": "b20cd9b0c3c62ef761fa6d2969ea3d7133e40632", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocess/index.py", "max_issues_repo_name": "ahmedgaafer/pattern-project", "max_issues_repo_head_hexsha": "b20cd9b0c3c62ef761fa6d2969ea3d7133e40632", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess/index.py", "max_forks_repo_name": "ahmedgaafer/pattern-project", "max_forks_repo_head_hexsha": "b20cd9b0c3c62ef761fa6d2969ea3d7133e40632", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0, "max_line_length": 53, "alphanum_fraction": 0.6862745098, "include": true, "reason": "import numpy", "num_tokens": 94}
|
#include <iostream>
#include <Eigen/Dense>
void gramSchmidtOrthogonalization(Eigen::MatrixXd &matrix,Eigen::MatrixXd &orthonormalMatrix)
{
    /*
    In this method we make every column perpendicular to its previous columns.
    If a and b are the vectors representing two columns, c = b - ((b.a)/(a.a)).a
            ^
           /
        b /
         /
        /
        ---------->
             a
            ^
           /|
        b / |
         /  | c
        /   |
        ---------->
             a
    You just have to normalize every vector after making it perpendicular to the previous columns,
    so:
    q1=a.normalized();
    q2=b-(b.q1).q1
    q2=q2.normalized();
    q3=c-(c.q1).q1 - (c.q2).q2
    q3=q3.normalized();
    Now we have Q; since A=QR, multiply both sides on the left by Q.transpose().
    Because the columns of Q are orthonormal, Q.transpose()*Q is I:
    A=QR;
    Q.transpose()*A=R;
    */
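    // Worked example of the projection step (illustrative values):
    //   a = (1, 0), b = (1, 1)
    //   q1 = a/|a| = (1, 0)
    //   q2' = b - (b.q1) q1 = (1, 1) - 1*(1, 0) = (0, 1);  q2 = q2'/|q2'| = (0, 1)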
    Eigen::VectorXd col;
    for(int i=0;i<matrix.cols();i++)
    {
        col=matrix.col(i);
        // Subtract the projection onto every previously computed orthonormal column.
        for(int j=0;j<i;j++)
        {
            col-=col.dot(orthonormalMatrix.col(j))*orthonormalMatrix.col(j);
        }
        // Normalize only after the column is orthogonal to all previous ones.
        orthonormalMatrix.col(i)=col.normalized();
    }
Eigen::MatrixXd A(4,3);
A<<1,2,3,-1,1,1,1,1,1,1,1,1;
Eigen::Vector4d a=A.col(0);
Eigen::Vector4d b=A.col(1);
Eigen::Vector4d c=A.col(2);
Eigen::Vector4d q1= a.normalized();
Eigen::Vector4d q2=b-(b.dot(q1))*q1;
q2=q2.normalized();
Eigen::Vector4d q3=c-(c.dot(q1))*q1 - (c.dot(q2))*q2;
q3=q3.normalized();
std::cout<< "q1:"<<std::endl;
std::cout<< q1<<std::endl;
std::cout<< "q2"<<std::endl;
std::cout<< q2<<std::endl;
std::cout<< "q3:"<<std::endl;
std::cout<< q3<<std::endl;
Eigen::MatrixXd Q(4,3);
Q.col(0)=q1;
Q.col(1)=q2;
Q.col(2)=q3;
Eigen::MatrixXd R(3,3);
R=Q.transpose()*(A);
std::cout<<"Q"<<std::endl;
std::cout<< Q<<std::endl;
std::cout<<"R"<<std::endl;
    std::cout<< R<<std::endl;
//MatrixXd A(4,3), thinQ(4,3), Q(4,4);
Eigen::MatrixXd thinQ(4,3), q(4,4);
//A.setRandom();
Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);
q = qr.householderQ();
thinQ.setIdentity();
thinQ = qr.householderQ() * thinQ;
std::cout << "Q computed by Eigen" << "\n\n" << thinQ << "\n\n";
std::cout << q << "\n\n" << thinQ << "\n\n";
}
void gramSchmidtOrthogonalizationExample()
{
Eigen::MatrixXd matrix(3,4),orthonormalMatrix(3,4) ;
matrix=Eigen::MatrixXd::Random(3,4);////A.setRandom();
gramSchmidtOrthogonalization(matrix,orthonormalMatrix);
}
int main()
{
    gramSchmidtOrthogonalizationExample();
}
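// Build sketch (assumed Eigen include path):
//   g++ -std=c++14 -I/usr/include/eigen3 gram_schmidt_orthogonalization.cpp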
|
{"hexsha": "4827f06fe844f4826a12fe584c7c179c64f364b5", "size": 2541, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/gram_schmidt_orthogonalization.cpp", "max_stars_repo_name": "behnamasadi/Mastering_Eigen", "max_stars_repo_head_hexsha": "99edbc819c89a4805b777eef69044a1658d96206", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2019-04-14T16:54:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T15:55:08.000Z", "max_issues_repo_path": "src/gram_schmidt_orthogonalization.cpp", "max_issues_repo_name": "behnamasadi/Mastering_Eigen", "max_issues_repo_head_hexsha": "99edbc819c89a4805b777eef69044a1658d96206", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gram_schmidt_orthogonalization.cpp", "max_forks_repo_name": "behnamasadi/Mastering_Eigen", "max_forks_repo_head_hexsha": "99edbc819c89a4805b777eef69044a1658d96206", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-12-25T10:08:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-06T14:27:32.000Z", "avg_line_length": 22.0956521739, "max_line_length": 128, "alphanum_fraction": 0.5521448249, "num_tokens": 839}
|
# -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Defines inline calculations to automatically get an initial window guess.
"""
import numpy as np
from aiida import orm
from aiida.engine import calcfunction
from .._calcfunctions import merge_nested_dict
@calcfunction
def get_initial_window_inline(wannier_bands, slice_reference_bands):
"""
InlineCalculation which returns the automatic guess for the window based on the Wannier bands.
Arguments
---------
wannier_bands : aiida.orm.data.array.bands.BandsData
Bands calculated for the Wannier run.
slice_reference_bands : aiida.orm.data.list.List
Indices of the reference bands which should be considered.
"""
return orm.List(
list=guess_window(
wannier_bands=wannier_bands,
slice_reference_bands=slice_reference_bands
)
)
@calcfunction
def add_initial_window_inline(
wannier_parameters, wannier_bands, slice_reference_bands
):
"""
InlineCalculation which adds the automatic guess for the window to an
existing Wannier input parameter set.
Arguments
---------
wannier_parameters: aiida.orm.data.parameter.ParameterData
Initial Wannier input parameters.
wannier_bands : aiida.orm.data.array.bands.BandsData
Bands calculated for the Wannier run.
slice_reference_bands : aiida.orm.data.list.List
Indices of the reference bands which should be considered.
"""
wannier_param_dict = wannier_parameters.get_dict()
window_keys = [
'dis_win_min', 'dis_froz_min', 'dis_froz_max', 'dis_win_max'
]
# Check if disentanglement is needed.
    if ('num_bands' not in wannier_param_dict
            or int(wannier_param_dict['num_bands']) == int(wannier_param_dict['num_wann'])):
return {'result': orm.Dict(dict=wannier_param_dict)}
else:
window_dict = {
key: value
for key, value in zip(
window_keys,
guess_window(
wannier_bands=wannier_bands,
slice_reference_bands=slice_reference_bands
)
)
}
return merge_nested_dict(
dict_primary=wannier_parameters,
dict_secondary=orm.Dict(dict=window_dict)
)
def guess_window(wannier_bands, slice_reference_bands):
"""
    Creates the maximal (up to delta = 0.01) inner and minimal outer energy windows, based on the given reference bands.
"""
delta = 0.01
bands_sliced = wannier_bands.get_bands()[:, list(slice_reference_bands)]
lowest_band = bands_sliced[:, 0]
highest_band = bands_sliced[:, -1]
outer_lower = np.min(lowest_band) - delta
outer_upper = np.max(highest_band) + delta
inner_lower = np.max(lowest_band) + delta
inner_upper = np.min(highest_band) - delta
return [outer_lower, inner_lower, inner_upper, outer_upper]
|
{"hexsha": "2772c7ea847bd52038ef11a6b0883c9ca33939b3", "size": 3023, "ext": "py", "lang": "Python", "max_stars_repo_path": "aiida_tbextraction/energy_windows/auto_guess.py", "max_stars_repo_name": "DanielMarchand/aiida-tbextraction", "max_stars_repo_head_hexsha": "6f9513c0c16ef0874aeb5801481287f233277620", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aiida_tbextraction/energy_windows/auto_guess.py", "max_issues_repo_name": "DanielMarchand/aiida-tbextraction", "max_issues_repo_head_hexsha": "6f9513c0c16ef0874aeb5801481287f233277620", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aiida_tbextraction/energy_windows/auto_guess.py", "max_forks_repo_name": "DanielMarchand/aiida-tbextraction", "max_forks_repo_head_hexsha": "6f9513c0c16ef0874aeb5801481287f233277620", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1595744681, "max_line_length": 117, "alphanum_fraction": 0.6748263315, "include": true, "reason": "import numpy", "num_tokens": 714}
|
using MPI
using ClimateMachine
using Logging
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGmethods
using ClimateMachine.DGmethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.LowStorageRungeKuttaMethod
using LinearAlgebra
using ClimateMachine.GenericCallbacks:
EveryXWallTimeSeconds, EveryXSimulationSteps
using ClimateMachine.ODESolvers
MPI.Initialized() || MPI.Init()
mpicomm = MPI.COMM_WORLD
# set up domain
topl = StackedBrickTopology(
mpicomm,
(0:10, 0:10, 0:3);
periodicity = (false, false, false),
boundary = ((1, 1), (1, 2), (1, 2)),
)
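# The brick spans 10 x 10 elements horizontally and 3 in the stacked (last)
# dimension; `boundary` assigns a tag to the two faces of each dimension
# (interpretation assumed from the argument names above).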
@show MPI.Comm_rank(mpicomm) length(topl.realelems)
|
{"hexsha": "33cc2a70407efac916ef6715b6ed1e4a5e0328c3", "size": 693, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "tutorials/topo.jl", "max_stars_repo_name": "ChrisRackauckas/ClimateMachine.jl", "max_stars_repo_head_hexsha": "195bdaa323086c67a7aa4d1b5d99612f077ff3fe", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-12T20:15:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-12T20:15:02.000Z", "max_issues_repo_path": "tutorials/topo.jl", "max_issues_repo_name": "ChrisRackauckas/ClimateMachine.jl", "max_issues_repo_head_hexsha": "195bdaa323086c67a7aa4d1b5d99612f077ff3fe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorials/topo.jl", "max_forks_repo_name": "ChrisRackauckas/ClimateMachine.jl", "max_forks_repo_head_hexsha": "195bdaa323086c67a7aa4d1b5d99612f077ff3fe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-01T23:22:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-01T23:22:39.000Z", "avg_line_length": 25.6666666667, "max_line_length": 51, "alphanum_fraction": 0.7878787879, "num_tokens": 193}
|
module cube_to_vtk_c_binding
! Usage:
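  !   (illustrative, from the Fortran side; file names are hypothetical)
  !   call cube_to_vtk('density.cube', 'density.vtk')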
! Reference:
! http://fortranwiki.org/fortran/show/c_interface_module
use iso_c_binding
use c_f_string_c_binding, only : c_f_string
use asflowf_cube_to_vtk, only : cube_to_vtk
implicit none
contains
subroutine c_cube_to_vtk(cube_file_in_c, vtk_file_out_c) bind(c)
type(c_ptr), target, intent(in) :: cube_file_in_c, vtk_file_out_c
!character(c_char), intent(in) :: cube_file_in_c, vtk_file_out_c
character(len=128) :: cube_file_in, vtk_file_out
call c_f_string(c_loc(cube_file_in_c), cube_file_in)
call c_f_string(c_loc(vtk_file_out_c), vtk_file_out)
call cube_to_vtk(cube_file_in, vtk_file_out)
end subroutine c_cube_to_vtk
end module cube_to_vtk_c_binding
|
{"hexsha": "6caf56ae30f4f93ded874bc4003e32074762a35f", "size": 811, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "fortran/atomsciflowf/c_binding/cube_to_vtk_c_binding.f90", "max_stars_repo_name": "DeqiTang/pymatflow", "max_stars_repo_head_hexsha": "bd8776feb40ecef0e6704ee898d9f42ded3b0186", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-03-06T16:13:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T07:53:34.000Z", "max_issues_repo_path": "fortran/atomsciflowf/c_binding/cube_to_vtk_c_binding.f90", "max_issues_repo_name": "DeqiTang/pymatflow", "max_issues_repo_head_hexsha": "bd8776feb40ecef0e6704ee898d9f42ded3b0186", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-10-02T02:23:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-08T13:29:37.000Z", "max_forks_repo_path": "fortran/atomsciflowf/c_binding/cube_to_vtk_c_binding.f90", "max_forks_repo_name": "DeqiTang/pymatflow", "max_forks_repo_head_hexsha": "bd8776feb40ecef0e6704ee898d9f42ded3b0186", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-10T16:28:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-10T16:28:14.000Z", "avg_line_length": 35.2608695652, "max_line_length": 74, "alphanum_fraction": 0.7151664612, "num_tokens": 218}
|
import unittest
import numpy as np
import tensorflow as tf
import tf_encrypted as tfe
from tf_encrypted.layers.activation import Relu
class TestRelu(unittest.TestCase):
def setUp(self):
tf.reset_default_graph()
def test_forward(self):
input_shape = [2, 2, 2, 50]
input_relu = np.random.randn(np.prod(input_shape)).astype(np.float32).reshape(input_shape)
with tfe.protocol.SecureNN() as prot:
tf.reset_default_graph()
relu_input = prot.define_private_variable(input_relu)
relu_layer = Relu(input_shape)
relu_out_pond = relu_layer.forward(relu_input)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
out_pond = sess.run(relu_out_pond.reveal(), tag='tfe')
tf.reset_default_graph()
x = tf.Variable(input_relu, dtype=tf.float32)
relu_out_tf = tf.nn.relu(x)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
out_tensorflow = sess.run(relu_out_tf)
        np.testing.assert_allclose(out_pond, out_tensorflow, atol=.01)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "fbb435cae931769aa12d19aab95ee1c12290fc4b", "size": 1238, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_relu.py", "max_stars_repo_name": "robert-wagner/tf-encrypted", "max_stars_repo_head_hexsha": "d2cb60d68f263dfd5da46dbac55b8858d701bc47", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_relu.py", "max_issues_repo_name": "robert-wagner/tf-encrypted", "max_issues_repo_head_hexsha": "d2cb60d68f263dfd5da46dbac55b8858d701bc47", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_relu.py", "max_forks_repo_name": "robert-wagner/tf-encrypted", "max_forks_repo_head_hexsha": "d2cb60d68f263dfd5da46dbac55b8858d701bc47", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4761904762, "max_line_length": 98, "alphanum_fraction": 0.6437802908, "include": true, "reason": "import numpy", "num_tokens": 271}
|
#include "Gui.h"
#include "mycq.hpp"
#include <Windows.h>
#include "MyJson.h"
#include "Update.h"
//#include <regex>
#include <boost/regex.hpp>
#include <nana/gui.hpp>
#include <nana/gui/widgets/button.hpp>
#include <nana/gui/widgets/checkbox.hpp>
#include <nana/gui/widgets/combox.hpp>
#include <nana/gui/widgets/group.hpp>
#include <nana/gui/widgets/label.hpp>
#include <nana/gui/widgets/listbox.hpp>
#include <nana/gui/widgets/menu.hpp>
#include <nana/gui/widgets/panel.hpp>
#include <nana/gui/widgets/tabbar.hpp>
#include <nana/gui/widgets/textbox.hpp>
using namespace nana;
using namespace std;
//#define BG_COLOE 0xe6e6e6
#define GROUP_COLOE 0xc9c5be
extern MyJson conf;
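// Open a URL in the default browser by handing it to explorer.exe; the URL is
// wrapped in quotes first (presumably so characters such as '&' survive).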
void openUrl(std::string url) {
string temp = "\"" + url;
temp += "\"";
ShellExecuteA(NULL, "open", "explorer.exe", temp.c_str(), NULL, SW_SHOW);
}
// Base class shared by all tab pages
class tab_father : public panel<false> {
protected:
    // Write the configuration
void writeConf() {
conf.all2json();
conf.json2file();
}
    // Show text in a pop-up window
void showText(string title, string word) {
form fm_temp;
fm_temp.caption(title);
// fm_temp.bgcolor(color_rgb(BG_COLOE));
textbox text{fm_temp};
text.caption(word);
text.editable(false);
fm_temp.div("<text>");
fm_temp["text"] << text;
fm_temp.collocate();
fm_temp.show();
exec();
}
public:
tab_father(window wd) : panel<false>(wd), isSave(true) {
color_group = color_rgb(GROUP_COLOE);
// this->bgcolor(color_rgb(BG_COLOE));
}
    // Get the save state
bool getSave() {
return isSave;
}
    // Read the configuration
virtual void readConf() {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"非法访问";
m_error.show();
}
    // Save
virtual bool save() {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"非法访问";
m_error.show();
return false;
}
protected:
bool isSave;
color color_group;
};
// Cloud moderation API
class tab_page_moderation : public tab_father {
protected:
    // Read the configuration
void readConf() {
conf.file2json();
conf.json2all();
check_huawei.check(conf.alone[conf_index].huaweiApiSwitch);
text_huaweiDomainName.reset(conf.alone[conf_index].huaweiDomainName);
text_huaweiPassWord.reset(conf.alone[conf_index].huaweiPassWord);
text_huaweiProjectName.reset(conf.alone[conf_index].huaweiProjectName);
text_huaweiUserName.reset(conf.alone[conf_index].huaweiUserName);
isSave = true;
}
public:
bool save() {
conf.alone[conf_index].huaweiApiSwitch = check_huawei.checked();
        conf.alone[conf_index].huaweiDomainName = text_huaweiDomainName.text();
conf.alone[conf_index].huaweiPassWord = text_huaweiPassWord.text();
conf.alone[conf_index].huaweiProjectName = text_huaweiProjectName.text();
conf.alone[conf_index].huaweiUserName = text_huaweiUserName.text();
conf.initModeration();
writeConf();
msgbox m_save{*this, u8"成功"};
m_save << u8"保存成功";
m_save.show();
        // Reload the configuration
readConf();
isSave = true;
return true;
}
private:
    // Initialization
void init() {
place_.bind(*this);
place_.div(
//整体边距
"margin = [15,15,15,15] "
"<vert"
"<weight=80% "
//华为云
"<group_huawei>"
">"
//保存
"<<> <button_save> <>>"
">");
        // Huawei Cloud
group_huawei.create(*this);
group_huawei.bgcolor(color_group);
group_huawei.caption(u8"华为云:");
        group_huawei.div(
            "<vert margin = [5,5]"
            // Huawei Cloud API switch
            "<weight=25 check_huawei>"
            // Huawei Cloud IAM user name
            "<weight=25 <weight=75 lab_huaweiUserName><weight=150 text_huaweiUserName>>"
            "<weight=10>"
            // Huawei Cloud account name
            "<weight=25 <weight=75 lab_huaweiDomainName><weight=150 text_huaweiDomainName>>"
            "<weight=10>"
            // Huawei Cloud project name
            "<weight=25 <weight=75 lab_huaweiProjectName><weight=150 text_huaweiProjectName>>"
            "<weight=10>"
            // Huawei Cloud password
            "<weight=25 <weight=75 lab_huaweiPassWord><weight=150 text_huaweiPassWord>>"
            "<weight=10>"
            ">");
place_.field("group_huawei") << group_huawei;
        // Huawei Cloud
check_huawei.create(group_huawei);
check_huawei.bgcolor(color_group);
check_huawei.caption(u8"华为云接口开关");
check_huawei.events().click([this]() { isSave = false; });
group_huawei["check_huawei"] << check_huawei;
        // IAM user name
lab_huaweiUserName.create(group_huawei);
lab_huaweiUserName.bgcolor(color_group);
lab_huaweiUserName.caption(u8"IAM用户名:");
group_huawei["lab_huaweiUserName"] << lab_huaweiUserName;
text_huaweiUserName.create(group_huawei);
text_huaweiUserName.line_wrapped(false);
text_huaweiUserName.multi_lines(false);
text_huaweiUserName.events().text_changed([this]() { isSave = false; });
group_huawei["text_huaweiUserName"] << text_huaweiUserName;
        // Account name
lab_huaweiDomainName.create(group_huawei);
lab_huaweiDomainName.bgcolor(color_group);
lab_huaweiDomainName.caption(u8"账号名:");
group_huawei["lab_huaweiDomainName"] << lab_huaweiDomainName;
text_huaweiDomainName.create(group_huawei);
text_huaweiDomainName.line_wrapped(false);
text_huaweiDomainName.multi_lines(false);
text_huaweiDomainName.events().text_changed([this]() { isSave = false; });
group_huawei["text_huaweiDomainName"] << text_huaweiDomainName;
        // Project name
        lab_huaweiProjectName.create(group_huawei);
        lab_huaweiProjectName.bgcolor(color_group);
        lab_huaweiProjectName.caption(u8"项目名:");
group_huawei["lab_huaweiProjectName"] << lab_huaweiProjectName;
text_huaweiProjectName.create(group_huawei);
text_huaweiProjectName.line_wrapped(false);
text_huaweiProjectName.multi_lines(false);
text_huaweiProjectName.events().text_changed([this]() { isSave = false; });
group_huawei["text_huaweiProjectName"] << text_huaweiProjectName;
        // Password
lab_huaweiPassWord.create(group_huawei);
lab_huaweiPassWord.bgcolor(color_group);
lab_huaweiPassWord.caption(u8"密码:");
group_huawei["lab_huaweiPassWord"] << lab_huaweiPassWord;
text_huaweiPassWord.create(group_huawei);
text_huaweiPassWord.line_wrapped(false);
text_huaweiPassWord.multi_lines(false);
text_huaweiPassWord.events().text_changed([this]() { isSave = false; });
group_huawei["text_huaweiPassWord"] << text_huaweiPassWord;
        // Save button
btn_save.create(*this);
btn_save.caption(u8"保存");
btn_save.events().click([this] { save(); });
place_.field("button_save") << btn_save;
readConf();
}
public:
tab_page_moderation(window wd, int index = 0) : tab_father(wd), conf_index(index) {
init();
}
private:
place place_;
int conf_index;
    // Huawei Cloud
    group group_huawei;
    checkbox check_huawei;
    // IAM user name
    label lab_huaweiUserName;
    textbox text_huaweiUserName;
    // Account name
    label lab_huaweiDomainName;
    textbox text_huaweiDomainName;
    // Project name
    label lab_huaweiProjectName;
    textbox text_huaweiProjectName;
    // Password
    label lab_huaweiPassWord;
    textbox text_huaweiPassWord;
    // Save
    button btn_save;
};
// Main settings
class tab_page_main : public tab_father {
protected:
    // Read the configuration
void readConf() {
conf.file2json();
conf.json2all();
        // Admin QQ numbers
        string admin;
        for (auto id : conf.admin) {
            admin += to_string(id) + "\n";
        }
        text_admin.reset(admin);
        // Command prefix
        text_prefix.reset(conf.prefix);
        // Forward private messages to the admin
        check_relayPrivateMsg.check(conf.relayPrivateMsg);
        // Multithreading
        check_async.check(conf.async);
        isSave = true;
}
public:
bool save() {
bool ok = boost::regex_match(text_admin.text(), boost::regex("[\\n\\d\\r]*"));
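        // Illustration: the pattern "[\n\d\r]*" accepts only digits and line
        // breaks, so "12345\n67890\n" passes while "12a45" is rejected.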
if (!ok) {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"主人QQ 格式有误";
m_error.show();
return false;
}
        // Admin QQ numbers
        auto line = text_admin.text_line_count();
        conf.admin.clear();
        for (size_t i = 0; i < line; i++) {
            string buf;
            text_admin.getline(i, buf);
            if (!buf.empty()) conf.admin.insert(atoll(buf.c_str()));
        }
        // Command prefix
        conf.prefix = text_prefix.text();
        // Forward received private messages to the admin
        conf.relayPrivateMsg = check_relayPrivateMsg.checked();
        // Multithreading
        conf.async = check_async.checked();
        // Write the configuration
        writeConf();
        msgbox m_save{*this, u8"成功"};
        m_save << u8"保存成功";
        m_save.show();
        // Reload the configuration
        readConf();
isSave = true;
return true;
}
private:
    // Initialization
void init() {
place_.bind(*this);
        place_.div(
            // Overall margin
            //"margin = [15,15,15,15] "
            // Admin QQ numbers
            "<group_admin>"
            //"<weight=10>"
            // Others
            "<vert"
            "<weight=40% group_other>"
            // // Activation key
            // "<weight=25 lab_usingKey>"
            // "<weight=25 <text_usingKey><weight=60 button_usingKey>>"
            "<>"
            // Save
            "<margin = [15,15,15,15] button_save>"
            ">");
        // Admin QQ numbers
group_admin.create(*this);
group_admin.bgcolor(color_group);
group_admin.caption(u8"主人QQ(每行一个):");
group_admin.div("margin = [5] <text_admin>");
place_.field("group_admin") << group_admin;
text_admin.create(group_admin);
text_admin.events().text_changed([this]() { isSave = false; });
group_admin["text_admin"] << text_admin;
        // Others
group_other.create(*this);
group_other.bgcolor(color_group);
group_other.caption(u8"其他");
        group_other.div(
            "<vert "
            // Command prefix
            "<weight=25 <weight=25% lab_prefix><text_prefix><weight=20%>>"
            //"<weight=10>"
            // Switches
            "<vert check>"
            // Tutorial // Online update // Feedback
            "<<button_document> <button_update> <button_feedback>>"
            ">");
place_.field("group_other") << group_other;
        // Command prefix
lab_prefix.create(group_other);
lab_prefix.bgcolor(color_group);
lab_prefix.caption(u8"指令前缀:");
group_other["lab_prefix"] << lab_prefix;
text_prefix.create(group_other);
text_prefix.line_wrapped(false);
text_prefix.multi_lines(false);
text_prefix.events().text_changed([this]() { isSave = false; });
group_other["text_prefix"] << text_prefix;
        // Forward received private messages to the admin
check_relayPrivateMsg.create(group_other);
check_relayPrivateMsg.bgcolor(color_group);
check_relayPrivateMsg.caption(u8"收到的私聊消息转发给主人");
check_relayPrivateMsg.events().click([this]() { isSave = false; });
group_other["check"] << check_relayPrivateMsg;
        // Multithreading
check_async.create(group_other);
check_async.bgcolor(color_group);
check_async.caption(u8"开启多线程处理");
check_async.events().click([this]() { isSave = false; });
group_other["check"] << check_async;
button_document.create(group_other);
button_document.caption(u8"使用教程 待开发");
button_document.events().click([this] { openUrl("https://jq.qq.com/?_wv=1027&k=5Q20l6I"); });
group_other["button_document"] << button_document;
button_update.create(group_other);
button_update.caption(u8"检查更新");
button_update.events().click([this] {
Update up;
string inf;
Update::updateType type = up.check(inf);
            // Update helper
auto update = [&]() {
string inf;
bool ret = up.getUpdate(inf);
if (ret) {
msgbox m_inf{*this, u8"更新完成"};
m_inf << inf;
m_inf.show();
} else {
msgbox m_error{*this, u8"更新错误"};
m_error.icon(msgbox::icon_error);
m_error << inf;
m_error.show();
}
};
if (type == Update::updateType::no) {
msgbox m_inf{*this, u8"无需更新"};
m_inf << inf;
m_inf.show();
} else if (type == Update::updateType::update) {
msgbox m_inf{*this, u8"有新版本", msgbox::yes_no};
m_inf << inf << "\n\n";
m_inf << "是否更新";
auto ret = m_inf.show();
if (ret == msgbox::pick_yes) {
update();
}
} else if (type == Update::updateType::mustUpdate) {
update();
msgbox m_inf{*this, u8"更新内容"};
m_inf << inf;
m_inf.show();
} else if (type == Update::updateType::error) {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << inf;
m_error.show();
}
});
group_other["button_update"] << button_update;
button_feedback.create(group_other);
button_feedback.caption(u8"问题反馈 待开发");
button_feedback.events().click([this] { openUrl("https://jq.qq.com/?_wv=1027&k=5Q20l6I"); });
group_other["button_feedback"] << button_feedback;
        // // Activation key
// lab_usingKey.create(*this);
// lab_usingKey.caption(u8"激活专业版:");
// place_.field("lab_usingKey") << lab_usingKey;
// text_usingKey.create(*this);
// text_usingKey.line_wrapped(false);
// text_usingKey.multi_lines(false);
// place_.field("text_usingKey") << text_usingKey;
// btn_usingKey.create(*this);
// btn_usingKey.caption(u8"使用秘钥");
// btn_usingKey.events().click([this] {
// });
// place_.field("button_usingKey") << btn_usingKey;
        // Save button
btn_save.create(*this);
btn_save.caption(u8"保存");
btn_save.events().click([this] { save(); });
place_.field("button_save") << btn_save;
readConf();
}
public:
tab_page_main(window wd) : tab_father(wd) {
init();
}
private:
place place_;
int conf_index;
    // Admin QQ numbers
    group group_admin;
    textbox text_admin;
    // Others
    group group_other;
    // Command prefix
    label lab_prefix;
    textbox text_prefix;
    // Forward received private messages to the admin
    checkbox check_relayPrivateMsg;
    // Multithreading
    checkbox check_async;
    // // Activation key
    // label lab_usingKey;
    // textbox text_usingKey;
    // button btn_usingKey;
    button button_document;
    button button_update;
    button button_feedback;
    // Save
    button btn_save;
};
// Main settings of a standalone profile
class tab_page_aloneMain : public tab_father {
protected:
    // Read the configuration
void readConf() {
conf.file2json();
conf.json2all();
text_name.reset(conf.alone[conf_index].name);
text_priority.from(conf.alone[conf_index].priority);
isSave = true;
}
public:
bool save() {
string priority_temp = text_priority.text();
bool ok = boost::regex_match(priority_temp, boost::regex("[0-9]+"));
if (!ok) {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"优先级 格式有误";
m_error.show();
return false;
}
conf.alone[conf_index].priority = atoi(priority_temp.c_str());
conf.alone[conf_index].name = text_name.text();
writeConf();
msgbox m_save{*this, u8"成功"};
m_save << u8"保存成功";
m_save.show();
        // Reload the configuration
readConf();
isSave = true;
return true;
}
private:
    // Initialization
void init() {
place_.bind(*this);
        place_.div(
            // Overall margin
            "margin = [15,15,15,15] "
            "<vert"
            "<weight=25>"
            // Priority
            "<weight=25 <weight=65 lab_priority><weight=65 text_priority>>"
            "<weight=10>"
            // Profile name
            "<weight=25 <weight=65 lab_name><weight=65 text_name>>"
            "<weight=60%>"
            // Save
            "<<> <button_save> <>>"
            ">");
        // Priority
lab_priority.create(*this);
// lab_priority.bgcolor(color_group);
lab_priority.caption(u8"优先级:");
place_.field("lab_priority") << lab_priority;
text_priority.create(*this);
text_priority.line_wrapped(false);
text_priority.multi_lines(false);
text_priority.events().text_changed([this]() { isSave = false; });
place_.field("text_priority") << text_priority;
        // Profile name
lab_name.create(*this);
// lab_name.bgcolor(color_group);
lab_name.caption(u8"设置名称:");
place_.field("lab_name") << lab_name;
text_name.create(*this);
text_name.line_wrapped(false);
text_name.multi_lines(false);
text_name.events().text_changed([this]() { isSave = false; });
place_.field("text_name") << text_name;
        // Save button
btn_save.create(*this);
btn_save.caption(u8"保存");
btn_save.events().click([this] { save(); });
place_.field("button_save") << btn_save;
readConf();
}
public:
tab_page_aloneMain(window wd, int index = 0) : tab_father(wd), conf_index(index) {
init();
}
private:
place place_;
int conf_index;
    // Priority
    label lab_priority;
    textbox text_priority;
    // Profile name
    label lab_name;
    textbox text_name;
    // Save
    button btn_save;
};
// Overall settings
class tab_page_overall : public tab_father {
protected:
void readConf() {
conf.file2json();
conf.json2all();
        // Load the group list
        auto groupList = mycq::get_group_list_map();
        // Clear the existing entries
        list_groupList.erase();
        auto& conf_groupList = conf.alone[conf_index].groupList;
        // First load the groups that are already selected
        for (auto id : conf_groupList) {
            list_groupList.at(0).append({groupList[id].group_name, to_string(id)});
        }
        // Then load the groups that are not selected
for (auto temp : groupList) {
if (find(conf_groupList.begin(), conf_groupList.end(), temp.first) == conf_groupList.end())
list_groupList.at(0).append({temp.second.group_name, to_string(temp.first)});
}
        // Check the entries that are already selected
auto size = list_groupList.size_item(0);
for (size_t i = 0; i < size; i++) {
string buf = list_groupList.at(0).at(i).text(1);
if (find(conf.alone[conf_index].groupList.begin(),
conf.alone[conf_index].groupList.end(),
atoll(buf.c_str()))
!= conf.alone[conf_index].groupList.end()) {
list_groupList.at(0).at(i).check(true);
}
}
        // Handling method
        check_deal.at(conf.alone[conf_index].dealType)->check(true);
        // Ban duration
        text_banTimeLen.reset(to_string(conf.alone[conf_index].banTimeLen));
        // Some switches
check_deleteMsg.check(conf.alone[conf_index].deleteMsg);
check_streng.check(conf.alone[conf_index].streng);
check_deleteCQCode.check(conf.alone[conf_index].deleteCQCode);
check_keyWordSendAdmin.check(conf.alone[conf_index].keyWordSendAdmin);
isSave = true;
}
public:
bool save() {
bool ok = boost::regex_match(text_banTimeLen.text(), boost::regex("[0-9]*"));
if (!ok) {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"禁言时长 格式有误";
m_error.show();
return false;
}
        // Group list
conf.alone[conf_index].groupList.clear();
for (auto temp : list_groupList.checked()) {
string buf = list_groupList.at(temp).text(1);
conf.alone[conf_index].groupList.insert(atoll(buf.c_str()));
}
        // Handling method
int dealType = 0;
for (size_t i = 0; i < check_deal.size(); i++) {
if (check_deal.at(i)->checked()) {
dealType = i;
break;
}
}
conf.alone[conf_index].dealType = dealType;
        // Ban duration
        conf.alone[conf_index].banTimeLen = atoi(text_banTimeLen.text().c_str());
        // Some switches
conf.alone[conf_index].deleteMsg = check_deleteMsg.checked();
conf.alone[conf_index].streng = check_streng.checked();
conf.alone[conf_index].deleteCQCode = check_deleteCQCode.checked();
conf.alone[conf_index].keyWordSendAdmin = check_keyWordSendAdmin.checked();
        // Write the configuration
        writeConf();
        msgbox m_save{*this, u8"成功"};
        m_save << u8"保存成功";
        m_save.show();
        // Reload the configuration
readConf();
isSave = true;
return true;
}
private:
void init() {
place_.bind(*this);
        place_.div(
            // Overall margin
            //"margin = [5] "
            // Monitored groups
            "<group_list>"
            //"<weight=20>"
            "<vert"
            // Handling method
            "<weight=40% group_deal>"
            //"<weight=10>"
            // Some switches
            "<weight=40% group_switch>"
            //"<weight=10>"
            // Save button
            "<weight=10% button_save>"
            ">");
        // Monitored groups
group_list.create(*this);
group_list.bgcolor(color_group);
group_list.caption(u8"监控群:");
group_list.div(
"margin = [5] <vert <list_groupList>"
"<weight=25 <text_groupAdd><weight=40% button_groupAdd>><weight=5>"
"<weight=30 gap=[10,10] arrange=[40,40] button_list> >");
place_.field("group_list") << group_list;
list_groupList.create(group_list);
list_groupList.checkable(true);
list_groupList.append_header(u8"群名");
list_groupList.append_header(u8"群号码");
list_groupList.events().checked([this]() { isSave = false; });
group_list["list_groupList"] << list_groupList;
        // Manually add a group
text_groupAdd.create(group_list);
text_groupAdd.line_wrapped(false);
text_groupAdd.multi_lines(false);
group_list["text_groupAdd"] << text_groupAdd;
button_groupAdd.create(group_list);
button_groupAdd.caption(u8"手动添加");
button_groupAdd.events().click([this] {
isSave = false;
bool ok = boost::regex_match(text_groupAdd.text(), boost::regex("[1-9][0-9]*"));
if (!ok) {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"格式有误";
m_error.show();
return;
}
list_groupList.at(0).append({"", text_groupAdd.text()});
            // Find the newly added entry and check it
auto size = list_groupList.size_item(0);
for (size_t i = 0; i < size; i++) {
string buf = list_groupList.at(0).at(i).text(1);
if (buf == text_groupAdd.text()) {
list_groupList.at(0).at(i).check(true);
break;
}
}
//
list_groupList.scroll(true);
text_groupAdd.caption(u8"");
});
group_list["button_groupAdd"] << button_groupAdd;
        // List buttons
        // Select all
button_listAll.create(group_list);
button_listAll.caption(u8"全选");
button_listAll.events().click([this] {
isSave = false;
auto size = list_groupList.size_item(0);
for (size_t i = 0; i < size; i++) {
list_groupList.at(listbox::index_pair(0, i)).check(true);
}
});
        // Invert selection
button_listReverse.create(group_list);
button_listReverse.caption(u8"反选");
button_listReverse.events().click([this] {
isSave = false;
auto size = list_groupList.size_item(0);
for (size_t i = 0; i < size; i++) {
auto p = list_groupList.at(listbox::index_pair(0, i));
p.check(!p.checked());
}
});
group_list["button_list"] << button_listAll << button_listReverse;
        // Handling method
group_deal.create(*this);
group_deal.bgcolor(color_group);
group_deal.caption(u8"处理方式:");
group_deal.div(
"margin = [5] <vert"
"<vert check_deal>"
"<weight=25 <weight=45% label_banTimeLen> <weight=5% text_banTimeLen> <label_banTimeLenTip>> >");
place_["group_deal"] << group_deal;
vector<string> groupStr;
groupStr.push_back(u8"不做处理");
groupStr.push_back(u8"禁言");
groupStr.push_back(u8"踢出");
groupStr.push_back(u8"拉黑并踢出");
for (size_t i = 0; i < groupStr.size(); i++) {
auto p = std::make_shared<checkbox>(group_deal);
p->bgcolor(color_group);
p->events().checked([this]() { isSave = false; });
check_deal.push_back(p);
// Add the checkbox to the radio group. The radio group does not
// manage the life of checkboxs.
radio_group_deal.add(*p);
group_deal["check_deal"] << *p;
p->caption(groupStr[i]);
p->events().click([this]() {
std::size_t index = this->radio_group_deal.checked();
std::string str = this->check_deal[index]->caption();
if (str == u8"禁言") {
label_banTimeLenTip.caption(u8"分钟");
} else {
label_banTimeLenTip.caption(u8"分钟(禁言方式下生效)");
}
});
}
label_banTimeLen.create(group_deal);
label_banTimeLen.bgcolor(color_group);
label_banTimeLen.caption(u8"禁言时长:");
group_deal["label_banTimeLen"] << label_banTimeLen;
text_banTimeLen.create(group_deal);
text_banTimeLen.line_wrapped(false);
text_banTimeLen.multi_lines(false);
text_banTimeLen.events().text_changed([this]() { isSave = false; });
group_deal["label_banTimeLen"] << text_banTimeLen;
label_banTimeLenTip.create(group_deal);
label_banTimeLenTip.bgcolor(color_group);
label_banTimeLenTip.caption(u8"分钟");
group_deal["label_banTimeLenTip"] << label_banTimeLenTip;
        // Some feature switches
group_switch.create(*this);
group_switch.bgcolor(color_group);
group_switch.caption(u8"一些功能开关:");
group_switch.div("<vert margin = [5] check_switch>");
place_.field("group_switch") << group_switch;
check_streng.create(group_switch);
check_streng.bgcolor(color_group);
check_streng.caption(u8"强力检测关键词");
check_streng.events().checked([this]() { isSave = false; });
group_switch["check_switch"] << check_streng;
check_deleteMsg.create(group_switch);
check_deleteMsg.bgcolor(color_group);
check_deleteMsg.caption(u8"撤回触发关键词消息(需Pro)");
check_deleteMsg.events().checked([this]() { isSave = false; });
group_switch["check_switch"] << check_deleteMsg;
check_deleteCQCode.create(group_switch);
check_deleteCQCode.bgcolor(color_group);
check_deleteCQCode.caption(u8"过滤CQ码(如图片消息,签到消息等)");
check_deleteCQCode.events().checked([this]() { isSave = false; });
group_switch["check_switch"] << check_deleteCQCode;
check_keyWordSendAdmin.create(group_switch);
check_keyWordSendAdmin.bgcolor(color_group);
check_keyWordSendAdmin.caption(u8"触发关键词提醒主人");
check_keyWordSendAdmin.events().checked([this]() { isSave = false; });
group_switch["check_switch"] << check_keyWordSendAdmin;
button_save.create(*this);
button_save.caption(u8"保存");
button_save.events().click([this] { save(); });
place_.field("button_save") << button_save;
group_list.collocate();
        // Read the configuration
readConf();
}
public:
tab_page_overall(window wd, int index = 0) : tab_father(wd), conf_index(index) {
init();
}
private:
place place_;
int conf_index;
    // Monitored groups
    group group_list;
    listbox list_groupList; // group list
    textbox text_groupAdd; // manual group add: text box
    button button_groupAdd; // manual group add: button
    button button_listReverse; // invert selection
    button button_listAll; // select all
    // Handling method
    group group_deal;
    std::vector<std::shared_ptr<checkbox>> check_deal;
    radio_group radio_group_deal;
    label label_banTimeLen; // ban duration: label
    label label_banTimeLenTip; // ban duration: hint label
    textbox text_banTimeLen; // ban duration: text box
    // Some feature switches
    group group_switch;
    // vector<std::shared_ptr<checkbox>> check_switchs;
    checkbox check_streng; // strict keyword detection
    checkbox check_deleteMsg; // recall triggering messages
    checkbox check_deleteCQCode; // filter CQ codes
    checkbox check_keyWordSendAdmin; // notify the admin when a keyword triggers
    // Save
    button button_save;
};
// Lists
class tab_page_list : public tab_father {
protected:
    // Read the configuration
void readConf() {
conf.file2json();
conf.json2all();
        // QQ list
        string QQList;
        for (auto id : conf.alone[conf_index].QQList) {
            QQList += to_string(id) + "\n";
        }
        text_QQList.reset(QQList, false);
        // Whitelisted keywords
        string keyWordWhite;
        for (auto keyword : conf.alone[conf_index].keyWordWhite) {
            keyWordWhite += keyword.keyWord + "\n";
        }
        text_keyWordWhite.reset(keyWordWhite, false);
        // Special list type
check_QQListType.at(conf.alone[conf_index].QQListType)->check(true);
if (conf.alone[conf_index].QQListType == 0) {
lab_QQListTypeTip.caption(u8"当前为白名单模式,不会监控名单中的QQ");
group_QQList.caption(u8"QQ白名单(每行一个):");
} else if (conf.alone[conf_index].QQListType == 1) {
lab_QQListTypeTip.caption(u8"当前为监控名单模式,只监控名单中的QQ");
group_QQList.caption(u8"QQ监控名单(每行一个):");
}
isSave = true;
}
public:
bool save() {
bool ok = boost::regex_match(text_QQList.text(), boost::regex("[\\n\\d\\r]*"));
if (!ok) {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"QQ白名单/监控名单 格式有误";
m_error.show();
return false;
}
        // Special QQ list
auto QQList_line = text_QQList.text_line_count();
conf.alone[conf_index].QQList.clear();
for (size_t i = 0; i < QQList_line; i++) {
string buf;
text_QQList.getline(i, buf);
if (!buf.empty()) conf.alone[conf_index].QQList.insert(atoll(buf.c_str()));
}
        // Whitelisted keywords
auto keyWrodWhite_line = text_keyWordWhite.text_line_count();
conf.alone[conf_index].keyWordWhite.clear();
for (size_t i = 0; i < keyWrodWhite_line; i++) {
string buf;
text_keyWordWhite.getline(i, buf);
if (!buf.empty()) conf.alone[conf_index].keyWordWhite.insert(buf.c_str());
}
        // Special list type: whitelist || watchlist
int listType = 0;
for (size_t i = 0; i < check_QQListType.size(); i++) {
if (check_QQListType.at(i)->checked()) {
listType = i;
break;
}
}
conf.alone[conf_index].QQListType = listType;
writeConf();
msgbox m_save{*this, u8"成功"};
m_save << u8"保存成功";
m_save.show();
        // Re-read the configuration
readConf();
isSave = true;
return true;
}
private:
void init() {
place_.bind(*this);
        place_.div(
            // Overall margin
            //"margin = [15,15,15,15]"
            // Left side
            "<vert"
            // Special QQ list
            "<group_QQList>"
            //"<weight=10>"
            ">"
            // Right side
            "<vert"
            // Whitelisted keywords
            "<group_keyWordWhite>"
            //"<weight=10>"
            // Save
            "<weight=20% button_save>"
            ">");
        // Whitelisted keywords
group_keyWordWhite.create(*this);
group_keyWordWhite.bgcolor(color_group);
group_keyWordWhite.caption(u8"白名单关键词(每行一个):");
group_keyWordWhite.div("margin = [5] <text_keyWordWhite>");
place_.field("group_keyWordWhite") << group_keyWordWhite;
text_keyWordWhite.create(group_keyWordWhite);
text_keyWordWhite.line_wrapped(true);
text_keyWordWhite.tip_string(u8"包含这些关键词的消息将不会检测");
text_keyWordWhite.events().text_changed([this]() { isSave = false; });
group_keyWordWhite["text_keyWordWhite"] << text_keyWordWhite;
        // Special QQ list
group_QQList.create(*this);
group_QQList.bgcolor(color_group);
group_QQList.caption(u8"QQ 白名单/监控名单(每行一个):");
group_QQList.div("vert margin = [5] <text_QQList> <weight=25 check_QQListType> <weight=25 lab_QQListTypeTip>");
place_.field("group_QQList") << group_QQList;
text_QQList.create(group_QQList);
text_QQList.line_wrapped(true);
text_QQList.events().text_changed([this]() { isSave = false; });
group_QQList["text_QQList"] << text_QQList;
        // Set the special list to whitelist || watchlist
lab_QQListTypeTip.create(group_QQList);
lab_QQListTypeTip.bgcolor(color_group);
group_QQList["lab_QQListTypeTip"] << lab_QQListTypeTip;
vector<string> groupStr;
groupStr.push_back(u8"白名单");
groupStr.push_back(u8"监控名单");
for (size_t i = 0; i < groupStr.size(); i++) {
auto p = std::make_shared<checkbox>(group_QQList);
p->bgcolor(color_group);
p->events().checked([this]() { isSave = false; });
check_QQListType.push_back(p);
group_QQListType.add(*p);
group_QQList["check_QQListType"] << *p;
p->caption(groupStr[i]);
p->events().click([this]() {
std::size_t index = this->group_QQListType.checked();
std::string str = this->check_QQListType[index]->caption();
if (str == u8"白名单") {
lab_QQListTypeTip.caption(u8"当前为白名单模式,不会监控名单中的QQ");
group_QQList.caption(u8"QQ白名单(每行一个):");
} else if (str == u8"监控名单") {
lab_QQListTypeTip.caption(u8"当前为监控名单模式,只监控名单中的QQ");
group_QQList.caption(u8"QQ监控名单(每行一个):");
} else {
lab_QQListTypeTip.caption("");
}
});
}
        // Save button
btn_save.create(*this);
btn_save.caption(u8"保存");
btn_save.events().click([this] { save(); });
place_.field("button_save") << btn_save;
readConf();
}
public:
tab_page_list(window wd, int index = 0) : tab_father(wd), conf_index(index) {
init();
}
private:
place place_;
int conf_index;
    // Whitelisted keywords
    group group_keyWordWhite;
    textbox text_keyWordWhite;
    // QQ list
    group group_QQList;
    textbox text_QQList;
    // Special QQ list type
    std::vector<std::shared_ptr<checkbox>> check_QQListType;
    radio_group group_QQListType;
    label lab_QQListTypeTip;
    // Save
    button btn_save;
};
// Keywords
class tab_page_keyWord : public tab_father {
protected:
    // Read the configuration
void readConf() {
conf.file2json();
conf.json2all();
        // Plain keywords
string keyWord;
for (auto keyword : conf.alone[conf_index].keyWord) {
keyWord += keyword.keyWord + "\n";
}
text_keyWord.reset(keyWord);
        // Regex keywords
string keyWordRegex;
for (auto keyword : conf.alone[conf_index].keyWordRegex) {
keyWordRegex += keyword.keyWord + "\n";
}
text_keyWordRegex.reset(keyWordRegex);
isSave = true;
}
public:
bool save() {
        // Keywords
auto keyWord_line = text_keyWord.text_line_count();
conf.alone[conf_index].keyWord.clear();
for (size_t i = 0; i < keyWord_line; i++) {
string buf;
text_keyWord.getline(i, buf);
if (!buf.empty()) conf.alone[conf_index].keyWord.insert(buf.c_str());
}
        // Regex keywords
auto keyWordRegex_line = text_keyWordRegex.text_line_count();
conf.alone[conf_index].keyWordRegex.clear();
for (size_t i = 0; i < keyWordRegex_line; i++) {
string buf;
text_keyWordRegex.getline(i, buf);
if (!buf.empty()) conf.alone[conf_index].keyWordRegex.insert(buf.c_str());
}
        // Write the configuration
writeConf();
msgbox m_save{*this, u8"成功"};
m_save << u8"保存成功";
m_save.show();
        // Re-read the configuration
readConf();
isSave = true;
return true;
}
private:
void init() {
place_.bind(*this);
        place_.div(
            // Overall margin
            //"margin = [15,15,15,15] "
            // Plain keywords
            "<group_keyWord>"
            //"<weight=10>"
            // Right side
            "<vert"
            // Regex keywords
            "<group_keyWordRegex>"
            //"<weight=10>"
            // Save
            "<weight=25% button_save>"
            ">");
        // Plain keywords
group_keyWord.create(*this);
group_keyWord.bgcolor(color_group);
group_keyWord.caption(u8"普通关键词(每行一个):");
group_keyWord.div("margin = [5] <text_keyWord>");
place_.field("group_keyWord") << group_keyWord;
text_keyWord.create(group_keyWord);
text_keyWord.line_wrapped(true);
text_keyWord.events().text_changed([this]() { isSave = false; });
group_keyWord["text_keyWord"] << text_keyWord;
        // Regex keywords
group_keyWordRegex.create(*this);
group_keyWordRegex.bgcolor(color_group);
group_keyWordRegex.caption(u8"正则表达式关键词(每行一个):");
group_keyWordRegex.div("margin = [5] <vert <text_keyWordRegex> <weight=20 <><weight=30% btn_regexTest>> >");
place_.field("group_keyWordRegex") << group_keyWordRegex;
text_keyWordRegex.create(group_keyWordRegex);
text_keyWordRegex.line_wrapped(true);
text_keyWordRegex.events().text_changed([this]() { isSave = false; });
group_keyWordRegex["text_keyWordRegex"] << text_keyWordRegex;
        // Regex test
btn_regexTest.create(group_keyWordRegex);
btn_regexTest.caption(u8"测试正则");
btn_regexTest.events().click([this] { openUrl("https://c.runoob.com/front-end/854"); });
group_keyWordRegex["btn_regexTest"] << btn_regexTest;
        // Save button
btn_save.create(*this);
btn_save.caption(u8"保存");
btn_save.events().click([this] { save(); });
place_.field("button_save") << btn_save;
readConf();
}
public:
tab_page_keyWord(window wd, int index = 0) : tab_father(wd), conf_index(index) {
init();
}
private:
place place_;
int conf_index;
    // Plain keywords
    group group_keyWord;
    textbox text_keyWord;
    // Regex keywords
    group group_keyWordRegex;
    textbox text_keyWordRegex;
    // Regex test
    button btn_regexTest;
    // Save
    button btn_save;
};
// Custom trigger replies
class tab_page_groupWarnWord : public tab_father {
protected:
    // Read the configuration
void readConf() {
conf.file2json();
conf.json2all();
        // Group message reply on trigger
check_groupWarn.check(conf.alone[conf_index].keyWordGroupWarn);
text_groupWarn.reset(conf.alone[conf_index].keyWordGroupWarnWord);
        // Private message reply on trigger
check_privateWarn.check(conf.alone[conf_index].keyWordPrivateWarn);
text_privateWarn.reset(conf.alone[conf_index].keyWordPrivateWarnWord);
isSave = true;
}
public:
bool save() {
conf.alone[conf_index].keyWordGroupWarnWord = text_groupWarn.text();
conf.alone[conf_index].keyWordGroupWarn = check_groupWarn.checked();
conf.alone[conf_index].keyWordPrivateWarnWord = text_privateWarn.text();
conf.alone[conf_index].keyWordPrivateWarn = check_privateWarn.checked();
writeConf();
msgbox m_save{*this, u8"成功"};
m_save << u8"保存成功";
m_save.show();
readConf();
isSave = true;
return true;
}
private:
void init() {
place_.bind(*this);
        place_.div(
            // Overall margin
            //"margin = [15,15,15,15] "
            // Group message reply on trigger
            "<group_groupWarn>"
            //"<weight=10>"
            // Right side
            "<vert"
            // Private message reply on trigger
            "<group_privateWarn>"
            //"<weight=10>"
            // Save
            "<weight=25% button_save>"
            ">");
        // Group message reply on trigger
group_groupWarn.create(*this);
group_groupWarn.bgcolor(color_group);
group_groupWarn.caption(u8"触发后回复群消息内容:");
group_groupWarn.div(
"<vert margin = [5] <weight=25 <weight=70% check_groupWarn> <margin = [0,0,7] button_groupVariable>> "
"<text_groupWarn>>");
place_.field("group_groupWarn") << group_groupWarn;
        // Switch: reply in the group on trigger
check_groupWarn.create(group_groupWarn);
check_groupWarn.bgcolor(color_group);
check_groupWarn.caption(u8"触发关键词发送群消息提醒");
check_groupWarn.events().checked([this]() { isSave = false; });
group_groupWarn["check_groupWarn"] << check_groupWarn;
        // Content
text_groupWarn.create(group_groupWarn);
text_groupWarn.line_wrapped(true);
        text_groupWarn.tip_string(u8"默认值:\r\n{at} 触发关键词,处理方式:{处理方式}");
text_groupWarn.events().text_changed([this]() { isSave = false; });
group_groupWarn["text_groupWarn"] << text_groupWarn;
        // Variables button
button_groupVariable.create(group_groupWarn);
button_groupVariable.caption(u8"可用变量");
button_groupVariable.events().click([this] {
string variable(
u8"{at}\t\t艾特\r\n"
u8"{msg}\t\t消息内容\r\n"
u8"{日期}\t\t当前日期\r\n"
u8"{时间}\t\t当前时间\r\n"
u8"{星期}\t\t当前星期\r\n"
u8"{处理方式}\t触发后处理方式\r\n"
u8"{关键词}\t\t触发的关键词\r\n"
u8"{QQ号码}\t触发关键词的QQ号码\r\n"
u8"{QQ名称}\t触发关键词的QQ名称\r\n"
u8"{QQ名片}\t触发关键词的QQ名片\r\n"
u8"{群号码}\t触发关键词的群号码\r\n"
u8"{群名称}\t触发关键词的群名称\r\n"
u8"\r\n更多变量欢迎进群补充(群:839067703)");
showText("回复内容变量", variable);
});
group_groupWarn["button_groupVariable"] << button_groupVariable;
        // Private message sent on trigger
group_privateWarn.create(*this);
group_privateWarn.bgcolor(color_group);
group_privateWarn.caption(u8"触发后回复私聊消息内容:");
group_privateWarn.div(
"<vert <weight=25 <weight=70% check_privateWarn> <margin = [0,0,7] button_privateVariable>> "
"<text_privateWarn>>");
place_.field("group_privateWarn") << group_privateWarn;
        // Switch: private message reply on trigger
check_privateWarn.create(group_privateWarn);
check_privateWarn.bgcolor(color_group);
check_privateWarn.caption(u8"触发关键词发送私聊消息提醒");
check_privateWarn.events().checked([this]() { isSave = false; });
group_privateWarn["check_privateWarn"] << check_privateWarn;
        // Content
text_privateWarn.create(group_privateWarn);
text_privateWarn.line_wrapped(true);
text_privateWarn.events().text_changed([this]() { isSave = false; });
group_privateWarn["text_privateWarn"] << text_privateWarn;
        // Variables button
button_privateVariable.create(group_privateWarn);
button_privateVariable.caption(u8"可用变量");
button_privateVariable.events().click([this] {
string variable(
u8"{msg}\t\t消息内容\r\n"
u8"{日期}\t\t当前日期\r\n"
u8"{时间}\t\t当前时间\r\n"
u8"{星期}\t\t当前星期\r\n"
u8"{处理方式}\t触发后处理方式\r\n"
u8"{关键词}\t\t触发的关键词\r\n"
u8"{QQ号码}\t触发关键词的QQ号码\r\n"
u8"{QQ名称}\t触发关键词的QQ名称\r\n"
u8"{QQ名片}\t触发关键词的QQ名片\r\n"
u8"{群号码}\t触发关键词的群名称\r\n"
u8"{群名称}\t触发关键词的群名称\r\n"
u8"\r\n更多变量欢迎进群补充(群:839067703)");
showText("回复内容变量", variable);
});
group_privateWarn["button_privateVariable"] << button_privateVariable;
        // Save button
btn_save.create(*this);
btn_save.caption(u8"保存");
btn_save.events().click([this] { save(); });
place_.field("button_save") << btn_save;
readConf();
}
public:
tab_page_groupWarnWord(window wd, int index = 0) : tab_father(wd), conf_index(index) {
init();
}
private:
place place_;
int conf_index;
    // Switch: group message reply on trigger
    checkbox check_groupWarn;
    // Switch: private message reply on trigger
    checkbox check_privateWarn;
    // Group message reply content
    group group_groupWarn;
    textbox text_groupWarn;
    // Private message sent on trigger
    group group_privateWarn;
    textbox text_privateWarn;
    // Variables
    button button_groupVariable;
    button button_privateVariable;
    // Save
    button btn_save;
};
// Relay to groups
class tab_page_relayGroupMsg : public tab_father {
protected:
void readConf() {
conf.file2json();
conf.json2all();
        // Load the group list
        auto groupList = mycq::get_group_list_map();
        // Clear the existing entries
        list_groupList.erase();
        auto& conf_groupList = conf.alone[conf_index].relayGroupList;
        // First load the groups that are already selected
for (auto id : conf_groupList) {
list_groupList.at(0).append({groupList[id].group_name, to_string(id)});
}
        // Then load the groups that are not selected
for (auto temp : groupList) {
if (find(conf_groupList.begin(), conf_groupList.end(), temp.first) == conf_groupList.end())
list_groupList.at(0).append({temp.second.group_name, to_string(temp.first)});
}
        // Check the entries that are already selected
auto size = list_groupList.size_item(0);
for (size_t i = 0; i < size; i++) {
string buf = list_groupList.at(0).at(i).text(1);
if (find(conf.alone[conf_index].relayGroupList.begin(),
conf.alone[conf_index].relayGroupList.end(),
atoll(buf.c_str()))
!= conf.alone[conf_index].relayGroupList.end()) {
list_groupList.at(0).at(i).check(true);
}
}
        // Relay format
text_relayGroupWord.reset(conf.alone[conf_index].relayGroupWord);
text_relayGroupMsg_trimFront.reset(to_string(conf.alone[conf_index].relayGroupMsg_trimFront));
text_relayGroupMsg_trimBack.reset(to_string(conf.alone[conf_index].relayGroupMsg_trimBack));
isSave = true;
}
public:
bool save() {
bool ok_text_relayGroupMsg_trimFront =
boost::regex_match(text_relayGroupMsg_trimFront.text(), boost::regex("[0-9]*"));
bool ok_text_relayGroupMsg_trimBack =
boost::regex_match(text_relayGroupMsg_trimBack.text(), boost::regex("[0-9]*"));
if (!ok_text_relayGroupMsg_trimFront || !ok_text_relayGroupMsg_trimBack) {
msgbox m_error{*this, u8"错误"};
m_error.icon(msgbox::icon_error);
m_error << u8"消息修剪 格式有误";
m_error.show();
return false;
}
        // Group list
conf.alone[conf_index].relayGroupList.clear();
for (auto temp : list_groupList.checked()) {
string buf = list_groupList.at(temp).text(1);
conf.alone[conf_index].relayGroupList.insert(atoll(buf.c_str()));
}
        // Relay format
conf.alone[conf_index].relayGroupWord = text_relayGroupWord.text();
conf.alone[conf_index].relayGroupMsg_trimFront = atoi(text_relayGroupMsg_trimFront.text().c_str());
conf.alone[conf_index].relayGroupMsg_trimBack = atoi(text_relayGroupMsg_trimBack.text().c_str());
writeConf();
msgbox m_save{*this, u8"成功"};
m_save << u8"保存成功";
m_save.show();
readConf();
isSave = true;
return true;
}
private:
void init() {
place_.bind(*this);
        place_.div(
            // Overall margin
            //"margin = [15,15,15,15] "
            "<vert"
            "<"
            // Group list
            "<group_groupList>"
            //"<weight=10>"
            // Relay format
            "<group_relayGroupWord>"
            ">"
            //"<weight=20>"
            "<weight=25%"
            // Message trimming
            "<group_relayGroupMsg_trim>"
            //"<weight=10%>"
            // Save
            "<weight=40% button_save>"
            ">"
            ">");
        // Group list
group_groupList.create(*this);
group_groupList.bgcolor(color_group);
group_groupList.caption(u8"触发后转发到群:");
group_groupList.div("vert <list_groupList>");
place_.field("group_groupList") << group_groupList;
list_groupList.create(group_groupList);
list_groupList.checkable(true);
list_groupList.append_header(u8"群名");
list_groupList.append_header(u8"群号码");
list_groupList.events().checked([this]() { isSave = false; });
group_groupList["list_groupList"] << list_groupList;
        // Relay message format
group_relayGroupWord.create(*this);
group_relayGroupWord.bgcolor(color_group);
group_relayGroupWord.caption(u8"转发消息格式:");
group_relayGroupWord.div("vert margin = [5] <text_relayGroupWord> <weight=20 <> <weight=40% button_variable>>");
place_.field("group_relayGroupWord") << group_relayGroupWord;
text_relayGroupWord.create(group_relayGroupWord);
text_relayGroupWord.line_wrapped(true);
text_relayGroupWord.tip_string(u8"不填写则转发消息原格式 即 {msg}");
text_relayGroupWord.events().text_changed([this]() { isSave = false; });
group_relayGroupWord["text_relayGroupWord"] << text_relayGroupWord;
        // Variables button
button_variable.create(group_relayGroupWord);
button_variable.caption(u8"可用变量");
button_variable.events().click([this] {
string variable(
u8"{msg}\t\t消息内容\r\n"
u8"{日期}\t\t当前日期\r\n"
u8"{时间}\t\t当前时间\r\n"
u8"{星期}\t\t当前星期\r\n"
u8"{关键词}\t\t触发的关键词\r\n"
u8"{处理方式}\t触发后处理方式\r\n"
u8"{QQ号码}\t触发关键词的QQ号码\r\n"
u8"{QQ名称}\t触发关键词的QQ名称\r\n"
u8"{QQ名片}\t触发关键词的QQ名片\r\n"
u8"{群号码}\t触发关键词的群号码\r\n"
u8"{群名称}\t触发关键词的群名称\r\n"
u8"\r\n更多变量欢迎进群补充(群:839067703)");
showText(u8"转发群消息变量", variable);
});
group_relayGroupWord["button_variable"] << button_variable;
        // Message trimming
group_relayGroupMsg_trim.create(*this);
group_relayGroupMsg_trim.bgcolor(color_group);
group_relayGroupMsg_trim.caption(u8"消息修剪(去广告尾巴专用)");
group_relayGroupMsg_trim.div(
"vert <weight=25 <weight=30% lab_relayGroupMsg_trimFront><text_relayGroupMsg_trimFront><weight=15%>> "
"<weight=25 <weight=30% lab_relayGroupMsg_trimBack><text_relayGroupMsg_trimBack><weight=15%>>");
place_.field("group_relayGroupMsg_trim") << group_relayGroupMsg_trim;
lab_relayGroupMsg_trimFront.create(group_relayGroupMsg_trim);
lab_relayGroupMsg_trimFront.bgcolor(color_group);
lab_relayGroupMsg_trimFront.caption(u8"删除前面行数:");
group_relayGroupMsg_trim["lab_relayGroupMsg_trimFront"] << lab_relayGroupMsg_trimFront;
text_relayGroupMsg_trimFront.create(group_relayGroupMsg_trim);
text_relayGroupMsg_trimFront.line_wrapped(false);
text_relayGroupMsg_trimFront.multi_lines(false);
text_relayGroupMsg_trimFront.events().text_changed([this]() { isSave = false; });
group_relayGroupMsg_trim["text_relayGroupMsg_trimFront"] << text_relayGroupMsg_trimFront;
lab_relayGroupMsg_trimBack.create(group_relayGroupMsg_trim);
lab_relayGroupMsg_trimBack.bgcolor(color_group);
lab_relayGroupMsg_trimBack.caption(u8"删除后面行数:");
group_relayGroupMsg_trim["lab_relayGroupMsg_trimBack"] << lab_relayGroupMsg_trimBack;
text_relayGroupMsg_trimBack.create(group_relayGroupMsg_trim);
text_relayGroupMsg_trimBack.line_wrapped(false);
text_relayGroupMsg_trimBack.multi_lines(false);
text_relayGroupMsg_trimBack.events().text_changed([this]() { isSave = false; });
group_relayGroupMsg_trim["text_relayGroupMsg_trimBack"] << text_relayGroupMsg_trimBack;
// Save button
btn_save.create(*this);
btn_save.caption(u8"保存");
btn_save.events().click([this] { save(); });
place_.field("button_save") << btn_save;
readConf();
}
public:
tab_page_relayGroupMsg(window wd, int index = 0) : tab_father(wd), conf_index(index) {
init();
}
private:
place place_;
int conf_index;
// Group list
group group_groupList;
listbox list_groupList;
// Message trimming
group group_relayGroupMsg_trim;
label lab_relayGroupMsg_trimFront;
label lab_relayGroupMsg_trimBack;
textbox text_relayGroupMsg_trimFront;
textbox text_relayGroupMsg_trimBack;
// Relay format
group group_relayGroupWord;
textbox text_relayGroupWord;
// Variable button
button button_variable;
// Save
button btn_save;
};
// Individual settings
class tab_page_alone : public tab_father {
protected:
// Read configuration
void readConf() {
conf.file2json();
conf.json2all();
auto groupList = mycq::get_group_list_map();
vector<string> dealTypeList{"不作处理", "禁言", "踢出", "踢出并拉黑"};
list_aloneList.erase();
for (auto tempAlone : conf.alone) {
if (tempAlone.first == 0) continue;
string groupListWord;
dealTypeList[1] = "禁言" + to_string(tempAlone.second.banTimeLen) + "分钟";
int i = 0;
for (auto id : tempAlone.second.groupList) {
groupListWord += groupList[id].group_name + " ";
if (i > 5) break;
i++;
}
list_aloneList.at(0).append({to_string(tempAlone.first),
to_string(tempAlone.second.priority),
tempAlone.second.name,
groupListWord,
dealTypeList[tempAlone.second.dealType]});
}
// Check the entries that were previously selected
auto size = list_aloneList.size_item(0);
for (size_t i = 0; i < size; i++) {
string buf = list_aloneList.at(0).at(i).text(0);
int id = atoi(buf.c_str());
if (conf.alone[id].use) {
list_aloneList.at(0).at(i).check(true);
}
}
isSave = true;
}
public:
bool save() {
isSave = true;
return true;
}
private:
// Write configuration
void writeConf() {
conf.all2json();
conf.json2file();
}
void writeList() {
auto size = list_aloneList.size_item(0);
for (size_t i = 0; i < size; i++) {
string buf = list_aloneList.at(0).at(i).text(0);
int id = atoi(buf.c_str());
conf.alone[id].use = list_aloneList.at(0).at(i).checked();
}
}
void openAlone(int conf_index) {
form fm(*this);
fm.caption(u8"单独设置");
fm.size(nana::size(550, 400));
// fm.bgcolor(color_rgb(BG_COLOE));
place place;
place.bind(fm);
place.div("<vert <weight=30 tabbar> <tab_frame>>");
std::vector<std::shared_ptr<tab_father>> tabpages;
tabbar<std::string> tabbar_{fm};
// Not sure why, but disabling scroll before adding tabs makes the frontmost scroll page display correctly
tabbar_.toolbox(nana::tabbar<std::string>::kits::scroll, false);
tabbar_.push_back(u8"主要设置");
tabpages.push_back(std::make_shared<tab_page_aloneMain>(fm, conf_index));
tabbar_.push_back(u8"全局设置");
tabpages.push_back(std::make_shared<tab_page_overall>(fm, conf_index));
tabbar_.push_back(u8"关键词");
tabpages.push_back(std::make_shared<tab_page_keyWord>(fm, conf_index));
tabbar_.push_back(u8"过滤名单");
tabpages.push_back(std::make_shared<tab_page_list>(fm, conf_index));
tabbar_.push_back(u8"触发回复");
tabpages.push_back(std::make_shared<tab_page_groupWarnWord>(fm, conf_index));
tabbar_.push_back(u8"云端检测");
tabpages.push_back(std::make_shared<tab_page_moderation>(fm, conf_index));
tabbar_.push_back(u8"转发到群");
tabpages.push_back(std::make_shared<tab_page_relayGroupMsg>(fm, conf_index));
std::size_t index = 0;
for (auto& i : tabpages) {
tabbar_.attach(index++, *i);
place.field("tab_frame").fasten(*i);
}
tabbar_.toolbox(nana::tabbar<std::string>::kits::scroll, true);
tabbar_.toolbox(nana::tabbar<std::string>::kits::list, true);
tabbar_.events().click([&fm, &tabpages, &tabbar_, this]() {
// msgbox m{fm, u8"Test"};
// m << tabpages.at(_tab.item_pos)->getSave();
// m.show();
static int lastItem = 0;
int tempItem = tabbar_.activated();
// When a tab is clicked
if (!tabpages.at(lastItem)->getSave()) {
tabbar_.activated(lastItem);
msgbox inf{fm, u8"未保存", nana::msgbox::yes_no_cancel};
inf << u8"是否保存已修改的内容";
auto res = inf.show();
if (res == nana::msgbox::pick_yes) {
if (!tabpages.at(lastItem)->save()) return; // if saving fails
} else if (res == nana::msgbox::pick_no) {
tabpages.at(lastItem)->readConf();
} else if (res == nana::msgbox::pick_cancel) {
return;
}
tabbar_.activated(tempItem);
}
tabpages.at(tempItem)->readConf();
lastItem = tempItem;
});
// When the window is closed
fm.events().unload([&fm, &tabpages, &tabbar_, this](const arg_unload& un) {
int item = tabbar_.activated();
if (!tabpages.at(item)->getSave()) {
msgbox inf{fm, u8"未保存", nana::msgbox::yes_no_cancel};
inf << u8"是否保存已修改的内容";
auto res = inf.show();
if (res == nana::msgbox::pick_yes) {
if (!tabpages.at(item)->save()) un.cancel = true; // if saving fails
} else if (res == nana::msgbox::pick_no) {
tabpages.at(item)->readConf();
} else if (res == nana::msgbox::pick_cancel) {
un.cancel = true;
}
}
});
tabbar_.activated(0);
place.field("tabbar") << tabbar_;
place.collocate();
fm.modality();
}
void init() {
place_.bind(*this);
place_.div(
// Overall margins
"margin = [15,15,15,15] "
"<vert"
// Settings list
"<list_aloneList>"
// Priority hint
"<weight=25 lab_priorityTip>"
">");
// List
list_aloneList.create(*this);
list_aloneList.checkable(true);
list_aloneList.append_header(u8"id", 50);
list_aloneList.append_header(u8"优先级", 50);
list_aloneList.append_header(u8"设置名称");
list_aloneList.append_header(u8"启用群", 150);
list_aloneList.append_header(u8"处理方法");
place_.field("list_aloneList") << list_aloneList;
list_aloneList.events().mouse_up([this](const arg_mouse& arg) {
// For any click other than a right-click, persist the checkbox states
if (mouse::right_button != arg.button) {
auto size = list_aloneList.size_item(0);
writeList();
writeConf();
readConf();
return;
}
auto index = list_aloneList.selected();
// Context menu
menu menu_;
// Menu click handlers
// Add
auto handlerAdd = [=](menu::item_proxy& ip) {
int add_index = conf.alone.size();
if (conf.alone.find(add_index) != conf.alone.end()) {
msgbox m_error(*this, u8"错误");
m_error.icon(msgbox::icon_error);
m_error << u8"配置项id冲突,添加失败,请尝试重启酷Q";
m_error.show();
return;
}
openAlone(add_index);
conf.alone[add_index].use = true;
writeList();
writeConf();
readConf();
};
// Edit
auto handlerEdit = [=](menu::item_proxy& ip) {
int conf_index = atoi(list_aloneList.at(index.at(0)).text(0).c_str());
openAlone(conf_index);
writeList();
writeConf();
readConf();
};
// Delete
auto handlerDelete = [=](menu::item_proxy& ip) {
msgbox m{*this, u8"确认", nana::msgbox::yes_no};
m << u8"是否删除此项";
auto res = m.show();
if (res == m.pick_yes) {
int conf_index = atoi(list_aloneList.at(index.at(0)).text(0).c_str());
conf.alone.erase(conf_index);
writeConf();
readConf();
}
};
// Right-click on an item
if (index.size()) {
menu_.append(u8"添加", handlerAdd);
menu_.append(u8"编辑", handlerEdit);
menu_.append(u8"删除", handlerDelete);
menu_.popup_await(*this, arg.pos.x, arg.pos.y);
}
// Right-click on empty space
else {
menu_.append(u8"添加", handlerAdd);
menu_.popup_await(*this, arg.pos.x, arg.pos.y);
}
});
// Priority hint
lab_priorityTip.create(*this);
lab_priorityTip.caption(u8"提示: 优先级数值小 > 优先级数值大 > 默认的设置\t\t\t\t\t\t\t\t\t\t右击编辑");
place_.field("lab_priorityTip") << lab_priorityTip;
readConf();
}
public:
tab_page_alone(window wd) : tab_father(wd) {
init();
}
private:
place place_;
// List
listbox list_aloneList;
// Priority hint
label lab_priorityTip;
};
Gui::Gui() {
}
// Open the main window
void Gui::openMain() {
form fm;
fm.caption(u8"设置");
fm.size(nana::size(550, 400));
// fm.bgcolor(color_rgb(BG_COLOE));
place place;
place.bind(fm);
place.div("<vert <weight=30 tabbar> <tab_frame>>");
std::vector<std::shared_ptr<tab_father>> tabpages;
tabbar<std::string> tabbar_{fm};
// Not sure why, but disabling scroll before adding tabs makes the frontmost scroll page display correctly
tabbar_.toolbox(nana::tabbar<std::string>::kits::scroll, false);
tabbar_.push_back(u8"主要设置");
tabpages.push_back(std::make_shared<tab_page_main>(fm));
tabbar_.push_back(u8"全局设置");
tabpages.push_back(std::make_shared<tab_page_overall>(fm));
tabbar_.push_back(u8"关键词");
tabpages.push_back(std::make_shared<tab_page_keyWord>(fm));
tabbar_.push_back(u8"过滤名单");
tabpages.push_back(std::make_shared<tab_page_list>(fm));
tabbar_.push_back(u8"触发回复");
tabpages.push_back(std::make_shared<tab_page_groupWarnWord>(fm));
tabbar_.push_back(u8"转发到群");
tabpages.push_back(std::make_shared<tab_page_relayGroupMsg>(fm));
tabbar_.push_back(u8"云端检测");
tabpages.push_back(std::make_shared<tab_page_moderation>(fm));
tabbar_.push_back(u8"单独设置");
tabpages.push_back(std::make_shared<tab_page_alone>(fm));
std::size_t index = 0;
for (auto& i : tabpages) {
tabbar_.attach(index++, *i);
place.field("tab_frame").fasten(*i);
}
tabbar_.toolbox(nana::tabbar<std::string>::kits::scroll, true);
tabbar_.toolbox(nana::tabbar<std::string>::kits::list, true);
// When a tab is clicked
tabbar_.events().click([&fm, &tabpages, &tabbar_, this]() {
static int lastItem = 0;
int tempItem = tabbar_.activated();
if (!tabpages.at(lastItem)->getSave()) {
tabbar_.activated(lastItem);
msgbox inf{fm, u8"未保存", nana::msgbox::yes_no_cancel};
inf << u8"是否保存已修改的内容";
auto res = inf.show();
if (res == nana::msgbox::pick_yes) {
if (!tabpages.at(lastItem)->save()) return; // if saving fails
} else if (res == nana::msgbox::pick_no) {
tabpages.at(lastItem)->readConf();
} else if (res == nana::msgbox::pick_cancel) {
return;
}
tabbar_.activated(tempItem);
}
tabpages.at(tempItem)->readConf();
lastItem = tempItem;
});
// When the window is closed
fm.events().unload([&fm, &tabpages, &tabbar_, this](const arg_unload& un) {
int item = tabbar_.activated();
if (!tabpages.at(item)->getSave()) {
msgbox inf{fm, u8"未保存", nana::msgbox::yes_no_cancel};
inf << u8"是否保存已修改的内容";
auto res = inf.show();
if (res == nana::msgbox::pick_yes) {
if (!tabpages.at(item)->save()) un.cancel = true; // if saving fails
} else if (res == nana::msgbox::pick_no) {
tabpages.at(item)->readConf();
} else if (res == nana::msgbox::pick_cancel) {
un.cancel = true;
}
}
});
tabbar_.activated(0);
place.field("tabbar") << tabbar_;
place.collocate();
fm.show();
exec();
}
|
{"hexsha": "0e33101ac5411560a650236cdd6cf35b688694df", "size": 65648, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/gui.cpp", "max_stars_repo_name": "zhaoguoqingit/GroupMonitor", "max_stars_repo_head_hexsha": "26d250048dd8c1a20fd1849e616ada6ea9996464", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2019-09-11T12:15:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-19T03:07:34.000Z", "max_issues_repo_path": "src/gui.cpp", "max_issues_repo_name": "constStar/coolq-keyword", "max_issues_repo_head_hexsha": "26d250048dd8c1a20fd1849e616ada6ea9996464", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-11-16T13:54:12.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-04T10:05:40.000Z", "max_forks_repo_path": "src/gui.cpp", "max_forks_repo_name": "constStar/coolq-keyword", "max_forks_repo_head_hexsha": "26d250048dd8c1a20fd1849e616ada6ea9996464", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-05-04T10:23:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-17T21:33:05.000Z", "avg_line_length": 29.6915422886, "max_line_length": 120, "alphanum_fraction": 0.5644345601, "num_tokens": 17879}
|
[STATEMENT]
lemma degree_pderiv_le: "degree (pderiv f) \<le> degree f - 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. degree (pderiv f) \<le> degree f - 1
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
assume "\<not> ?thesis"
[PROOF STATE]
proof (state)
this:
\<not> degree (pderiv f) \<le> degree f - 1
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
hence ge: "degree (pderiv f) \<ge> Suc (degree f - 1)"
[PROOF STATE]
proof (prove)
using this:
\<not> degree (pderiv f) \<le> degree f - 1
goal (1 subgoal):
1. Suc (degree f - 1) \<le> degree (pderiv f)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Suc (degree f - 1) \<le> degree (pderiv f)
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
hence "pderiv f \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
Suc (degree f - 1) \<le> degree (pderiv f)
goal (1 subgoal):
1. pderiv f \<noteq> 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
pderiv f \<noteq> 0
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
hence "coeff (pderiv f) (degree (pderiv f)) \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
pderiv f \<noteq> 0
goal (1 subgoal):
1. lead_coeff (pderiv f) \<noteq> (0::'a)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
lead_coeff (pderiv f) \<noteq> (0::'a)
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
from this[unfolded coeff_pderiv]
[PROOF STATE]
proof (chain)
picking this:
of_nat (Suc (degree (pderiv f))) * coeff f (Suc (degree (pderiv f))) \<noteq> (0::'a)
[PROOF STEP]
have "coeff f (Suc (degree (pderiv f))) \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
of_nat (Suc (degree (pderiv f))) * coeff f (Suc (degree (pderiv f))) \<noteq> (0::'a)
goal (1 subgoal):
1. coeff f (Suc (degree (pderiv f))) \<noteq> (0::'a)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
coeff f (Suc (degree (pderiv f))) \<noteq> (0::'a)
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
coeff f (Suc (degree (pderiv f))) \<noteq> (0::'a)
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
have "Suc (degree (pderiv f)) > degree f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. degree f < Suc (degree (pderiv f))
[PROOF STEP]
using ge
[PROOF STATE]
proof (prove)
using this:
Suc (degree f - 1) \<le> degree (pderiv f)
goal (1 subgoal):
1. degree f < Suc (degree (pderiv f))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
degree f < Suc (degree (pderiv f))
goal (1 subgoal):
1. \<not> degree (pderiv f) \<le> degree f - 1 \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
coeff f (Suc (degree (pderiv f))) \<noteq> (0::'a)
degree f < Suc (degree (pderiv f))
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
coeff f (Suc (degree (pderiv f))) \<noteq> (0::'a)
degree f < Suc (degree (pderiv f))
goal (1 subgoal):
1. False
[PROOF STEP]
by (simp add: coeff_eq_0)
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1541, "file": "Polynomial_Interpolation_Missing_Polynomial", "length": 19}
|
struct QuickMDP{ID,S,A,D<:NamedTuple} <: MDP{S,A}
data::D
end
"""
QuickMDP(gen::Function, [id]; kwargs...)
Construct a generative MDP model with the function `gen` and keyword arguments.
`gen` should take three arguments: a state, an action, and a random number generator. It should return a `NamedTuple` with keys `sp` for the next state and `r` for the reward.
Keywords can be static objects or functions. See the QuickPOMDPs.jl documentation for more information.
"""
QuickMDP(gen::Function, id=uuid4(); kwargs...) = QuickMDP(id; gen=gen, kwargs...)
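# Hedged usage sketch (not part of the original file; the dynamics below are
# invented for illustration). Per the docstring above, `gen` returns a
# `NamedTuple` with keys `sp` and `r`:
#     m = QuickMDP(
#         function (s, a, rng)
#             sp = s + a + 0.1 * randn(rng)   # next state
#             return (sp=sp, r=-abs(sp))      # reward keyed as `r`
#         end;
#         actions = [-1.0, 0.0, 1.0],
#         initialstate = rng -> randn(rng),
#         discount = 0.95)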
"""
QuickMDP([id]; kwargs...)
Construct an MDP model with keyword arguments. Keywords can be static objects or functions. See the QuickPOMDPs.jl documentation for more information.
"""
function QuickMDP(id=uuid4(); kwargs...)
kwd = Dict{Symbol, Any}(kwargs)
quick_defaults!(kwd)
S = infer_statetype(kwd)
A = infer_actiontype(kwd)
d = namedtuple(keys(kwd)...)(values(kwd)...)
qm = QuickMDP{id, S, A, typeof(d)}(d)
return qm
end
id(::QuickMDP{ID}) where ID = ID
struct QuickPOMDP{ID,S,A,O,D<:NamedTuple} <: POMDP{S,A,O}
data::D
end
"""
QuickPOMDP(gen::Function, [id]; kwargs...)
Construct a generative POMDP model with the function `gen` and keyword arguments.
`gen` should take three arguments: a state, an action, and a random number generator. It should return a `NamedTuple` with keys `sp` for the next state, `o` for the observation, and `r` for the reward.
Keywords can be static objects or functions. See the QuickPOMDPs.jl documentation for more information.
"""
QuickPOMDP(gen::Function, id=uuid4(); kwargs...) = QuickPOMDP(id; gen=gen, kwargs...)
"""
QuickPOMDP([id]; kwargs...)
Construct a POMDP model with keyword arguments. Keywords can be static objects or functions. See the QuickPOMDPs.jl documentation for more information.
"""
function QuickPOMDP(id=uuid4(); kwargs...)
kwd = Dict{Symbol, Any}(kwargs)
quick_defaults!(kwd)
S = infer_statetype(kwd)
A = infer_actiontype(kwd)
O = infer_obstype(kwd)
d = namedtuple(keys(kwd)...)(values(kwd)...)
qm = QuickPOMDP{id, S, A, O, typeof(d)}(d)
return qm
end
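# Hedged usage sketch (not part of the original file; a tiny invented problem).
# Per the docstring above, `gen` returns a `NamedTuple` with keys `sp`, `o`, `r`:
#     m = QuickPOMDP(
#         function (s, a, rng)
#             sp = rand(rng) < 0.9 ? s : !s    # next state
#             o = rand(rng) < 0.85 ? sp : !sp  # noisy observation
#             r = a == :listen ? -1.0 : (s ? -100.0 : 10.0)
#             return (sp=sp, o=o, r=r)
#         end;
#         states = [true, false],
#         actions = [:listen, :open],
#         observations = [true, false],
#         initialstate = rng -> rand(rng, Bool),
#         discount = 0.95)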
id(::QuickPOMDP{ID}) where ID = ID
const QuickModel = Union{QuickMDP, QuickPOMDP}
function quick_defaults!(kwd::Dict)
kwd[:discount] = get(kwd, :discount, 1.0)
kwd[:isterminal] = get(kwd, :isterminal, false)
# memoize initialstate_distribution since it should be constant (so we can use it below for initialstate)
if haskey(kwd, :initialstate_distribution) && kwd[:initialstate_distribution] isa Function
kwd[:initialstate_distribution] = kwd[:initialstate_distribution]()
end
if !haskey(kwd, :initialstate)
if haskey(kwd, :initialstate_distribution)
kwd[:initialstate] = rng -> rand(rng, kwd[:initialstate_distribution])
end
end
# default for initialobs must be in the method below because the method table might change
if !haskey(kwd, :stateindex)
if haskey(kwd, :states)
states = _call(Val(:states), kwd[:states], ())
if hasmethod(length, typeof((states,))) && length(states) < Inf
kwd[:stateindex] = Dict(s=>i for (i,s) in enumerate(states))
end
end
end
if !haskey(kwd, :actionindex)
if haskey(kwd, :actions)
actions = _call(Val(:actions), kwd[:actions], ())
if hasmethod(length, typeof((actions,))) && length(actions) < Inf
kwd[:actionindex] = Dict(s=>i for (i,s) in enumerate(actions))
end
end
end
if !haskey(kwd, :obsindex)
if haskey(kwd, :observations)
observations = _call(Val(:observations), kwd[:observations], ())
if hasmethod(length, typeof((observations,))) && length(observations) < Inf
kwd[:obsindex] = Dict(s=>i for (i,s) in enumerate(observations))
end
end
end
end
function infer_statetype(kwd)
if haskey(kwd, :statetype)
st = _call(Val(:statetype), kwd[:statetype], (), NamedTuple())
elseif haskey(kwd, :states)
st = eltype(_call(Val(:states), kwd[:states], (), NamedTuple()))
elseif haskey(kwd, :initialstate)
st = typeof(_call(Val(:initialstate), kwd[:initialstate], (MersenneTwister(0),), NamedTuple()))
else
st = Any
end
if st == Any
@warn("Unable to infer state type for a Quick(PO)MDP; using Any. This may have significant performance consequences. Use the statetype keyword argument to specify a concrete state type.")
end
return st
end
function infer_actiontype(kwd)
if haskey(kwd, :actiontype)
at = _call(Val(:actiontype), kwd[:actiontype], (), NamedTuple())
elseif haskey(kwd, :actions)
at = eltype(_call(Val(:actions), kwd[:actions], (), NamedTuple()))
else
at = Any
end
if at == Any
@warn("Unable to infer action type for a Quick(PO)MDP; using Any. This may have significant performance consequences. Use the actiontype keyword argument to specify a concrete action type.")
end
return at
end
function infer_obstype(kwd)
if haskey(kwd, :obstype)
ot = _call(Val(:obstype), kwd[:obstype], (), NamedTuple())
elseif haskey(kwd, :observations)
ot = eltype(_call(Val(:observations), kwd[:observations], (), NamedTuple()))
elseif haskey(kwd, :initialobs) && haskey(kwd, :initialstate)
s0 = _call(Val(:initialstate), kwd[:initialstate], (MersenneTwister(0),), NamedTuple())
ot = typeof(_call(Val(:initialobs), kwd[:initialobs], (s0, MersenneTwister(0),), NamedTuple()))
else
ot = Any
end
if ot == Any
@warn("Unable to infer observation type for a QuickPOMDP; using Any. This may have significant performance consequences. Use the obstype keyword argument to specify a concrete observation type.")
end
return ot
end
function _call(namev::Val{name}, m::QuickModel, args, kwargs=NamedTuple()) where name
_call(namev,
get(m.data, name) do
throw(MissingQuickArgument(m, name))
end,
args,
kwargs)
end
_call(::Val, f::Function, args, kwargs=NamedTuple()) = f(args...; kwargs...)
_call(v::Val, object, args, kwargs=NamedTuple()) = object
_call(v::Val, d::Dict, args, kwargs=NamedTuple()) = d[args]
macro forward_to_data(f)
quote
$f(m::QuickModel, args...; kwargs...) = _call(Val(nameof($f)), m, args, kwargs)
end
end
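# A sketch of what the macro produces (not generated output): for instance,
# `@forward_to_data POMDPs.discount` expands, per the `quote` above, to roughly
#     POMDPs.discount(m::QuickModel, args...; kwargs...) =
#         _call(Val(:discount), m, args, kwargs)
# so each forwarded function looks up its entry in `m.data` and, via `_call`,
# either calls it (a Function), indexes it (a Dict), or returns it unchanged.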
function POMDPs.transition(m::QuickModel, s, a)
if haskey(m.data, :transition)
return m.data.transition(s, a)
else
throw(MissingQuickArgument(m, :transition, types=[Function], also=[:gen]))
end
end
function POMDPs.observation(m::QuickPOMDP, args...)
if haskey(m.data, :observation)
return m.data.observation(args...)
else
throw(MissingQuickArgument(m, :observation, types=[Function], also=[:gen]))
end
end
@forward_to_data POMDPs.initialstate_distribution
@forward_to_data POMDPs.reward
function POMDPs.gen(m::QuickModel, s, a, rng)
if haskey(m.data, :gen)
return m.data.gen(s, a, rng)
else
return NamedTuple()
end
end
POMDPs.initialstate(m::QuickModel, rng::AbstractRNG) = _call(Val(:initialstate), m, (rng,))
function POMDPs.initialobs(m::QuickPOMDP, s, rng::AbstractRNG)
if haskey(m.data, :initialobs)
return _call(Val(:initialobs), m, (s, rng))
elseif haskey(m.data, :observation) && hasmethod(m.data.observation, typeof((s,)))
return rand(rng, m.data.observation(s))
else
throw(MissingQuickArgument(m, :initialobs, types=[obstype(m), Function], also=[:observation]))
end
end
@forward_to_data POMDPs.states
@forward_to_data POMDPs.actions
@forward_to_data POMDPs.observations
@forward_to_data POMDPs.discount
@forward_to_data POMDPs.stateindex
@forward_to_data POMDPs.actionindex
@forward_to_data POMDPs.obsindex
@forward_to_data POMDPs.isterminal
@forward_to_data POMDPModelTools.obs_weight
@forward_to_data POMDPModelTools.render
|
{"hexsha": "e6633ee66c7d2644e2a9a80262cebebd2da9832e", "size": 8114, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/quick.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/QuickPOMDPs.jl-8af83fb2-a731-493c-9049-9e19dbce6165", "max_stars_repo_head_hexsha": "3e1ce5ef3d1c8da474dc0b2b685e66ea7403058f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/quick.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/QuickPOMDPs.jl-8af83fb2-a731-493c-9049-9e19dbce6165", "max_issues_repo_head_hexsha": "3e1ce5ef3d1c8da474dc0b2b685e66ea7403058f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/quick.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/QuickPOMDPs.jl-8af83fb2-a731-493c-9049-9e19dbce6165", "max_forks_repo_head_hexsha": "3e1ce5ef3d1c8da474dc0b2b685e66ea7403058f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5276595745, "max_line_length": 203, "alphanum_fraction": 0.6629282721, "num_tokens": 2239}
|
@doc doc"""
GraphManifoldType
This type represents the type of data on the graph that the [`GraphManifold`](@ref)
represents.
"""
abstract type GraphManifoldType end
@doc doc"""
EdgeManifold <: GraphManifoldType
A type for a [`GraphManifold`](@ref) where the data is given on the edges.
"""
struct EdgeManifold <: GraphManifoldType end
@doc doc"""
VertexManifold <: GraphManifoldType
A type for a [`GraphManifold`](@ref) where the data is given on the vertices.
"""
struct VertexManifold <: GraphManifoldType end
@doc doc"""
GraphManifold{G, M, T} <: AbstractPowerManifold{M,NestedPowerRepresentation}
Build a manifold, that is a [`PowerManifold`](@ref) of the [`Manifold`](@ref) `M` either on the edges or vertices
of a graph `G` depending on the [`GraphManifoldType`](@ref) `T`.
# Fields
* `G` is an `AbstractSimpleGraph`
* `M` is a [`Manifold`](@ref)
"""
struct GraphManifold{G<:AbstractGraph,TM,T<:GraphManifoldType} <: AbstractPowerManifold{TM,NestedPowerRepresentation}
graph::G
manifold::TM
end
function GraphManifold(g::G, M::TM, ::VertexManifold) where {G<:AbstractGraph,TM<:Manifold}
return GraphManifold{G,TM,VertexManifold}(g, M)
end
function GraphManifold(g::G, M::TM, ::EdgeManifold) where {G<:AbstractGraph,TM<:Manifold}
return GraphManifold{G,TM,EdgeManifold}(g, M)
end
const EdgeGraphManifold = GraphManifold{<:AbstractGraph,<:Manifold,EdgeManifold}
const VertexGraphManifold = GraphManifold{<:AbstractGraph,<:Manifold,VertexManifold}
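# Hedged usage sketch (not part of the original file; assumes LightGraphs for
# the graph and a base manifold such as `Sphere(2)` from this package):
#     using LightGraphs
#     G = SimpleGraph(4)
#     add_edge!(G, 1, 2); add_edge!(G, 2, 3); add_edge!(G, 3, 4)
#     M = Sphere(2)
#     N = GraphManifold(G, M, VertexManifold())  # one point of M per vertex
#     E = GraphManifold(G, M, EdgeManifold())    # one point of M per edge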
@doc doc"""
check_manifold_point(M::GraphManifold, x)
Check whether `x` is a valid point on the [`GraphManifold`](@ref), i.e. its
length equals the number of vertices (for [`VertexManifold`](@ref)s) or
the number of edges (for [`EdgeManifold`](@ref)s) and that each element of `x`
passes the [`check_manifold_point`](@ref) test for the base manifold `M.manifold`.
"""
check_manifold_point(::GraphManifold, ::Any...)
function check_manifold_point(M::VertexGraphManifold, x; kwargs...)
if size(x) != (nv(M.graph),)
return DomainError(
length(x),
"The number of points in `x` ($(length(x))) does not match the number of nodes in the graph ($(nv(M.graph))).",
)
end
PM = PowerManifold(M.manifold, NestedPowerRepresentation(), nv(M.graph))
return check_manifold_point(PM, x; kwargs...)
end
function check_manifold_point(M::EdgeGraphManifold, x; kwargs...)
if size(x) != (ne(M.graph),)
return DomainError(
length(x),
"The number of points in `x` ($(size(x))) does not match the number of edges in the graph ($(ne(M.graph))).",
)
end
PM = PowerManifold(M.manifold, NestedPowerRepresentation(), ne(M.graph))
return check_manifold_point(PM, x; kwargs...)
end
@doc doc"""
check_tangent_vector(M::GraphManifold, x, v)
Check whether `x` is a valid point on the [`GraphManifold`](@ref) and
`v` is a vector from its tangent space, i.e. its
length equals the number of vertices (for [`VertexManifold`](@ref)s) or
the number of edges (for [`EdgeManifold`](@ref)s), and that each element of `v`
together with its corresponding entry of `x` passes the
[`check_tangent_vector`](@ref) test for the base manifold `M.manifold`.
"""
check_tangent_vector(::GraphManifold, ::Any...)
function check_tangent_vector(M::VertexGraphManifold, x, v; kwargs...)
if size(x) != (nv(M.graph),)
return DomainError(
length(x),
"The number of points in `x` ($(size(x)) does not match the number of nodes in the graph ($(nv(M.graph))).",
)
end
if size(v) != (nv(M.graph),)
return DomainError(
length(v),
"The number of points in `v` ($(size(v)) does not match the number of nodes in the graph ($(nv(M.graph))).",
)
end
PM = PowerManifold(M.manifold, NestedPowerRepresentation(), nv(M.graph))
return check_tangent_vector(PM, x, v; kwargs...)
end
function check_tangent_vector(M::EdgeGraphManifold, x, v; kwargs...)
if size(x) != (ne(M.graph),)
return DomainError(
length(x),
"The number of elements in `x` ($(size(x)) does not match the number of edges in the graph ($(ne(M.graph))).",
)
end
if size(v) != (ne(M.graph),)
return DomainError(
length(v),
"The number of elements in `v` ($(size(v)) does not match the number of edges in the graph ($(ne(M.graph))).",
)
end
PM = PowerManifold(M.manifold, NestedPowerRepresentation(), ne(M.graph))
return check_tangent_vector(PM, x, v; kwargs...)
end
get_iterator(M::EdgeGraphManifold) = 1:ne(M.graph)
get_iterator(M::VertexGraphManifold) = 1:nv(M.graph)
@doc doc"""
incident_log(M::GraphManifold, x)
Return the tangent vector on the (vertex) [`GraphManifold`](@ref), where at
each node the sum of the [`log`](@ref)s to incident nodes is computed.
For a `SimpleGraph`, an edge is interpreted as a double edge in the corresponding
SimpleDiGraph.
If the internal graph is a `SimpleWeightedGraph` the weighted sum of the
tangent vectors is computed.
"""
function incident_log(M::VertexGraphManifold, x)
v = zero_tangent_vector(M, x)
return incident_log!(M, v, x)
end
function incident_log!(M::VertexGraphManifold, v, x)
rep_size = representation_size(M.manifold)
for e in edges(M.graph)
v[src(e)] +=
log(M.manifold, _read(M, rep_size, x, src(e)), _read(M, rep_size, x, dst(e)))
if !is_directed(M.graph)
v[dst(e)] +=
log(
M.manifold,
_read(M, rep_size, x, dst(e)),
_read(M, rep_size, x, src(e))
)
end
end
return v
end
function incident_log!(
M::GraphManifold{<:AbstractSimpleWeightedGraph,<:Manifold,VertexManifold},
v,
x,
)
rep_size = representation_size(M.manifold)
for e in edges(M.graph)
v[src(e)] += (
get_weight(M.graph, src(e), dst(e)) *
log(
M.manifold,
_read(M, rep_size, x, src(e)),
_read(M, rep_size, x, dst(e))
)
)
if !is_directed(M.graph)
v[dst(e)] += (
get_weight(M.graph, dst(e), src(e)) *
log(
M.manifold,
_read(M, rep_size, x, dst(e)),
_read(M, rep_size, x, src(e))
)
)
end
end
return v
end
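# Hedged usage sketch (hypothetical data; see the construction sketch above):
# with `x` a vector holding one point of `M.manifold` per vertex,
#     # v = incident_log(N, x)
# entry `v[i]` is the (optionally weighted) sum of logarithmic maps from `x[i]`
# to the points at the vertices incident to vertex `i`.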
@doc doc"""
manifold_dimension(N::GraphManifold{G,M,VertexManifold})
returns the manifold dimension of the [`GraphManifold`](@ref) `N` on the vertices of
a graph $G=(V,E)$, i.e.
````math
d_{\mathcal N} = \lvert V \rvert d_{\mathcal M}.
````
"""
function manifold_dimension(M::VertexGraphManifold)
return manifold_dimension(M.manifold) * nv(M.graph)
end
@doc doc"""
manifold_dimension(N::GraphManifold{G,M,EdgeManifold})
returns the manifold dimension of the [`GraphManifold`](@ref) `N` on the edges of
a graph $G=(V,E)$, i.e.
````math
d_{\mathcal N} = \lvert E \rvert d_{\mathcal M}.
````
"""
function manifold_dimension(M::EdgeGraphManifold)
return manifold_dimension(M.manifold) * ne(M.graph)
end
|
{"hexsha": "1f49dbed5a9845472a01692ecb389f98afd27a19", "size": 7286, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/manifolds/GraphManifold.jl", "max_stars_repo_name": "manuelweisser/Manifolds.jl", "max_stars_repo_head_hexsha": "07f889a290ece01569c6c53bb0c96a5608923a0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/manifolds/GraphManifold.jl", "max_issues_repo_name": "manuelweisser/Manifolds.jl", "max_issues_repo_head_hexsha": "07f889a290ece01569c6c53bb0c96a5608923a0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/manifolds/GraphManifold.jl", "max_forks_repo_name": "manuelweisser/Manifolds.jl", "max_forks_repo_head_hexsha": "07f889a290ece01569c6c53bb0c96a5608923a0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-09T10:46:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-09T10:46:39.000Z", "avg_line_length": 34.5308056872, "max_line_length": 123, "alphanum_fraction": 0.6351907768, "num_tokens": 1967}
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["IntegratedLimbDarkOp"]
import theano
from theano import gof
import theano.tensor as tt
from .base_op import StarryBaseOp
class IntegratedLimbDarkOp(StarryBaseOp):
params_type = gof.ParamsType(
tol=theano.scalar.float64,
min_depth=theano.scalar.int32,
max_depth=theano.scalar.int32,
Nc=theano.scalar.int32,
include_contacts=theano.scalar.bool,
)
__props__ = ()
func_file = "./integrated_limbdark.cc"
func_name = "APPLY_SPECIFIC(integrated_limbdark)"
def __init__(self, tol=1e-6, min_depth=0, max_depth=50, Nc=-1,
include_contacts=False, **kwargs):
self.tol = float(tol)
self.min_depth = max(0, int(min_depth))
self.max_depth = max(self.min_depth + 1, int(max_depth))
self.Nc = int(Nc)
self.include_contacts = bool(include_contacts)
super(IntegratedLimbDarkOp, self).__init__()
def make_node(self, *args):
if len(args) != 11:
raise ValueError("wrong number of inputs")
dtype = theano.config.floatX
in_args = [tt.as_tensor_variable(a) for a in args]
out_args = [
in_args[1].type(),
tt.TensorType(dtype=theano.config.floatX,
broadcastable=[False] * (in_args[1].ndim + 1))(),
in_args[1].type(),
in_args[2].type(),
in_args[3].type(),
in_args[4].type(),
in_args[5].type(),
in_args[6].type(),
in_args[7].type(),
tt.lscalar().type(),
]
return gof.Apply(self, in_args, out_args)
def infer_shape(self, node, shapes):
return (
shapes[1], list(shapes[0]) + list(shapes[1]),
shapes[1], shapes[2], shapes[3], shapes[4], shapes[5],
shapes[6], shapes[7], ())
def grad(self, inputs, gradients):
c, r, x, xt, xtt, y, yt, ytt, z, zt, dt = inputs
f, dfdcl, dfdr, dfdx, dfdxt, dfdxtt, dfdy, dfdyt, dfdytt, neval \
= self(*inputs)
bf = gradients[0]
for i, g in enumerate(gradients[1:]):
if not isinstance(g.type, theano.gradient.DisconnectedType):
raise ValueError("can't propagate gradients wrt parameter {0}"
.format(i+1))
bc = tt.sum(tt.reshape(bf, (1, bf.size)) *
tt.reshape(dfdcl, (c.size, bf.size)), axis=-1)
br = bf * dfdr
bx = bf * dfdx
bxt = bf * dfdxt
bxtt = bf * dfdxtt
by = bf * dfdy
byt = bf * dfdyt
bytt = bf * dfdytt
return (
bc, br, bx, bxt, bxtt, by, byt, bytt,
tt.zeros_like(z), tt.zeros_like(zt), tt.zeros_like(dt))
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
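# Hedged usage sketch (not part of the original file): the op takes 11 symbolic
# inputs (named here after the unpacking in `grad` above) and returns 10
# outputs: the flux, eight partial derivatives, and an evaluation counter.
#     # op = IntegratedLimbDarkOp(tol=1e-7, max_depth=30)
#     # f, dfdcl, dfdr, dfdx, dfdxt, dfdxtt, dfdy, dfdyt, dfdytt, neval = \
#     #     op(c, r, x, xt, xtt, y, yt, ytt, z, zt, dt)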
|
{"hexsha": "cc4890424359e922e3ed9c226308dddcc50678ba", "size": 2974, "ext": "py", "lang": "Python", "max_stars_repo_path": "exoplanet/theano_ops/starry/integrated_limbdark.py", "max_stars_repo_name": "exowanderer/exoplanet", "max_stars_repo_head_hexsha": "dfd4859525ca574f1936de7b683951c35c292586", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-10-01T12:46:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T10:25:20.000Z", "max_issues_repo_path": "exoplanet/theano_ops/starry/integrated_limbdark.py", "max_issues_repo_name": "exowanderer/exoplanet", "max_issues_repo_head_hexsha": "dfd4859525ca574f1936de7b683951c35c292586", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exoplanet/theano_ops/starry/integrated_limbdark.py", "max_forks_repo_name": "exowanderer/exoplanet", "max_forks_repo_head_hexsha": "dfd4859525ca574f1936de7b683951c35c292586", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4157303371, "max_line_length": 78, "alphanum_fraction": 0.5608607935, "include": true, "reason": "import theano,from theano", "num_tokens": 812}
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import scipy.stats
import matplotlib.pylab as plt
import os
import sys
from .context import vfe
from .context import config
import pdb
np.random.seed(42)
# We first define several utility functions
def kink_true(x):
fx = np.zeros(x.shape)
for t in range(x.shape[0]):
xt = x[t]
if xt < 4:
fx[t] = xt + 1
else:
fx[t] = -4*xt + 21
return fx
def kink(T, process_noise, obs_noise, xprev=None):
if xprev is None:
xprev = np.random.randn()
y = np.zeros([T, ])
x = np.zeros([T, ])
xtrue = np.zeros([T, ])
for t in range(T):
if xprev < 4:
fx = xprev + 1
else:
fx = -4*xprev + 21
xtrue[t] = fx
x[t] = fx + np.sqrt(process_noise)*np.random.randn()
xprev = x[t]
y[t] = x[t] + np.sqrt(obs_noise)*np.random.randn()
return xtrue, x, y
def plot_latent_kink(model, y, plot_title=''):
# make prediction on some test inputs
N_test = 200
x_test = np.linspace(-4, 6, N_test) / model.emi_layer.C[0, 0]
x_test = np.reshape(x_test, [N_test, 1])
zu = model.dyn_layer.zu
mu, vu = model.predict_f(zu)
mf, vf = model.predict_f(x_test)
my, vy = model.predict_y(x_test)
# plot function
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x_test[:,0], kink_true(x_test[:,0]), '-', color='k')
ax.plot(zu, mu, 'ob')
ax.plot(x_test[:,0], mf[:,0], '-', color='b')
ax.fill_between(
x_test[:,0],
mf[:,0] + 2*np.sqrt(vf[:,0]),
mf[:,0] - 2*np.sqrt(vf[:,0]),
alpha=0.2, edgecolor='b', facecolor='b')
ax.plot(model.emi_layer.C[0, 0]*x_test[:,0], my[:,0], '-', color='r')
ax.fill_between(
model.emi_layer.C[0, 0]*x_test[:,0],
my[:,0] + 2*np.sqrt(vy[:,0]),
my[:,0] - 2*np.sqrt(vy[:,0]),
alpha=0.2, edgecolor='r', facecolor='r')
ax.plot(
y[0:model.N-1],
y[1:model.N],
'r+', alpha=0.5)
# mx, vx = model.get_posterior_x()
# ax.plot(mx[0:model.N-1], mx[1:model.N], 'og', alpha=0.3)
my, vy_noiseless, vy = model.get_posterior_y()
ax.plot(my[0:model.N-1], my[1:model.N], 'or', alpha=0.2)
ax.set_xlabel(r'$x_{t-1}$')
ax.set_ylabel(r'$x_{t}$')
ax.set_xlim([-4, 6])
ax.set_ylim([-7, 7])
plt.title(plot_title)
plt.savefig('/tmp/kink_'+plot_title+'.pdf')
def test_kink_linear_MM():
# generate a dataset from the kink function above
T = 200
process_noise = 0.2
obs_noise = 0.1
(xtrue, x, y) = kink(T, process_noise, obs_noise)
y_train = np.reshape(y, [y.shape[0], 1])
# init hypers
Dlatent = 1
Dobs = 1
M = 15
# create VFE model
model_vfe = vfe.SGPSSM(y_train, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1000, gp_emi=False, nat_param=True)
hypers = model_vfe.init_hypers(y_train)
model_vfe.update_hypers(hypers)
# optimise
# model_vfe.optimise(
# method='L-BFGS-B', maxiter=10000, reinit_hypers=False)
model_vfe.optimise(
method='adam', maxiter=10000, adam_lr=0.05, reinit_hypers=False)
opt_hypers = model_vfe.get_hypers()
plot_latent_kink(model_vfe, y, 'VFE_MM')
def test_kink_linear_MC():
# generate a dataset from the kink function above
T = 200
process_noise = 0.2
obs_noise = 0.1
(xtrue, x, y) = kink(T, process_noise, obs_noise)
y_train = np.reshape(y, [y.shape[0], 1])
# init hypers
Dlatent = 1
Dobs = 1
M = 15
# create VFE model
model_vfe = vfe.SGPSSM(y_train, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1000, gp_emi=False)
hypers = model_vfe.init_hypers(y_train)
model_vfe.update_hypers(hypers)
# optimise
# model_vfe.optimise(
# method='L-BFGS-B', maxiter=10000,
# reinit_hypers=False, prop_mode=config.PROP_MC)
model_vfe.optimise(
method='adam', maxiter=10000, adam_lr=0.05,
reinit_hypers=False, prop_mode=config.PROP_MC)
opt_hypers = model_vfe.get_hypers()
plot_latent_kink(model_vfe, y, 'VFE_MC')
if __name__ == '__main__':
np.random.seed(42)
test_kink_linear_MM()
# np.random.seed(42)
# test_kink_linear_MC()
|
{"hexsha": "5890d5e238d799e32b4fc3db64cede2b2b021fb0", "size": 4276, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/gpssm_vfe_examples.py", "max_stars_repo_name": "MattAshman/geepee", "max_stars_repo_head_hexsha": "ae71998579cb80e160f7ea5eb5adfa1c937fb90a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2017-08-16T18:45:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T08:21:36.000Z", "max_issues_repo_path": "examples/gpssm_vfe_examples.py", "max_issues_repo_name": "yohanJung/geepee", "max_issues_repo_head_hexsha": "4809c4f78efb9134677af57187957d8ba479ddea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-31T17:09:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-21T02:26:25.000Z", "max_forks_repo_path": "examples/gpssm_vfe_examples.py", "max_forks_repo_name": "yohanJung/geepee", "max_forks_repo_head_hexsha": "4809c4f78efb9134677af57187957d8ba479ddea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-06-02T08:13:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-07T13:48:22.000Z", "avg_line_length": 28.6979865772, "max_line_length": 83, "alphanum_fraction": 0.5919083255, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1424}
|
#!/usr/bin/python3
# coding:utf-8
from flask import render_template,json,jsonify,request
from app import app
import base64
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
import pickle
from PIL import Image,ImageFont, ImageDraw
__global_times = 0
__chinese_word_count = 3755 # number of common Chinese characters
__checkpoint_dir = './app/train_model/checkpoint/' # model checkpoint directory
__code_to_chinese_file = './app/train_model/code_word.pkl' # mapping between characters and their codes
__test_image_file = './app/image/pred1.png' # test image
__pred1_image_file = './app/image/pred1.png' # image for prediction #1
__pred2_image_file = './app/image/pred2.png' # image for prediction #2
__pred3_image_file = './app/image/pred3.png' # image for prediction #3
# Build a CNN: three 3x3 conv layers + three 2x2 max-pool layers + two FC layers
def buildCnn(top_k):
# with tf.device('/cpu:0'):
keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch') # image_size 64x64
labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
conv_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv1') # image_size 62x62
max_pool_1 = slim.max_pool2d(conv_1, [2, 2], [2, 2], padding='SAME') # image_size 31x31
conv_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv2') # image_size 29x29
max_pool_2 = slim.max_pool2d(conv_2, [2, 2], [2, 2], padding='SAME') # image_size 15x15
conv_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3') # image_size 13x13
max_pool_3 = slim.max_pool2d(conv_3, [2, 2], [2, 2], padding='SAME') # image_size 7x7
flatten = slim.flatten(max_pool_3)
fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024, activation_fn=tf.nn.tanh, scope='fc1') # tanh activation
logits = slim.fully_connected(slim.dropout(fc1, keep_prob),__chinese_word_count, activation_fn=None,scope='fc2') # no activation
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)) # softmax cross-entropy
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32)) # compute accuracy
global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
rate = tf.train.exponential_decay(2e-4, global_step, decay_steps=2000, decay_rate=0.97, staircase=True)
train_op = tf.train.AdamOptimizer(learning_rate=rate).minimize(loss, global_step=global_step) # SGD with an adaptively decayed learning rate (Adam)
probabilities = tf.nn.softmax(logits)
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
merged_summary_op = tf.summary.merge_all()
predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))
return {
'images': images,
'labels': labels,
'keep_prob': keep_prob,
'top_k': top_k,
'global_step': global_step,
'train_op': train_op,
'loss': loss,
'accuracy': accuracy,
'accuracy_top_k': accuracy_in_top_k,
'merged_summary_op': merged_summary_op,
'predicted_distribution': probabilities,
'predicted_index_top_k': predicted_index_top_k,
'predicted_val_top_k': predicted_val_top_k
}
def predictPrepare():
sess = tf.Session()
graph = buildCnn(top_k=3)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(__checkpoint_dir)
if ckpt:
saver.restore(sess, ckpt)
return graph, sess
def imagePrepare(image_path):
temp_image = Image.open(image_path).convert('L')
temp_image = temp_image.resize((64, 64), Image.ANTIALIAS)
temp_image = np.asarray(temp_image) / 255.0
temp_image = temp_image.reshape([-1, 64, 64, 1])
return temp_image
def createImage(predword,imagepath):
im = Image.new("RGB", (64, 64), (255, 255, 255))
dr = ImageDraw.Draw(im)
fonts = ImageFont.truetype("./app/static/fonts/msyh.ttc",36,encoding='utf-8')
dr.text((15, 10), predword,font=fonts,fill="#000000")
im.save(imagepath)
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html",title='Home')
@app.route('/chineseRecognize',methods=['POST'])
def chineseRecognize():
# receive the data sent by the frontend
data = json.loads(request.form.get('data'))
imagedata = data["test_image"]
imagedata = imagedata[22:]
img = base64.b64decode(imagedata)
file = open(__test_image_file, 'wb')
file.write(img)
file.close()
global __global_times, __graph1, __sess1
if __global_times == 0:
    __graph1, __sess1 = predictPrepare()  # load the model once; reuse the session for later requests
    __global_times = 1
temp_image = imagePrepare(__test_image_file)
# predict the top-3 character codes and their probabilities
predict_val, predict_index = __sess1.run([__graph1['predicted_val_top_k'], __graph1['predicted_index_top_k']],
                                         feed_dict={__graph1['images']: temp_image, __graph1['keep_prob']: 1.0})
with open(__code_to_chinese_file, 'rb') as f2:
    word_dict = pickle.load(f2)  # dictionary mapping codes to characters
createImage(word_dict[predict_index[0][0]], __pred1_image_file)  # render the top-1 predicted character as an image
createImage(word_dict[predict_index[0][1]], __pred2_image_file)
createImage(word_dict[predict_index[0][2]], __pred3_image_file)
# encode the prediction images for the frontend, together with their probabilities
with open(__pred1_image_file, 'rb') as fin:
image1_data = fin.read()
pred1_image = base64.b64encode(image1_data)
with open(__pred2_image_file, 'rb') as fin:
image2_data = fin.read()
pred2_image = base64.b64encode(image2_data)
with open(__pred3_image_file, 'rb') as fin:
image3_data = fin.read()
pred3_image = base64.b64encode(image3_data)
info = dict()
info['pred1_image'] = "data:image/jpg;base64," + pred1_image.decode()
info['pred1_accuracy'] = str('{:.2%}'.format(predict_val[0][0]))
info['pred2_image'] = "data:image/jpg;base64," + pred2_image.decode()
info['pred2_accuracy'] = str('{:.2%}'.format(predict_val[0][1]))
info['pred3_image'] = "data:image/jpg;base64," + pred3_image.decode()
info['pred3_accuracy'] = str('{:.2%}'.format(predict_val[0][2]))
return jsonify(info)
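# Hedged client-side sketch (not part of the original file; URL and filename
# are hypothetical). Note the handler above strips the first 22 characters of
# `test_image` (the "data:image/png;base64," prefix) before decoding.
#     # import requests, base64, json
#     # b64 = 'data:image/png;base64,' + base64.b64encode(open('char.png', 'rb').read()).decode()
#     # requests.post('http://localhost:5000/chineseRecognize',
#     #               data={'data': json.dumps({'test_image': b64})})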
|
{"hexsha": "060efe2544b9bdd047ccb62863ce6544fb3de599", "size": 6881, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/views.py", "max_stars_repo_name": "Stevenchooo/tensorflow-master", "max_stars_repo_head_hexsha": "1b7ee0fdcfd166cbbfabbbcf801cda56d8faca4e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-12-27T10:34:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-13T09:27:59.000Z", "max_issues_repo_path": "app/views.py", "max_issues_repo_name": "GuideWsp/online-hccr", "max_issues_repo_head_hexsha": "1b7ee0fdcfd166cbbfabbbcf801cda56d8faca4e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/views.py", "max_forks_repo_name": "GuideWsp/online-hccr", "max_forks_repo_head_hexsha": "1b7ee0fdcfd166cbbfabbbcf801cda56d8faca4e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-11-20T09:38:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-04T02:33:00.000Z", "avg_line_length": 45.8733333333, "max_line_length": 139, "alphanum_fraction": 0.6802790292, "include": true, "reason": "import numpy", "num_tokens": 2101}
|
function gen_player(model, number)
    if model == "uniform"
        # generate `number` players uniformly spread over a given range
        # (the unit interval is assumed here as a placeholder)
        return collect(range(0.0, stop=1.0, length=number))
    elseif model == "random"
        # generate `number` players randomly according to a given distribution
        # (a standard normal is assumed here as a placeholder)
        return randn(number)
    end
end
|
{"hexsha": "61b8f59ef6008626bff1fe2101136f41ac007d00", "size": 231, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Generation/PlayerGen.jl", "max_stars_repo_name": "JuliaTagBot/RatPack.jl", "max_stars_repo_head_hexsha": "44d5735c1bbfaa97b12d3f418b0e1c4b967da1df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Generation/PlayerGen.jl", "max_issues_repo_name": "JuliaTagBot/RatPack.jl", "max_issues_repo_head_hexsha": "44d5735c1bbfaa97b12d3f418b0e1c4b967da1df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-08T16:23:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-08T16:23:16.000Z", "max_forks_repo_path": "src/Generation/PlayerGen.jl", "max_forks_repo_name": "JuliaTagBot/RatPack.jl", "max_forks_repo_head_hexsha": "44d5735c1bbfaa97b12d3f418b0e1c4b967da1df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:20:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:20:51.000Z", "avg_line_length": 19.25, "max_line_length": 59, "alphanum_fraction": 0.645021645, "num_tokens": 46}
|
# encoding=utf-8
"""
Created on 17:25 2018/11/13
@author: Jindong Wang
"""
import numpy as np
import scipy.io
import scipy.linalg
import bob.learn
import bob.learn.linear
import bob.math
from sklearn.neighbors import KNeighborsClassifier
class GFK:
def __init__(self, Xs, Ys, Xt, Yt, dim=20):
'''
Init func
:param Xs: ns * n_feature
:param Ys: ns * 1
:param Xt: nt * n_feature
:param Yt: nt * 1
:param dim: dimension after GFK
'''
self.Xs, self.Ys, self.Xt, self.Yt = Xs, Ys, Xt, Yt
self.dim = dim
self.eps = 1e-20
def fit(self, norm_inputs=None):
'''
Obtain the kernel G
:param norm_inputs: normalize the inputs or not
:return: GFK kernel G
'''
if norm_inputs:
source, mu_source, std_source = self.znorm(self.Xs)
target, mu_target, std_target = self.znorm(self.Xt)
else:
mu_source = np.zeros(shape=(self.Xs.shape[1]))
std_source = np.ones(shape=(self.Xs.shape[1]))
mu_target = np.zeros(shape=(self.Xt.shape[1]))
std_target = np.ones(shape=(self.Xt.shape[1]))
source = self.Xs
target = self.Xt
Ps = self.train_pca(source, mu_source, std_source, 0.99)
Pt = self.train_pca(target, mu_target, std_target, 0.99)
Ps = np.hstack((Ps.weights, scipy.linalg.null_space(Ps.weights.T)))
Pt = Pt.weights[:, :self.dim]
N = Ps.shape[1]
dim = Pt.shape[1]
# Principal angles between subspaces
QPt = np.dot(Ps.T, Pt)
# [V1,V2,V,Gam,Sig] = gsvd(QPt(1:dim,:), QPt(dim+1:end,:));
A = QPt[0:dim, :].copy()
B = QPt[dim:, :].copy()
# Equation (2)
[V1, V2, V, Gam, Sig] = bob.math.gsvd(A, B)
V2 = -V2
# Some sanity checks with the GSVD
I = np.eye(V1.shape[1])
I_check = np.dot(Gam.T, Gam) + np.dot(Sig.T, Sig)
assert np.sum(abs(I - I_check)) < 1e-10
theta = np.arccos(np.diagonal(Gam))
# Equation (6)
B1 = np.diag(0.5 * (1 + (np.sin(2 * theta) / (2. * np.maximum
(theta, 1e-20)))))
B2 = np.diag(0.5 * ((np.cos(2 * theta) - 1) / (2 * np.maximum(
theta, self.eps))))
B3 = B2
B4 = np.diag(0.5 * (1 - (np.sin(2 * theta) / (2. * np.maximum
(theta, self.eps)))))
# Equation (9) of the suplementary matetial
delta1_1 = np.hstack((V1, np.zeros(shape=(dim, N - dim))))
delta1_2 = np.hstack((np.zeros(shape=(N - dim, dim)), V2))
delta1 = np.vstack((delta1_1, delta1_2))
delta2_1 = np.hstack((B1, B2, np.zeros(shape=(dim, N - 2 * dim))))
delta2_2 = np.hstack((B3, B4, np.zeros(shape=(dim, N - 2 * dim))))
delta2_3 = np.zeros(shape=(N - 2 * dim, N))
delta2 = np.vstack((delta2_1, delta2_2, delta2_3))
delta3_1 = np.hstack((V1, np.zeros(shape=(dim, N - dim))))
delta3_2 = np.hstack((np.zeros(shape=(N - dim, dim)), V2))
delta3 = np.vstack((delta3_1, delta3_2)).T
delta = np.dot(np.dot(delta1, delta2), delta3)
G = np.dot(np.dot(Ps, delta), Ps.T)
return G
def fit_predict(self):
'''
Fit and use 1NN to classify
:return: Accuracy, predicted labels of target domain, and G
'''
G = self.fit()
sqG = np.real(scipy.linalg.fractional_matrix_power(G, 0.5))
Xs_new, Xt_new = np.dot(sqG, self.Xs.T).T, np.dot(sqG, self.Xt.T).T
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, self.Ys.ravel())
y_pred = clf.predict(Xt_new)
acc = np.mean(y_pred == self.Yt.ravel())
return acc, y_pred, G
def principal_angles(self, Ps, Pt):
"""
Compute the principal angles between source (:math:`P_s`) and target (:math:`P_t`) subspaces on a Grassmann manifold, where the squared distance is defined as the following:
:math:`d^{2}(P_s, P_t) = \sum_{i}( \theta_i^{2} )`,
"""
# S = cos(theta_1, theta_2, ..., theta_n)
_, S, _ = np.linalg.svd(np.dot(Ps.T, Pt))
thetas_squared = np.arccos(S) ** 2
return np.sum(thetas_squared)
def train_pca(self, data, mu_data, std_data, subspace_dim):
'''
Modified PCA function, different from the one in sklearn
:param data: data matrix
:param mu_data: mu
:param std_data: std
:param subspace_dim: dim
:return: a wrapped machine object
'''
t = bob.learn.linear.PCATrainer()
machine, variances = t.train(data)
# For re-shaping, we need to copy...
variances = variances.copy()
# compute variance percentage, if desired
if isinstance(subspace_dim, float):
cummulated = np.cumsum(variances) / np.sum(variances)
for index in range(len(cummulated)):
if cummulated[index] > subspace_dim:
subspace_dim = index
break
subspace_dim = index
machine.resize(machine.shape[0], subspace_dim)
machine.input_subtract = mu_data
machine.input_divide = std_data
return machine
def znorm(self, data):
"""
Z-normalize the data
"""
mu = np.average(data, axis=0)
std = np.std(data, axis=0)
data = (data - mu) / std
return data, mu, std
def subspace_disagreement_measure(self, Ps, Pt, Pst):
"""
Get the best value for the number of subspaces
For more details, read section 3.4 of the paper.
**Parameters**
Ps: Source subspace
Pt: Target subspace
Pst: Source + Target subspace
"""
def compute_angles(A, B):
_, S, _ = np.linalg.svd(np.dot(A.T, B))
S[np.isclose(S, 1, atol=self.eps)] = 1
return np.arccos(S)
max_d = min(Ps.shape[1], Pt.shape[1], Pst.shape[1])
alpha_d = compute_angles(Ps, Pst)
beta_d = compute_angles(Pt, Pst)
d = 0.5 * (np.sin(alpha_d) + np.sin(beta_d))
return np.argmax(d)
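# Hedged worked example (not part of the original file): the SVD identity used
# in `principal_angles` above. For orthonormal bases Ps and Pt, the singular
# values of Ps.T @ Pt are cos(theta_i), so d^2 = sum_i arccos(s_i)^2; clipping
# guards against round-off pushing a singular value slightly above 1.
#     # Ps = np.linalg.qr(np.random.randn(50, 5))[0]
#     # Pt = np.linalg.qr(np.random.randn(50, 5))[0]
#     # s = np.linalg.svd(Ps.T @ Pt, compute_uv=False)
#     # d2 = np.sum(np.arccos(np.clip(s, -1, 1)) ** 2)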
if __name__ == '__main__':
domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat']
for i in range(4):
for j in range(4):
if i != j:
src, tar = 'data/' + domains[i], 'data/' + domains[j]
src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)
Xs, Ys, Xt, Yt = src_domain['feas'], src_domain['label'], tar_domain['feas'], tar_domain['label']
gfk = GFK(Xs, Ys, Xt, Yt)
acc, ypred, G = gfk.fit_predict()
print(acc)
|
{"hexsha": "22e7a5de13cd31a59f8e25ae67ee2d7468179b3f", "size": 6710, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/traditional/GFK/GFK.py", "max_stars_repo_name": "HyoKong/TransferLearning", "max_stars_repo_head_hexsha": "ae8c7104ca40705bc2437bdbc34483c46509ff57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/traditional/GFK/GFK.py", "max_issues_repo_name": "HyoKong/TransferLearning", "max_issues_repo_head_hexsha": "ae8c7104ca40705bc2437bdbc34483c46509ff57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/traditional/GFK/GFK.py", "max_forks_repo_name": "HyoKong/TransferLearning", "max_forks_repo_head_hexsha": "ae8c7104ca40705bc2437bdbc34483c46509ff57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-06T06:43:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-06T06:43:51.000Z", "avg_line_length": 33.3830845771, "max_line_length": 149, "alphanum_fraction": 0.5451564829, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1948}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import os
import pickle
import re
import time
import warnings
import cc3d
import ants
import numpy as np
import scipy
import scipy.io as spio
from matplotlib import pyplot as plt
from scipy import ndimage
from skimage.measure import regionprops
from sklearn.decomposition import PCA
from dependencies import ROOTDIR
from utils.HelperFunctions import Output, Configuration, FileOperations, Imaging, MatlabEquivalent, LeadProperties
from utils.estimateLeadRotation import function_wrapper
cfg = Configuration.load_config(ROOTDIR)
def PaCER_script(subjects, inputfolder=''):
"""wrapper script for all steps included in the PaCER algorithm"""
print("\nLead detection of {} subject(s)".format(len(subjects)))
inputfolder = cfg['folders']['nifti'] if not inputfolder else inputfolder # select default input folder
LW = LeadWorks() # load the class including necessary functions
# Look for data files containing CT imaging including the brainMask and load this into workspace
available_files = FileOperations.get_filelist_as_tuple(inputdir=inputfolder, subjects=subjects)
regex2lookfor = 'reg_' + 'run[0-9]', 'brainmask_'
file_id_CTimaging = [file_tuple for file_tuple in available_files
if re.search(r'\w.({}).'.format(regex2lookfor[0]), file_tuple[0], re.IGNORECASE)
and file_tuple[0].endswith('.nii') and 'CT' in file_tuple[0]]
file_id_brainMask = [file_tuple for file_tuple in available_files
if re.search(r'\w.({}).'.format(regex2lookfor[1]), file_tuple[0], re.IGNORECASE)
and file_tuple[0].endswith('.nii')]
    if any(len(k) > 2 for k in file_id_CTimaging):
        print("More than one file for imaging or brainmask available. Please double-check!")
        return
if not file_id_brainMask:
warnings.warn(message="\tNo brain mask was found, trying to obtain a mask using ANTSpyNET routines")
regex2lookforT1 = cfg['preprocess']['normalisation']['prefix'] + 'run'
file_id_T1 = [file_tuple for file_tuple in available_files
if re.search(r'\w.({}).'.format(regex2lookforT1), file_tuple[0], re.IGNORECASE)
and 't1' in file_tuple[0] and file_tuple[0].endswith('.nii')]
if not file_id_T1:
            Output.msg_box(text='No T1-sequence imaging available. BrainMask extraction impossible.',
                           title='T1 sequences missing')
return
else:
T1imaging = ants.image_read(file_id_T1[0][0])
file_id_brainMask = Imaging.create_brainmask(input_folder=inputfolder, subj=''.join(subjects),
registered_images=T1imaging)
    file_id_brainMask = [file_id_brainMask] if isinstance(file_id_brainMask, tuple) else file_id_brainMask
fileID = list(FileOperations.inner_join(file_id_brainMask, file_id_CTimaging)) # joins all to single list
metal_threshold = int(cfg['lead_detection']['PaCER']['metal_threshold'])
elecModels, intensityProfiles, skelSkalms = LW.electrodeEstimation(fileID[0], threshold=metal_threshold)
elecModels, skelSkalms, intensityProfiles, _ = \
LeadProperties.estimate_hemisphere(elecModels, intensityProfiles, skelSkalms) # returns hemisphere from coords.
filename_save = os.path.join(os.path.join(inputfolder, subjects[0]), 'elecModels_' + subjects[0] + '.pkl')
with open(filename_save, "wb") as f:
pickle.dump(elecModels, f)
pickle.dump(intensityProfiles, f)
pickle.dump(skelSkalms, f)
sides = ['left', 'right']
rotation_default, rotation_mod = [{k: [] for k in sides} for _ in range(2)]
for s in sides:
rotation_default[s] = function_wrapper(subj=subjects[0], side=s)
rotation_mod[s] = Configuration.rotation_dict_mod() # creates an empty array to save modified data later
filename_save = os.path.join(os.path.join(inputfolder, subjects[0]), 'rotation_' + subjects[0] + '.pkl')
with open(filename_save, "wb") as f:
pickle.dump(rotation_default, f)
pickle.dump(rotation_mod, f)
print("Finished with lead detection!")
# TODO: it does not return to the empty command line.
return
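# Usage sketch (hypothetical subject ID; the input folder defaults to the
# configured NIFTI directory above):
#   PaCER_script(subjects=['subj1'])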
class LeadWorks:
"""Runs algorithm for lead detection developed by Andreas Husch et al.
see https://www.sciencedirect.com/science/article/pii/S2213158217302450?via%3Dihub
It includes a three-step process: i) pre-processing, ii) and iii)"""
def __init__(self):
self.verbose = 0
self.debug = False
self.PacerEmulation = True
def electrodeEstimation(self, fileID, threshold=1500):
""" estimates the electrode mask with a threshold according to:
https://github.com/adhusch/PaCER/blob/master/src/Functions/extractElectrodePointclouds.m"""
CTimaging = ants.image_read(fileID[0])
brainMask_prob = ants.image_read(fileID[1]) # probabilistic brainMask
if CTimaging.dimension != 3:
            warnings.warn("\t Something went wrong during CT-preprocessing (ndim != 3)")
return
elif max(CTimaging.spacing) > 1:
warnings.warn("\tSlice thickness > 1mm! Independent contact detection unlikely. Using 'contactAreaCenter'")
cfg['lead_detection']['PaCER'][
'detection_method'] = 'contactAreaCenter' # when spacing too big, this detection method is recommended
elif max(CTimaging.spacing) > .7:
warnings.warn("\tSlice thickness > .7mm! Reliable contact detection not guaranteed. For certain "
"lead types with large contacts, it may, however, work.")
print("\tThresholding {}: {} for content w/ HU > {}".format(fileID[2], os.path.split(fileID[0])[1], threshold))
brainMask = np.zeros(shape=CTimaging.shape, dtype=bool)
if not self.PacerEmulation:
brainMask[brainMask_prob.abs() > .99] = True
else: # code compatible with debug from PaCER version in Lead-DBS package
sphere_test = np.array(Imaging.sphere(math.ceil(3 / max(CTimaging.spacing))))
brainMask[brainMask_prob.abs() > .5] = True
brainMask = ndimage.binary_erosion(brainMask, structure=sphere_test).astype(
bool) # erosion not necessary due to probabilistic maps and the possibility to change the threshold
CTImagingData = CTimaging.numpy()
CTImagingData[~brainMask] = np.nan
threshold_indices = np.zeros(shape=CTimaging.shape, dtype=bool)
threshold_indices[CTImagingData > threshold] = True # creates a mask with ones wherever inside the brain
# Largest connected components of metal inside brain represents the electrodes
cc = self.connected_objects(threshold_indices, connectivity_values=26)
print("\t{} potential metal components were detected within the brain.".format(np.max(cc)), end='')
ccProps = regionprops(label_image=cc, intensity_image=None, cache=True, coordinates=None)
minVoxelNumber = (1.2 * 1.27 / 2) ** 2 * math.pi * 40 / np.prod(
CTimaging.spacing) # according to PACER, 40 mm within brain & 20% partial voluming; source for values?
maxVoxelNumber = (3 * 1.27 / 2) ** 2 * math.pi * 80 / np.prod(
CTimaging.spacing) # assuming 80 mm in brain and 300 % partial voluming
        # Guessing areas of interest according to the minimum/maximum voxel number
        areas = [a for a in ccProps if minVoxelNumber <= a.area <= maxVoxelNumber]
print('Guessing {} of them being DBS-leads'.format(str(len(areas))))
leadPointCloudStruct, transformation_matrix = self.identifyLeads(fileID, CTimaging, threshold, areas) #
elecModels, intensityProfiles, skelSkalms = [[] for _ in range(3)]
for idx, leadPoints in enumerate(leadPointCloudStruct):
print("\nAnalysing lead no {} with {} pixels".format("\u0332".join(str(idx + 1)),
str(len(leadPoints['pixelList']))))
initialPoly, tPerMm, skeleton, totalLengthMm = self.electrodePointCloudModelEstimate(leadPoints,
CTimaging.spacing[2])
mod, prof, skel = \
self.refitElec(initialPoly, leadPoints['points'], leadPoints['pixelValues'],
CTspace=CTimaging.spacing[2])
mod['filenameCTimaging'] = os.path.split(fileID[0])
mod['transformation_matrix'] = transformation_matrix
elecModels.append(mod)
intensityProfiles.append(prof)
skelSkalms.append(skel)
return elecModels, intensityProfiles, skelSkalms
# ============================== PREPROCESSING (1. step) ==============================
def identifyLeads(self, fileID, CTimaging, threshold, areas):
"""Estimate number of electrodes found using regionprops routine and generate a PointCloud for every lead.
Details can be found in the book chapter by Husch et al (2015)"""
detected_leads = []
pca = PCA()
for i, comp in enumerate(areas):
X = np.multiply(comp.coords, np.tile(list(CTimaging.spacing), (len(comp.coords), 1)))
n_samples = X.shape[0]
X_transformed = pca.fit_transform(X)
X_centered = X - np.mean(X, axis=0)
cov_matrix = np.dot(X_centered.T, X_centered) / n_samples
latent = pca.explained_variance_
if self.debug: # sanity check
for latent_test, eigenvector in zip(latent, pca.components_):
print(np.dot(eigenvector.T, np.dot(cov_matrix, eigenvector)))
if len(latent) < 3:
continue
latent = np.sqrt(latent) * 2
lowerAxesLength = sorted(latent[1:3])
if (latent[0] >
float(cfg['lead_detection']['PaCER']['lambda']) and latent[0] / np.mean(latent[1:3]) > 10
and lowerAxesLength[1] / (lowerAxesLength[0] + .001) < 8):
detected_leads.append(comp)
        if not detected_leads:
            if threshold < 3000:
                print("Trying a higher threshold to ensure leads are detected.")
                return self.electrodeEstimation(fileID, threshold * 1.2)
            else:
                raise Exception("\t\tEven w/ thresholds around 3000 HU, no leads were detected. Double-check input!")
# TODO include transformation matrix from file if selected in options
transformation_matrix = np.multiply(np.eye(3), [round(f, 1) for f in
CTimaging.spacing]) # transformation only necessary if selected in options; otherwise all remains in "AN space"
leadpoint_cloudstruct = [] # initialise variable in workspace
items = ['pixelList', 'elecMask', 'points', 'pixelValues']
CTimagingData = CTimaging.numpy() # get the shape of CT imaging to later fill it with content
for i, leadID in enumerate(detected_leads):
leadpoint_cloudstruct.append({k: [] for k in items})
pixelList = leadID['coords']
leadpoint_cloudstruct[i]['pixelList'] = pixelList
leadpoint_cloudstruct[i]['points'] = pixelList @ abs(transformation_matrix[:3, :3])
leadpoint_cloudstruct[i]['pixelValues'] = np.array([CTimagingData[tuple(pixelList[i])]
for i, k in enumerate(pixelList)])
elecMask_temp = np.zeros(shape=CTimaging.shape)
for x, y, z in pixelList:
elecMask_temp[x, y, z] = 1
leadpoint_cloudstruct[i]['elecMask'] = elecMask_temp
filename_elecMask = os.path.join(os.path.split(fileID[0])[0], 'elecMask_no' + str(i) + '.nii')
ants.image_write(image=CTimaging.new_image_like(elecMask_temp), filename=filename_elecMask) # mask to NIFTI
return leadpoint_cloudstruct, transformation_matrix
# ============================== PREPROCESSING (2. step) ==============================
def electrodePointCloudModelEstimate(self, leadPoints, spacing, USE_REF_WEIGHTING=True, tol=0):
"""creates a skeleton of the leadPoints at the centroid of it and returns the coefficients of a polynomial"""
polynomial_ord = 8
zPlanes = np.unique(leadPoints['points'][:, -1])
        if not len(zPlanes) < leadPoints['points'].shape[0]:
            warnings.warn("\tCT planes in z-direction not perfectly aligned; trying with tolerance")
            tol = .1
            zPlanes = zPlanes[~(np.triu(np.abs(zPlanes[:, None] - zPlanes) <= tol, 1)).any(0)]
            if not len(zPlanes) < leadPoints['points'].shape[0]:
                raise Exception('Something is wrong with the CT imaging, please double-check!')
        skeleton, sumInPlane = [[] for _ in range(2)]
for zplaneID in zPlanes:
idx_zplane = np.where(abs(leadPoints['points'][:, -1] - zplaneID) <= tol)
            if idx_zplane[0].shape[0] > 1:  # ignore 'pseudo-slices' with only one plane
inPlanePoints = leadPoints['points'][idx_zplane, :]
if USE_REF_WEIGHTING:
inPlaneIntensities = leadPoints['pixelValues'][idx_zplane].astype('float32') #
# estimates slice wise centroid weighted by image intensity values (Husch et al. 2015, Section 2.1)
skeleton.append(np.squeeze(inPlanePoints).T @ (inPlaneIntensities / np.sum(inPlaneIntensities)))
sumInPlane.append(np.sum(inPlaneIntensities))
else:
                    skeleton.append(np.mean(np.squeeze(inPlanePoints), axis=0))
skeleton, sumInPlane = np.array(skeleton), np.array(sumInPlane)
skeleton_filter = sumInPlane < np.median(sumInPlane) / 1.5
if sum(skeleton_filter) > 0: # # filter skeleton for valid points
print("\t\tApplied axial skeleton filter due to low intensity planes")
skeleton = np.squeeze(skeleton[np.where(~skeleton_filter), :])
if all(skeleton[1, :] == np.zeros(3)):
raise Exception(
"\t\t... empty skeleton. Was CT-imaging acquired in axial flow?") # What on earth does that mean?
# Approximate parameterized polynomial([x y z] = f(t))
if len(skeleton) < polynomial_ord + 1:
print("\t\tElectrodePointCloudModelEstimate: less data points {} than internal polynomial degree ({}). "
"caveat: lowering degree!".format(str(len(skeleton)), str(polynomial_ord)))
polynomial_ord = len(skeleton) - 1
r3polynomial, tPerMm = self.fitParamPolytoSkeleton(skeleton, degree=polynomial_ord) # Husch etal. 2015 eq.(1-4)
totalLengthMm = self.polyArcLength3(polyCoeff=r3polynomial)
return r3polynomial, tPerMm, skeleton, totalLengthMm
def refitElec(self, initialPoly, pointCloud, voxelValues, CTspace, xy_resolution=.1, z_resolution=.025,
limit_contactsearch_mm=20, final_degree=1):
""""""
from scipy.interpolate import griddata
totalLengthMm = self.polyArcLength3(initialPoly)
totalLengthMm = [float(i) for i in totalLengthMm]
XGrid, YGrid = np.meshgrid(np.arange(start=-1.5, stop=1.6, step=xy_resolution),
np.arange(start=-1.5, stop=1.6, step=xy_resolution))
oneMmEqivStep = 1 / totalLengthMm[0]
STEP_SIZE = z_resolution * oneMmEqivStep
interpolationF = scipy.interpolate.LinearNDInterpolator(points=pointCloud, values=voxelValues)
print("\n\tFirst run:")
skeleton2nd, _, _, _, _ = self.oor(initialPoly, STEP_SIZE, XGrid, YGrid, interpolationF)
print("\n\tSecond run:")
R3polynomial2nd, _ = self.fitParamPolytoSkeleton(np.array(skeleton2nd), degree=8)
msg2plot = "\t\tRefitting parametrised polynomial to re-sampled data (2nd run)"
skeleton3rd, medIntensity, orthIntensVol, _, skelScaleMm = self.oor(R3polynomial2nd, STEP_SIZE, XGrid, YGrid,
interpolationF, run_information=msg2plot)
dat1, dat2 = self.polyArcLength3(initialPoly), self.polyArcLength3(R3polynomial2nd)
print("\n\t1st pass electrode length within Brain Convex Hull {:.4}mm".format(dat1[0]))
print("\t2nd pass electrode length within Brain Convex Hull {:.4}mm".format(dat2[0]))
filterWidth = (0.25 / z_resolution) + 1
filteredIntensity = scipy.ndimage.filters.uniform_filter1d(medIntensity, size=int(filterWidth))
filterIdxs = np.where(skelScaleMm <= limit_contactsearch_mm)
peakLocs, peakWaveCenters, peakValues, threshIntensityProfile, threshold, contactAreaCenter, contactAreaWidth, \
xrayMarkerAreaCenter, xrayMarkerAreaWidth = self.getIntensityPeaks(filteredIntensity, skelScaleMm,
filterIdxs)
detection_method = cfg['lead_detection']['PaCER']['detection_method']
if detection_method == 'peakWaveCenters':
contact_pos = peakWaveCenters
else:
contact_pos = peakLocs
try:
lead_information, dataModelPeakRMS = self.determineElectrodeType(contact_pos)
except KeyError:
print("Falling back to contact detection method: contactAreaCenter")
            detection_method = 'contactAreaCenter'
if len(contact_pos) < 4 or detection_method == 'contactAreaCenter':
if len(contact_pos) < 4:
warnings.warn("\tCould not detect independent electrode contacts. Check image quality")
return
lead_geometries = spio.loadmat(os.path.join(ROOTDIR, 'ext', 'PaCER', 'electrodeGeometries.mat'),
squeeze_me=True, simplify_cells=True)
lead_geometries = lead_geometries['electrodeGeometries']
lead_type = cfg['lead_detection']['PaCER']['lead_type']
if lead_type == 'unknown':
warnings.warn("\t\tNo lead specification provided, please set lead_type if possible. Meanwhile trying"
"to estimate type by width of contact area. Might be wrong, please double-check!")
if contactAreaWidth < 10.5:
print("Assuming Boston Scientific Directional or Medtronic 3389. Setting former.")
lead_information = lead_geometries[2]
else:
print("Assuming Medtronic 3387")
lead_information = lead_geometries[1]
else:
print("\t\tSetting user specified electrode type: {}".format(lead_type))
try:
idx_leadInformation = [i for i, x in enumerate([lead_type == k['string']
for k in lead_geometries]) if x][0]
lead_information = lead_geometries[idx_leadInformation]
except IndexError:
warnings.warn("\t\tUnknown lead-type provided. Assuming default i.e. idx=[-1]")
lead_information = lead_geometries[-1]
zeroT = self.invPolyArcLength3(R3polynomial2nd,
np.array(contactAreaCenter - np.mean(
lead_information['ringContactCentersMm']))) # calibrate zero
else:
dataModelPeakRMS = 0
if dataModelPeakRMS > 0.3: # original comment: "the MAX deviation might be a better measure than the RMS?"
print("\t\tSwitching to model-based contact positions because of high RMS "
"(Setting useDetectedContactPositions = 0).")
useDetectedContactPositions = 0 # TODO what is the purpose of this
zeroT = self.invPolyArcLength3(R3polynomial2nd,
contact_pos[0] - lead_information['zeroToFirstPeakMm'])
refittedContactDistances = contact_pos - (contact_pos[0] - lead_information['zeroToFirstPeakMm'])
if final_degree == 1:
elecEndT = self.invPolyArcLength3(R3polynomial2nd, np.array(limit_contactsearch_mm))
spacing = np.linspace(start=zeroT, stop=elecEndT, num=math.floor(totalLengthMm[0] / xy_resolution))
poly_coeffs = []
for i, coords in enumerate(R3polynomial2nd.T):
poly_coeffs.append(np.polyval(coords, spacing))
refittedR3PolyTmp = self.fitParamPolytoSkeleton(np.array(poly_coeffs).T, final_degree)
spacing = np.linspace(start=zeroT, stop=self.invPolyArcLength3(refittedR3PolyTmp[0],
np.array(totalLengthMm))[0],
num=math.floor(totalLengthMm[0] / xy_resolution))
poly_coeffs = []
for i, coords in enumerate(refittedR3PolyTmp[0].T):
poly_coeffs.append(np.polyval(coords, spacing))
refittedR3PolyReZeroed = self.fitParamPolytoSkeleton(np.array(poly_coeffs).T, final_degree)
else: # normal case
spacing = np.linspace(start=zeroT, stop=1, num=math.floor(totalLengthMm[0] / xy_resolution))
poly_coeffs = []
for i, coords in enumerate(R3polynomial2nd.T):
poly_coeffs.append(np.polyval(coords, spacing))
            refittedR3PolyReZeroed = self.fitParamPolytoSkeleton(np.array(poly_coeffs).T, final_degree)
print("\t\t\tElectrode Length within Brain Convex Hull after contact detection and Zero-Point calibration: "
"{:.5} mm".format(str(self.polyArcLength3(refittedR3PolyReZeroed[0], 0, 1)[0])))
refitReZeroedElecMod = self.summarise_results(refittedR3PolyReZeroed[0], lead_information, lead_type, CTspace)
return refitReZeroedElecMod, filteredIntensity, skelScaleMm
def fitParamPolytoSkeleton(self, skeleton, degree=3):
"""This function models the lead in a parametrised way according to (sec 2.2) Husch et al. 2015"""
diff_vector = np.diff(skeleton, axis=0)
approxTotalLengthMm = 0 # approximated total length [in mm]
        deltas, cumLengthMm = [np.zeros(len(diff_vector)) for _ in range(2)]
for k in range(0, len(diff_vector)):
deltas[k] = np.linalg.norm(diff_vector[k, :]) # according to eq(3) in Husch et al 2015,
cumLengthMm[k] = np.sum(deltas)
approxTotalLengthMm = approxTotalLengthMm + deltas[k]
avgStepsPerMm = 1 / approxTotalLengthMm # average steps [per mm]
t = np.append(0, np.divide(cumLengthMm, approxTotalLengthMm)) # 0 at start->len(t)=len(skel.), norm. to [0, 1]
# Design matrix e.g.T = [t. ^ 4 t. ^ 3 t. ^ 2 t ones(length(t), 1)].'
T = np.ones(shape=(len(t), degree + 1)) # for details cf. eq(4) Husch et al. 2015
        for col, k in enumerate(range(degree, 0, -1)):
            T[:, col] = t ** k
T = T.T
r3polynomial, _, _, _ = np.linalg.lstsq(T.T, skeleton,
rcond=None) # OLS solution for linear regression with T as coeffs.
fittingErrs = np.sqrt(np.sum((r3polynomial.T @ T - skeleton.T) ** 2, axis=0))
meanFittingError = np.mean(fittingErrs, axis=0)
stdFittingError = np.std(fittingErrs, axis=0)
maxFittingError = np.max(fittingErrs, axis=0)
print("\t\tMax off-model: {:.4}, Mean off-model: {:.4}".format(maxFittingError, meanFittingError))
if maxFittingError > 0.35 and maxFittingError > (meanFittingError + 3 * stdFittingError):
print("\t\t...Check for outliers/make sure chosen polynomial degree is appropriate.\n "
"\t\t...In most cases selection should be fine.\n")
return r3polynomial, avgStepsPerMm
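    # Sketch with synthetic input (not from the original code): fitting a
    # degree-1 polynomial to points sampled on the straight line (3t, 4t, 0)
    #   skel = np.outer(np.linspace(0, 1, 20), [3.0, 4.0, 0.0])
    #   coeffs, _ = self.fitParamPolytoSkeleton(skel, degree=1)
    # recovers columns approximately [3, 0], [4, 0], [0, 0], i.e. x=3t, y=4t, z=0.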
def oor(self, r3Poly, step_size, xGrid, yGrid, interpolationF, run_information=''):
"""optimal oblique re-sampling; routine enabling automatic contact detection by creating perpendicular slices
with respect to the lead """
if not run_information:
run_information = "\t\tEstimating oblique slices which are orthogonal to first-pass electrode"
arcLength = self.polyArcLength3(r3Poly)
oneMmEqivStep = 1 / arcLength[0]
lookahead = 3 * oneMmEqivStep
poly_coeffs = []
for coords in r3Poly.T:
poly_coeffs.append(np.polyder(coords))
evalAtT = np.arange(start=-lookahead, stop=1, step=step_size) # create samples of all available datapoints
iters = len(evalAtT)
        orthogonalSamplePoints, improvedSkeleton, avgIntensity, medIntensity, sumIntensity = [[] for _ in range(5)]
orthSamplePointsVol = np.zeros(r3Poly.shape[1] * len(xGrid) ** 2 * len(evalAtT)).reshape(r3Poly.shape[1],
len(xGrid) ** 2,
len(evalAtT))
orthIntensVol = np.zeros(xGrid.shape[0] * xGrid.shape[1] * len(evalAtT)).reshape(xGrid.shape[0], xGrid.shape[1],
len(evalAtT))
print("{}".format(run_information))
Output.printProgressBar(0, iters, prefix="\t\tProgress:", suffix='Complete', length=50)
for ind, relative_location in enumerate(evalAtT):
time.sleep(.01)
Output.printProgressBar(ind + 1, iters, prefix="\t\tProgress:", suffix='Complete', length=50)
direction = [np.polyval(x, relative_location) for x in poly_coeffs]
currentPoint = np.polyval(r3Poly, relative_location)
directionNorm = direction / np.linalg.norm(direction)
ortho1 = np.cross(directionNorm, [0, 1, 0])
ortho1 = ortho1 / np.linalg.norm(ortho1)
ortho2 = np.cross(directionNorm, ortho1)
ortho2 = ortho2 / np.linalg.norm(ortho2)
orthogonalSamplePoints = np.add(currentPoint[:, None],
(np.dot(ortho1[:, None], np.ndarray.flatten(xGrid, order='F')[None, :]) +
np.dot(ortho2[:, None], np.ndarray.flatten(yGrid, order='F')[None, :])))
orthSamplePointsVol[:, :, ind] = orthogonalSamplePoints
            intensities = interpolationF(orthogonalSamplePoints.T)
intensitiesNanZero = np.where(np.isnan(np.copy(intensities)), 0, intensities)
intensitiesNanZero[intensitiesNanZero < float(cfg['lead_detection']['PaCER']['snr_threshold'])] = 0
with np.errstate(divide='ignore', invalid='ignore'):
skelPoint = orthogonalSamplePoints @ intensitiesNanZero / sum(intensitiesNanZero)
if np.any(np.isnan(skelPoint)):
evalAtT = evalAtT[1:]
# evalAtT[ind] = np.nan
continue
else:
avgIntensity.append(np.nanmean(intensities)) # avgIntensity[ind] = np.nanmean(intensities)
sumIntensity.append(np.nansum(intensitiesNanZero)) # sumIntensity[ind] = np.nansum(intensitiesNanZero)
medIntensity.append(np.nanmedian(intensities)) # medIntensity[ind] = np.nanmedian(intensities)
improvedSkeleton.append(list(skelPoint))
orthIntensVol[:, :, ind] = np.reshape(intensitiesNanZero, (xGrid.shape[0], xGrid.shape[1],))
lowerLimits = np.zeros(shape=(len(evalAtT[~np.isnan(evalAtT)])))
upperLimits = evalAtT[~np.isnan(evalAtT)]
lowerLimits[upperLimits < 0] = upperLimits[upperLimits < 0]
upperLimits[upperLimits < 0] = 0
skeletonScaleMm = self.polyArcLength3(r3Poly, lowerLimit=lowerLimits, upperLimit=upperLimits)
skeletonScaleMm = np.array(skeletonScaleMm)
skeletonScaleMm[lowerLimits < 0] = -skeletonScaleMm[lowerLimits < 0]
return improvedSkeleton, medIntensity, orthIntensVol, orthSamplePointsVol, skeletonScaleMm
def getIntensityPeaks(self, filteredIntensity, skelScaleMm, filterIdxs):
"""Detection of center-line pointcloud using intensity-weighted means (see Husch et al. 2018, sec 2.4)"""
from scipy.signal import find_peaks
from scipy.ndimage import label
peaks, properties = find_peaks(filteredIntensity[filterIdxs],
distance=1.4, height=1.1 * np.nanmean(filteredIntensity),
prominence=.01 * np.nanmean(filteredIntensity))
        xrayMarkerAreaWidth, xrayMarkerAreaCenter = [[] for _ in range(2)]
try:
threshold = min(filteredIntensity[peaks[0:4]]) - (min(properties['prominences'][0:4]) / 4)
threshIntensityProfile = np.minimum(filteredIntensity, threshold)
contactSampleLabels = label(~(threshIntensityProfile[filterIdxs] < threshold))
values = MatlabEquivalent.accumarray(skelScaleMm[filterIdxs], contactSampleLabels[0])
counts = np.bincount(contactSampleLabels[0])
peakWaveCenters = values[1:5] / counts[1:5] # index 0 is the "zero label"
        except Exception:
print("\tpeakWaveCenter detection failed. Returning peaksLocs in peakWaveCenters.")
peakWaveCenters = skelScaleMm[peaks]
threshIntensityProfile = filteredIntensity
threshold = np.nan
# Detect 'contact area' as fallback for very low SNR signals where no single contacts are visible
thresholdArea = np.mean(filteredIntensity[filterIdxs])
threshIntensityProfileArea = np.minimum(filteredIntensity, thresholdArea)
contactSampleLabels = label(~(threshIntensityProfileArea[filterIdxs] < thresholdArea))
values = MatlabEquivalent.accumarray(skelScaleMm[filterIdxs], contactSampleLabels[0])
counts = np.bincount(contactSampleLabels[0])
        contactAreaCenter = values[1] / counts[1]  # label 0 is the "zero label", label 1 is the contact region; a later label may be an X-ray opaque marker
idxs = np.where(contactSampleLabels[0] + 1 == 2)
contactAreaWidth = np.abs(skelScaleMm[idxs[0][0]] - skelScaleMm[idxs[0][-1]])
if np.max(contactSampleLabels[0]) > 1:
            print('\tMultiple metal areas found along electrode. Possibly an electrode type with additional X-Ray marker!')
            xrayMarkerAreaCenter = values[2] / counts[2]  # label 2 corresponds to the X-ray opaque marker region
idxs = np.where(contactSampleLabels[0] + 1 == 3)
xrayMarkerAreaWidth = np.abs(skelScaleMm[idxs[0][0]] - skelScaleMm[idxs[0][-1]])
if self.debug:
plt.plot(skelScaleMm[filterIdxs], filteredIntensity[filterIdxs])
plt.scatter(skelScaleMm[peaks], filteredIntensity[peaks], edgecolors='red')
plt.plot(skelScaleMm[filterIdxs], threshIntensityProfileArea[filterIdxs])
plt.scatter(peakWaveCenters, [threshold] * 4)
plt.grid(color='grey', linestyle='-', linewidth=.25)
return peaks, peakWaveCenters, properties, threshIntensityProfile, threshold, contactAreaCenter, \
contactAreaWidth, xrayMarkerAreaCenter, xrayMarkerAreaWidth
@staticmethod
def connected_objects(data_array, connectivity_values=26):
""" function creating a list of objects that are connected and satisfy certain conditions. This aims at
replacing Mathworks bwconncomp.m function https://www.mathworks.com/help/images/ref/bwconncomp.html"""
import cc3d
labels_out = cc3d.connected_components(np.array(data_array), connectivity=connectivity_values)
return labels_out
# ============================== local Helper functions ==============================
@staticmethod
def polyArcLength3(polyCoeff, lowerLimit=0, upperLimit=1): # equation (2) Husch et al. 2018
"""The arc length is defined as the integral of the norm of the derivatives of the parameterized equations.
#arcLength(i) = integral(f, lowerLimit(i), upperLimit(i)); """
from scipy.integrate import quad
try:
int(lowerLimit)
lowerLimit = [lowerLimit]
upperLimit = [upperLimit]
except TypeError:
epsilon = 0.001 # used to avoid numerical accuracy problems in assertion
if not np.all(lowerLimit[:] <= upperLimit[:] + epsilon):
                raise ValueError('There is an accuracy problem here!')
regX, regY, regZ = polyCoeff[:, 0], polyCoeff[:, 1], polyCoeff[:, 2]
x_d, y_d, z_d = np.polyder(regX), np.polyder(regY), np.polyder(regZ)
arcLength = []
f_t = lambda x: np.sqrt(np.polyval(x_d, x) ** 2 + np.polyval(y_d, x) ** 2 + np.polyval(z_d, x) ** 2)
for lowerlim, upperlim in zip(lowerLimit, upperLimit):
arcLength.append(quad(f_t, lowerlim, upperlim)[0])
return arcLength
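    # Numeric sanity check (hypothetical coefficients): for the straight line
    # x=3t, y=4t, z=0, i.e. polyCoeff = np.array([[3., 4., 0.], [0., 0., 0.]]),
    # polyArcLength3(polyCoeff) returns [5.0] on the default interval [0, 1].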
def invPolyArcLength3(self, polyCoeff, arcLength): # eq. (3) in Husch et al. 2018
"""according to conditions resumed in paper, inverse of integral (arcLength) can be estimated as follows """
fx_invpolyArc = lambda x, a, coeff: np.abs(a - self.polyArcLength3(coeff, [0], x))
        if len(arcLength.shape) != 0:  # bulky snippet of code ensuring that single float values are processed as well
inv_arcLength = []
for i, arc_lgth in enumerate(arcLength):
inv_arcLength.append(scipy.optimize.fmin(func=fx_invpolyArc, x0=[0], args=(arc_lgth, polyCoeff,),
disp=0)[0])
else:
arcLength = arcLength[()]
inv_arcLength = scipy.optimize.fmin(func=fx_invpolyArc, x0=[0], args=(arcLength, polyCoeff,), disp=0)[0]
return inv_arcLength
@staticmethod
def determineElectrodeType(peakDistances):
"""determines the most suitable electrode type based on the euclidean distance between detected and specified
peaks in the collected data. Data should should be provided in the form of the file ‘electrodeGeometries.mat’"""
electrodeGeometries = spio.loadmat(os.path.join(ROOTDIR, 'ext', 'PaCER', 'electrodeGeometries.mat'),
squeeze_me=True, simplify_cells=True)
electrodeGeometries = electrodeGeometries['electrodeGeometries']
distances, rms = [np.zeros(len(electrodeGeometries)) for _ in range(2)]
for idx, geoms in enumerate(electrodeGeometries):
try:
distances[idx] = np.linalg.norm(np.diff(peakDistances) - geoms['diffsMm'])
rms[idx] = np.sqrt(np.mean(np.diff(peakDistances) - geoms['diffsMm']) ** 2)
except ValueError:
distances[idx] = float('inf')
rms[idx] = float('inf')
if np.all(np.isinf(distances)):
warnings.warn("\t\tCould NOT detect electrode type, thus contact detection might be flawed "
"(Image resolution? slice thickness!?) Set electrode type manually to continue with data")
elecStruct = electrodeGeometries[-1]
return elecStruct, rms
d = np.min(distances)
idx = np.argmin(distances)
rms = rms[np.where(distances == d)]
print("\t\tdetermineElectrodeType: data to model peak/contact spacing RMS distance is {} mm".format(str(rms)))
elecStruct = electrodeGeometries[idx]
return elecStruct, rms
def summarise_results(self, r3polynomial, lead_information, lead_type, CTspace):
"""function aiming at providing an overview of the results obtained"""
items1 = 'lead_diameter', 'lead_color', 'active_contact_color', 'alpha', 'metal_color', \
'r3polynomial', 'activeContact', 'detectedContactPosition', 'useDetectedContactPosition', \
'skeleton', 'contactPositions', 'getContactPositions3D', 'trajectory', 'markers_head', \
'markers_tail', 'normtraj_vector', 'orth', 'markers_x', 'markers_y', 'rotation', \
'manual_correction', 'first_run', 'transformation_matrix', 'filenameCTimaging'
refitReZeroedElecMod = {k: [] for k in items1}
refitReZeroedElecMod['lead_information'] = lead_information
refitReZeroedElecMod['lead_diameter'] = 1.27
refitReZeroedElecMod['lead_color'] = 0.1171875, 0.5625, 1
refitReZeroedElecMod['active_contact_color'] = 1, 0.83984375, 0
refitReZeroedElecMod['alpha'] = .9
refitReZeroedElecMod['metal_color'] = .75, .75, .75
refitReZeroedElecMod['r3polynomial'] = r3polynomial
        refitReZeroedElecMod['activeContact'] = np.nan
refitReZeroedElecMod['detectedContactPosition'] = []
        refitReZeroedElecMod['useDetectedContactPosition'] = False
refitReZeroedElecMod['skeleton'] = self.create_skeleton(r3polynomial)
refitReZeroedElecMod['contactPositions'] = .75, 2.75, 4.75, 6.75
refitReZeroedElecMod['activeContactPoint'] = []
positions = self.invPolyArcLength3(r3polynomial, np.array(refitReZeroedElecMod['contactPositions']))
poly_coeffs = []
for i, coords in enumerate(r3polynomial.T):
poly_coeffs.append(np.polyval(coords, positions))
refitReZeroedElecMod['getContactPositions3D'] = np.concatenate((poly_coeffs,
np.ones((1, 4)))).T @ np.eye(4) / CTspace
refitReZeroedElecMod['getContactPositions3D'] = refitReZeroedElecMod['getContactPositions3D'][:, :3]
trajectory = []
for dim in range(3):
trajectory.append(np.linspace(start=refitReZeroedElecMod['getContactPositions3D'][0, dim],
stop=refitReZeroedElecMod['getContactPositions3D'][0, dim] +
10 * (refitReZeroedElecMod['getContactPositions3D'][0, dim] -
refitReZeroedElecMod['getContactPositions3D'][-1, dim]),
num=20)) # TODO: differences between last contact and first are better suited for the trajectory
refitReZeroedElecMod['trajectory'] = np.array(trajectory).T
refitReZeroedElecMod['markers_head'] = refitReZeroedElecMod['getContactPositions3D'][0, :]
refitReZeroedElecMod['markers_tail'] = refitReZeroedElecMod['getContactPositions3D'][3, :]
refitReZeroedElecMod['normtraj_vector'] = np.divide((refitReZeroedElecMod['markers_tail'] -
refitReZeroedElecMod['markers_head']),
np.linalg.norm(refitReZeroedElecMod['markers_tail'] -
refitReZeroedElecMod['markers_head']))
refitReZeroedElecMod['orth'] = np.multiply(self.null(refitReZeroedElecMod['normtraj_vector']),
(refitReZeroedElecMod['lead_diameter'] / 2))
refitReZeroedElecMod['markers_x'] = refitReZeroedElecMod['getContactPositions3D'][0, :] + \
refitReZeroedElecMod['orth'][:, 0]
refitReZeroedElecMod['markers_y'] = refitReZeroedElecMod['getContactPositions3D'][0, :] + \
refitReZeroedElecMod['orth'][:, 1]
refitReZeroedElecMod['model'] = lead_type
refitReZeroedElecMod['manual_correction'] = False
refitReZeroedElecMod['first_run'] = False
return refitReZeroedElecMod
@staticmethod
def create_skeleton(r3polynomial):
evalAtT = np.arange(start=0, stop=1, step=1 / 1000) # create samples of all available datapoints
refittedSkeleton = [np.polyval(x, evalAtT) for x in r3polynomial.T]
return np.array(refittedSkeleton).T
@staticmethod
def null(A, atol=1e-13, rtol=0):
A = np.atleast_2d(A)
u, s, vh = np.linalg.svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns
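    # e.g. null(np.array([0., 0., 1.])) yields a 3x2 orthonormal basis spanning
    # the x-y plane; it is used above to build the two orthogonal marker offsets.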
|
{"hexsha": "bd715ed80229ab9ba1fe0cd87433acc274b8c3a5", "size": 40819, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/preprocLeadCT.py", "max_stars_repo_name": "dpedrosac/cDBS", "max_stars_repo_head_hexsha": "75ddb6a37a6f3b25f428005afc4e882bf31b09bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/preprocLeadCT.py", "max_issues_repo_name": "dpedrosac/cDBS", "max_issues_repo_head_hexsha": "75ddb6a37a6f3b25f428005afc4e882bf31b09bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-09-04T23:35:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-16T00:09:38.000Z", "max_forks_repo_path": "utils/preprocLeadCT.py", "max_forks_repo_name": "dpedrosac/cDBS", "max_forks_repo_head_hexsha": "75ddb6a37a6f3b25f428005afc4e882bf31b09bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.7719054242, "max_line_length": 168, "alphanum_fraction": 0.6229696955, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 9889}
|
"""Main script to execute simulation (and inference)."""
import argparse
from datetime import datetime
from pathlib import Path
import numpy as np
import pandas as pd
from forecast.protocol.simulation_steps import sorting_and_sequencing
from forecast.util.simulation import Simulation
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(description="Simulation of Flow-Seq dataset.")
parser.add_argument("--f_max", type=float, default=1e5, help="Fluorescence max of the FACS.")
parser.add_argument(
"--distribution", type=str, default="gamma", help="Fluorescence distribution name"
)
parser.add_argument(
"--csv_parameters",
type=str,
help="csv file with distribution parameters",
required=False,
)
parser.add_argument("--bins", type=int, default=12, help="Number of bins.")
parser.add_argument(
"--size", type=float, default=1e6, help="Number of bacteria sorted trough the FACS."
)
parser.add_argument(
"--reads", type=float, default=1e5, help="Number of reads allocated to sequencing."
)
parser.add_argument(
"--ratio_amplification", type=float, default=1e2, help="PCR amplification ratio."
)
parser.add_argument("--bias_library", type=bool, default=False, help="Bias in the library.")
parser.add_argument(
"--metadata_path",
type=Path,
help="Folder path containing library data.",
default="data",
)
parser.add_argument(
"--output_path",
type=Path,
default="out/simulation_" + datetime.now().strftime("%Y%m%d-%H%M%S"),
help="Path for the output folder.",
)
parser.add_argument(
"--fluorescence_amplification",
type=int,
default=1,
help="Ratio fluorescence/protein.",
)
args = parser.parse_args()
return args
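# Example invocation (assumed CLI, mirroring the defaults above):
#   python -m forecast.run_simulation --distribution gamma --bins 12 --reads 1e5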
def main(): # noqa: CCR001
"""Main script."""
# parse args
args = parse_args()
output_path = args.output_path
output_path.mkdir(parents=True, exist_ok=True)
if args.distribution == "gamma":
name_library = "library_gamma.csv"
theta1 = "a"
theta2 = "b"
elif args.distribution == "lognormal":
name_library = "library_normal.csv"
theta1 = "mu"
theta2 = "sigma"
df = pd.read_csv(Path(args.metadata_path) / f"{name_library}")
theta1 = df.iloc[:, 0].to_numpy()
theta2 = (
args.fluorescence_amplification * df.iloc[:, 1].to_numpy()
) # Fluorescence protein ratio
diversity = len(theta1)
# Create an instance of class experiment
my_simulation = Simulation(
args.bins,
diversity,
args.size,
args.reads,
args.f_max,
args.distribution,
args.ratio_amplification,
theta1,
theta2,
args.bias_library,
)
sequencing_matrix, sorted_matrix = sorting_and_sequencing(my_simulation)
np.savetxt(args.output_path / "sequencing.csv", sequencing_matrix, comments="", delimiter=",")
np.savetxt(args.output_path / "cells_bins.csv", sorted_matrix[None], delimiter=",")
if __name__ == "__main__":
main()
|
{"hexsha": "aa35ab80bd05cbd4b26a9a8e21a508667fbd8ece", "size": 3185, "ext": "py", "lang": "Python", "max_stars_repo_path": "forecast/run_simulation.py", "max_stars_repo_name": "Pierre-Aurelien/forecast", "max_stars_repo_head_hexsha": "d19c62bc7313c62836699bba2246afd0e79f1b53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "forecast/run_simulation.py", "max_issues_repo_name": "Pierre-Aurelien/forecast", "max_issues_repo_head_hexsha": "d19c62bc7313c62836699bba2246afd0e79f1b53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "forecast/run_simulation.py", "max_forks_repo_name": "Pierre-Aurelien/forecast", "max_forks_repo_head_hexsha": "d19c62bc7313c62836699bba2246afd0e79f1b53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3333333333, "max_line_length": 98, "alphanum_fraction": 0.643956044, "include": true, "reason": "import numpy", "num_tokens": 731}
|
"""
cifar-10 dataset, with support for random labels
"""
import numpy as np
import torch
import torchvision.datasets as datasets
class CIFAR10RandomLabels(datasets.CIFAR10):
"""CIFAR10 dataset, with support for randomly corrupt labels.
Params
------
corrupt_prob: float
Default 0.0. The probability of a label being replaced with random label.
random_pixel_prob: float
Default 0.0. The probability of an image being replaced with Gaussian distributed random pixels
shuffle_pixels: int 0/1/2
Default 0. 0: no permutation of pixels; 1: a random permutation is applied to all images; 2: different random permutations are applied to images
num_classes: int
Default 10. The number of classes in the dataset.
"""
def __init__(self, corrupt_prob=0.0, random_pixel_prob=0.0, shuffle_pixels=0, num_classes=10, **kwargs):
super(CIFAR10RandomLabels, self).__init__(**kwargs)
self.n_classes = num_classes
if corrupt_prob > 0:
self.corrupt_labels(corrupt_prob)
if random_pixel_prob > 0:
self.corrupt_pixels(random_pixel_prob)
        if shuffle_pixels in (1, 2):
            self.shuffle(shuffle_pixels)
def corrupt_labels(self, corrupt_prob):
labels = np.array(self.train_labels if self.train else self.test_labels)
np.random.seed(12345)
mask = np.random.rand(len(labels)) <= corrupt_prob
rnd_labels = np.random.choice(self.n_classes, mask.sum())
labels[mask] = rnd_labels
# we need to explicitly cast the labels from npy.int64 to
# builtin int type, otherwise pytorch will fail...
labels = [int(x) for x in labels]
if self.train:
self.train_labels = labels
else:
self.test_labels = labels
def corrupt_pixels(self, random_pixel_prob):
if self.train:
data = self.train_data
else:
data = self.test_data
# np.random.seed(12345)
        mean = [125.3, 123.0, 113.9]
        std = [63.0, 62.1, 66.7]
corrimgs = []
for img in data:
gaussian = []
            for m, s in zip(mean, std):
                gaussian.append(np.random.normal(m, s, size=(32, 32)))
gaussian = np.array(gaussian).transpose((1, 2, 0)) # (32,32,3) array
cor_img = (1 - random_pixel_prob) * img + random_pixel_prob * gaussian
corrimgs.append(cor_img.astype('uint8'))
if self.train:
self.train_data = np.array(corrimgs)
else:
self.test_data = np.array(corrimgs)
def shuffle(self, shuffle_pixels):
if self.train:
data = self.train_data
else:
data = self.test_data
shuff_imgs = []
if shuffle_pixels == 1: # same permutation for all
np.random.seed(12345) # fix the random permutation
shuffle_pat = np.random.permutation(32 * 32)
for img in data:
img = img.transpose(2, 0, 1).reshape(3, -1) # 3*1024 array
shuffle_img = img[:, shuffle_pat]
shuffle_img = shuffle_img.transpose(1, 0).reshape(32, 32, 3)
shuff_imgs.append(shuffle_img)
if shuffle_pixels == 2: # different permutations
for img in data:
shuffle_pat = np.random.permutation(32 * 32)
img = img.transpose(2, 0, 1).reshape(3, -1) # 3*1024 array
shuffle_img = img[:, shuffle_pat]
shuffle_img = shuffle_img.transpose(1, 0).reshape(32, 32, 3)
shuff_imgs.append(shuffle_img)
if self.train:
self.train_data = np.array(shuff_imgs)
else:
self.test_data = np.array(shuff_imgs)
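# Minimal usage sketch (standard torchvision CIFAR10 kwargs such as root/train/
# download are passed through **kwargs; transform omitted for brevity):
#   dataset = CIFAR10RandomLabels(corrupt_prob=0.5, root='./data', train=True, download=True)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=128, shuffle=True)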
|
{"hexsha": "35164f6889ebcf8f52932a006c748ed82be1edd5", "size": 3493, "ext": "py", "lang": "Python", "max_stars_repo_path": "cifar10_data.py", "max_stars_repo_name": "huweiATgithub/fitting-random", "max_stars_repo_head_hexsha": "e5527f3c75cc19807c8cbd25790d99d48aed7e2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cifar10_data.py", "max_issues_repo_name": "huweiATgithub/fitting-random", "max_issues_repo_head_hexsha": "e5527f3c75cc19807c8cbd25790d99d48aed7e2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cifar10_data.py", "max_forks_repo_name": "huweiATgithub/fitting-random", "max_forks_repo_head_hexsha": "e5527f3c75cc19807c8cbd25790d99d48aed7e2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2828282828, "max_line_length": 148, "alphanum_fraction": 0.6696249642, "include": true, "reason": "import numpy", "num_tokens": 950}
|
#include "tool/container/mapbox_vector_tile.hpp"
#include <protozero/pbf_writer.hpp>
#include <protozero/varint.hpp>
#include <boost/assert.hpp>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/geometries.hpp>
#include <boost/geometry/geometries/point_xy.hpp>
#include <boost/geometry/multi/geometries/multi_linestring.hpp>
#pragma GCC diagnostic pop
#include <algorithm>
#include <cmath>
#include <type_traits>
#include "geometric/coordinate.hpp"
namespace nepomuk
{
namespace tool
{
namespace container
{
namespace vector_tile
{
const constexpr std::uint32_t ID_TAG = 1;
const constexpr std::uint32_t NAME_TAG = 1;
const constexpr std::uint32_t FEATURE_TAG = 2;
const constexpr std::uint32_t LAYER_TAG = 3;
const constexpr std::uint32_t GEOMETRY_TAG = 3;
const constexpr std::uint32_t KEY_TAG = 3;
const constexpr std::uint32_t VARIANT_TAG = 4;
const constexpr std::uint32_t EXTENT_TAG = 5;
const constexpr std::uint32_t VERSION_TAG = 15;
const constexpr std::uint32_t FEATURE_ATTRIBUTES_TAG = 2;
const constexpr std::uint32_t FEATURE_GEOMETRIES_TAG = 4;
const constexpr std::uint32_t GEOMETRY_TYPE_POINT = 1;
const constexpr std::uint32_t GEOMETRY_TYPE_LINE = 2;
// https://github.com/mapbox/vector-tile-spec/blob/ab55f2bd7b8c0af5d9a845e4bb0b133811ce3ccf/2.1/vector_tile.proto#L17-L28
const constexpr std::uint32_t VARIANT_TYPE_STRING = 1;
const constexpr std::uint32_t VARIANT_TYPE_FLOAT = 2;
const constexpr std::uint32_t VARIANT_TYPE_DOUBLE = 3;
const constexpr std::uint32_t VARIANT_TYPE_UINT64 = 5;
const constexpr std::uint32_t VARIANT_TYPE_SINT64 = 6;
const constexpr std::uint32_t VARIANT_TYPE_BOOL = 7;
// Vector tiles are 4096 virtual pixels on each side
const constexpr double BUFFER = 128.0;
const constexpr double TILE_SIZE = 2 * BUFFER;
const constexpr double EXTENT = 16 * TILE_SIZE;
const constexpr int MOVETO_COMMAND = 9;
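// Command integers encode (count << 3) | command_id; a single MoveTo
// (id 1, count 1) therefore serialises as (1 << 3) | 1 = 9, matching the
// LineTo encoding `((size - 1) << 3) | 2u` used in add_line below.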
} // namespace vector_tile
namespace
{
template <typename content_type>
std::size_t get_map_index(std::map<content_type, std::size_t> &map, content_type const &content)
{
auto const itr = map.find(content);
if (itr == map.end())
{
auto result = map.size();
map.insert(std::make_pair(content, result));
return result;
}
else
return itr->second;
}
using IPoint = boost::geometry::model::point<std::int64_t, 2, boost::geometry::cs::cartesian>;
using Linestring = boost::geometry::model::linestring<IPoint>;
using MultiLinestring = boost::geometry::model::multi_linestring<Linestring>;
using ClipBox = boost::geometry::model::box<IPoint>;
// clipping a single tile
const static ClipBox clip_box(IPoint(-vector_tile::BUFFER, -vector_tile::BUFFER),
IPoint(vector_tile::EXTENT + vector_tile::BUFFER,
vector_tile::EXTENT + vector_tile::BUFFER));
IPoint to_tile_point(geometric::WGS84Coordinate coordinate,
geometric::MercatorBoundingBox const &bounding_box)
{
    geometric::MercatorCoordinate merc(std::move(coordinate));
auto const px_longitude = (geometric::doubleFromLatLon(merc.longitude) -
geometric::doubleFromLatLon(bounding_box.lower_left.longitude)) /
bounding_box.width();
auto const px_latitude = (geometric::doubleFromLatLon(merc.latitude) -
geometric::doubleFromLatLon(bounding_box.lower_left.latitude)) /
bounding_box.height();
return {static_cast<std::int64_t>(std::llround(px_longitude * vector_tile::EXTENT)),
static_cast<std::int64_t>(std::llround(px_latitude * vector_tile::EXTENT))};
}
} // namespace
bool VectorTileValue::operator==(VectorTileValue const &other) const
{
// on equal types, we need to check the contained types, otherwise both entries are not empty
if (type == other.type)
{
switch (type)
{
case VectorTileValueType::BOOL:
return boost::get<bool>(value) == boost::get<bool>(other.value);
case VectorTileValueType::INT:
return boost::get<std::int64_t>(value) == boost::get<std::int64_t>(other.value);
case VectorTileValueType::UINT:
return boost::get<std::uint64_t>(value) == boost::get<std::uint64_t>(other.value);
case VectorTileValueType::DOUBLE:
return boost::get<double>(value) == boost::get<double>(other.value);
case VectorTileValueType::FLOAT:
return boost::get<float>(value) == boost::get<float>(other.value);
default:
BOOST_ASSERT(type == VectorTileValueType::STRING);
return boost::get<std::string>(value) == boost::get<std::string>(other.value);
}
}
else
return false;
}
bool VectorTileValue::operator<(VectorTileValue const &other) const
{
if (type == other.type)
{
switch (type)
{
case VectorTileValueType::BOOL:
return boost::get<bool>(value) < boost::get<bool>(other.value);
case VectorTileValueType::INT:
return boost::get<std::int64_t>(value) < boost::get<std::int64_t>(other.value);
case VectorTileValueType::UINT:
return boost::get<std::uint64_t>(value) < boost::get<std::uint64_t>(other.value);
case VectorTileValueType::DOUBLE:
return boost::get<double>(value) < boost::get<double>(other.value);
case VectorTileValueType::FLOAT:
return boost::get<float>(value) < boost::get<float>(other.value);
default:
BOOST_ASSERT(type == VectorTileValueType::STRING);
return boost::get<std::string>(value) < boost::get<std::string>(other.value);
}
}
else
{
return type < other.type;
}
}
void VectorTileValue::write(protozero::pbf_writer &pbf_writer) const
{
protozero::pbf_writer values_writer(pbf_writer, vector_tile::VARIANT_TAG);
switch (type)
{
case VectorTileValueType::BOOL:
values_writer.add_bool(vector_tile::VARIANT_TYPE_BOOL, boost::get<bool>(value));
break;
case VectorTileValueType::INT:
values_writer.add_int64(vector_tile::VARIANT_TYPE_SINT64, boost::get<std::int64_t>(value));
break;
case VectorTileValueType::UINT:
values_writer.add_int64(vector_tile::VARIANT_TYPE_UINT64, boost::get<std::uint64_t>(value));
break;
case VectorTileValueType::DOUBLE:
values_writer.add_double(vector_tile::VARIANT_TYPE_DOUBLE, boost::get<double>(value));
break;
case VectorTileValueType::FLOAT:
values_writer.add_float(vector_tile::VARIANT_TYPE_FLOAT, boost::get<float>(value));
break;
default:
BOOST_ASSERT(type == VectorTileValueType::STRING);
values_writer.add_string(vector_tile::VARIANT_TYPE_STRING, boost::get<std::string>(value));
break;
}
}
MapboxVectorTileLayer::MapboxVectorTileLayer(protozero::pbf_writer &pbf_writer,
std::string const &name,
geometric::MercatorBoundingBox bounding_box)
: feature_id(1), layer_writer(pbf_writer, vector_tile::LAYER_TAG),
bounding_box(std::move(bounding_box))
{
// each layer starts of with its version
layer_writer.add_uint32(vector_tile::VERSION_TAG, 2);
// Field 1 is the "layer name" field, it's a string
layer_writer.add_string(vector_tile::NAME_TAG, name);
// Field 5 is the tile extent. It's a uint32 and should be set to 4096
layer_writer.add_uint32(vector_tile::EXTENT_TAG, vector_tile::EXTENT);
}
MapboxVectorTileLayer::~MapboxVectorTileLayer()
{
// write the key/value tables
write_key_table();
write_value_table();
}
void MapboxVectorTileLayer::write_key_table()
{
std::vector<std::string> ordered_keys(keys.size());
for (auto const &pair : keys)
ordered_keys[pair.second] = pair.first;
auto const add_key_tag = [this](auto const &key) {
layer_writer.add_string(vector_tile::KEY_TAG, key);
};
std::for_each(ordered_keys.begin(), ordered_keys.end(), add_key_tag);
}
void MapboxVectorTileLayer::write_value_table()
{
std::vector<VectorTileValue> ordered_values(values.size());
for (auto const &pair : values)
ordered_values[pair.second] = pair.first;
auto const call_write = [this](auto const &value) { value.write(layer_writer); };
std::for_each(ordered_values.begin(), ordered_values.end(), call_write);
}
void MapboxVectorTileLayer::add_point(
geometric::WGS84Coordinate const coordinate,
std::vector<std::pair<std::string, VectorTileValue>> const &features)
{
// only add the point, if it is within the tile
auto const tile_point = to_tile_point(std::move(coordinate), bounding_box);
if (!boost::geometry::within(tile_point, clip_box))
return;
protozero::pbf_writer feature_writer(layer_writer, vector_tile::FEATURE_TAG);
feature_writer.add_enum(vector_tile::GEOMETRY_TAG, vector_tile::GEOMETRY_TYPE_POINT);
feature_writer.add_uint64(vector_tile::ID_TAG, feature_id++);
encode_features(feature_writer, features);
// encode the point, scoping for flushing
{
protozero::packed_field_uint32 geometry(feature_writer,
vector_tile::FEATURE_GEOMETRIES_TAG);
geometry.add_element(vector_tile::MOVETO_COMMAND);
geometry.add_element(protozero::encode_zigzag32(tile_point.get<0>()));
geometry.add_element(protozero::encode_zigzag32(tile_point.get<1>()));
}
}
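// Note on coordinate encoding: geometry deltas are zigzag-encoded so that
// small negative values stay small as varints, e.g. encode_zigzag32 maps
// 0, -1, 1, -2, 2 to 0, 1, 2, 3, 4.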
void MapboxVectorTileLayer::add_line(
std::vector<geometric::WGS84Coordinate> const &line_string,
std::vector<std::pair<std::string, VectorTileValue>> const &features)
{
if (line_string.size() <= 1)
return;
// clip the linestring to the tilebox
Linestring unclipped_line;
auto const append_tile_coordinate = [this,
&unclipped_line](geometric::WGS84Coordinate coordinate) {
auto const tile_point = to_tile_point(std::move(coordinate), bounding_box);
boost::geometry::append(unclipped_line, tile_point);
};
std::for_each(line_string.begin(), line_string.end(), append_tile_coordinate);
MultiLinestring clipped_line;
boost::geometry::intersection(clip_box, unclipped_line, clipped_line);
// only start encoding, if there is an actual line string within the box
if (std::find_if(clipped_line.begin(), clipped_line.end(), [](auto const &line) {
return line.size() >= 2;
}) == clipped_line.end())
return;
protozero::pbf_writer feature_writer(layer_writer, vector_tile::FEATURE_TAG);
feature_writer.add_enum(vector_tile::GEOMETRY_TAG, vector_tile::GEOMETRY_TYPE_LINE);
feature_writer.add_uint64(vector_tile::ID_TAG, feature_id++);
encode_features(feature_writer, features);
for (auto const &line_segment : clipped_line)
{
// only encode lines
if (line_segment.size() >= 2)
{
protozero::packed_field_uint32 geometry(feature_writer,
vector_tile::FEATURE_GEOMETRIES_TAG);
geometry.add_element(vector_tile::MOVETO_COMMAND);
std::int32_t cur_lon = 0, cur_lat = 0;
// if this is auto, somehow it ends up as `expecting expression` to access the
// coordinates
auto const encode_point = [&cur_lon, &cur_lat, &geometry](IPoint const &tile_point) {
geometry.add_element(protozero::encode_zigzag32(tile_point.get<0>() - cur_lon));
geometry.add_element(protozero::encode_zigzag32(tile_point.get<1>() - cur_lat));
cur_lon = tile_point.get<0>();
cur_lat = tile_point.get<1>();
};
// encode the very first point
encode_point(line_segment.front());
// See:
// https://github.com/mapbox/vector-tile-spec/tree/master/2.1#example-command-integers
geometry.add_element(((line_segment.size() - 1) << 3) | 2u);
std::for_each(line_segment.begin() + 1, line_segment.end(), encode_point);
}
}
}
void MapboxVectorTileLayer::encode_features(
protozero::pbf_writer &feature_writer,
std::vector<std::pair<std::string, VectorTileValue>> const &features)
{
protozero::packed_field_uint32 field(feature_writer, vector_tile::FEATURE_ATTRIBUTES_TAG);
auto const encode = [this, &field](auto const &entry) {
auto const name_id = get_map_index(keys, entry.first);
auto const value_id = get_map_index(values, entry.second);
field.add_element(name_id);
field.add_element(value_id);
};
std::for_each(features.begin(), features.end(), encode);
}
MapboxVectorTile::MapboxVectorTile(std::uint32_t const horizontal,
std::uint32_t const vertical,
std::uint32_t const zoom_level)
: tile_writer(pbf_buffer), bounding_box(horizontal, vertical, zoom_level)
{
}
MapboxVectorTile::operator std::string() const { return pbf_buffer; }
MapboxVectorTileLayer MapboxVectorTile::new_layer(std::string const &name)
{
return MapboxVectorTileLayer(tile_writer, name, bounding_box);
}
} // namespace container
} // namespace tool
} // namespace nepomuk
|
{"hexsha": "65758dac181e5aa02fd349ea4d5ee880dc12fa95", "size": 13508, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tool/container/mapbox_vector_tile.cpp", "max_stars_repo_name": "mapbox/nepomuk", "max_stars_repo_head_hexsha": "8771482edb9b16bb0f5a152c15681c57eb3bb6b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22.0, "max_stars_repo_stars_event_min_datetime": "2017-05-12T11:52:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T06:05:08.000Z", "max_issues_repo_path": "src/tool/container/mapbox_vector_tile.cpp", "max_issues_repo_name": "mapbox/nepomuk", "max_issues_repo_head_hexsha": "8771482edb9b16bb0f5a152c15681c57eb3bb6b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 49.0, "max_issues_repo_issues_event_min_datetime": "2017-05-11T16:13:58.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-13T11:19:17.000Z", "max_forks_repo_path": "src/tool/container/mapbox_vector_tile.cpp", "max_forks_repo_name": "mapbox/nepomuk", "max_forks_repo_head_hexsha": "8771482edb9b16bb0f5a152c15681c57eb3bb6b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-11-19T12:04:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-06T06:14:25.000Z", "avg_line_length": 38.2662889518, "max_line_length": 121, "alphanum_fraction": 0.6787829434, "num_tokens": 3140}
|
export TensorMesh3D, getTensorMesh3D
export getCellCenteredGrid, getNodalGrid, getFaceGrids, getEdgeGrids
export getCellCenteredAxes, getNodalAxes, getBoundaryNodes
export getVolume, getVolumeInv, getFaceArea, getFaceAreaInv, getLength, getLengthInv
"""
mutable struct jInv.Mesh.TensorMesh3D <: AbstractTensorMesh
Fields:
h1::Vector{Float64} - cell size in x1 direction
h2::Vector{Float64} - cell size in x2 direction
h3::Vector{Float64} - cell size in x3 direction
x0::Vector{Float64} - origin
dim::Int - dimension (dim=3)
n::Vector{Int64} - number of cells in each direction
nc::Int - nc total number of cells (nc=prod(n))
nf::Vector{Int64} - number of faces
ne::Vector{Int64} - number of edges
Persistent Operators:
Operators should not be accessed directly. They will be built, if needed,
when accessing them using specified method. clear!(M) will release all
memory.
Div::SparseMatrixCSC - divergence (faces -> cell-centers)
Access via: getDivergenceMatrix(M)
Grad::SparseMatrixCSC - gradient (nodal -> edges)
Access via: getNodalGradientMatrix(M)
Curl::SparseMatrixCSC - curl (edges -> faces)
Access via: getCurlMatrix(M)
Af::SparseMatrixCSC - face average (faces -> cell-centers)
Access via: getFaceAverageMatrix(M)
Ae::SparseMatrixCSC - edge average (edges -> cell-centers)
Access via: getEdgeAverageMatrix(M)
An::SparseMatrixCSC - nodal average (nodes -> cell-centers)
Access via: getNodalAverageMatrix(M)
V::SparseMatrixCSC - cell volumes (diagonal matrix)
Access via: getVolume(M)
F::SparseMatrixCSC - face area (diagonal matrix)
Access via: getFaceArea(M)
L::SparseMatrixCSC - edge length (diagonal matrix)
Access via: getLength(M)
Vi::SparseMatrixCSC - inverse cell volumes (diagonal matrix)
Access via: getVolumeInv(M)
Fi::SparseMatrixCSC - inverse face area (diagonal matrix)
Access via: getFaceAreaInv(M)
Li::SparseMatrixCSC - inverse edge length (diagonal matrix)
Access via: getLengthInv(M)
nLap::SparseMatrixCSC - nodal Laplacian
Access via: getNodalLaplacian(M)
Example:
h1 = rand(4); h2 = rand(6); h3 = rand(5);
M = getTensorMesh3D(h1,h2,h3)
"""
mutable struct TensorMesh3D <: AbstractTensorMesh
h1::Vector{Float64}
h2::Vector{Float64}
h3::Vector{Float64}
x0::Vector{Float64}
dim::Int
n::Vector{Int64}
nc::Int
nf::Vector{Int64}
ne::Vector{Int64}
Div::SparseMatrixCSC
Grad::SparseMatrixCSC
Curl::SparseMatrixCSC
Af::SparseMatrixCSC
Ae::SparseMatrixCSC
An::SparseMatrixCSC
V::SparseMatrixCSC
F::SparseMatrixCSC
L::SparseMatrixCSC
Vi::SparseMatrixCSC
Fi::SparseMatrixCSC
Li::SparseMatrixCSC
nLap::SparseMatrixCSC
end
"""
function jInv.Mesh.getTensorMesh3D
constructs TensorMesh3D
Required Input:
h1::Array - cell-sizes in x1 direction
h2::Array - cell-sizes in x2 direction
h3::Array - cell-sizes in x3 direction
Optional Input
x0::Array - origin (default = zeros(3))
"""
function getTensorMesh3D(h1::Array,h2::Array,h3::Array,x0=zeros(3))
n = [length(h1); length(h2); length(h3)]
nc = prod(n)
nf = [(n[1]+1)*n[2]*n[3]; n[1]*(n[2]+1)*n[3]; n[1]*n[2]*(n[3]+1) ]
ne = [n[1]*(n[2]+1)*(n[3]+1); (n[1]+1)*n[2]*(n[3]+1); (n[1]+1)*(n[2]+1)*n[3] ]
empt = spzeros(0,0);
dim = 3
return TensorMesh3D(h1,h2,h3,x0,dim,n,nc,nf,ne,empt,empt,empt,empt,empt,empt,empt,empt,empt,empt,empt,empt,empt)
end
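# A minimal usage sketch (hypothetical helper, not part of jInv): operators
# such as getVolume are built lazily on first access and cached on the mesh;
# clear!(M) releases that memory again.
function exampleTensorMesh3D()
    h1 = rand(4); h2 = rand(6); h3 = rand(5)
    M  = getTensorMesh3D(h1, h2, h3)
    V  = getVolume(M)    # builds and caches diag(cell volumes)
    Vi = getVolumeInv(M) # likewise for the inverse volumes
    return M, V, Vi
end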
import Base.==
function ==(M1::TensorMesh3D,M2::TensorMesh3D)
isEqual = fill(true,20)
# check mandatory fields
isEqual[1] = (M1.h1 == M2.h1) & (M1.h2 == M2.h2) & (M1.h3 == M2.h3)
isEqual[3] = (M1.x0 == M2.x0)
isEqual[4] = (M1.dim == M2.dim)
isEqual[5] = (M1.n == M2.n)
isEqual[6] = (M1.nc == M2.nc)
isEqual[7] = (M1.nf == M2.nf)
isEqual[8] = (M1.ne == M2.ne)
# check fields that might be empty
if !(isempty(M1.Div)) && !(isempty(M2.Div))
isEqual[9] = (M1.Div == M2.Div)
end
if !(isempty(M1.Grad)) && !(isempty(M2.Grad))
isEqual[10] = (M1.Grad == M2.Grad)
end
if !(isempty(M1.Curl)) && !(isempty(M2.Curl))
isEqual[11] = (M1.Curl == M2.Curl)
end
if !(isempty(M1.Af)) && !(isempty(M2.Af))
isEqual[12] = (M1.Af == M2.Af)
end
if !(isempty(M1.Ae)) && !(isempty(M2.Ae))
isEqual[13] = (M1.Ae == M2.Ae)
end
if !(isempty(M1.An)) && !(isempty(M2.An))
isEqual[14] = (M1.An == M2.An)
end
if !(isempty(M1.V)) && !(isempty(M2.V))
isEqual[15] = (M1.V == M2.V)
end
if !(isempty(M1.F)) && !(isempty(M2.F))
isEqual[16] = (M1.F == M2.F)
end
if !(isempty(M1.L)) && !(isempty(M2.L))
isEqual[17] = (M1.L == M2.L)
end
if !(isempty(M1.Vi)) && !(isempty(M2.Vi))
isEqual[18] = (M1.Vi == M2.Vi)
end
if !(isempty(M1.Fi)) && !(isempty(M2.Fi))
isEqual[19] = (M1.Fi == M2.Fi)
end
if !(isempty(M1.Li)) && !(isempty(M2.Li))
isEqual[20] = (M1.Li == M2.Li)
end
return all(isEqual)
end
function getNodalAxes(Mesh::TensorMesh3D)
xn,yn,zn = getNodalAxes(Mesh.h1,Mesh.h2,Mesh.h3)
return xn.+Mesh.x0[1],yn.+Mesh.x0[2],zn.+Mesh.x0[3]
end
function getCellCenteredAxes(Mesh::TensorMesh3D)
xc,yc,zc = getCellCenteredAxes(Mesh.h1,Mesh.h2,Mesh.h3)
return xc.+Mesh.x0[1],yc.+Mesh.x0[2],zc.+Mesh.x0[3]
end
function getNodalGrid(Mesh::TensorMesh3D)
# X = getNodalGrid(Mesh::TensorMesh3D)
xn,yn,zn = getNodalAxes(Mesh)
Xn,Yn,Zn = ndgrid(xn,yn,zn)
return [vec(Xn) vec(Yn) vec(Zn)]
end
function getCellCenteredGrid(Mesh::TensorMesh3D)
# X = getCellCenteredGrid(Mesh::TensorMesh3D)
xc,yc,zc = getCellCenteredAxes(Mesh)
Xc,Yc,Zc = ndgrid(xc,yc,zc);
return [vec(Xc) vec(Yc) vec(Zc)]
end
function getEdgeGrids(Mesh::TensorMesh3D)
# [Xe1, Xe2, Xe3] = getEdgeGrids(Mesh::TensorMesh3D)
xn,yn,zn = getNodalAxes(Mesh)
xc,yc,zc = getCellCenteredAxes(Mesh)
Xe1,Ye1,Ze1 = ndgrid(xc,yn,zn)
Xe2,Ye2,Ze2 = ndgrid(xn,yc,zn)
Xe3,Ye3,Ze3 = ndgrid(xn,yn,zc)
return [vec(Xe1) vec(Ye1) vec(Ze1)], [vec(Xe2) vec(Ye2) vec(Ze2)], [vec(Xe3) vec(Ye3) vec(Ze3)]
end
function getFaceGrids(Mesh::TensorMesh3D)
# [Xf1, Xf2, Xf3] = getFaceGrids(Mesh::TensorMesh3D)
xn,yn,zn = getNodalAxes(Mesh)
xc,yc,zc = getCellCenteredAxes(Mesh)
Xf1,Yf1,Zf1 = ndgrid(xn,yc,zc)
Xf2,Yf2,Zf2 = ndgrid(xc,yn,zc)
Xf3,Yf3,Zf3 = ndgrid(xc,yc,zn)
return [vec(Xf1) vec(Yf1) vec(Zf1)], [ vec(Xf2) vec(Yf2) vec(Zf2)], [vec(Xf3) vec(Yf3) vec(Zf3)]
end
function getNodalAxes(h1,h2,h3)
nc = [length(h1); length(h2); length(h3)]
x = zeros(nc[1]+1); for i=1:nc[1], x[i+1] = x[i] + h1[i]; end
y = zeros(nc[2]+1); for i=1:nc[2], y[i+1] = y[i] + h2[i]; end
z = zeros(nc[3]+1); for i=1:nc[3], z[i+1] = z[i] + h3[i]; end
return x,y,z
end
function getCellCenteredAxes(h1,h2,h3)
x,y,z = getNodalAxes(h1,h2,h3)
# cell centered grids
xc = x[1:end-1] + h1/2;
yc = y[1:end-1] + h2/2;
zc = z[1:end-1] + h3/2;
return xc,yc,zc
end
function getNodalGrid(h1,h2,h3)
# X = getNodalGrid(h1,h2,h3)
xn,yn,zn = getNodalAxes(h1,h2,h3)
Xn,Yn,Zn = ndgrid(xn,yn,zn)
return [vec(Xn) vec(Yn) vec(Zn)]
end
function getCellCenteredGrid(h1,h2,h3)
# X = getCellCenteredGrid(h1,h2,h3)
xc,yc,zc = getCellCenteredAxes(h1,h2,h3)
Xc,Yc,Zc = ndgrid(xc,yc,zc);
return [vec(Xc) vec(Yc) vec(Zc)]
end
function getEdgeGrids(h1,h2,h3)
# [Xe1,Xe2,Xe3] = getEdgeGrids(h1,h2,h3)
xn,yn,zn = getNodalAxes(h1,h2,h3)
xc,yc,zc = getCellCenteredAxes(h1,h2,h3)
Xe1,Ye1,Ze1 = ndgrid(xc,yn,zn)
Xe2,Ye2,Ze2 = ndgrid(xn,yc,zn)
Xe3,Ye3,Ze3 = ndgrid(xn,yn,zc)
return [vec(Xe1) vec(Ye1) vec(Ze1)], [vec(Xe2) vec(Ye2) vec(Ze2)], [vec(Xe3) vec(Ye3) vec(Ze3)]
end
function getFaceGrids(h1,h2,h3)
# [Xf1, Xf2, Xf3] = getFaceGrids(h1,h2,h3)
xn,yn,zn = getNodalAxes(h1,h2,h3)
xc,yc,zc = getCellCenteredAxes(h1,h2,h3)
Xf1,Yf1,Zf1 = ndgrid(xn,yc,zc)
Xf2,Yf2,Zf2 = ndgrid(xc,yn,zc)
Xf3,Yf3,Zf3 = ndgrid(xc,yc,zn)
return [vec(Xf1) vec(Yf1) vec(Zf1)], [ vec(Xf2) vec(Yf2) vec(Zf2)], [vec(Xf3) vec(Yf3) vec(Zf3)]
end
# --- linear operators for tensor mesh
function getVolume(Mesh::TensorMesh3D;saveMat::Bool=true)
# Mesh.V = getVolume(Mesh::TensorMesh3D) computes volumes v, returns diag(v)
if isempty(Mesh.V)
V = kron(sdiag(Mesh.h3),kron(sdiag(Mesh.h2),sdiag(Mesh.h1)))
if saveMat
Mesh.V = V;
end
return V;
else
return Mesh.V
end
end
function getVolumeInv(Mesh::TensorMesh3D;saveMat::Bool=true)
# Mesh.Vi = getVolumeInv(Mesh::TensorMesh3D) returns sdiag(1 ./v)
if isempty(Mesh.Vi)
Vi = kron(sdiag(1 ./Mesh.h3),kron(sdiag(1 ./Mesh.h2),sdiag(1 ./Mesh.h1)))
if saveMat
Mesh.Vi = Vi;
end
return Vi;
else
return Mesh.Vi
end
end
function getFaceArea(Mesh::TensorMesh3D)
# Mesh.F = getFaceArea(Mesh::TensorMesh3D) computes face areas a, returns sdiag(a)
if isempty(Mesh.F)
f1 = kron(sdiag(Mesh.h3) ,kron(sdiag(Mesh.h2) ,sparse(1.0I,Mesh.n[1]+1,Mesh.n[1]+1)))
f2 = kron(sdiag(Mesh.h3) ,kron(sparse(1.0I,Mesh.n[2]+1,Mesh.n[2]+1) ,sdiag(Mesh.h1)))
f3 = kron(sparse(1.0I,Mesh.n[3]+1,Mesh.n[3]+1) ,kron(sdiag(Mesh.h2) ,sdiag(Mesh.h1)))
Mesh.F = blockdiag(blockdiag(f1,f2),f3)
end
return Mesh.F
end
function getFaceAreaInv(Mesh::TensorMesh3D)
# Mesh.Fi = getFaceAreaInv(Mesh::TensorMesh3D) computes inverse of face areas, returns sdiag(1 ./a)
if isempty(Mesh.Fi)
f1i = kron(sdiag(1 ./Mesh.h3) ,kron(sdiag(1 ./Mesh.h2) ,sparse(1.0I,Mesh.n[1]+1,Mesh.n[1]+1)))
f2i = kron(sdiag(1 ./Mesh.h3) ,kron(sparse(1.0I,Mesh.n[2]+1,Mesh.n[2]+1) ,sdiag(1 ./Mesh.h1)))
f3i = kron(sparse(1.0I,Mesh.n[3]+1,Mesh.n[3]+1) ,kron(sdiag(1 ./Mesh.h2) ,sdiag(1 ./Mesh.h1)))
Mesh.Fi = blockdiag(blockdiag(f1i,f2i),f3i)
end
return Mesh.Fi
end
function getLength(Mesh::TensorMesh3D)
# Mesh.L = getLength(Mesh::TensorMesh3D) computes edge lengths l, returns sdiag(l)
if isempty(Mesh.L)
l1 = kron(sparse(1.0I,Mesh.n[3]+1,Mesh.n[3]+1),kron(sparse(1.0I,Mesh.n[2]+1,Mesh.n[2]+1),sdiag(Mesh.h1)))
l2 = kron(sparse(1.0I,Mesh.n[3]+1,Mesh.n[3]+1),kron(sdiag(Mesh.h2),sparse(1.0I,Mesh.n[1]+1,Mesh.n[1]+1)))
l3 = kron(sdiag(Mesh.h3),kron(sparse(1.0I,Mesh.n[2]+1,Mesh.n[2]+1),sparse(1.0I,Mesh.n[1]+1,Mesh.n[1]+1)))
Mesh.L = blockdiag(blockdiag(l1,l2),l3);
end
return Mesh.L
end
function getLengthInv(Mesh::TensorMesh3D)
# Mesh.Li = getLengthInv(Mesh::TensorMesh3D) computes inverse of edge lengths l, returns sdiag(1 ./l)
if isempty(Mesh.Li)
l1i = kron(sparse(1.0I,Mesh.n[3]+1,Mesh.n[3]+1),kron(sparse(1.0I,Mesh.n[2]+1,Mesh.n[2]+1),sdiag(1 ./Mesh.h1)))
l2i = kron(sparse(1.0I,Mesh.n[3]+1,Mesh.n[3]+1),kron(sdiag(1 ./Mesh.h2),sparse(1.0I,Mesh.n[1]+1,Mesh.n[1]+1)))
l3i = kron(sdiag(1 ./Mesh.h3),kron(sparse(1.0I,Mesh.n[2]+1,Mesh.n[2]+1),sparse(1.0I,Mesh.n[1]+1,Mesh.n[1]+1)))
Mesh.Li = blockdiag(blockdiag(l1i,l2i),l3i);
end
return Mesh.Li
end
"""
function jInv.Mesh.getBoundaryNodes(Mesh)
Returns an array tuple (boundary node indices, inner node indices)
Input:
Mesh::Abstract Mesh
Output:
Tuple{Array{Int64,1},Array{Int64,1}}
"""
function getBoundaryNodes(M::AbstractTensorMesh)
# number of cells in each direction
N = M.n
# nodal matrix
nodal_mat = 1:prod(N.+1)
# check dimension
if M.dim==2
nodal_mat = reshape(nodal_mat, N[1]+1, N[2]+1)
iin = nodal_mat[2:end-1,2:end-1]
ib = setdiff(nodal_mat, iin)
elseif M.dim==3
nodal_mat = reshape(nodal_mat, N[1]+1, N[2]+1, N[3]+1)
iin = nodal_mat[2:end-1,2:end-1,2:end-1]
ib = setdiff(nodal_mat, iin)
end
return (vec(ib), vec(iin))
end
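# A minimal sketch (hypothetical helper) using the boundary/inner node split,
# e.g. to impose homogeneous Dirichlet values on a nodal field:
function exampleBoundaryNodes(M::AbstractTensorMesh)
    ib, iin = getBoundaryNodes(M)
    u = ones(prod(M.n .+ 1))
    u[ib] .= 0.0  # zero the boundary nodes, leave inner nodes untouched
    return u
end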
include("getEdgeIntegralOfPolygonalChain.jl")
|
{"hexsha": "23566e23ce4e0c870bacbcef1fae6d1e0ab90137", "size": 11649, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Mesh/tensor.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/jInv.jl-3dacf901-f8cd-5544-86ed-7a705f85c244", "max_stars_repo_head_hexsha": "2e7305f231a29bd8e1e803b82cc2bc8e9b7a205a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2016-04-11T22:51:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T21:58:53.000Z", "max_issues_repo_path": "src/Mesh/tensor.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/jInv.jl-3dacf901-f8cd-5544-86ed-7a705f85c244", "max_issues_repo_head_hexsha": "2e7305f231a29bd8e1e803b82cc2bc8e9b7a205a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2016-03-23T18:24:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-08T15:52:47.000Z", "max_forks_repo_path": "src/Mesh/tensor.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/jInv.jl-3dacf901-f8cd-5544-86ed-7a705f85c244", "max_forks_repo_head_hexsha": "2e7305f231a29bd8e1e803b82cc2bc8e9b7a205a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2016-03-23T16:52:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T17:04:41.000Z", "avg_line_length": 30.4947643979, "max_line_length": 113, "alphanum_fraction": 0.6497553438, "num_tokens": 4610}
|
# -*-coding:utf-8-*-
import sys
sys.path.append('.')
import tensorflow as tf
import tensorflow.contrib.slim as slim
import time
import numpy as np
import cv2
from lib.dataset.dataietr import DataIter
from lib.core.model.net.pruned_ssd import DSFD
from mac_config import config as cfg
from lib.helper.logger import logger
class trainner():
def __init__(self):
self.train_ds = DataIter(cfg.DATA.root_path, cfg.DATA.train_txt_path, training_flag=True)
self.val_ds = DataIter(cfg.DATA.root_path, cfg.DATA.val_txt_path, training_flag=False)
self.inputs = []
self.outputs = []
self.val_outputs = []
self.ite_num = 1
self._graph = tf.Graph()
self.summaries = []
self.ema_weights = False
def get_opt(self):
with self._graph.as_default():
##set the opt there
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), dtype=tf.int32, trainable=False)
# Decay the learning rate
lr = tf.train.piecewise_constant(global_step,
cfg.TRAIN.lr_decay_every_step,
cfg.TRAIN.lr_value_every_step
)
opt = tf.train.MomentumOptimizer(lr, momentum=0.9, use_nesterov=False)
return opt, lr, global_step
def load_weight(self):
with self._graph.as_default():
# if cfg.MODEL.continue_train:
# #restore the params
# variables_restore = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# # for v in tf.global_variables():
# # if 'moving_mean' in v.name or 'moving_variance' in v.name:
# # variables_restore.append(v)
# saver2 = tf.train.Saver(variables_restore, max_to_keep=None)
# saver2.restore(self.sess, cfg.MODEL.pretrained_model)
if cfg.MODEL.trained_model is not None:
#restore the params
variables_restore = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for v in tf.global_variables():
if 'moving_mean' in v.name or 'moving_variance' in v.name:
if cfg.MODEL.net_structure in v.name:
variables_restore.append(v)
# print(variables_restore)
variables_restore_n = [v for v in variables_restore if
'GN' not in v.name] # Conv2d_1c_1x1 Bottleneck
# print(variables_restore_n)
saver2 = tf.train.Saver(variables_restore_n, max_to_keep=None)
saver2.restore(self.sess, cfg.MODEL.trained_model)
            else:
                logger.info('no trained model provided, cannot start training')
                raise ValueError('cfg.MODEL.trained_model must be set')
# Build an initialization operation to run below.
def frozen(self):
with self._graph.as_default():
variables_need_grads = []
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for v in variables:
training_flag = True
if cfg.TRAIN.frozen_stages >= 0:
if '%s/conv1' % cfg.MODEL.net_structure in v.name:
training_flag = False
for i in range(1, 1 + cfg.TRAIN.frozen_stages):
if '%s/block%d' % (cfg.MODEL.net_structure, i) in v.name:
training_flag = False
break
if training_flag:
variables_need_grads.append(v)
else:
v_stop = tf.stop_gradient(v)
return variables_need_grads
def add_summary(self, event):
self.summaries.append(event)
def tower_loss(self, scope, images, labels, boxes, L2_reg, training):
"""Calculate the total loss on a single tower running the model.
Args:
        scope: unique prefix string identifying the tower, e.g. 'tower_0'
        images: Images. 4D tensor of shape [batch_size, height, width, 3].
        labels: Labels. 2D tensor of shape [batch_size, num_boxes].
        boxes: Ground-truth boxes. 3D tensor of shape [batch_size, num_boxes, 4].
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
ssd = DSFD()
reg_loss, cla_loss, endpoint = ssd.forward(images, boxes, labels, L2_reg, training)
print(endpoint)
regularization_losses = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), name='l2_loss')
# regularization_losses = tf.reduce_sum(tf.zeros([1]))
lowest_loss = []
for num,v in enumerate(endpoint):
loss = tf.reduce_max(v, axis=[1, 2])
lowest_k = tf.cast(tf.floor(tf.scalar_mul(cfg.TRAIN.pruned_ratio[num], tf.cast(tf.shape(loss)[1], dtype=tf.float32))),
dtype=tf.int32) # hyperparameter
# print(lowest_k)
sorted_loss = tf.sort(loss)
sorted_loss = tf.reduce_mean(sorted_loss[:, 0:lowest_k])
sorted_loss = tf.where(tf.is_nan(sorted_loss), tf.zeros_like(sorted_loss), sorted_loss)
lowest_loss.append(sorted_loss)
lowest_loss = tf.add_n(lowest_loss)
pruned_loss = tf.scalar_mul(cfg.TRAIN.pruned_alpha, lowest_loss)
return reg_loss, cla_loss, regularization_losses, pruned_loss, endpoint
def average_gradients(self, tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
try:
g = tf.clip_by_value(g, -5., 5.)
expanded_g = tf.expand_dims(g, 0)
                except Exception:
                    # Skip variables that received no gradient on this tower
                    # instead of silently re-appending the previous tensor.
                    print(_)
                    continue
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def build(self):
with self._graph.as_default(), tf.device('/cpu:0'):
# Create an optimizer that performs gradient descent.
opt, lr, global_step = self.get_opt()
##some global placeholder
L2_reg = tf.placeholder(tf.float32, name="L2_reg")
training = tf.placeholder(tf.bool, name="training_flag")
total_loss_to_show = 0.
images_place_holder_list = []
labels_place_holder_list = []
boxes_place_holder_list = []
weights_initializer = slim.xavier_initializer()
biases_initializer = tf.constant_initializer(0.)
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(L2_reg)
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i in range(cfg.TRAIN.num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % (i)) as scope:
with slim.arg_scope([slim.model_variable, slim.variable], device='/cpu:0'):
images_ = tf.placeholder(tf.float32, [None, None, None, 3], name="images")
boxes_ = tf.placeholder(tf.float32, [cfg.TRAIN.batch_size, None, 4], name="input_boxes")
labels_ = tf.placeholder(tf.int64, [cfg.TRAIN.batch_size, None], name="input_labels")
###total anchor
images_place_holder_list.append(images_)
labels_place_holder_list.append(labels_)
boxes_place_holder_list.append(boxes_)
with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
slim.conv2d_transpose, slim.separable_conv2d,
slim.fully_connected],
weights_regularizer=weights_regularizer,
biases_regularizer=biases_regularizer,
weights_initializer=weights_initializer,
biases_initializer=biases_initializer):
reg_loss, cla_loss, l2_loss, pruned_loss, endpoint = self.tower_loss(
scope, images_, labels_, boxes_, L2_reg, training)
##use muti gpu ,large batch
if i == cfg.TRAIN.num_gpu - 1:
total_loss = tf.add_n([reg_loss, cla_loss, l2_loss, pruned_loss])
else:
total_loss = tf.add_n([reg_loss, cla_loss, pruned_loss])
# raise Exception('no reg loss!!!!')
total_loss_to_show += total_loss
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
##when use batchnorm, updates operations only from the
## final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
bn_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope)
# Retain the summaries from the final tower.
self.summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
###freeze some params
train_var_list = self.frozen()
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(total_loss, train_var_list)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = self.average_gradients(tower_grads)
# Add a summary to track the learning rate.
self.add_summary(tf.summary.scalar('learning_rate', lr))
self.add_summary(tf.summary.scalar('total_loss', total_loss_to_show))
self.add_summary(tf.summary.scalar('loc_loss', reg_loss))
self.add_summary(tf.summary.scalar('cla_loss', cla_loss))
self.add_summary(tf.summary.scalar('l2_loss', l2_loss))
# # Add histograms for gradients.
# for grad, var in grads:
# if grad is not None:
# self.add_summary(tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
self.add_summary(tf.summary.histogram(var.op.name, var))
if self.ema_weights:
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
0.9, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op, *bn_update_ops)
else:
train_op = tf.group(apply_gradient_op, *bn_update_ops)
###set inputs and ouputs
self.inputs = [images_place_holder_list,
boxes_place_holder_list,
labels_place_holder_list,
L2_reg,
training]
self.outputs = [train_op,
total_loss_to_show,
reg_loss,
cla_loss,
l2_loss,
pruned_loss,
endpoint,
lr]
self.val_outputs = [total_loss_to_show,
reg_loss,
cla_loss,
l2_loss,
pruned_loss,
endpoint,
lr]
tf_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
tf_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=tf_config)
##init all variables
init = tf.global_variables_initializer()
self.sess.run(init)
######
def calculate_flops(self, endpoint):
flops = 0
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var= [i for i in var if not 'ssdout' in i.name]
var = [i for i in var if 'weights' in i.name]
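        # Per-conv-layer FLOPs ~= H_out * W_out * k_h * k_w * C_in * C_out;
        # the first layer sees 3 input channels, later layers chain the
        # previous endpoint's channel count.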
for i in range(len(endpoint)):
if i ==0:
flops += endpoint[i].shape[1] *var[i].shape[0]*var[i].shape[1] * endpoint[i].shape[2] * 3 * endpoint[i].shape[3]
else:
flops += endpoint[i].shape[1]*var[i].shape[0]*var[i].shape[1]*endpoint[i].shape[2]*endpoint[i-1].shape[3]*endpoint[i].shape[3]
# print(flops)
return flops
def calculate_avgpruned_flops(self, endpoint):
flops = 0
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var= [i for i in var if not 'ssdout' in i.name]
var = [i for i in var if 'weights' in i.name]
# channel
# [print(np.shape(i)) for i in endpoint]
# if len(endpoint[0].shape) == 1:
# avg_channel = endpoint
# else:
avg_channel = [np.amax(i, axis=(0, 1, 2)) for i in endpoint]
# avg_channel = [np.amax(i, axis=(0, 1, 2)) for i in endpoint]
# [print(np.shape(i)) for i in avg_channel]
avg_endpoint = [np.count_nonzero(i) for i in avg_channel]
# og_channel = [np.shape(i)[1] for i in og_channel]
# print(og_channel)
# print(avg_endpoint)
# avg_channel = [np.average(i) for i in max_endpoint]
# print(avg_channel)
# for j in range(avg_endpoint[0].shape[0]):
for i in range(len(avg_endpoint)):
if i ==0:
flops += endpoint[i].shape[1]*endpoint[i].shape[2]*var[i].shape[0]*var[i].shape[1]*3*avg_endpoint[i]
else:
flops += endpoint[i].shape[1]*endpoint[i].shape[2]*var[i].shape[0]*var[i].shape[1]*avg_endpoint[i-1]*avg_endpoint[i]
# print(int(flops)/127822233600 *100)
return int(flops)/127822233600 *100, avg_endpoint
def calculate_pruned_parameters(self,endpoint):
num_param = 0
og_param = 0
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var = [i for i in var if not 'ssdout' in i.name]
var = [i for i in var if 'weights' in i.name]
# print(var)
# channel
# if len(endpoint[0].shape) ==1:
# avg_channel = endpoint
# else:
avg_channel = [np.amax(i, axis=(0, 1, 2)) for i in endpoint]
# [print(np.shape(i)[0]) for i in avg_channel]
avg_endpoint = [np.count_nonzero(i) for i in avg_channel]
# print(avg_endpoint)
for i in range(len(avg_endpoint)):
if i == 0:
og_param += var[i].shape[0] * var[i].shape[1] * 3 * np.shape(avg_channel[i])[0]
num_param += var[i].shape[0] * var[i].shape[1] * np.shape(avg_channel[i-1])[0] * np.shape(avg_channel[i])[0]
else:
og_param +=var[i].shape[0] * var[i].shape[1] * np.shape(avg_channel[i-1])[0] * np.shape(avg_channel[i])[0]
num_param += var[i].shape[0] * var[i].shape[1] * avg_endpoint[i - 1] * avg_endpoint[i]
return int(num_param)/int(og_param)*100
def train_loop(self):
"""Train faces data for a number of epoch."""
self.build()
self.load_weight()
with self._graph.as_default():
# Create a saver.
self.saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
self.summary_op = tf.summary.merge(self.summaries)
self.summary_writer = tf.summary.FileWriter(cfg.MODEL.model_path, self.sess.graph)
val_loss, val_param, val_mac, val_channel = self._val()
logger.info('==================== Results ====================\n'
'Pruned MAC : %f %%' % (val_mac))
logger.info('=================================================')
self.sess.close()
def _val(self):
logger.info('Evaluating on validation set')
all_total_loss = 0
for step in range(cfg.TRAIN.val_iter):
feed_dict = {}
examples = next(self.val_ds)
for n in range(cfg.TRAIN.num_gpu):
feed_dict[self.inputs[0][n]] = examples[0][n * cfg.TRAIN.batch_size:(n + 1) * cfg.TRAIN.batch_size]
feed_dict[self.inputs[1][n]] = examples[1][n * cfg.TRAIN.batch_size:(n + 1) * cfg.TRAIN.batch_size]
feed_dict[self.inputs[2][n]] = examples[2][n * cfg.TRAIN.batch_size:(n + 1) * cfg.TRAIN.batch_size]
feed_dict[self.inputs[3]] = 0
feed_dict[self.inputs[4]] = False
total_loss_value, reg_loss_value, cla_loss_value, l2_loss_value, pruned_loss_value, endpoint_value, lr_value = \
self.sess.run([*self.val_outputs],
feed_dict=feed_dict)
if step==0:
endpoint_list = endpoint_value
else:
for i in range(len(endpoint_list)):
endpoint_list[i]+= endpoint_value[i]
all_total_loss += total_loss_value
pruned_param = self.calculate_pruned_parameters(endpoint_list)
avg_mac, avg_channel = self.calculate_avgpruned_flops(endpoint_list)
return all_total_loss / cfg.TRAIN.val_iter , pruned_param, avg_mac, avg_channel
def train(self):
self.train_loop()
|
{"hexsha": "7c6053ad852b1a1a7fe78b14192fd02d64f51b55", "size": 20740, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/core/base_trainer/mac_net_work.py", "max_stars_repo_name": "airacid/pruned-face-detector", "max_stars_repo_head_hexsha": "ef587e274ccf87633af653694890eb6712d6b3eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-01T02:39:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T02:39:36.000Z", "max_issues_repo_path": "lib/core/base_trainer/mac_net_work.py", "max_issues_repo_name": "airacid/pruned-face-detector", "max_issues_repo_head_hexsha": "ef587e274ccf87633af653694890eb6712d6b3eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/core/base_trainer/mac_net_work.py", "max_forks_repo_name": "airacid/pruned-face-detector", "max_forks_repo_head_hexsha": "ef587e274ccf87633af653694890eb6712d6b3eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-01T02:39:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T02:39:37.000Z", "avg_line_length": 44.4111349036, "max_line_length": 143, "alphanum_fraction": 0.5352459016, "include": true, "reason": "import numpy", "num_tokens": 4212}
|
from DSPbox import MFCC
import scipy.io.wavfile as wav
import numpy as np
import librosa
rate, signal = wav.read('./Observation.wav')
obser = MFCC(signal, rate)
result = []
for i in range(5):
rate, signal = wav.read('./{:d}.wav'.format(i+1))
compare = MFCC(signal, rate)
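    # Dynamic time warping (DTW): d[x, y] accumulates the minimal alignment
    # cost between the first x observation frames and the first y template
    # frames, using |a - b| as the local distance. The first two loops fill
    # the boundary column and row; the nested loop applies the recurrence
    # d[x, y] = cost + min(d[x-1, y], d[x, y-1], d[x-1, y-1]).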
d = np.zeros((len(obser)+1, len(compare)+1))
for x in range(len(obser)):
d[x+1, 1] = abs(compare[0] - obser[x]) + d[x, 1]
for y in range(len(compare)):
d[1, y+1] = abs(compare[y] - obser[0]) + d[1, y]
for y in range(2, len(compare)+1):
for x in range(2, len(obser)+1):
d[x, y] = abs(compare[y-1] - obser[x-1]) + min(d[x-1, y], d[x, y-1], d[x-1, y-1])
result.append(d[-1, -1])
print(i+1, "->", d[-1, -1])
print(i+1, "->", librosa.dtw(obser,compare)[0][-1, -1], "(by librosa)")
print("最相似:", np.argmin(result)+1)
|
{"hexsha": "7812175b4f8bfa2665340fb47d9aabe1ed398ae7", "size": 855, "ext": "py", "lang": "Python", "max_stars_repo_path": "HW16/HW16.py", "max_stars_repo_name": "JasonLiTW/assignment-speech-recognition", "max_stars_repo_head_hexsha": "794907a417d054477812c1f50695312601eae929", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-07-25T20:44:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-02T14:39:55.000Z", "max_issues_repo_path": "HW16/HW16.py", "max_issues_repo_name": "JasonLiTW/assignment-speech-recognition", "max_issues_repo_head_hexsha": "794907a417d054477812c1f50695312601eae929", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HW16/HW16.py", "max_forks_repo_name": "JasonLiTW/assignment-speech-recognition", "max_forks_repo_head_hexsha": "794907a417d054477812c1f50695312601eae929", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-07-27T11:15:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-02T14:39:57.000Z", "avg_line_length": 35.625, "max_line_length": 93, "alphanum_fraction": 0.5590643275, "include": true, "reason": "import numpy,import scipy", "num_tokens": 309}
|
"""Custom Transform """
from typing import Optional, Union, Tuple
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, bboxes):
for t in self.transforms:
img, bboxes = t(img, bboxes)
return img, bboxes
class ToTensor:
def __call__(
self,
image: Image.Image,
bboxes: np.ndarray
):
img = np.array(image)
img = torch.from_numpy(
np.moveaxis(img / (255.0 if img.dtype == np.uint8 else 1), -1, 0).astype(np.float32)
        )
        return img, bboxes
def __repr__(self):
return self.__class__.__name__ + '()'
class Normalize(nn.Module):
def __init__(self, mean: tuple, std: tuple, inplace=False):
super().__init__()
self.mean = mean
self.std = std
self.inplace = inplace
def forward(
self,
img: Union[torch.Tensor, np.ndarray],
bboxes: Union[torch.Tensor, np.ndarray]
) -> Tuple[Union[torch.Tensor, np.ndarray], Union[torch.Tensor, np.ndarray]]:
return F.normalize(img, self.mean, self.std, self.inplace), bboxes
class Resize(nn.Module):
def __init__(
self,
        size: Tuple[int, int] = (448, 448),
interpolation: Optional[int] = Image.BILINEAR,
):
super().__init__()
self.size = size
self.interpolation = interpolation
def forward(
self,
img: Union[torch.Tensor, np.ndarray],
bboxes: Union[torch.Tensor, np.ndarray]
) -> Tuple[Union[torch.Tensor, np.ndarray], Union[torch.Tensor, np.ndarray]]:
width, height = self.size
old_width, old_height = img.size
scale_x = width / old_width
scale_y = height / old_height
img = F.resize(img, (height, width))
if isinstance(bboxes, torch.Tensor):
bboxes[..., 0] = torch.round(scale_x * bboxes[..., 0])
bboxes[..., 1] = torch.round(scale_y * bboxes[..., 1])
bboxes[..., 2] = torch.round(scale_x * bboxes[..., 2])
bboxes[..., 3] = torch.round(scale_y * bboxes[..., 3])
elif isinstance(bboxes, np.ndarray):
bboxes[..., 0] = np.rint(scale_x * bboxes[..., 0])
bboxes[..., 1] = np.rint(scale_y * bboxes[..., 1])
bboxes[..., 2] = np.rint(scale_x * bboxes[..., 2])
bboxes[..., 3] = np.rint(scale_y * bboxes[..., 3])
return img, bboxes
class RandomHorizontalFlip(nn.Module):
def __init__(self, p: float = 0.5):
super().__init__()
self.p = p
def forward(
self,
img: Union[torch.Tensor, np.ndarray], bboxes: Union[torch.Tensor, np.ndarray]
) -> Tuple[Union[torch.Tensor, np.ndarray], Union[torch.Tensor, np.ndarray]]:
width, _ = img.size
if torch.rand(1) < self.p:
img = F.hflip(img)
bboxes[..., 0] = width - bboxes[..., 0] - 1
bboxes[..., 2] = width - bboxes[..., 2] - 1
return img, bboxes
class RandomVerticalFlip(nn.Module):
def __init__(self, p: float = 0.5):
super().__init__()
self.p = p
def forward(
self,
img: Union[torch.Tensor, np.ndarray], bboxes: Union[torch.Tensor, np.ndarray]
) -> Tuple[Union[torch.Tensor, np.ndarray], Union[torch.Tensor, np.ndarray]]:
_, height = img.size
if torch.rand(1) < self.p:
img = F.vflip(img)
            bboxes[..., 1] = height - bboxes[..., 1] - 1
            bboxes[..., 3] = height - bboxes[..., 3] - 1
return img, bboxes
|
{"hexsha": "4e8f097e5ac4c7cb0044cc4b37048a6a342e97d0", "size": 3807, "ext": "py", "lang": "Python", "max_stars_repo_path": "yolo/data/transforms/transforms.py", "max_stars_repo_name": "DavianYang/yolo.ai", "max_stars_repo_head_hexsha": "0856d4f1e84428667046ee27270ff1bf742e658a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-21T22:14:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T22:14:46.000Z", "max_issues_repo_path": "yolo/data/transforms/transforms.py", "max_issues_repo_name": "DavianYang/yolo.ai", "max_issues_repo_head_hexsha": "0856d4f1e84428667046ee27270ff1bf742e658a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-21T12:12:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-21T12:12:52.000Z", "max_forks_repo_path": "yolo/data/transforms/transforms.py", "max_forks_repo_name": "DavianYang/yolo.ai", "max_forks_repo_head_hexsha": "0856d4f1e84428667046ee27270ff1bf742e658a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7016129032, "max_line_length": 96, "alphanum_fraction": 0.5442605726, "include": true, "reason": "import numpy", "num_tokens": 975}
|
\documentclass[full]{subfiles}
\ifthenelse{\value{singlefile} = 1}
{\title{SUBJECT -- Lecture 1}
\date{31st July 2013}}
{}
\begin{document}\ifthenelse{\value{singlefile} = 1}{\maketitle}{\chapter{Lecture 1}}
\section*{Subject}
\end{document}
|
{"hexsha": "1c5ef1998c64401500f553f4645a45e40662e7ec", "size": 245, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "LectureNotes/Lecture1.tex", "max_stars_repo_name": "harrisony/uni-latex-template", "max_stars_repo_head_hexsha": "b7051ee69743c5923d426b6e5a8132d409f17844", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LectureNotes/Lecture1.tex", "max_issues_repo_name": "harrisony/uni-latex-template", "max_issues_repo_head_hexsha": "b7051ee69743c5923d426b6e5a8132d409f17844", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LectureNotes/Lecture1.tex", "max_forks_repo_name": "harrisony/uni-latex-template", "max_forks_repo_head_hexsha": "b7051ee69743c5923d426b6e5a8132d409f17844", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.4166666667, "max_line_length": 84, "alphanum_fraction": 0.7142857143, "num_tokens": 86}
|
from numpy import linspace, sin
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool, DragZoom
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, List
from traitsui.api import Item, View, CheckListEditor
class ToolChooserExample(HasTraits):
plot = Instance(Plot)
tools = List(editor=CheckListEditor(values = ["PanTool", "ZoomTool", "DragZoom"]))
traits_view = View(Item("tools", label="Tools", style="custom"),
Item('plot', editor=ComponentEditor(), show_label=False),
width=800, height=600, resizable=True,
title="Tool Chooser")
def __init__(self):
# Create the data and the PlotData object
x = linspace(-14, 14, 500)
y = sin(x) * x**3
plotdata = ArrayPlotData(x = x, y = y)
# Create a Plot and associate it with the PlotData
plot = Plot(plotdata)
# Create a line plot in the Plot
plot.plot(("x", "y"), type="line", color="blue")
self.plot = plot
def _tools_changed(self):
classes = [eval(class_name) for class_name in self.tools]
        # Remove all tools from the plot; iterate over a copy since we
        # mutate the list while looping.
        plot_tools = self.plot.tools
        for tool in list(plot_tools):
            plot_tools.remove(tool)
# Create new instances for the selected tool classes
for cls in classes:
self.plot.tools.append(cls(self.plot))
#===============================================================================
# demo object that is used by the demo.py application.
#===============================================================================
demo = ToolChooserExample()
if __name__ == "__main__":
demo.configure_traits()
|
{"hexsha": "16c79b50472f65f244dde738a7fc74ac85bc99b1", "size": 1779, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/tutorials/scipy2008/tool_chooser.py", "max_stars_repo_name": "martinRenou/chaco", "max_stars_repo_head_hexsha": "1888da3ecee89f9b2d11900cda9333b32fc5e89a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-09-17T17:32:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T13:04:43.000Z", "max_issues_repo_path": "examples/tutorials/scipy2008/tool_chooser.py", "max_issues_repo_name": "martinRenou/chaco", "max_issues_repo_head_hexsha": "1888da3ecee89f9b2d11900cda9333b32fc5e89a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/tutorials/scipy2008/tool_chooser.py", "max_forks_repo_name": "martinRenou/chaco", "max_forks_repo_head_hexsha": "1888da3ecee89f9b2d11900cda9333b32fc5e89a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-05-17T16:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-23T09:23:42.000Z", "avg_line_length": 34.8823529412, "max_line_length": 86, "alphanum_fraction": 0.5834738617, "include": true, "reason": "from numpy", "num_tokens": 377}
|
#!/usr/bin/python
# Author: Srikanth Malla
# Date: 28 Aug, 2020
# project lidar points in top view image
import matplotlib.pyplot as plt
import numpy as np
import glob
from joblib import Parallel, delayed
import multiprocessing
def lidar_top_view(lidar_file):
lidar_points = np.load(lidar_file)
fig = plt.figure(frameon=False)
DPI = fig.get_dpi()
fig.set_size_inches(1080.0/float(DPI),1080.0/float(DPI))
ax = fig.add_subplot(111, xticks=[], yticks=[])
height = lidar_points[:,2]
intensity = lidar_points[:,3]
######## style 1: combined height and intensity map ########
# height = np.interp(height, (height.min(), height.max()), (0, 1))
# # height = np.clip(height, 0, 1)
# height = np.expand_dims(height, axis=1)
# intensity = np.expand_dims(intensity, axis=1)
# zeros = np.zeros_like(height)
# colors = np.hstack((zeros, height, intensity))
# ax.scatter(x = lidar_points[:,0], y=lidar_points[:,1], s = 0.01, c=colors)
    ######## style 2: using height to visualize ground and obstacles (precog paper style) ########
    gray = [153/255, 153/255, 153/255]
    red = [228/255, 27/255, 28/255]
    ground_points = lidar_points[height < 0.7, :]  # meters threshold
    non_ground_points = lidar_points[height >= 0.7, :]  # meters threshold
ax.scatter(x = ground_points[:,0], y=ground_points[:,1], s = 0.01, c=np.tile(gray,(ground_points.shape[0],1)))
ax.scatter(x = non_ground_points[:,0], y=non_ground_points[:,1], s = 0.01, c=np.tile(red,(non_ground_points.shape[0],1)))
### plot adjustments
ax.set_xlim(-60,60)
ax.set_ylim(-60,60)
ax.axis('off')
fig.subplots_adjust(bottom = 0)
fig.subplots_adjust(top = 1)
fig.subplots_adjust(right = 1)
fig.subplots_adjust(left = 0)
ax.axis('on')
# plt.show()
out_file = lidar_file.replace("pointclouds", "lidar_top")
out_file = out_file.replace("npy", "png")
print(out_file)
fig.savefig(out_file)
plt.close('all')
if __name__ == '__main__':
waymo_root = "/home/smalla/datasets/waymo_data/outputs/"
waymo_files = glob.glob(waymo_root+"*/pointclouds/*.npy")
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores)(delayed(lidar_top_view)(file) for file in waymo_files)
|
{"hexsha": "546ccae43bd90b6d6af028f3bf2c69adfdab00f7", "size": 2186, "ext": "py", "lang": "Python", "max_stars_repo_path": "top_view_lidar.py", "max_stars_repo_name": "srikanthmalla/waymo_data_utils", "max_stars_repo_head_hexsha": "26588bb2cfc04dd1034d122ae8b14d3c2b3920c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-07-27T08:01:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-08T06:37:27.000Z", "max_issues_repo_path": "top_view_lidar.py", "max_issues_repo_name": "srikanthmalla/waymo_data_utils", "max_issues_repo_head_hexsha": "26588bb2cfc04dd1034d122ae8b14d3c2b3920c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "top_view_lidar.py", "max_forks_repo_name": "srikanthmalla/waymo_data_utils", "max_forks_repo_head_hexsha": "26588bb2cfc04dd1034d122ae8b14d3c2b3920c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-15T12:52:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-25T15:48:36.000Z", "avg_line_length": 35.8360655738, "max_line_length": 123, "alphanum_fraction": 0.6898444648, "include": true, "reason": "import numpy", "num_tokens": 664}
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
class FeatureSelection:
classifiers =[
BernoulliNB(),
MultinomialNB(),
GaussianNB(),
DecisionTreeClassifier(),
RandomForestClassifier(n_estimators=10),
OneVsRestClassifier(LinearSVC(random_state=0)),
OneVsRestClassifier(LogisticRegression()),
OneVsRestClassifier(SGDClassifier()),
OneVsRestClassifier(RidgeClassifier()),
]
def univariateFeatureSelection(self,X,y,nfolds,clf,nfeats,clfname,scoreFunc):
kfold = KFold(X.shape[0],n_folds=nfolds)
acc = 0
i = 0
print("%s (#-features = %d).... %"% (clfname,nfeats))
for train,test in kfold:
i += 1
X_train,X_test,y_train,y_test = X[train],X[test],y[train],y[test]
clf.fit(X_train,y_train)
|
{"hexsha": "dc8aa13eea7f7ac5abd2535aec196da176f8a5e3", "size": 1704, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/features/build_features.py", "max_stars_repo_name": "satishkt/lending_club_classifier", "max_stars_repo_head_hexsha": "0b09d7dc7ff2dd998afe5ead5486e8156bc559f7", "max_stars_repo_licenses": ["FTL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/features/build_features.py", "max_issues_repo_name": "satishkt/lending_club_classifier", "max_issues_repo_head_hexsha": "0b09d7dc7ff2dd998afe5ead5486e8156bc559f7", "max_issues_repo_licenses": ["FTL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/features/build_features.py", "max_forks_repo_name": "satishkt/lending_club_classifier", "max_forks_repo_head_hexsha": "0b09d7dc7ff2dd998afe5ead5486e8156bc559f7", "max_forks_repo_licenses": ["FTL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1509433962, "max_line_length": 81, "alphanum_fraction": 0.7535211268, "include": true, "reason": "import numpy", "num_tokens": 388}
|
from pathlib import Path
import cv2
import numpy as np
from tqdm import tqdm
pascal_classes_id = {
1: 'aeroplane',
2: 'bicycle',
4: 'boat',
5: 'bottle',
6: 'bus',
7: 'car',
9: 'chair',
11: 'diningtable',
14: 'motorbike',
18: 'sofa',
19: 'train',
20: 'tvmonitor'
}
def process_Hariharan_masks(dataset_dir, results_dir):
    for pascal_class_id, pascal_class in pascal_classes_id.items():
print(pascal_class)
result_dir = results_dir / f'{pascal_class}'
for sub_dir in ['masks_hariharan']:
curr_dir = result_dir / sub_dir
if not curr_dir.is_dir():
curr_dir.mkdir(parents=True, exist_ok=True)
with open(str(dataset_dir / 'annotations' / f'pascal3d_{pascal_class}_keypoints.txt'), 'r') as fp:
pascal_annotations = fp.readlines()
for i, line in enumerate(pascal_annotations):
line = line.replace('\n', '')
pascal_annotations[i] = line.split(',')
with open(str(dataset_dir / 'annotations' / f'pascal3d_{pascal_class}_difficulty.txt'), 'r') as fp:
pascal_difficulty_annotations = fp.readlines()
for i, line in enumerate(pascal_difficulty_annotations):
line = line.replace('\n', '')
pascal_difficulty_annotations[i] = line.split(',')
pascal_difficulty_annotations = np.asarray(pascal_difficulty_annotations)
for n, annot in tqdm(enumerate(pascal_annotations)):
if annot[1] != 'pascal':
continue
occluded = int(annot[25])
truncated = int(annot[26])
difficult = int(pascal_difficulty_annotations[n, 1])
if occluded or truncated or difficult:
continue
mask_path = dataset_dir / 'masks_hariharan' / str(pascal_class_id) # type: Path
masks = sorted(mask_path.glob('%s__???.png' % annot[2][:-4]))
if len(masks) == 0:
print(f'Masks not found for image "{annot[2]}"!')
continue
masks = [cv2.imread(str(x), cv2.IMREAD_GRAYSCALE) for x in masks]
img_h = int(annot[3])
img_w = int(annot[4])
# object bbox
bbox = [int(annot[7]), int(annot[8]), int(annot[9]), int(annot[10])]
if bbox[0] < 0:
bbox[0] = 0
if bbox[1] < 0:
bbox[1] = 0
if bbox[2] >= img_w:
bbox[2] = img_w - 1
if bbox[3] >= img_h:
bbox[3] = img_h - 1
# compute and save object mask
bbox_pixels = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
found_masks = []
diff_masks = []
for mask in masks:
sum_mask = np.sum(mask) / 255
sum_mask_bbox = np.sum(mask[bbox[1]:bbox[3], bbox[0]:bbox[2]]) / 255
if sum_mask * 0.95 < sum_mask_bbox < sum_mask * 1.05 and sum_mask > bbox_pixels * 0.2:
found_masks.append(mask)
diff_masks.append(sum_mask - sum_mask_bbox)
if len(found_masks) > 0:
assert not (result_dir / 'masks_hariharan' / f'{n:05}.png').is_file()
cv2.imwrite(str(result_dir / 'masks_hariharan' / f'{n:05}.png'), found_masks[np.argmin(np.abs(diff_masks))])
if not (result_dir / 'masks_hariharan' / f'{n:05}.png').is_file():
print('No mask found for image', annot[2], 'with bbox', bbox, 'annotation id', n)
|
{"hexsha": "42bd08c69de8b389d98c47941d07e61d16860faa", "size": 3565, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/pascal3d/preprocess_VOC_hariharan_masks.py", "max_stars_repo_name": "aimagelab/MCMR", "max_stars_repo_head_hexsha": "eb3556bffebc734c19e7f3e39dcf018ba28c63b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-10-11T12:48:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T02:00:00.000Z", "max_issues_repo_path": "datasets/pascal3d/preprocess_VOC_hariharan_masks.py", "max_issues_repo_name": "aimagelab/MCMR", "max_issues_repo_head_hexsha": "eb3556bffebc734c19e7f3e39dcf018ba28c63b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-19T00:47:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-23T14:52:45.000Z", "max_forks_repo_path": "datasets/pascal3d/preprocess_VOC_hariharan_masks.py", "max_forks_repo_name": "aimagelab/MCMR", "max_forks_repo_head_hexsha": "eb3556bffebc734c19e7f3e39dcf018ba28c63b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-08T14:53:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T14:53:54.000Z", "avg_line_length": 37.9255319149, "max_line_length": 124, "alphanum_fraction": 0.5464235624, "include": true, "reason": "import numpy", "num_tokens": 944}
|
r"""
Emulator
--------------
Precomputed synthetic spectral models are awesome but imperfect and rigid. Here we clone the most prominent spectral lines and continuum appearance of synthetic spectral models to turn them into tunable, flexible, semi-empirical models. We can ultimately learn the properties of the pre-computed models with a neural network training loop, and then transfer those weights to real data, where a second transfer-learning training step can take place. The spectrum has :math:`N_{pix} \sim 300,000` pixels and :math:`N_{lines} \sim 5000` spectral lines. The number of lines is set by the `prominence=` kwarg: lower produces more lines and higher (up to about 0.3) produces fewer lines.
PhoenixEmulator
###############
"""
import math
import torch
from torch import nn
import numpy as np
from scipy.signal import find_peaks, peak_prominences, peak_widths
from tqdm import tqdm
class PhoenixEmulator(nn.Module):
r"""
A PyTorch layer that clones precomputed synthetic spectra
    wl_native (float vector): The native model wavelength grid (Angstroms)
    flux_native (float vector): The native model flux (normalized)
    prominence (float scalar): The line-detection threshold (default 0.03)
Currently hardcoded to assume your PHOENIX grid is stored at: ~/libraries/raw/PHOENIX/
"""
def __init__(self, wl_native, flux_native, prominence=0.03):
super().__init__()
# Read in the synthetic spectra at native resolution
self.wl_native = torch.tensor(wl_native)
self.flux_native = torch.tensor(flux_native)
(
lam_centers,
amplitudes,
widths_angstroms,
) = self.detect_lines(self.wl_native, self.flux_native, prominence=prominence)
self.n_pix = len(wl_native)
line_buffer = 30 # Angstroms
self.wl_min = wl_native.min()
self.wl_max = wl_native.max()
## Set up "active area", where the region-of-interest is:
active_buffer = 60 # Angstroms
active_lower, active_upper = (
self.wl_min + active_buffer,
self.wl_max - active_buffer,
)
active_mask = (wl_native > active_lower) & (wl_native < active_upper)
self.active_mask = torch.tensor(active_mask)
# Set up line threshold, where lines are computed outside the active area
line_threshold_lower, line_threshold_upper = (
self.wl_min + line_buffer,
self.wl_max - line_buffer,
)
# Restrict the lines to the active region plus 30 A buffer region
mask = (lam_centers > line_threshold_lower) & (
lam_centers < line_threshold_upper
)
lam_centers = lam_centers[mask]
amplitudes = amplitudes[mask]
widths_angstroms = widths_angstroms[mask]
self.n_lines = len(lam_centers)
# Experimentally determined scale factor tweaks
amp_tweak = 0.14
sigma_width_tweak = 1.28
gamma_width_tweak = 1.52
self.amplitudes = nn.Parameter(
torch.log(amplitudes * amp_tweak).clone().detach().requires_grad_(True)
)
self.sigma_widths = nn.Parameter(
            torch.log(widths_angstroms / math.sqrt(2) * sigma_width_tweak)
.clone()
.detach()
.requires_grad_(True)
)
self.gamma_widths = nn.Parameter(
            torch.log(widths_angstroms / math.sqrt(2) * gamma_width_tweak)
.clone()
.detach()
.requires_grad_(True)
)
# Fix the wavelength centers as gospel for now.
self.lam_centers = nn.Parameter(
lam_centers.clone().detach().requires_grad_(False)
)
self.a_coeff = nn.Parameter(
torch.tensor(1.0, requires_grad=False, dtype=torch.float64)
)
self.b_coeff = nn.Parameter(
torch.tensor(0.0, requires_grad=False, dtype=torch.float64)
)
self.c_coeff = nn.Parameter(
torch.tensor(0.0, requires_grad=False, dtype=torch.float64)
)
def forward(self, wl):
"""The forward pass of the spectral model
Returns:
(torch.tensor): the 1D generative spectral model destined for backpropagation parameter tuning
"""
# return self.product_of_lorentzian_model(wl)
return self.product_of_pseudovoigt_model(wl)
def product_of_lorentzian_model(self, wl):
"""Return the Lorentzian-only forward model, modulated by Blackbody and slopes"""
        # lorentzian_line takes (center, width, wavelengths); the amplitude
        # scales each profile outside the call, as in the pseudo-Voigt model.
        net_spectrum = (
            1
            - torch.exp(self.amplitudes).unsqueeze(1)
            * self.lorentzian_line(
                self.lam_centers.unsqueeze(1),
                torch.exp(self.sigma_widths).unsqueeze(1),
                wl.unsqueeze(0),
            )
        ).prod(0)
wl_normed = (wl - 10_500.0) / 2500.0
modulation = (
self.a_coeff + self.b_coeff * wl_normed + self.c_coeff * wl_normed ** 2
)
return net_spectrum * modulation
def product_of_pseudovoigt_model(self, wl):
"""Return the PseudoVoight forward model"""
net_spectrum = (1 - self.pseudo_voigt_profiles(wl)).prod(0)
wl_normed = (wl - 10_500.0) / 2500.0
modulation = (
self.a_coeff + self.b_coeff * wl_normed + self.c_coeff * wl_normed ** 2
)
return net_spectrum * modulation
def detect_lines(self, wl_native, flux_native, prominence=0.03):
"""Identify the spectral lines in the native model
Args:
wl_native (torch.tensor vector): The 1D vector of native model wavelengths (Angstroms)
            flux_native (torch.tensor vector): The 1D vector of native model fluxes (Normalized)
            prominence (float scalar): The peak-finding prominence threshold passed to scipy
        Returns:
            (tuple of tensors): The wavelength centers, prominences, and widths for all ID'ed spectral lines
"""
peaks, _ = find_peaks(-flux_native, distance=4, prominence=prominence)
prominence_data = peak_prominences(-flux_native, peaks)
width_data = peak_widths(-flux_native, peaks, prominence_data=prominence_data)
lam_centers = wl_native[peaks]
prominences = torch.tensor(prominence_data[0])
widths = width_data[0]
d_lam = np.diff(wl_native)[peaks]
# Convert FWHM in pixels to Gaussian sigma in Angstroms
widths_angs = torch.tensor(widths * d_lam / 2.355)
return (lam_centers, prominences, widths_angs)
def lorentzian_line(self, lam_center, width, wavelengths):
"""Return a Lorentzian line, given properties"""
return 1 / 3.141592654 * width / (width ** 2 + (wavelengths - lam_center) ** 2)
def gaussian_line(self, lam_center, width, wavelengths):
"""Return a normalized Gaussian line, given properties"""
return (
1.0
/ (width * 2.5066)
* torch.exp(-0.5 * ((wavelengths - lam_center) / width) ** 2)
)
def _compute_eta(self, fwhm_L, fwhm):
"""Compute the eta parameter for pseudo Voigt"""
f_ratio = fwhm_L / fwhm
return 1.36603 * f_ratio - 0.47719 * f_ratio ** 2 + 0.11116 * f_ratio ** 3
def _compute_fwhm(self, fwhm_L, fwhm_G):
"""Compute the fwhm for pseudo Voigt using the approximation:
:math:`f = [f_G^5 + 2.69269 f_G^4 f_L + 2.42843 f_G^3 f_L^2 + 4.47163 f_G^2 f_L^3 + 0.07842 f_G f_L^4 + f_L^5]^{1/5}`
"""
return (
fwhm_G ** 5
+ 2.69269 * fwhm_G ** 4 * fwhm_L ** 1
+ 2.42843 * fwhm_G ** 3 * fwhm_L ** 2
+ 4.47163 * fwhm_G ** 2 * fwhm_L ** 3
+ 0.07842 * fwhm_G ** 1 * fwhm_L ** 4
+ fwhm_L ** 5
) ** (1 / 5)
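    # Sanity check for the approximation above: with fwhm_L = 0 it reduces to
    # fwhm_G, and with fwhm_G = 0 to fwhm_L, so the pure-Gaussian and
    # pure-Lorentzian limits are recovered exactly.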
def pseudo_voigt_profiles(self, wavelengths):
"""Compute the pseudo Voigt Profile, much faster than the full Voigt profile"""
fwhm_G = 2.3548 * torch.exp(self.sigma_widths).unsqueeze(1)
fwhm_L = 2.0 * torch.exp(self.gamma_widths).unsqueeze(1)
fwhm = self._compute_fwhm(fwhm_L, fwhm_G)
eta = self._compute_eta(fwhm_L, fwhm)
return torch.exp(self.amplitudes).unsqueeze(1) * (
eta
* self.lorentzian_line(
self.lam_centers.unsqueeze(1),
torch.exp(self.gamma_widths).unsqueeze(1),
wavelengths.unsqueeze(0),
)
+ (1 - eta)
* self.gaussian_line(
self.lam_centers.unsqueeze(1),
torch.exp(self.sigma_widths).unsqueeze(1),
wavelengths.unsqueeze(0),
)
)
class SparsePhoenixEmulator(PhoenixEmulator):
r"""
A sparse implementation of the PhoenixEmulator
wl_native (float vector): The input wavelength
flux_native (float vector): The native flux
prominence (int scalar): The threshold for detecting lines
device (Torch Device or str): GPU or CPU?
wing_cut_pixels (int scalar): the number of pixels centered on the line center
to evaluate in the sparse implementation, default: 1000 pixels
"""
def __init__(
self, wl_native, flux_native, prominence=0.01, device=None, wing_cut_pixels=None
):
super().__init__(wl_native, flux_native, prominence=prominence)
if device is None:
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
device = torch.device(device)
## Define the wing cut
# Currently defined in *pixels*
if wing_cut_pixels is None:
wing_cut_pixels = 1000
else:
wing_cut_pixels = int(wing_cut_pixels)
lines = self.lam_centers.detach().cpu().numpy()
wl_native = self.wl_native.cpu().numpy()
print("Initializing a sparse model with {:} spectral lines".format(len(lines)))
# Find the index position of each spectral line
center_indices = np.searchsorted(wl_native, lines)
# From that, determine the beginning and ending indices
zero_indices = center_indices - (wing_cut_pixels // 2)
too_low = zero_indices < 0
zero_indices[too_low] = 0
end_indices = zero_indices + wing_cut_pixels
too_high = end_indices > self.n_pix
zero_indices[too_high] = len(wl_native) - wing_cut_pixels
end_indices[too_high] = len(wl_native)
# Make a 2D array of the indices
indices_2D = np.linspace(
zero_indices, end_indices, num=wing_cut_pixels, endpoint=True
)
self.indices_2D = torch.tensor(indices_2D.T, dtype=torch.long, device=device)
self.indices_1D = self.indices_2D.reshape(-1)
self.indices = self.indices_1D.unsqueeze(0)
self.wl_2D = self.wl_native.to(device)[self.indices_2D]
self.wl_1D = self.wl_2D.reshape(-1)
self.active_mask = self.active_mask.to(device)
def forward(self):
"""The forward pass of the sparse implementation--- no wavelengths needed!
Returns:
(torch.tensor): the 1D generative spectral model destined for backpropagation parameter tuning
"""
# return self.sparse_gaussian_model()
return self.sparse_pseudo_Voigt_model()[self.active_mask]
def sparse_gaussian_model(self):
"""A sparse Gaussian-only model
Returns:
(torch.tensor): the 1D generative spectral model destined for backpropagation parameter tuning
"""
flux_2D = torch.exp(self.amplitudes).unsqueeze(1) * self.gaussian_line(
self.lam_centers.unsqueeze(1),
torch.exp(self.sigma_widths).unsqueeze(1),
self.wl_2D,
)
flux_1D = flux_2D.reshape(-1)
ln_term = torch.log(1 - flux_1D)
sparse_matrix = torch.sparse_coo_tensor(
self.indices, ln_term, size=(self.n_pix,), requires_grad=True
)
result_1D = sparse_matrix.to_dense()
return torch.exp(result_1D)
def sparse_pseudo_Voigt_model(self, RV=0.0):
"""A sparse pseudo-Voigt model
Note:
Almost the same as the base class implementation, may want to refactor
Returns:
(torch.tensor): the 1D generative spectral model destined for backpropagation parameter tuning
"""
fwhm_G = 2.3548 * torch.exp(self.sigma_widths).unsqueeze(1)
fwhm_L = 2.0 * torch.exp(self.gamma_widths).unsqueeze(1)
fwhm = self._compute_fwhm(fwhm_L, fwhm_G)
eta = self._compute_eta(fwhm_L, fwhm)
rv_shifted_centers = self.lam_centers * (1 + RV / 299_792.458)
flux_2D = torch.exp(self.amplitudes).unsqueeze(1) * (
eta
* self.lorentzian_line(
rv_shifted_centers.unsqueeze(1),
torch.exp(self.gamma_widths).unsqueeze(1),
self.wl_2D,
)
+ (1 - eta)
* self.gaussian_line(
rv_shifted_centers.unsqueeze(1),
torch.exp(self.sigma_widths).unsqueeze(1),
self.wl_2D,
)
)
flux_1D = flux_2D.reshape(-1)
ln_term = torch.log(1 - flux_1D)
sparse_matrix = torch.sparse_coo_tensor(
self.indices, ln_term, size=(self.n_pix,), requires_grad=True
)
result_1D = sparse_matrix.to_dense()
return torch.exp(result_1D)
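# Why the log/exp round-trip in the two sparse models works (hedged note):
# each line is treated as a multiplicative transmission (1 - f_i) over its
# wing_cut window, so scattering ln(1 - f_i) into a length-n_pix sparse
# vector and exponentiating the dense sum reproduces prod_i (1 - f_i) per
# pixel. For example, two lines absorbing 10% and 20% at the same pixel
# combine to exp(ln 0.9 + ln 0.8) = 0.72, not the additive 0.70.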
class EchelleModel(nn.Module):
r"""
A Model for Echelle Spectra based on the SparseEmulator
wl_bin_edges (float vector): The input wavelength
device (Torch Device or str): GPU or CPU?
pretrained_emulator (SparsePhoenixEmulator): A pretrained emulator to use for modeling data
"""
def __init__(self, wl_bin_edges, device=None, pretrained_emulator=None):
super().__init__()
if device is None:
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
device = torch.device(device)
self.emulator = pretrained_emulator.to(device)
self.wl_bin_edges = wl_bin_edges
self.median_wl = np.median(wl_bin_edges)
# self.resolving_power = nn.Parameter(
# torch.tensor(45_000.0, requires_grad=True, dtype=torch.float64)
# )
self.ln_sigma_angs = nn.Parameter(
torch.tensor(-2.3, requires_grad=True, dtype=torch.float64)
)
self.ln_vsini = nn.Parameter(
torch.tensor(2.89, requires_grad=True, dtype=torch.float64)
)
self.vsini = nn.Parameter(
torch.tensor(18.0, requires_grad=True, dtype=torch.float64)
)
self.radial_velocity = nn.Parameter(
torch.tensor(0.0, requires_grad=True, dtype=torch.float64)
)
# Make a fine wavelength grid from -4.5 to 4.5 Angstroms for convolution
self.kernel_grid = torch.arange(
-4.5, 4.51, 0.01, dtype=torch.float64, device=device
)
labels = np.searchsorted(wl_bin_edges, self.emulator.wl_native)
indices = torch.tensor(labels)
_idx, vals = torch.unique(indices, return_counts=True)
self.label_spacings = tuple(vals)
def forward(self):
"""The forward pass of the data-based echelle model implementation--- no wavelengths needed!
Returns:
(torch.tensor): the 1D generative spectral model destined for backpropagation parameter tuning
"""
high_res_model = self.emulator.sparse_pseudo_Voigt_model(
RV=self.radial_velocity
)
sigma_angs = 0.01 + torch.exp(self.ln_sigma_angs) # Floor of 0.01 Angstroms
vsini = 0.2 + torch.exp(self.ln_vsini) # Floor of 0.2 km/s
rotationally_broadened = self.rotational_broaden(high_res_model, vsini)
return self.instrumental_broaden(rotationally_broadened, sigma_angs)
def resample_to_data(self, convolved_flux):
"""Resample the high resolution model to the data wavelength sampling"""
vs = torch.split_with_sizes(convolved_flux, self.label_spacings)
resampled_model_flux = torch.tensor([v.mean() for v in vs])
# Discard the first and last bins outside the spectrum extents
return resampled_model_flux[1:-1]
def instrumental_broaden(self, input_flux, sigma_angs):
"""Instrumental broaden the spectrum
sigma_angs (float scalar) The spectral resolution sigma in Angstroms
"""
weights = (
1
/ (sigma_angs * torch.sqrt(torch.tensor(2 * 3.141592654)))
* torch.exp(-1.0 / 2.0 * self.kernel_grid ** 2 / sigma_angs ** 2)
)
output = torch.nn.functional.conv1d(
input_flux.unsqueeze(0).unsqueeze(1),
weights.unsqueeze(0).unsqueeze(1),
padding="same",
)
return output.squeeze()
def rotational_broaden(self, input_flux, vsini):
"""Rotationally broaden the spectrum"""
# Limb-darkening coefficients; with both set to zero the kernel below
# reduces to the uniform-disk rotational profile, proportional to sqrt(1 - x2)
u1 = 0.0
u2 = 0.0
velocity_grid = 299792.458 * self.kernel_grid / self.median_wl
x = velocity_grid / vsini
x2 = x * x
kernel = torch.where(
x2 < 1.0,
3.141592654 / 2.0 * u1 * (1.0 - x2)
- 2.0 / 3.0 * torch.sqrt(1.0 - x2) * (-3.0 + 3.0 * u1 + u2 * 2.0 * u2 * x2),
0.0,
)
weights = kernel / torch.sum(kernel, axis=0)
output = torch.nn.functional.conv1d(
input_flux.unsqueeze(0).unsqueeze(1),
weights.unsqueeze(0).unsqueeze(1),
padding="same",
)
return output.squeeze()
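if __name__ == "__main__":
    # Minimal end-to-end sketch, hedged: the wavelength/flux arrays below are
    # synthetic stand-ins (a continuum-normalized PHOENIX spectrum is assumed
    # in practice), the device is pinned to CPU to avoid mixed-device tensors,
    # and the Adam settings are illustrative rather than tuned values.
    wl = np.linspace(10_000.0, 10_100.0, 10_000)
    flux = 1.0 - 0.3 * np.exp(-0.5 * ((wl - 10_050.0) / 0.2) ** 2)
    emulator = SparsePhoenixEmulator(wl, flux, prominence=0.01, device="cpu")
    optimizer = torch.optim.Adam(emulator.parameters(), lr=0.01)
    target = torch.tensor(flux)[emulator.active_mask]
    for _ in range(10):
        optimizer.zero_grad()
        loss = ((emulator.forward() - target) ** 2).mean()
        loss.backward()
        optimizer.step()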
|
{"hexsha": "df4bdfcd40a3ed719aad6f61ec6bf165c82eeea5", "size": 17470, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/blase/emulator.py", "max_stars_repo_name": "gully/blase", "max_stars_repo_head_hexsha": "1f7f4b2cf77bd4a5d0e4591b4fd3581d7c0bfe25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-12-07T03:37:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T15:02:48.000Z", "max_issues_repo_path": "src/blase/emulator.py", "max_issues_repo_name": "gully/blase", "max_issues_repo_head_hexsha": "1f7f4b2cf77bd4a5d0e4591b4fd3581d7c0bfe25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-12-01T19:15:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T15:29:56.000Z", "max_forks_repo_path": "src/blase/emulator.py", "max_forks_repo_name": "gully/blase", "max_forks_repo_head_hexsha": "1f7f4b2cf77bd4a5d0e4591b4fd3581d7c0bfe25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-07T03:00:05.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-07T03:00:05.000Z", "avg_line_length": 36.9344608879, "max_line_length": 686, "alphanum_fraction": 0.6168860904, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4343}
|
from csv_parser import Parser
import random
a = Parser('transport_data.csv')
a.open()
dots = a.get_data()
zero = []
first = []
second = []
q = []
for dot in dots:
if dot.label == '0':
zero.append(dot)
if dot.label == '1':
first.append(dot)
if dot.label == '2':
second.append(dot)
if dot.label == '?':
q.append(dot)
dots = first+second+zero
random.shuffle(dots)
def get_min(dot, arr, num_of):
dist = []
for d in arr:
if d == dot:
continue
dist.append(dot.dist(d))
ans = []
for i in range(num_of):
tmp = min(dist)
ans.append(tmp)
dist.remove(tmp)
return ans
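# Quick hedged sanity check of get_min with a minimal stand-in for the parser's
# dot objects (only the dist() method is assumed here); real dots come from
# csv_parser above.
class _DemoDot:
    def __init__(self, x):
        self.x = x
    def dist(self, other):
        return abs(self.x - other.x)
_demo = [_DemoDot(v) for v in (0.0, 1.0, 3.0, 6.0)]
assert get_min(_demo[0], _demo, 2) == [1.0, 3.0]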
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import matplotlib.pyplot as plt
from keras.utils import np_utils
import os
from sklearn.model_selection import train_test_split
batch_size = 200
num_classes = 3
epochs = 100
data_augmentation = True
X = []
y = []
rt = 3
if False:  # flip to True to regenerate the cached feature arrays saved below
for _, dot in enumerate(dots[:10000]):
z = np.matrix(get_min(dot, zero, rt**2)).reshape(rt,rt)
z /= np.linalg.norm(z)
f = np.matrix(get_min(dot, first, rt**2)).reshape(rt,rt)
f /= np.linalg.norm(f)
s = np.matrix(get_min(dot, second, rt**2)).reshape(rt,rt)
s /= np.linalg.norm(s)
ans = np.array([z, f, s]).reshape(rt,rt,3)
X.append(ans)
y.append(np_utils.to_categorical(dot.label, 3))
if _%1000 == 0:
print("=", end="")
import sys
sys.stdout.flush()
y = np.array(y)
np.save("y3x3", y)
X = np.array(X)
np.save("X3x3", X)
X = np.load("X3x3.npy")
y = np.load("y3x3.npy")
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=(rt, rt, 3)))
model.add(Activation('relu'))
model.add(Conv2D(128, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(1, 1)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
if __name__ == "__main__":
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
"""
history: данные полученные во время обучения сети, необходимые для построения различных графиков"""
# Note: fitting on the full X/y means the validation split overlaps the
# training data; using x_train/y_train here would avoid that leakage.
history = model.fit(x=X, y=y, batch_size=batch_size, epochs=epochs,
shuffle=True, validation_data=(x_test, y_test))
"""if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
Данные полученные после тестирования сети - точность работы на тренировчном множестве """
scores = model.evaluate(x_test, y_test, verbose=1)
print("Точность работы на тестовых данных: %.2f%%" % (scores[1] * 100))
plt.plot(history.history['acc'])
plt.show()
"""
label 0 - маршрут 5
label 1 - маршрут 11
label 2 - маршрут 7
"""
|
{"hexsha": "5418f3b83eb7b149b25090adfd2c34b6e11ad277", "size": 3493, "ext": "py", "lang": "Python", "max_stars_repo_path": "transport_data_analis/neural_network.py", "max_stars_repo_name": "levkovalenko/pm_task_2018", "max_stars_repo_head_hexsha": "8bffa5327dc59b1640d922c4faa7381eb4255ead", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "transport_data_analis/neural_network.py", "max_issues_repo_name": "levkovalenko/pm_task_2018", "max_issues_repo_head_hexsha": "8bffa5327dc59b1640d922c4faa7381eb4255ead", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transport_data_analis/neural_network.py", "max_forks_repo_name": "levkovalenko/pm_task_2018", "max_forks_repo_head_hexsha": "8bffa5327dc59b1640d922c4faa7381eb4255ead", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6641221374, "max_line_length": 103, "alphanum_fraction": 0.6309762382, "include": true, "reason": "import numpy", "num_tokens": 987}
|
# Illustrate einstein summation
# https://rockt.github.io/2018/04/30/einsum
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html
import superimport
import numpy as np
np.random.seed(42)
a = np.arange(3)
b = np.arange(3)
A = np.arange(6).reshape(2,3)
B = np.arange(15).reshape(3,5)
S = np.arange(9).reshape(3,3)
T = np.random.randn(2,2,2,2)
## Single argument
# Matrix transpose
assert np.allclose(A.T, np.einsum('ij->ji', A))
# Sum all elements
assert np.allclose(np.sum(A), np.einsum('ij->', A))
# Sum across rows
assert np.allclose(np.sum(A, axis=0), np.einsum('ij->j', A))
# Sum across columns
assert np.allclose(np.sum(A, axis=1), np.einsum('ij->i', A))
# Sum specific axis of tensor
assert np.allclose(np.sum(T, axis=1), np.einsum('ijkl->ikl', T))
assert np.allclose(np.sum(np.sum(T, axis=0), axis=0), np.einsum('ijkl->kl', T))
# repeated indices with one arg extracts diagonals
assert np.allclose(np.diag(S), np.einsum('ii->i', S))
# Trace
assert np.allclose(np.trace(S), np.einsum('ii->', S))
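# Batch diagonal and batch trace: a repeated index within one operand, applied
# per batch (a hedged addition in the same spirit as the checks above)
T3 = np.random.randn(4, 3, 3)
assert np.allclose(np.diagonal(T3, axis1=1, axis2=2), np.einsum('bii->bi', T3))
assert np.allclose(np.trace(T3, axis1=1, axis2=2), np.einsum('bii->b', T3))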
## Two arguments
# Matrix vector multiplication
assert np.allclose(np.dot(A, b), np.einsum('ik,k->i', A, b))
# Matrix matrix multiplication
assert np.allclose(np.dot(A, B), np.einsum('ik,kj->ij', A, B))
assert np.allclose(np.matmul(A, B), np.einsum('ik,kj->ij', A, B))
# Inner product
assert np.allclose(np.dot(a, b), np.einsum('i,i->', a, b))
assert np.allclose(np.inner(a, b), np.einsum('i,i->', a, b))
# Outer product
assert np.allclose(np.outer(a, b), np.einsum('i,j->ij', a, b))
# Elementwise product
assert np.allclose(a * a, np.einsum('i,i->i', a, a))
assert np.allclose(A * A, np.einsum('ij,ij->ij', A, A))
assert np.allclose(np.multiply(A, A), np.einsum('ij,ij->ij', A, A))
# Batch matrix multiplication
I, J, K, L = 3, 2, 5, 3
AA = np.random.randn(I,J,K)
BB = np.random.randn(I,K,L)
# C[ijl] = sum_k A[ijk] B[ikl]
CC = np.zeros((I,J,L))
for i in range(I):
for j in range(J):
for l in range(L):
s = 0
for k in range(K):
s += AA[i,j,k] * BB[i,k,l]
CC[i,j,l] = s
assert np.allclose(CC, np.einsum('ijk,ikl->ijl', AA, BB))
## >2 arguments
# Batch sentence embedding and averaging
N, C, D, K, T = 2, 3, 4, 5, 6
S = np.random.randn(N, T, K)
W = np.random.randn(K, D)
V = np.random.randn(D, C)
L = np.zeros((N,C))
for n in range(N):
for c in range(C):
s = 0
for d in range(D):
for k in range(K):
for t in range(T):
s += S[n,t,k] * W[k,d] * V[d,c]
L[n,c] = s
assert np.allclose(L, np.einsum('ntk,kd,dc->nc', S, W, V))
path = np.einsum_path('ntk,kd,dc->nc', S, W, V, optimize='optimal')[0]
assert np.allclose(L, np.einsum('ntk,kd,dc->nc', S, W, V, optimize=path))
import jax.numpy as jnp
path = jnp.einsum_path('ntk,kd,dc->nc', S, W, V, optimize='optimal')[0]
assert np.allclose(L, jnp.einsum('ntk,kd,dc->nc', S, W, V, optimize=path))
# Use full student network from Koller and Friedman
contraction = 'c,dc,gdi,si,lg,jls,hgj->'  # renamed to avoid shadowing built-in str
K = 5
cptC = np.random.randn(K)
cptD = np.random.randn(K,K)
cptG = np.random.randn(K,K,K)
cptS = np.random.randn(K,K)
cptL = np.random.randn(K,K)
cptJ = np.random.randn(K,K,K)
cptH = np.random.randn(K,K,K)
cpts = [cptC, cptD, cptG, cptS, cptL, cptJ, cptH]
path_info = np.einsum_path(contraction, *cpts, optimize='optimal')
print(path_info[0]) # ['einsum_path', (0, 1), (0, 5), (0, 4), (0, 3), (0, 2), (0, 1)]
print(path_info[1])
'''
Complete contraction: c,dc,gdi,si,lg,jls,hgj->
Naive scaling: 8
Optimized scaling: 4
Naive FLOP count: 2.734e+06
Optimized FLOP count: 2.176e+03
Theoretical speedup: 1256.606
Largest intermediate: 1.250e+02 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
2 dc,c->d gdi,si,lg,jls,hgj,d->
3 d,gdi->gi si,lg,jls,hgj,gi->
3 gi,si->gs lg,jls,hgj,gs->
3 gs,lg->gls jls,hgj,gls->
4 gls,jls->gj hgj,gj->
3 gj,hgj-> ->
'''
path_info = np.einsum_path(contraction, *cpts, optimize='greedy')
print(path_info[1])
'''
Complete contraction: c,dc,gdi,si,lg,jls,hgj->
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 2.734e+06
Optimized FLOP count: 7.101e+03
Theoretical speedup: 385.069
Largest intermediate: 1.250e+02 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 hgj,jls->gls c,dc,gdi,si,lg,gls->
3 gls,lg->gs c,dc,gdi,si,gs->
2 dc,c->d gdi,si,gs,d->
3 d,gdi->gi si,gs,gi->
3 gs,si->gi gi,gi->
2 gi,gi-> ->
'''
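# Reusing a precomputed path (hedged note): when the same contraction runs many
# times on same-shaped operands, computing the path once and passing it back in
# amortizes the path-search cost.
path = np.einsum_path(contraction, *cpts, optimize='optimal')[0]
for _ in range(3):
    _ = np.einsum(contraction, *cpts, optimize=path)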
|
{"hexsha": "0a88c90e645debd1e52822284b20b674cca52104", "size": 5388, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/einsum_demo.py", "max_stars_repo_name": "vipavlovic/pyprobml", "max_stars_repo_head_hexsha": "59a2edc682d0163955db5e2f27491ad772b60141", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4895, "max_stars_repo_stars_event_min_datetime": "2016-08-17T22:28:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:07:15.000Z", "max_issues_repo_path": "scripts/einsum_demo.py", "max_issues_repo_name": "vipavlovic/pyprobml", "max_issues_repo_head_hexsha": "59a2edc682d0163955db5e2f27491ad772b60141", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 446, "max_issues_repo_issues_event_min_datetime": "2016-09-17T14:35:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:59:33.000Z", "max_forks_repo_path": "scripts/einsum_demo.py", "max_forks_repo_name": "vipavlovic/pyprobml", "max_forks_repo_head_hexsha": "59a2edc682d0163955db5e2f27491ad772b60141", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1160, "max_forks_repo_forks_event_min_datetime": "2016-08-18T23:19:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:44:07.000Z", "avg_line_length": 34.3184713376, "max_line_length": 84, "alphanum_fraction": 0.5079806978, "include": true, "reason": "import numpy,import jax", "num_tokens": 1678}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Forecasting with quadratic model
The quadratic (non-linear) regression model explores a linear relationship
between the forecast variable `y` (observed time series) and predictor
variables `x` and `x^2`, where `x` is the time.
"""
import logging
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import statsmodels.api as sm
from kats.consts import Params, TimeSeriesData
from kats.models.model import Model
from statsmodels.sandbox.regression.predstd import wls_prediction_std
class QuadraticModelParams(Params):
"""Parameter class for Quadratic model.
This is the parameter class for the quadratic model.
Attributes:
alpha: The alpha level for the confidence interval. The default alpha =
0.05 returns a 95% confidence interval
"""
def __init__(self, alpha: float = 0.05, **kwargs: Any) -> None:
super().__init__()
self.alpha = alpha
logging.debug(
"Initialized QuadraticModel parameters. "
"alpha:{alpha}".format(alpha=alpha)
)
def validate_params(self) -> None:
"""Validate Quadratic Model Parameters
Since the quadratic model does not require key parameters to be defined
this is not required for this class
"""
logging.info("Method validate_params() is not implemented.")
pass
class QuadraticModel(Model[QuadraticModelParams]):
"""Model class for Quadratic Model.
This class provides the fit, predict and plot methods for the Quadratic Model
Attributes:
data: the input time series data as :class:`kats.consts.TimeSeriesData`
params: the parameter class defined with `QuadraticModelParams`
"""
past_length: Optional[int] = None
model: Optional[Model[QuadraticModelParams]] = None
freq: Optional[str] = None
_X_future: Optional[List[int]] = None
sdev: Optional[float] = None
dates: Optional[pd.DatetimeIndex] = None
y_fcst: Optional[pd.Series] = None
y_fcst_lower: Optional[pd.Series] = None
y_fcst_upper: Optional[pd.Series] = None
fcst_df: Optional[pd.DataFrame] = None
def __init__(self, data: TimeSeriesData, params: QuadraticModelParams) -> None:
super().__init__(data, params)
if not isinstance(self.data.value, pd.Series):
msg = "Only support univariate time series, but get {type}.".format(
type=type(self.data.value)
)
logging.error(msg)
raise ValueError(msg)
def fit(self) -> None:
"""fit Quadratic Model."""
logging.debug(
"Call fit() with parameters: "
"alpha:{alpha}".format(alpha=self.params.alpha)
)
self.past_length = len(self.data.time)
_X = list(range(self.past_length))
_X_quad = np.column_stack([_X, np.power(_X, 2)])
X_quad = sm.add_constant(_X_quad)
y = self.data.value
quad_model = sm.OLS(y, X_quad)
self.model = quad_model.fit()
# pyre-fixme[14]: `predict` overrides method defined in `Model` inconsistently.
def predict(
self, steps: int, include_history: bool = False, **kwargs: Any
) -> pd.DataFrame:
"""predict with fitted quadratic model.
Args:
steps: the steps or length of the prediction horizon
include_history: whether to include the historical data in the prediction
Returns:
The predicted dataframe with the following columns:
`time`, `fcst`, `fcst_lower`, and `fcst_upper`
"""
past_length = self.past_length
if past_length is None:
raise ValueError("Call fit() before predict().")
model = self.model
assert model is not None
logging.debug(
"Call predict() with parameters. "
"steps:{steps}, kwargs:{kwargs}".format(steps=steps, kwargs=kwargs)
)
self.freq = kwargs.get("freq", "D")
self.include_history = include_history
if include_history:
_X_future = list(range(0, past_length + steps))
else:
_X_future = list(range(past_length, past_length + steps))
self._X_future = _X_future
_X_fcst = np.column_stack([_X_future, np.power(_X_future, 2)])
X_fcst = sm.add_constant(_X_fcst)
y_fcst = model.predict(X_fcst)
sdev, y_fcst_lower, y_fcst_upper = wls_prediction_std(
self.model, exog=X_fcst, alpha=self.params.alpha
)
self.sdev = sdev
self.y_fcst = pd.Series(y_fcst)
self.y_fcst_lower = pd.Series(y_fcst_lower)
self.y_fcst_upper = pd.Series(y_fcst_upper)
# create future dates
last_date = self.data.time.max()
dates = pd.date_range(start=last_date, periods=steps + 1, freq=self.freq)
self.dates = dates[dates != last_date]
if include_history:
self.dates = np.concatenate((pd.to_datetime(self.data.time), self.dates))
self.fcst_df = fcst_df = pd.DataFrame(
{
"time": self.dates,
"fcst": self.y_fcst,
"fcst_lower": self.y_fcst_lower,
"fcst_upper": self.y_fcst_upper,
}
)
logging.debug("Return forecast data: {fcst_df}".format(fcst_df=self.fcst_df))
return fcst_df
def __str__(self) -> str:
return "Quadratic"
@staticmethod
def get_parameter_search_space() -> List[Dict[str, object]]:
"""get default parameter search space for Quadratic model."""
return [
{
"name": "alpha",
"type": "choice",
"value_type": "float",
"values": [0.01, 0.05, 0.1, 0.25],
"is_ordered": True,
},
]
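if __name__ == "__main__":
    # Hedged usage sketch: a synthetic series with a known quadratic trend.
    # Assumes the usual TimeSeriesData construction from a DataFrame with
    # `time` and `value` columns; the values below are illustrative only.
    _x = np.arange(100)
    _df = pd.DataFrame(
        {
            "time": pd.date_range("2020-01-01", periods=100, freq="D"),
            "value": 1.0 + 0.5 * _x + 0.02 * _x ** 2,
        }
    )
    _model = QuadraticModel(TimeSeriesData(_df), QuadraticModelParams(alpha=0.05))
    _model.fit()
    print(_model.predict(steps=10))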
|
{"hexsha": "db6568b9fbce199ac9c6f919d451f4cc2259265b", "size": 6019, "ext": "py", "lang": "Python", "max_stars_repo_path": "kats/models/quadratic_model.py", "max_stars_repo_name": "utkucanaytac/Kats", "max_stars_repo_head_hexsha": "9781615750a2f3b49f16cccf335b5c29fdfd181a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kats/models/quadratic_model.py", "max_issues_repo_name": "utkucanaytac/Kats", "max_issues_repo_head_hexsha": "9781615750a2f3b49f16cccf335b5c29fdfd181a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kats/models/quadratic_model.py", "max_forks_repo_name": "utkucanaytac/Kats", "max_forks_repo_head_hexsha": "9781615750a2f3b49f16cccf335b5c29fdfd181a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3942857143, "max_line_length": 85, "alphanum_fraction": 0.6225286592, "include": true, "reason": "import numpy,import statsmodels,from statsmodels", "num_tokens": 1409}
|
import numpy as np
import matplotlib.pyplot as plt
import stan_utils as stan
from mpl_utils import (mpl_style, common_limits)
plt.style.use(mpl_style)
def generate_data(N, M, D, scales=None, seed=None):
"""
Generate some toy data to play with. Here we assume all :math:`N` stars have
been observed by all :math:`M` surveys.
:param N:
The number of stars observed.
:param M:
The number of surveys.
:param D:
The dimensionality of the label space.
:param scales: [optional]
Optional values to provide for the relative scales on the latent factors.
:param seed: [optional]
An optional seed to provide to the random number generator.
:returns:
A two-length tuple containing the data :math:`y` and a dictionary with
the true values.
"""
if seed is not None:
np.random.seed(seed)
if scales is None:
scales = np.abs(np.random.normal(0, 1, size=D))
else:
scales = np.array(scales)
assert len(scales) == D
X = np.random.normal(
np.zeros(D),
scales,
size=(N, D))
# TODO: Better way to randomly generate positive semi-definite covariance
# matrices that are *very* close to an identity matrix. One option is
# sketched in generate_near_identity_psd() below.
# Use decomposition to ensure the resulting covariance matrix is positive
# semi-definite.
L = np.random.randn(M, D, D)
L[:, np.arange(D), np.arange(D)] = np.exp(L[:, np.arange(D), np.arange(D)])
i, j = np.triu_indices_from(L[0], 1)
L[:, i, j] = 0.0
# TODO: use matrix multiplication you idiot
theta = np.array([np.dot(L[i], L[i].T) for i in range(M)])
y = np.dot(X, theta)
# add noise.
phi = np.abs(np.random.normal(0, 0.1, size=(M, D)))
rank = np.random.normal(0, 1, size=y.shape)
noise = scales * rank * phi
y += noise
truths = dict(X=X, theta=theta, phi=phi, scales=scales, L=L, noise=noise)
return (y, truths)
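def generate_near_identity_psd(M, D, epsilon=0.05, seed=None):
    """
    Sketch addressing the TODO above, under stated assumptions: build each
    covariance from a Cholesky-like factor L = I + epsilon * (strictly lower
    triangular noise), so that L @ L.T = I + O(epsilon) is positive definite
    by construction and close to identity for small epsilon. The `epsilon`
    knob is an assumption, not part of the original model.
    """
    rng = np.random.RandomState(seed)
    A = np.tril(rng.randn(M, D, D), k=-1)
    L = np.eye(D)[None, :, :] + epsilon * A
    return np.einsum("mij,mkj->mik", L, L)  # batched L @ L.T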
N, M, D = (250, 10, 5)
scales = np.ones(D)
y, truths = generate_data(N=N, M=M, D=D, scales=scales)
model = stan.read_model("model.stan")
# Optimize the model
data = dict(y=y, N=N, M=M, D=D, scales=scales)
# TODO: initialize from true value.
op_kwds = dict(
data=data,
iter=100000,
tol_obj=7./3 - 4./3 - 1, # machine precision
tol_grad=7./3 - 4./3 - 1, # machine precision
tol_rel_grad=1e3,
tol_rel_obj=1e4
)
p_opt = model.optimizing(**op_kwds)
fig, axes = plt.subplots(1, D, figsize=(4 * D, 4))
for d, ax in enumerate(axes):
x = truths["X"].T[d]
y = p_opt["X"].T[d]
ax.scatter(x, y)
ax.set_xlabel(r"$X_{{{0},\textrm{{true}}}}$".format(d))
ax.set_ylabel(r"$X_{{{0},\textrm{{opt}}}}$".format(d))
common_limits(ax, plot_one_to_one=True)
fig.tight_layout()
fig, axes = plt.subplots(1, D, figsize=(4 * D, 4))
for d, ax in enumerate(axes):
x = truths["phi"].T[d]
y = p_opt["phi"][d]
ax.scatter(x, y)
ax.set_xlabel(r"$\phi_{{{0},\textrm{{true}}}}$".format(d))
ax.set_ylabel(r"$\phi_{{{0},\textrm{{opt}}}}$".format(d))
common_limits(ax, plot_one_to_one=True)
fig.tight_layout()
K = int(np.ceil(M**0.5))
L = int(np.ceil(M / K))
fig, axes = plt.subplots(K, L, figsize=(4 * L, 4 * K))
axes = np.array(axes).flatten()
for m, ax in enumerate(axes[:M]):
x = truths["theta"][m].flatten()
y = p_opt["theta"][m].flatten()
ax.scatter(x, y)
ax.set_xlabel(r"$\theta_{{{0},\textrm{{true}}}}$".format(m))
ax.set_ylabel(r"$\theta_{{{0},\textrm{{opt}}}}$".format(m))
common_limits(ax, plot_one_to_one=True)
for ax in axes[M:]:
ax.set_visible(False)
fig.tight_layout()
|
{"hexsha": "a873e6e776032792b2aee21f0ea9cce795362399", "size": 3657, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "andycasey/uber-chemi-cal", "max_stars_repo_head_hexsha": "8fc3cfbf7b54145779b1635496426c5473cedb56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model.py", "max_issues_repo_name": "andycasey/uber-chemi-cal", "max_issues_repo_head_hexsha": "8fc3cfbf7b54145779b1635496426c5473cedb56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "andycasey/uber-chemi-cal", "max_forks_repo_head_hexsha": "8fc3cfbf7b54145779b1635496426c5473cedb56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0301204819, "max_line_length": 81, "alphanum_fraction": 0.6065080667, "include": true, "reason": "import numpy", "num_tokens": 1102}
|
"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import numpy as np
import xarray as xr
import dask
from act.qc import qctests, comparison_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, object):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, xarray_obj):
""" initialize """
self._obj = xarray_obj
def check_for_ancillary_qc(self, var_name, add_if_missing=True,
cleanup=True, flag_type=False):
"""
Method to check if a quality control variable exists in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make the variable if it does not exist
and update_ancillary_variable() to ensure linkage between the data and
quality control variable. Can also be used just to get the
corresponding quality control variable name, adding it if
it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from object.
cleanup : boolean
Option to run qc.clean.cleanup() method on the object
to ensure the object was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
"""
qc_var_name = None
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._obj[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it, else create a new
# QC variable.
if add_if_missing:
try:
self._obj['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
# Make sure the data variable has a variable attribute linking
# the data variable to the QC variable.
if add_if_missing:
self._obj.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray object. If the quality control variables are already cleaned
# the extra work is small since it's just checking.
if cleanup:
self._obj.clean.cleanup(handle_missing_value=True,
link_qc_variables=False)
return qc_var_name
def create_qc_variable(self, var_name, flag_type=False,
flag_values_set_value=0,
qc_var_name=None):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = ('Quality check results on field: ' +
self._obj[var_name].attrs['long_name'])
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._obj.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._obj[var_name].values, dtype=np.int32),
chunks=self._obj[var_name].data.chunksize)
except AttributeError:
qc_data = np.zeros_like(self._obj[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._obj[qc_var_name] = xr.DataArray(
data=qc_data, coords=self._obj[var_name].coords,
attrs={"long_name": qc_variable_long_name,
"units": '1'}
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._obj[qc_var_name].values = \
self._obj[qc_var_name].values + int(flag_values_set_value)
# Add required variable attributes.
if flag_type:
self._obj[qc_var_name].attrs['flag_values'] = []
else:
self._obj[qc_var_name].attrs['flag_masks'] = []
self._obj[qc_var_name].attrs['flag_meanings'] = []
self._obj[qc_var_name].attrs['flag_assessments'] = []
self._obj[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
"""
if qc_var_name is None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables,
qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._obj[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(self, var_name, index=None, test_number=None,
test_meaning=None, test_assessment='Bad',
flag_value=False, recycle=False):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
recycle : boolean
Option to use a number less than the next highest test if available.
For example, if tests 1, 2, 4, 5 are set and recycle is True, the
next test chosen will be 3; otherwise it will be 6.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be lower case and then capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
> result = ds_object.qcfilter.add_test(
var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError('You need to provide a value for test_meaning '
'keyword when calling the add_test method')
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind == 'f':
index = index.astype(int)
# Ensure assessment is lowercase and capitalized to be consistent
test_assessment = test_assessment.lower().capitalize()
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, flag_type=flag_value)
if test_number is None:
test_number = self._obj.qcfilter.available_bit(
qc_var_name, recycle=recycle)
self._obj.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._obj[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._obj[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._obj[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._obj[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._obj[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._obj[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._obj[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._obj[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(self, var_name=None, qc_var_name=None, test_number=None, flag_value=False,
flag_values_reset_value=0):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
> ds_object.qcfilter.remove_test(
var_name, test_number=3)
"""
if test_number is None:
raise ValueError('You need to provide a value for test_number '
'keyword when calling the remove_test() method')
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._obj[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name, qc_var_name=qc_var_name, test_number=test_number,
return_index=True, flag_value=True)
self._obj.qcfilter.unset_test(var_name=var_name, qc_var_name=qc_var_name,
index=remove_index, test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value)
del flag_values[index]
self._obj[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name, qc_var_name=qc_var_name, test_number=test_number,
return_index=True)
self._obj.qcfilter.unset_test(var_name=var_name, qc_var_name=qc_var_name,
index=remove_index, test_number=test_number,
flag_value=flag_value)
del flag_masks[index]
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._obj[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._obj[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._obj[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None,
flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set the test in the quality control array. If you want to
set all values you will need to pass in the index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(
var_name, index=index, test_number=2)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._obj[qc_var_name].values)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def unset_test(self, var_name=None, qc_var_name=None, index=None, test_number=None,
flag_value=False, flag_values_reset_value=0):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset the test in the quality control array. If you want to
unset all values you will need to pass in the index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds_object.qcfilter.unset_test(
var_name, index=0, test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
"""
try:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._obj[qc_var_name].attrs['flag_values']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError('Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected')
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(self, var_name=None, test_number=None, qc_var_name=None,
flag_value=False, return_index=False):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of 0 or 1 mask.
Returns
-------
test_mask : bool array
A numpy boolean array with False or True where the test number or
bit was set.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
qc_var_name = result['qc_variable_name']
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'], return_index=True)
print(mask)
array([0, 1, 2])
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'])
print(mask)
array([ True, True, True, ..., False, False, False])
data = ds_object[var_name].values
print(data[mask])
array([7.84 , 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([ nan, nan, nan, ..., 7.6705, 7.6892, 7.6892],
dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method')
if test_number is None:
raise ValueError('You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
tripped = np.where(qc_variable == test_number)
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = np.where(check_bit > 0)
test_mask = np.zeros(qc_variable.shape, dtype='int')
# Make sure test_mask is an array. If qc_variable is scalar it will
# be returned from np.zeros as a scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = 1
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(self, var_name, rm_assessments=None,
rm_tests=None, return_nan_array=False,
ma_fill_value=None, return_inverse=False):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered ( or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask or return data array where mask is set
to False instead of True set to NaN. Useful for overplotting
where failing.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
data = ds_object.qcfilter.get_masked_data(var_name,
rm_assessments=['Bad', 'Indeterminate'])
print(data)
masked_array(data=[--, --, --, ..., 7.670499801635742,
7.689199924468994, 7.689199924468994],
mask=[ True, True, True, ..., False, False, False],
fill_value=1e+20, dtype=float32)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._obj[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._obj.qcfilter.get_qc_test_mask(
var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when over plotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(self, variables=None, rm_assessments=None, rm_tests=None,
np_ma=True, verbose=False, del_qc_var=True):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data variable is changed to to a numpy masked array with failing
data masked or, if requested, to numpy array with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process
rm_assessments : str or list of str
Assessment names listed under the quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
np_ma : boolean
Should the data in the xarray DataArray be set to numpy masked
arrays. This should work with most xarray methods. If the xarray
processing method does not work with numpy masked arrays, set to
False to use NaN.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete the quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables. If numpy masked arrays are used the data are not lost
but would need to be extracted and set to DataArray to return the
dataset back to original state.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = 'atmos_pressure'
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment='Bad')
ds.qcfilter.datafilter(rm_assessments='Bad')
ds_2 = ds.mean()
print(f'All data: {ds_1[var_name].values}, Bad Removed: {ds_2[var_name].values}')
All data: 98.86097717285156, Bad Removed: 99.15148162841797
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._obj.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name,
add_if_missing=False,
cleanup=False)
if qc_var_name is None:
if verbose:
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
data = self.get_masked_data(var_name, rm_assessments=rm_assessments,
rm_tests=rm_tests, ma_fill_value=np_ma)
self._obj[var_name].values = data
if del_qc_var:
del self._obj[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given an a scalar or
array of values and a bit number.
Parameters
----------
array : int or numpy array
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
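# Worked example of the mask arithmetic above (hedged note): test number N maps
# to the mask value 2 ** (N - 1), so tests 1, 2 and 4 set together give the
# integer 1 + 2 + 8 = 11, and parse_bit(11) below recovers [1, 2, 4].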
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
Example use removing bit 2 from an array called data:
> data = set_bit(0,2)
> data = set_bit(data,3)
> data
6
> data = unset_bit(data,2)
> data
4
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array = array & ~ (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
> parse_bit(7)
array([1, 2, 3])
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError("Must be a single value.")
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError("Must be a positive integer.")
bit_number = []
qc_bit = int(qc_bit)
counter = 0
while qc_bit > 0:
temp_value = qc_bit % 2
qc_bit = qc_bit >> 1
counter += 1
if temp_value == 1:
bit_number.append(counter)
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
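# A minimal round-trip sketch (not part of the original module) showing how
# the three bit helpers above compose; the variable name is illustrative only:
#
#     flags = set_bit(0, 2)        # -> 2 (bit 2 set)
#     flags = set_bit(flags, 3)    # -> 6 (bits 2 and 3 set)
#     parse_bit(flags)             # -> array([2, 3])
#     unset_bit(flags, 3)          # -> 2 (bit 3 cleared again)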
|
{"hexsha": "6b3a80fc6fa0d9a2ddfa98b2a6a0d2109d799776", "size": 38882, "ext": "py", "lang": "Python", "max_stars_repo_path": "act/qc/qcfilter.py", "max_stars_repo_name": "zssherman/ACT", "max_stars_repo_head_hexsha": "db87008aa6649d3d21b79ae97ea0f11d7f1f1935", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "act/qc/qcfilter.py", "max_issues_repo_name": "zssherman/ACT", "max_issues_repo_head_hexsha": "db87008aa6649d3d21b79ae97ea0f11d7f1f1935", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "act/qc/qcfilter.py", "max_forks_repo_name": "zssherman/ACT", "max_forks_repo_head_hexsha": "db87008aa6649d3d21b79ae97ea0f11d7f1f1935", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3506243996, "max_line_length": 99, "alphanum_fraction": 0.5846664266, "include": true, "reason": "import numpy", "num_tokens": 8227}
|
## Python practice file
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
import math
field_of_view = 6  # degrees
bear_mountain = EarthLocation(lat=42.6*u.deg, lon=-83.14*u.deg, height=0.0*u.m)
utcoffset = -4*u.hour # Eastern Daylight Time
time = Time('2017-2-1 23:00:00') - utcoffset
midnight = Time('2017-2-2 00:00:00') - utcoffset
delta_midnight = np.linspace(-6, 10, 100)*u.hour
'''
plt.plot(delta_midnight, m33airmasss_July13night)
plt.xlim(-2, 10)
plt.ylim(1, 4)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Airmass [Sec(z)]')
plt.show()
'''
times_July12_to_13 = midnight + delta_midnight
frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain)
def objectSky(name):
starobject = SkyCoord.from_name(name)
starobjectaltaz = starobject.transform_to(AltAz(obstime=time,location=bear_mountain))
print(name+"'s Altitude = {0.alt:.2}".format(starobjectaltaz))
frame_July13night = AltAz(obstime=midnight+delta_midnight,
location=bear_mountain)
starobjectaltazs_July13night = starobject.transform_to(frame_July13night)
starobjectairmasss_July13night = starobjectaltazs_July13night.secz
starobjectaltaz_July12_to_13 = starobject.transform_to(frame_July12_to_13)
###specific stars
#plt.scatter(delta_midnight, starobjectaltaz_July12_to_13.alt,
#            c=starobjectaltaz_July12_to_13.az, label=name, lw=0, s=8,
#            cmap='viridis')
return starobjectaltaz_July12_to_13
SAO = objectSky("SAO 109740")
Sirrah = objectSky("Sirrah")
print(SAO[-1])
print(SAO[1])
#difference = map(lambda x: math.fabs(x[0] - x[1]), zip(SAO[-1], Sirrah[-1]))
#print(difference)
for i in range(len(SAO)):
#print(SAO[i].lon_offset, SAO[i].lat_offset)
dra, ddec = Sirrah[i].spherical_offsets_to(SAO[i])
print(Sirrah[i].obstime,dra.to(u.deg)/60.0,ddec.to(u.deg)/60.0)
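# A hedged alternative sketch: SkyCoord.separation (an existing astropy API)
# returns the total on-sky angular distance directly, instead of the
# separate offsets computed above:
#sep = Sirrah[-1].separation(SAO[-1])
#print(sep.to(u.arcmin))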
'''
# remaining plotting code, kept commented out.
plt.fill_between(delta_midnight.to('hr').value, 0, 90,
sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0)
plt.fill_between(delta_midnight.to('hr').value, 0, 90,
sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0)
plt.colorbar().set_label('Azimuth [deg]')
plt.legend(loc='upper left')
plt.xlim(-12, 12)
plt.xticks(np.arange(13)*2 -12)
plt.ylim(0, 90)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Altitude [deg]')
plt.show()
'''
|
{"hexsha": "48e859e9885fad690fe3e4a45089af694cef3624", "size": 2622, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/samedec.py", "max_stars_repo_name": "luciencd/starhopper", "max_stars_repo_head_hexsha": "564eae99bc770b143855312d583bbeaaad676ff1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/samedec.py", "max_issues_repo_name": "luciencd/starhopper", "max_issues_repo_head_hexsha": "564eae99bc770b143855312d583bbeaaad676ff1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-02-02T00:40:27.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-11T21:40:53.000Z", "max_forks_repo_path": "src/samedec.py", "max_forks_repo_name": "luciencd/starhopper", "max_forks_repo_head_hexsha": "564eae99bc770b143855312d583bbeaaad676ff1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4606741573, "max_line_length": 89, "alphanum_fraction": 0.7200610221, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 815}
|
# -*- coding: utf-8 -*-
import numpy as np
from pyam.logger import logger
from pyam.utils import isstr, cast_years_to_int
# %%
def fill_series(x, year):
"""Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation
"""
x = x.dropna()
if year in x.index and not np.isnan(x[year]):
return x[year]
else:
prev = [i for i in x.index if i < year]
nxt = [i for i in x.index if i > year]
if prev and nxt:
p = max(prev)
n = min(nxt)
return ((n - year) * x[p] + (year - p) * x[n]) / (n - p)
else:
return np.nan
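# A minimal usage sketch (illustrative values, assuming `pd` is pandas):
# fill_series(pd.Series([1.0, 3.0], index=[2000, 2010]), 2005) -> 2.0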
def cumulative(x, first_year, last_year):
"""Returns the cumulative sum of a timeseries (indexed over years),
implements linear interpolation between years, ignores nan's in the range.
The function includes the last-year value of the series, and
raises a warning if start_year or last_year is outside of
the timeseries range and returns nan
Parameters
----------
x: pandas.Series
a timeseries to be summed over time
first_year: int
first year of the sum
last_year: int
last year of the sum (inclusive)
"""
# if the timeseries does not cover the range `[first_year, last_year]`,
# return nan to avoid erroneous aggregation
if min(x.index) > first_year:
logger().warning('the timeseries `{}` does not start by {}'.format(
x.name or x, first_year))
return np.nan
if max(x.index) < last_year:
logger().warning('the timeseries `{}` does not extend until {}'
.format(x.name or x, last_year))
return np.nan
# cast timeseries columns to `int` if necessary
if not x.index.dtype == 'int64':
cast_years_to_int(x, index=True)
x[first_year] = fill_series(x, first_year)
x[last_year] = fill_series(x, last_year)
years = [i for i in x.index if i >= first_year and i <= last_year
and ~np.isnan(x[i])]
years.sort()
# loop over years
if not np.isnan(x[first_year]) and not np.isnan(x[last_year]):
value = 0
for (i, yr) in enumerate(years[:-1]):
next_yr = years[i+1]
# the summation is shifted to include the first year fully in sum,
# otherwise, would return a weighted average of `yr` and `next_yr`
value += ((next_yr - yr - 1) * x[next_yr] +
(next_yr - yr + 1) * x[yr]) / 2
# the loop above does not include the last element in range
# (`last_year`), therefore added explicitly
value += x[last_year]
return value
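# Worked example (illustrative values, assuming `pd` is pandas): for a
# constant series s = pd.Series([1.0, 1.0], index=[2000, 2001]),
# cumulative(s, 2000, 2001) -> 2.0, since both end years are included.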
def cross_threshold(x, threshold=0, direction=['from above', 'from below']):
"""Returns a list of the years in which a timeseries (indexed over years)
crosses a given threshold
Parameters
----------
x: pandas.Series
a timeseries indexed over years
threshold: float, default 0
the threshold that the timeseries is checked against
direction: str, optional, default `['from above', 'from below']`
whether to return all years where the threshold is crossed
or only where threshold is crossed in a specific direction
"""
prev_yr, prev_val = None, None
years = []
direction = [direction] if isstr(direction) else list(direction)
if not set(direction).issubset(set(['from above', 'from below'])):
raise ValueError('invalid direction `{}`'.format(direction))
for yr, val in zip(x.index, x.values):
if np.isnan(val): # ignore nans in the timeseries
continue
if prev_val is None:
prev_yr, prev_val = yr, val
continue
if not np.sign(prev_val - threshold) == np.sign(val - threshold):
if ('from above' in direction and prev_val > val) \
or ('from below' in direction and prev_val < val):
change = (val - prev_val) / (yr - prev_yr)
# add one because int() rounds down
cross_yr = prev_yr + int((threshold - prev_val) / change) + 1
years.append(cross_yr)
prev_yr, prev_val = yr, val
return years
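# Worked example (illustrative values, assuming `pd` is pandas):
# cross_threshold(pd.Series([1.0, -1.0], index=[2000, 2001])) -> [2001],
# a single downward ('from above') crossing of the default threshold 0.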
|
{"hexsha": "c4c932f56e3e16447c8c262809ff6b5b214dde01", "size": 4360, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyam/timeseries.py", "max_stars_repo_name": "khaeru/pyam", "max_stars_repo_head_hexsha": "c91551d753c739316fca7dae043d4605211e05cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyam/timeseries.py", "max_issues_repo_name": "khaeru/pyam", "max_issues_repo_head_hexsha": "c91551d753c739316fca7dae043d4605211e05cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-26T01:45:37.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-04T06:06:01.000Z", "max_forks_repo_path": "pyam/timeseries.py", "max_forks_repo_name": "khaeru/pyam", "max_forks_repo_head_hexsha": "c91551d753c739316fca7dae043d4605211e05cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.88, "max_line_length": 78, "alphanum_fraction": 0.597706422, "include": true, "reason": "import numpy", "num_tokens": 1069}
|
[STATEMENT]
lemma finter_transfer [transfer_rule]:
assumes "bi_unique A"
shows "(rel_fset A ===> rel_fset A ===> rel_fset A) finter finter"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (rel_fset A ===> rel_fset A ===> rel_fset A) (|\<inter>|) (|\<inter>|)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
bi_unique A
goal (1 subgoal):
1. (rel_fset A ===> rel_fset A ===> rel_fset A) (|\<inter>|) (|\<inter>|)
[PROOF STEP]
unfolding rel_fun_def
[PROOF STATE]
proof (prove)
using this:
bi_unique A
goal (1 subgoal):
1. \<forall>x y. rel_fset A x y \<longrightarrow> (\<forall>xa ya. rel_fset A xa ya \<longrightarrow> rel_fset A (x |\<inter>| xa) (y |\<inter>| ya))
[PROOF STEP]
using inter_transfer[unfolded rel_fun_def, rule_format, Transfer.transferred]
[PROOF STATE]
proof (prove)
using this:
bi_unique A
\<lbrakk>bi_unique ?A; rel_fset ?A ?x ?y; rel_fset ?A ?xa ?ya\<rbrakk> \<Longrightarrow> rel_fset ?A (?x |\<inter>| ?xa) (?y |\<inter>| ?ya)
goal (1 subgoal):
1. \<forall>x y. rel_fset A x y \<longrightarrow> (\<forall>xa ya. rel_fset A xa ya \<longrightarrow> rel_fset A (x |\<inter>| xa) (y |\<inter>| ya))
[PROOF STEP]
by blast
|
{"llama_tokens": 506, "file": null, "length": 4}
|
struct LayerNotFoundException <: Exception
var::String
end
function Base.showerror(io::IO, e::LayerNotFoundException)
println(io, typeof(e), ": ", e.var)
end
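# A minimal usage sketch (hypothetical layer name); the `showerror` method
# above makes the failure render as "LayerNotFoundException: RetryLayer":
#   throw(LayerNotFoundException("RetryLayer"))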
|
{"hexsha": "6ad20ba4b48e4b5b0936b69511564c6ba81872e1", "size": 166, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/exceptions.jl", "max_stars_repo_name": "quatrix/HTTP.jl", "max_stars_repo_head_hexsha": "657dbf9a5d1d87e6e085158a5a053553fcaed032", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/exceptions.jl", "max_issues_repo_name": "quatrix/HTTP.jl", "max_issues_repo_head_hexsha": "657dbf9a5d1d87e6e085158a5a053553fcaed032", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/exceptions.jl", "max_forks_repo_name": "quatrix/HTTP.jl", "max_forks_repo_head_hexsha": "657dbf9a5d1d87e6e085158a5a053553fcaed032", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7142857143, "max_line_length": 58, "alphanum_fraction": 0.7228915663, "num_tokens": 39}
|
"""Tests ring polymer contraction.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import sys
sys.path.append("../")
sys.path.append("../../")
from ipi.utils import nmtransform
import numpy as np
from numpy.testing import assert_almost_equal as assert_equals
def check_up_and_down_scaling(n, q):
"""Check if q expanding and then contracting a ring polymer is a no-op.
Args:
n: The number of beads in the scaled ring polymer.
q: The original position array.
"""
rescale = nmtransform.nm_rescale(q.shape[0], n)
print "Initial position of the beads:"
print q, q.shape, (q.shape[0], n)
# rescale up to the n beads
beads_n = rescale.b1tob2(q)
print "Upscaled to %d beads:"%n
print beads_n, beads_n.shape
beads_final = rescale.b2tob1(beads_n)
print "Final position of the beads:"
print beads_final
assert_equals(q, beads_final)
return beads_n
def check_rpc_consistency(n, q):
"""Check if q expanding and then contracting a ring polymer is a no-op.
Args:
n: The number of beads in the scaled ring polymer.
q: The original position array.
"""
rescale1 = nmtransform.nm_rescale(q.shape[0], n)
rescale2 = nmtransform.nm_rescale(n, q.shape[0])
beads_n = rescale1.b1tob2(q)
beads_1 = rescale1.b2tob1(beads_n)
beads_2 = rescale2.b1tob2(beads_n)
assert_equals(beads_1, beads_2)
def check_centroid_pos(n, q):
"""Check if expanding and then contracting a ring polymer
maintains the centroid.
Args:
n: The number of beads in the scaled ring polymer.
q: The original position array.
"""
beads_big = check_up_and_down_scaling(n, q)
rescale_big = nmtransform.mk_rs_matrix(n, 1)
rescale_q = nmtransform.mk_rs_matrix(q.shape[0], 1)
centroid_big = np.dot(rescale_big, beads_big)
centroid_q = np.dot(rescale_q, q)
assert_equals(centroid_q, centroid_big)
numbers_to_check = range(10, 56, 9)
def test_1_to_n():
"""One bead tests."""
for n in numbers_to_check:
q = np.array([[0.0,0.0,0.0, 1.0,0.0,0.0]])
yield check_up_and_down_scaling, n, q
yield check_rpc_consistency, n, q
yield check_centroid_pos, n, q
def test_2_to_n():
"""Two bead tests."""
for n in numbers_to_check:
q = np.array([[0.0,0.0,0.0, 1.0,0.0,0.0],
[0.0,0.1,0.0, 1.0,0.1,0.0]])
yield check_up_and_down_scaling, n, q
yield check_rpc_consistency, n, q
yield check_centroid_pos, n, q
def test_3_to_n():
"""Three bead tests."""
for n in numbers_to_check:
q = np.array([[0.0, 0.0,0.0, 1.0, 0.0,0.0],
[0.0, 0.1,0.0, 1.0, 0.1,0.0],
[0.0,-0.1,0.0, 1.0,-0.1,0.0]])
yield check_up_and_down_scaling, n, q
yield check_rpc_consistency, n, q
yield check_centroid_pos, n, q
def test_4_to_n():
"""Four bead tests."""
for n in numbers_to_check:
q = np.array([[0.0, 0.0,0.0, 1.0, 0.0,0.0],
[0.0, 0.1,0.0, 1.0, 0.1,0.0],
[0.0, 0.2,0.0, 1.0, 0.2,0.0],
[0.0,-0.1,0.0, 1.0,-0.1,0.0]])
yield check_up_and_down_scaling, n, q
yield check_rpc_consistency, n, q
yield check_centroid_pos, n, q
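# These yield-style generators are nose-style parametrized tests; under the
# nose runner they can be collected with, e.g. (assumed invocation):
#   nosetests test_contraction.py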
|
{"hexsha": "ad9556fbea41f9e72ba5fd5bdd1baefb08fe4297", "size": 3843, "ext": "py", "lang": "Python", "max_stars_repo_path": "lammps-master/tools/i-pi/ipi/tests/test_contraction.py", "max_stars_repo_name": "rajkubp020/helloword", "max_stars_repo_head_hexsha": "4bd22691de24b30a0f5b73821c35a7ac0666b034", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lammps-master/tools/i-pi/ipi/tests/test_contraction.py", "max_issues_repo_name": "rajkubp020/helloword", "max_issues_repo_head_hexsha": "4bd22691de24b30a0f5b73821c35a7ac0666b034", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lammps-master/tools/i-pi/ipi/tests/test_contraction.py", "max_forks_repo_name": "rajkubp020/helloword", "max_forks_repo_head_hexsha": "4bd22691de24b30a0f5b73821c35a7ac0666b034", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0234375, "max_line_length": 75, "alphanum_fraction": 0.6593806922, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1174}
|