text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
"""
Connectivity{T}
Stores connection data between cells. Connections are stored in a compressed
sparse column (CSC) adjacency matrix: only non-zero values are stored.
Primarily, this consist of two vectors:
* the row value vector holds the cell indices of neighbors
* the column pointers marks the start and end index of the row value vector
This matrix is square: n = m = ncell. nconnection is equal to nnz (the number of
non-zero values).
* ncell: the number of (active) cells in the simulation
* nconnection: the number of connections between cells
* length1: for every connection, the length in the first cell, size nconnection
* length2: for every connection, the length in the second cell, size nconnection
* width: width for every connection, (approx.) perpendicular to length1 and
length2, size nconnection
* colptr: CSC column pointer (size ncell + 1)
* rowval: CSC row value (size nconnection)
"""
struct Connectivity{T}
ncell::Int
nconnection::Int
length1::Vector{T}
length2::Vector{T}
width::Vector{T}
colptr::Vector{Int}
rowval::Vector{Int}
end
"""
connections(C::Connectivity, id::Int)
Returns connections for a single cell, identified by ``id``.
"""
connections(C::Connectivity, id::Int) = C.colptr[id]:(C.colptr[id+1]-1)
"""
connection_geometry(I, J, Δx, Δy)
Compute geometrical properties of connections for structured input.
"""
function connection_geometry(I, J, Δx, Δy)
if I[1] != J[1] # connection in y
length1 = 0.5 * Δy[I[1]]
length2 = 0.5 * Δy[J[1]]
width = Δx[I[2]]
elseif I[2] != J[2] # connection in x
length1 = 0.5 * Δx[I[2]]
length2 = 0.5 * Δx[J[2]]
width = Δy[I[1]]
else
# TODO: more specific exception? --> Martijn
error("Inconsistent index")
end
return (length1, length2, width)
end
# Define cartesian indices for the four lateral neighbors of a structured cell.
# NOTE(review): the order (previous column, previous row, next row, next column)
# appears chosen so that, for column-major active-cell numbering, neighbor cell
# numbers come out strictly increasing within a CSC column (see the constructor
# below) — confirm before reordering.
const neighbors = (
    CartesianIndex(0, -1),
    CartesianIndex(-1, 0),
    CartesianIndex(1, 0),
    CartesianIndex(0, 1),
)
# Constructor for the Connectivity structure for structured input.
# Builds the CSC adjacency structure by walking over every active cell and its
# (at most four) lateral neighbors, recording geometry per connection.
function Connectivity(indices, reverse_indices, Δx::Vector{T}, Δy::Vector{T}) where {T}
    # indices: These map from the 1D internal domain to the 2D external domain.
    # reverse_indices: from the 2D external domain to the 1D internal domain,
    # providing an Int which can be used as a linear index; 0 marks an inactive cell.
    nrow, ncol = size(reverse_indices)
    # Pre-allocate output, allocate for full potential number of neighbors (4);
    # the unused tail is sliced off before constructing the struct below.
    ncell = length(indices)
    colptr = Vector{Int}(undef, ncell + 1)
    rowval = Vector{Int}(undef, ncell * 4)
    length1 = similar(rowval, T)
    length2 = similar(rowval, T)
    width = similar(rowval, T)
    i = 1 # running position in rowval, i.e. the nonzero (connection) counter
    j = 1 # current cell = current CSC column
    for I in indices # loop over active indices
        colptr[j] = i
        # Strictly increasing numbering for any row
        # (Required by a CSCSparseMatrix, if you want to convert)
        for neighbor in neighbors
            J = I + neighbor
            if (1 <= J[1] <= nrow) && (1 <= J[2] <= ncol && reverse_indices[J] != 0) # Check if it's inbounds and neighbor is active
                rowval[i] = reverse_indices[J]
                length1[i], length2[i], width[i] = connection_geometry(I, J, Δx, Δy)
                i += 1
            end
        end
        j += 1
    end
    # Close the final column; nconnection is the number of entries actually filled.
    colptr[j] = i
    nconnection = i - 1
    return Connectivity(
        ncell,
        nconnection,
        length1[1:nconnection],
        length2[1:nconnection],
        width[1:nconnection],
        colptr,
        rowval[1:nconnection],
    )
end
|
{"hexsha": "cee252817a665f0ebeb23a45d10aee60fa753003", "size": 3655, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/groundwater/connectivity.jl", "max_stars_repo_name": "DirkEilander/Wflow.jl", "max_stars_repo_head_hexsha": "18b6203d8e90e566998928808a84c906c322fd5d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/groundwater/connectivity.jl", "max_issues_repo_name": "DirkEilander/Wflow.jl", "max_issues_repo_head_hexsha": "18b6203d8e90e566998928808a84c906c322fd5d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/groundwater/connectivity.jl", "max_forks_repo_name": "DirkEilander/Wflow.jl", "max_forks_repo_head_hexsha": "18b6203d8e90e566998928808a84c906c322fd5d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0614035088, "max_line_length": 132, "alphanum_fraction": 0.6473324213, "num_tokens": 995}
|
(***************************** LIFLF - TPX ************************************)
(************* Evaluation pratique en temps limité : 30' **********************)
(******************************************************************************)
Require Import List.
Import ListNotations.
(***************************** fonction mystère *******************************)
(* On donne le corps suivant d'une fonction
match ls with
| [] => None
| x::xs => Some x
end.
*)
(* EXERCICE : Définir la fonction "mystere" avec le corps donné ci-dessus. *)
(* Donner son type *)
(* Expliquer simplement ce que fait cette fonction. *)
(* Returns the head of the list, if any: [None] on the empty list,
   [Some] of the first element otherwise (i.e. the stdlib's [hd_error]). *)
Definition mystere (ls : list nat) : option nat :=
  match ls with
  | nil => None
  | h :: _ => Some h
  end.
(***************************** "take" *****************************************)
(* EXERCICE : définir "take n ls" qui renvoie le préfixe de ls de longueur n
EXEMPLE : voir les tests unitaires ci-dessous
*)
(* nombre element liste list nb elem concat list ++ :: n-1*)
(* [take n ls] returns the prefix of [ls] of length [n] (the whole list when
   [n] exceeds its length). Recursion is on [ls]; the [Nat.eqb] guard stops
   the prefix once [n] reaches zero. *)
Fixpoint take (n: nat) (ls: list nat) : list nat :=
  match ls with
  | [] => []
  | x :: xs => if Nat.eqb n 0 then [] else x :: take (n - 1) xs
  end.

Goal take 0 [1;2;3;4;5] = [].
Proof. reflexivity. Qed.

Goal take 6 [1;2;3;4;5] = [1;2;3;4;5].
Proof. reflexivity. Qed.

Goal take 3 [1;2;3;4;5] = [1;2;3].
Proof. reflexivity. Qed.

Print app.
(***************************** "drop" *****************************************)
(* EXERCICE : définir "drop n ls" qui renvoie le suffixe de ls privée de ses n
premiers éléments
EXEMPLE : voir les tests unitaires ci-dessous
*)
(* enlever element liste list drop elements number list*)
(* [drop n l] returns the suffix of [l] after removing its first [n] elements
   ([] when [n] exceeds the length). Recursion is on [l]; the [Nat.eqb] guard
   returns the remaining list once [n] reaches zero. *)
Fixpoint drop (n: nat) (l: list nat) : list nat :=
  match l with
  | [] => []
  | _ :: rest => if Nat.eqb n 0 then l else drop (n - 1) rest
  end.

Goal drop 0 [1;2;3;4;5] = [1;2;3;4;5].
Proof. reflexivity. Qed.

Goal drop 6 [1;2;3;4;5] = [].
Proof. reflexivity. Qed.

Goal drop 3 [1;2;3;4;5] = [4;5].
Proof. reflexivity. Qed.
(*********************** explication d'énoncé *********************************)
(* EXERCICE : On donne l'énoncé suivant "a_expliquer".
Expliquer en français cette propriété
*)
(* Taking the first n elements of a list and appending what drop leaves over
   reconstructs the original list — the [take]/[drop] analogue of the stdlib
   lemma [firstn_skipn]. Proved by induction on the list, generalizing n;
   each cons case splits on whether n has reached zero. *)
Lemma a_expliquer : forall n la, (fun p => app (fst p) (snd p) ) (take n la, drop n la) = la.
Proof.
  intros n la; revert n.
  induction la as [| x xs IH]; intro n; simpl.
  - reflexivity.
  - destruct (Nat.eqb n 0); simpl.
    + reflexivity.
    + f_equal. exact (IH (n - 1)).
Qed.
(* c'est le theoreme qui verifie que pour tout entier n et tout liste la si on applique
take et drop en parallele sur la meme liste, le resultat de leurs deux liste concatenes
revient a la liste de depart la*)
Compute (take 3 [1;2;3;4;5;6]).
Compute (drop 3 [1;2;3;4;5;6]).
Compute (take 3 [1;2;3;4;5;6]) ++ (drop 3 [1;2;3;4;5;6]).
Compute (a_expliquer 3 [1;2;3;4;5;6]).
|
{"author": "KevinFroissart", "repo": "coqTP", "sha": "f050bf832a49be9262aea70f4844a7394d112817", "save_path": "github-repos/coq/KevinFroissart-coqTP", "path": "github-repos/coq/KevinFroissart-coqTP/coqTP-f050bf832a49be9262aea70f4844a7394d112817/liflf/liflf_B.v"}
|
\section{Statistical postulates}
So far we have looked at the macroscopic properties of a thermodynamic system and at some ways of calculating properties of random processes that obey some given probability distribution. Now it is time to combine these ideas and have a first attempt at linking the microscopic behaviour of a thermodynamic system (an idealised gas) with some of its macroscopic properties.
The behaviour of a given (mechanical) system depends on both the structure of the system --- described by its equations of motion --- and on the \emph{initial conditions} of the system. Therefore, in order to describe the behaviour of the system, we need both the laws of mechanics for the system along with some statistical postulates about the initial conditions of the system. (We could also require that we know the exact initial conditions for all the particles in the system, but this is not realistic for $\mathcal{O}(10^{23})$ particles.) Different choices of the statistical postulates can lead to different behaviour (not just different states) of the system. We will look at Maxwell's postulates for the initial positions and velocities of a dilute collection of gas particles. We will see that they allow us to derive the ideal gas law from a microscopic basis.
\subsection{An ideal gas}
We want to keep the equations of motion for our system of particles as simple as possible. We are going to assume that our system has the following properties:
\begin{itemize}
\item We have $N$ identical point particles, each with mass $m$, constrained in a volume $V$.
\item There are no mutual interactions --- no van der Waals effects or inter-particle forces for us!
\item The walls of the container constraining the particles are perfectly reflecting.
\item The mechanical state of the system is known when the position $\bf r$ and velocity $\bf v$ is known for each particle and these variables evolve according to Newton's laws of motion.
\end{itemize}
\subsection{Maxwell's postulates}
We assume that the vectors describing the initial conditions of the system are randomly distributed. More specifically:
\begin{enumerate}
\item The vectors relating to each particle are independent from each other. (e.g. there is no interaction between particles when they are close.) This is generally not true for any but the most dilute systems. For such a non-interacting system, the system's state is determined when $dN=f({\bf r},{\bf v})$ is known, where $dN$ is the number of particles in a box with sides $d{\bf r}=(dx,dy,dz)$ centered on the point ${\bf r} = (x,y,z)$ and where the corresponding velocities of the particles lie within a box d${\bf v}=(dv_x,dv_y,dv_z)$ centered on ${\bf v}=(v_x,v_y,v_z)$. This relationship defines the \emph{single-particle distribution} $f({\bf r},{\bf v})$.
\item The positions ${\bf r}$ are independent of the velocities ${\bf v}$. This means we can factorise $f({\bf r},{\bf v})$ as $f({\bf r},{\bf v})$ =$f_r({\bf r})$ $f_v({\bf v})$ .
\item The density of the gas is uniform within the volume so we can write $f_r({\bf r}) = N/V=\rho=$ constant inside the volume (and zero outside it).
\item The velocity components are independent of each other so we can factorise $f_v$ as $f_v({\bf v})=f_x(v_x)f_y(v_y)f_z(v_z)$.
\item The distribution $f_v({\bf v})$ is isotropic in velocity space so that $f_v$ depends only on the magnitude of the velocity $|{\bf v}| = v$
\end{enumerate}
\subsection{Equation of state}
Maxwell's postulates allow us to derive the equation of state for an ideal gas (the ideal gas law). They provide a microscopic interpretation of absolute temperature in terms of kinetic energy.
Consider a particle with velocity ${\bf v} = (v_x,v_y,v_z)$, moving from the left, that hits a wall parallel to the $yz$-plane. After impact its velocity is ${\bf v'}=(-v_x,v_y,v_z)$. Hence, the change in momentum is $\Delta {\bf p}= {\bf p'} - {\bf p} = m({\bf v'}-{\bf v}) = m(-2v_x,0,0)$.
How many such impacts occur in a given time interval $\Delta t$ on a region of the wall with area $A$?
Take a box with a side of area $A$ on the wall and perpendicular sides of length $v_x\Delta t$. This contains the particles of velocity ${\bf v}$ that can hit the wall. The volume of the box is $Av_x\Delta t$ and the number of particles it contains is $\rho Av_x\Delta t f_v({\bf v})$, where $\rho=N/V$ is the density of all particles and $f_v$ is the probability that a particle has velocity $\bf v$.
The total momentum $\Delta P$ transmitted to the wall is therefore
$$\Delta P =\int_0^\infty dv_x \int_{-\infty}^\infty dv_y \int_{-\infty}^\infty dv_z f_v({\bf v})\rho A\Delta t(-2m)v_x^2{\bf i}$$
where ${\bf i} =(1,0,0)$ picks out the $x$-component. I.e. integrate over the number of particles times the momentum per particle. The first integral only runs from zero since we are only interested in particles traveling towards the right, i.e. $v_x>0$.
The integral above gives
$$\Delta P = -2m\rho A\Delta t \langle v_x^2\rangle\frac12$$ where the average square velocities come from integrating over the velocity distribution and the factor of half comes from the integral over $v_x$, i.e. particles moving towards the right.
The force exerted by the gas on the wall is $F=\Delta P/\Delta t$ and pressure is force/area = $|F|/A$.
We therefore have
\begin{equation}
p=m\rho \langle v_x^2\rangle.
\label{eq1.2}
\end{equation}
Compare this with the classical equation of state for an ideal gas $pV=nRT$ where $n=N/N_A$ is the number of moles, $T$ is temperature and $R=8.3\,\mathrm{J\,mol^{-1}\,K^{-1}}$ is the gas constant. If we introduce the Boltzmann constant $k_B = R/N_A$ and the particle density $\rho =N/V$, the ideal gas law becomes $p=\rho k_BT$. Comparing the ideal gas law with equation \ref{eq1.2}
we find that we can relate the absolute temperature $T$ to the mean square velocities of the particles.
|
{"hexsha": "721bcec4f36cea3231937916331c954c6145d6f3", "size": 6243, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "04-statisticalPostulates.tex", "max_stars_repo_name": "notdroneale/708Notes2018", "max_stars_repo_head_hexsha": "4808fc20291758193ffb24c1201844816858932d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "04-statisticalPostulates.tex", "max_issues_repo_name": "notdroneale/708Notes2018", "max_issues_repo_head_hexsha": "4808fc20291758193ffb24c1201844816858932d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "04-statisticalPostulates.tex", "max_forks_repo_name": "notdroneale/708Notes2018", "max_forks_repo_head_hexsha": "4808fc20291758193ffb24c1201844816858932d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 117.7924528302, "max_line_length": 873, "alphanum_fraction": 0.7477174435, "num_tokens": 1599}
|
from datetime import datetime
from random import randint
import numpy as np
import pandas as pd
import pytest
from cognite.v05 import dto, timeseries
# Shared time series name; (re)set for every test class by the autouse
# `ts_name` fixture below. Tests within a class are order-dependent on it.
TS_NAME = None
# Parameter sets for datapoint-fetching tests: epoch-millisecond ints,
# datetime objects, and datetimes with the protobuf response format enabled.
dps_params = [
    {"start": 1522188000000, "end": 1522620000000},
    {"start": datetime(2018, 4, 1), "end": datetime(2018, 4, 2)},
    {"start": datetime(2018, 4, 1), "end": datetime(2018, 4, 2), "protobuf": True},
]
@pytest.fixture(autouse=True, scope="class")
def ts_name():
    """Generate a fresh, collision-resistant time series name for each test class."""
    global TS_NAME
    TS_NAME = f"test_ ts_{randint(1, 2 ** 53 - 1)}"
class TestTimeseries:
    # Integration tests against the live Cognite API for the time series CRUD
    # endpoints. NOTE(review): tests are order-dependent — post -> update ->
    # read -> delete all act on the shared TS_NAME created by the ts_name fixture.

    @pytest.fixture(scope="class", params=[True, False])
    def get_timeseries_response_obj(self, request):
        # Fetch the test series once per parametrization, with and without metadata.
        yield timeseries.get_timeseries(prefix=TS_NAME, limit=1, include_metadata=request.param)

    def test_post_timeseries(self):
        # Creating the series; the API returns an empty dict on success.
        tso = dto.TimeSeries(TS_NAME)
        res = timeseries.post_time_series([tso])
        assert res == {}

    def test_update_timeseries(self):
        # Update the unit; relies on the series created in test_post_timeseries.
        tso = dto.TimeSeries(TS_NAME, unit="celsius")
        res = timeseries.update_time_series([tso])
        assert res == {}

    def test_timeseries_unit_correct(self, get_timeseries_response_obj):
        # Confirms the update above actually took effect server-side.
        assert get_timeseries_response_obj.to_json()[0]["unit"] == "celsius"

    def test_get_timeseries_output_format(self, get_timeseries_response_obj):
        # The response object must support all three export formats.
        print(get_timeseries_response_obj.to_pandas())
        from cognite.v05.dto import TimeSeriesResponse

        assert isinstance(get_timeseries_response_obj, TimeSeriesResponse)
        assert isinstance(get_timeseries_response_obj.to_ndarray(), np.ndarray)
        assert isinstance(get_timeseries_response_obj.to_pandas(), pd.DataFrame)
        assert isinstance(get_timeseries_response_obj.to_json()[0], dict)

    def test_get_timeseries_no_results(self):
        # A prefix matching nothing should yield empty (not raise).
        result = timeseries.get_timeseries(prefix="not_a_timeseries_prefix")
        assert result.to_pandas().empty
        assert not result.to_json()

    def test_delete_timeseries(self):
        res = timeseries.delete_time_series(TS_NAME)
        assert res == {}

    def test_get_timeseries_with_config_variables_from_argument(self, unset_config_variables):
        # NOTE(review): `unset_config_variables` is not defined in this file —
        # presumably a conftest fixture yielding (api_key, project); verify.
        ts = timeseries.get_timeseries(prefix=TS_NAME, limit=1,
                                       api_key=unset_config_variables[0], project=unset_config_variables[1])
        assert ts
@pytest.fixture(scope="class")
def datapoints_fixture():
    # Create the shared test series before the class runs; delete it afterwards.
    tso = dto.TimeSeries(TS_NAME)
    timeseries.post_time_series([tso])
    yield
    timeseries.delete_time_series(TS_NAME)
@pytest.mark.usefixtures("datapoints_fixture")
class TestDatapoints:
    # Datapoint read/write integration tests. Writes go to TS_NAME; reads use
    # the pre-existing "constant" series — presumably seeded in the test
    # project out-of-band (TODO confirm).

    @pytest.fixture(scope="class", params=dps_params)
    def get_dps_response_obj(self, request):
        # One fetch per dps_params entry (int/datetime bounds, protobuf on/off).
        yield timeseries.get_datapoints(
            name="constant",
            start=request.param["start"],
            end=request.param["end"],
            protobuf=request.param.get("protobuf", False),
        )

    def test_post_datapoints(self):
        dps = [dto.Datapoint(i, i * 100) for i in range(10)]
        res = timeseries.post_datapoints(TS_NAME, datapoints=dps)
        assert res == {}

    def test_get_datapoints(self, get_dps_response_obj):
        from cognite.v05.dto import DatapointsResponse

        assert isinstance(get_dps_response_obj, DatapointsResponse)

    def test_get_dps_output_formats(self, get_dps_response_obj):
        # The response object must support all three export formats.
        assert isinstance(get_dps_response_obj.to_ndarray(), np.ndarray)
        assert isinstance(get_dps_response_obj.to_pandas(), pd.DataFrame)
        assert isinstance(get_dps_response_obj.to_json(), dict)

    def test_get_dps_correctly_spaced(self, get_dps_response_obj):
        # Timestamps must be strictly changing and on a 10s grid (milliseconds).
        timestamps = get_dps_response_obj.to_pandas().timestamp.values
        deltas = np.diff(timestamps, 1)
        assert (deltas != 0).all()
        assert (deltas % 10000 == 0).all()

    def test_get_dps_with_limit(self):
        res = timeseries.get_datapoints(name="constant", start=0, limit=1)
        assert len(res.to_json().get("datapoints")) == 1

    def test_get_dps_with_limit_with_config_variables_from_argument(self, unset_config_variables):
        # NOTE(review): `unset_config_variables` comes from conftest — verify.
        res = timeseries.get_datapoints(name="constant",
                                        start=0,
                                        limit=1,
                                        api_key=unset_config_variables[0],
                                        project=unset_config_variables[1])
        assert len(res.to_json().get("datapoints")) == 1

    def test_get_dps_with_config_variables_from_argument(self, unset_config_variables):
        res = timeseries.get_datapoints(name="constant",
                                        start=1522188000000,
                                        end=1522620000000,
                                        api_key=unset_config_variables[0],
                                        project=unset_config_variables[1])
        assert res
class TestLatest:
    # Smoke test for the latest-datapoint endpoint and its export formats.
    def test_get_latest(self):
        from cognite.v05.dto import LatestDatapointResponse

        response = timeseries.get_latest("constant")
        assert isinstance(response, LatestDatapointResponse)
        assert isinstance(response.to_ndarray(), np.ndarray)
        assert isinstance(response.to_pandas(), pd.DataFrame)
        assert isinstance(response.to_json(), dict)
class TestDatapointsFrame:
    # Tests for the DataFrame-returning aggregate endpoint; uses the
    # pre-existing "constant" series with 1-minute average aggregates.

    @pytest.fixture(scope="class", params=dps_params[:2])
    def get_datapoints_frame_response_obj(self, request):
        # Only the first two param sets: protobuf does not apply to frames.
        yield timeseries.get_datapoints_frame(
            time_series=["constant"],
            start=request.param["start"],
            end=request.param["end"],
            aggregates=["avg"],
            granularity="1m",
        )

    def test_get_dps_frame_output_format(self, get_datapoints_frame_response_obj):
        assert isinstance(get_datapoints_frame_response_obj, pd.DataFrame)

    def test_get_dps_frame_correctly_spaced(self, get_datapoints_frame_response_obj):
        # Timestamps must be strictly changing and on the 1m grid (milliseconds).
        timestamps = get_datapoints_frame_response_obj.timestamp.values
        deltas = np.diff(timestamps, 1)
        assert (deltas != 0).all()
        assert (deltas % 60000 == 0).all()

    def test_get_dps_frame_with_limit(self):
        df = timeseries.get_datapoints_frame(
            time_series=["constant"], aggregates=["avg"], granularity="1m", start=0, limit=1
        )
        assert df.shape[0] == 1

    def test_get_dps_frame_with_limit_with_config_values_from_argument(self, unset_config_variables):
        # NOTE(review): `unset_config_variables` comes from conftest — verify.
        df = timeseries.get_datapoints_frame(
            time_series=["constant"],
            aggregates=["avg"],
            granularity="1m",
            start=0,
            limit=1,
            api_key=unset_config_variables[0],
            project=unset_config_variables[1]
        )
        assert df.shape[0] == 1

    def test_get_dps_frame_with_config_values_from_argument(self, unset_config_variables):
        res = timeseries.get_datapoints_frame(
            time_series=["constant"],
            start=1522188000000,
            end=1522620000000,
            aggregates=["avg"],
            granularity="1m",
            api_key=unset_config_variables[0],
            project=unset_config_variables[1]
        )
        assert isinstance(res, pd.DataFrame)
class TestMultiTimeseriesDatapoints:
    # Tests for fetching datapoints for several series in one call, plus the
    # request-splitting behaviour of multi-tag posting.

    @pytest.fixture(scope="class", params=dps_params[:2])
    def get_multi_time_series_dps_response_obj(self, request):
        from cognite.v05.dto import DatapointsQuery

        # Per-query granularity (30s on "sinus") overrides the call-level 60s.
        dq1 = DatapointsQuery("constant")
        dq2 = DatapointsQuery("sinus", aggregates=["avg"], granularity="30s")
        yield list(
            timeseries.get_multi_time_series_datapoints(
                datapoints_queries=[dq1, dq2],
                start=request.param["start"],
                end=request.param["end"],
                aggregates=["avg"],
                granularity="60s",
            )
        )

    def test_post_multitag_datapoints(self):
        from cognite.v05 import dto
        from cognite.v05.dto import TimeseriesWithDatapoints
        from unittest import mock
        import cognite._utils as utils

        timeseries_with_too_many_datapoints = TimeseriesWithDatapoints(
            name="test", datapoints=[dto.Datapoint(x, x) for x in range(100001)]
        )
        timeseries_with_99999_datapoints = TimeseriesWithDatapoints(
            name="test", datapoints=[dto.Datapoint(x, x) for x in range(99999)]
        )
        # 100001 points exceed one request; expect the post to be split in two.
        with mock.patch.object(utils, "post_request") as post_request_mock:
            post_request_mock: mock.MagicMock = post_request_mock
            timeseries.post_multi_tag_datapoints([timeseries_with_too_many_datapoints])
            assert post_request_mock.call_count == 2
        # NOTE(review): 99999 + 100001 points also yield exactly two requests —
        # presumably chunks are packed across series up to the limit; confirm
        # against post_multi_tag_datapoints' batching logic.
        with mock.patch.object(utils, "post_request") as post_request_mock:
            post_request_mock: mock.MagicMock = post_request_mock
            timeseries.post_multi_tag_datapoints(
                [timeseries_with_99999_datapoints, timeseries_with_too_many_datapoints]
            )
            assert post_request_mock.call_count == 2

    def test_get_multi_time_series_dps_output_format(self, get_multi_time_series_dps_response_obj):
        from cognite.v05.dto import DatapointsResponse

        assert isinstance(get_multi_time_series_dps_response_obj, list)
        for dpr in get_multi_time_series_dps_response_obj:
            assert isinstance(dpr, DatapointsResponse)

    def test_get_multi_time_series_dps_response_length(self, get_multi_time_series_dps_response_obj):
        # One response per query.
        assert len(list(get_multi_time_series_dps_response_obj)) == 2

    def test_get_multi_timeseries_dps_correctly_spaced(self, get_multi_time_series_dps_response_obj):
        # First query uses the call-level 60s grid, second its own 30s grid.
        m = list(get_multi_time_series_dps_response_obj)
        timestamps = m[0].to_pandas().timestamp.values
        deltas = np.diff(timestamps, 1)
        assert (deltas != 0).all()
        assert (deltas % 60000 == 0).all()
        timestamps = m[1].to_pandas().timestamp.values
        deltas = np.diff(timestamps, 1)
        assert (deltas != 0).all()
        assert (deltas % 30000 == 0).all()
def test_split_TimeseriesWithDatapoints_if_over_limit():
    # Pure unit test (no network): splitting 1000 datapoints with a limit of
    # 100 must yield 10 chunks; a limit equal to the size must yield 1 chunk.
    from cognite.v05.dto import TimeseriesWithDatapoints
    from cognite.v05.dto import Datapoint
    from cognite.v05.timeseries import _split_TimeseriesWithDatapoints_if_over_limit
    from typing import List

    timeseries_with_datapoints_over_limit: TimeseriesWithDatapoints = TimeseriesWithDatapoints(
        name="test", datapoints=[Datapoint(x, x) for x in range(1000)]
    )

    result: List[TimeseriesWithDatapoints] = _split_TimeseriesWithDatapoints_if_over_limit(
        timeseries_with_datapoints_over_limit, 100
    )

    assert isinstance(result[0], TimeseriesWithDatapoints)
    assert len(result) == 10

    result = _split_TimeseriesWithDatapoints_if_over_limit(timeseries_with_datapoints_over_limit, 1000)

    assert isinstance(result[0], TimeseriesWithDatapoints)
    assert len(result) == 1
|
{"hexsha": "8ed593491f4e3fb85a19dbcb309143356d7ebe3d", "size": 10914, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/v05/test_timeseries.py", "max_stars_repo_name": "boyeah/cognite-sdk-python", "max_stars_repo_head_hexsha": "39abf5c98d758c59609cb33f5f3e2c009712005d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/v05/test_timeseries.py", "max_issues_repo_name": "boyeah/cognite-sdk-python", "max_issues_repo_head_hexsha": "39abf5c98d758c59609cb33f5f3e2c009712005d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/v05/test_timeseries.py", "max_forks_repo_name": "boyeah/cognite-sdk-python", "max_forks_repo_head_hexsha": "39abf5c98d758c59609cb33f5f3e2c009712005d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5434782609, "max_line_length": 108, "alphanum_fraction": 0.6762873374, "include": true, "reason": "import numpy", "num_tokens": 2498}
|
// Copyright (C) 2015, Pawel Tomulik <ptomulik@meil.pw.edu.pl>
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_TEST_MODULE test_txpl_vm_object_find
#include <txpl/test_config.hpp>
#include <boost/test/unit_test.hpp>
#ifndef TXPL_TEST_SKIP_VM_OBJECT_FIND
#include <txpl/vm/object_find.hpp>
#include <txpl/vm/value.hpp>
#include <txpl/vm/basic_types.hpp>
#include <boost/variant/get.hpp>
#include <type_traits>
// Captures error messages emitted by object_find into an externally-owned
// string, so tests can assert on the last reported message.
struct error_handler
{
  error_handler(std::string& out) : msg(out) { }
  void operator()(std::string const& text) const { msg = text; }
  std::string& msg;
};
// Exercises txpl::vm::object_find over a nested object tree:
//   ob0 = { v0: 0, ob1: { v1: 1, ob2: { v2: 2, ob3: { v3: 3 } } } }
// Covers: successful lookups of nested objects and leaf values, malformed
// member-name syntax, missing members, and traversal through non-objects.
BOOST_AUTO_TEST_CASE(test__object_find)
{
  using namespace txpl::vm;
  using boost::get;
  typedef basic_types<>::int_type int_type;
  std::string emsg;
  error_handler eh(emsg);
  object<value<> > ob0;
  object<value<> > ob1;
  object<value<> > ob2;
  object<value<> > ob3;
  value<> v0 = int_type{0};
  value<> v1 = int_type{1};
  value<> v2 = int_type{2};
  value<> v3 = int_type{3};
  // Build the tree bottom-up (assignment copies, so inner objects first).
  ob3["v3"] = v3;
  ob2["ob3"] = ob3;
  ob2["v2"] = v2;
  ob1["ob2"] = ob2;
  ob1["v1"] = v1;
  ob0["ob1"] = ob1;
  ob0["v0"] = v0;
  // --- Successful lookups of nested objects at increasing depth ---
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(object_find(ob0, "ob0", "ob1", val, eh));
    BOOST_CHECK_NO_THROW(get<object<value<> > >(val));
    BOOST_CHECK_EQUAL(emsg, "");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(object_find(ob0, "ob0", "ob1.ob2", val, eh));
    BOOST_CHECK_NO_THROW(get<object<value<> > >(val));
    BOOST_CHECK_EQUAL(emsg, "");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(object_find(ob0, "ob0", "ob1.ob2.ob3", val, eh));
    BOOST_CHECK_NO_THROW(get<object<value<> > >(val));
    BOOST_CHECK_EQUAL(emsg, "");
  }
  // --- Successful lookups of leaf values at increasing depth ---
  // (the local `value<> v;` in these scopes is unused)
  {
    value<> val;
    value<> v;
    int_type x = int_type{654};
    emsg = "";
    BOOST_CHECK(object_find(ob0, "ob0", "v0", val, eh));
    BOOST_CHECK_NO_THROW(x = get<int_type>(val));
    BOOST_CHECK(x == int_type{0});
    BOOST_CHECK_EQUAL(emsg, "");
  }
  {
    value<> val;
    value<> v;
    int_type x = int_type{654};
    emsg = "";
    BOOST_CHECK(object_find(ob0, "ob0", "ob1.v1", val, eh));
    BOOST_CHECK_NO_THROW(x = get<int_type>(val));
    BOOST_CHECK(x == int_type{1});
    BOOST_CHECK_EQUAL(emsg, "");
  }
  {
    value<> val;
    value<> v;
    emsg = "";
    int_type x = int_type{654};
    BOOST_CHECK(object_find(ob0, "ob0", "ob1.ob2.v2", val, eh));
    BOOST_CHECK_NO_THROW(x = get<int_type>(val));
    BOOST_CHECK(x == int_type{2});
    BOOST_CHECK_EQUAL(emsg, "");
  }
  {
    value<> val;
    value<> v;
    emsg = "";
    int_type x = int_type{654};
    BOOST_CHECK(object_find(ob0, "ob0", "ob1.ob2.ob3.v3", val, eh));
    BOOST_CHECK_NO_THROW(x = get<int_type>(val));
    BOOST_CHECK(x == int_type{3});
    BOOST_CHECK_EQUAL(emsg, "");
  }
  // Search can also start from a subtree (here ob2, labeled "ob1.ob2").
  {
    value<> val;
    value<> v;
    emsg = "";
    int_type x = int_type{654};
    BOOST_CHECK(object_find(ob2, "ob1.ob2", "ob3.v3", val, eh));
    BOOST_CHECK_NO_THROW(x = get<int_type>(val));
    BOOST_CHECK(x == int_type{3});
    BOOST_CHECK_EQUAL(emsg, "");
  }
  // --- Malformed member-name syntax: empty names and stray dots ---
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "", val, eh));
    BOOST_CHECK_EQUAL(emsg, "empty names not allowed");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", ".", val, eh));
    BOOST_CHECK_EQUAL(emsg, "malformed member name .");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", ".ob1", val, eh));
    BOOST_CHECK_EQUAL(emsg, "malformed member name .ob1");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "ob1.", val, eh));
    BOOST_CHECK_EQUAL(emsg, "malformed member name ob1.");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", ".ob1.", val, eh));
    BOOST_CHECK_EQUAL(emsg, "malformed member name .ob1.");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", ".ob1.foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "malformed member name .ob1.foo");
  }
  // --- Missing members: the error names the deepest object reached ---
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0 has no member named foo");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "ob1.foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0.ob1 has no member named foo");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "ob1.foo.ob3", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0.ob1 has no member named foo");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "ob1.ob2.foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0.ob1.ob2 has no member named foo");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "ob1.ob2.ob3.foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0.ob1.ob2.ob3 has no member named foo");
  }
  // --- Traversal through a leaf value (not an object) must fail ---
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "v0.foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0.v0 is not an object");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "ob1.v1.foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0.ob1.v1 is not an object");
  }
  {
    value<> val;
    emsg = "";
    BOOST_CHECK(!object_find(ob0, "ob0", "ob1.ob2.v2.foo", val, eh));
    BOOST_CHECK_EQUAL(emsg, "ob0.ob1.ob2.v2 is not an object");
  }
}
#else
// Placeholder so the test module is non-empty when the real test is skipped
// via TXPL_TEST_SKIP_VM_OBJECT_FIND.
BOOST_AUTO_TEST_CASE(dummy)
{
  BOOST_CHECK(true);
}
#endif
|
{"hexsha": "0d0c0bb74e647dcbd37e4803c4bc619b1e418c75", "size": 5524, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/txpl/vm/object_find_test.cpp", "max_stars_repo_name": "ptomulik/txpl", "max_stars_repo_head_hexsha": "109b5847abe0d46c598ada46f411f98ebe8dc4c8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/txpl/vm/object_find_test.cpp", "max_issues_repo_name": "ptomulik/txpl", "max_issues_repo_head_hexsha": "109b5847abe0d46c598ada46f411f98ebe8dc4c8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 14.0, "max_issues_repo_issues_event_min_datetime": "2015-03-02T14:02:32.000Z", "max_issues_repo_issues_event_max_datetime": "2015-05-17T21:50:30.000Z", "max_forks_repo_path": "test/txpl/vm/object_find_test.cpp", "max_forks_repo_name": "ptomulik/txpl", "max_forks_repo_head_hexsha": "109b5847abe0d46c598ada46f411f98ebe8dc4c8", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5511111111, "max_line_length": 71, "alphanum_fraction": 0.5990224475, "num_tokens": 1758}
|
import os
import argparse
from io import BytesIO
from tqdm.auto import tqdm
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, Subset
from torchvision import transforms
from PIL import Image
import lmdb
from torch_tools.utils import numerical_order, wrap_with_tqdm
def _filename(path):
return os.path.basename(path).split('.')[0]
def imagenet_transform(size):
    """Standard ImageNet evaluation preprocessing: resize the short side to 256,
    center-crop to `size` x `size`, convert to tensor, normalize with the usual
    ImageNet channel statistics."""
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    steps = [
        transforms.Resize(256),
        transforms.CenterCrop([size, size]),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(steps)
def adaptive_image_resize(x, h, w):
    """Resize PIL image `x` to height `h` if it is wider than tall, else to width `w`.

    (`transforms.Resize` with a scalar resizes the shorter side.)
    """
    target = h if x.size[0] > x.size[1] else w
    return transforms.Resize(target)(x)
class UnannotatedDataset(Dataset):
    """Dataset of images found recursively under `root_dir`, without labels.

    Files within each directory are ordered numerically (if `numerical_sort`)
    or lexicographically; images are optionally converted to RGB and passed
    through `transform` (default: ToTensor rescaled to [-1, 1]).
    """
    def __init__(self, root_dir, numerical_sort=False, force_rgb=True,
                 transform=transforms.Compose(
                     [
                         transforms.ToTensor(),
                         lambda x: 2 * x - 1
                     ])):
        self.img_files = []
        for dirpath, _, filenames in os.walk(root_dir):
            ordered = numerical_order(filenames) if numerical_sort else sorted(filenames)
            self.img_files.extend(
                os.path.join(dirpath, name) for name in ordered
                if UnannotatedDataset.file_is_img(name))
        self.transform = transform
        self.force_rgb = force_rgb

    @staticmethod
    def file_is_img(name):
        """True when `name` carries one of the recognized image extensions."""
        ext = os.path.basename(name).split('.')[-1]
        return ext in ['jpg', 'jpeg', 'png', 'webp', 'JPEG']

    def align_names(self, target_names):
        """Reorder `img_files` to follow `target_names`, matching by file stem.

        Stems present in `target_names` but missing here are reported and skipped.
        """
        by_stem = {_filename(f): f for f in self.img_files}
        aligned = []
        for name in target_names:
            try:
                aligned.append(by_stem[_filename(name)])
            except KeyError:
                print('names mismatch: absent {}'.format(_filename(name)))
        self.img_files = aligned

    def __len__(self):
        return len(self.img_files)

    def __getitem__(self, item):
        img = Image.open(self.img_files[item])
        if self.force_rgb:
            img = img.convert('RGB')
        if self.transform is not None:
            return self.transform(img)
        return img
class LabeledDatasetImagesExtractor(Dataset):
    """Wrap a labeled dataset, yielding only the tuple field at `img_field`."""
    def __init__(self, ds, img_field=0):
        self.source = ds
        self.img_field = img_field

    def __len__(self):
        return len(self.source)

    def __getitem__(self, item):
        sample = self.source[item]
        return sample[self.img_field]
class DatasetLabelWrapper(Dataset):
    """Pair sample i of `ds` with `label[i]`, optionally transforming the sample."""
    def __init__(self, ds, label, transform=None):
        self.source = ds
        self.label = label
        self.transform = transform

    def __len__(self):
        return len(self.source)

    def __getitem__(self, item):
        sample = self.source[item]
        if self.transform is not None:
            sample = self.transform(sample)
        return (sample, self.label[item])
class FilteredDataset(Dataset):
    """Subset of `source` keeping indices i where filterer(i, sample) is in `target`.

    By default filters on the sample's second tuple element (its label).
    """
    def __init__(self, source, filterer=lambda i, s: s[1], target=[], verbose=True):
        self.source = source
        wanted = target if isinstance(target, list) else [target]
        self.indices = [i for i, s in wrap_with_tqdm(enumerate(source), verbose)
                        if filterer(i, s) in wanted]

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, index):
        return self.source[self.indices[index]]
class TransformedDataset(Dataset):
    """Apply `transform` to the image component of each sample of `source`.

    Tuple samples: transform element `img_index`, pass the other element through.
    Non-tuple samples: transform the whole sample.
    """
    def __init__(self, source, transform, img_index=0):
        self.source = source
        self.transform = transform
        self.img_index = img_index

    def __len__(self):
        return len(self.source)

    def __getitem__(self, index):
        sample = self.source[index]
        if not isinstance(sample, tuple):
            return self.transform(sample)
        return self.transform(sample[self.img_index]), sample[1 - self.img_index]
class TensorsDataset(Dataset):
    """Dataset over the '.pt' tensor files directly inside `source_dir`.

    Tensors are loaded lazily, one file per __getitem__ call.
    """
    def __init__(self, source_dir):
        self.source_files = [os.path.join(source_dir, name)
                             for name in os.listdir(source_dir)
                             if name.endswith('.pt')]

    def __len__(self):
        return len(self.source_files)

    def __getitem__(self, index):
        return torch.load(self.source_files[index])
class TensorDataset(Dataset):
    """Dataset over the rows (dim 0) of a single tensor stored at `source`.

    Parameters:
        source: path or file-like object accepted by `torch.load`.
        device: target device for the loaded tensor. The previous revision
            accepted this argument but silently ignored it (bug); it is now
            forwarded to `torch.load` as `map_location`.
    """
    def __init__(self, source, device='cpu'):
        # Bug fix: honor `device` instead of always loading to the saved location.
        self.data = torch.load(source, map_location=device)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        return self.data[index]
class LMDBDataset(Dataset):
    """Images stored in an LMDB database, keyed by 8-digit zero-padded index.

    The database must also contain a 'length' entry with the image count.
    Each image is decoded with PIL and passed through `transform`
    (default: ToTensor rescaled to [-1, 1]).
    """
    def __init__(self, path,
                 transform=transforms.Compose(
                     [transforms.ToTensor(), lambda x: 2 * x - 1])
                 ):
        self.env = lmdb.open(
            path,
            max_readers=32,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
        )
        if not self.env:
            raise IOError('Cannot open lmdb dataset', path)
        with self.env.begin(write=False) as txn:
            raw_length = txn.get('length'.encode('utf-8'))
            self.length = int(raw_length.decode('utf-8'))
        self.transform = transform

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        index = int(index)
        with self.env.begin(write=False) as txn:
            key = f'{str(index).zfill(8)}'.encode('utf-8')
            img_bytes = txn.get(key)
        img = Image.open(BytesIO(img_bytes))
        if self.transform is not None:
            img = self.transform(img)
        return img
class RGBDataset(Dataset):
    """Expand single-channel (1, H, W) samples of `source_dataset` to three
    channels by repetition; samples with other channel counts pass through."""
    def __init__(self, source_dataset):
        super(RGBDataset, self).__init__()
        self.source = source_dataset

    def __len__(self):
        return len(self.source)

    def __getitem__(self, index):
        sample = self.source[index]
        if sample.shape[0] != 1:
            return sample
        return sample.repeat([3, 1, 1])
def directory_rgb_iterator(path, batch_size=32, total=None):
    """DataLoader over the images under `path` as RGB tensors in [0, 1].

    If `total` is given, only the first min(total, len(dataset)) images are used.
    """
    to_rgb_tensor = transforms.Compose([
        lambda img: img.convert('RGB'),
        transforms.ToTensor(),
    ])
    ds = UnannotatedDataset(path, transform=to_rgb_tensor)
    if total is not None:
        ds = Subset(ds, list(range(min(total, len(ds)))))
    return DataLoader(ds, batch_size=batch_size)
def make_lmdb(args):
    """Encode every image under `args.data` as PNG into an LMDB database at `args.out`.

    Keys are 8-digit zero-padded indices; a final 'length' entry records the count.
    """
    dataset = UnannotatedDataset(args.data, transform=None)
    with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
        for idx, img in enumerate(tqdm(dataset)):
            encoded = BytesIO()
            img.save(encoded, format='png')
            with env.begin(write=True) as txn:
                txn.put(f'{str(idx).zfill(8)}'.encode('utf-8'), encoded.getvalue())
        with env.begin(write=True) as txn:
            txn.put('length'.encode('utf-8'), str(len(dataset)).encode('utf-8'))
def make_tensor(args):
    """Stack every image under `args.data` into one uint8 NCHW tensor saved at `args.out`.

    All images must share the shape of the first one (HWC uint8).
    """
    dataset = UnannotatedDataset(args.data, transform=None)
    first_shape = np.array(dataset[0]).shape
    stacked = np.empty([len(dataset)] + list(first_shape), dtype=np.uint8)
    for idx, img in enumerate(tqdm(dataset)):
        stacked[idx] = np.array(img)
    torch.save(torch.from_numpy(stacked).permute(0, 3, 1, 2), args.out)
if __name__ == '__main__':
    # CLI entry point: build either an LMDB database or a single stacked
    # tensor from a directory of images.
    parser = argparse.ArgumentParser(description='LMDB dataset creation')
    parser.add_argument('--data', type=str, help='path to the image dataset')
    parser.add_argument('--out', type=str, help='filename of the result lmdb dataset')
    # NOTE(review): --n_worker is parsed but never used by make_lmdb/make_tensor.
    parser.add_argument('--n_worker', type=int, default=8,
                        help='number of workers for preparing dataset')
    parser.add_argument('command', choices=['make_lmdb', 'make_tensor'])
    args = parser.parse_args()
    # Dispatch to the module-level function named by the positional command.
    func = locals()[args.command]
    func(args)
|
{"hexsha": "7e382c1a02591ac00096109f2d051061a1f0e6ca", "size": 7916, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_tools/data.py", "max_stars_repo_name": "anon-auth-2022/i2i_synth", "max_stars_repo_head_hexsha": "e5ef30c57f336240bd1e14f4008cfbf455c52069", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "torch_tools/data.py", "max_issues_repo_name": "anon-auth-2022/i2i_synth", "max_issues_repo_head_hexsha": "e5ef30c57f336240bd1e14f4008cfbf455c52069", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torch_tools/data.py", "max_forks_repo_name": "anon-auth-2022/i2i_synth", "max_forks_repo_head_hexsha": "e5ef30c57f336240bd1e14f4008cfbf455c52069", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8716981132, "max_line_length": 89, "alphanum_fraction": 0.6078827691, "include": true, "reason": "import numpy", "num_tokens": 1804}
|
[STATEMENT]
lemma has_field_derivative_bernpoly:
"(bernpoly (Suc n) has_field_derivative
(of_nat (n + 1) * bernpoly n x :: 'a :: real_normed_field)) (at x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (bernpoly (Suc n) has_field_derivative of_nat (n + 1) * bernpoly n x) (at x)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (bernpoly (Suc n) has_field_derivative of_nat (n + 1) * bernpoly n x) (at x)
[PROOF STEP]
have "(bernpoly (Suc n) has_field_derivative
(\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) *
of_real (bernoulli k)))) (at x)" (is "(_ has_field_derivative ?D) _")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (bernpoly (Suc n) has_field_derivative (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k)))) (at x)
[PROOF STEP]
unfolding bernpoly_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. \<Sum>k\<le>Suc n. of_nat (Suc n choose k) * of_real (bernoulli k) * x ^ (Suc n - k)) has_field_derivative (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k)))) (at x)
[PROOF STEP]
by (rule DERIV_cong) (fast intro!: derivative_intros, simp)
[PROOF STATE]
proof (state)
this:
(bernpoly (Suc n) has_field_derivative (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k)))) (at x)
goal (1 subgoal):
1. (bernpoly (Suc n) has_field_derivative of_nat (n + 1) * bernpoly n x) (at x)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(bernpoly (Suc n) has_field_derivative (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k)))) (at x)
goal (1 subgoal):
1. (bernpoly (Suc n) has_field_derivative of_nat (n + 1) * bernpoly n x) (at x)
[PROOF STEP]
have "?D = of_nat (n + 1) * bernpoly n x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k))) = of_nat (n + 1) * bernpoly n x
[PROOF STEP]
unfolding bernpoly_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k))) = of_nat (n + 1) * (\<Sum>k\<le>n. of_nat (n choose k) * of_real (bernoulli k) * x ^ (n - k))
[PROOF STEP]
by (subst sum_distrib_left, intro sum.cong refl, subst of_nat_binomial_Suc) simp_all
[PROOF STATE]
proof (state)
this:
(\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k))) = of_nat (n + 1) * bernpoly n x
goal (1 subgoal):
1. (bernpoly (Suc n) has_field_derivative of_nat (n + 1) * bernpoly n x) (at x)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(bernpoly (Suc n) has_field_derivative (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k)))) (at x)
(\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k))) = of_nat (n + 1) * bernpoly n x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(bernpoly (Suc n) has_field_derivative (\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k)))) (at x)
(\<Sum>k\<le>n. of_nat (Suc n - k) * x ^ (n - k) * (of_nat (Suc n choose k) * of_real (bernoulli k))) = of_nat (n + 1) * bernpoly n x
goal (1 subgoal):
1. (bernpoly (Suc n) has_field_derivative of_nat (n + 1) * bernpoly n x) (at x)
[PROOF STEP]
by (auto simp del: of_nat_Suc One_nat_def)
[PROOF STATE]
proof (state)
this:
(bernpoly (Suc n) has_field_derivative of_nat (n + 1) * bernpoly n x) (at x)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1738, "file": "Bernoulli_Bernoulli", "length": 12}
|
import tensorflow as tf
import tensorflow.contrib as tc
import pickle
import numpy as np
class VNect():
    """TensorFlow 1.x graph for the VNect 3D human-pose network.

    A ResNet-50-style backbone followed by the VNect head, which regresses a
    2D joint heatmap plus per-joint x/y/z location maps. The graph is built
    eagerly in the constructor; feed images through `input_holder`
    (N, input_size, input_size, 3) and read `heapmap`, `x_heatmap`,
    `y_heatmap`, `z_heatmap` (21 channels each).
    """

    def __init__(self, input_size, is_training=False):
        # `is_training` only affects batch-norm statistics in the head.
        self.is_training = is_training
        self.input_holder = tf.placeholder(dtype=tf.float32,
                                           shape=(None, input_size, input_size, 3))
        self._create_network()

    def _create_network(self):
        """Assemble backbone + head.

        Bug fixes over the previous revision (copy-paste errors):
        * res2c's branch now chains res2c_branch2a -> res2c_branch2b ->
          res2c_branch2c (it previously reused the res2b tensors, leaving
          res2c's own convolutions dangling);
        * the res2c ReLU is applied to the res2c sum (it was applied to res2b);
        * res3d's identity shortcut adds res3c (it added res3b, skipping res3c).
        """
        # Stem
        self.conv1 = tc.layers.conv2d(self.input_holder, kernel_size=7, num_outputs=64, stride=2, scope='conv1')
        self.pool1 = tc.layers.max_pool2d(self.conv1, kernel_size=3, padding='same', scope='pool1')
        # Residual block 2a (projection shortcut)
        self.res2a_branch2a = tc.layers.conv2d(self.pool1, kernel_size=1, num_outputs=64, scope='res2a_branch2a')
        self.res2a_branch2b = tc.layers.conv2d(self.res2a_branch2a, kernel_size=3, num_outputs=64, scope='res2a_branch2b')
        self.res2a_branch2c = tc.layers.conv2d(self.res2a_branch2b, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2a_branch2c')
        self.res2a_branch1 = tc.layers.conv2d(self.pool1, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2a_branch1')
        self.res2a = tf.add(self.res2a_branch2c, self.res2a_branch1, name='res2a_add')
        self.res2a = tf.nn.relu(self.res2a, name='res2a')
        # Residual block 2b
        self.res2b_branch2a = tc.layers.conv2d(self.res2a, kernel_size=1, num_outputs=64, scope='res2b_branch2a')
        self.res2b_branch2b = tc.layers.conv2d(self.res2b_branch2a, kernel_size=3, num_outputs=64, scope='res2b_branch2b')
        self.res2b_branch2c = tc.layers.conv2d(self.res2b_branch2b, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2b_branch2c')
        self.res2b = tf.add(self.res2b_branch2c, self.res2a, name='res2b_add')
        self.res2b = tf.nn.relu(self.res2b, name='res2b')
        # Residual block 2c
        self.res2c_branch2a = tc.layers.conv2d(self.res2b, kernel_size=1, num_outputs=64, scope='res2c_branch2a')
        # FIX: chain res2c's own branch tensors (previously used res2b_branch2a/2b).
        self.res2c_branch2b = tc.layers.conv2d(self.res2c_branch2a, kernel_size=3, num_outputs=64, scope='res2c_branch2b')
        self.res2c_branch2c = tc.layers.conv2d(self.res2c_branch2b, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2c_branch2c')
        self.res2c = tf.add(self.res2c_branch2c, self.res2b, name='res2c_add')
        # FIX: ReLU the res2c sum (previously tf.nn.relu(self.res2b)).
        self.res2c = tf.nn.relu(self.res2c, name='res2c')
        # Residual block 3a (projection shortcut, stride 2)
        self.res3a_branch2a = tc.layers.conv2d(self.res2c, kernel_size=1, num_outputs=128, stride=2, scope='res3a_branch2a')
        self.res3a_branch2b = tc.layers.conv2d(self.res3a_branch2a, kernel_size=3, num_outputs=128, scope='res3a_branch2b')
        self.res3a_branch2c = tc.layers.conv2d(self.res3a_branch2b, kernel_size=1, num_outputs=512, activation_fn=None, scope='res3a_branch2c')
        self.res3a_branch1 = tc.layers.conv2d(self.res2c, kernel_size=1, num_outputs=512, activation_fn=None, stride=2, scope='res3a_branch1')
        self.res3a = tf.add(self.res3a_branch2c, self.res3a_branch1, name='res3a_add')
        self.res3a = tf.nn.relu(self.res3a, name='res3a')
        # Residual block 3b
        self.res3b_branch2a = tc.layers.conv2d(self.res3a, kernel_size=1, num_outputs=128, scope='res3b_branch2a')
        self.res3b_branch2b = tc.layers.conv2d(self.res3b_branch2a, kernel_size=3, num_outputs=128, scope='res3b_branch2b')
        self.res3b_branch2c = tc.layers.conv2d(self.res3b_branch2b, kernel_size=1, num_outputs=512, activation_fn=None, scope='res3b_branch2c')
        self.res3b = tf.add(self.res3b_branch2c, self.res3a, name='res3b_add')
        self.res3b = tf.nn.relu(self.res3b, name='res3b')
        # Residual block 3c
        self.res3c_branch2a = tc.layers.conv2d(self.res3b, kernel_size=1, num_outputs=128, scope='res3c_branch2a')
        self.res3c_branch2b = tc.layers.conv2d(self.res3c_branch2a, kernel_size=3, num_outputs=128, scope='res3c_branch2b')
        self.res3c_branch2c = tc.layers.conv2d(self.res3c_branch2b, kernel_size=1, num_outputs=512, activation_fn=None, scope='res3c_branch2c')
        self.res3c = tf.add(self.res3c_branch2c, self.res3b, name='res3c_add')
        self.res3c = tf.nn.relu(self.res3c, name='res3c')
        # Residual block 3d
        self.res3d_branch2a = tc.layers.conv2d(self.res3c, kernel_size=1, num_outputs=128, scope='res3d_branch2a')
        self.res3d_branch2b = tc.layers.conv2d(self.res3d_branch2a, kernel_size=3, num_outputs=128, scope='res3d_branch2b')
        self.res3d_branch2c = tc.layers.conv2d(self.res3d_branch2b, kernel_size=1, num_outputs=512, activation_fn=None, scope='res3d_branch2c')
        # FIX: shortcut adds the previous block's output res3c (was res3b).
        self.res3d = tf.add(self.res3d_branch2c, self.res3c, name='res3d_add')
        self.res3d = tf.nn.relu(self.res3d, name='res3d')
        # Residual block 4a (projection shortcut, stride 2)
        self.res4a_branch2a = tc.layers.conv2d(self.res3d, kernel_size=1, num_outputs=256, stride=2, scope='res4a_branch2a')
        self.res4a_branch2b = tc.layers.conv2d(self.res4a_branch2a, kernel_size=3, num_outputs=256, scope='res4a_branch2b')
        self.res4a_branch2c = tc.layers.conv2d(self.res4a_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4a_branch2c')
        self.res4a_branch1 = tc.layers.conv2d(self.res3d, kernel_size=1, num_outputs=1024, activation_fn=None, stride=2, scope='res4a_branch1')
        self.res4a = tf.add(self.res4a_branch2c, self.res4a_branch1, name='res4a_add')
        self.res4a = tf.nn.relu(self.res4a, name='res4a')
        # Residual block 4b
        self.res4b_branch2a = tc.layers.conv2d(self.res4a, kernel_size=1, num_outputs=256, scope='res4b_branch2a')
        self.res4b_branch2b = tc.layers.conv2d(self.res4b_branch2a, kernel_size=3, num_outputs=256, scope='res4b_branch2b')
        self.res4b_branch2c = tc.layers.conv2d(self.res4b_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4b_branch2c')
        self.res4b = tf.add(self.res4b_branch2c, self.res4a, name='res4b_add')
        self.res4b = tf.nn.relu(self.res4b, name='res4b')
        # Residual block 4c
        self.res4c_branch2a = tc.layers.conv2d(self.res4b, kernel_size=1, num_outputs=256, scope='res4c_branch2a')
        self.res4c_branch2b = tc.layers.conv2d(self.res4c_branch2a, kernel_size=3, num_outputs=256, scope='res4c_branch2b')
        self.res4c_branch2c = tc.layers.conv2d(self.res4c_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4c_branch2c')
        self.res4c = tf.add(self.res4c_branch2c, self.res4b, name='res4c_add')
        self.res4c = tf.nn.relu(self.res4c, name='res4c')
        # Residual block 4d
        self.res4d_branch2a = tc.layers.conv2d(self.res4c, kernel_size=1, num_outputs=256, scope='res4d_branch2a')
        self.res4d_branch2b = tc.layers.conv2d(self.res4d_branch2a, kernel_size=3, num_outputs=256, scope='res4d_branch2b')
        self.res4d_branch2c = tc.layers.conv2d(self.res4d_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4d_branch2c')
        self.res4d = tf.add(self.res4d_branch2c, self.res4c, name='res4d_add')
        self.res4d = tf.nn.relu(self.res4d, name='res4d')
        # Residual block 4e
        self.res4e_branch2a = tc.layers.conv2d(self.res4d, kernel_size=1, num_outputs=256, scope='res4e_branch2a')
        self.res4e_branch2b = tc.layers.conv2d(self.res4e_branch2a, kernel_size=3, num_outputs=256, scope='res4e_branch2b')
        self.res4e_branch2c = tc.layers.conv2d(self.res4e_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4e_branch2c')
        self.res4e = tf.add(self.res4e_branch2c, self.res4d, name='res4e_add')
        self.res4e = tf.nn.relu(self.res4e, name='res4e')
        # Residual block 4f
        self.res4f_branch2a = tc.layers.conv2d(self.res4e, kernel_size=1, num_outputs=256, scope='res4f_branch2a')
        self.res4f_branch2b = tc.layers.conv2d(self.res4f_branch2a, kernel_size=3, num_outputs=256, scope='res4f_branch2b')
        self.res4f_branch2c = tc.layers.conv2d(self.res4f_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4f_branch2c')
        self.res4f = tf.add(self.res4f_branch2c, self.res4e, name='res4f_add')
        self.res4f = tf.nn.relu(self.res4f, name='res4f')
        # Residual block 5a ("_new" scopes: retrained VNect-specific layers)
        self.res5a_branch2a_new = tc.layers.conv2d(self.res4f, kernel_size=1, num_outputs=512, scope='res5a_branch2a_new')
        self.res5a_branch2b_new = tc.layers.conv2d(self.res5a_branch2a_new, kernel_size=3, num_outputs=512, scope='res5a_branch2b_new')
        self.res5a_branch2c_new = tc.layers.conv2d(self.res5a_branch2b_new, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res5a_branch2c_new')
        self.res5a_branch1_new = tc.layers.conv2d(self.res4f, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res5a_branch1_new')
        self.res5a = tf.add(self.res5a_branch2c_new, self.res5a_branch1_new, name='res5a_add')
        self.res5a = tf.nn.relu(self.res5a, name='res5a')
        # Residual block 5b (no shortcut; feeds the head)
        self.res5b_branch2a_new = tc.layers.conv2d(self.res5a, kernel_size=1, num_outputs=256, scope='res5b_branch2a_new')
        self.res5b_branch2b_new = tc.layers.conv2d(self.res5b_branch2a_new, kernel_size=3, num_outputs=128, scope='res5b_branch2b_new')
        self.res5b_branch2c_new = tc.layers.conv2d(self.res5b_branch2b_new, kernel_size=1, num_outputs=256, scope='res5b_branch2c_new')
        # Head: transposed convs upsample to heatmap resolution.
        self.res5c_branch1a = tf.layers.conv2d_transpose(self.res5b_branch2c_new, kernel_size=4, filters=63, activation=None, strides=2, padding='same', use_bias=False, name='res5c_branch1a')
        self.res5c_branch2a = tf.layers.conv2d_transpose(self.res5b_branch2c_new, kernel_size=4, filters=128, activation=None, strides=2, padding='same', use_bias=False, name='res5c_branch2a')
        self.bn5c_branch2a = tc.layers.batch_norm(self.res5c_branch2a, scale=True, is_training=self.is_training, scope='bn5c_branch2a')
        self.bn5c_branch2a = tf.nn.relu(self.bn5c_branch2a)
        # 63 channels = 3 stacks of 21 joints: per-joint dx, dy, dz maps.
        self.res5c_delta_x, self.res5c_delta_y, self.res5c_delta_z = tf.split(self.res5c_branch1a, num_or_size_splits=3, axis=3)
        self.res5c_branch1a_sqr = tf.multiply(self.res5c_branch1a, self.res5c_branch1a, name='res5c_branch1a_sqr')
        self.res5c_delta_x_sqr, self.res5c_delta_y_sqr, self.res5c_delta_z_sqr = tf.split(self.res5c_branch1a_sqr, num_or_size_splits=3, axis=3)
        # Per-pixel bone length sqrt(dx^2 + dy^2 + dz^2), used as an extra feature.
        self.res5c_bone_length_sqr = tf.add(tf.add(self.res5c_delta_x_sqr, self.res5c_delta_y_sqr), self.res5c_delta_z_sqr)
        self.res5c_bone_length = tf.sqrt(self.res5c_bone_length_sqr)
        self.res5c_branch2a_feat = tf.concat([self.bn5c_branch2a, self.res5c_delta_x, self.res5c_delta_y, self.res5c_delta_z, self.res5c_bone_length],
                                             axis=3, name='res5c_branch2a_feat')
        self.res5c_branch2b = tc.layers.conv2d(self.res5c_branch2a_feat, kernel_size=3, num_outputs=128, scope='res5c_branch2b')
        self.res5c_branch2c = tf.layers.conv2d(self.res5c_branch2b, kernel_size=1, filters=84, activation=None, use_bias=False, name='res5c_branch2c')
        # 84 channels = 4 stacks of 21: 2D heatmap + x/y/z location maps.
        # 'heapmap' is a historical typo kept for backward compatibility.
        self.heapmap, self.x_heatmap, self.y_heatmap, self.z_heatmap = tf.split(self.res5c_branch2c, num_or_size_splits=4, axis=3)
        self.heatmap = self.heapmap  # correctly-spelled alias (new, additive)

    @property
    def all_vars(self):
        """All global TF variables (backbone + head)."""
        return tf.global_variables()

    def load_weights(self, sess, weight_file):
        """Assign every global variable from a pickled {var_name: ndarray} dict."""
        model_weights = pickle.load(open(weight_file, 'rb'))
        with tf.variable_scope('', reuse=True):
            for variable in tf.global_variables():
                var_name = variable.name.split(':')[0]
                self._assign_weights_from_dict(var_name, model_weights, sess)

    def _assign_weights_from_dict(self, var_name, model_weights, sess):
        """Assign one variable by name and verify the assignment round-trips."""
        with tf.variable_scope('', reuse=True):
            var_tf = tf.get_variable(var_name)
            sess.run(tf.assign(var_tf, model_weights[var_name]))
            np.testing.assert_allclose(var_tf.eval(sess), model_weights[var_name])
if __name__ == '__main__':
    # Smoke test: build the graph, initialize variables, and print the
    # intermediate feature tensor and the four output heatmap stacks.
    model_file = 'vnect.pkl'  # NOTE(review): defined but never loaded here
    model = VNect(368)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        # Dump the graph so it can be inspected in TensorBoard.
        tf_writer = tf.summary.FileWriter(logdir='./', graph=sess.graph)
        sess.run(tf.global_variables_initializer())
        print(model.res5b_branch2c_new)
        print(model.heapmap, model.x_heatmap, model.y_heatmap, model.z_heatmap)
|
{"hexsha": "b75a17fc1d4521188d9394a2261b7f59a496e705", "size": 12461, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/vnect_model.py", "max_stars_repo_name": "rrbarioni/VNect-tensorflow", "max_stars_repo_head_hexsha": "2137172dd61df5f83ce3fbe0cf972950b3cb23f7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/vnect_model.py", "max_issues_repo_name": "rrbarioni/VNect-tensorflow", "max_issues_repo_head_hexsha": "2137172dd61df5f83ce3fbe0cf972950b3cb23f7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/vnect_model.py", "max_forks_repo_name": "rrbarioni/VNect-tensorflow", "max_forks_repo_head_hexsha": "2137172dd61df5f83ce3fbe0cf972950b3cb23f7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 66.6363636364, "max_line_length": 192, "alphanum_fraction": 0.7206484231, "include": true, "reason": "import numpy", "num_tokens": 3863}
|
# Author: Uygar Sumbul, Olga Gliko, Rohan Gala
# Allen Institute
import numpy as np
import keras
import scipy as sp
import scipy.io as sio
from scipy.stats import norm
from keras.layers import Input, Dense, Lambda, Layer, Dropout, BatchNormalization
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.objectives import binary_crossentropy
from keras.callbacks import LearningRateScheduler
from keras.losses import mean_squared_error, mean_absolute_error
from keras.regularizers import l2
from keras.constraints import unit_norm
import tensorflow as tf
import sys
import os
# load data from .mat file containing expression of 6083 highly expressed genes
# and 47 neuropeptide genes in 22,439 neurons along with sample id
# ---- data and hyper-parameters -----------------------------------------
root_folder = "/nas5/peptides"
# Expression matrix (cells x genes) plus per-cell sample ids.
data = sio.loadmat(os.path.join(root_folder, 'mouse_V1_ALM_20180520_6_5byExpression_and_NP18andGPCR29.mat'))
pep = data['pep']
sample_id = data['sample_id']
thisRun = int(sys.argv[2])  # cross-validation fold index (0-based)
foldCount = 13
# NOTE(review): on Python 3 this is float division, so fold boundaries only
# align exactly when the cell count is divisible by foldCount -- confirm.
foldSize = pep.shape[0] / foldCount
heldOutInd = (np.arange(thisRun*foldSize, (thisRun+1)*foldSize)).astype('int')
trainingInd = (np.setdiff1d(np.arange(pep.shape[0]), heldOutInd)).astype('int')
heldOutPep = pep[heldOutInd, :]
pep = pep[trainingInd, :]  # from here on, `pep` holds the training split only
original_dim_pep = pep.shape[1]
intermediate_dim1 = 100  # NOTE(review): defined but unused in this script
# NOTE(review): 50 % 64 evaluates to 50; the modulo looks accidental -- confirm.
intermediate_dim2 = 50%64
bottleneck_dim = int(sys.argv[1])  # latent dimensionality from the CLI
n_epoch1 = 10000
bat_size = 794
dropoutRate1 = 0.8  # NOTE(review): unused; only dropoutRate2 is applied
dropoutRate2 = 0.0
full_train_size = trainingInd.size
bb = int(sys.argv[1])  # NOTE(review): duplicates bottleneck_dim; unused
ff = trainingInd.size  # NOTE(review): duplicates full_train_size; unused
# Precomputed latent codes (z1) of the single autoencoder; the bottleneck z2
# of this model is trained to match them (train and held-out splits).
data = sio.loadmat(os.path.join(root_folder, 'singleAE_6_5byExpression_dim5_run0_iter50K_0.8Dropout_intermediate100_BN_bat956.mat'))
z1 = data['e1']
val_z1 = data['et1']
# Dual autoencoder over the peptide/GPCR genes: four relu encoder layers, a
# linear bottleneck normalized by an affine-free BatchNorm (z2), and a
# mirrored decoder producing the reconstruction xd2.
y = Input(shape=(original_dim_pep,), name='y')
hidden2 = Dropout(dropoutRate2, name='drop2')(y)
hidden2 = Dense(intermediate_dim2, activation='relu', name='dense10')(hidden2)
hidden2 = Dense(intermediate_dim2, activation='relu', name='dense11')(hidden2)
hidden2 = Dense(intermediate_dim2, activation='relu', name='dense12')(hidden2)
hidden2 = Dense(intermediate_dim2, activation='relu', name='dense13')(hidden2)
hidden2 = Dense(bottleneck_dim, activation='linear', name='dense14')(hidden2)
# center=False/scale=False: pure normalization, no learned affine parameters.
z2 = BatchNormalization(name='z2', center=False, scale=False,epsilon=1e-10)(hidden2)
hidden2 = Dense(intermediate_dim2, activation='linear', name='dense15')(z2)
hidden2 = Dense(intermediate_dim2, activation='relu', name='dense16')(hidden2)
hidden2 = Dense(intermediate_dim2, activation='relu', name='dense17')(hidden2)
hidden2 = Dense(intermediate_dim2, activation='relu', name='dense18')(hidden2)
xd2 = Dense(original_dim_pep, activation='relu', name='xd2')(hidden2)
# Two outputs: the reconstruction and the (normalized) latent code.
caeDual = Model(inputs=[y], outputs=[xd2, z2])
def cae1_loss(y_true, y_pred):
    """Reconstruction loss for the autoencoder output: plain mean squared error."""
    mse = mean_squared_error(y_true, y_pred)
    return mse
def loss_latentdim(y_true, y_pred):
    """Latent-alignment loss: MSE between the latent codes, divided by the
    smaller minimum squared singular value of either centered code matrix.

    The denominator grows the loss when either representation is close to
    rank-deficient, discouraging collapsed latent dimensions.
    """
    zz1 = y_true - tf.reduce_mean(y_true, axis=0)  # center target codes per dim
    zz2 = y_pred - tf.reduce_mean(y_pred, axis=0)  # center predicted codes per dim
    s1 = tf.svd(zz1, compute_uv=False)
    mins1 = tf.reduce_min(tf.square(s1))  # smallest squared singular value
    s2 = tf.svd(zz2, compute_uv=False)
    mins2 = tf.reduce_min(tf.square(s2))
    denom = tf.minimum(mins1,mins2)
    C = mean_squared_error(y_true, y_pred)/denom
    return C
# The latent output z2 is pulled toward the reference codes z1 with a heavy
# (100x) weight relative to the reconstruction loss.
caeDual.compile(optimizer='adam', loss={'xd2': cae1_loss, 'z2': loss_latentdim}, loss_weights={'xd2': 1., 'z2': 100.})
history = caeDual.fit({'y':pep}, {'xd2' : pep, 'z2' : z1}, batch_size=bat_size, epochs=n_epoch1,
                      validation_data=({'y':heldOutPep}, {'xd2':heldOutPep, 'z2':val_z1}))
# Latent codes for the training cells (e2) and held-out cells (et2);
# predict() returns [reconstruction, latent], so index 1 is the code.
result = caeDual.predict(pep)
e2 = result[1]
result = caeDual.predict(heldOutPep)
et2 = result[1]
# Keep every 10th epoch of the reconstruction-loss curves to shrink the file.
val_xd2_loss = history.history['val_xd2_loss']
xd2_loss = history.history['xd2_loss']
val_xd2_loss = val_xd2_loss[::10]
xd2_loss = xd2_loss[::10]
fileName = os.path.join(root_folder, 'dualAE_inputZ1_6_5byExpression_and_NP18GPCR29_dim' + sys.argv[1]
                        + '_run' + sys.argv[2] + '_iter10K_loss1_100_0.0Dropout_intermediate_50_bat794_neuronsOnly.mat')
sio.savemat(fileName, {'e2':e2, 'et2':et2, 'sample_id':sample_id, 'val_xd2_loss':val_xd2_loss, 'xd2_loss':xd2_loss})
|
{"hexsha": "ad7fa1c3fe83b81bd251ae0a915adf89aab21cf7", "size": 4833, "ext": "py", "lang": "Python", "max_stars_repo_path": "dualAE_inputZ1_47genes.py", "max_stars_repo_name": "elifesciences-publications/PeptidergicNetworks", "max_stars_repo_head_hexsha": "1e7c2c56dc57c789272282aff32559c7cc51f23f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-08-15T10:29:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-14T16:15:16.000Z", "max_issues_repo_path": "dualAE_inputZ1_47genes.py", "max_issues_repo_name": "elifesciences-publications/PeptidergicNetworks", "max_issues_repo_head_hexsha": "1e7c2c56dc57c789272282aff32559c7cc51f23f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dualAE_inputZ1_47genes.py", "max_forks_repo_name": "elifesciences-publications/PeptidergicNetworks", "max_forks_repo_head_hexsha": "1e7c2c56dc57c789272282aff32559c7cc51f23f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-26T16:18:12.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-26T16:18:12.000Z", "avg_line_length": 49.824742268, "max_line_length": 140, "alphanum_fraction": 0.643078833, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1321}
|
from numba import jit
''' Lecture1 '''
@jit
def lec1_first_forward(n, dx, f, df_approx_f):
    """First-order forward difference: df[i] = (f[i+1] - f[i]) / dx for i = 1..n-1.

    Writes into `df_approx_f`; index 0 is left untouched.
    """
    for idx in range(1, n):
        df_approx_f[idx] = (f[idx + 1] - f[idx]) / dx
    return
@jit
def lec1_second_central(n, dx, f, df_approx_c):
    """Second-order central difference: df[i] = (f[i+1] - f[i-1]) / (2 dx).

    Writes into `df_approx_c`; index 0 is left untouched.
    """
    for idx in range(1, n):
        df_approx_c[idx] = (f[idx + 1] - f[idx - 1]) / dx / 2
    return
''' Lecture2 '''
@jit
def lec2_first_func(dx, f):
    """Trapezoidal rule on a single interval: dx * (f[0] + f[1]) / 2."""
    return (0.5 * f[0] + 0.5 * f[1]) * dx
@jit
def lec2_second_func(dx, f):
    """Left-endpoint rectangle rule on a single interval: dx * f[0]."""
    return (1 * f[0]) * dx
''' Lecture3 '''
@jit
def lec3_euler(n, dt, f_approx):
    """Explicit Euler for f' = f, filling f_approx[2:n] starting from f_approx[1]."""
    prev = f_approx[1]
    for step in range(2, n):
        f_approx[step] = prev + dt * (1.0 * prev)
        prev = f_approx[step]
    return
@jit
def lec3_ab(n, dt, f_approx):
    """Two-step Adams-Bashforth for f' = f, seeded by f_approx[0] and f_approx[1]."""
    prev1 = f_approx[1]
    prev2 = f_approx[0]
    for step in range(2, n):
        f_approx[step] = prev1 + dt * (1.5 * prev1 - 0.5 * prev2)
        prev2 = prev1
        prev1 = f_approx[step]
    return
@jit
def lec3_tra(n, dt, f_approx):
    # Intended trapezoidal (Crank-Nicolson) step for f' = f.
    # NOTE(review): the RHS reads f_approx[i] BEFORE writing it this iteration,
    # so it uses whatever value the array held on entry rather than solving the
    # implicit update -- confirm this pre-seeding of f_approx is intentional.
    f1_approx = f_approx[1]
    for i in range(2, n):
        f_approx[i] = f1_approx + dt * (0.5 * f1_approx + 0.5 * f_approx[i])
        f1_approx = f_approx[i]
    return
''' Lecture4 '''
@jit
def lec4_exact(m, dl, c, x, p_exact):
    """Exact solution samples: p(x) = x * (x - 2*dl) * (c/2) at grid nodes 1..m."""
    for node in range(1, m + 1):
        p_exact[node] = x[node] * (x[node] - 2 * dl) * (c / 2)
    return
|
{"hexsha": "c34f819110c4ba382b0b8e58c0ad74f2bed3c61b", "size": 1430, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/func/operation.py", "max_stars_repo_name": "wakky927/Computational-Engineering-B", "max_stars_repo_head_hexsha": "3720d96668a32dc73f38ed0bc8afe4705452de9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-03T09:11:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-03T09:11:35.000Z", "max_issues_repo_path": "src/func/operation.py", "max_issues_repo_name": "wakky927/Computational-Engineering-B", "max_issues_repo_head_hexsha": "3720d96668a32dc73f38ed0bc8afe4705452de9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/func/operation.py", "max_forks_repo_name": "wakky927/Computational-Engineering-B", "max_forks_repo_head_hexsha": "3720d96668a32dc73f38ed0bc8afe4705452de9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.875, "max_line_length": 76, "alphanum_fraction": 0.5538461538, "include": true, "reason": "from numba", "num_tokens": 531}
|
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2004-2007. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTERPROCESS_TEST_SEMAPHORE_TEST_TEMPLATE_HEADER
#define BOOST_INTERPROCESS_TEST_SEMAPHORE_TEST_TEMPLATE_HEADER
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/exceptions.hpp>
#include "boost_interprocess_check.hpp"
#include "util.hpp"
#include <boost/thread/thread.hpp>
#include <boost/thread/xtime.hpp>
#include <iostream>
namespace boost { namespace interprocess { namespace test {
// Functor exercising basic scoped-lock wait semantics for lock type P.
// NOTE(review): the entire test body is commented out in this revision, so
// operator() is currently a no-op; the commented code preserves the intended
// checks (defer_lock construction, lock()/unlock() state transitions).
template <typename P>
struct test_wait
{
   void operator()()
   {/*
      mutex_type interprocess_mutex;
      boost::interprocess::interprocess_condition interprocess_condition;
      // Test the lock's constructors.
      {
         wait_type lock(interprocess_mutex, boost::interprocess::defer_lock);
         BOOST_INTERPROCES_CHECK(!lock);
      }
      wait_type lock(interprocess_mutex);
      BOOST_INTERPROCES_CHECK(lock ? true : false);
      // Test the lock and unlock methods.
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      lock.lock();
      BOOST_INTERPROCES_CHECK(lock ? true : false);*/
   }
};
// Functor exercising try_to_lock semantics for lock type P.
// NOTE(review): the entire test body is commented out in this revision, so
// operator() is currently a no-op; the commented code preserves the intended
// checks (try_to_lock/defer_lock construction, lock/unlock/try_lock).
template <typename P>
struct test_try_wait
{
   void operator()()
   {/*
      mutex_type interprocess_mutex;
      boost::interprocess::interprocess_condition interprocess_condition;
      // Test the lock's constructors.
      {
         try_to_wait_type lock(interprocess_mutex, boost::interprocess::try_to_lock);
         BOOST_INTERPROCES_CHECK(lock ? true : false);
      }
      {
         try_to_wait_type lock(interprocess_mutex, boost::interprocess::defer_lock);
         BOOST_INTERPROCES_CHECK(!lock);
      }
      try_to_wait_type lock(interprocess_mutex);
      BOOST_INTERPROCES_CHECK(lock ? true : false);
      // Test the lock, unlock and trylock methods.
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      lock.lock();
      BOOST_INTERPROCES_CHECK(lock ? true : false);
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      BOOST_INTERPROCES_CHECK(lock.try_lock());
      BOOST_INTERPROCES_CHECK(lock ? true : false);*/
   }
};
// Functor exercising timed-lock semantics for lock type P.
// NOTE(review): the entire test body is commented out in this revision, so
// operator() is currently a no-op; the commented code preserves the intended
// checks (timed construction with a deadline, lock/unlock/timed_lock).
template <typename P>
struct test_timed_wait
{
   void operator()()
   {/*
      mutex_type interprocess_mutex;
      boost::interprocess::interprocess_condition interprocess_condition;
      // Test the lock's constructors.
      {
         // Construct and initialize an xtime for a fast time out.
         boost::posix_time::ptime pt = delay(100, 0);
         timed_wait_type lock(interprocess_mutex, pt);
         BOOST_INTERPROCES_CHECK(lock ? true : false);
      }
      {
         timed_wait_type lock(interprocess_mutex, boost::interprocess::defer_lock);
         BOOST_INTERPROCES_CHECK(!lock);
      }
      timed_wait_type lock(interprocess_mutex);
      BOOST_INTERPROCES_CHECK(lock ? true : false);
      // Test the lock, unlock and timedlock methods.
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      lock.lock();
      BOOST_INTERPROCES_CHECK(lock ? true : false);
      lock.unlock();
      BOOST_INTERPROCES_CHECK(!lock);
      boost::posix_time::ptime pt = delay(10, 0);
      BOOST_INTERPROCES_CHECK(lock.timed_lock(pt));
      BOOST_INTERPROCES_CHECK(lock ? true : false);*/
   }
};
// Functor exercising recursive locking (same mutex locked twice in one
// scope) for lock type P.
// NOTE(review): the entire test body is commented out in this revision, so
// operator() is currently a no-op; the commented code preserves the intended
// checks (double lock via plain, defer_lock, try_to_lock and timed forms).
template <typename P>
struct test_recursive_lock
{
   void operator()()
   {/*
      mutex_type mx;
      {
         wait_type lock1(mx);
         wait_type lock2(mx);
      }
      {
         wait_type lock1(mx, defer_lock);
         wait_type lock2(mx, defer_lock);
      }
      {
         wait_type lock1(mx, try_to_lock);
         wait_type lock2(mx, try_to_lock);
      }
      {
         //This should always lock
         boost::posix_time::ptime pt = delay(3);
         wait_type lock1(mx, pt);
         wait_type lock2(mx, pt);
      }*/
   }
};
// plain_exclusive exercises the "infinite" lock for each
// read_write_mutex type.
template<typename P>
void wait_and_sleep(void *arg, P &sm)
{
data<P> *pdata = static_cast<data<P>*>(arg);
boost::interprocess::scoped_lock<P> l(sm);
boost::thread::sleep(xsecs(3*BaseSeconds));
++shared_val;
pdata->m_value = shared_val;
}
template<typename P>
void try_wait_and_sleep(void *arg, P &sm)
{
data<P> *pdata = static_cast<data<P>*>(arg);
boost::interprocess::scoped_lock<P> l(sm, boost::interprocess::defer_lock);
if (l.try_lock()){
boost::thread::sleep(xsecs(3*BaseSeconds));
++shared_val;
pdata->m_value = shared_val;
}
}
template<typename P>
void timed_wait_and_sleep(void *arg, P &sm)
{
data<P> *pdata = static_cast<data<P>*>(arg);
boost::posix_time::ptime pt(delay(pdata->m_secs));
boost::interprocess::scoped_lock<P>
l (sm, boost::interprocess::defer_lock);
if (l.timed_lock(pt)){
boost::thread::sleep(xsecs(3*BaseSeconds));
++shared_val;
pdata->m_value = shared_val;
}
}
// Two threads each take the exclusive lock and hold it for 3*BaseSeconds;
// the second launches 1*BaseSeconds later, so both block on lock() and both
// eventually succeed, recording counter values in launch order.
// NOTE(review): correctness depends on the relative sleep timing; keep the
// launch/sleep ordering exactly as written.
template<typename P>
void test_mutex_lock(P &sm)
{
   shared_val = 0;
   data<P> m1(1,sm);
   data<P> m2(2,sm);
   // Locker one launches, holds the lock for 3*BaseSeconds seconds.
   boost::thread tm1(thread_adapter<P>(&wait_and_sleep, &m1, sm));
   //Wait 1*BaseSeconds
   boost::thread::sleep(xsecs(1*BaseSeconds));
   // Locker two launches, holds the lock for 3*BaseSeconds seconds.
   boost::thread tm2(thread_adapter<P>(&wait_and_sleep, &m2, sm));
   //Wait completion
   tm1.join();
   tm2.join();
   // Both threads acquired the lock: first recorded 1, second recorded 2.
   assert(m1.m_value == 1);
   assert(m2.m_value == 2);
}
// Two threads attempt try_lock on the same mutex; the first holds it for
// 3*BaseSeconds, so the second (launched 1*BaseSeconds later) must fail and
// leave its data record unmodified.
// NOTE(review): the final check assumes data<P>'s m_value is initialized to
// -1 when the thread never acquires the lock -- confirm in data<P>'s ctor.
template<typename P>
void test_mutex_try_lock(P &sm)
{
   shared_val = 0;
   data<P> m1(1,sm);
   data<P> m2(2,sm);
   // Locker one launches, holds the lock for 3*BaseSeconds seconds.
   boost::thread tm1(thread_adapter<P>(&try_wait_and_sleep, &m1, sm));
   //Wait 1*BaseSeconds
   boost::thread::sleep(xsecs(1*BaseSeconds));
   // Locker two launches, holds the lock for 3*BaseSeconds seconds.
   boost::thread tm2(thread_adapter<P>(&try_wait_and_sleep, &m2, sm));
   //Wait completion
   tm1.join();
   tm2.join();
   //Only the first should succeed locking
   assert(m1.m_value == 1);
   assert(m2.m_value == -1);
}
// Two threads use timed_lock with a 3-second deadline; the first holds the
// mutex for 3*BaseSeconds, and the second (launched 1*BaseSeconds later)
// must still acquire it within its deadline, so both record values.
// NOTE(review): the two scoped sections below are byte-identical; presumably
// the repetition is intentional (re-running the scenario on a reused mutex)
// -- confirm, otherwise the second block is redundant.
template<typename P>
void test_mutex_timed_lock(P &sm)
{
   {
      shared_val = 0;
      data<P> m1(1, sm, 3);
      data<P> m2(2, sm, 3);
      // Locker one launches, holds the lock for 3*BaseSeconds seconds.
      boost::thread tm1(thread_adapter<P>(&timed_wait_and_sleep, &m1, sm));
      //Wait 1*BaseSeconds
      boost::thread::sleep(xsecs(1*BaseSeconds));
      // Locker two launches, holds the lock for 3*BaseSeconds seconds.
      boost::thread tm2(thread_adapter<P>(&timed_wait_and_sleep, &m2, sm));
      //Wait completion
      tm1.join();
      tm2.join();
      //Both should succeed locking
      assert(m1.m_value == 1);
      assert(m2.m_value == 2);
   }
   {
      shared_val = 0;
      data<P> m1(1, sm, 3);
      data<P> m2(2, sm, 3);
      // Locker one launches, holds the lock for 3*BaseSeconds seconds.
      boost::thread tm1(thread_adapter<P>(&timed_wait_and_sleep, &m1, sm));
      //Wait 1*BaseSeconds
      boost::thread::sleep(xsecs(1*BaseSeconds));
      // Locker two launches, holds the lock for 3*BaseSeconds seconds.
      boost::thread tm2(thread_adapter<P>(&timed_wait_and_sleep, &m2, sm));
      //Wait completion
      tm1.join();
      tm2.join();
      //Both should succeed locking
      assert(m1.m_value == 1);
      assert(m2.m_value == 2);
   }
}
// Runs every generic lock-object test (plain wait, try-wait, timed wait) for
// lock type P, announcing each on stdout before invoking it.
template <typename P>
inline void test_all_lock()
{
   //Now generic interprocess_mutex tests
   const char *type_name = typeid(P).name();
   std::cout << "test_wait<" << type_name << ">" << std::endl;
   test_wait<P>()();
   std::cout << "test_try_wait<" << type_name << ">" << std::endl;
   test_try_wait<P>()();
   std::cout << "test_timed_wait<" << type_name << ">" << std::endl;
   test_timed_wait<P>()();
}
// Runs the recursive-lock test for lock type P, announcing it on stdout.
template <typename P>
inline void test_all_recursive_lock()
{
   //Now generic interprocess_mutex tests
   const char *type_name = typeid(P).name();
   std::cout << "test_recursive_lock<" << type_name << ">" << std::endl;
   test_recursive_lock<P>()();
}
// Instantiates one mutex of type P and runs the three multithreaded mutex
// tests (blocking, try, timed) against it, announcing each on stdout.
template<typename P>
void test_all_mutex()
{
   P mut;
   const char *type_name = typeid(P).name();
   std::cout << "test_mutex_lock<" << type_name << ">" << std::endl;
   test_mutex_lock(mut);
   std::cout << "test_mutex_try_lock<" << type_name << ">" << std::endl;
   test_mutex_try_lock(mut);
   std::cout << "test_mutex_timed_lock<" << type_name << ">" << std::endl;
   test_mutex_timed_lock(mut);
}
}}} //namespace boost { namespace interprocess { namespace test {
#include <boost/interprocess/detail/config_end.hpp>
#endif //BOOST_INTERPROCESS_TEST_SEMAPHORE_TEST_TEMPLATE_HEADER
|
{"hexsha": "8b313cb3f682fdfff039aa77a3a3983000979270", "size": 8906, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libs/interprocess/test/semaphore_test_template.hpp", "max_stars_repo_name": "mike-code/boost_1_38_0", "max_stars_repo_head_hexsha": "7ff8b2069344ea6b0b757aa1f0778dfb8526df3c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-08-22T17:17:41.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-22T17:17:41.000Z", "max_issues_repo_path": "libs/interprocess/test/semaphore_test_template.hpp", "max_issues_repo_name": "mike-code/boost_1_38_0", "max_issues_repo_head_hexsha": "7ff8b2069344ea6b0b757aa1f0778dfb8526df3c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/interprocess/test/semaphore_test_template.hpp", "max_forks_repo_name": "mike-code/boost_1_38_0", "max_forks_repo_head_hexsha": "7ff8b2069344ea6b0b757aa1f0778dfb8526df3c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-03-07T05:20:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-07T05:20:43.000Z", "avg_line_length": 27.7445482866, "max_line_length": 85, "alphanum_fraction": 0.6406916685, "num_tokens": 2262}
|
Jed Alexander is a local artist, illustrator and educator who lives in the Davis area. He is the founding member and original organizer of The Davis Figure Drawing Group. His work has been shown at the Pence Gallery and a number of local businesses. As an illustrator he has done covers for Sacramento News & Review and illustrations for a number of children's magazines, including Nickelodeon and Cricket. He is currently represented by http://redfoxliterary.com as an author/illustrator. If you'd like to see his work or buy his posters and prints you can visit his website at http://jedalexander.com.
|
{"hexsha": "186455481a1f40687fc954536572e56434d66fd0", "size": 609, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Jed_Alexander.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Jed_Alexander.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Jed_Alexander.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 203.0, "max_line_length": 607, "alphanum_fraction": 0.8095238095, "num_tokens": 125}
|
import numpy as np
import time # temp timer
from classy import Class
from scipy.optimize import fsolve
from scipy import special # bessel functions
# constants
h = 0.67  # dimensionless Hubble parameter
c = 299793. # km/s
H0 = 100*h  # Hubble constant, km/s/Mpc
omega_m = 0.27 + 0.049  # matter density (CDM + baryons)
omega_rad = 2.47e-05/(h*h)  # radiation density
omega_lambda = 1 - omega_m - omega_rad  # dark energy density (flat universe)
T_nu = 2.7255*(4/11)**(1/3)*(3.046/3)**(1/4) # present cnb temperature
k_B = 8.617333e-05 # eV/K
a_ndec = 1e-10  # scale factor at neutrino decoupling
massless_cutoff = 2e-5 # for whether to treat neutrinos as massless
scaling = 1e12 # for scaling cl to the correct unit/magnitude
As_default = 2.215e-9  # default primordial scalar amplitude
pivot_default = 5e-2 # default k_pivot
pivot = 5e-4  # k_pivot used for the rescaled As below
ns = 0.9619  # scalar spectral index
As = As_default*(pivot/pivot_default)**(ns-1)  # As rescaled to the new pivot
def get_distanceToPresent(start_index, present_index, a_array, q, m_nu):
    '''
    Integrate the comoving distance traveled by a neutrino between the time
    marked by ``start_index`` and the present, step by step over ``a_array``.

    Args:
        start_index (int): index for start time (scale factor) in a_array
        present_index (int): index for present time in a_array
        a_array (np.array of float): array of scale factors
        q (float): q (eV) of neutrino
        m_nu (float): neutrino mass (eV)
    Return (float): distance (Mpc) traveled between start_index time and now.
    '''
    total = 0
    for idx in range(start_index, present_index):
        a_i = a_array[idx]
        step = a_array[idx + 1] - a_i
        # Hubble rate at this scale factor (km/s/Mpc).
        hubble = H0*np.sqrt(omega_m/a_i**3 + omega_rad/a_i**4 + omega_lambda)
        # Comoving energy of the neutrino; q/epsilon is its velocity fraction.
        eps = np.sqrt(q**2 + (a_i*m_nu)**2)
        # The factor c converts the integrand to a distance in Mpc.
        total += c*(q/eps)/(a_i**2*hubble)*step
    return total
def get_distancesToPresent(start_index, present_index, a_array, q, m_nu):
    '''
    Vectorized counterpart of get_distanceToPresent: returns the neutrino
    travel distance to the present for every time index at once.

    Args:
        start_index (int): index for start time (scale factor) in a_array
        present_index (int): index for present time in a_array
        a_array (np.array of float): array of scale factors
        q (float): q (eV) of neutrino
        m_nu (float): neutrino mass (eV)
    Return (np.array of len (present_index-start_index)): distances (Mpc)
        traveled by neutrinos from each time between start_index and the
        present; entry j corresponds to time index start_index + j.
    '''
    a_seg = a_array[start_index:present_index]
    # Per-step scale-factor increments.
    da_seg = a_array[start_index+1:present_index+1] - a_seg
    # Hubble rate and comoving neutrino energy at each step.
    H_seg = H0*np.sqrt(omega_m/a_seg**3+omega_rad/a_seg**4+omega_lambda)
    eps_seg = np.sqrt(q**2 + (a_seg*m_nu)**2)
    # Distance covered in each individual step (c fixes the units to Mpc).
    step_distances = c*(q/eps_seg)/(a_seg**2*H_seg)*da_seg
    # Reverse-cumulate so each entry holds the distance from that time to the
    # present (e.g. [1,2,3] -> [6,5,3]).
    return np.cumsum(step_distances[::-1])[::-1]
# outdated but good to see integrals done without the speedup using np.arrays
def get_deltaIntegrand(k, l_index, q_index, pt, nu_masses, nu_index, tau_0):
    '''
    Compute the three contributions to the line-of-sight Delta integrand for
    a single k mode (outdated loop-based reference implementation; see
    get_deltaIntegrand_opt for the vectorized version).

    Args:
        k (float): k value
        l_index (int): l index for Cl
        q_index (int): q index for neutrinos
        pt: perturbations output from CLASS for k
        nu_masses (list): neutrino masses
        nu_index (int): index for neutrino species
        tau_0: conformal age output from CLASS
    Return ([float, float, float]): the constant phi term and the phi and
        phiprime integrals at l_index for the q at q_index and the nu_index
        neutrino species.
    '''
    a = pt['a']
    tau = pt['tau [Mpc]']
    phi = pt['phi']
    phi_prime = pt['phi_prime']
    # NOTE(review): n_qbins is a module-level global that is not defined in
    # this file; it must be set elsewhere before calling -- TODO confirm.
    qs = np.zeros(n_qbins)
    for q_i in range(n_qbins):
        qs[q_i] = pt['q_ncdm[{},{}]'.format(nu_index, q_i)][0]*k_B*T_nu  # q now in units of eV
    m_nu = nu_masses[nu_index]  # neutrino mass
    # Indices of the times closest to neutrino decoupling and to the present.
    taundec_index = np.argmin(np.abs(a - a_ndec))  # take index 0 if times not early enough
    tau0_index = np.argmin(np.abs(tau - tau_0))  # should be same as len(tau)-1
    # Distance traveled by neutrinos from each time between decoupling and
    # the present; entry j corresponds to time index taundec_index + j.
    if (m_nu < massless_cutoff): distancesToPresent = tau[tau0_index]-tau[taundec_index:tau0_index]
    else: distancesToPresent = get_distancesToPresent(taundec_index, tau0_index, a, qs[q_index], m_nu)
    # First term (not an integral), evaluated at the decoupling distance.
    # Bug fix: the original referenced an undefined scalar `distanceToPresent`
    # here and inside the loop below (NameError / use-before-assignment); the
    # correct values are the entries of the `distancesToPresent` array.
    phi_const = 0.5*phi[taundec_index]*special.spherical_jn(l_index, k*distancesToPresent[0])
    phi_integral = 0
    phiprime_integral = 0
    # Accumulate the integrals over time steps.
    for i in range(taundec_index, tau0_index):
        dx = tau[i+1]-tau[i]  # change of variable: x = tau_0-lambda => lambda = tau_0-x
        H = H0*np.sqrt(omega_m/a[i]**3 + omega_rad/a[i]**4 + omega_lambda)
        # Bessel function at this step's neutrino distance to the present.
        j_kdistance = special.spherical_jn(l_index, k*distancesToPresent[i-taundec_index])
        if (nu_masses[nu_index] < massless_cutoff):
            phiprime_term = 2*phi_prime[i]*dx*j_kdistance
            phi_term = 0
        else:
            phiprime_term = (2+(a[i]*nu_masses[nu_index]/qs[q_index])**2)*phi_prime[i]*dx*j_kdistance
            phi_term = 2*(a[i]*nu_masses[nu_index]/qs[q_index])**2*a[i]*H/c*phi[i]*dx*j_kdistance
        phi_integral += phi_term
        phiprime_integral += phiprime_term
    return phi_const, phi_integral, phiprime_integral
def get_deltaIntegrand_opt(k, l_index, q_index, pt, earliest_tf, nu_masses, nu_index, tau_0, distance_cutoff):
    '''
    Optimized version of get_deltaIntegrand with the implementation of a
    distance cut.
    Args:
        k (int): k value
        l_index (int): l index for Cl
        q_index (int): q index for neutrinos
        pt: perturbations output from CLASS for k
        earliest_tf (tuple): phi and psi arguments as (phi, psi) of the earliest transfer fn
        nu_masses (list): neutrino masses
        nu_index (int): index for neutrino species
        tau_0: conformal age output from CLASS
        distance_cutoff (float): cutoff for chi for which to calculate the integrals
    Return ([float, float, float]): all the terms in Delta (including
    one constant phi term and phi and phiprime integrals) at l_index for the
    q at q_index and the nu_index neutrino species
    '''
    a = pt['a']
    tau = pt['tau [Mpc]']
    phi = pt['phi']
    phi_prime = pt['phi_prime']
    psi = pt['psi']
    phi_earliest, psi_earliest = earliest_tf
    # NOTE(review): n_qbins is a module-level global that is not defined in
    # this file; it must be set elsewhere before calling -- TODO confirm.
    qs = np.zeros(n_qbins)
    for q_i in range(n_qbins):
        qs[q_i] = pt['q_ncdm[{},{}]'.format(nu_index, q_i)][0]*k_B*T_nu # q now in units of eV
    m_nu = nu_masses[nu_index]
    # get the index of the closest value in tau array to tau_0 or tau_ndec
    taundec_index = np.argmin(np.abs(a - a_ndec)) # neutrino decoupling (take index 0 if times not early enough)
    tau0_index = np.argmin(np.abs(tau - tau_0)) # should be same as len(tau)-1
    # get distances traveled by neutrinos at each time in a array between decoupling and present
    if (m_nu < massless_cutoff): distancesToPresent = tau[tau0_index]-tau[taundec_index:tau0_index]
    else: distancesToPresent = get_distancesToPresent(taundec_index, tau0_index, a, qs[q_index], m_nu)
    start_index = taundec_index
    # get the index of first item<distance_cutoff (assuming list in descending order)
    if (distance_cutoff < np.infty):
        start_index = np.searchsorted(-distancesToPresent, -distance_cutoff)
    # only keep the part whose distance is within the cutoff
    tau_array = tau[start_index:tau0_index+1]
    a_array = a[start_index:tau0_index+1]
    # Midpoint (cell-center) values used for the step-wise integration.
    a_centers = 0.5*(a_array[:-1] + a_array[1:])
    phi_prime_centers = 0.5*(phi_prime[start_index:tau0_index] + phi_prime[start_index+1:tau0_index+1])
    phi_centers = 0.5*(phi[start_index:tau0_index] + phi[start_index+1:tau0_index+1])
    psi_centers = 0.5*(psi[start_index:tau0_index] + psi[start_index+1:tau0_index+1])
    dpsi = psi[1:]-psi[:-1]
    dtau = tau[1:]-tau[:-1]
    # to avoid divide by 0 error if we have repeated tau values
    psi_prime = np.divide(dpsi, dtau, out=np.zeros_like(dpsi), where=(dtau != 0.))
    # NOTE(review): psi_prime spans the full tau array while the *_centers
    # arrays are sliced to [start_index:tau0_index]; the massless branch adds
    # them elementwise, which only aligns when start_index == 0 and
    # tau0_index == len(tau)-1 -- verify for distance_cutoff < inf. The same
    # applies to indexing distancesToPresent (built from taundec_index) by
    # start_index below.
    #psi_prime[np.where(psi_prime == np.inf)] = 0
    #psi_prime[np.where(psi_prime == -np.inf)] = 0
    # calculate the distance to last scattering in case the input pt don't go early enough
    a_temp = np.logspace(-10, 0, base=10, num=1000) # a_ndec=1e-10
    chi_dec = get_distancesToPresent(0, len(a_temp)-1, a_temp, qs[q_index], m_nu)[0]
    # calculate each step of the integral as an array
    dx_array = tau_array[1:]-tau_array[:-1] # dtau array
    H_array = H0*np.sqrt(omega_m/a_centers**3+omega_rad/a_centers**4+omega_lambda)
    jk_distances = special.spherical_jn(l_index, k*distancesToPresent[start_index:tau0_index])
    # each term carries a - sign when integrate from 0 to dec
    if (nu_masses[nu_index] < massless_cutoff):
        # first term (not an integral)
        phi_const = (psi_earliest-0.5*phi_earliest)*special.spherical_jn(l_index, k*chi_dec)
        phiprime_terms = (phi_prime_centers + psi_prime)*dx_array*jk_distances
        phi_terms = 0
    else:
        phi_const = 0.5*phi_earliest*special.spherical_jn(l_index, k*chi_dec)
        phiprime_terms = (2+(a_centers*m_nu/qs[q_index])**2)*phi_prime_centers*dx_array*jk_distances
        phi_terms = 2*(a_centers*m_nu/qs[q_index])**2*a_centers*H_array/c*phi_centers*dx_array*jk_distances
    # do integral by sum
    phi_integral = np.sum(phi_terms)
    phiprime_integral = np.sum(phiprime_terms)
    return phi_const, phi_integral, phiprime_integral
def get_clqcomponents_LoS(nu_index, q_index, pts, earliest_tfs, nu_masses, is_lnk, k_magnitudes, ls, tau_0, distance_cutoff):
    '''
    Calculate Cl using line-of-sight integrals, also returning the separate
    contributions of each Delta term.
    Args:
        nu_index (int): index for neutrino species
        q_index (int): q index for neutrinos
        pts: perturbations output from CLASS at all ks
        earliest_tfs (tuple): phi and psi arguments as (phi, psi) of the earliest transfer fn
        nu_masses (list): neutrino masses
        is_lnk (boolean): whether to do dlnk integral (True if assuming Harrison-Zel'dovich-Peebles spectrum)
        k_magnitudes (np.array): array of k values
        ls (list): list of l values; assumed contiguous (l_min..l_max used)
        tau_0: conformal age output from CLASS
        distance_cutoff (float): chi cutoff forwarded to get_deltaIntegrand_opt
    Return (np.arrays): cls, and the contributions of (phi_constant)^2, (phi_integral)^2, and
    (phiprime_integral)^2 to the total cl integral.
    '''
    l_min = ls[0]
    l_max = ls[-1]
    # the value at index 0 is not in the correct k range so get rid of it
    phi_earliest = earliest_tfs['phi'][1:]
    psi_earliest = earliest_tfs['psi'][1:]
    cls = []
    phiconst_contribs = []
    phi_contribs = []
    phiprime_contribs = []
    # l used for Bessel equations
    for l_index in range(l_min, l_max+1):
        cl = 0
        phiconst_contrib = 0
        phi_contrib = 0
        phiprime_contrib = 0
        # calculate the cl integral
        for i in range(len(k_magnitudes)-1): # k_index = 0 -- n_kmodes - 1
            phi_const, phi_integral, phiprime_integral = get_deltaIntegrand_opt(k_magnitudes[i], l_index, q_index, pts[i], [phi_earliest[i], psi_earliest[i]], nu_masses, nu_index, tau_0, distance_cutoff)
            # whether to integrate over lnk or k
            if (is_lnk):
                dlnk = np.log(k_magnitudes[i+1])-np.log(k_magnitudes[i])
                cl += T_nu**2*(4*np.pi)*As_default*(phi_const+phi_integral+phiprime_integral)**2*dlnk
                phiconst_contrib += T_nu**2*(4*np.pi)*As_default*(phi_const)**2*dlnk
                phi_contrib += T_nu**2*(4*np.pi)*As_default*(phi_integral)**2*dlnk
                phiprime_contrib += T_nu**2*(4*np.pi)*As_default*(phiprime_integral)**2*dlnk
            else:
                # Power-law spectrum: weight by k^(ns-2) and integrate over dk.
                dk = k_magnitudes[i+1]-k_magnitudes[i]
                cl += T_nu**2*(4*np.pi)*As_default*k_magnitudes[i]**(ns-2)*(phi_const+phi_integral+phiprime_integral)**2*dk
                phiconst_contrib += T_nu**2*(4*np.pi)*As_default*k_magnitudes[i]**(ns-2)*(phi_const)**2*dk
                phi_contrib += T_nu**2*(4*np.pi)*As_default*k_magnitudes[i]**(ns-2)*(phi_integral)**2*dk
                phiprime_contrib += T_nu**2*(4*np.pi)*As_default*k_magnitudes[i]**(ns-2)*(phiprime_integral)**2*dk
        # scaling for the correct units
        cls.append(cl*scaling)
        phiconst_contribs.append(phiconst_contrib*scaling)
        phi_contribs.append(phi_contrib*scaling)
        phiprime_contribs.append(phiprime_contrib*scaling)
    return np.array(cls), np.array(phiconst_contribs), np.array(phi_contribs), np.array(phiprime_contribs)
def get_clq_LoS(nu_index, q_index, pts, earliest_tfs, nu_masses, is_lnk, k_magnitudes, ls, tau_0, distance_cutoff, **kwargs):
    '''
    Calculate Cl using line-of-sight integrals.
    Args:
        nu_index (int): index for neutrino species
        q_index (int): q index for neutrinos
        pts: perturbations output from CLASS at all ks
        earliest_tfs (tuple): phi and psi arguments as (phi, psi) of the earliest transfer fn
        nu_masses (list): neutrino masses
        is_lnk (boolean): whether to do dlnk integral (True if assuming Harrison-Zel'dovich-Peebles spectrum)
        k_magnitudes (np.array): array of k values
        ls (list): list of l values for which to calculate Cls
        tau_0: conformal age output from CLASS
        distance_cutoff (float): chi cutoff forwarded to get_deltaIntegrand_opt
    Return (np.array): Cls
    '''
    if (not is_lnk):
        # NOTE(review): k_pivot is fetched but never used in this function
        # (unlike get_clq_BH); the non-lnk branch below uses the module-level
        # ns instead -- confirm this is intended.
        k_pivot = kwargs.get('k_pivot', None)
    l_min = ls[0]
    l_max = ls[-1]
    # the value at index 0 is not in the correct k range so get rid of it
    phi_earliest = earliest_tfs['phi'][1:]
    psi_earliest = earliest_tfs['psi'][1:]
    cls = []
    # l used for Bessel equations
    for l_index in range(l_min, l_max+1):
        cl = 0
        # calculate the cl integral
        for i in range(len(k_magnitudes)-1): # k_index = 0 -- n_kmodes - 1
            phi_const, phi_integral, phiprime_integral = get_deltaIntegrand_opt(k_magnitudes[i], l_index, q_index, pts[i],
                                                                                [phi_earliest[i], psi_earliest[i]], nu_masses,
                                                                                nu_index, tau_0, distance_cutoff)
            # whether to integrate over lnk or k
            if (is_lnk):
                dlnk = np.log(k_magnitudes[i+1])-np.log(k_magnitudes[i])
                cl += (phi_const+phi_integral+phiprime_integral)**2*dlnk
            else:
                dk = k_magnitudes[i+1]-k_magnitudes[i]
                cl += k_magnitudes[i]**(ns-2)*(phi_const+phi_integral+phiprime_integral)**2*dk
        # prefactor in cl definition
        coeff = T_nu**2*(4*np.pi)*As_default
        cl *= coeff
        # scaling for the correct temperature units
        cls.append(cl*scaling)
    return np.array(cls)
def get_dcl_dlnk(nu_index, q_index, l_index, pts, earliest_tfs, nu_masses, k_magnitudes, tau_0):
    '''
    Compute the per-k integrand dCl/dlnk of the line-of-sight Cl integral.

    Args:
        nu_index (int): index for neutrino species
        q_index (int): q index for neutrinos
        l_index (int): l index for Cl
        pts: perturbations output from CLASS at all ks
        earliest_tfs (tuple): phi and psi arguments as (phi, psi) of the earliest transfer fn
        nu_masses (list): neutrino masses
        k_magnitudes (np.array): array of k values
        tau_0: conformal age output from CLASS
    Return (list): dcl/dlnk values of dim len(k_magnitudes)-1 from the
        line-of-sight integral method.
    '''
    # The value at index 0 is not in the correct k range, so drop it.
    phi_earliest = earliest_tfs['phi'][1:]
    psi_earliest = earliest_tfs['psi'][1:]
    # Constant prefactor of the Cl definition plus the unit scaling.
    prefactor = scaling*T_nu**2*(4*np.pi)*As_default
    dcls = []
    for k_i in range(len(k_magnitudes)-1):  # k_index = 0 -- n_kmodes - 1
        phi_const, phi_integral, phiprime_integral = get_deltaIntegrand_opt(k_magnitudes[k_i], l_index, q_index, pts[k_i],
                                                                            [phi_earliest[k_i], psi_earliest[k_i]], nu_masses,
                                                                            nu_index, tau_0, np.inf)
        delta = phi_const+phi_integral+phiprime_integral
        dcls.append(prefactor*delta**2)
    return dcls
def get_clq_BH(q_index, pts, k_magnitudes, ls, is_lnk, n, **kwargs):
    '''
    Calculate Cl using the Boltzmann hierarchy method.
    Args:
        q_index (int): q index for neutrinos
        pts: perturbations output from CLASS at all ks
        k_magnitudes (np.array): array of k values
        ls (list): list of l values for which to calculate Cls
        is_lnk (boolean): whether to do dlnk integral (True if assuming Harrison-Zel'dovich-Peebles spectrum)
        n (float): n_s value
        k_pivot (float, keyword): pivot scale; required when is_lnk is False
    Return (np.arrays): Cls for the specified q_index for all 3 neutrino species
    in the order of their indices
    '''
    if (not is_lnk):
        k_pivot = kwargs.get('k_pivot', None)
    l_min = ls[0]
    l_max = ls[-1]
    Cl0 = []
    Cl1 = []
    Cl2 = []
    for l_index in range(l_min, l_max+1):
        Cl0q = 0.0
        Cl1q = 0.0
        Cl2q = 0.0
        for k_index in range(len(k_magnitudes)-1):
            pt = pts[k_index]
            a = pt['a']
            a_index = np.where(a>=1.0)[0][0] # get index of a=1 (today)
            # Multipole transfer values for the three neutrino species.
            Theta0ql = pt['Theta_n_q_l_ncdm[{},{},{}]'.format(0, q_index, l_index)]
            Theta1ql = pt['Theta_n_q_l_ncdm[{},{},{}]'.format(1, q_index, l_index)]
            Theta2ql = pt['Theta_n_q_l_ncdm[{},{},{}]'.format(2, q_index, l_index)]
            if (is_lnk): # assuming ns=1
                delta_lnk = np.log(k_magnitudes[k_index+1])-np.log(k_magnitudes[k_index])
                Cl0q += T_nu**2*(4*np.pi)*(delta_lnk)*As_default*Theta0ql[a_index]*Theta0ql[a_index]
                Cl1q += T_nu**2*(4*np.pi)*(delta_lnk)*As_default*Theta1ql[a_index]*Theta1ql[a_index]
                Cl2q += T_nu**2*(4*np.pi)*(delta_lnk)*As_default*Theta2ql[a_index]*Theta2ql[a_index]
            else:
                # Bug fix: the original referenced an undefined name `k`
                # (NameError); the k value of this step is k_magnitudes[k_index].
                k_val = k_magnitudes[k_index]
                delta_k = k_magnitudes[k_index+1]-k_val
                Cl0q += T_nu**2*(4*np.pi)*(delta_k/k_val)*As*(k_val/k_pivot)**(n-1)*Theta0ql[a_index]*Theta0ql[a_index]
                Cl1q += T_nu**2*(4*np.pi)*(delta_k/k_val)*As*(k_val/k_pivot)**(n-1)*Theta1ql[a_index]*Theta1ql[a_index]
                Cl2q += T_nu**2*(4*np.pi)*(delta_k/k_val)*As*(k_val/k_pivot)**(n-1)*Theta2ql[a_index]*Theta2ql[a_index]
        # scaling for the correct units
        Cl0.append(Cl0q*scaling)
        Cl1.append(Cl1q*scaling)
        Cl2.append(Cl2q*scaling)
    return np.array(Cl0), np.array(Cl1), np.array(Cl2)
def get_clqindep(nu_index, q_indices, pts, earliest_tfs, nu_masses, k_magnitudes, ls, tau_0):
    '''
    Calculate Cls averaged over q using line-of-sight integrals.
    Args:
        nu_index (int): index for neutrino species
        q_indices (int): q indices to integrate over to get q-indep average
        pts: perturbations output from CLASS at all ks
        earliest_tfs (tuple): phi and psi arguments as (phi, psi) of the earliest transfer fn
        nu_masses (list): neutrino masses
        k_magnitudes (np.array): array of k values
        ls (list): list of l values for which to calculate Cls
        tau_0: conformal age output from CLASS
    Return (np.array): Cls averaged over the given range of q over q_indices
    '''
    l_min = ls[0]
    l_max = ls[-1]
    # the value at index 0 is not in the correct k range so get rid of it
    phi_earliest = earliest_tfs['phi'][1:]
    psi_earliest = earliest_tfs['psi'][1:]
    cls = []
    # get the q/T values used for integration
    # NOTE(review): n_qbins is a module-level global that is not defined in
    # this file; it must be set elsewhere before calling -- TODO confirm.
    qs = np.arange(1.5, 1.5*(n_qbins+1), 1.5)
    qs = qs[q_indices]
    dq = qs[1:] - qs[:n_qbins-1]
    # l used for Bessel equations
    for l_index in range(l_min, l_max+1):
        cl = 0
        # calculate the cl integral
        for i in range(len(k_magnitudes)-1): # k_index = 0 -- n_kmodes - 1
            deltaqs = []
            for q in q_indices:
                phi_const, phi_integral, phiprime_integral = get_deltaIntegrand_opt(k_magnitudes[i], l_index, q, pts[i],
                                                                                    [phi_earliest[i], psi_earliest[i]],
                                                                                    nu_masses, nu_index, tau_0, np.infty)
                deltaqs.append(phi_const+phi_integral+phiprime_integral)
            # integrate over q with a Fermi-Dirac weight; deltaqs is a Python
            # list, converted to an ndarray by the multiplication below.
            delta = 2/3/special.zeta(3)*np.sum(deltaqs[:n_qbins-1]*qs[:n_qbins-1]**2*dq/(np.exp(qs[:n_qbins-1])+1))
            # integrate over k
            dlnk = np.log(k_magnitudes[i+1])-np.log(k_magnitudes[i])
            cl += delta**2*dlnk
        # prefactor in cl definition
        coeff = T_nu**2*(4*np.pi)*As_default
        cl *= coeff
        # scaling for the correct units
        cls.append(cl*scaling)
    return np.array(cls)
def run_class(parameters, gettransfer):
    '''
    Run CLASS with the input parameters and time the computation.

    Args:
        parameters: parameters to run CLASS
        gettransfer (boolean): whether to also return the earliest transfer
    Return: (pts, tau_0) if gettransfer is False, else (pts, tau_0, transfer).
        tau_0 should be fixed but is returned so callers can check it.
    '''
    start = time.time()
    cosmo = Class()
    cosmo.set(parameters)
    cosmo.compute()
    perturbations = cosmo.get_perturbations()['scalar']
    conformal_age = cosmo.get_current_derived_parameters(['conformal_age'])['conformal_age']
    print("--- %s seconds ---" % (time.time() - start))
    if (gettransfer):
        # 45999 is the largest redshift possible
        return perturbations, conformal_age, cosmo.get_transfer(45999)
    return perturbations, conformal_age
|
{"hexsha": "7845c76466bd510cbe1a5bcde034290c3f79dfde", "size": 22412, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnb_utils.py", "max_stars_repo_name": "gemyxzhang/cnb-anisotropies", "max_stars_repo_head_hexsha": "f087e5f18dd253d413f4a47a3265bec516ae3612", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cnb_utils.py", "max_issues_repo_name": "gemyxzhang/cnb-anisotropies", "max_issues_repo_head_hexsha": "f087e5f18dd253d413f4a47a3265bec516ae3612", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cnb_utils.py", "max_forks_repo_name": "gemyxzhang/cnb-anisotropies", "max_forks_repo_head_hexsha": "f087e5f18dd253d413f4a47a3265bec516ae3612", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.381981982, "max_line_length": 203, "alphanum_fraction": 0.6288595395, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6484}
|
"""Calculate area of a mask."""
import argparse
import math
import logging
import sys
from osgeo import gdal
from osgeo import osr
import pygeoprocessing
import numpy
# Cap GDAL's raster block cache at 2**27 bytes (128 MB) to bound memory use.
gdal.SetCacheMax(2**27)
# Log everything at DEBUG and above to stdout with timing/location context.
logging.basicConfig(
    level=logging.DEBUG,
    format=(
        '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'
        ' [%(funcName)s:%(lineno)d] %(message)s'),
    stream=sys.stdout)
LOGGER = logging.getLogger(__name__)  # module-level logger
def area_of_pixel(pixel_size, center_lat):
    """Calculate m^2 area of a wgs84 square pixel.

    Adapted from: https://gis.stackexchange.com/a/127327/2397

    Args:
        pixel_size (float): length of side of pixel in degrees.
        center_lat (float): latitude of the center of the pixel. Note this
            value +/- half the `pixel-size` must not exceed 90/-90 degrees
            latitude or an invalid area will be calculated.

    Returns:
        Area of square pixel of side length `pixel_size` centered at
        `center_lat` in m^2.
    """
    # WGS84 ellipsoid semi-axes, in meters.
    semi_major = 6378137
    semi_minor = 6356752.3142
    eccentricity = math.sqrt(1 - (semi_minor/semi_major)**2)
    # Authalic band area evaluated at the pixel's top and bottom latitudes.
    band_areas = []
    for edge_lat in (center_lat+pixel_size/2, center_lat-pixel_size/2):
        sin_lat = math.sin(math.radians(edge_lat))
        zm = 1 - eccentricity*sin_lat
        zp = 1 + eccentricity*sin_lat
        band_areas.append(
            math.pi * semi_minor**2 * (
                math.log(zp/zm) / (2*eccentricity) +
                sin_lat / (zp*zm)))
    # The band-area difference spans 360 degrees of longitude; scale to the
    # pixel's width and take abs so southern-hemisphere pixels are positive.
    return abs(pixel_size / 360. * (band_areas[0] - band_areas[1]))
def mask_op(mask_array, value_array):
    """Return a copy of `value_array` zeroed wherever `mask_array` is not 1."""
    masked = numpy.copy(value_array)
    keep = (mask_array == 1)
    masked[~keep] = 0.0
    return masked
def calculate_mask_area(base_mask_raster_path):
    """Calculate area of mask==1.

    Writes a temporary raster `tmp_area_mask.tif` holding per-pixel area in
    hectares where the mask is 1, then totals it block by block.
    """
    raster_info = pygeoprocessing.get_raster_info(base_mask_raster_path)
    srs = osr.SpatialReference()
    srs.ImportFromWkt(raster_info['projection_wkt'])
    if srs.IsProjected():
        # Projected raster: every pixel covers the same area; m^2 -> Ha.
        pixel_x, pixel_y = raster_info['pixel_size']
        pixel_conversion = numpy.array([[abs(pixel_x * pixel_y)]]) / 10000.0
    else:
        # Geographic raster: pixel area varies with latitude, so build a
        # column vector with one area value per raster row.
        n_rows = raster_info['raster_size'][1]
        pixel_height = abs(raster_info['geotransform'][5])
        # The / 2 samples the latitude at the center of each pixel row.
        miny = raster_info['bounding_box'][1] + pixel_height / 2
        maxy = raster_info['bounding_box'][3] - pixel_height / 2
        lat_vals = numpy.linspace(maxy, miny, n_rows)
        pixel_conversion = 1.0 / 10000.0 * numpy.array(
            [[area_of_pixel(pixel_height, lat_val)] for lat_val in lat_vals])
    nodata = raster_info['nodata'][0]
    area_raster_path = 'tmp_area_mask.tif'
    # Per-pixel area where mask == 1, zero elsewhere.
    pygeoprocessing.raster_calculator(
        [(base_mask_raster_path, 1), pixel_conversion], mask_op,
        area_raster_path, gdal.GDT_Float32, nodata)
    return sum(
        (numpy.sum(area_block) for _, area_block in
         pygeoprocessing.iterblocks((area_raster_path, 1))), 0.0)
if __name__ == '__main__':
    # Command-line entry point: report the mask==1 area of a single raster.
    arg_parser = argparse.ArgumentParser(
        description='Calculate area of pixel mask.')
    arg_parser.add_argument(
        'input_mask',
        help='Path to a mask pixels are 1 or not 1.')
    parsed_args = arg_parser.parse_args()
    LOGGER.info(
        f'calculating area of pixels that are 1 in {parsed_args.input_mask}')
    mask_area = calculate_mask_area(parsed_args.input_mask)
    LOGGER.info(f'calculated area for {parsed_args.input_mask} is {mask_area}Ha')
|
{"hexsha": "5c5bed94245fbd6c5cf80444cb8da625da8ae87f", "size": 3586, "ext": "py", "lang": "Python", "max_stars_repo_path": "area_of_mask.py", "max_stars_repo_name": "richpsharp/raster_calculations", "max_stars_repo_head_hexsha": "28b18c34f49c2c275c46e332d7021a27703053cd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-07T23:46:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-26T00:31:31.000Z", "max_issues_repo_path": "area_of_mask.py", "max_issues_repo_name": "richpsharp/raster_calculations", "max_issues_repo_head_hexsha": "28b18c34f49c2c275c46e332d7021a27703053cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "area_of_mask.py", "max_forks_repo_name": "richpsharp/raster_calculations", "max_forks_repo_head_hexsha": "28b18c34f49c2c275c46e332d7021a27703053cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-06T21:05:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T22:08:35.000Z", "avg_line_length": 32.6, "max_line_length": 78, "alphanum_fraction": 0.6514221974, "include": true, "reason": "import numpy", "num_tokens": 958}
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Author : Weiqun Wu
# @Time : 2018-11-23
import math
import random
import os
import cv2 as cv
import numpy as np
from scipy.io import loadmat
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.inf)
def fspecial(ksize, sigma):
    """
    Generates a normalized 2d Gaussian kernel.

    :param ksize: an integer, represents the size of Gaussian kernel
    :param sigma: a float, represents standard variance of Gaussian kernel
    :return: 2d Gaussian kernel summing to 1, the shape is [ksize, ksize]
    """
    # Symmetric coordinate grid centered on 0; the interval is [left, right).
    left = -ksize / 2 + 0.5
    right = ksize / 2 + 0.5
    x, y = np.mgrid[left:right, left:right]
    # Unnormalized Gaussian.  Any constant prefactor (the original divided by
    # (2*sigma^2), a scalar no-op `.sum()` included) cancels in the explicit
    # normalization below, so it is omitted here.
    gaussian_kernel = np.exp(-(np.square(x) + np.square(y)) / (2 * np.power(sigma, 2)))
    # Normalize so the kernel sums to exactly 1 (avoids shadowing builtin `sum`).
    return gaussian_kernel / gaussian_kernel.sum()
def get_avg_distance(position, points, k):
    """
    Computes the average distance between a pedestrian and its k nearest neighbors.

    :param position: the [y, x] position of the current point
    :param points: the set of all points, the shape is [num, 2], rows are [y, x]
    :param k: an integer, the number of nearest neighbors wanted
    :return: the average distance between a pedestrian and its k nearest neighbors
    """
    num = len(points)
    # Degenerate cases: a lone point, or fewer available neighbors than k.
    if num == 1:
        return 1.0
    if num <= k:
        k = num - 1
    # Vectorized Euclidean distances from `position` to every point.
    dy = points[:, 0] - position[0]
    dx = points[:, 1] - position[1]
    distances = np.sort(np.sqrt(dx * dx + dy * dy))
    # Index 0 is the distance to itself (0); average the next k entries.
    return distances[1:k + 1].sum() / k
def get_density_map(scaled_crowd_img_size, scaled_points, knn_phase, k, scaled_min_head_size, scaled_max_head_size):
    """
    Generates the corresponding ground truth density map.

    :param scaled_crowd_img_size: [h, w] size of the ground truth density map
    :param scaled_points: positions of all heads, already divided by the scale
    :param knn_phase: True or False, whether to use the geometry-adaptive
        Gaussian kernel (sigma from k-NN distance) or the fixed one
    :param k: number of k nearest neighbors
    :param scaled_min_head_size: minimum head size divided by the scale
    :param scaled_max_head_size: maximum head size divided by the scale
    :return: density map of dtype float32, shape [h, w]
    """
    h, w = scaled_crowd_img_size[0], scaled_crowd_img_size[1]
    density_map = np.zeros((h, w))
    # In case that there is no one in the image
    num = len(scaled_points)
    if num == 0:
        return density_map
    for i in range(num):
        # Points are stored as [oy, ox]: points[i, 1] is x, points[i, 0] is y.
        # abs/max guard against negative coordinates.
        # NOTE(review): min(h, ...) allows x == h (one past the last row);
        # slicing below tolerates it, but min(h - 1, ...) may be intended — confirm.
        x = min(h, max(0, abs(int(math.floor(scaled_points[i, 1])))))
        y = min(w, max(0, abs(int(math.floor(scaled_points[i, 0])))))
        # now for a specific point, it represents as position[x, y]
        position = [x, y]
        # Fixed-kernel defaults, used when knn_phase is False.
        sigma = 1.5
        beta = 0.3
        ksize = 25
        if knn_phase:
            # Geometry-adaptive kernel: sigma proportional to the clamped
            # average distance to the k nearest neighbors.
            avg_distance = get_avg_distance(position, scaled_points, k=k)
            avg_distance = max(min(avg_distance, scaled_max_head_size), scaled_min_head_size)
            sigma = beta * avg_distance
            ksize = 1.0 * avg_distance
        # Edge processing: clip the kernel window to the map, then shrink
        # ksize so the kernel still fits in the clipped window.
        x1 = x - int(math.floor(ksize / 2))
        y1 = y - int(math.floor(ksize / 2))
        x2 = x + int(math.ceil(ksize / 2))
        y2 = y + int(math.ceil(ksize / 2))
        if x1 < 0 or y1 < 0 or x2 > h or y2 > w:
            x1 = max(0, x1)
            y1 = max(0, y1)
            x2 = min(h, x2)
            y2 = min(w, y2)
            tmp = x2 - x1 if (x2 - x1) < (y2 - y1) else y2 - y1
            ksize = min(tmp, ksize)
        # NOTE(review): this halves ksize unconditionally, so the stamped
        # kernel covers only part of the [x1:x2, y1:y2] window — verify intent.
        ksize = int(math.floor(ksize / 2))
        H = fspecial(ksize, sigma)
        # Accumulate the kernel; each head contributes total mass ~1.
        density_map[x1:x1 + ksize, y1:y1 + ksize] = density_map[x1:x1 + ksize, y1:y1 + ksize] + H
    return np.asarray(density_map, dtype=np.float32)
def get_cropped_crowd_image(ori_crowd_img, points, crop_size):
    """
    Crops a random square sub-image from a crowd image.

    :param ori_crowd_img: original crowd image, the shape is [h, w, channel]
    :param points: the original position set of all points (mutated in place
        for points that fall inside the crop)
    :param crop_size: the cropped crowd image size we need
    :return: cropped crowd image, cropped points, cropped crowd count
    """
    img_h, img_w = ori_crowd_img.shape[0], ori_crowd_img.shape[1]
    # If the image is smaller than the requested crop, halve the crop size.
    if img_h < crop_size or img_w < crop_size:
        crop_size = crop_size // 2
    # Pick the crop's top-left corner uniformly at random.
    top = random.randint(0, img_h - crop_size)
    left = random.randint(0, img_w - crop_size)
    bottom = top + crop_size
    right = left + crop_size
    cropped_crowd_img = ori_crowd_img[top:bottom, left:right, ...]
    # Keep only points inside the crop, shifted into crop coordinates.
    # Rows are [x, y] here: point[1] is compared against the row range.
    cropped_points = []
    for idx in range(len(points)):
        if top <= points[idx, 1] <= bottom and left <= points[idx, 0] <= right:
            points[idx, 0] = points[idx, 0] - left
            points[idx, 1] = points[idx, 1] - top
            cropped_points.append(points[idx])
    cropped_points = np.asarray(cropped_points)
    return cropped_crowd_img, cropped_points, len(cropped_points)
def get_scaled_crowd_image_and_points(crowd_img, points, scale):
    """
    Gets a scaled crowd image and scaled points for the density map.

    :param crowd_img: the crowd image to be downscaled
    :param points: the positions to be downscaled (mutated in place)
    :param scale: the integer scale factor
    :return: scaled crowd image, scaled points
    """
    height, width = crowd_img.shape[0], crowd_img.shape[1]
    scaled_crowd_img = cv.resize(crowd_img, (width // scale, height // scale))
    # Points live in pixel coordinates, so they shrink by the same factor.
    for idx, row in enumerate(points):
        points[idx] = row / scale
    return scaled_crowd_img, points
def read_train_data(img_path, gt_path, crop_size=256, scale=8, knn_phase=True, k=2, min_head_size=16, max_head_size=200):
    """
    Reads one training sample from the dataset as network input and label.

    :param img_path: the crowd image path
    :param gt_path: the label (ground truth) .mat file path; must contain
        image_info[0][0]['location'][0][0] head positions
    :param crop_size: the random crop size
    :param scale: the scale factor, according to the accumulated downsampling factor
    :param knn_phase: True or False, whether to use the geometry-adaptive
        Gaussian kernel or the fixed one
    :param k: an integer, the number of nearest neighbors
    :param min_head_size: the minimum head size in the original crowd image
    :param max_head_size: the maximum head size in the original crowd image
    :return: cropped crowd image with a leading batch dimension, the scaled
        density map shaped (1, h, w, 1), and the crowd count shaped (1, 1)
    """
    ori_crowd_img = cv.imread(img_path)
    # read the .mat file in dataset
    label_data = loadmat(gt_path)
    points = label_data['image_info'][0][0]['location'][0][0]
    # Random crop first, then downscale the crop and its points together.
    cropped_crowd_img, cropped_points, cropped_crowd_count = get_cropped_crowd_image(ori_crowd_img, points, crop_size=crop_size)
    cropped_scaled_crowd_img, cropped_scaled_points = get_scaled_crowd_image_and_points(cropped_crowd_img, cropped_points, scale=scale)
    cropped_scaled_crowd_img_size = [cropped_scaled_crowd_img.shape[0], cropped_scaled_crowd_img.shape[1]]
    # Head-size bounds shrink by the same factor as the image.
    scaled_min_head_size = min_head_size / scale
    scaled_max_head_size = max_head_size / scale
    # Density map is generated after cropping and scaling.
    density_map = get_density_map(cropped_scaled_crowd_img_size, cropped_scaled_points,
                                  knn_phase, k, scaled_min_head_size, scaled_max_head_size)
    # Add the batch dimension expected by the network.
    cropped_crowd_img = cropped_crowd_img.reshape((1, cropped_crowd_img.shape[0], cropped_crowd_img.shape[1], cropped_crowd_img.shape[2]))
    cropped_crowd_count = np.asarray(cropped_crowd_count).reshape((1, 1))
    cropped_scaled_density_map = density_map.reshape((1, density_map.shape[0], density_map.shape[1], 1))
    return cropped_crowd_img, cropped_scaled_density_map, cropped_crowd_count
# def read_
def read_test_data(img_path, gt_path, scale=4, deconv_is_used=False, knn_phase=True, k=6, min_head_size=40, max_head_size=200):
    """
    Reads one testing sample from the dataset as network input and label.

    :param img_path: the crowd image path
    :param gt_path: the label (ground truth) text file path; one
        ``_ _ x y w h`` line per head where x/y are normalized coordinates
    :param scale: the scale factor, according to the accumulated downsampling factor
    :param deconv_is_used: if True, trim the image so dim // scale is even
        and rescale the points by the same ratio
    :param knn_phase: True or False, whether to use the geometry-adaptive
        Gaussian kernel or the fixed one
    :param k: an integer, the number of nearest neighbors
    :param min_head_size: the minimum head size in the original crowd image
    :param max_head_size: the maximum head size in the original crowd image
    :return: the crowd image with a leading batch dimension and the scaled
        density map shaped (1, h, w, 1)
    """
    ori_crowd_img = cv.imread(img_path)
    width = ori_crowd_img.shape[1]
    height = ori_crowd_img.shape[0]
    # Parse the ground truth file; use a context manager so the handle is
    # closed (the original leaked it via a bare open()).
    points = []
    with open(gt_path) as label_data:
        for line in label_data:
            [_, _, x, y, w, h] = line.split(' ')
            # Normalized coordinates -> pixel coordinates.
            xc = float(x) * width
            yc = float(y) * height
            points.append([xc, yc])
    points = np.array(points)
    h, w = ori_crowd_img.shape[0], ori_crowd_img.shape[1]
    if deconv_is_used:
        # Trim so the downscaled dimensions are even; rescale points by the
        # same height/width ratios to keep them aligned.
        h_ = h - (h // scale) % 2
        rh = h_ / h
        w_ = w - (w // scale) % 2
        rw = w_ / w
        ori_crowd_img = cv.resize(ori_crowd_img, (w_, h_))
        points[:, 1] = points[:, 1] * rh
        points[:, 0] = points[:, 0] * rw
    scaled_crowd_img, scaled_points = get_scaled_crowd_image_and_points(ori_crowd_img, points, scale=scale)
    scaled_crowd_img_size = [scaled_crowd_img.shape[0], scaled_crowd_img.shape[1]]
    scaled_min_head_size = min_head_size / scale
    scaled_max_head_size = max_head_size / scale
    # Density map is generated after scaling.
    density_map = get_density_map(scaled_crowd_img_size, scaled_points, knn_phase, k, scaled_min_head_size, scaled_max_head_size)
    # Add the batch dimension expected by the network.
    ori_crowd_img = ori_crowd_img.reshape((1, ori_crowd_img.shape[0], ori_crowd_img.shape[1], ori_crowd_img.shape[2]))
    scaled_density_map = density_map.reshape((1, density_map.shape[0], density_map.shape[1], 1))
    return ori_crowd_img, scaled_density_map
def mae_metric(ground_truth, inference):
    """Mean absolute error between ground-truth and inferred counts."""
    diff = np.subtract(ground_truth, inference)
    return np.mean(np.abs(diff))
def se_metric(ground_truth, inference):
    """Mean squared error between ground-truth and inferred counts."""
    diff = np.subtract(ground_truth, inference)
    return np.mean(np.square(diff))
def show_density_map(density_map, name):
    """
    Renders a density map with the jet colormap and saves it to ./<name>.png.

    :param density_map: the density map, the shape is [h, w]
    :param name: base file name (without extension) for the saved figure
    """
    plt.imshow(density_map, cmap='jet')
    plt.savefig('./{}.png'.format(name))
def set_gpu(gpu=0):
    """
    Restricts CUDA to a single GPU via environment variables.

    :param gpu: gpu id to expose to CUDA
    :return:
    """
    # Order devices by PCI bus so ids are stable, then expose only `gpu`.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu)
|
{"hexsha": "1b9a63503d11a7ac86a5150cfa7f7c1ad8b7ae76", "size": 12545, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lib/utils/density.py", "max_stars_repo_name": "Anothorld/FairMOT", "max_stars_repo_head_hexsha": "6dbd7bbfac4c665c664baeeb9c1dd8f292e53cbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lib/utils/density.py", "max_issues_repo_name": "Anothorld/FairMOT", "max_issues_repo_head_hexsha": "6dbd7bbfac4c665c664baeeb9c1dd8f292e53cbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/utils/density.py", "max_forks_repo_name": "Anothorld/FairMOT", "max_forks_repo_head_hexsha": "6dbd7bbfac4c665c664baeeb9c1dd8f292e53cbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.402640264, "max_line_length": 138, "alphanum_fraction": 0.6806695895, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3459}
|
#pragma once
#include <boost/filesystem.hpp>
#include <fstream>
#include <stdint.h>
#include <cstddef>
#include <eosio/chain/block_header.hpp>
#include <eosio/chain/combined_database.hpp>
#include <eosio/chain/exceptions.hpp>
#include <eosio/chain/log_catalog.hpp>
#include <eosio/chain/log_data_base.hpp>
#include <eosio/chain/log_index.hpp>
#include <eosio/chain/types.hpp>
#include <eosio/state_history/transaction_trace_cache.hpp>
#include <fc/bitutil.hpp>
#include <fc/io/cfile.hpp>
#include <fc/io/datastream.hpp>
#include <fc/log/logger.hpp>
namespace eosio {
namespace bfs = boost::filesystem;
/*
* *.log:
* +---------+----------------+-----------+------------------+-----+---------+----------------+
* | Entry i | Pos of Entry i | Entry i+1 | Pos of Entry i+1 | ... | Entry z | Pos of Entry z |
* +---------+----------------+-----------+------------------+-----+---------+----------------+
*
* *.index:
* +----------------+------------------+-----+----------------+
* | Pos of Entry i | Pos of Entry i+1 | ... | Pos of Entry z |
* +----------------+------------------+-----+----------------+
*
* each entry:
* state_history_log_header
* payload
*/
// Every log entry starts with a 64-bit magic: the name "ship" in the upper
// 32 bits and the format version in the lower 32 bits.
inline uint64_t ship_magic(uint32_t version) {
   using namespace eosio::chain::literals;
   return "ship"_n.to_uint64_t() | version;
}

// True when the upper 32 bits of `magic` carry the "ship" tag.
inline bool is_ship(uint64_t magic) {
   using namespace eosio::chain::literals;
   return (magic & 0xffff'ffff'0000'0000) == "ship"_n.to_uint64_t();
}

// Extract the version from the low 32 bits of the magic.  The cast is
// explicit (the original relied on an implicit narrowing conversion).
inline uint32_t get_ship_version(uint64_t magic) { return static_cast<uint32_t>(magic); }

// Versions 0 and 1 are readable by this code.
inline bool is_ship_supported_version(uint64_t magic) { return get_ship_version(magic) <= 1; }

static const uint32_t ship_current_version = 1;
// Header written before every entry payload in the *.log file.
struct state_history_log_header {
   uint64_t magic = ship_magic(ship_current_version);
   chain::block_id_type block_id = {};
   uint64_t payload_size = 0;
};

// On-disk serialized size of the header: the sum of the field sizes is used
// instead of sizeof(state_history_log_header) so struct padding is excluded.
static const int state_history_log_header_serial_size = sizeof(state_history_log_header::magic) +
                                                        sizeof(state_history_log_header::block_id) +
                                                        sizeof(state_history_log_header::payload_size);
// Read-only/read-write view over a single state-history *.log file, accessed
// through a memory map.  Offsets passed to the *_at accessors are entry start
// positions (i.e. positions of a state_history_log_header).
class state_history_log_data : public chain::log_data_base<state_history_log_data> {
   std::string filename;

 public:
   state_history_log_data() = default;
   state_history_log_data(const fc::path& path, mapmode mode = mapmode::readonly)
       : filename(path.string()) {
      open(path, mode);
   }

   // (Re)maps `path`; closes any previously mapped file first.
   void open(const fc::path& path, mapmode mode = mapmode::readonly) {
      if (file.is_open())
         file.close();
      file.open(path.string(), mode);
      return;
   }

   // Format version taken from the magic of the first entry (at offset 0).
   uint32_t version() const { return get_ship_version(chain::read_buffer<uint64_t>(file.const_data())); }
   uint32_t first_block_num() const { return block_num_at(0); }
   uint32_t first_block_position() const { return 0; }

   // Datastream over the payload of the entry at `pos`, plus that entry's
   // version.  ro = read-only view, rw = writable view.
   std::pair<fc::datastream<const char*>, uint32_t> ro_stream_at(uint64_t pos) const {
      uint32_t ver = get_ship_version(chain::read_buffer<uint64_t>(file.const_data() + pos));
      return std::make_pair(
          fc::datastream<const char*>(file.const_data() + pos + sizeof(state_history_log_header), payload_size_at(pos)),
          ver);
   }

   std::pair<fc::datastream<char*>, uint32_t> rw_stream_at(uint64_t pos) const {
      uint32_t ver = get_ship_version(chain::read_buffer<uint64_t>(file.const_data() + pos));
      return std::make_pair(
          fc::datastream<char*>(file.data() + pos + sizeof(state_history_log_header), payload_size_at(pos)), ver);
   }

   // Block number is the first 4 bytes of the block id, stored big-endian,
   // hence the byte-order reversal.
   uint32_t block_num_at(uint64_t position) const {
      return fc::endian_reverse_u32(
          chain::read_buffer<uint32_t>(file.const_data() + position + offsetof(state_history_log_header, block_id)));
   }

   chain::block_id_type block_id_at(uint64_t position) const {
      return chain::read_buffer<chain::block_id_type>(file.const_data() + position +
                                                      offsetof(state_history_log_header, block_id));
   }

   uint64_t payload_size_at(uint64_t pos) const;
   // Rebuilds the companion *.index file for this log.
   void construct_index(const fc::path& index_file_name) const;
};
// Configuration for a state-history log and its rotation/retention policy.
struct state_history_config {
   bfs::path log_dir;            // directory holding the active log/index pair
   bfs::path retained_dir;       // where rotated-out (retained) logs are kept
   bfs::path archive_dir;        // where logs beyond max_retained_files go
   uint32_t  stride = UINT32_MAX; // blocks per log file before splitting
   uint32_t  max_retained_files = 10; // retained files kept before archiving
};
// Append-only log of per-block state-history entries plus a position index
// (see the file-format diagram at the top of this header).  Older entries may
// live in rotated files managed by `catalog`.
class state_history_log {
 private:
   using cfile_stream = fc::datastream<fc::cfile>;
   const char* const name = "";   // label used in log messages
   cfile_stream index;            // the *.index file
   uint32_t _begin_block = 0;     // first block in the active log file
   uint32_t _end_block = 0;       // one past the last block in the active file
   chain::block_id_type last_block_id;
   uint32_t version = ship_current_version;
   uint32_t stride;               // blocks per file before split_log()

 protected:
   cfile_stream write_log;
   cfile_stream read_log;
   using catalog_t = chain::log_catalog<state_history_log_data, chain::log_index<chain::state_history_exception>>;
   catalog_t catalog;             // rotated-out log files

 public:
   // The type aliases below help to make it obvious about the meanings of member function return values.
   using block_num_type = uint32_t;
   using version_type = uint32_t;
   using file_position_type = uint64_t;
   using config_type = state_history_config;

   state_history_log(const char* const name, const state_history_config& conf);

   // First available block: from the catalog if it has any, else the active file.
   block_num_type begin_block() const {
      block_num_type result = catalog.first_block_num();
      return result != 0 ? result : _begin_block;
   }
   block_num_type end_block() const { return _end_block; }

   // Writes header + payload, then the index entry.  On any failure the log
   // file is truncated back to its pre-entry size so a partial entry is
   // never left behind.
   template <typename F>
   void write_entry(state_history_log_header& header, const chain::block_id_type& prev_id, F write_payload) {
      auto [block_num, start_pos] = write_entry_header(header, prev_id);
      try {
         write_payload(write_log);
         write_entry_position(header, start_pos, block_num);
      } catch (...) {
         write_log.close();
         boost::filesystem::resize_file(write_log.get_file_path(), start_pos);
         write_log.open("rb+");
         throw;
      }
   }

   std::optional<chain::block_id_type> get_block_id(block_num_type block_num);

 protected:
   void get_entry_header(block_num_type block_num, state_history_log_header& header);

 private:
   void read_header(state_history_log_header& header, bool assert_version = true);
   void write_header(const state_history_log_header& header);
   bool get_last_block(uint64_t size);
   void recover_blocks(uint64_t size);
   void open_log(bfs::path filename);
   void open_index(bfs::path filename);
   file_position_type get_pos(block_num_type block_num);
   void truncate(block_num_type block_num);
   void split_log();
   /**
    *  @returns the block num and the file position
    **/
   std::pair<block_num_type,file_position_type> write_entry_header(const state_history_log_header& header, const chain::block_id_type& prev_id);
   void write_entry_position(const state_history_log_header& header, file_position_type pos, block_num_type block_num);
}; // state_history_log
// State-history log holding per-block transaction traces.  Traces accumulate
// in `cache` during a block and are flushed by store().
class state_history_traces_log : public state_history_log {
   state_history::transaction_trace_cache cache;

 public:
   bool trace_debug_mode = false;
   state_history::compression_type compression = state_history::compression_type::zlib;

   state_history_traces_log(const state_history_config& conf);

   static bool exists(bfs::path state_history_dir);

   // Buffer a trace/transaction pair for the block currently in progress.
   void add_transaction(const chain::transaction_trace_ptr& trace, const chain::packed_transaction_ptr& transaction) {
      cache.add_transaction(trace, transaction);
   }

   chain::bytes get_log_entry(block_num_type block_num);

   // A new block starts: drop any traces buffered for the previous one.
   void block_start(uint32_t block_num) { cache.clear(); }

   void store(const chainbase::database& db, const chain::block_state_ptr& block_state);

   /**
    *  @param[in,out] ids The ids to been pruned and returns the ids not found in the specified block
    **/
   void prune_transactions(block_num_type block_num, std::vector<chain::transaction_id_type>& ids);
};
// State-history log holding per-block chain-state (delta) entries.
class state_history_chain_state_log : public state_history_log {
 public:
   state_history_chain_state_log(const state_history_config& conf);

   chain::bytes get_log_entry(block_num_type block_num);

   void store(const chain::combined_database& db, const chain::block_state_ptr& block_state);
};
} // namespace eosio
FC_REFLECT(eosio::state_history_log_header, (magic)(block_id)(payload_size))
|
{"hexsha": "595628da155674e3e4c558dcd8aaacf0e7859557", "size": 8538, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libraries/state_history/include/eosio/state_history/log.hpp", "max_stars_repo_name": "forfreeday/eos", "max_stars_repo_head_hexsha": "11d35f0f934402321853119d36caeb7022813743", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 13162.0, "max_stars_repo_stars_event_min_datetime": "2017-05-29T22:08:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T19:25:05.000Z", "max_issues_repo_path": "libraries/state_history/include/eosio/state_history/log.hpp", "max_issues_repo_name": "forfreeday/eos", "max_issues_repo_head_hexsha": "11d35f0f934402321853119d36caeb7022813743", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 6450.0, "max_issues_repo_issues_event_min_datetime": "2017-05-30T14:41:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:30:04.000Z", "max_forks_repo_path": "libraries/state_history/include/eosio/state_history/log.hpp", "max_forks_repo_name": "forfreeday/eos", "max_forks_repo_head_hexsha": "11d35f0f934402321853119d36caeb7022813743", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 4491.0, "max_forks_repo_forks_event_min_datetime": "2017-05-29T22:08:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T07:09:52.000Z", "avg_line_length": 37.4473684211, "max_line_length": 144, "alphanum_fraction": 0.664675568, "num_tokens": 1949}
|
module Builtin.Reflection where
open import Prelude hiding (abs)
open import Prelude.Equality.Unsafe
open import Builtin.Float
open import Container.Traversable
open import Control.Monad.Zero
open import Agda.Builtin.Reflection as Builtin
open Builtin public
hiding ( primQNameEquality
; primQNameLess
; primShowQName
; primMetaEquality
; primMetaLess
; primShowMeta )
--- Names ---
-- Show/Eq/Ord instances for Name, built on Agda's primitive QName
-- operations.  The equality/ordering proofs use unsafe coercions, trusting
-- that the primitives agree with propositional equality.
instance
  ShowName : Show Name
  showsPrec {{ShowName}} _ x = showString (primShowQName x)

instance
  EqName : Eq Name
  _==_ {{EqName}} x y with primQNameEquality x y
  ... | true  = yes unsafeEqual
  ... | false = no  unsafeNotEqual

-- Strict ordering evidence: a proof that the primitive comparison is true.
data LessName (x y : Name) : Set where
  less-name : primQNameLess x y ≡ true → LessName x y

private
  -- Three-way comparison; if neither direction is primitively less, the
  -- names are (unsafely) deemed equal.
  cmpName : ∀ x y → Comparison LessName x y
  cmpName x y with inspect (primQNameLess x y)
  ... | true  with≡ eq = less (less-name eq)
  ... | false with≡ _  with inspect (primQNameLess y x)
  ...   | true  with≡ eq = greater (less-name eq)
  ...   | false with≡ _  = equal unsafeEqual

instance
  OrdName : Ord Name
  OrdName = defaultOrd cmpName

  OrdLawsName : Ord/Laws Name
  Ord/Laws.super OrdLawsName = it
  less-antirefl {{OrdLawsName}} (less-name eq) = unsafeNotEqual eq
  less-trans {{OrdLawsName}} (less-name _) (less-name _) = less-name unsafeEqual
--- Meta variables ---
-- Show/Eq/Ord instances for Meta, mirroring the Name instances above but
-- over the primitive Meta operations.
instance
  ShowMeta : Show Meta
  showsPrec {{ShowMeta}} _ x = showString (primShowMeta x)

instance
  EqMeta : Eq Meta
  _==_ {{EqMeta}} x y with primMetaEquality x y
  ... | true  = yes unsafeEqual
  ... | false = no  unsafeNotEqual

-- Strict ordering evidence for metavariables.
data LessMeta (x y : Meta) : Set where
  less-meta : primMetaLess x y ≡ true → LessMeta x y

private
  cmpMeta : ∀ x y → Comparison LessMeta x y
  cmpMeta x y with inspect (primMetaLess x y)
  ... | true  with≡ eq = less (less-meta eq)
  ... | false with≡ _  with inspect (primMetaLess y x)
  ...   | true  with≡ eq = greater (less-meta eq)
  ...   | false with≡ _  = equal unsafeEqual

instance
  OrdMeta : Ord Meta
  OrdMeta = defaultOrd cmpMeta

  OrdLawsMeta : Ord/Laws Meta
  Ord/Laws.super OrdLawsMeta = it
  less-antirefl {{OrdLawsMeta}} (less-meta eq) = unsafeNotEqual eq
  less-trans {{OrdLawsMeta}} (less-meta _) (less-meta _) = less-meta unsafeEqual
--- Literals ---
-- Show instance for reflected literals: delegate to the Show instance of
-- each literal's underlying value.
instance
  ShowLiteral : Show Literal
  showsPrec {{ShowLiteral}} _ (nat n)    = shows n
  showsPrec {{ShowLiteral}} _ (word64 n) = shows n
  showsPrec {{ShowLiteral}} _ (float x)  = shows x
  showsPrec {{ShowLiteral}} _ (char c)   = shows c
  showsPrec {{ShowLiteral}} _ (string s) = shows s
  showsPrec {{ShowLiteral}} _ (name x)   = shows x
  showsPrec {{ShowLiteral}} _ (meta x)   = shows x
--- Terms ---
-- Pattern synonyms for the three argument visibilities (all relevant).
pattern vArg x = arg (arg-info visible relevant) x
pattern hArg x = arg (arg-info hidden relevant) x
pattern iArg x = arg (arg-info instance′ relevant) x

-- Projections out of Arg.
unArg : ∀ {A} → Arg A → A
unArg (arg _ x) = x

getArgInfo : ∀ {A} → Arg A → ArgInfo
getArgInfo (arg i _) = i

getVisibility : ∀ {A} → Arg A → Visibility
getVisibility (arg (arg-info v _) _) = v

getRelevance : ∀ {A} → Arg A → Relevance
getRelevance (arg (arg-info _ r) _) = r

isVisible : ∀ {A} → Arg A → Bool
isVisible (arg (arg-info visible _) _) = true
isVisible _ = false

instance
  FunctorArg : Functor Arg
  fmap {{FunctorArg}} f (arg i x) = arg i (f x)

  TraversableArg : Traversable Arg
  traverse {{TraversableArg}} f (arg i x) = ⦇ (arg i) (f x) ⦈

-- Projection out of Abs (named binder).
unAbs : ∀ {A} → Abs A → A
unAbs (abs _ x) = x

instance
  FunctorAbs : Functor Abs
  fmap {{FunctorAbs}} f (abs s x) = abs s (f x)

  TraversableAbs : Traversable Abs
  traverse {{TraversableAbs}} f (abs s x) = ⦇ (abs s) (f x) ⦈

-- The pattern lambda with a single absurd clause: λ ().
absurd-lam : Term
absurd-lam = pat-lam (absurd-clause (vArg absurd ∷ []) ∷ []) []
--- TC monad ---
-- Functor/Applicative/Monad structure for the TC monad, defined in terms of
-- the builtin returnTC/bindTC, plus a few convenience operations.
mapTC : ∀ {a b} {A : Set a} {B : Set b} → (A → B) → TC A → TC B
mapTC f m = bindTC m λ x → returnTC (f x)

instance
  FunctorTC : ∀ {a} → Functor {a} TC
  fmap {{FunctorTC}} = mapTC

  ApplicativeTC : ∀ {a} → Applicative {a} TC
  pure  {{ApplicativeTC}} = returnTC
  _<*>_ {{ApplicativeTC}} = monadAp bindTC

  MonadTC : ∀ {a} → Monad {a} TC
  _>>=_ {{MonadTC}} = bindTC

  -- Universe-heterogeneous variants (result universe may differ).
  FunctorTC′ : ∀ {a b} → Functor′ {a} {b} TC
  fmap′ {{FunctorTC′}} = mapTC

  ApplicativeTC′ : ∀ {a b} → Applicative′ {a} {b} TC
  _<*>′_ {{ApplicativeTC′}} = monadAp′ bindTC

  MonadTC′ : ∀ {a b} → Monad′ {a} {b} TC
  _>>=′_ {{MonadTC′}} = bindTC

  -- Failure is an empty type error; alternatives are tried via catchTC.
  FunctorZeroTC : ∀ {a} → FunctorZero {a} TC
  empty {{FunctorZeroTC}} = typeError []

  AlternativeTC : ∀ {a} → Alternative {a} TC
  _<|>_ {{AlternativeTC}} = catchTC

-- A tactic takes the goal (hole) and solves it in TC.
Tactic = Term → TC ⊤

give : Term → Tactic
give v = λ hole → unify hole v

-- Declare and define a function in one step.
define : Arg Name → Type → List Clause → TC ⊤
define f a cs = declareDef f a >> defineFun (unArg f) cs

-- Fresh metavariable of a given type (or of unknown type).
newMeta : Type → TC Term
newMeta = checkType unknown

newMeta! : TC Term
newMeta! = newMeta unknown

typeErrorS : ∀ {a} {A : Set a} → String → TC A
typeErrorS s = typeError (strErr s ∷ [])

-- Commit the current TC state before blocking on the meta.
blockOnMeta! : ∀ {a} {A : Set a} → Meta → TC A
blockOnMeta! x = commitTC >>=′ λ _ → blockOnMeta x

inferNormalisedType : Term → TC Type
inferNormalisedType t = withNormalisation true (inferType t)
--- Convenient wrappers ---
-- Zero for non-datatypes
-- Number of parameters of a data type; zero for non-datatypes.
getParameters : Name → TC Nat
getParameters d =
  caseM getDefinition d of λ
  { (data-type n _) → pure n
  ; _ → pure 0 }

-- Constructors of a data or record type; type error otherwise.
getConstructors : Name → TC (List Name)
getConstructors d =
  caseM getDefinition d of λ
  { (data-type _ cs) → pure cs
  ; (record-type c _) → pure (c ∷ [])
  ; _ → typeError (strErr "Cannot get constructors of non-data or record type" ∷ nameErr d ∷ [])
  }

-- Clauses of a function definition; type error otherwise.
getClauses : Name → TC (List Clause)
getClauses d =
  caseM getDefinition d of λ
  { (function cs) → pure cs
  ; _ → typeError (strErr "Cannot get constructors of non-function type" ∷ nameErr d ∷ [])
  }

-- Get the constructor of a record type (or single-constructor data type)
recordConstructor : Name → TC Name
recordConstructor r =
  caseM getConstructors r of λ
  { (c ∷ []) → pure c
  ; _ → typeError $ strErr "Cannot get constructor of non-record type" ∷ nameErr r ∷ [] }
-- Injectivity of constructors
-- Injectivity of constructors: each lemma inverts one constructor of the
-- reflected syntax, proved by matching the equality proof against refl.
arg-inj₁ : ∀ {A i i′} {x x′ : A} → arg i x ≡ arg i′ x′ → i ≡ i′
arg-inj₁ refl = refl

arg-inj₂ : ∀ {A i i′} {x x′ : A} → arg i x ≡ arg i′ x′ → x ≡ x′
arg-inj₂ refl = refl

arg-info-inj₁ : ∀ {v v′ r r′} → arg-info v r ≡ arg-info v′ r′ → v ≡ v′
arg-info-inj₁ refl = refl

arg-info-inj₂ : ∀ {v v′ r r′} → arg-info v r ≡ arg-info v′ r′ → r ≡ r′
arg-info-inj₂ refl = refl

abs-inj₁ : ∀ {A s s′} {x x′ : A} → abs s x ≡ abs s′ x′ → s ≡ s′
abs-inj₁ refl = refl

abs-inj₂ : ∀ {A s s′} {x x′ : A} → abs s x ≡ abs s′ x′ → x ≡ x′
abs-inj₂ refl = refl

--- Terms ---

var-inj₁ : ∀ {x x′ args args′} → Term.var x args ≡ var x′ args′ → x ≡ x′
var-inj₁ refl = refl

var-inj₂ : ∀ {x x′ args args′} → Term.var x args ≡ var x′ args′ → args ≡ args′
var-inj₂ refl = refl

con-inj₁ : ∀ {c c′ args args′} → Term.con c args ≡ con c′ args′ → c ≡ c′
con-inj₁ refl = refl

con-inj₂ : ∀ {c c′ args args′} → Term.con c args ≡ con c′ args′ → args ≡ args′
con-inj₂ refl = refl

def-inj₁ : ∀ {f f′ args args′} → def f args ≡ def f′ args′ → f ≡ f′
def-inj₁ refl = refl

def-inj₂ : ∀ {f f′ args args′} → def f args ≡ def f′ args′ → args ≡ args′
def-inj₂ refl = refl

meta-inj₁ : ∀ {f f′ args args′} → Term.meta f args ≡ meta f′ args′ → f ≡ f′
meta-inj₁ refl = refl

meta-inj₂ : ∀ {f f′ args args′} → Term.meta f args ≡ meta f′ args′ → args ≡ args′
meta-inj₂ refl = refl

lam-inj₁ : ∀ {v v′ t t′} → lam v t ≡ lam v′ t′ → v ≡ v′
lam-inj₁ refl = refl

lam-inj₂ : ∀ {v v′ t t′} → lam v t ≡ lam v′ t′ → t ≡ t′
lam-inj₂ refl = refl

pat-lam-inj₁ : ∀ {v v′ t t′} → pat-lam v t ≡ pat-lam v′ t′ → v ≡ v′
pat-lam-inj₁ refl = refl

pat-lam-inj₂ : ∀ {v v′ t t′} → pat-lam v t ≡ pat-lam v′ t′ → t ≡ t′
pat-lam-inj₂ refl = refl

pi-inj₁ : ∀ {t₁ t₁′ t₂ t₂′} → pi t₁ t₂ ≡ pi t₁′ t₂′ → t₁ ≡ t₁′
pi-inj₁ refl = refl

pi-inj₂ : ∀ {t₁ t₁′ t₂ t₂′} → pi t₁ t₂ ≡ pi t₁′ t₂′ → t₂ ≡ t₂′
pi-inj₂ refl = refl

sort-inj : ∀ {x y} → agda-sort x ≡ agda-sort y → x ≡ y
sort-inj refl = refl

lit-inj : ∀ {x y} → Term.lit x ≡ lit y → x ≡ y
lit-inj refl = refl

--- Sorts ---

set-inj : ∀ {x y} → set x ≡ set y → x ≡ y
set-inj refl = refl

slit-inj : ∀ {x y} → Sort.lit x ≡ lit y → x ≡ y
slit-inj refl = refl

--- Patterns ---

pcon-inj₁ : ∀ {x y z w} → Pattern.con x y ≡ con z w → x ≡ z
pcon-inj₁ refl = refl

pcon-inj₂ : ∀ {x y z w} → Pattern.con x y ≡ con z w → y ≡ w
pcon-inj₂ refl = refl

pvar-inj : ∀ {x y} → Pattern.var x ≡ var y → x ≡ y
pvar-inj refl = refl

plit-inj : ∀ {x y} → Pattern.lit x ≡ lit y → x ≡ y
plit-inj refl = refl

proj-inj : ∀ {x y} → Pattern.proj x ≡ proj y → x ≡ y
proj-inj refl = refl

--- Clauses ---

clause-inj₁ : ∀ {x y z w} → clause x y ≡ clause z w → x ≡ z
clause-inj₁ refl = refl

clause-inj₂ : ∀ {x y z w} → clause x y ≡ clause z w → y ≡ w
clause-inj₂ refl = refl

absurd-clause-inj : ∀ {x y} → absurd-clause x ≡ absurd-clause y → x ≡ y
absurd-clause-inj refl = refl

--- Literals ---

nat-inj : ∀ {x y} → nat x ≡ nat y → x ≡ y
nat-inj refl = refl

word64-inj : ∀ {x y} → word64 x ≡ word64 y → x ≡ y
word64-inj refl = refl

float-inj : ∀ {x y} → float x ≡ float y → x ≡ y
float-inj refl = refl

char-inj : ∀ {x y} → char x ≡ char y → x ≡ y
char-inj refl = refl

string-inj : ∀ {x y} → string x ≡ string y → x ≡ y
string-inj refl = refl

name-inj : ∀ {x y} → name x ≡ name y → x ≡ y
name-inj refl = refl

meta-inj : ∀ {x y} → Literal.meta x ≡ meta y → x ≡ y
meta-inj refl = refl
|
{"hexsha": "d483be0e226f8ff5c93ab3131ca6fb43a17ab366", "size": 9402, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/Builtin/Reflection.agda", "max_stars_repo_name": "lclem/agda-prelude", "max_stars_repo_head_hexsha": "75016b4151ed601e28f4462cd7b6b1aaf5d0d1a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Builtin/Reflection.agda", "max_issues_repo_name": "lclem/agda-prelude", "max_issues_repo_head_hexsha": "75016b4151ed601e28f4462cd7b6b1aaf5d0d1a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Builtin/Reflection.agda", "max_forks_repo_name": "lclem/agda-prelude", "max_forks_repo_head_hexsha": "75016b4151ed601e28f4462cd7b6b1aaf5d0d1a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8628571429, "max_line_length": 96, "alphanum_fraction": 0.6117847267, "num_tokens": 3486}
|
import json
import re
import sys
import time
from multiprocessing import Process
import cv2
import imutils
import numpy as np
from imutils.video import FileVideoStream
from kafka import KafkaProducer, TopicPartition
from kafka.partitioner import RoundRobinPartitioner, Murmur2Partitioner
from .utils import np_to_json
class StreamVideo(Process):
    # Reads frames from a video source and publishes them, JSON-encoded, to a
    # Kafka topic. One process per camera; the camera number is parsed from
    # the process name, which therefore MUST match "StreamVideo-<number>".
    def __init__(self, video_path,
                 topic,
                 topic_partitions=8,
                 use_cv2=False,
                 pub_obj_key="original",
                 group=None,
                 target=None,
                 name=None,
                 verbose=False,
                 rr_distribute=False):
        """Video Streaming Producer Process Class. Publishes frames from a video source to a topic.
        :param video_path: video path or url
        :param topic: kafka topic to publish stamped encoded frames.
        :param topic_partitions: number of partitions this topic has, for distributing messages among partitions
        :param use_cv2: send every frame, using cv2 library, else will use imutils to speedup training
        :param pub_obj_key: associate tag with every frame encoded, can be used later to separate raw frames
        :param group: group should always be None; it exists solely for compatibility with threading.
        :param target: Process Target
        :param name: Process name
        :param verbose: print logs on stdout
        :param rr_distribute: use round robin partitioner, should be set same as consumers.
        """
        super(StreamVideo, self).__init__(group=group, target=target, name=name)
        # URL for streaming video
        self.video_path = video_path
        # TOPIC TO PUBLISH
        self.frame_topic = topic
        self.topic_partitions = topic_partitions
        # Parsed from the multiprocessing name; raises IndexError if the
        # process name does not contain "StreamVideo-<digits>".
        self.camera_num = int(re.findall(r"StreamVideo-([0-9]*)", self.name)[0])
        self.use_cv2 = use_cv2
        self.object_key = pub_obj_key
        self.verbose = verbose
        self.rr_distribute = rr_distribute
    def run(self):
        """Publish video frames as json objects, timestamped, marked with camera number.
        Source:
        self.video_path: URL for streaming video
        self.kwargs["use_cv2"]: use raw cv2 streaming, set to false to use smart fast streaming --> not every frame is sent.
        Publishes:
        A dict {"frame": string(base64encodedarray), "dtype": obj.dtype.str, "shape": obj.shape,
        "timestamp": time.time(), "camera": camera, "frame_num": frame_num}
        """
        # Choose how messages spread over partitions: round-robin for even
        # distribution, otherwise murmur2 hashing of the message key.
        if self.rr_distribute:
            partitioner = RoundRobinPartitioner(partitions=
                                                [TopicPartition(topic=self.frame_topic, partition=i)
                                                 for i in range(self.topic_partitions)])
        else:
            partitioner = Murmur2Partitioner(partitions=
                                             [TopicPartition(topic=self.frame_topic, partition=i)
                                              for i in range(self.topic_partitions)])
        # Producer object, set desired partitioner
        # NOTE(review): broker address is hard-coded; keys/values are sent as
        # UTF-8 encoded str/JSON respectively.
        frame_producer = KafkaProducer(bootstrap_servers=["my-cluster-kafka-brokers:9092"],
                                       key_serializer=lambda key: str(key).encode(),
                                       value_serializer=lambda value: json.dumps(value).encode(),
                                       partitioner=partitioner)
        print("[CAM {}] URL: {}, SET PARTITIONS FOR FRAME TOPIC: {}".format(self.camera_num,
                                                                            self.video_path,
                                                                            frame_producer.partitions_for(
                                                                                self.frame_topic)))
        # Use either option
        video = cv2.VideoCapture(self.video_path) if self.use_cv2 else FileVideoStream(self.video_path).start()
        #video.set(cv2.CAP_PROP_FPS,30)
        # Track frame number
        frame_num = 0
        start_time = time.time()
        print("[CAM {}] START TIME {}: ".format(self.camera_num, start_time))
        # Read URL, Transform, Publish
        while True:
            # using raw cv2, frame by frame
            if self.use_cv2:
                success, image = video.read()
                # check if the file has read
                if not success:
                    if self.verbose:
                        print("[CAM {}] URL: {}, END FRAME: {}".format(self.name,
                                                                       self.video_path,
                                                                       frame_num))
                    break
            # using smart, only unique frames, skips frames, faster fps
            else:
                image = video.read()
                # check if the file has read
                if image is None:
                    if self.verbose:
                        print("[CAM {}] URL: {}, END FRAME: {}".format(self.name,
                                                                       self.video_path,
                                                                       frame_num))
                    break
            # Attach metadata to frame, transform into JSON
            message = self.transform(frame=image,
                                     frame_num=frame_num,
                                     object_key=self.object_key,
                                     camera=self.camera_num,
                                     verbose=self.verbose)
            # Partition to be sent to
            # NOTE(review): `part` is only used for logging below; send() is
            # not given partition=part, so the actual partition is chosen by
            # the configured partitioner from the message key — confirm intent.
            part = frame_num % self.topic_partitions
            # Logging
            if self.verbose:
                print("\r[PRODUCER][Cam {}] FRAME: {} TO PARTITION: {}".format(message["camera"],
                                                                               frame_num, part))
            # Publish to specific partition
            frame_producer.send(self.frame_topic, key="{}-{}".format(self.camera_num, frame_num), value=message)
            # if frame_num % 1000 == 0:
            # Flushing after every frame trades throughput for low latency;
            # the commented-out guard above suggests a batched flush was tried.
            frame_producer.flush()
            frame_num += 1
        # clear the capture
        if self.use_cv2:
            video.release()
        else:
            video.stop()
        if self.verbose:
            print("[CAM {}] FINISHED. STREAM TIME {}: ".format(self.camera_num, time.time() - start_time))
        # NOTE(review): Process.run()'s return value is discarded by
        # multiprocessing; this True/False is informational only.
        return True if frame_num > 0 else False
    @staticmethod
    def transform(frame, frame_num, object_key="original", camera=0, verbose=False):
        """Serialize frame, create json message with serialized frame, camera number and timestamp.
        :param frame: numpy.ndarray, raw frame
        :param frame_num: frame number in the particular video/camera
        :param object_key: identifier for these objects
        :param camera: Camera Number the frame is from
        :param verbose: print out logs
        :return: A dict {"frame": string(base64encodedarray), "dtype": obj.dtype.str, "shape": obj.shape,
        "timestamp": time.time(), "camera": camera, "frame_num": frame_num}
        """
        # Downscale to a fixed width of 400 px before serializing.
        frame = imutils.resize(frame, width=400)
        if verbose:
            # print raw frame size
            print("\nRAW ARRAY SIZE: ", sys.getsizeof(frame))
        # serialize frame
        frame_dict = np_to_json(frame.astype(np.uint8), prefix_name=object_key)
        # Metadata for frame
        message = {"timestamp": time.time(), "camera": camera, "frame_num": frame_num}
        # add frame and metadata related to frame
        message.update(frame_dict)
        if verbose:
            # print message size
            print("\nMESSAGE SIZE: ", sys.getsizeof(message))
        return message
|
{"hexsha": "8a7c3768054afa6e38df2eed1409bcf2f3a15fb3", "size": 7868, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/frame_producer.py", "max_stars_repo_name": "karanveersingh5623/pico-test", "max_stars_repo_head_hexsha": "b24ec8835193c9c71579686bebf55d5993cea0a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/frame_producer.py", "max_issues_repo_name": "karanveersingh5623/pico-test", "max_issues_repo_head_hexsha": "b24ec8835193c9c71579686bebf55d5993cea0a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/frame_producer.py", "max_forks_repo_name": "karanveersingh5623/pico-test", "max_forks_repo_head_hexsha": "b24ec8835193c9c71579686bebf55d5993cea0a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9945355191, "max_line_length": 128, "alphanum_fraction": 0.5396542959, "include": true, "reason": "import numpy", "num_tokens": 1469}
|
# <-- encoding UTF-8 -->
# Empirical study on NHSS dataset (county level)
# -------------------------------------
## DOC STRING
#
#
# Tianhao Zhao (GitHub: Clpr)
# Dec 2018
# -------------------------------------
# -------------------------------------
## SECTION 0: ENVIRONMENT
library(sqldf) # sql enquiry
library(openxlsx) # easy xlsx IO
source("./src/mathtools.r") # math tools, e.g. Lorenz, Theil, descriptive stats
source("./src/mapplots.r") # tools of map plots, using ggplot2
# environment par dict
# NOTE(review): envNHSS, li_Dat_NHSS and df_NHSS are not created in this
# script; presumably they are defined by an earlier script or the source()d
# files above — confirm before running this file standalone.
envNHSS$Output <- "./output/proc_2_NHSS/" # alter output directory
# -------------------------------------
## SECTION 1: GENERAL DESCRIPTIVE STATISTICS
cat("\nGeneral descriptive statistics of NHSS dataset:\n")
cat("(please note: the income now is the residuals of income ~ edu)\n")
cat("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
cat("PLEASE NOTE: because we have normalized PCA components, we only do descriptive statistics for income & edu!!!!!!")
cat("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# --------------
li_Descript_NHSS <- list() # a list of descriptive statistics
for(tmpYname in envNHSS$Ynames){
    # 1.1 get a namelist of all numeric variables, both Y and Xcore
    tmp <- c( tmpYname, envNHSS$Xcore )
    li_Descript_NHSS[[tmpYname]] <- func_DescrStat( li_Dat_NHSS[[tmpYname]][,tmp] )
    # 1.2 print
    print(li_Descript_NHSS[[tmpYname]]); cat("-----------------------------------------\n")
}
# output to xlsx (one sheet per health outcome)
openxlsx::write.xlsx(li_Descript_NHSS, paste(sep="",envNHSS$Output,"Descript_NHSS.xlsx") )
# -------------------------------------
## SECTION 2: COUNTY-LEVEL LORENZ CURVE & GINI COEF
# NOTE: in this section, we use counties as units/individuals to plot Lorenz curves in every year (98,03,08),
# meanwhile, Gini coefficients in the three years are calculated;
# later, in following sections, we will add other inequality indices
# 2.0 prepare a df for Gini coefs
# NOTE(review): the NA columns below (illnessratio, illnessday, chronicratio)
# must carry the same names as envNHSS$Ynames, because the loop writes
# df_Gini_NHSS[,tmpYname] — confirm the two stay in sync.
df_Gini_NHSS <- data.frame(
    year = c(1998, 2003, 2008), # years
    counties = c( sum(df_NHSS$year == 1998),sum(df_NHSS$year == 2003),sum(df_NHSS$year == 2008) ), # number of counties in every year's data
    illnessratio = NA, # prevalence
    illnessday = NA,
    chronicratio = NA
)
# 2.1 use functions in mathtools.r to compute Lorenz Curve & Gini coefficients
# NOTE: please refer to mathtools.r for how we calculated the both
# and, if you want to see the plotting of Lorenz Curve, you may DIY (using tmp$LorenzX and tmp$LorenzY)
for(tmpYear in df_Gini_NHSS$year){
    for(tmpYname in envNHSS$Ynames){
        # 2.1.1 compute Lorenz Curve & Gini in specific year
        # eval(parse(text=paste(sep="",
        #     "tmp <- LorenzCurve( df_NHSS$",tmpYname,"[df_NHSS$",envNHSS$flagT," == ",tmpYear,"] )"
        # )))
        tmp <- li_Dat_NHSS[[tmpYname]]
        tmp <- LorenzCurve( tmp[ tmp[,envNHSS$flagT] == tmpYear ,tmpYname] )
        # 2.1.2 save Gini coef
        df_Gini_NHSS[,tmpYname][df_Gini_NHSS$year == tmpYear] <- tmp$GiniIdx
    }
}
# print & output to xlsx
print(df_Gini_NHSS)
# NOTE: the results will be output to a csv file with other kinds of indices, later
# -------------------------------------
## SECTION 3: COUNTY-LEVEL THEIL I & II INDEXES
# NOTE: in this section, we use counties as units/individuals to calculate Theil-I & Theil-II indexes.
# meanwhile, indices in the three years are calculated;
# later, in following sections, we will add other inequality indices
# 3.0 prepare two df (same column layout as df_Gini_NHSS; see the note there)
df_Theil1_NHSS <- data.frame(
    year = c(1998, 2003, 2008), # years
    counties = c( sum(df_NHSS$year == 1998),sum(df_NHSS$year == 2003),sum(df_NHSS$year == 2008) ), # number of counties in every year's data
    illnessratio = NA, # prevalence
    illnessday = NA,
    chronicratio = NA
)
df_Theil2_NHSS <- df_Theil1_NHSS
# 3.1 use functions in mathtools.r to do computing
for(tmpYear in df_Gini_NHSS$year){ # loop on years
    for(tmpYname in envNHSS$Ynames){ # loop on health outcomes
        # slice a temp dataset
        tmpdf <- li_Dat_NHSS[[tmpYname]]
        # 3.1.1 compute Theil-I & Theil-II in a specific year
        # (Type = "T" fills the Theil-I table; Type = "L" the Theil-II table)
        tmp <- Theil( tmpdf[,tmpYname][tmpdf[,envNHSS$flagT] == tmpYear], Type = "T" )
        df_Theil1_NHSS[,tmpYname][ df_Theil1_NHSS$year == tmpYear ] <- tmp
        tmp <- Theil( tmpdf[,tmpYname][tmpdf[,envNHSS$flagT] == tmpYear], Type = "L" )
        df_Theil2_NHSS[,tmpYname][ df_Theil2_NHSS$year == tmpYear ] <- tmp
    }
}
# 3.2 print info
cat("\nThe county-level Theil-I type index in every year of NHSS dataset are:\n")
print(df_Theil1_NHSS)
cat("\nThe county-level Theil-II type index in every year of NHSS dataset are:\n")
print(df_Theil2_NHSS)
# NOTE: the results will be output to a csv file with other kinds of indices, later
# -------------------------------------
## SECTION 4: COUNTY-LEVEL COEF OF VARIANCE (C.V.) & VARIANCE
# NOTE: in this section, we use counties as units/individuals to calculate C.V. & variances
# meanwhile, indices in the three years are calculated;
# 4.0 prepare two df (same column layout as df_Gini_NHSS)
df_CoefVar_NHSS <- data.frame(
    year = c(1998, 2003, 2008), # years
    counties = c( sum(df_NHSS$year == 1998),sum(df_NHSS$year == 2003),sum(df_NHSS$year == 2008) ), # number of counties in every year's data
    illnessratio = NA, # prevalence
    illnessday = NA,
    chronicratio = NA
)
df_Variance_NHSS <- df_CoefVar_NHSS
# 4.1 calculation
for(tmpYear in df_Gini_NHSS$year){ # loop on years
    for(tmpYname in envNHSS$Ynames){ # loop on health outcomes
        # slice a temp dataset
        tmpdf <- li_Dat_NHSS[[tmpYname]]
        # 4.1.1 get data vector for this outcome & year
        tmp <- tmpdf[,tmpYname][ tmpdf[,envNHSS$flagT] == tmpYear ]
        # sample variance and coefficient of variation (sd/mean)
        df_Variance_NHSS[,tmpYname][df_Variance_NHSS$year == tmpYear] <- var(tmp)
        df_CoefVar_NHSS[,tmpYname][df_CoefVar_NHSS$year == tmpYear] <- sd(tmp) / mean(tmp)
    }
}
# 4.2 print info
cat("\nThe county-level coef of variance in every year of NHSS dataset are:\n")
print(df_CoefVar_NHSS)
cat("\nThe county-level variances in every year of NHSS dataset are:\n")
print(df_Variance_NHSS)
# -------------------------------------
## SECTION 5: COLLECT INEQUALITY INDICES & OUTPUT TO CSV
# NOTE: in this section, we firstly bind all kinds of inequality indices to a single dframe,
# then, output the large dframe to a csv file
# 5.1 table joining
# (row order of the Index labels matches the rbind() order below: 3 rows each)
df_InequalIdx_NHSS <- data.frame( Index = rep(c("Gini","Theil-I","Theil-II","Variance","Coef of Variance"), each = 3) )
df_InequalIdx_NHSS <- cbind(
    df_InequalIdx_NHSS, rbind( df_Gini_NHSS, df_Theil1_NHSS, df_Theil2_NHSS, df_Variance_NHSS, df_CoefVar_NHSS )
)
# 5.2 garbage recycling
rm( df_Gini_NHSS, df_Theil1_NHSS, df_Theil2_NHSS, df_Variance_NHSS, df_CoefVar_NHSS )
rm( tmpYear, tmpYname, tmpdf )
# 5.3 output to csv
write.csv( df_InequalIdx_NHSS, file = paste(sep="",envNHSS$Output,"InequalityIdx_NHSS.csv") )
# -------------------------------------
## INTERVAL: MATCH CITY GIS DATA WITH NHSS DATASET
# NOTE: in this section, we merge the China city/County GIS data with df_NHSS;
# the data have been preloaded by mapplots.r, and named as MapGIScity;
# in the next section, the dataset will be used to plot
# NOTE: we do not plot PCs -> health outcomes, therefore, just use df_NHSS!
tmpdf <- plyr::join( df_NHSS, MapGIScity[,c("ID","long","lat")], by = "ID" ) # we use GB (country standard) codes of city/county to match by
tmpdf <- na.omit(tmpdf) # drop counties without GIS coordinates
cat("\nPlotting ...\n")
# -------------------------------------
## SECTION 6: COLORED MAPS OF HEALTH OUTCOMES
# NOTE: in this section, we plot colored maps for every health outcomes,
# where every health outcome (in three years) are seen as a pool data,
# to see the (geographic) distribution (descriptively, intuitively) of the outcomes;
# using ggplot2, and tools in mapplots.r
# finally, we output these maps/figures to PDF figures
# NOTE: please refer to mapplots.r for more information & technical details
# NOTE: in final plots, the size of circle indicates the values of independents, where the color indicates the values of health outcomes
# the plots work to display the relationship between X & Y
# --------
# 6.1 colored maps
# 1998,2003,2008 data are now seen as a pool dataset
for(tmpYname in envNHSS$Ynames){
    # 6.1.1 temp slice: pooled health outcome & province tags
    # NOTE(review): tmp is not referenced by the active code below (the plots
    # read tmpdf directly); it only feeds the commented-out aggregation.
    tmp <- tmpdf[,c(tmpYname,envNHSS$flagProv, "ID", "long", "lat" )]
    # # 6.1.2 averaging by province tags (weighted by sample size/population of each county)
    # tmp <- sqldf::sqldf(paste(sep="",
    #     "SELECT DISTINCT SUM(",envNHSS$flagSampleSize," * ",tmpYname,") / SUM(",envNHSS$flagSampleSize,") AS ",tmpYname,
    #     ", ",envNHSS$flagProv," FROM tmp GROUP BY ",envNHSS$flagProv
    # ))
    # 6.1.3 using mapplots.r to get an instance of current map figure
    # eval(parse(text=paste(sep="",
    #     "tmpfig <- func_MapProv( tmp$",tmpYname,", tmp$",envNHSS$flagProv,", vecName = \"",tmpYname,"\" )"
    # )))
    # income -> health outcome
    tmpfig1 <- func_MapCityXY( tmpdf$income, tmpdf[,tmpYname], tmpdf$lat, tmpdf$long,
                               Xname = "income", Yname = tmpYname , FontSize = 5,
                               ColorScale = c("skyblue","red"), CircleScale = c(2,15) )
    # edu -> health outcome
    tmpfig2 <- func_MapCityXY( tmpdf$edu, tmpdf[,tmpYname], tmpdf$lat, tmpdf$long,
                               Xname = "edu", Yname = tmpYname , FontSize = 5,
                               ColorScale = c("skyblue","red"), CircleScale = c(2,15) )
    # output figures
    # NOTE(review): eval(parse(...)) is not needed here — a direct call with a
    # paste0() path would behave the same; left unchanged.
    eval(parse(text=paste(sep="",
        "func_SaveMap2PDF( tmpfig1, \"",envNHSS$Output,"Map_income_2_",tmpYname,".pdf\" ) "
    )))
    eval(parse(text=paste(sep="",
        "func_SaveMap2PDF( tmpfig2, \"",envNHSS$Output,"Map_edu_2_",tmpYname,".pdf\" ) "
    )))
}
# 6.2 print info
cat("\n We have created colored maps for health outcomes and output them to the assigned output directory as PDF figures\n")
# --------------------------------------
# GARBAGE COLLECTION
rm( tmp, tmpfig1, tmpfig2, tmpYname, tmpdf )
#
# # -------------------------------------
# ## SECTION 7: COLORED MAPS OF CORE INDEPENDENTS (INCOME & EDU)
# # NOTE: please refer to mapplots.r for more information & technical details
#
# # 7.1 colored maps
# # 1998,2003,2008 data are now seen as a pool dataset
# for(tmpXname in envNHSS$Xcore){
# # 7.1.1 temp slice: pooled health outcome & province tags
# tmp <- df_NHSS[,c(tmpXname,envNHSS$flagProv,envNHSS$flagSampleSize)]
# # 7.1.2 averaging by province tags
# tmp <- sqldf::sqldf(paste(sep="",
# "SELECT DISTINCT SUM(",envNHSS$flagSampleSize," * ",tmpXname,") / SUM(",envNHSS$flagSampleSize,") AS ",tmpXname,
# ", ",envNHSS$flagProv," FROM tmp GROUP BY ",envNHSS$flagProv
# ))
# # 7.1.3 using mapplots.r to get an instance of current map figure
# eval(parse(text=paste(sep="",
# "tmpfig <- func_MapProv( tmp$",tmpXname,", tmp$",envNHSS$flagProv,", vecName = \"",tmpXname,"\" )"
# )))
# # 7.1.4 output figures
# eval(parse(text=paste(sep="",
# "func_SaveMap2PDF( tmpfig, \"",envNHSS$Output,"Map_",tmpXname,".pdf\" ) "
# )))
# }
#
# # 7.2 print info
# cat("\nWe have created colored maps for core independents (income & edu) and output them to the assigned output directory as PDF figures\n")
|
{"hexsha": "f162fecfad0b687fe89fcefb814882a5f9bee349", "size": 11558, "ext": "r", "lang": "R", "max_stars_repo_path": "scripts/proc_2_NHSS.r", "max_stars_repo_name": "Clpr/HealthInequality2018Dec", "max_stars_repo_head_hexsha": "d88d80c97e46f3e0b10c2c15e83eb0932957e69d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/proc_2_NHSS.r", "max_issues_repo_name": "Clpr/HealthInequality2018Dec", "max_issues_repo_head_hexsha": "d88d80c97e46f3e0b10c2c15e83eb0932957e69d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/proc_2_NHSS.r", "max_forks_repo_name": "Clpr/HealthInequality2018Dec", "max_forks_repo_head_hexsha": "d88d80c97e46f3e0b10c2c15e83eb0932957e69d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1952662722, "max_line_length": 144, "alphanum_fraction": 0.6252811905, "num_tokens": 3479}
|
# Copyright 2018 Anthony H Thomas and Arun Kumar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Echo this script's own source to stdout before running (an audit/log aid).
# NOTE(review): Python 2 syntax (print statement, StandardError below) —
# this file targets a Python 2 interpreter.
with open(__file__) as fh: print fh.read()
import sys
import os
import atexit
import numpy as np
import pandas as pd
import numpy.linalg as alg
# Root of the benchmark checkout; needed so lib/python becomes importable.
ROOT = os.getenv('BENCHMARK_PROJECT_ROOT')
if (ROOT is None):
    msg = 'Please set environment variable BENCHMARK_PROJECT_ROOT'
    raise StandardError(msg)  # StandardError exists only in Python 2
sys.path.append(os.path.join(ROOT,'lib','python'))
from sql_cxn import SQLCxn
import np_timing_utils as utils
# Maps a worker count (string) to the Greenplum master port for that cluster.
GPDB_PORT_MAP = {'1': 5481, '2': 6431, '4': 6431, '8': 6431, '16': 6431}
def doMatrixOp(kwargs):
    """Time a MADlib matrix operation over a range of matrix sizes.

    Expected kwargs (all strings parsed from the command line):
        opType    - one of TRANS, NORM, GMM, MVM, TSM, ADD
        mattype   - label used only in the output file name
        fixedAxis - size of the non-scaled matrix dimension
        nrows     - space-separated list of sizes for the scaled dimension
        nproc     - optional worker count; when given, a GPDB cluster is
                    (re)started and results go to the cpu-scale output file

    Side effects: creates random matrices in the database as needed and
    appends one CSV row of timings per size to a file under ../output/.
    """
    opType = kwargs.get('opType')
    mattype = kwargs.get('mattype')
    fixedAxis = int(kwargs.get('fixedAxis'))
    # Python 2: map() returns a list, so this can be iterated below.
    nrow_scale = map(lambda x: int(x), kwargs['nrows'].split(' '))
    nproc = kwargs.get('nproc')
    port = GPDB_PORT_MAP[nproc] if nproc is not None else None
    if nproc is not None:
        cxn = start_gpdb(port, nproc)
        cxn.execute('DROP TABLE IF EXISTS M16_tall')
        # Guarantee the cluster is stopped even if timing raises.
        atexit.register(stop_gpdb, nproc, cxn)
    else:
        cxn = SQLCxn(username='ubuntu', db='ubuntu', timeout=10000)
    colnames = ['rows','time1','time2','time3','time4','time5']
    runTimes = pd.DataFrame(np.zeros((1,len(colnames))))
    runTimes.columns = colnames
    if nproc is None:
        path = os.path.join('..','output','madlib_{}_{}.txt'.format(mattype, opType))
    else:
        path = os.path.join('..','output','madlib_cpu_{}_scale.txt'.format(opType))
    for nr in nrow_scale:
        # For GMM the scaled axis is the column count; otherwise the rows.
        nrow = fixedAxis if opType == 'GMM' else nr
        ncol = nr if opType == 'GMM' else fixedAxis
        print nrow
        print ncol
        Mname = 'M{}{}'.format(nrow,ncol)
        # Reuse previously generated random matrices when they exist.
        if not cxn.table_exists('M{}{}'.format(nrow,ncol)):
            cxn.randomMatrix(nrow, ncol, 'M{}{}'.format(nrow, ncol))
        if (opType == 'GMM'):
            if not cxn.table_exists('N{}{}'.format(ncol, nrow)):
                cxn.randomMatrix(ncol, nrow, 'N{}{}'.format(ncol, nrow))
            Nname = 'N{}{}'.format(ncol, nrow)
        elif (opType == 'ADD'):
            if not cxn.table_exists('N{}{}'.format(nrow, ncol)):
                cxn.randomMatrix(nrow, ncol, 'N{}{}'.format(nrow, ncol))
            Nname = 'N{}{}'.format(nrow, ncol)
        # Tables created by the op itself; dropped again after timing.
        cleanup = []
        if (opType == 'TRANS'):
            call = "matrix_trans('{}',NULL,'Mt',NULL)".format(Mname)
            cleanup.append('Mt')
        elif (opType == 'NORM'):
            call = "matrix_norm('{}',NULL,'fro')".format(Mname)
        elif (opType == 'GMM'):
            call = "matrix_mult('{}',NULL,'{}',NULL,'MN',NULL)".format(Mname,Nname)
            cleanup.append('MN')
        elif (opType == 'MVM'):
            array_call = 'SELECT array_agg(random()) FROM generate_series(1,{})'.format(
                ncol)
            call = "matrix_vec_mult('{}',NULL,({}))".format(Mname,array_call)
        elif (opType == 'TSM'):
            call = "matrix_mult('{0}','trans=True','{0}',NULL,'MtM',NULL)".format(Mname)
            cleanup.append('MtM')
        elif (opType == 'ADD'):
            call = "matrix_add('{}',NULL,'{}',NULL,'M_N',NULL)".format(Mname, Nname)
            cleanup.append('M_N')
        else:
            raise NotImplementedError('Invalid Operation')
        sql_call = 'SELECT madlib.{}'.format(call)
        # NOTE(review): DataFrame.ix is deprecated/removed in modern pandas;
        # acceptable only under the pinned Python 2 stack this file targets.
        runTimes.ix[:,'rows'] = nr if nproc is None else nproc
        runTimes.ix[:,1:] = cxn.time(sql_call, cleanup)
        # Append one CSV row per size; write the header only on first write.
        writeHeader = False if (os.path.exists(path)) else True
        runTimes.to_csv(path, index=False, header = writeHeader, mode = 'a')
def start_gpdb(port, nproc):
    """Start a Greenplum cluster and return an open SQLCxn to it.

    With port=None the default cluster is started and the connection uses
    the default port; otherwise the per-worker-count cluster under
    /gpsegs/gpdb-<nproc> is started and the connection targets `port`.
    """
    if port is None:
        os.system('yes | gpstart')
        return SQLCxn(username='ubuntu', db='ubuntu', timeout=10000)
    os.system('yes | gpstart -d /gpsegs/gpdb-{}/master/gpseg-1'.format(nproc))
    return SQLCxn(username='ubuntu', db='ubuntu', timeout=10000, port=port)
def stop_gpdb(nproc, cxn):
    """Close the database connection and shut the Greenplum cluster down."""
    cxn._cxn.close()
    command = ('yes | gpstop' if nproc is None
               else 'yes | gpstop -d /gpsegs/gpdb-{}/master/gpseg-1'.format(nproc))
    os.system(command)
if __name__=='__main__':
    # Parse key=value style CLI arguments and run the benchmark driver.
    kwargs = utils.parse_cmd_args(sys.argv[1:])
    doMatrixOp(kwargs)
|
{"hexsha": "0a893bfb5b631146e65924ef953183a0642786c1", "size": 4748, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/SimpleMatrixOps (Single Node Dense)/src/madlib_matrix_ops.py", "max_stars_repo_name": "ADALabUCSD/SLAB", "max_stars_repo_head_hexsha": "86d71b345c50b3a73eefcad3da39dc8d919d9652", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-23T02:42:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T06:30:13.000Z", "max_issues_repo_path": "tests/SimpleMatrixOps (Single Node Dense)/src/madlib_matrix_ops.py", "max_issues_repo_name": "ADALabUCSD/SLAB", "max_issues_repo_head_hexsha": "86d71b345c50b3a73eefcad3da39dc8d919d9652", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/SimpleMatrixOps (Single Node Dense)/src/madlib_matrix_ops.py", "max_forks_repo_name": "ADALabUCSD/SLAB", "max_forks_repo_head_hexsha": "86d71b345c50b3a73eefcad3da39dc8d919d9652", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-10T15:27:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-10T15:27:15.000Z", "avg_line_length": 39.5666666667, "max_line_length": 88, "alphanum_fraction": 0.6080454928, "include": true, "reason": "import numpy", "num_tokens": 1311}
|
from numpy import arange
class AggregateSelector(object):
    """Builders for Presto/Athena ``approx_percentile`` SELECT expressions."""

    @staticmethod
    def deciles_approx(
            column: str,
            min_decile: float = 0.0,
            max_decile: float = 1.0,
            as_name: str = None
    ) -> str:
        """Build an approx_percentile expression over deciles of `column`.

        Percentile points run from `min_decile` to `max_decile` in steps of
        0.1, each rounded to 3 decimal places. The expression is aliased to
        `as_name`, defaulting to "<column>__deciles".
        """
        alias = f'{column}__deciles' if as_name is None else as_name
        points = ', '.join(
            str(round(p, 3)) for p in arange(min_decile, max_decile + 0.1, 0.1)
        )
        return f"approx_percentile({column}, ARRAY[{points}]) as {alias}"

    @staticmethod
    def percentiles_approx(
            column: str,
            min_percentile: float = 0.0,
            max_percentile: float = 1.0,
            as_name: str = None
    ) -> str:
        """Build an approx_percentile expression over percentiles of `column`.

        Same as :meth:`deciles_approx` but with a step of 0.01 and a default
        alias of "<column>__percentiles".
        """
        alias = f'{column}__percentiles' if as_name is None else as_name
        points = ', '.join(
            str(round(p, 3)) for p in arange(min_percentile, max_percentile + 0.01, 0.01)
        )
        return f"approx_percentile({column}, ARRAY[{points}]) as {alias}"
|
{"hexsha": "5c3b3f7f74ce61993bb5213012a160998c406c5a", "size": 1042, "ext": "py", "lang": "Python", "max_stars_repo_path": "aws_managers/athena/selectors/aggregate_selector.py", "max_stars_repo_name": "vahndi/aws-managers", "max_stars_repo_head_hexsha": "bdbfb2b8a9258a53e3ea4dfbbfe5491a34113899", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aws_managers/athena/selectors/aggregate_selector.py", "max_issues_repo_name": "vahndi/aws-managers", "max_issues_repo_head_hexsha": "bdbfb2b8a9258a53e3ea4dfbbfe5491a34113899", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aws_managers/athena/selectors/aggregate_selector.py", "max_forks_repo_name": "vahndi/aws-managers", "max_forks_repo_head_hexsha": "bdbfb2b8a9258a53e3ea4dfbbfe5491a34113899", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7714285714, "max_line_length": 80, "alphanum_fraction": 0.5403071017, "include": true, "reason": "from numpy", "num_tokens": 283}
|
/**
* author: Jochen K"upper
* created: Jan 2002
* file: pygsl/src/statisticsmodule.c
* $Id: floatmodule.c,v 1.8 2004/03/24 08:40:45 schnizer Exp $
*
* "
*/
#include <Python.h>
#include <gsl/gsl_statistics.h>
#include <pygsl/error_helpers.h>
#include <pygsl/block_helpers.h>
/* include real functions for default data-types (double in C) */
/* Preprocessor-based template instantiation: functions.c is written against
 * the STATMOD_* macros; defining them here and then including that file
 * generates the C `float` (Python `Float32`) variant of the statistics
 * wrappers. Sibling modules instantiate other element types the same way. */
#define STATMOD_WEIGHTED
#define STATMOD_APPEND_PY_TYPE(X) X ## Float32
#define STATMOD_APPEND_PYC_TYPE(X) X ## FLOAT
#define STATMOD_FUNC_EXT(X, Y) X ## _float ## Y
#define STATMOD_PY_AS_C PyFloat_AsDouble
#define STATMOD_C_TYPE float
#include "functions.c"
/* initialization */
PyGSL_STATISTICS_INIT(float, "float")
/*
* Local Variables:
* mode: c
* c-file-style: "Stroustrup"
* End:
*/
|
{"hexsha": "3866d0671ed4692897ae5064981bf6222d6a115e", "size": 755, "ext": "c", "lang": "C", "max_stars_repo_path": "production/pygsl-0.9.5/src/statistics/floatmodule.c", "max_stars_repo_name": "juhnowski/FishingRod", "max_stars_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "production/pygsl-0.9.5/src/statistics/floatmodule.c", "max_issues_repo_name": "juhnowski/FishingRod", "max_issues_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "production/pygsl-0.9.5/src/statistics/floatmodule.c", "max_forks_repo_name": "juhnowski/FishingRod", "max_forks_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-10-02T06:18:07.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-02T06:18:07.000Z", "avg_line_length": 17.9761904762, "max_line_length": 65, "alphanum_fraction": 0.7086092715, "num_tokens": 218}
|
from PIL import Image
from torch.utils.data import Dataset
import numpy as np
import torch
def default_loader(path):
    """Load the file at ``path`` via PIL and normalise it to 3-channel RGB."""
    image = Image.open(path)
    return image.convert('RGB')
class csv_Dataset(Dataset):
    """Map-style dataset of (image, label) pairs read from a DataFrame.

    `label_list` must have 'img_path' and 'label' columns. Labels in the
    source are 1-based; `__getitem__` returns them 0-based.
    """
    def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
        # Materialise (path, label) tuples once so indexing is cheap.
        self.imgs = [(row['img_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        path, label = self.imgs[index]
        # Rewrite the relative '../' prefix onto the hard-coded dataset root.
        path = path.replace('../', '/content/datasets/')
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, label - 1

    def __len__(self):
        return len(self.imgs)
# generate triplet example
class csv_triplet_Dataset(Dataset):
    # Yields (anchor, 0-based label, positive, negative) tuples for triplet
    # training. Positives share the anchor's label; negatives are drawn from
    # a uniformly chosen different label.
    def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
        imgs = []
        # label (as str) -> list of row indices with that label
        label_dict = {}
        for index, row in label_list.iterrows():
            imgs.append((row['img_path'], row['label']))
            label = row['label']
            if str(label) not in label_dict.keys():
                label_dict[str(label)] = []
                label_dict[str(label)].append(index)
            else:
                label_dict[str(label)].append(index)
        self.imgs = imgs
        self.label_dict = label_dict
        #print(len(self.imgs))
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
    def __getitem__(self, index):
        filename, label = self.imgs[index]
        # Resample until the positive differs from the anchor.
        # NOTE(review): loops forever if a class has only one sample — confirm
        # every label occurs at least twice in the csv.
        pos_index = index
        while pos_index==index:
            random_index = np.random.randint(0, len(self.label_dict[str(label)]))
            pos_index = self.label_dict[str(label)][random_index]
        # Draw a different label for the negative.
        # NOTE(review): assumes labels are the contiguous integers
        # 1..len(label_dict) — confirm against the csv.
        neg_label = label
        while neg_label == label:
            neg_label = np.random.randint(0, len(self.label_dict))+1
        random_index = np.random.randint(0, len(self.label_dict[str(neg_label)]))
        neg_index = self.label_dict[str(neg_label)][random_index]
        pos_filename, _ = self.imgs[pos_index]
        neg_filename, _ = self.imgs[neg_index]
        img = self.loader(filename)
        pos_img = self.loader(pos_filename)
        neg_img = self.loader(neg_filename)
        if self.transform is not None:
            img = self.transform(img)
            pos_img = self.transform(pos_img)
            neg_img = self.transform(neg_img)
        return img, label-1, pos_img, neg_img
    def __len__(self):
        return len(self.imgs)
class csv_pair_Dataset(Dataset):
    """Map-style dataset of (image, 0-based label) pairs from a DataFrame.

    `label_list` must have 'img_path' and 'label' columns; labels in the
    source are 1-based. NOTE(review): unlike csv_Dataset, paths are used
    as-is (no '../' remapping) — confirm that is intentional.
    """
    def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
        imgs = []
        for index, row in label_list.iterrows():
            imgs.append((row['img_path'], row['label']))
        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
    def __getitem__(self, index):
        filename, label = self.imgs[index]
        img = self.loader(filename)
        if self.transform is not None:
            img = self.transform(img)
        return img, label-1
    def __len__(self):
        # Bug fix: this map-style Dataset lacked __len__, so len(dataset) and
        # DataLoader iteration failed; every sibling class here defines it.
        return len(self.imgs)
class csv_negative_Dataset(Dataset):
    # Yields (image, 0-based label, compute_stage, negative image, 0-based
    # negative label); the negative is sampled from the row's top-5 confused
    # labels with fixed probabilities (see __getitem__).
    def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
        imgs = []
        # Hard-coded for 200 classes:
        # img_list[k,0]  = index of the first row whose label is k+1
        # img_number[k,0] = number of rows with label k+1
        # NOTE(review): the first-index bookkeeping below only works if rows
        # are sorted by label ascending with labels 1..200 — confirm the csv.
        img_list = np.zeros([200,1])
        img_number = np.zeros([200,1])
        img_label = 1
        for index, row in label_list.iterrows():
            imgs.append((row['img_path'], row['label'], row['correct'],
                         row['top1'], row['top2'], row['top3'],
                         row['top4'], row['top5']))
            img_number[row['label']-1,0] +=1
            if row['label'] == img_label:
                img_list[img_label - 1] = index
                img_label +=1
        self.imgs = imgs
        #print(len(self.imgs))
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
        self.img_list = np.uint16(img_list)
        self.img_number = np.uint16(img_number)
    def __getitem__(self, index):
        filename, label, compute_stage, N_lable1, N_label2, N_label3, N_label4, N_label5 = self.imgs[index]
        img = self.loader(filename)
        if self.transform is not None:
            img = self.transform(img)
        # Pick which of the top-5 confused labels supplies the negative:
        # p = 6/20, 5/20, 4/20, 3/20, 2/20 for top1..top5 respectively.
        tmp = np.random.randint(0, 20)
        if tmp<=5:
            N_label = N_lable1
        elif tmp<=10:
            N_label = N_label2
        elif tmp<=14:
            N_label = N_label3
        elif tmp<=17:
            N_label = N_label4
        else:
            N_label = N_label5
        # Uniformly pick one sample of the chosen negative label via the
        # (first-index + offset) bookkeeping built in __init__.
        random_number = np.random.randint(0, self.img_number[N_label-1,0])
        n_filename, n_label, _, _, _, _, _, _ = self.imgs[self.img_list[N_label-1,0]+random_number]
        n_img = self.loader(n_filename)
        if self.transform is not None:
            n_img = self.transform(n_img)
        return img, label-1, compute_stage, n_img, n_label-1
    def __len__(self):
        return len(self.imgs)
class csv_Dataset_big(Dataset):
    """Dataset returning (image, fine label-1, coarse label-1) triples.

    ``label_list`` is a pandas DataFrame with ``img_path``, ``label``
    and ``biglabel`` columns; both labels are 1-based in the CSV and
    0-based on return.
    """
    def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
        records = []
        for _, row in label_list.iterrows():
            records.append((row['img_path'], row['label'], row['biglabel']))
        self.imgs = records
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
    def __getitem__(self, index):
        path, fine_label, coarse_label = self.imgs[index]
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        return image, fine_label - 1, coarse_label - 1
    def __len__(self):
        return len(self.imgs)
|
{"hexsha": "cf4ec19ac609802cdc73f71e0da026b26637d1b3", "size": 5986, "ext": "py", "lang": "Python", "max_stars_repo_path": "ZXX_utils/load_csv_data.py", "max_stars_repo_name": "RiyaoDong/HGSL", "max_stars_repo_head_hexsha": "19fa984b3bfde0e3b7acbce87dd40177cd64f9b0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ZXX_utils/load_csv_data.py", "max_issues_repo_name": "RiyaoDong/HGSL", "max_issues_repo_head_hexsha": "19fa984b3bfde0e3b7acbce87dd40177cd64f9b0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ZXX_utils/load_csv_data.py", "max_forks_repo_name": "RiyaoDong/HGSL", "max_forks_repo_head_hexsha": "19fa984b3bfde0e3b7acbce87dd40177cd64f9b0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4201183432, "max_line_length": 107, "alphanum_fraction": 0.6020715002, "include": true, "reason": "import numpy", "num_tokens": 1374}
|
\xname{setup}
\chapter{Setting up a Java Program for Analysis}
\label{chap:setup}
This chapter describes how to set up a Java program for analysis using Chord.
Suppose the program has the following directory structure:
\begin{framed}
\begin{verbatim}
example/
src/
foo/
Main.java
...
classes/
foo/
Main.class
...
lib/
src/
taz/
...
jar/
taz.jar
chord.properties
\end{verbatim}
\end{framed}
The above structure is typical: the program's Java source
files are under {\tt src/}, its class files are under {\tt classes/},
and the source and jar files of the libraries used by the program are
under \code{lib/src/} and \code{lib/jar/}, respectively. The
purpose of the \code{chord.properties} file is explained below.
The only way to specify inputs to Chord, including the program
to be analyzed, is via system properties.
Section \ref{sec:properties-setting} describes various ways by which
properties can be passed to Chord. Here, we describe the
simplest approach, in which all properties of the program to be analyzed
that might be needed by Chord are defined in a file named \code{chord.properties}
that is located in the top-level directory of the program (directory \code{example/} above).
Then, Chord can be applied to the program by running the following command:
\begin{framed}
\begin{verbatim}
ant -Dchord.work.dir=<WORK_DIR> run
\end{verbatim}
\end{framed}
This command instructs Chord to run in the directory denoted by \code{<WORK_DIR>}, where it searches for a file
named \code{chord.properties} and
loads all properties defined in that file, if it exists.
Thus, for the above program, \code{<WORK_DIR>} must be the absolute or relative path of the
\code{example/} directory. A sample \code{chord.properties} file for the above program is as follows:
\begin{framed}
\begin{verbatim}
chord.main.class=foo.Main
chord.class.path=classes:lib/jar/taz.jar
chord.src.path=src:lib/src
chord.run.ids=0,1
chord.args.0="-thread 1 -n 10"
chord.args.1="-thread 2 -n 50"
\end{verbatim}
\end{framed}
Each relative file/directory name in the value of any property
defined in this file (e.g., the \code{lib/src} directory name in the value of
property \code{chord.src.path} above) is treated relative to the directory
specified by property \code{chord.work.dir}, whose default value
is the current directory.
Section \ref{sec:program-props} presents all program properties that are
recognized by Chord. Here, we only describe those that are most commonly
used, namely, those defined in the above sample properties file:
\begin{itemize}
\item
\code{chord.main.class} specifies the fully-qualified name of the main
class of the program.
\item
\code{chord.class.path} specifies the application classpath
of the program (the JDK standard library classpath is implicitly
included).
\item
\code{chord.src.path} specifies the Java source path of the program.
All analyses in Chord operate on Java bytecode. The only use
of this property is to HTMLize the Java source files of the program so
that the results of analyses can be reported at the Java
source code level.
\item
\code{chord.run.ids} specifies a list of IDs to identify runs of the
program. It is used by dynamic analyses to determine how many
times the program must be run. An additional use of this property is
to allow specifying the command-line arguments to use in the run
having ID {\tt <id>} via property \code{chord.args.<id>}, as
illustrated by properties \code{chord.args.0} and \code{chord.args.1}
above.
\end{itemize}
The above command does not do much beyond making Chord load the above
properties file. For Chord to do something interesting,
additional properties must be set that specify the function(s)
Chord must perform. All functions are summarized in Section \ref{sec:func-props}.
The most common function is to run one or more analyses on the input program;
it is described in Chapter \ref{chap:running}.
|
{"hexsha": "6ebcda5d1224aa96aaa0bfbc20c97c801c46efbc", "size": 4134, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/setup.tex", "max_stars_repo_name": "CSA-PLLab/STAND", "max_stars_repo_head_hexsha": "2e41f21b842ab43f23aecbf5527f6043ce837b29", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-07-07T11:34:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T22:22:18.000Z", "max_issues_repo_path": "doc/setup.tex", "max_issues_repo_name": "CSA-PLLab/STAND", "max_issues_repo_head_hexsha": "2e41f21b842ab43f23aecbf5527f6043ce837b29", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/setup.tex", "max_forks_repo_name": "CSA-PLLab/STAND", "max_forks_repo_head_hexsha": "2e41f21b842ab43f23aecbf5527f6043ce837b29", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-07-14T09:27:41.000Z", "max_forks_repo_forks_event_max_datetime": "2017-07-14T09:27:41.000Z", "avg_line_length": 38.6355140187, "max_line_length": 112, "alphanum_fraction": 0.7305273343, "num_tokens": 1013}
|
from django.shortcuts import render
from django.http import *
import numpy as np
from . import autoencoder
from . import models
import json as js
import cv2, base64, utils
bad = HttpResponseBadRequest(js.dumps('nope'), content_type='application/json')
def submitFace(res):
    """Validate and persist a user-submitted face.

    Expects an AJAX POST whose JSON body contains 'gender', 'parameters'
    (latent vector for the autoencoder, length = model bottleneck) and
    'choices' (poster demographic selections). Decodes the parameters to
    an image, writes it to faces/<uid>.jpg and stores a Face row.

    Returns the shared 400 response on any validation failure, or a JSON
    'ok' on success.
    """
    # --- validate request shape ---
    if not res.is_ajax() or not res.method == 'POST':
        return bad
    try:
        json = js.loads(res.body)
    except Exception:  # malformed JSON body
        return bad
    if (
        json is None or
        not isinstance(json, dict) or
        'gender' not in json or
        'parameters' not in json or
        'choices' not in json or
        not isinstance(json['choices'], dict)
    ):
        return bad
    # Anything other than 'f' is coerced to male.
    gender = 'f' if json['gender'] == 'f' else 'm'
    pcount = autoencoder.models[autoencoder.default]['bottleneck']
    arr = json['parameters']
    if not isinstance(arr, list) or len(arr) != pcount:
        return bad
    try:
        # Sanitize: NaN -> 0, clamp to the latent range the model expects.
        arr = np.nan_to_num(np.array(arr).astype(np.float64).clip(-20, 20))
    except Exception:  # non-numeric entries
        return bad
    # Every non-gender choice must be one of the declared options.
    choices = json['choices']
    for key, choice in models.choices.items():
        if key == 'g':
            continue  # gender is validated separately above
        if key not in choices:
            return bad
        choice_ = choices[key]
        for option in choice['options']:
            if option[0] == choice_:
                break
        else:  # no declared option matched the submitted value
            return bad
    # --- save data ---
    uid = utils.uid()
    img = autoencoder.decode(autoencoder.default, gender, arr)
    cv2.imwrite('faces/' + uid + '.jpg', cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    face = models.Face(
        uid = uid,
        gender = gender,
        parameters = js.dumps(list(arr)),
        poster_gender = choices['pg'],
        poster_sexuality = choices['ps'],
        poster_race = choices['pr'],
        poster_country = choices['pc']
    )
    face.save()
    return HttpResponse(js.dumps('ok'), content_type='application/json')
def sinn(mapping, key, value):
    """Set ``mapping[key] = value`` only when ``value`` is not None."""
    if value is None:
        return
    mapping[key] = value
def listFaces(res):
    """List up to 100 stored faces (newest first), filtered by query params.

    'rid' is a pagination cursor: only faces with id < rid are returned,
    and the response carries the smallest id seen so the client can
    request the next page. Optional filters: g, pg, ps, pr, pc.
    """
    if not res.is_ajax():
        return bad
    payload = {
        'faces': [],
        'rid': 1000000000
    }
    rid_param = res.GET.get('rid')
    if rid_param is not None and rid_param.isnumeric():
        payload['rid'] = int(rid_param)
    filters = {
        'id__lt': payload['rid']
    }
    # Only apply the demographic filters that were actually supplied.
    for field, param in (('gender', 'g'),
                         ('poster_gender', 'pg'),
                         ('poster_sexuality', 'ps'),
                         ('poster_race', 'pr'),
                         ('poster_country', 'pc')):
        sinn(filters, field, res.GET.get(param))
    for face in models.Face.objects.filter(**filters).order_by('-id')[0:100]:
        payload['rid'] = min(payload['rid'], face.id)
        payload['faces'].append({
            'uid': face.uid,
            'g': face.gender,
            'pg': face.poster_gender,
            'ps': face.poster_sexuality,
            'pr': face.poster_race,
            'pc': face.poster_country,
            'pa': face.poster_age
        })
    return HttpResponse(js.dumps(payload), content_type="application/json")
def serveFace(res, uid):
    """Serve the JPEG stored for ``uid`` from the faces/ directory.

    Raises Http404 for invalid uids or missing files. The uid is
    validated first, so it cannot be used for path traversal.
    """
    if not utils.isUid(uid):
        # BUG FIX: Http404 is an exception class; the original code
        # *returned* an instance instead of raising it, which Django
        # cannot treat as a response.
        raise Http404('nope')
    try:
        with open('faces/' + uid + '.jpg', "rb") as f:
            return HttpResponse(f.read(), content_type='image/jpeg')
    except OSError:
        # Missing/unreadable file only; other errors should propagate.
        raise Http404('nope')
def generateFace(res):
    """Decode latent parameter vectors into base64-encoded JPEG images.

    Expects an AJAX POST with JSON body {'gender': 'f'|'m',
    'faces': [[...], ...]} where each inner list has exactly
    `bottleneck` numeric entries. Returns a JSON list of base64 JPEG
    strings, or the shared 400 response on bad input.
    """
    if not res.is_ajax() or not res.method == 'POST':
        return bad
    try:
        json = js.loads(res.body)
    except Exception:  # malformed JSON body
        return bad
    parameters = autoencoder.models[autoencoder.default]['bottleneck']
    # BUG FIX: also require 'faces' to be present and a list -- the
    # original indexed json['faces'] unchecked, turning bad input into
    # a KeyError/TypeError (HTTP 500) instead of a 400.
    if (json is None or not isinstance(json, dict)
            or 'gender' not in json
            or 'faces' not in json or not isinstance(json['faces'], list)):
        return bad
    gender = 'f' if json['gender'] == 'f' else 'm'
    faces = []
    for arr in json['faces']:
        if not isinstance(arr, list) or len(arr) != parameters:
            return bad
        try:
            # Sanitize: NaN -> 0, clamp to the model's latent range.
            nums = np.nan_to_num(np.array(arr).astype(np.float64).clip(-20, 20))
            faces.append(nums)
        except Exception:  # non-numeric entries
            return bad
    tx = []
    for face in faces:
        img = autoencoder.decode(autoencoder.default, gender, face)
        _, buf = cv2.imencode('.jpg', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
        # tobytes(): tostring() is a deprecated alias with identical output.
        tx.append(base64.b64encode(np.array(buf).tobytes()).decode('utf-8'))
    return HttpResponse(js.dumps(tx), content_type="application/json")
|
{"hexsha": "8d577b775370be154b4dc1074ae5595da68bcccb", "size": 3767, "ext": "py", "lang": "Python", "max_stars_repo_path": "main/rest.py", "max_stars_repo_name": "x13machine/facega", "max_stars_repo_head_hexsha": "eadff498344b35e3f413927ac72b88098f812268", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main/rest.py", "max_issues_repo_name": "x13machine/facega", "max_issues_repo_head_hexsha": "eadff498344b35e3f413927ac72b88098f812268", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main/rest.py", "max_forks_repo_name": "x13machine/facega", "max_forks_repo_head_hexsha": "eadff498344b35e3f413927ac72b88098f812268", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.54375, "max_line_length": 79, "alphanum_fraction": 0.6596761349, "include": true, "reason": "import numpy", "num_tokens": 1072}
|
function [A, b] = sllinrega(X, Y, varargin)
%SLLINREGA Performs Augmented Multivariate Linear Regression
%
% $ Syntax $
%   - [A, b] = sllinrega(X, Y, ...)
%
% $ Description $
%   - Fits the affine model y = A * x + b in the least-squares sense.
%     Samples are stored column-wise in X and Y. Any additional options
%     are forwarded unchanged to sllinreg.
%   - Internally the affine problem is reduced to a plain linear
%     regression on augmented inputs:  y = [A, b] * [x; 1]
%
%% input checking
if nargin < 2
    raise_lackinput('sllinrega', 2);
end
if ~isnumeric(X) || ~isnumeric(Y) || ndims(X) ~= 2 || ndims(Y) ~= 2
    error('sltoolbox:invalidarg', ...
        'The X and Y should be both 2D numeric matrices');
end
%% solve on augmented inputs
[xdim, nsamples] = size(X);
% append a constant-one row so the shift b becomes one more column of
% the transform solved by sllinreg
Aaug = sllinreg([X; ones(1, nsamples)], Y, varargin{:});
% split the augmented transform back into linear part and shift vector
A = Aaug(:, 1:xdim);
b = Aaug(:, xdim+1);
|
{"author": "lmthang", "repo": "nmt.hybrid", "sha": "50d5c025f18ed280ff0fd2e2adce327f4170a2c3", "save_path": "github-repos/MATLAB/lmthang-nmt.hybrid", "path": "github-repos/MATLAB/lmthang-nmt.hybrid/nmt.hybrid-50d5c025f18ed280ff0fd2e2adce327f4170a2c3/code/wordsim/code/sltoolbox_r101/sltoolbox_r101/sltoolbox/regression/sllinrega.m"}
|
# Maximum feasibility of 0.4769921436588103 for [1,3,5,8]
# Mean feasibility of 0.3221046443268665
#
# Brute-force search over all `max_robots`-subsets of the attachment
# points for the subset that can lift the object over the largest
# fraction of (x, y, mass) hypotheses. Written for an old Julia
# version (uses `minimum(A, 2)`-style reduction dims and
# Convex.clearmemory).
include("CenterOfMass.jl")
include("setup_parameters.jl")
# Discretization of the unknown payload mass and of the (x, y) grid.
mass_resolution = 0.1
masses = collect(min_mass:mass_resolution:max_mass)
resolution = 0.1
max_robots = 4
actuator_limit = 6.0
#########################
# Myopic feasibility rate
#########################
# Axis-aligned bounding box of the object boundary; the grid spans it.
boundary_vec = flatten(circle_ps)
min_p = minimum(boundary_vec, 2)[1:2]
max_p = maximum(boundary_vec, 2)[1:2]
position_ranges = map(x->x[1]:resolution:x[2], zip(min_p, max_p))
ranges = (position_ranges..., masses)
# True when the attachment subset `robots` can support the gravity
# wrench of hypothesis theta = [x, y, mass] within actuator_limit.
robots_feasible(robots, theta) =
  lifting_feasibility(attachment_ws[robots],
  theta[3]*g*point_to_wrench(theta[1:2]),
  actuator_limit)
maximum_feasibility_rate = 0
maximum_feasibility_combination = zeros(Int64,0)
# good_inds[i,j,k]: some configuration of <= max_robots attachments can
# lift hypothesis (x_i, y_j, m_k); infeasible hypotheses are excluded
# from the denominator of the per-subset feasibility rate below.
good_inds = zeros(Bool,map(length,ranges))
for (ii,x) = enumerate(ranges[1])
  for (jj,y) = enumerate(ranges[2])
    for (kk,m) = enumerate(ranges[3])
      Convex.clearmemory()  # drop Convex.jl's cached problem data
      if interior_q([x;y])
        gravity_w = m*g*point_to_wrench([x;y])
        feasible = check_feasible_configuration(Array{Array{Float64}}(0), attachment_ws,
          gravity_w, actuator_limit, max_robots)
        if feasible
          good_inds[ii,jj,kk] = true
        end
      end
    end
  end
end
num_good = sum(good_inds)
mean_feasibility = 0.0
# Enumerate every way to choose `max_robots` of the attachment points.
cs = combinations(1:length(attachment_ws), max_robots)
for (ii, c) = enumerate(cs)
  num_feasible = 0
  for (jj,x) = enumerate(ranges[1])
    for (kk,y) = enumerate(ranges[2])
      for (ll,m) = enumerate(ranges[3])
        Convex.clearmemory()
        theta = [x,y,m]
        # Only score hypotheses that are liftable by *some* subset.
        if good_inds[jj,kk,ll]
          num_feasible += robots_feasible(c, theta)
        end
      end
    end
  end
  # Fraction of liftable hypotheses this particular subset covers.
  feasibility = num_feasible / num_good
  mean_feasibility += feasibility
  if feasibility > maximum_feasibility_rate
    maximum_feasibility_combination = c
    maximum_feasibility_rate = feasibility
    println("New best: $(feasibility)")
  end
  println("Finished $(ii) of $(length(cs)), best: $(maximum_feasibility_rate)")
end
mean_feasibility /= length(cs)
println("Maximum feasibility of $(maximum_feasibility_rate) for $(maximum_feasibility_combination)")
println("Mean feasibility of $(mean_feasibility)")
|
{"hexsha": "b1a77172d5d80e0d4a4148a28b3661ed7e625a21", "size": 2280, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "maximal_initial_feasibility.jl", "max_stars_repo_name": "mcorah/CenterOfMass", "max_stars_repo_head_hexsha": "84bd9124b042ed0cd4f5cd6ab3a705f72046f3ca", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maximal_initial_feasibility.jl", "max_issues_repo_name": "mcorah/CenterOfMass", "max_issues_repo_head_hexsha": "84bd9124b042ed0cd4f5cd6ab3a705f72046f3ca", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "maximal_initial_feasibility.jl", "max_forks_repo_name": "mcorah/CenterOfMass", "max_forks_repo_head_hexsha": "84bd9124b042ed0cd4f5cd6ab3a705f72046f3ca", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2307692308, "max_line_length": 100, "alphanum_fraction": 0.6745614035, "num_tokens": 641}
|
# Neighbors in 1D
# A 1-D cell has a neighbor across `face` unless it sits on a
# non-periodic boundary (:l at the first cell, :r at the last).
function has_neighbor(m::Mesh1D, cell, face)
    m.isperiodic && return true
    at_left_boundary = cell.coord == 1 && face == :l
    at_right_boundary = cell.coord == length(m.elements)-1 && face == :r
    return !(at_left_boundary || at_right_boundary)
end
# The cell adjacent to `cell` across `face`, wrapping around for
# periodic meshes. The neighbor must exist (see has_neighbor).
function neighbor(m::Mesh1D, cell, face)
    @assert has_neighbor(m, cell, face)
    lastcell = length(m.elements) - 1
    if face == :l
        wraps = m.isperiodic && cell.coord == 1
        return wraps ? m[lastcell] : m[cell.coord - 1]
    end
    @assert face == :r
    wraps = m.isperiodic && cell.coord == lastcell
    return wraps ? m[1] : m[cell.coord + 1]
end
# Reference coordinate of a 1-D face: -1 for the left face, +1 for the right.
coord(face) = face == :l ? -1 : 1
# Coordinate of the opposite face.
oppcoord(face) = -coord(face)
# The face on the other side of a 1-D cell.
oppface(cell::Cell1D,face) = face == :l ? :r : :l
oppface(m::Mesh1D,cell,face) = oppface(cell,face)
# Neighbors in 2D
# `ns` is the ncells x 3 table built by computeNeighbors: ns[cid, k] is
# the id of the cell across local edge k, or -1 on a boundary edge.
# Keyword variants recompute the table when it is not supplied.
has_neighbor(m::Meshes.Mesh, edge::Edge; ns = computeNeighbors(m)) = has_neighbor(m,edge,ns)
has_neighbor(m::Meshes.Mesh, cell::Cell2D, edgeid::Int64; ns = computeNeighbors(m)) = has_neighbor(m,cell,edgeid,ns)
function has_neighbor(m::Meshes.Mesh, edge::Edge, ns)
    # NOTE(review): calls edgeid(m, edge) here, while other methods call
    # edgeid(cell, edge) -- confirm both edgeid methods exist.
    ns[edge.cid, edgeid(m,edge)] != -1
end
function has_neighbor(m::Meshes.Mesh, cell::Cell2D, edgeid::Int64, ns)
    ns[cell.cid, edgeid] != -1
end
# The `cell` argument is ignored: adjacency is a property of the edge.
has_neighbor(m::Meshes.Mesh, cell::Cell2D, edge::Edge, ns) = has_neighbor(m,edge, ns)
has_neighbor(m::Meshes.Mesh, cell::Cell2D, edge::Edge; ns = computeNeighbors(m)) =
    has_neighbor(m,edge, ns)
# Look up the cell adjacent to `cell` across local edge number `edgeid`.
function neighbor(m::Meshes.Mesh, cell::Cell2D, edgeid; ns = computeNeighbors(m))
    m[ns[cell.cid, edgeid]]
end
neighbor(m::Meshes.Mesh, cell::Cell2D, edge::Edge; ns = computeNeighbors(m)) =
    neighbor(m,cell,edge,ns)
function neighbor(m::Meshes.Mesh, cell::Cell2D, edge::Edge, ns)
    m[ns[cell.cid, edgeid(cell,edge)]]
end
# Edge-only variant: recover the owning cell from the edge's cid.
function neighbor(m::Meshes.Mesh, edge::Edge, ns = computeNeighbors(m))
    m[ns[cid(edge), edgeid(m[cid(edge)],edge)]]
end
# The shared edge as seen from the neighboring cell; the vertex order is
# reversed because the two cells traverse the edge in opposite
# orientations.
function oppface(m::Mesh, cell::Cell2D, face, ns)
    n = neighbor(m, cell,face, ns)
    n[edgeid(n,reverse(face))]
end
oppface(m::Mesh, cell::Cell2D, face; ns = computeNeighbors(m)) = oppface(m,cell,face,ns)
canon(a,b) = (a<b) ? (a,b) : (b,a)
# Build the ncells x 3 neighbor table: ns[i, k] is the id of the cell
# sharing local edge k of triangle i, or -1 if that edge is on the
# boundary. Edges are matched by their canonical (sorted) vertex pair.
# (Written in old-Julia syntax: tuple-typed Dict literal, `f.(a)` field
# access by symbol.)
function computeNeighbors(m)
    # pending edges: canonical vertex pair => (cell id, local edge number)
    fs = ((Int64,Int64)=>(Int64,Int64))[]
    ns = Array(Int64,length(m.faces),3)
    fill!(ns,-1)
    for i in 1:length(m.faces)
        f = m.faces[i]
        # Local edges 1..3 connect vertex fields (v1,v2), (v2,v3), (v3,v1).
        for (nn,a,b) in ((1,:v1,:v2),(2,:v2,:v3),(3,:v3,:v1))
            c = canon(f.(a),f.(b))
            if haskey(fs,c)
                # Second sighting of this edge: link the two cells both ways.
                (j,num) = fs[c]
                ns[i,nn] = j
                ns[j,num] = i
            else
                fs[c] = (i,nn)
            end
        end
    end
    ns
end
# Tests if the vertex p is contained in cell x
# Returns (face, distance): face is 0 when p lies inside the triangle,
# otherwise the local index (1..3) of the edge that p lies beyond,
# together with the distance from p to a clamped projection of p onto
# the triangle.
function facingface(x::Cell2D,p::Vertex2)
    # Express p in barycentric coordinates w.r.t. triangle (p1, p2, p3)
    # using the standard dot-product formulation.
    v0 = x.p2 - x.p1
    v1 = x.p3 - x.p1
    v2 = p - x.p1
    dot00 = dot(v0, v0)
    dot01 = dot(v0, v1)
    dot02 = dot(v0, v2)
    dot11 = dot(v1, v1)
    dot12 = dot(v1, v2)
    invDenom = 1 / (dot00 * dot11 - dot01 * dot01)
    λ1 = u = (dot11 * dot02 - dot01 * dot12) * invDenom
    λ2 = v = (dot00 * dot12 - dot01 * dot02) * invDenom
    λ3 = 1-u-v
    # NOTE(review): clamping each barycentric coordinate independently
    # only approximates the closest point on the triangle -- confirm
    # an approximate distance is acceptable to callers.
    pt = clamp(λ1,0.0,1.0)*x.p1 + clamp(λ2,0.0,1.0)*x.p2 + clamp(λ3,0.0,1.0)*x.p3
    dist = norm(p-pt)
    # Classify by the signs of (u, v): inside, or beyond one of the edges.
    if (u >= 0) && (v >= 0) && (u + v < 1)
        return (0,0.0)
    elseif (u <= 0) && (v <= 0)
        return ((abs(u) > abs(v) ? 3 : 2),dist)
    elseif (u <= 0)
        return (3,dist)
    elseif (v <= 0)
        return (2,dist)
    else
        return (1,dist)
    end
end
contains(x::Cell2D,p::Vertex2) = facingface(x,p)[1] == 0
|
{"hexsha": "ccef4880dc35d0bc25876836702eac843104db67", "size": 3612, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/neighbors.jl", "max_stars_repo_name": "Keno/AC274.jl", "max_stars_repo_head_hexsha": "9eafcba152019a563b4501c9626f1699814e37b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-04-27T07:38:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T10:11:02.000Z", "max_issues_repo_path": "src/neighbors.jl", "max_issues_repo_name": "Keno/AC274.jl", "max_issues_repo_head_hexsha": "9eafcba152019a563b4501c9626f1699814e37b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/neighbors.jl", "max_forks_repo_name": "Keno/AC274.jl", "max_forks_repo_head_hexsha": "9eafcba152019a563b4501c9626f1699814e37b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.572519084, "max_line_length": 116, "alphanum_fraction": 0.5675526024, "num_tokens": 1257}
|
import tensorflow as tf
import tensorflow_addons as tfa
import random
import numpy as np
import itertools
def augment(*images: tf.Tensor, mask_image = False, size=None):
    """Apply one shared random geometric augmentation to every image.

    All inputs receive the *same* random flips, rotation and central
    crop (and, optionally, the same cutout rectangle), so paired inputs
    such as image+mask stay spatially aligned.

    Args:
        images: one or more tensors; 3-D (HWC) or 4-D (NHWC).
        mask_image: if True, additionally cut out a random rectangle.
        size: spatial size used to bound the cutout; inferred from the
            first image when None.

    Returns:
        List of augmented tensors in input order.
    """
    # BUG FIX: np.random.uniform's signature is (low, high, size); the
    # original passed () as `low` (a tf.random.uniform-style *shape*
    # argument), which numpy cannot broadcast. Draw plain scalars.
    p1 = np.random.uniform(0, 1)
    p2 = np.random.uniform(0, 1)
    # Kept for the currently-disabled elastic transform below.
    random_state = np.random.RandomState(None)
    # Per-sample rotation angles for batched (4-D) input, one scalar for
    # a single (3-D) image. NOTE(review): other ranks leave
    # random_angles undefined -- confirm inputs are always 3-D or 4-D.
    if len(images[0].shape) == 4:
        random_angles = tf.random.uniform(shape=(tf.shape(images[0])[0],),
                                          minval=-np.pi / 8, maxval=np.pi / 8)
    if len(images[0].shape) == 3:
        random_angles = tf.random.uniform(shape=(), minval=-np.pi / 8, maxval=np.pi / 8)
    crop = random.uniform(0.7, 0.9)
    # Kept for the currently-disabled rot90 below.
    random_rot = tf.random.uniform([], minval=0, maxval=3, dtype=tf.int32)
    if mask_image:
        if size is None:
            size = images[0].shape[1:3] if len(images[0].shape) == 4 else images[0].shape[0:2]
        random_cutout_offset = tf.random.uniform(shape=(2,), minval=0, maxval=min(size), dtype=tf.int32)
        random_cutout_size = tf.random.uniform(shape=(2,), minval=0, maxval=min(size) // 2, dtype=tf.int32)
    def aug(image):
        # Same draws applied to every image keep pairs aligned.
        if p1 > 0.5:
            image = tf.image.flip_left_right(image)
        if p2 > 0.5:
            image = tf.image.flip_up_down(image)
        if mask_image:
            image = tfa.image.cutout(image[tf.newaxis, :, :, :], random_cutout_size, random_cutout_offset)[0]
        image = tfa.image.rotate(image, random_angles)
        # image = tf.image.rot90(image, random_rot)
        image = tf.image.central_crop(image, crop)
        # image = elastic_transform(image, image.shape[1] * 2, image.shape[1] * p2 / 5, image.shape[1] * p1 / 5, random_state)
        return image
    return list(map(aug, images))
def permute_mask(mask, gt):
    # Cyclically shift the mask's channels and the matching ground-truth
    # triples by the same random offset so the two stay aligned.
    # assumes gt is reshapeable to (-1, l, 3), i.e. one RGB triple per
    # mask channel -- TODO confirm against callers.
    l = mask.shape[-1]
    roll_l = np.random.randint(0, l)
    # perms = list(itertools.permutations(range(l),l))
    # perm = perms[np.random.randint(0, len(perms))]
    mask = tf.roll(mask, roll_l, axis=-1)
    gt_shape = gt.shape
    gt = tf.reshape(gt, (-1,l,3))
    gt = tf.roll(gt, roll_l, axis=1)
    gt = tf.reshape(gt, gt_shape)
    return mask, gt
def color(*images: tf.Tensor):
    """Apply one shared random color jitter to every image.

    Draws random hue/saturation/brightness/contrast adjustments once
    and applies the same jitter to each input. A single input returns a
    single tensor; multiple inputs return a list.
    """
    hue_delta = tf.random.uniform((), 0, 0.3)
    sat_factor = tf.random.uniform((), 0.8, 2)
    bright_delta = tf.random.uniform((), 0, 0.2)
    contrast_factor = tf.random.uniform((), 0.8, 1.5)
    def jitter(image):
        image = tf.image.adjust_hue(image, hue_delta)
        image = tf.image.adjust_saturation(image, sat_factor)
        image = tf.image.adjust_brightness(image, bright_delta)
        image = tf.image.adjust_contrast(image, contrast_factor)
        return image
    if len(images) == 1:
        return jitter(images[0])
    return [jitter(image) for image in images]
import cv2
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
@tf.function
def contrast_histogram_stretching(image):
    # Min-max normalize the image to the [0, 1] range.
    # NOTE(review): divides by (max - min); a constant image produces
    # inf/NaN -- confirm inputs are guaranteed non-constant.
    f_max = tf.reduce_max(image)
    f_min = tf.reduce_min(image)
    stretched = (image - f_min)/(f_max - f_min)
    return stretched
# Function to distort image
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5

    alpha scales the displacement field, sigma smooths it, and
    alpha_affine bounds the random affine pre-warp.
    """
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    shape_size = shape[:2]
    # Random affine: jitter the corners of a centered square and warp
    # the image by the affine map those corners define.
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # Smooth per-pixel displacement field in x and y (none across channels).
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)
    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
    # Resample the image at the displaced coordinates.
    return tf.constant(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape))
def create_mask(size=tf.constant((480, 480)), transpose=tf.constant(False), make_positive=True):
    # Generate a random binary (0/1 int32) mask of shape `size` by
    # drawing a jagged dividing line -- a biased random walk across the
    # image -- and filling one side with ones. `transpose` runs the walk
    # along the other axis; `make_positive` guarantees the 1-region
    # covers at least half the pixels.
    if transpose:
        start = tf.random.uniform([], 0, size[0], tf.int32)
        line_index = tf.range(size[1], dtype=tf.int32)
        line = tf.zeros((size[1]), dtype=tf.int32)
        x_size = size[0]
        y_size = size[1]
    else:
        start = tf.random.uniform([], 0, size[1], tf.int32)
        line_index = tf.range(size[0], dtype=tf.int32)
        line = tf.zeros((size[0]), dtype=tf.int32)
        x_size = size[1]
        y_size = size[0]
    y_current = -1
    x_current = start
    # Unnormalized step weights for the 5 walk moves sampled below.
    prob = tf.random.uniform([5], 0, 100, tf.float32)
    def cond(x_size, x_current, y_size, y_current, line, line_index):
        # Walk until the line has crossed the whole image.
        return tf.math.less(y_current, y_size)
    def body(x_size, x_current, y_size, y_current, line, line_index):
        # One walk step: 0=left, 1=down-left, 2=down, 3=down-right,
        # 4=right; record the new x position of the line at row y.
        select = tf.constant([0, 1, 2, 3, 4])
        sample = tf.squeeze(tf.random.categorical(tf.math.log([prob]), 1))
        step = select[sample]
        if tf.equal(step, 0):
            x_current -= 1
            replace_value = tf.ones((y_size), dtype=tf.int32) * x_current
            line = tf.where(line_index == y_current, replace_value, line)
        elif tf.equal(step, 1):
            x_current -= 1
            y_current += 1
            replace_value = tf.ones((y_size), dtype=tf.int32) * x_current
            line = tf.where(line_index == y_current, replace_value, line)
        elif tf.equal(step, 2):
            y_current += 1
            replace_value = tf.ones((y_size), dtype=tf.int32) * x_current
            line = tf.where(line_index == y_current, replace_value, line)
        elif tf.equal(step, 3):
            x_current += 1
            y_current += 1
            replace_value = tf.ones((y_size), dtype=tf.int32) * x_current
            line = tf.where(line_index == y_current, replace_value, line)
        else:
            x_current += 1
            replace_value = tf.ones((y_size), dtype=tf.int32) * x_current
            line = tf.where(line_index == y_current, replace_value, line)
        return x_size, x_current, y_size, y_current, line, line_index
    _, _, _, _, line, _ = tf.while_loop(cond, body, [x_size, x_current, y_size, y_current, line, line_index],
                                        parallel_iterations=10)
    # Pixels at or beyond the walk line (per row/column) become 1.
    if transpose:
        x = tf.tile([tf.range(0, size[0], dtype=tf.int32)], [size[1], 1])
        rez = x - tf.expand_dims(line, 1)
        rez = tf.transpose(rez)
    else:
        x = tf.tile([tf.range(0, size[1], dtype=tf.int32)], [size[0], 1])
        rez = x - tf.expand_dims(line, 1)
    ones = tf.ones(size, tf.int32)
    zeros = tf.zeros(size, tf.int32)
    rez = tf.where(rez >= 0, ones, zeros)
    if make_positive:
        # Invert when less than half the pixels are 1.
        nz = tf.cast(tf.math.count_nonzero(rez), tf.int32)
        if nz < size[0] * size[1] // 2:
            rez = 1 - rez
    return rez
if __name__ == '__main__':
    # Quick timing and visual sanity check of create_mask: time one
    # call with and one without the make_positive post-step, and show
    # the first mask.
    import time
    import visualizer
    start = time.time_ns()
    mask = create_mask()
    end = time.time_ns()
    dur1 = end - start
    visualizer.visualize([mask])
    start = time.time_ns()
    mask = create_mask(make_positive=False)
    end = time.time_ns()
    dur2 = end - start
    # Durations printed in milliseconds.
    print(f"{dur1 / 1000 / 1000}, {dur2/1000/1000}")
|
{"hexsha": "91f0674aa44582a8e740f4de2a7b3f504a6dc6ff", "size": 8151, "ext": "py", "lang": "Python", "max_stars_repo_path": "augmentations.py", "max_stars_repo_name": "donikv/IlluminationBase", "max_stars_repo_head_hexsha": "4aade52bb8a1065f10b94ffda09645a681d8160c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "augmentations.py", "max_issues_repo_name": "donikv/IlluminationBase", "max_issues_repo_head_hexsha": "4aade52bb8a1065f10b94ffda09645a681d8160c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "augmentations.py", "max_forks_repo_name": "donikv/IlluminationBase", "max_forks_repo_head_hexsha": "4aade52bb8a1065f10b94ffda09645a681d8160c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2191780822, "max_line_length": 126, "alphanum_fraction": 0.6147711937, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2257}
|
// Copyright (c) Facebook, Inc. and its affiliates.
#include <sys/socket.h>
#include <zlib.h>

#include <array>
#include <cstdint>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>

#include <glog/logging.h>

#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_io.hpp>

#include "../lib/rapidjson/include/rapidjson/document.h"
#include "../lib/rapidjson/include/rapidjson/stringbuffer.h"
#include "event.h"
#include "event_handler.h"
#include "nbt_tag.h"
#include "packet_reader.h"
#include "types.h"
#include "util.h"
using namespace std;
// Construct a reader bound to an already-connected socket; parsed events
// are forwarded to `eventHandler`.
PacketReader::PacketReader(int sock, EventHandler* eventHandler)
    : socket_(sock), eventHandler_(eventHandler) {}
// Spawn the reader loop on a new thread: read one packet at a time and
// dispatch on the packet id (pid) according to the protocol state
// (login until loginSuccess, then play). Returns the thread; the caller
// owns joining it.
thread PacketReader::startThread() {
  thread t([&]() {
    try {
      while (true) {
        int pid = readPacket();
        if (!inPlayState_) {
          // login state
          switch (pid) {
            case 0x02:
              loginSuccess();
              break;
            case 0x03:
              setCompression();
              break;
            default:
              LOG(FATAL) << "Bad pid 0x" << hex << pid << " in login state\n";
          }
        } else {
          // play state: most stuff happens here
          switch (pid) {
            case 0x03:
              spawnMob();
              break;
            case 0x05:
              spawnPlayer();
              break;
            case 0x0b:
              blockChange();
              break;
            case 0x0d:
              serverDifficulty();
              break;
            case 0x0f:
              chatMessage();
              break;
            case 0x10:
              multiBlockChange();
              break;
            case 0x11:
              confirmTransaction();
              break;
            case 0x13:
              openWindow();
              break;
            case 0x14:
              windowItems();
              break;
            case 0x16:
              setSlot();
              break;
            case 0x1f:
              keepAlive();
              break;
            case 0x20:
              chunkData();
              break;
            case 0x23:
              joinGame();
              break;
            case 0x26:
              entityRelativeMove();
              break;
            case 0x27:
              entityLookAndRelativeMove();
              break;
            case 0x28:
              entityLook();
              break;
            case 0x2e:
              playerListItem();
              break;
            case 0x2f:
              playerPositionAndLook();
              break;
            case 0x36:
              entityHeadLook();
              break;
            case 0x3f:
              entityEquipment();
              break;
            case 0x4c:
              entityTeleport();
              break;
            case 0x46:
              spawnPosition();
              break;
            case 0x41:
              updateHealth();
              break;
            default:
              // Unknown packet: log the pid once, then drop its payload.
              if (ignoredPids_.count(pid) == 0) {
                LOG(INFO) << "ignored pid=0x" << hex << pid;
                ignoredPids_.insert(pid);
              }
              data_.clear();
              data_off_ = 0;
          }
        }
      }
    } catch (ExitGracefully* e) {
      // Normal shutdown: readPacket/bufferExactly throw a heap-allocated
      // ExitGracefully when the socket closes or data is corrupt.
      // NOTE(review): the caught pointer is never deleted, so each
      // shutdown leaks one object; throwing/catching by value would
      // avoid this but requires changing every throw site together.
    }
  });
  return t;
}
////////////////
// Private
// Read entire next packet into data_ and return the pid
// Read the entire next packet into data_ and return its pid.
//
// Wire format: [packet length varint][optional uncompressed-size varint when
// compression is on][pid + payload, possibly zlib-deflated].
// Throws ExitGracefully* (via bufferExactly) if the connection drops.
int PacketReader::readPacket() {
  // Invariant: the previous packet must have been fully consumed.
  if (data_.size() != data_off_) {
    LOG(FATAL) << "unread data, size=" << data_.size() << " off=" << data_off_;
  }
  data_off_ = 0;
  // Read packet length
  long packetLen;
  varintFromStream(&packetLen);
  unsigned bytesLeft = packetLen;
  // If compression enabled, read dataLen: length of uncompressed pid+data, or 0
  long dataLen = 0;
  if (threshold_ > 0) {
    bytesLeft -= varintFromStream(&dataLen);
  }
  // Read pid + data into data_
  if (dataLen != 0) {
    // zlib-encoded: stage the compressed bytes in a heap buffer.
    // (std::vector instead of a variable-length array, which is a
    // non-standard C++ extension.)
    std::vector<uint8_t> zlibBuf(bytesLeft);
    bufferExactly(zlibBuf.data(), bytesLeft);
    data_.resize(dataLen);
    unsigned long destLen = dataLen;
    int err = uncompress(&data_[0], &destLen, zlibBuf.data(), bytesLeft);
    if (err != 0) {
      LOG(WARNING) << "Closing PacketReader since uncompress returned " << err;
      throw new ExitGracefully("uncompress failed");
    }
    CHECK_EQ(destLen, dataLen);
  } else {
    // Not zlib-encoded: read directly into data_
    data_.resize(bytesLeft);
    bufferExactly(&data_[0], bytesLeft);
  }
  // extract pid and return
  return readVarint();
}
// Block until exactly n bytes have been received from socket_ into buf.
// Throws ExitGracefully* when the peer closes the connection or recv fails.
void PacketReader::bufferExactly(uint8_t* buf, unsigned long n) {
  unsigned int filled = 0;
  while (filled < n) {
    int got = recv(socket_, buf + filled, n - filled, 0);
    if (got <= 0) {
      throw new ExitGracefully("");
    }
    filled += got;
  }
}
// Read a varint from socket_ into v, and return the number of bytes read
// Decode a VarInt directly from the socket into *v (7 data bits per byte,
// least-significant group first; high bit set = more bytes follow).
// Returns the number of bytes consumed.
int PacketReader::varintFromStream(long* v) {
  *v = 0;
  int shift = 0;
  int count = 0;
  uint8_t byte;
  do {
    bufferExactly(&byte, 1);
    *v |= (long)(byte & 0x7f) << shift;
    shift += 7;
    count += 1;
  } while (byte & 0x80);
  return count;
}
// Consume and return the next byte of the current packet.
uint8_t PacketReader::next() {
  uint8_t b = data_[data_off_];
  data_off_ += 1;
  return b;
}
// Look at the next byte of the current packet without consuming it.
uint8_t PacketReader::peek() { return data_[data_off_]; }
// Advance the packet cursor by n bytes without reading them.
void PacketReader::skip(unsigned long n) { data_off_ += n; }
// Advance the packet cursor to the end of the current packet.
void PacketReader::skipRest() { data_off_ = data_.size(); }
// Assemble an n-byte big-endian unsigned integer from the packet buffer.
uint64_t PacketReader::readBigEndian(int n) {
  uint64_t result = 0;
  while (n-- > 0) {
    result = (result << 8) | next();
  }
  return result;
}
////////////////
// Decode Types
// Decode a VarInt from the packet buffer (7 data bits per byte, LSB group
// first; a clear high bit terminates the sequence).
long PacketReader::readVarint() {
  long result = 0;
  for (int shift = 0;; shift += 7) {
    uint8_t byte = next();
    result |= (long)(byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {
      return result;
    }
  }
}
// Read a VarInt-length-prefixed string from the packet buffer.
string PacketReader::readString() {
  auto n = readVarint();
  auto begin = data_.begin() + data_off_;
  string out(begin, begin + n);
  skip(n);
  return out;
}
// Reinterpret 4 big-endian bytes from the packet as an IEEE-754 float.
float PacketReader::readFloat() {
  uint32_t raw = readBigEndian(4);
  float out;
  memcpy(&out, &raw, sizeof(out));
  return out;
}
// Reinterpret 8 big-endian bytes from the packet as an IEEE-754 double.
double PacketReader::readDouble() {
  uint64_t raw = readBigEndian(8);
  double out;
  memcpy(&out, &raw, sizeof(out));
  return out;
}
// Read 16 raw bytes from the packet and format them as a canonical
// hyphenated UUID string.
string PacketReader::readUuid() {
  uint8_t raw[16];
  for (auto& byte : raw) {
    byte = next();
  }
  boost::uuids::uuid uuid;
  memcpy(&uuid, raw, sizeof(raw));
  return boost::uuids::to_string(uuid);
}
// Decode a packed block position: one 64-bit value holding x (26 bits),
// y (12 bits) and z (26 bits), each two's-complement.
BlockPos PacketReader::readPosition() {
  long v = readUint64();
  int x = v >> 38;            // bits 38-63
  int y = (v >> 26) & 0xfff;  // bits 26-37
  int z = v << 38 >> 38;      // bits 0-25, sign-extended by the shift pair
  // Manual sign extension for fields whose top bit marks a negative value.
  if (x >= (1 << 25)) {
    x -= (1 << 26);
  }
  if (y >= (1 << 11)) {
    y -= (1 << 12);
  }
  if (z >= (1 << 25)) {
    z -= (1 << 26);
  }
  return BlockPos{x, y, z};
}
// Decode a relative-move delta: three fixed-point int16 values, each in
// units of 1/4096 block.
// See http://wiki.vg/Protocol#Entity_Look_And_Relative_Move
Pos PacketReader::readDeltaPos() {
  double dx = readInt16() / 4096.0;
  double dy = readInt16() / 4096.0;
  double dz = readInt16() / 4096.0;
  return {dx, dy, dz};
}
// Decode a block id/meta pair packed into a single VarInt (id << 4 | meta).
Block PacketReader::readBlock() {
  int packed = readVarint();
  Block b;
  b.id = packed >> 4;
  b.meta = packed & 0xf;
  return b;
}
// Decode a JSON "Chat" payload into plain text: the top-level "text" field
// concatenated with each element's "text" in the optional "extra" array.
// All formatting information is discarded.
string PacketReader::readChat() {
  string raw = readString();
  rapidjson::Document j;
  j.Parse(raw.c_str());
  // Guard every lookup: rapidjson's operator[] asserts (undefined behavior
  // in release builds) when the member is missing, and chat JSON routinely
  // omits "extra" (and may omit "text").
  string text;
  if (j.IsObject() && j.HasMember("text") && j["text"].IsString()) {
    text = j["text"].GetString();
  }
  if (j.IsObject() && j.HasMember("extra") && j["extra"].IsArray()) {
    for (auto e = j["extra"].Begin(); e != j["extra"].End(); e++) {
      if (e->IsObject() && e->HasMember("text") && (*e)["text"].IsString()) {
        text += (*e)["text"].GetString();
      }
    }
  }
  return text;
}
// Decode a Slot: block id (-1 = empty), item count, damage, optional NBT.
// The NBT payload is parsed only to find its end; its contents are discarded.
Slot PacketReader::readSlot() {
  int16_t blockId = readInt16();
  if (blockId == -1) {
    return EMPTY_SLOT;
  }
  uint8_t count = readByte();
  uint16_t damage = readUint16();
  if (peek() != 0) {
    // Parse (and discard) the NBT blob so the cursor advances past it.
    uint8_t* p = &data_[data_off_];
    NBTTag::from(&p);            // advances p past the NBT data
    skip(p - &data_[data_off_]); // skip read nbt data
  } else {
    next();  // skip 0 byte indicating no NBT data
  }
  // N.B. Cuberite seems to send block meta in the "damage" field
  uint8_t meta = damage;
  return {(uint16_t)blockId, meta, count, 0};
}
////////////////
// Packets
// Server enabled compression: from now on, packets whose uncompressed size
// reaches threshold_ arrive zlib-deflated (see readPacket).
void PacketReader::setCompression() {
  threshold_ = readVarint();
  LOG(INFO) << "Set threshold to " << threshold_;
}
void PacketReader::loginSuccess() {
string uuid = readString();
string username = readString();
inPlayState_ = true;
LoginSuccessEvent e = {uuid, username};
eventHandler_->handle(e);
}
void PacketReader::keepAlive() {
auto keepaliveId = readUint64();
KeepaliveEvent e = {keepaliveId};
eventHandler_->handle(e);
}
// Decode a chunk column: 16 vertical sections, each present only when its
// bit is set in the section bitmask. Absent sections get a null block array.
void PacketReader::chunkData() {
  auto cx = readInt32();
  auto cz = readInt32();
  readByte();  // skip groundUpContinuous
  auto bitmask = readVarint();
  readVarint();  // skip data_size
  array<ChunkSection, 16> chunks;
  for (int cy = 0; cy < 16; cy++) {
    // nullptr (not the integer literal NULL): ChunkSectionBlocks is a
    // shared_ptr, so the null case should use the pointer literal.
    auto blocks = (bitmask & 1) ? chunkSectionBlocks() : nullptr;
    chunks[cy] = {cx, cy, cz, blocks};
    bitmask >>= 1;
  }
  skipRest();  // remaining payload is not needed
  ChunkDataEvent e = {cx, cz, chunks};
  eventHandler_->handle(e);
}
// Decode the JoinGame packet: entity id + game mode; the remaining fields
// are consumed and discarded.
void PacketReader::joinGame() {
  auto entityId = readUint32();
  uint8_t gameModeByte = readByte();
  readUint32();  // dimension
  readByte();    // difficulty
  readByte();    // maxPlayers
  readString();  // levelType
  readByte();    // reducedDebugInfo
  GameMode gameMode;
  switch (gameModeByte) {
    case 0:
      gameMode = GameMode::SURVIVAL;
      break;
    case 1:
      gameMode = GameMode::CREATIVE;
      break;
    default:
      LOG(FATAL) << "Can't handle game mode: " << (int)gameModeByte;
  }
  JoinGameEvent e = {entityId, gameMode};
  eventHandler_->handle(e);
}
void PacketReader::entityRelativeMove() {
unsigned long entityId = readVarint();
Pos deltaPos = readDeltaPos();
skipRest(); // on_ground
EntityRelativeMoveEvent e = {entityId, deltaPos};
eventHandler_->handle(e);
}
void PacketReader::entityLookAndRelativeMove() {
unsigned long entityId = readVarint();
Pos deltaPos = readDeltaPos();
float yaw = readAngle();
float pitch = readAngle();
skipRest(); // on_ground
Look look = {yaw, pitch};
EntityLookAndRelativeMoveEvent e = {entityId, deltaPos, look};
eventHandler_->handle(e);
}
void PacketReader::entityLook() {
unsigned long entityId = readVarint();
float yaw = readAngle();
float pitch = readAngle();
skipRest(); // on_ground
Pos deltaPos = {0, 0, 0};
Look look = {yaw, pitch};
EntityLookAndRelativeMoveEvent e = {entityId, deltaPos, look};
eventHandler_->handle(e);
}
void PacketReader::playerListItem() {
auto action = readVarint();
auto numPlayers = readVarint();
if (action == 0) {
// add players
vector<pair<string, string>> uuidNamePairs = playerListItemsAddPlayer(numPlayers);
AddPlayersEvent e = {uuidNamePairs};
eventHandler_->handle(e);
} else if (action == 4) {
// remove players
vector<string> uuidsToRemove;
for (int i = 0; i < numPlayers; i++) {
uuidsToRemove.push_back(readUuid());
}
RemovePlayersEvent e = {uuidsToRemove};
eventHandler_->handle(e);
} else if (action < 4) {
// Not implemented:
// 1: update gamemode
// 2: update latency
// 3: update display name
skipRest();
} else {
LOG(FATAL) << "Bad PlayerListItem action: " << action;
}
}
void PacketReader::playerPositionAndLook() {
Pos pos = {readDouble(), readDouble(), readDouble()};
Look look = {readFloat(), readFloat()};
auto flags = readByte();
auto teleportId = readVarint();
PlayerPositionAndLookEvent e = {pos, look, flags, teleportId};
eventHandler_->handle(e);
}
void PacketReader::entityHeadLook() {
unsigned long entityId = readVarint();
float yaw = readAngle();
EntityHeadLookEvent e = {entityId, yaw};
eventHandler_->handle(e);
}
void PacketReader::entityTeleport() {
unsigned long entityId = readVarint();
double x = readDouble();
double y = readDouble();
double z = readDouble();
float yaw = readAngle();
float pitch = readAngle();
skipRest(); // on_ground
Pos pos = {x, y, z};
Look look = {yaw, pitch};
EntityTeleportEvent e = {entityId, pos, look};
eventHandler_->handle(e);
}
void PacketReader::spawnPosition() {
auto pos = readPosition();
SpawnPositionEvent e = {pos};
eventHandler_->handle(e);
}
void PacketReader::spawnPlayer() {
unsigned long entityId = readVarint();
string uuid = readUuid();
double x = readDouble();
double y = readDouble();
double z = readDouble();
float yaw = readAngle();
float pitch = readAngle();
skipRest(); // entity metadata
Pos pos = {x, y, z};
Look look = {yaw, pitch};
SpawnPlayerEvent e = {entityId, uuid, pos, look};
eventHandler_->handle(e);
}
void PacketReader::blockChange() {
BlockPos pos = readPosition();
Block block = readBlock();
BlockChangeEvent e = {pos, block};
eventHandler_->handle(e);
}
void PacketReader::serverDifficulty() {
uint8_t difficulty = readByte();
ServerDifficultyEvent e = {difficulty};
eventHandler_->handle(e);
}
void PacketReader::chatMessage() {
string chat = readChat();
uint8_t position = readByte();
if (position != 0) {
// Only handle player-initiated chat
return;
}
ChatMessageEvent e = {chat, position};
eventHandler_->handle(e);
}
void PacketReader::multiBlockChange() {
int cx = readInt32();
int cz = readInt32();
int count = readVarint();
for (int i = 0; i < count; i += 1) {
uint8_t pos = readByte();
uint8_t y = readByte();
Block block = readBlock();
auto ox = (pos >> 4); // upper 4b
auto oz = (pos & 0xf); // lower 4b
BlockPos target = {(cx << 4) | ox, y, (cz << 4) | oz};
BlockChangeEvent e = {target, block};
eventHandler_->handle(e);
}
}
void PacketReader::confirmTransaction() {
uint8_t windowId = readByte();
uint16_t counter = readUint16();
bool accepted = readBool();
ConfirmTransactionEvent e = {windowId, counter, accepted};
eventHandler_->handle(e);
}
void PacketReader::openWindow() {
uint8_t windowId = readByte();
string windowTypeString = readString();
skipRest(); // window title, # slots, entity id
WindowType windowType;
if (windowTypeString == "minecraft:crafting_table") {
windowType = WindowType::CRAFTING_TABLE;
} else {
LOG(FATAL) << "Can't handle openWindow type=" << windowTypeString;
}
OpenWindowEvent e = {windowId, windowType};
eventHandler_->handle(e);
}
void PacketReader::windowItems() {
uint8_t windowId = readByte();
uint16_t count = readUint16();
vector<Slot> slots;
for (auto i = 0; i < count; i++) {
slots.push_back(readSlot());
}
WindowItemsEvent e = {windowId, slots};
eventHandler_->handle(e);
}
void PacketReader::setSlot() {
uint8_t windowId = readByte();
uint16_t index = readInt16();
Slot slot = readSlot();
SetSlotEvent e = {windowId, index, slot};
eventHandler_->handle(e);
}
void PacketReader::spawnMob() {
uint64_t entityId = readVarint();
string uuid = readUuid();
uint8_t mobType = readVarint();
double x = readDouble();
double y = readDouble();
double z = readDouble();
skipRest(); // look, head angle, velocity, metadata
Pos pos = {x, y, z};
SpawnMobEvent e = {entityId, uuid, mobType, pos};
eventHandler_->handle(e);
}
void PacketReader::updateHealth() {
float health = readFloat();
uint32_t foodLevel = readVarint();
skipRest(); // skip food saturation level from the packet
UpdateHealthEvent e = {health, foodLevel};
eventHandler_->handle(e);
}
void PacketReader::entityEquipment() {
uint64_t entityId = readVarint();
uint8_t which = readVarint();
Slot slot = readSlot();
EntityEquipmentEvent e = {entityId, which, slot};
eventHandler_->handle(e);
}
////////////////
// Packet subcomponents (helpers)
//
// See http://wiki.vg/Chunk_Format#Chunk_Section_structure
// Decode one 16x16x16 chunk section into a 4096-entry block array.
// Each block is packed as bitsPerBlock (must be 13) consecutive bits:
// block id in bits 4-12, block meta in bits 0-3.
ChunkSectionBlocks PacketReader::chunkSectionBlocks() {
  auto bitsPerBlock = readByte();
  // Only the full-width (paletteless) 13-bit encoding is supported.
  CHECK_EQ(bitsPerBlock, 13);
  uint64_t blockIdMask = (((1 << bitsPerBlock) - 1) ^ 0xf); // bits 4-12
  // Skip palette
  int paletteLen = readVarint();
  for (int i = 0; i < paletteLen; i++) {
    readVarint();
  }
  // Read block data
  auto dataLen = readVarint(); // number of longs to read
  const int DATA_LEN = 4096 * 13 / 64;
  CHECK_EQ(dataLen, DATA_LEN);
  array<uint64_t, DATA_LEN> longs;
  for (int i = 0; i < DATA_LEN; i++) {
    longs[i] = readUint64();
  }
  ChunkSectionBlocks blocks = make_shared<array<Block, 4096>>();
  long bo = 0;  // running bit offset into the packed `longs` array
  for (long i = 0; i < 4096; i++) {
    // Because of the bit offset, block_ids might stretch across two longs
    auto startLong = bo / 64;
    auto off = bo % 64;
    auto endLong = (bo + bitsPerBlock - 1) / 64;
    // When the entry spans two longs, splice the low bits from startLong
    // with the high bits from endLong.
    uint64_t d = (startLong == endLong) ? longs[startLong] >> off : (longs[endLong] << (64 - off)) +
                                                                        (longs[startLong] >> off);
    auto blockId = (d & blockIdMask) >> 4;
    uint8_t blockMeta = d & 0xf;
    CHECK(blockId < 256) << "invalid block id > 256, " << blockId;
    (*blocks)[i] = Block{uint8_t(blockId), blockMeta};
    bo += bitsPerBlock;
  }
  skip(4096 / 2); // block_light (nibble per block)
  skip(4096 / 2); // sky_light (nibble per block)
  return blocks;
}
// Parse the body of a PlayerListItem "add player" action. Returns the
// (uuid, name) pair for each player; the property list, game mode, ping and
// optional display name are consumed but discarded.
vector<pair<string, string>> PacketReader::playerListItemsAddPlayer(int numPlayers) {
  vector<pair<string, string>> out;
  for (int i = 0; i < numPlayers; i++) {
    string uuid = readUuid();
    string name = readString();
    auto numProperties = readVarint();
    for (int j = 0; j < numProperties; j++) {
      readString();      // property name
      readString();      // property value
      if (readBool()) {  // is signed?
        readString();    // signature
      }
    }
    readVarint();  // game mode
    readVarint();  // ping (ms)
    if (readBool()) {
      readString();  // display name (unused)
    }
    out.emplace_back(uuid, name);
  }
  return out;
}
|
{"hexsha": "b14fea84559d51a39bf068677e7aeadbd0f4ed2d", "size": 17919, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "client/src/packet_reader.cpp", "max_stars_repo_name": "satyamedh/craftassist", "max_stars_repo_head_hexsha": "d97cbc14bc25149d3ef41737231ab9f3cb7e392a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-10-04T02:09:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-04T02:09:58.000Z", "max_issues_repo_path": "client/src/packet_reader.cpp", "max_issues_repo_name": "satyamedh/craftassist", "max_issues_repo_head_hexsha": "d97cbc14bc25149d3ef41737231ab9f3cb7e392a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "client/src/packet_reader.cpp", "max_forks_repo_name": "satyamedh/craftassist", "max_forks_repo_head_hexsha": "d97cbc14bc25149d3ef41737231ab9f3cb7e392a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-03-29T20:04:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-29T20:04:11.000Z", "avg_line_length": 25.2025316456, "max_line_length": 100, "alphanum_fraction": 0.5949550756, "num_tokens": 4991}
|
[STATEMENT]
lemma sheaf_spec_on_open_is_comm_ring:
assumes "is_zariski_open U"
shows "comm_ring (\<O> U) (add_sheaf_spec U) (mult_sheaf_spec U) (zero_sheaf_spec U) (one_sheaf_spec U)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. comm_ring (\<O> U) (add_sheaf_spec U) (mult_sheaf_spec U) (zero_sheaf_spec U) (one_sheaf_spec U)
[PROOF STEP]
proof unfold_locales
[PROOF STATE]
proof (state)
goal (15 subgoals):
1. \<And>a b. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U a b \<in> \<O> U
2. zero_sheaf_spec U \<in> \<O> U
3. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
4. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a = a
5. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
6. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
7. \<And>x y. \<lbrakk>x \<in> \<O> U; y \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U x y = add_sheaf_spec U y x
8. \<And>a b. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a b \<in> \<O> U
9. one_sheaf_spec U \<in> \<O> U
10. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
A total of 15 subgoals...
[PROOF STEP]
show add_\<O>:"add_sheaf_spec U a b \<in> \<O> U"
and "mult_sheaf_spec U a b \<in> \<O> U"
if "a \<in> \<O> U" "b \<in> \<O> U" for a b
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U a b \<in> \<O> U &&& mult_sheaf_spec U a b \<in> \<O> U
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U a b \<in> \<O> U
[PROOF STEP]
by (simp add: add_sheaf_spec_in_sheaf_spec assms that(1,2) zariski_open_is_subset)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U a b \<in> \<O> U
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U a b \<in> \<O> U
[PROOF STEP]
by (simp add: assms mult_sheaf_spec_in_sheaf_spec that(1,2) zariski_open_is_subset)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U ?a3 ?b3 \<in> \<O> U
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U ?a3 ?b3 \<in> \<O> U
goal (13 subgoals):
1. zero_sheaf_spec U \<in> \<O> U
2. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
3. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a = a
4. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
5. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
6. \<And>x y. \<lbrakk>x \<in> \<O> U; y \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U x y = add_sheaf_spec U y x
7. one_sheaf_spec U \<in> \<O> U
8. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
9. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
10. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
A total of 13 subgoals...
[PROOF STEP]
show "zero_sheaf_spec U \<in> \<O> U" "one_sheaf_spec U \<in> \<O> U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zero_sheaf_spec U \<in> \<O> U &&& one_sheaf_spec U \<in> \<O> U
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zero_sheaf_spec U \<in> \<O> U
[PROOF STEP]
by (simp add: assms zero_sheaf_spec_in_sheaf_spec)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. one_sheaf_spec U \<in> \<O> U
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. one_sheaf_spec U \<in> \<O> U
[PROOF STEP]
by (simp add: assms one_sheaf_spec_in_sheaf_spec)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
zero_sheaf_spec U \<in> \<O> U
one_sheaf_spec U \<in> \<O> U
goal (11 subgoals):
1. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
2. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a = a
3. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
4. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
5. \<And>x y. \<lbrakk>x \<in> \<O> U; y \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U x y = add_sheaf_spec U y x
6. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
7. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
8. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
9. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
10. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
A total of 11 subgoals...
[PROOF STEP]
have imp_qr:"quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>" if "\<pp> \<in> U" for \<pp>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by (meson assms comm_ring.spectrum_imp_cxt_quotient_ring in_mono local.comm_ring_axioms
zariski_open_is_subset)
[PROOF STATE]
proof (state)
this:
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (11 subgoals):
1. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
2. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a = a
3. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
4. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
5. \<And>x y. \<lbrakk>x \<in> \<O> U; y \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U x y = add_sheaf_spec U y x
6. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
7. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
8. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
9. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
10. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
A total of 11 subgoals...
[PROOF STEP]
have qr_valid_frac:"quotient_ring.valid_frac (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (s \<pp>)"
if "s \<in> \<O> U" "\<pp> \<in> U" for s \<pp>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. quotient_ring.valid_frac (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (s \<pp>)
[PROOF STEP]
using assms comm_ring.zariski_open_is_subset quotient_ring.carrier_quotient_ring_iff
imp_qr local.comm_ring_axioms pr_ideal.carrier_local_ring_at_def sec_has_right_codom
spectrum_imp_pr that(1) that(2)
[PROOF STATE]
proof (prove)
using this:
is_zariski_open U
\<lbrakk>comm_ring ?R ?addition ?multiplication ?zero ?unit; comm_ring.is_zariski_open ?R ?addition ?multiplication ?zero ?unit ?U\<rbrakk> \<Longrightarrow> ?U \<subseteq> comm_ring.spectrum ?R ?addition ?multiplication ?zero ?unit
Comm_Ring.quotient_ring ?S ?R ?addition ?multiplication ?zero ?unit \<Longrightarrow> (?X \<in> ?S \<^sup>\<inverse> ?R \<^bsub>?addition ?multiplication ?zero\<^esub>) = quotient_ring.valid_frac ?S ?R ?addition ?multiplication ?zero ?X
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
comm_ring R (+) (\<cdot>) \<zero> \<one>
pr_ideal ?R ?I ?addition ?multiplication ?zero ?unit \<Longrightarrow> ?R \<^bsub>?I ?addition ?multiplication ?zero\<^esub> \<equiv> ?R\<setminus>?I \<^sup>\<inverse> ?R \<^bsub>?addition ?multiplication ?zero\<^esub>
\<lbrakk>?s \<in> \<O> ?U; ?\<pp> \<in> ?U\<rbrakk> \<Longrightarrow> ?s ?\<pp> \<in> R \<^bsub>?\<pp> (+) (\<cdot>) \<zero>\<^esub>
?\<pp> \<in> Spec \<Longrightarrow> pr_ideal R ?\<pp> (+) (\<cdot>) \<zero> \<one>
s \<in> \<O> U
\<pp> \<in> U
goal (1 subgoal):
1. quotient_ring.valid_frac (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (s \<pp>)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<lbrakk>?s3 \<in> \<O> U; ?\<pp>3 \<in> U\<rbrakk> \<Longrightarrow> quotient_ring.valid_frac (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> (?s3 ?\<pp>3)
goal (11 subgoals):
1. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
2. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a = a
3. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
4. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
5. \<And>x y. \<lbrakk>x \<in> \<O> U; y \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U x y = add_sheaf_spec U y x
6. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
7. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
8. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
9. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
10. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
A total of 11 subgoals...
[PROOF STEP]
show add_zero:"add_sheaf_spec U (zero_sheaf_spec U) a = a" if "a \<in> \<O> U" for a
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a = a
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a = a
[PROOF STEP]
have "add_sheaf_spec U (zero_sheaf_spec U) a \<pp> = a \<pp>" if "\<pp> \<in> U" for \<pp>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a \<pp> = a \<pp>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a \<pp> = a \<pp>
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using imp_qr that
[PROOF STATE]
proof (prove)
using this:
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
\<pp> \<in> U
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a \<pp> = a \<pp>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a \<pp> = a \<pp>
[PROOF STEP]
unfolding add_sheaf_spec_def zero_sheaf_spec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> ((\<lambda>\<pp>\<in>U. quotient_ring.zero_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>) \<pp>) (a \<pp>)) \<pp> = a \<pp>
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> ((\<lambda>\<pp>\<in>U. quotient_ring.zero_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>) \<pp>) (a \<pp>)) \<pp> = a \<pp>
[PROOF STEP]
by (simp add: \<open>a \<in> \<O> U\<close> qr_valid_frac)
[PROOF STATE]
proof (state)
this:
add_sheaf_spec U (zero_sheaf_spec U) a \<pp> = a \<pp>
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?\<pp>3 \<in> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a ?\<pp>3 = a ?\<pp>3
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a = a
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?\<pp>3 \<in> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a ?\<pp>3 = a ?\<pp>3
[PROOF STEP]
show "add_sheaf_spec U (zero_sheaf_spec U) a = a"
[PROOF STATE]
proof (prove)
using this:
?\<pp>3 \<in> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a ?\<pp>3 = a ?\<pp>3
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a = a
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
?\<pp>3 \<in> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a ?\<pp>3 = a ?\<pp>3
a \<in> \<O> U
goal (1 subgoal):
1. add_sheaf_spec U (zero_sheaf_spec U) a = a
[PROOF STEP]
by(auto intro: extensionalityI[where A=U])
[PROOF STATE]
proof (state)
this:
add_sheaf_spec U (zero_sheaf_spec U) a = a
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?a3 \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) ?a3 = ?a3
goal (10 subgoals):
1. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
2. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
3. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
4. \<And>x y. \<lbrakk>x \<in> \<O> U; y \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U x y = add_sheaf_spec U y x
5. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
6. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
7. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
8. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
9. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
10. \<And>a b. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a b = mult_sheaf_spec U b a
[PROOF STEP]
show add_assoc:"add_sheaf_spec U (add_sheaf_spec U a b) c
= add_sheaf_spec U a (add_sheaf_spec U b c)"
if "a \<in> \<O> U" and "b \<in> \<O> U" and "c \<in> \<O> U"
for a b c
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
[PROOF STEP]
proof (rule extensionalityI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U (add_sheaf_spec U a b) c \<in> extensional ?A
2. add_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c x = add_sheaf_spec U a (add_sheaf_spec U b c) x
[PROOF STEP]
fix \<pp>
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U (add_sheaf_spec U a b) c \<in> extensional ?A
2. add_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c x = add_sheaf_spec U a (add_sheaf_spec U b c) x
[PROOF STEP]
assume "\<pp> \<in> U"
[PROOF STATE]
proof (state)
this:
\<pp> \<in> U
goal (3 subgoals):
1. add_sheaf_spec U (add_sheaf_spec U a b) c \<in> extensional ?A
2. add_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c x = add_sheaf_spec U a (add_sheaf_spec U b c) x
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close> imp_qr
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U (add_sheaf_spec U a b) c \<in> extensional ?A
2. add_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c x = add_sheaf_spec U a (add_sheaf_spec U b c) x
[PROOF STEP]
show "add_sheaf_spec U (add_sheaf_spec U a b) c \<pp> = add_sheaf_spec U a (add_sheaf_spec U b c) \<pp>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U (add_sheaf_spec U a b) c \<pp> = add_sheaf_spec U a (add_sheaf_spec U b c) \<pp>
[PROOF STEP]
unfolding add_sheaf_spec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> ((\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) (b \<pp>)) \<pp>) (c \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (b \<pp>) (c \<pp>)) \<pp>)) \<pp>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close>
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> ((\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) (b \<pp>)) \<pp>) (c \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (b \<pp>) (c \<pp>)) \<pp>)) \<pp>
[PROOF STEP]
by (simp add: cq.additive.associative qr_valid_frac that(1) that(2) that(3))
[PROOF STATE]
proof (state)
this:
add_sheaf_spec U (add_sheaf_spec U a b) c \<pp> = add_sheaf_spec U a (add_sheaf_spec U b c) \<pp>
goal (2 subgoals):
1. add_sheaf_spec U (add_sheaf_spec U a b) c \<in> extensional U
2. add_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional U
[PROOF STEP]
qed (auto simp add:add_sheaf_spec_def)
[PROOF STATE]
proof (state)
this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U; ?c3 \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U ?a3 ?b3) ?c3 = add_sheaf_spec U ?a3 (add_sheaf_spec U ?b3 ?c3)
goal (9 subgoals):
1. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
2. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
3. \<And>x y. \<lbrakk>x \<in> \<O> U; y \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U x y = add_sheaf_spec U y x
4. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
5. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
6. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
7. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
8. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
9. \<And>a b. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a b = mult_sheaf_spec U b a
[PROOF STEP]
show add_comm:"add_sheaf_spec U x y = add_sheaf_spec U y x"
if "x \<in> \<O> U" and "y \<in> \<O> U" for x y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U x y = add_sheaf_spec U y x
[PROOF STEP]
proof (rule extensionalityI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U x y \<in> extensional ?A
2. add_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> add_sheaf_spec U x y xa = add_sheaf_spec U y x xa
[PROOF STEP]
fix \<pp>
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U x y \<in> extensional ?A
2. add_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> add_sheaf_spec U x y xa = add_sheaf_spec U y x xa
[PROOF STEP]
assume "\<pp> \<in> U"
[PROOF STATE]
proof (state)
this:
\<pp> \<in> U
goal (3 subgoals):
1. add_sheaf_spec U x y \<in> extensional ?A
2. add_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> add_sheaf_spec U x y xa = add_sheaf_spec U y x xa
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close> imp_qr
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U x y \<in> extensional ?A
2. add_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> add_sheaf_spec U x y xa = add_sheaf_spec U y x xa
[PROOF STEP]
show " add_sheaf_spec U x y \<pp> = add_sheaf_spec U y x \<pp>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U x y \<pp> = add_sheaf_spec U y x \<pp>
[PROOF STEP]
unfolding add_sheaf_spec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (x \<pp>) (y \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (y \<pp>) (x \<pp>)) \<pp>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close>
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (x \<pp>) (y \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (y \<pp>) (x \<pp>)) \<pp>
[PROOF STEP]
by (simp add: cq.additive.commutative qr_valid_frac that(1) that(2))
[PROOF STATE]
proof (state)
this:
add_sheaf_spec U x y \<pp> = add_sheaf_spec U y x \<pp>
goal (2 subgoals):
1. add_sheaf_spec U x y \<in> extensional U
2. add_sheaf_spec U y x \<in> extensional U
[PROOF STEP]
qed auto
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x3 \<in> \<O> U; ?y3 \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U ?x3 ?y3 = add_sheaf_spec U ?y3 ?x3
goal (8 subgoals):
1. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
2. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
3. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
4. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
5. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
6. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
7. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
8. \<And>a b. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a b = mult_sheaf_spec U b a
[PROOF STEP]
show mult_comm:"mult_sheaf_spec U x y = mult_sheaf_spec U y x"
if "x \<in> \<O> U" and "y \<in> \<O> U" for x y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U x y = mult_sheaf_spec U y x
[PROOF STEP]
proof (rule extensionalityI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U x y \<in> extensional ?A
2. mult_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> mult_sheaf_spec U x y xa = mult_sheaf_spec U y x xa
[PROOF STEP]
fix \<pp>
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U x y \<in> extensional ?A
2. mult_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> mult_sheaf_spec U x y xa = mult_sheaf_spec U y x xa
[PROOF STEP]
assume "\<pp> \<in> U"
[PROOF STATE]
proof (state)
this:
\<pp> \<in> U
goal (3 subgoals):
1. mult_sheaf_spec U x y \<in> extensional ?A
2. mult_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> mult_sheaf_spec U x y xa = mult_sheaf_spec U y x xa
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close> imp_qr
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U x y \<in> extensional ?A
2. mult_sheaf_spec U y x \<in> extensional ?A
3. \<And>xa. xa \<in> ?A \<Longrightarrow> mult_sheaf_spec U x y xa = mult_sheaf_spec U y x xa
[PROOF STEP]
show "mult_sheaf_spec U x y \<pp> = mult_sheaf_spec U y x \<pp>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U x y \<pp> = mult_sheaf_spec U y x \<pp>
[PROOF STEP]
unfolding mult_sheaf_spec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (x \<pp>) (y \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (y \<pp>) (x \<pp>)) \<pp>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close>
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (x \<pp>) (y \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (y \<pp>) (x \<pp>)) \<pp>
[PROOF STEP]
by (simp add: cq.comm_mult qr_valid_frac that(1) that(2))
[PROOF STATE]
proof (state)
this:
mult_sheaf_spec U x y \<pp> = mult_sheaf_spec U y x \<pp>
goal (2 subgoals):
1. mult_sheaf_spec U x y \<in> extensional U
2. mult_sheaf_spec U y x \<in> extensional U
[PROOF STEP]
qed auto
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x3 \<in> \<O> U; ?y3 \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U ?x3 ?y3 = mult_sheaf_spec U ?y3 ?x3
goal (7 subgoals):
1. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
2. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
3. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
4. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
5. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
6. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
7. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
[PROOF STEP]
show add_zero:"add_sheaf_spec U a (zero_sheaf_spec U) = a"
if "a \<in> \<O> U" for a
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U a (zero_sheaf_spec U) = a
[PROOF STEP]
using add_zero add_comm that
[PROOF STATE]
proof (prove)
using this:
?a3 \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) ?a3 = ?a3
\<lbrakk>?x3 \<in> \<O> U; ?y3 \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U ?x3 ?y3 = add_sheaf_spec U ?y3 ?x3
a \<in> \<O> U
goal (1 subgoal):
1. add_sheaf_spec U a (zero_sheaf_spec U) = a
[PROOF STEP]
by (simp add: \<open>zero_sheaf_spec U \<in> \<O> U\<close>)
[PROOF STATE]
proof (state)
this:
?a3 \<in> \<O> U \<Longrightarrow> add_sheaf_spec U ?a3 (zero_sheaf_spec U) = ?a3
goal (6 subgoals):
1. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
2. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
3. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
4. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
5. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
6. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
[PROOF STEP]
show "mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)"
if "a \<in> \<O> U" and "b \<in> \<O> U"
and "c \<in> \<O> U"
for a b c
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U (mult_sheaf_spec U a b) c = mult_sheaf_spec U a (mult_sheaf_spec U b c)
[PROOF STEP]
proof (rule extensionalityI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U (mult_sheaf_spec U a b) c \<in> extensional ?A
2. mult_sheaf_spec U a (mult_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c x = mult_sheaf_spec U a (mult_sheaf_spec U b c) x
[PROOF STEP]
fix \<pp>
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U (mult_sheaf_spec U a b) c \<in> extensional ?A
2. mult_sheaf_spec U a (mult_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c x = mult_sheaf_spec U a (mult_sheaf_spec U b c) x
[PROOF STEP]
assume "\<pp> \<in> U"
[PROOF STATE]
proof (state)
this:
\<pp> \<in> U
goal (3 subgoals):
1. mult_sheaf_spec U (mult_sheaf_spec U a b) c \<in> extensional ?A
2. mult_sheaf_spec U a (mult_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c x = mult_sheaf_spec U a (mult_sheaf_spec U b c) x
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close> imp_qr
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U (mult_sheaf_spec U a b) c \<in> extensional ?A
2. mult_sheaf_spec U a (mult_sheaf_spec U b c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U a b) c x = mult_sheaf_spec U a (mult_sheaf_spec U b c) x
[PROOF STEP]
show "mult_sheaf_spec U (mult_sheaf_spec U a b) c \<pp>
= mult_sheaf_spec U a (mult_sheaf_spec U b c) \<pp>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U (mult_sheaf_spec U a b) c \<pp> = mult_sheaf_spec U a (mult_sheaf_spec U b c) \<pp>
[PROOF STEP]
unfolding mult_sheaf_spec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> ((\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) (b \<pp>)) \<pp>) (c \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (b \<pp>) (c \<pp>)) \<pp>)) \<pp>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close>
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> ((\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) (b \<pp>)) \<pp>) (c \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (b \<pp>) (c \<pp>)) \<pp>)) \<pp>
[PROOF STEP]
by (simp add: cq.multiplicative.associative qr_valid_frac that(1) that(2) that(3))
[PROOF STATE]
proof (state)
this:
mult_sheaf_spec U (mult_sheaf_spec U a b) c \<pp> = mult_sheaf_spec U a (mult_sheaf_spec U b c) \<pp>
goal (2 subgoals):
1. mult_sheaf_spec U (mult_sheaf_spec U a b) c \<in> extensional U
2. mult_sheaf_spec U a (mult_sheaf_spec U b c) \<in> extensional U
[PROOF STEP]
qed (auto simp add:add_sheaf_spec_def)
[PROOF STATE]
proof (state)
this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U; ?c3 \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (mult_sheaf_spec U ?a3 ?b3) ?c3 = mult_sheaf_spec U ?a3 (mult_sheaf_spec U ?b3 ?c3)
goal (5 subgoals):
1. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
2. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a = a
3. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
4. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
5. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
[PROOF STEP]
show "mult_sheaf_spec U (one_sheaf_spec U) a = a"
if "a \<in> \<O> U" for a
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U (one_sheaf_spec U) a = a
[PROOF STEP]
proof (rule extensionalityI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U (one_sheaf_spec U) a \<in> extensional ?A
2. a \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a x = a x
[PROOF STEP]
fix \<pp>
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U (one_sheaf_spec U) a \<in> extensional ?A
2. a \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a x = a x
[PROOF STEP]
assume "\<pp> \<in> U"
[PROOF STATE]
proof (state)
this:
\<pp> \<in> U
goal (3 subgoals):
1. mult_sheaf_spec U (one_sheaf_spec U) a \<in> extensional ?A
2. a \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a x = a x
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close> imp_qr
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U (one_sheaf_spec U) a \<in> extensional ?A
2. a \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) a x = a x
[PROOF STEP]
show "mult_sheaf_spec U (one_sheaf_spec U) a \<pp> = a \<pp>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U (one_sheaf_spec U) a \<pp> = a \<pp>
[PROOF STEP]
unfolding mult_sheaf_spec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (one_sheaf_spec U \<pp>) (a \<pp>)) \<pp> = a \<pp>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close>
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (one_sheaf_spec U \<pp>) (a \<pp>)) \<pp> = a \<pp>
[PROOF STEP]
by (simp add: one_sheaf_spec_def qr_valid_frac that)
[PROOF STATE]
proof (state)
this:
mult_sheaf_spec U (one_sheaf_spec U) a \<pp> = a \<pp>
goal (2 subgoals):
1. mult_sheaf_spec U (one_sheaf_spec U) a \<in> extensional U
2. a \<in> extensional U
[PROOF STEP]
qed (auto simp add: \<open>a \<in> \<O> U\<close>)
[PROOF STATE]
proof (state)
this:
?a3 \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) ?a3 = ?a3
goal (4 subgoals):
1. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
2. \<And>a. a \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U a (one_sheaf_spec U) = a
3. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
4. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?a3 \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) ?a3 = ?a3
[PROOF STEP]
show "mult_sheaf_spec U a (one_sheaf_spec U) = a"
if "a \<in> \<O> U" for a
[PROOF STATE]
proof (prove)
using this:
?a3 \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U (one_sheaf_spec U) ?a3 = ?a3
goal (1 subgoal):
1. mult_sheaf_spec U a (one_sheaf_spec U) = a
[PROOF STEP]
by (simp add: \<open>one_sheaf_spec U \<in> \<O> U\<close> mult_comm that)
[PROOF STATE]
proof (state)
this:
?a3 \<in> \<O> U \<Longrightarrow> mult_sheaf_spec U ?a3 (one_sheaf_spec U) = ?a3
goal (3 subgoals):
1. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
2. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
3. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
[PROOF STEP]
show "mult_sheaf_spec U a (add_sheaf_spec U b c)
= add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)"
if "a \<in> \<O> U" and "b \<in> \<O> U" and "c \<in> \<O> U" for a b c
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U a (add_sheaf_spec U b c) = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c)
[PROOF STEP]
proof (rule extensionalityI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
2. add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) x = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) x
[PROOF STEP]
fix \<pp>
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
2. add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) x = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) x
[PROOF STEP]
assume "\<pp> \<in> U"
[PROOF STATE]
proof (state)
this:
\<pp> \<in> U
goal (3 subgoals):
1. mult_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
2. add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) x = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) x
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close> imp_qr
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mult_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional ?A
2. add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> mult_sheaf_spec U a (add_sheaf_spec U b c) x = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) x
[PROOF STEP]
show "mult_sheaf_spec U a (add_sheaf_spec U b c) \<pp> =
add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<pp>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mult_sheaf_spec U a (add_sheaf_spec U b c) \<pp> = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<pp>
[PROOF STEP]
unfolding mult_sheaf_spec_def add_sheaf_spec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (b \<pp>) (c \<pp>)) \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> ((\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) (b \<pp>)) \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.mult_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (a \<pp>) (c \<pp>)) \<pp>)) \<pp>
[PROOF STEP]
by (simp add: cq.distributive(1) qr_valid_frac that(1) that(2) that(3))
[PROOF STATE]
proof (state)
this:
mult_sheaf_spec U a (add_sheaf_spec U b c) \<pp> = add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<pp>
goal (2 subgoals):
1. mult_sheaf_spec U a (add_sheaf_spec U b c) \<in> extensional U
2. add_sheaf_spec U (mult_sheaf_spec U a b) (mult_sheaf_spec U a c) \<in> extensional U
[PROOF STEP]
qed auto
[PROOF STATE]
proof (state)
this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U; ?c3 \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U ?a3 (add_sheaf_spec U ?b3 ?c3) = add_sheaf_spec U (mult_sheaf_spec U ?a3 ?b3) (mult_sheaf_spec U ?a3 ?c3)
goal (2 subgoals):
1. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
2. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U; ?c3 \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U ?a3 (add_sheaf_spec U ?b3 ?c3) = add_sheaf_spec U (mult_sheaf_spec U ?a3 ?b3) (mult_sheaf_spec U ?a3 ?c3)
[PROOF STEP]
show "mult_sheaf_spec U (add_sheaf_spec U b c) a
= add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)"
if "a \<in> \<O> U" and "b \<in> \<O> U" and "c \<in> \<O> U" for a b c
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U; ?c3 \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U ?a3 (add_sheaf_spec U ?b3 ?c3) = add_sheaf_spec U (mult_sheaf_spec U ?a3 ?b3) (mult_sheaf_spec U ?a3 ?c3)
goal (1 subgoal):
1. mult_sheaf_spec U (add_sheaf_spec U b c) a = add_sheaf_spec U (mult_sheaf_spec U b a) (mult_sheaf_spec U c a)
[PROOF STEP]
by (simp add: add_\<O> mult_comm that(1) that(2) that(3))
[PROOF STATE]
proof (state)
this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U; ?c3 \<in> \<O> U\<rbrakk> \<Longrightarrow> mult_sheaf_spec U (add_sheaf_spec U ?b3 ?c3) ?a3 = add_sheaf_spec U (mult_sheaf_spec U ?b3 ?a3) (mult_sheaf_spec U ?c3 ?a3)
goal (1 subgoal):
1. \<And>u. u \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
[PROOF STEP]
show "monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u"
if "u \<in> \<O> U" for u
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) u
[PROOF STEP]
proof (rule monoid.invertibleI)
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. Group_Theory.monoid (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U)
2. add_sheaf_spec U u ?v = zero_sheaf_spec U
3. add_sheaf_spec U ?v u = zero_sheaf_spec U
4. u \<in> \<O> U
5. ?v \<in> \<O> U
[PROOF STEP]
show "Group_Theory.monoid (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Group_Theory.monoid (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U)
[PROOF STEP]
apply unfold_locales
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>a b. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U a b \<in> \<O> U
2. zero_sheaf_spec U \<in> \<O> U
3. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
4. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a = a
5. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
[PROOF STEP]
using add_\<O> \<open>zero_sheaf_spec U \<in> \<O> U\<close> add_assoc \<open>zero_sheaf_spec U \<in> \<O> U\<close>
add_comm add_zero add_zero
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U ?a3 ?b3 \<in> \<O> U
zero_sheaf_spec U \<in> \<O> U
\<lbrakk>?a3 \<in> \<O> U; ?b3 \<in> \<O> U; ?c3 \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U ?a3 ?b3) ?c3 = add_sheaf_spec U ?a3 (add_sheaf_spec U ?b3 ?c3)
zero_sheaf_spec U \<in> \<O> U
\<lbrakk>?x3 \<in> \<O> U; ?y3 \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U ?x3 ?y3 = add_sheaf_spec U ?y3 ?x3
?a3 \<in> \<O> U \<Longrightarrow> add_sheaf_spec U ?a3 (zero_sheaf_spec U) = ?a3
?a3 \<in> \<O> U \<Longrightarrow> add_sheaf_spec U ?a3 (zero_sheaf_spec U) = ?a3
goal (5 subgoals):
1. \<And>a b. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U a b \<in> \<O> U
2. zero_sheaf_spec U \<in> \<O> U
3. \<And>a b c. \<lbrakk>a \<in> \<O> U; b \<in> \<O> U; c \<in> \<O> U\<rbrakk> \<Longrightarrow> add_sheaf_spec U (add_sheaf_spec U a b) c = add_sheaf_spec U a (add_sheaf_spec U b c)
4. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U (zero_sheaf_spec U) a = a
5. \<And>a. a \<in> \<O> U \<Longrightarrow> add_sheaf_spec U a (zero_sheaf_spec U) = a
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
Group_Theory.monoid (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U)
goal (4 subgoals):
1. add_sheaf_spec U u ?v = zero_sheaf_spec U
2. add_sheaf_spec U ?v u = zero_sheaf_spec U
3. u \<in> \<O> U
4. ?v \<in> \<O> U
[PROOF STEP]
show "add_sheaf_spec U u (uminus_sheaf_spec U u) = zero_sheaf_spec U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) = zero_sheaf_spec U
[PROOF STEP]
proof (rule extensionalityI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) \<in> extensional ?A
2. zero_sheaf_spec U \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U u (uminus_sheaf_spec U u) x = zero_sheaf_spec U x
[PROOF STEP]
fix \<pp>
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) \<in> extensional ?A
2. zero_sheaf_spec U \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U u (uminus_sheaf_spec U u) x = zero_sheaf_spec U x
[PROOF STEP]
assume "\<pp> \<in> U"
[PROOF STATE]
proof (state)
this:
\<pp> \<in> U
goal (3 subgoals):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) \<in> extensional ?A
2. zero_sheaf_spec U \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U u (uminus_sheaf_spec U u) x = zero_sheaf_spec U x
[PROOF STEP]
interpret cq:quotient_ring "R\<setminus>\<pp>" R "(+)" "(\<cdot>)" \<zero> \<one>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close> imp_qr
[PROOF STATE]
proof (prove)
using this:
\<pp> \<in> U
?\<pp>3 \<in> U \<Longrightarrow> Comm_Ring.quotient_ring (R\<setminus>?\<pp>3) R (+) (\<cdot>) \<zero> \<one>
goal (1 subgoal):
1. Comm_Ring.quotient_ring (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) \<in> extensional ?A
2. zero_sheaf_spec U \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U u (uminus_sheaf_spec U u) x = zero_sheaf_spec U x
[PROOF STEP]
have "cq.add_rel (u \<pp>) (cq.uminus_rel (u \<pp>)) = cq.zero_rel"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cq.add_rel (u \<pp>) (cq.uminus_rel (u \<pp>)) = cq.zero_rel
[PROOF STEP]
by (simp add: \<open>\<pp> \<in> U\<close> cq.add_minus_zero_rel qr_valid_frac that)
[PROOF STATE]
proof (state)
this:
cq.add_rel (u \<pp>) (cq.uminus_rel (u \<pp>)) = cq.zero_rel
goal (3 subgoals):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) \<in> extensional ?A
2. zero_sheaf_spec U \<in> extensional ?A
3. \<And>x. x \<in> ?A \<Longrightarrow> add_sheaf_spec U u (uminus_sheaf_spec U u) x = zero_sheaf_spec U x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
cq.add_rel (u \<pp>) (cq.uminus_rel (u \<pp>)) = cq.zero_rel
[PROOF STEP]
show "add_sheaf_spec U u (uminus_sheaf_spec U u) \<pp> = zero_sheaf_spec U \<pp>"
[PROOF STATE]
proof (prove)
using this:
cq.add_rel (u \<pp>) (cq.uminus_rel (u \<pp>)) = cq.zero_rel
goal (1 subgoal):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) \<pp> = zero_sheaf_spec U \<pp>
[PROOF STEP]
unfolding add_sheaf_spec_def uminus_sheaf_spec_def zero_sheaf_spec_def
[PROOF STATE]
proof (prove)
using this:
cq.add_rel (u \<pp>) (cq.uminus_rel (u \<pp>)) = cq.zero_rel
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (u \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.uminus_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (u \<pp>)) \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.zero_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>) \<pp>
[PROOF STEP]
using \<open>\<pp> \<in> U\<close>
[PROOF STATE]
proof (prove)
using this:
cq.add_rel (u \<pp>) (cq.uminus_rel (u \<pp>)) = cq.zero_rel
\<pp> \<in> U
goal (1 subgoal):
1. (\<lambda>\<pp>\<in>U. quotient_ring.add_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (u \<pp>) ((\<lambda>\<pp>\<in>U. quotient_ring.uminus_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> (u \<pp>)) \<pp>)) \<pp> = (\<lambda>\<pp>\<in>U. quotient_ring.zero_rel (R\<setminus>\<pp>) R (+) (\<cdot>) \<zero> \<one>) \<pp>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
add_sheaf_spec U u (uminus_sheaf_spec U u) \<pp> = zero_sheaf_spec U \<pp>
goal (2 subgoals):
1. add_sheaf_spec U u (uminus_sheaf_spec U u) \<in> extensional U
2. zero_sheaf_spec U \<in> extensional U
[PROOF STEP]
qed auto
[PROOF STATE]
proof (state)
this:
add_sheaf_spec U u (uminus_sheaf_spec U u) = zero_sheaf_spec U
goal (3 subgoals):
1. add_sheaf_spec U (uminus_sheaf_spec U u) u = zero_sheaf_spec U
2. u \<in> \<O> U
3. uminus_sheaf_spec U u \<in> \<O> U
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
add_sheaf_spec U u (uminus_sheaf_spec U u) = zero_sheaf_spec U
[PROOF STEP]
show "add_sheaf_spec U (uminus_sheaf_spec U u) u = zero_sheaf_spec U"
[PROOF STATE]
proof (prove)
using this:
add_sheaf_spec U u (uminus_sheaf_spec U u) = zero_sheaf_spec U
goal (1 subgoal):
1. add_sheaf_spec U (uminus_sheaf_spec U u) u = zero_sheaf_spec U
[PROOF STEP]
by (simp add: add_comm assms comm_ring.zariski_open_is_subset local.comm_ring_axioms
that uminus_sheaf_spec_in_sheaf_spec)
[PROOF STATE]
proof (state)
this:
add_sheaf_spec U (uminus_sheaf_spec U u) u = zero_sheaf_spec U
goal (2 subgoals):
1. u \<in> \<O> U
2. uminus_sheaf_spec U u \<in> \<O> U
[PROOF STEP]
show "u \<in> \<O> U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u \<in> \<O> U
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
u \<in> \<O> U
goal (1 subgoal):
1. u \<in> \<O> U
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
u \<in> \<O> U
goal (1 subgoal):
1. uminus_sheaf_spec U u \<in> \<O> U
[PROOF STEP]
show "uminus_sheaf_spec U u \<in> \<O> U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. uminus_sheaf_spec U u \<in> \<O> U
[PROOF STEP]
by (simp add: assms comm_ring.zariski_open_is_subset local.comm_ring_axioms
that uminus_sheaf_spec_in_sheaf_spec)
[PROOF STATE]
proof (state)
this:
uminus_sheaf_spec U u \<in> \<O> U
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?u3 \<in> \<O> U \<Longrightarrow> monoid.invertible (\<O> U) (add_sheaf_spec U) (zero_sheaf_spec U) ?u3
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 25695, "file": "Grothendieck_Schemes_Comm_Ring", "length": 147}
|
import os
import pickle
from collections import defaultdict
import numpy as np
def get_paths(root_folder):
    """
    Map each feature directory under `root_folder` to the file names it
    contains.

    :param root_folder: dataset root whose immediate subdirectories are
        feature (activity) folders
    :return: defaultdict mapping feature name -> list of file names
    """
    path_dict = defaultdict(list)
    for feature_dir in os.listdir(root_folder):
        feature_path = os.path.join(root_folder, feature_dir)
        for entry in os.listdir(feature_path):
            path_dict[feature_dir].append(entry)
    return path_dict
def read_file(file_path):
    """
    Parse a text file of space-separated floats.

    :param file_path: path to a file with one whitespace-separated numeric
        row per line
    :return: list of rows, each a list of floats
    """
    rows = []
    with open(file_path, 'r') as handle:
        for raw_line in handle:
            rows.append([float(token) for token in raw_line.strip().split(' ')])
    return rows
def extract_raw_data(root_folder, output_folder, filename):
    """
    Read every data file under `root_folder` (one subdirectory per feature)
    and pickle the resulting {feature: [file_rows, ...]} dictionary to
    `output_folder`/`filename`.

    :param root_folder: dataset root, one subdirectory per feature
    :param output_folder: directory for the output pickle (created if missing)
    :param filename: name of the pickle file to write
    """
    dictionary = defaultdict(list)
    print('Extracting Raw Data ...')
    output_file = os.path.join(output_folder, filename)
    for feature, file_names in get_paths(root_folder).items():
        data = []
        for datafile in file_names:
            path = os.path.join(root_folder, feature, datafile)
            data.append(read_file(path))
        dictionary[feature] = data
        print(f' Completed data extraction for feature: {feature}')
    # exist_ok avoids the race between an exists() check and makedirs().
    os.makedirs(output_folder, exist_ok=True)
    # Context manager instead of pickle.dump(d, open(...)): the original
    # never closed the output handle.
    with open(output_file, 'wb') as out:
        pickle.dump(dictionary, out)
    print(f'Pickle dumped to file: {output_file}\n')
def create_sliding_windows(data, freq, window, slider):
    """
    Split `data` into (possibly overlapping) fixed-length windows.

    :param data: sequence of samples
    :param freq: sampling frequency of the accelerometer (samples/second)
    :param window: window length in seconds
    :param slider: shift between consecutive windows, in seconds
    :return: list of windows, each `freq * window` samples long
    """
    window_len = freq * window
    step = freq * slider
    return [data[start:start + window_len]
            for start in range(0, len(data) - window_len, step)]
def prepare_data(pickle_file, freq, window, prepared_data, slider=1, func=None):
    """
    Turn the raw pickled data into (X, y) numpy arrays of sliding windows
    and pickle them to `prepared_data`.

    :param pickle_file: path of the raw-data pickle ({feature: [series, ...]})
    :param freq: sampling frequency in Hz
    :param window: sliding-window length in seconds
    :param prepared_data: output pickle path for the (x_values, y_values) tuple
    :param slider: shift between consecutive windows, in seconds
    :param func: optional callable applied to every series before windowing
    """
    print('Preparing data for Neural Network ...')
    with open(pickle_file, 'rb') as bin_file:
        raw_data = pickle.load(bin_file)
    # Integer class label for each feature name (enumeration order).
    key_to_int = {v: k for k, v in enumerate(raw_data.keys())}
    x_values = []
    y_values = []
    for feature, data_points in raw_data.items():
        for data in data_points:
            if callable(func):
                data = func(data)
            sliding_windows = create_sliding_windows(data, freq, window, slider=slider)
            x_values.extend(sliding_windows)
            # One label per window of this series.
            y_values.extend([key_to_int[feature]] * len(sliding_windows))
    x_values = np.asarray(x_values)
    y_values = np.asarray(y_values)
    print(f' Number of sliding windows: {len(x_values)}')
    print(f' Number of true values: {len(y_values)}')
    # Context manager fixes the original's unclosed output handle from
    # pickle.dump((x, y), open(path, 'wb')).
    with open(prepared_data, 'wb') as out:
        pickle.dump((x_values, y_values), out)
    print(f'Pickle dumped to file: {prepared_data}\n')
def extract_adl(adl_categories, root, output_folder, filename):
    """
    Group motion-primitive recordings into Activities-of-Daily-Life (ADL)
    categories and pickle the {adl: [file_rows, ...]} dictionary.

    :param adl_categories: mapping ADL name -> list of motion folder names
    :param root: dataset root, one subdirectory per motion primitive
    :param output_folder: directory for the output pickle (created if missing)
    :param filename: name of the pickle file to write
    """
    dictionary = defaultdict(list)
    print('Extracting ADL Categories from Raw Data ...')
    output_file = os.path.join(output_folder, filename)
    paths = get_paths(root)
    for adl, motions in adl_categories.items():
        data = []
        for motion in motions:
            for file_name in paths[motion]:
                path = os.path.join(root, motion, file_name)
                data.append(read_file(path))
            print(f' Completed data extraction for specific_movement: {motion}')
        dictionary[adl] = data
        print(f' Data extracted for feature: {adl}\n')
    # exist_ok avoids the race between an exists() check and makedirs().
    os.makedirs(output_folder, exist_ok=True)
    # Context manager fixes the original's leaked output handle.
    with open(output_file, 'wb') as out:
        pickle.dump(dictionary, out)
    print(f'Pickle dumped to file: {output_file}\n')
def extract_adl_from_raw_data(root, output_folder, filename):
    """
    Declare the Activity-of-Daily-Life -> motion-primitive grouping and
    delegate extraction to `extract_adl`.
    """
    adl_to_motions = {
        'Personal_hygiene': ['Brush_teeth', 'Comb_hair'],
        'Mobility': ['Climb_stairs', 'Descend_stairs', 'Walk'],
        'Feeding': ['Drink_glass', 'Pour_water', 'Eat_meat', 'Eat_soup'],
        'Communication': ['Use_telephone'],
        'Functional Transfers': ['Getup_bed', 'Liedown_bed', 'Standup_chair', 'Sitdown_chair']
    }
    extract_adl(adl_to_motions, root, output_folder, filename)
if __name__ == '__main__':
    print('Running code from read_dataset.py ... \n')
    # Configuration: set new_data=True to rebuild the raw pickle from disk;
    # movement_type selects between the basic (ADL-grouped) and specific
    # (per-motion) extraction paths.
    data_folder = 'data/ucl_dataset'
    movement_type = 'specific'
    new_data = False
    raw_pickle = 'raw_data.p'
    output = f'data/processed_data/{movement_type}_movement/'
    if new_data and movement_type == 'basic':
        extract_adl_from_raw_data(data_folder, output, raw_pickle)
    elif new_data and not movement_type == 'basic':
        extract_raw_data(data_folder, output, raw_pickle)
    freq = 32  # Hz
    windows = [3, 5, 10]  # sec
    raw_data = os.path.join(output, raw_pickle)
    # NOTE(review): `d` is loaded but never used below; this read only
    # verifies the pickle exists/loads — confirm whether it is still needed.
    with open(raw_data, 'rb') as bin_file:
        d = pickle.load(bin_file)
    for window in windows:
        prep_pickle = f'data/processed_data/{movement_type}_movement/sliding_window_{window}_sec.p'
        # The 3-second window slides by its own length (no overlap);
        # longer windows slide by 1 second (overlapping).
        if window == 3:
            slider = window
        else:
            slider = 1
        prepare_data(raw_data, freq, window, prep_pickle, slider=slider)
|
{"hexsha": "0eed43f4aa064cdd8b35e229eb84793f76e02545", "size": 5479, "ext": "py", "lang": "Python", "max_stars_repo_path": "read_dataset.py", "max_stars_repo_name": "Tommy-Johannessen/MovementRecognition", "max_stars_repo_head_hexsha": "be84d7d014a272987dd20d03194336a9244eb900", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "read_dataset.py", "max_issues_repo_name": "Tommy-Johannessen/MovementRecognition", "max_issues_repo_head_hexsha": "be84d7d014a272987dd20d03194336a9244eb900", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "read_dataset.py", "max_forks_repo_name": "Tommy-Johannessen/MovementRecognition", "max_forks_repo_head_hexsha": "be84d7d014a272987dd20d03194336a9244eb900", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-13T12:42:39.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-13T12:42:39.000Z", "avg_line_length": 28.5364583333, "max_line_length": 99, "alphanum_fraction": 0.6541339661, "include": true, "reason": "import numpy", "num_tokens": 1277}
|
import torch
from torch.utils.data import Dataset
from PIL import Image, ImageFile
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import albumentations
from albumentations.pytorch import ToTensorV2
import operator
ImageFile.LOAD_TRUNCATED_IMAGES = True
class DatasetUtils(object):
    """
    Utilities for building train/validation PyTorch Datasets, either from a
    random split or from pre-computed cross-validation fold indices.
    """

    @staticmethod
    def splitter(train_df=None, image_paths=None, targets=None, valid_size=0.25):
        """
        Split images/labels into train and validation sets.

        If `train_df` is a DataFrame, `image_paths` and `targets` are column
        names; otherwise they are the image-path and label sequences
        themselves.

        :return: (train_images, train_labels, valid_images, valid_labels)
        """
        if isinstance(train_df, pd.DataFrame):
            train_images, valid_images, train_labels, valid_labels = train_test_split(
                train_df[image_paths],
                train_df[targets],
                test_size=valid_size,
                random_state=42,
            )
            # .values: return numpy arrays, not pandas Series.
            return (
                train_images.values,
                train_labels.values,
                valid_images.values,
                valid_labels.values,
            )
        else:
            train_images, valid_images, train_labels, valid_labels = train_test_split(
                image_paths, targets, test_size=valid_size, random_state=42,
            )
            return (
                train_images,
                train_labels,
                valid_images,
                valid_labels,
            )

    @staticmethod
    def make_dataset(
        train_df=None,
        image_paths=None,
        targets=None,
        train_tfms=None,
        valid_tfms=None,
        train_idx=None,
        val_idx=None,
        valid_size=0.25,
        is_CV=False,
        split=True,
    ):
        """
        Build dataset objects.

        * is_CV=True: a CVDataset pair from `train_idx`/`val_idx` fold indices.
        * split=True: a SimpleDataset pair from a random train/valid split.
        * otherwise: a single SimpleDataset over all the data.

        :return: (train_dataset, valid_dataset) or a single train_dataset
        """
        if is_CV:
            train_dataset = CVDataset(
                df=train_df,
                indices=train_idx,
                image_paths=image_paths,
                targets=targets,
                transform=train_tfms,
            )
            valid_dataset = CVDataset(
                df=train_df,
                indices=val_idx,
                image_paths=image_paths,
                targets=targets,
                transform=valid_tfms,
            )
            # Bug fix: the original built both CV datasets but fell through
            # without a return statement, so callers received None.
            return train_dataset, valid_dataset
        if split:
            (
                train_image_paths,
                train_labels,
                valid_image_paths,
                valid_labels,
            ) = DatasetUtils.splitter(
                train_df=train_df,
                image_paths=image_paths,
                targets=targets,
                valid_size=valid_size,
            )
            train_dataset = SimpleDataset(
                image_paths=train_image_paths,
                targets=train_labels,
                transform=train_tfms,
            )
            valid_dataset = SimpleDataset(
                image_paths=valid_image_paths,
                targets=valid_labels,
                transform=valid_tfms,
            )
            return train_dataset, valid_dataset
        train_dataset = SimpleDataset(
            image_paths=image_paths, targets=targets, transform=train_tfms
        )
        return train_dataset
class SimpleDataset(Dataset):
    """
    Minimal image dataset: loads an image from disk, applies an optional
    user transform, then normalizes and converts it to a tensor.
    """

    def __init__(self, image_paths, targets, transform=None):
        self.image_paths = image_paths
        self.targets = targets
        self.transform = transform
        # Normalization + tensor conversion applied to every sample.
        self.default_aug = albumentations.Compose(
            [albumentations.Normalize(), ToTensorV2()]
        )

    def __getitem__(self, index: int):
        img = Image.open(self.image_paths[index])
        target = self.targets[index]
        if self.transform:
            img = self.transform(img)
        processed = self.default_aug(image=np.array(img))
        return (
            torch.tensor(processed['image']),
            torch.tensor(target),
        )

    def __len__(self):
        return len(self.image_paths)
class PascalVOCDataset(Dataset):
    """
    Dataset yielding {"image", "box", "class"} samples from a dataframe of
    Pascal-VOC-style annotations. `image_paths`, `boxes` and `targets` are
    column names into `df`.
    """

    def __init__(self, df, image_paths, boxes, targets, transform=None):
        self.df = df
        self.transform = transform
        self.image_paths = image_paths
        self.boxes = boxes
        self.targets = targets
        # Normalization only (no ToTensorV2, unlike the sibling datasets).
        self.default_aug = albumentations.Normalize()

    def __getitem__(self, idx):
        image = Image.open(self.df[self.image_paths][idx])
        box = self.df[self.boxes].astype(np.float32)[idx]
        class_ = self.df[self.targets].astype(np.float32)[idx]
        if self.transform:
            image = self.transform(image)
        # NOTE(review): `augmented_image` is computed but never used — the
        # untouched `image` is returned below. The sibling datasets return
        # torch.tensor(augmented_image['image']); confirm whether the
        # normalized output was intended here before changing callers.
        augmented_image = self.default_aug(image=np.array(image))
        box = torch.as_tensor(box, dtype=torch.float32)
        class_ = torch.as_tensor(class_, dtype=torch.float32)
        return {
            "image": image,
            "box": box,
            "class": class_
        }

    def __len__(self):
        return len(self.df)
class CVDataset(Dataset):
    """
    Cross-validation dataset: serves the fold selected by `indices`, reading
    image paths and labels either from a dataframe (where `image_paths` and
    `targets` are column names) or from parallel sequences.
    """

    def __init__(self, df, indices, image_paths, targets, transform=None):
        self.df = df
        self.indices = indices
        self.transform = transform
        self.image_paths = image_paths
        self.targets = targets
        # Normalization + tensor conversion applied to every sample.
        self.default_aug = albumentations.Compose(
            [
                albumentations.Normalize(),
                ToTensorV2()
            ]
        )

    def __getitem__(self, idx):
        # Select this fold's image ids and labels.
        if isinstance(self.df, pd.DataFrame):
            image_ids = operator.itemgetter(*self.indices)(self.df[self.image_paths])
            # Bug fix: the original read `self.target_cols`, an attribute
            # that is never defined (AttributeError at runtime); the label
            # column name is stored in `self.targets`.
            labels = operator.itemgetter(*self.indices)(self.df[self.targets])
        else:
            image_ids = operator.itemgetter(*self.indices)(self.image_paths)
            labels = operator.itemgetter(*self.indices)(self.targets)
        image = Image.open(image_ids[idx])
        label = labels[idx]
        if self.transform:
            image = self.transform(image)
        augmented_image = self.default_aug(image=np.array(image))
        return (
            torch.tensor(augmented_image['image']),
            torch.tensor(label),
        )

    def __len__(self):
        return len(self.indices)
|
{"hexsha": "222d384a572ad5dd137d8b96a21e25c2b222f51d", "size": 6276, "ext": "py", "lang": "Python", "max_stars_repo_path": "MyVision/dataset/Dataset.py", "max_stars_repo_name": "Abhiswain97/MyVision", "max_stars_repo_head_hexsha": "2f8dd5c57d979b2bec365d637575e839e4b2427b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-04T00:19:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-04T00:19:33.000Z", "max_issues_repo_path": "MyVision/dataset/Dataset.py", "max_issues_repo_name": "Abhiswain97/MyVision", "max_issues_repo_head_hexsha": "2f8dd5c57d979b2bec365d637575e839e4b2427b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MyVision/dataset/Dataset.py", "max_forks_repo_name": "Abhiswain97/MyVision", "max_forks_repo_head_hexsha": "2f8dd5c57d979b2bec365d637575e839e4b2427b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-06T23:28:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-06T23:28:03.000Z", "avg_line_length": 28.1434977578, "max_line_length": 86, "alphanum_fraction": 0.5540152964, "include": true, "reason": "import numpy", "num_tokens": 1190}
|
#include <boost/test/utils/iterator/token_iterator.hpp>
|
{"hexsha": "6bc50dfd9839219d31b28c42f253afa3657cd300", "size": 56, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_test_utils_iterator_token_iterator.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_test_utils_iterator_token_iterator.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_test_utils_iterator_token_iterator.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 28.0, "max_line_length": 55, "alphanum_fraction": 0.8214285714, "num_tokens": 11}
|
import numpy as np
from numpy.testing import assert_almost_equal
import filecmp
from ..molecules.protein import Protein
# Module-level fixture: a two-residue glycine peptide used by the tests.
seq_reference = 'GG'
prot = Protein(seq_reference)
def test_coords():
    """Coordinates of the reference GG peptide match the stored fixture."""
    coords = prot.coords
    # this test is valid for GG
    ref_coords = np.array([[-3.28713324, 1.37438873, -0.25902808],
                           [-1.83713324, 1.46438873, -0.51902808],
                           [-0.99713324, 0.76438873, 0.53097192],
                           [-1.16706266, 0.8527295, 1.72047597],
                           [-3.45713324, 0.84438873, 0.60097192],
                           [-1.60713324, 1.00438873, -1.47902808],
                           [-1.52713324, 2.51438873, -0.52902808],
                           [0., 0., 0.],
                           [0.81779359, -0.68149466, 1.02224199],
                           [2.30716697, -0.58905081, 0.75518192],
                           [2.80465929, -0.02404619, -0.18549399],
                           [0.34230744, -0.23438601, -0.93663697],
                           [0.63403818, -0.23355634, 1.99778553],
                           [0.5632952, -1.7459708, 1.05089699]])
    assert_almost_equal(coords, ref_coords)
def test_exclusions():
    """Non-bonded exclusion pairs (1-2 and 1-3) for the GG fixture."""
    # the first 13 tuples correspond to 1-2 interactions (bonds),
    # the rest are 1-3 interactions.
    # this test is valid for GG
    reference = {(0, 1), (0, 4), (1, 5), (1, 6), (1, 2), (2, 3), (2, 7),
                 (7, 11), (7, 8), (8, 9), (8, 12), (8, 13), (9, 10), (1, 4),
                 (0, 2), (0, 5), (0, 6), (2, 5), (2, 6), (1, 3), (1, 7),
                 (3, 7), (2, 11), (2, 8), (5, 6), (7, 12), (7, 13), (7, 9),
                 (8, 11), (9, 13), (9, 12), (8, 10), (12, 13)}
    exclusions = prot._exclusions
    assert reference == exclusions
def test_names():
    """Atom names: one glycine atom list, repeated for both residues of GG."""
    # this test is valid for GG
    expected = ['N', 'CA', 'C', 'O', 'H', 'HA2', 'HA3'] * 2
    assert expected == prot._names
def test_offsets():
    """Per-residue atom offsets for GG (7 atoms per glycine)."""
    # this test is valid for GG
    assert prot._offsets == [0, 7, 14, 21]
def test_set_get_torsionals():
    """Set backbone torsionals on a poly-glycine and read them back."""
    # create a molecule, set the backbone torsional angles and then check
    # that the actual values correspond to the set ones.
    poly_gly = Protein('GGGGGG')
    for i in range(len(poly_gly)):
        poly_gly.set_phi(i, -60.)
        poly_gly.set_psi(i, -40.)
    # Terminal residues are excluded: phi/psi are undefined there.
    for i in range(1, len(poly_gly)-1):
        assert_almost_equal(poly_gly.get_phi(i), -60., 5)
        assert_almost_equal(poly_gly.get_psi(i), -40., 5)
    # Bug fix: the original asserted on the module-level `prot` (length 2)
    # using poly_gly's indices — get_psi(len(poly_gly)-1) == get_psi(5) is
    # out of range for GG. The undefined-terminal checks belong to poly_gly.
    assert poly_gly.get_phi(0) is np.nan
    assert poly_gly.get_psi(len(poly_gly)-1) is np.nan
def test_at_coords():
    """Residue indexing and atom selection on the second residue of GG."""
    second = prot[1]
    assert_almost_equal(second.coords, prot.coords[7:])
    assert_almost_equal(second['N'], prot.coords[7])
    assert_almost_equal(second['bb'], prot.coords[7:])
    # Glycine has no side chain.
    assert len(second['sc']) == 0
def test_protein():
    """PDB round-trip: dump, re-read, dump again — the files must match."""
    prot.dump_pdb('test_1')
    reloaded = Protein(pdb='test_1.pdb')
    reloaded.dump_pdb('test_2')
    assert filecmp.cmp('test_1.pdb', 'test_2.pdb')
    assert reloaded.sequence == prot.sequence
|
{"hexsha": "f5ac374bcc14472594e866f3b0e7e27e847a702f", "size": 3048, "ext": "py", "lang": "Python", "max_stars_repo_path": "bomeba0/tests/test_protein.py", "max_stars_repo_name": "aloctavodia/bomeba0", "max_stars_repo_head_hexsha": "e212986d8ee60be1da91d63a7a889db14ec851c3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bomeba0/tests/test_protein.py", "max_issues_repo_name": "aloctavodia/bomeba0", "max_issues_repo_head_hexsha": "e212986d8ee60be1da91d63a7a889db14ec851c3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2017-06-01T15:46:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-01T18:28:36.000Z", "max_forks_repo_path": "bomeba0/tests/test_protein.py", "max_forks_repo_name": "aloctavodia/bomeba0", "max_forks_repo_head_hexsha": "e212986d8ee60be1da91d63a7a889db14ec851c3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-09-30T13:26:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T10:01:18.000Z", "avg_line_length": 35.8588235294, "max_line_length": 77, "alphanum_fraction": 0.5377296588, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1033}
|
(** Generated by coq-of-ocaml *)
Require Import OCaml.OCaml.
Local Set Primitive Projections.
Local Open Scope string_scope.
Local Open Scope Z_scope.
Local Open Scope type_scope.
Import ListNotations.
Unset Positivity Checking.
Unset Guard Checking.
(* Peano-style naturals; shadows Coq's [nat] (generated, apparently unused
   by the definitions below). *)
Inductive nat : Set :=
| O : nat
| S : nat -> nat.

(* Unary natural numbers used by the arithmetic below. *)
Inductive natural : Set :=
| Zero : natural
| Succ : natural -> natural.

(* Addition, by structural recursion on the first argument. *)
Fixpoint plus (plus_arg0 : natural) (plus_arg1 : natural) {struct plus_arg0}
  : natural :=
  match plus_arg0 with
  | Zero => plus_arg1
  | Succ n => Succ (plus n plus_arg1)
  end.

(* Multiplication, by structural recursion on the first argument. *)
Fixpoint mult (mult_arg0 : natural) (mult_arg1 : natural) {struct mult_arg0}
  : natural :=
  match mult_arg0 with
  | Zero => Zero
  | Succ n => plus (mult n mult_arg1) mult_arg1
  end.

(* Synthesized candidate term: (Succ y) * lf2. Note [lf1] is unused —
   expected for machine-generated lemma candidates. *)
Definition synth (lf2 : natural) (y : natural) (lf1 : natural) : natural :=
  mult (Succ y) lf2.
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal33_mult_succ_80_plus_succ/goal33conj203_coqofml_5gOc2D.v"}
|
using BinaryProvider

# Parse some basic command-line arguments
const verbose = "--verbose" in ARGS
# Install prefix: first non-flag argument, defaulting to deps/usr.
const prefix = Prefix(get([a for a in ARGS if a != "--verbose"], 1, joinpath(@__DIR__, "usr")))
products = Product[
    # Instantiate products here, e.g.:
    LibraryProduct(prefix, "libgumbo", :libgumbo),
]

# Download binaries from hosted location
bin_prefix = "https://github.com/JuliaWeb/GumboBuilder/releases/download/v0.1.0"

# Listing of files generated by BinaryBuilder:
# maps each supported platform to (tarball URL, SHA-256 hash); the hash is
# verified on download.
download_info = Dict(
    BinaryProvider.Linux(:aarch64, :glibc) => ("$bin_prefix/Gumbo.aarch64-linux-gnu.tar.gz", "14e50e3705352ead8e189375c93886028b3a947e61e31b0e8f8068d8f6a7e84b"),
    BinaryProvider.Linux(:armv7l, :glibc) => ("$bin_prefix/Gumbo.arm-linux-gnueabihf.tar.gz", "d76c42f3f3a8f75ab8fa976f8e309bc7bd30e2f2188844df785c4b36462545fb"),
    BinaryProvider.Linux(:i686, :glibc) => ("$bin_prefix/Gumbo.i686-linux-gnu.tar.gz", "0d1de0621fc6f23ae3119f2fc8102f0af83bc1f82287e943d23159f60ab0c503"),
    BinaryProvider.Windows(:i686) => ("$bin_prefix/Gumbo.i686-w64-mingw32.tar.gz", "48f80a41d504775a7d79acffd597d5921bf3fd14f2d046f229f643c43656c9ce"),
    BinaryProvider.Linux(:powerpc64le, :glibc) => ("$bin_prefix/Gumbo.powerpc64le-linux-gnu.tar.gz", "7b7862ee7d0192dd139e75342c00993cb78c9b5b1237bf0d20a42fde855fcaf0"),
    BinaryProvider.MacOS() => ("$bin_prefix/Gumbo.x86_64-apple-darwin14.tar.gz", "717b52a0361a70e7f7637ad5cbc970f8c3648777cf382e609631319adef6abc0"),
    BinaryProvider.Linux(:x86_64, :glibc) => ("$bin_prefix/Gumbo.x86_64-linux-gnu.tar.gz", "6cf72df5dbbea1de1e0b777c70f88b9ac41a0ef728a22940aed3b910713d3ccc"),
    BinaryProvider.Windows(:x86_64) => ("$bin_prefix/Gumbo.x86_64-w64-mingw32.tar.gz", "7a2994a966ae4a408c34df408a9721760836e0115839dc42e6c3b140f9b509a8"),
)

# First, check to see if we're all satisfied
if any(!satisfied(p; verbose=verbose) for p in products)
    if haskey(download_info, platform_key())
        # Download and install binaries
        url, tarball_hash = download_info[platform_key()]
        install(url, tarball_hash; prefix=prefix, force=true, verbose=verbose)
    else
        # If we don't have a BinaryProvider-compatible .tar.gz to download, complain.
        # Alternatively, you could attempt to install from a separate provider,
        # build from source or something more even more ambitious here.
        error("Your platform $(Sys.MACHINE) is not supported by this package!")
    end
end

# Write out a deps.jl file that will contain mappings for our products
write_deps_file(joinpath(@__DIR__, "deps.jl"), products)
|
{"hexsha": "bf8ce3b10bc37452c9e947b98a7606bbd20a7186", "size": 2562, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deps/build.jl", "max_stars_repo_name": "stev47/Gumbo.jl", "max_stars_repo_head_hexsha": "cc7864e819ef2af6791d6f6633f5675400b75741", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deps/build.jl", "max_issues_repo_name": "stev47/Gumbo.jl", "max_issues_repo_head_hexsha": "cc7864e819ef2af6791d6f6633f5675400b75741", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deps/build.jl", "max_forks_repo_name": "stev47/Gumbo.jl", "max_forks_repo_head_hexsha": "cc7864e819ef2af6791d6f6633f5675400b75741", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 67.4210526316, "max_line_length": 169, "alphanum_fraction": 0.7630757221, "num_tokens": 856}
|
from math import *
from functools import reduce
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
def GlobalAxialMapAnalysis(graph):
    """
    Global (radius-unbounded) axial-map analysis.

    Populates the module-level globals per node:
      k    - node count, TD - total depth, MD - mean depth,
      RA   - relative asymmetry, RRA - RA normalized by Dk,
      IntV - integration value (1 / RRA).
    """
    global k, TD, MD, RA, RRA, IntV
    # Bug fix: in networkx >= 2, all_pairs_dijkstra_path_length returns a
    # generator, so len(d) and d[i] below would fail. Materialize it into a
    # dict, as the sibling AxialMapAnalysis already does.
    d = dict(nx.all_pairs_dijkstra_path_length(graph))
    k = len(d)
    TD = {i: reduce(lambda x, y: x + y, d[i].values()) for i in d}
    MD = {i: TD[i] / (k - 1) for i in TD}
    RA = {i: 2 * (MD[i] - 1) / (k - 2) for i in MD}
    # Size-dependent normalization term applied to every node's RA.
    Dk = 2 * (k * (log((k + 2) / 3, 2) - 1) + 1) / ((k - 1) * (k - 2))
    RRA = {i: RA[i] / Dk for i in RA}
    IntV = {i: 1 / RRA[i] for i in RRA}
def AxialMapAnalysis(graph, radius):
    """
    Radius-limited axial-map analysis: for each node, only nodes within
    topological distance `radius` contribute.

    Populates the module-level globals per node:
      k    - reachable-node count, TD - total depth, MD - mean depth,
      RA   - relative asymmetry, RRA - RA normalized by Dk,
      IntV - integration value (1 / RRA).
    """
    global k, TD, MD, RA, RRA, IntV
    d = dict(nx.all_pairs_dijkstra_path_length(graph))
    # Keep only distances within the analysis radius.
    r = {i: list(filter(lambda n: n <= radius, d[i].values())) for i in d}
    k = {i: len(r[i]) for i in d}
    TD = {i: reduce(lambda x, y: x + y, r[i]) for i in d}
    MD = {i: TD[i] / (k[i] - 1) for i in TD}
    RA = {i: 2 * (MD[i] - 1) / (k[i] - 2) for i in MD}
    # Per-node size-dependent normalization term for RA (same formula as the
    # scalar Dk in GlobalAxialMapAnalysis, but k varies per node here).
    Dk = {i: 2 * (k[i] * (log((k[i] + 2) / 3, 2) - 1) + 1) /
          ((k[i] - 1) * (k[i] - 2)) for i in d}
    RRA = {i: RA[i] / Dk[i] for i in RA}
    IntV = {i: 1 / RRA[i] for i in RRA}
def AxialMapAnalysis_from_adjlist(filename, radius=float('inf')):
    """
    Run the radius-limited axial-map analysis on a graph read from an
    adjacency-list file and print each node's depth/integration measures.
    """
    graph = nx.read_adjlist(filename, nodetype=int)
    AxialMapAnalysis(graph, radius)
    for node in IntV:
        print(node, TD[node], round(MD[node], 3), round(RA[node], 3),
              round(IntV[node], 3))
def AxialMapAnalysis_from_axiallines(filename, index, radius=float('inf')):
    """
    Build an axial-line intersection graph from a CSV of line segments
    (x1,y1,x2,y2 per row), run the radius-limited analysis, and plot the
    segments colored by the chosen measure.

    :param filename: CSV file of axial-line endpoints
    :param index: name of the module-level measure to plot
        ('TD', 'MD', 'RA', 'RRA' or 'IntV')
    :param radius: analysis radius (default: unbounded)
    """
    S = {}
    G = nx.Graph()
    # Read segments: S[i] = (xa, ya, xb, yb).
    i = 0
    for line in open(filename):
        cs = line.split(",")
        S[i] = (float(cs[0]), float(cs[1]), float(cs[2]), float(cs[3]))
        i += 1
    # Connect two axial lines iff the segments properly intersect.
    for i in S:
        xa = S[i][0]
        ya = S[i][1]
        xb = S[i][2]
        yb = S[i][3]
        for j in S:
            xc = S[j][0]
            yc = S[j][1]
            xd = S[j][2]
            yd = S[j][3]
            # fc/fd: signed side of c and d relative to line ab; opposite
            # signs mean ab's supporting line separates c from d.
            fc = (xa - xb) * (yc - ya) + (ya - yb) * (xa - xc)
            fd = (xa - xb) * (yd - ya) + (ya - yb) * (xa - xd)
            if fc * fd < 0.0:
                # fa/fb: the symmetric check of a and b against line cd.
                fa = (xc - xd) * (ya - yc) + (yc - yd) * (xc - xa)
                fb = (xc - xd) * (yb - yc) + (yc - yd) * (xc - xb)
                if fa * fb < 0.0:
                    G.add_edge(i, j)
    AxialMapAnalysis(G, radius)
    # Look up the requested measure among the module-level result globals
    # and normalize its values to [0, 1] for the colormap.
    v = list(globals()[index].values())
    v_min = reduce(min, v)
    v_max = reduce(max, v)
    cmap = mpl.cm.rainbow
    norm = mpl.colors.Normalize(vmin=v_min, vmax=v_max)
    fig = plt.figure(facecolor="w")
    plt.subplots_adjust(top=1.0, bottom=0.12, left=0.0, right=1.0)
    for i in S:
        # n = (v[i] - v_min) / (v_max - v_min)
        n = (globals()[index][i] - v_min) / (v_max - v_min)
        plt.plot([S[i][0], S[i][2]], [S[i][1], S[i][3]],
                 color=cmap(n), linewidth=2)
    plt.axis('equal')
    plt.axis('off')
    # Horizontal colorbar keyed to the plotted measure.
    ax = fig.add_axes([0.05, 0.1, 0.9, 0.02])
    cb = mpl.colorbar.ColorbarBase(
        ax, cmap=cmap, norm=norm, orientation='horizontal')
    cb.set_label(index)
    plt.show()
AxialMapAnalysis_from_axiallines('axiallines.txt', 'IntV', 3)
|
{"hexsha": "95efb46d76e11821d09c4b346314286bd671148f", "size": 3108, "ext": "py", "lang": "Python", "max_stars_repo_path": "ch4_1/axialmap3.py", "max_stars_repo_name": "o-kei/design-computing-aij", "max_stars_repo_head_hexsha": "954b46fb5f2192ab79fc003a2ca3a259e41dc7a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2017-11-11T05:09:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T05:01:22.000Z", "max_issues_repo_path": "ch4_1/axialmap3.py", "max_issues_repo_name": "o-kei/design-computing-aij", "max_issues_repo_head_hexsha": "954b46fb5f2192ab79fc003a2ca3a259e41dc7a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-02-09T18:20:25.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-10T01:32:40.000Z", "max_forks_repo_path": "ch4_1/axialmap3.py", "max_forks_repo_name": "o-kei/design-computing-aij", "max_forks_repo_head_hexsha": "954b46fb5f2192ab79fc003a2ca3a259e41dc7a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-12-17T03:06:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-28T15:37:06.000Z", "avg_line_length": 33.7826086957, "max_line_length": 76, "alphanum_fraction": 0.4974259974, "include": true, "reason": "import networkx", "num_tokens": 1144}
|
function [mg,nu,sig,info] = spm_rice_mixture(h,x,K)
% Fit a mixture of Ricians to a histogram
% FORMAT [mg,nu,sig] = spm_rice_mixture(h,x,K)
% h    - histogram counts
% x    - bin positions (plot(x,h) to see the histogram)
% K    - number of Ricians
% mg   - integral under each Rician
% nu   - "mean" parameter of each Rician
% sig  - "standard deviation" parameter of each Rician
% info - This struct can be used for plotting the fit as:
%        plot(info.x(:),info.p,'--',info.x(:), ...
%             info.h/sum(info.h)/info.md,'b.', ...
%             info.x(:),info.sp,'r');
%
% An EM algorithm is used, which involves alternating between computing
% belonging probabilities, and then the parameters of the Ricians.
% The Koay inversion technique is used to compute the Rician parameters
% from the sample means and standard deviations. This is described at
% https://en.wikipedia.org/wiki/Rician_distribution
%__________________________________________________________________________
% Copyright (C) 2012-2019 Wellcome Trust Centre for Neuroimaging

% John Ashburner
% $Id: spm_rice_mixture.m 7595 2019-05-23 13:48:53Z mikael $

% Initial guesses: uniform mixing proportions, nu values spread evenly over
% the intensity range, and deliberately small standard deviations.
mg  = ones(K,1)/K;
nu  = (0:(K-1))'*max(x)/(K+1);
sig = ones(K,1)*max(x)/K/10;

lam = (sum(x.*h)/sum(h)/K).^2; % regularisation constant for variance estimates

m0 = zeros(K,1); % zeroth moment (weighted count) per class
m1 = zeros(K,1); % first moment (weighted sum) per class
m2 = zeros(K,1); % second moment (weighted sum of squares) per class
ll = -Inf;       % log-likelihood of the previous EM iteration
for iter=1:10000
    % E-step: joint probabilities of class membership and data.
    p  = zeros(numel(x),K);
    for k=1:K
        % Product Rule
        % p(class=k, x | mg, nu, sig) = p(class=k|mg) p(x | nu, sig, class=k)
        % eps avoids log(0) and division by zero below.
        p(:,k) = mg(k)*ricepdf(x(:),nu(k),sig(k)^2) + eps;
    end

    % Sum Rule
    % p(x | mg, nu, sig) = \sum_k p(class=k, x | mg, nu, sig)
    sp  = sum(p,2);
    oll = ll;
    ll  = sum(log(sp).*h(:)); % Log-likelihood
    % Stop once the log-likelihood has effectively converged.
    if ll-oll<1e-8*sum(h), break; end

    %fprintf('%g\n',ll);
    %md = mean(diff(x));
    %plot(x(:),p,'--',x(:),h/sum(h)/md,'b.',x(:),sp,'r'); drawnow

    % Bayes Rule
    % p(class=k | x, mg, nu, sig) = p(class=k, x | mg, nu, sig) / p(x | mg, nu, sig)
    p = bsxfun(@rdivide,p,sp);

    % M-step:
    % Compute moments from the histograms, weighted by the responsibilities (p).
    for k=1:K
        m0(k) = sum(p(:,k).*h(:));             % Number of voxels in class k
        m1(k) = sum(p(:,k).*h(:).*x(:));       % Sum of the intensities in class k
        m2(k) = sum(p(:,k).*h(:).*x(:).*x(:)); % Sum of squares of intensities in class k
    end

    mg = m0/sum(m0); % Mixing proportions
    for k=1:K
        % Sample mean and (regularised) variance of class k.
        mu1 = m1(k)./m0(k);                                    % Mean
        mu2 = (m2(k)-m1(k)*m1(k)/m0(k)+lam*1e-3)/(m0(k)+1e-3); % Variance

        % Compute nu & sig from mean and variance
        [nu(k),sig(k)] = moments2param(mu1,mu2);
    end
    %disp([nu'; sig'])
end

if nargout >= 4
    % This info can be used for plotting the fit
    info    = struct;
    info.x  = x;
    info.h  = h;
    info.p  = p;
    info.sp = sp;
    info.md = mean(diff(x));
end
%__________________________________________________________________________
%__________________________________________________________________________
function [nu,sig] = moments2param(mu1,mu2)
% Rician parameter estimation (nu & sig) from mean (mu1) and variance
% (mu2) via the Koay inversion technique.
% This follows the scheme at
% https://en.wikipedia.org/wiki/Rice_distribution#Parameter_estimation_.28the_Koay_inversion_technique.29
% This Wikipedia description is based on:
% Koay, C.G. and Basser, P. J., Analytically exact correction scheme
% for signal extraction from noisy magnitude MR signals,
% Journal of Magnetic Resonance, Volume 179, Issue = 2, p. 317-322, (2006)

r     = mu1/sqrt(mu2);    % ratio of sample mean to sample standard deviation
theta = sqrt(pi/(4-pi));  % starting value; smallest r attainable by a Rician (nu=0)
if r>theta
    % Fixed-point iteration for the Koay correction factor xi(theta).
    for i=1:256
        xi    = 2+theta^2-pi/8*exp(-theta^2/2)*((2+theta^2)*besseli(0,theta^2/4)+theta^2*besseli(1,theta^2/4))^2;
        g     = sqrt(xi*(1+r^2)-2);
        % Converged once theta is (approximately) a fixed point.
        if abs(theta-g)<1e-6, break; end
        theta = g;
    end
    % Fall back to xi=1 if the Bessel evaluations over/underflowed.
    if ~isfinite(xi), xi = 1; end
    sig = sqrt(mu2)/sqrt(xi);
    nu  = sqrt(mu1^2+(xi-2)*sig^2);
else
    % r is below the Rician lower bound: treat as a central (nu=0) case.
    nu  = 0;
    sig = (2^(1/2)*(mu1^2 + mu2)^(1/2))/2;
end
%__________________________________________________________________________
%__________________________________________________________________________
function p = ricepdf(x,nu,sig2)
% Rician probability density function.
% FORMAT p = ricepdf(x,nu,sig2)
% x    - points at which to evaluate the density
% nu   - "mean" (noncentrality) parameter
% sig2 - "variance" (scale) parameter
% p    - density values, the same size as x
%
% See https://en.wikipedia.org/wiki/Rice_distribution#Characterization
expnt   = -(x.^2+nu.^2)./(2*sig2); % exponent of the Rician density
bessarg = x*(nu/sig2);             % argument passed to besseli
% Restrict the exact Rician formula to where exp/besseli stay in range.
stable  = (expnt > -95) & (bessarg < 85);
p          = zeros(size(x));
% Exact Rician density where numerically safe ...
p(stable)  = (x(stable)./sig2).*exp(expnt(stable)).*besseli(0,bessarg(stable));
% ... and a Gaussian approximation elsewhere.
p(~stable) = (1./sqrt(2*pi*sig2))*exp((-0.5/sig2)*(x(~stable)-nu).^2);
|
{"author": "spm", "repo": "spm12", "sha": "3085dac00ac804adb190a7e82c6ef11866c8af02", "save_path": "github-repos/MATLAB/spm-spm12", "path": "github-repos/MATLAB/spm-spm12/spm12-3085dac00ac804adb190a7e82c6ef11866c8af02/toolbox/Longitudinal/spm_rice_mixture.m"}
|
# Tests for Malmquist DEA Model
@testset "MalmquistDEAModel" begin

    ## Test Malmquist DEA Model with 1 input and 1 output
    # Panel data: 5 observations, 1 input, 2 time periods.
    X = Array{Float64,3}(undef, 5, 1, 2)
    X[:, :, 1] = [2; 3; 5; 4; 4];
    X[:, :, 2] = [1; 2; 4; 3; 4];

    Y = Array{Float64,3}(undef, 5, 1, 2)
    Y[:, :, 1] = [1; 4; 6; 3; 5];
    Y[:, :, 2] = [1; 4; 6; 3; 3];

    # Default Malmquist Productivity Index
    mprod = malmquist(X, Y)

    # Check model type and reported dimensions.
    @test typeof(mprod) == MalmquistDEAModel

    @test nobs(mprod) == 5
    @test ninputs(mprod) == 1
    @test noutputs(mprod) == 1
    @test nperiods(mprod) == 2
    # Productivity change between the two periods, per observation.
    @test prodchange(mprod) ≈ [2.0000000000;
                               1.5000000000;
                               1.2500000000;
                               1.3333333333;
                               0.6000000000]
    @test prodchange(mprod, :Prod) == prodchange(mprod)
    # Decomposition: efficiency change (EC) ...
    @test prodchange(mprod, :EC) ≈ [1.3333333333;
                               1.0000000000;
                               0.8333333333;
                               0.8888888889;
                               0.4000000000];
    # ... and technical change (TC); Prod = EC * TC.
    @test prodchange(mprod, :TC) ≈ [1.5; 1.5; 1.5; 1.5; 1.5];

    # Default output oriented
    mprodoo = malmquist(X, Y, orient = :Output)
    @test prodchange(mprodoo) == prodchange(mprod)
    @test prodchange(mprodoo, :Prod) == prodchange(mprod, :Prod)
    @test prodchange(mprodoo, :EC) == prodchange(mprod, :EC)
    @test prodchange(mprodoo, :TC) == prodchange(mprod, :TC)

    # Test geomean is the geometric mean of TC
    mprodbase = malmquist(X, Y, refperiod = :Base)
    mprodcomparison = malmquist(X, Y, refperiod = :Comparison)
    @test prodchange(mprod, :TC) == sqrt.( prodchange(mprodbase, :TC) .* prodchange(mprodcomparison, :TC) )

    ## Test Malmquist DEA Model with 1 input and 1 output; and 3 years
    X = Array{Float64,3}(undef, 5, 1, 3)
    X[:, :, 1] = [2; 3; 5; 4; 4];
    X[:, :, 2] = [1; 2; 4; 3; 4];
    X[:, :, 3] = [0.5; 1.5; 3; 2; 4]

    Y = Array{Float64,3}(undef, 5, 1, 3)
    Y[:, :, 1] = [1; 4; 6; 3; 5];
    Y[:, :, 2] = [1; 4; 6; 3; 3];
    Y[:, :, 3] = [2; 4; 6; 3; 1];

    # Default Malmquist Productivity Index
    mprod3 = malmquist(X, Y)

    @test nobs(mprod3) == 5
    @test ninputs(mprod3) == 1
    @test noutputs(mprod3) == 1
    # Three periods produce two period-to-period transitions (columns).
    @test nperiods(mprod3) == 3
    @test prodchange(mprod3) ≈ [2.0000000000 4.0000000;
                               1.5000000000 1.3333333;
                               1.2500000000 1.3333333;
                               1.3333333333 1.5000000;
                               0.6000000000 0.3333333]
    @test prodchange(mprod3, :Prod) == prodchange(mprod3)
    @test prodchange(mprod3, :EC) ≈ [1.3333333333 2.0000000;
                               1.0000000000 0.6666667;
                               0.8333333333 0.6666667;
                               0.8888888889 0.7500000;
                               0.4000000000 0.1666667] atol = 1e-7;
    @test prodchange(mprod3, :TC) ≈ [1.5 2.0; 1.5 2.0; 1.5 2.0; 1.5 2.0; 1.5 2.0];

    # Print
    # Smoke-test the show methods without polluting test output.
    show(IOBuffer(), mprod)
    show(IOBuffer(), mprod3)

    # Test errors
    @test_throws DimensionMismatch malmquist(X[1:4,:,:], X[1:5,:,:]) # Different number of observations in inputs and outputs
    @test_throws DimensionMismatch malmquist(X[:,:,1:2], X[:,:,1:3]) # Different number of time periods in inputs and outputs
    @test_throws ArgumentError malmquist(X, Y, refperiod = :Error) # Invalid reference period
    @test_throws ArgumentError prodchange(mprod, :Error)

end
|
{"hexsha": "1a650cc0557dd451dd22953cd65aaa70c1963002", "size": 3556, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/malmquist.jl", "max_stars_repo_name": "simeonschaub/DataEnvelopmentAnalysis.jl", "max_stars_repo_head_hexsha": "ef5caf760c212460402b94db3f7c8623ee114f63", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-06-07T10:48:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T20:27:36.000Z", "max_issues_repo_path": "test/malmquist.jl", "max_issues_repo_name": "simeonschaub/DataEnvelopmentAnalysis.jl", "max_issues_repo_head_hexsha": "ef5caf760c212460402b94db3f7c8623ee114f63", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2020-02-08T15:30:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T01:46:43.000Z", "max_forks_repo_path": "test/malmquist.jl", "max_forks_repo_name": "simeonschaub/DataEnvelopmentAnalysis.jl", "max_forks_repo_head_hexsha": "ef5caf760c212460402b94db3f7c8623ee114f63", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:58:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-10T23:41:55.000Z", "avg_line_length": 39.0769230769, "max_line_length": 125, "alphanum_fraction": 0.5227784027, "num_tokens": 1287}
|
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <boost/pending/disjoint_sets.hpp>
#include <vector>
#include <queue>
#include <map>
using namespace std;
/*
 * Comparison functor ordering edge indices by decreasing affinity weight.
 * Sorting an index array with it visits edges from strongest to weakest,
 * i.e. maximum-spanning-tree order.  Holds a non-owning pointer to the
 * caller's weight array, which must outlive the functor.
 */
template <class T>
class AffinityGraphCompare{
private:
    const T * weights_;  // non-owning pointer to per-edge weights
public:
    AffinityGraphCompare(const T * EdgeWeightArray)
        : weights_(EdgeWeightArray) {
    }
    // Strict weak ordering: true when edge ind1 carries the larger weight.
    bool operator() (const int& ind1, const int& ind2) const {
        return weights_[ind1] > weights_[ind2];
    }
};
/*
* Compute the MALIS loss function and its derivative wrt the affinity graph
* MAXIMUM spanning tree
* Author: Srini Turaga (sturaga@mit.edu)
* All rights reserved
*/
/*
 * MALIS loss weighting: for every edge, count the number of ground-truth
 * voxel pairs whose connectivity is decided by that edge when a MAXIMUM
 * spanning tree is built (Kruskal's algorithm on edges sorted by
 * decreasing affinity).
 *
 * nVert        - number of vertices (voxels)
 * seg          - ground-truth segment id per vertex (0 = ignored)
 * nEdge        - number of edges
 * node1, node2 - edge endpoint indices
 * edgeWeight   - per-edge affinities
 * pos          - nonzero: count pairs with the SAME segment id (positive pass);
 *                zero: count pairs with DIFFERENT segment ids (negative pass)
 * nPairPerEdge - output, pair counts accumulated per edge (caller-initialised)
 */
void malis_loss_weights_cpp(const int nVert, const int* seg,
               const int nEdge, const int* node1, const int* node2, const float* edgeWeight,
               const int pos,
               int* nPairPerEdge){

    /* Disjoint sets and sparse overlap vectors */
    // overlap[v]: for the set represented by v, ground-truth id -> voxel count.
    vector<map<int,int> > overlap(nVert);
    vector<int> rank(nVert);
    vector<int> parent(nVert);
    boost::disjoint_sets<int*, int*> dsets(&rank[0],&parent[0]);
    for (int i=0; i<nVert; ++i){
        dsets.make_set(i);
        // Vertices with segment id 0 carry no pairs and start with empty bags.
        if (0!=seg[i]) {
            overlap[i].insert(pair<int,int>(seg[i],1));
        }
    }

    /* Sort all the edges in decreasing order of weight
     * (AffinityGraphCompare puts larger affinities first). */
    std::vector< int > pqueue( nEdge );
    int j = 0;
    // Keep only edges whose endpoints are valid vertex indices.
    for ( int i = 0; i < nEdge; i++ ){
        if ((node1[i]>=0) && (node1[i]<nVert) && (node2[i]>=0) && (node2[i]<nVert))
            pqueue[ j++ ] = i;
    }
    unsigned long nValidEdge = j;
    pqueue.resize(nValidEdge);
    sort( pqueue.begin(), pqueue.end(), AffinityGraphCompare<float>( edgeWeight ) );

    /* Start MST */
    int e;
    int set1, set2;
    int nPair = 0;
    map<int,int>::iterator it1, it2;

    /* Start Kruskal's */
    for (unsigned int i = 0; i < pqueue.size(); ++i ) {
        // e is the actual edge index; edges arrive strongest-first.
        e = pqueue[i];

        set1 = dsets.find_set(node1[e]);
        set2 = dsets.find_set(node2[e]);

        if (set1!=set2){
            dsets.link(set1, set2);

            /* compute the number of pairs merged by this MST edge */
            // Every (voxel in set1, voxel in set2) combination first becomes
            // connected at this edge; attribute those pairs to it.
            for (it1 = overlap[set1].begin();
                    it1 != overlap[set1].end(); ++it1) {
                for (it2 = overlap[set2].begin();
                        it2 != overlap[set2].end(); ++it2) {

                    nPair = it1->second * it2->second;

                    if (pos && (it1->first == it2->first)) {
                        // positive pass: pair lies inside one true segment
                        nPairPerEdge[e] += nPair;
                    } else if ((!pos) && (it1->first != it2->first)) {
                        // negative pass: pair straddles two true segments
                        nPairPerEdge[e] += nPair;
                    }
                }
            }

            /* move the pixel bags of the non-representative to the representative */
            if (dsets.find_set(set1) == set2) // make set1 the rep to keep and set2 the rep to empty
                swap(set1,set2);

            it2 = overlap[set2].begin();
            while (it2 != overlap[set2].end()) {
                it1 = overlap[set1].find(it2->first);
                if (it1 == overlap[set1].end()) {
                    overlap[set1].insert(pair<int,int>(it2->first,it2->second));
                } else {
                    it1->second += it2->second;
                }
                overlap[set2].erase(it2++);
            }
        } // end link

    } // end while
}
void connected_components_cpp(const int nVert,
const int nEdge, const int* node1, const int* node2, const int* edgeWeight,
int* seg){
/* Make disjoint sets */
vector<int> rank(nVert);
vector<int> parent(nVert);
boost::disjoint_sets<int*, int*> dsets(&rank[0],&parent[0]);
for (int i=0; i<nVert; ++i)
dsets.make_set(i);
/* union */
for (int i = 0; i < nEdge; ++i )
// check bounds to make sure the nodes are valid
if ((edgeWeight[i]!=0) && (node1[i]>=0) && (node1[i]<nVert) && (node2[i]>=0) && (node2[i]<nVert))
dsets.union_set(node1[i],node2[i]);
/* find */
for (int i = 0; i < nVert; ++i)
seg[i] = dsets.find_set(i);
}
/*
 * Marker-based watershed on an affinity graph.
 *
 * nVert        - number of vertices
 * marker       - per-vertex seed labels (>0 seed, 0 unlabeled; vertices with
 *                a negative marker are excluded from merging)
 * nEdge        - number of edges
 * node1, node2 - edge endpoint indices
 * edgeWeight   - per-edge affinities (larger = stronger connection)
 * seg          - output labeling, one label per vertex
 *
 * Edges are visited from strongest to weakest (maximum-spanning-tree order);
 * a merge is accepted only when at least one side is still an unlabeled,
 * unseeded region, so regions grown from distinct seeds never merge.
 */
void marker_watershed_cpp(const int nVert, const int* marker,
               const int nEdge, const int* node1, const int* node2, const float* edgeWeight,
               int* seg){

    /* Make disjoint sets */
    vector<int> rank(nVert);
    vector<int> parent(nVert);
    boost::disjoint_sets<int*, int*> dsets(&rank[0],&parent[0]);
    for (int i=0; i<nVert; ++i)
        dsets.make_set(i);

    /* initialize output array and find a representative vertex per label */
    std::map<int,int> components;
    for (int i=0; i<nVert; ++i){
        seg[i] = marker[i];
        if (seg[i] > 0)
            components[seg[i]] = i;
    }
    // merge vertices labeled with the same marker
    for (int i=0; i<nVert; ++i)
        if (seg[i] > 0)
            dsets.union_set(components[seg[i]],i);

    /* Sort all the edges in decreasing order of weight; drop edges that are
     * zero-weight, out of bounds, or touch an excluded (negative) marker. */
    std::vector<int> pqueue( nEdge );
    int j = 0;
    for (int i = 0; i < nEdge; ++i)
        if ((edgeWeight[i]!=0) &&
            (node1[i]>=0) && (node1[i]<nVert) &&
            (node2[i]>=0) && (node2[i]<nVert) &&
            (marker[node1[i]]>=0) && (marker[node2[i]]>=0))
            pqueue[ j++ ] = i;

    unsigned long nValidEdge = j;
    pqueue.resize(nValidEdge);
    sort( pqueue.begin(), pqueue.end(), AffinityGraphCompare<float>( edgeWeight ) );

    /* Start MST (Kruskal's on the sorted edge queue) */
    int e;
    int set1, set2, label_of_set1, label_of_set2;
    for (unsigned int i = 0; i < pqueue.size(); ++i ) {
        /* BUG FIX: look up the edge through the sorted queue (pqueue[i]);
         * the previous code indexed node1/node2 by the loop counter i,
         * which ignored the sort and visited edges in storage order. */
        e = pqueue[i];
        set1 = dsets.find_set(node1[e]);
        set2 = dsets.find_set(node2[e]);
        label_of_set1 = seg[set1];
        label_of_set2 = seg[set2];

        /* Merge only when at least one side is still unlabeled and unseeded.
         * BUG FIX: the second clause previously re-tested marker[set1]
         * instead of marker[set2]. */
        if ((set1!=set2) &&
            ( ((label_of_set1==0) && (marker[set1]==0)) ||
              ((label_of_set2==0) && (marker[set2]==0))) ){

            dsets.link(set1, set2);
            // either label_of_set1 is 0 or label_of_set2 is 0.
            seg[dsets.find_set(set1)] = std::max(label_of_set1,label_of_set2);
        }
    }

    // write out the final coloring
    for (int i=0; i<nVert; i++)
        seg[i] = seg[dsets.find_set(i)];
}
|
{"hexsha": "7dba696326ae988e10f671cba2662211d8dffb71", "size": 6163, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "dataset_06/malis/malis_cpp.cpp", "max_stars_repo_name": "naibaf7/caffe_neural_models", "max_stars_repo_head_hexsha": "9d372c4bc599029902185e19f89e5c39f842fff7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 28.0, "max_stars_repo_stars_event_min_datetime": "2015-06-11T07:48:29.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-28T01:29:09.000Z", "max_issues_repo_path": "dataset_06/malis/malis_cpp.cpp", "max_issues_repo_name": "naibaf7/caffe_neural_models", "max_issues_repo_head_hexsha": "9d372c4bc599029902185e19f89e5c39f842fff7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2015-10-01T13:14:46.000Z", "max_issues_repo_issues_event_max_datetime": "2016-09-28T16:25:35.000Z", "max_forks_repo_path": "dataset_06/malis/malis_cpp.cpp", "max_forks_repo_name": "naibaf7/caffe_neural_models", "max_forks_repo_head_hexsha": "9d372c4bc599029902185e19f89e5c39f842fff7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 16.0, "max_forks_repo_forks_event_min_datetime": "2015-07-08T18:47:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-26T13:48:48.000Z", "avg_line_length": 31.2842639594, "max_line_length": 105, "alphanum_fraction": 0.5351289956, "num_tokens": 1739}
|
import pickle
import unittest
from collections import OrderedDict
import numpy as np
from qtt.instrument_drivers.virtual_gates import VirtualGates, extend_virtual_gates, update_cc_matrix
from qtt.instrument_drivers.virtual_instruments import VirtualIVVI
from qtt.measurements.scans import instrumentName
class TestVirtualGates(unittest.TestCase):
    """Tests for the VirtualGates instrument driver and its helpers
    (extend_virtual_gates, update_cc_matrix, serialization round-trips)."""

    def setUp(self):
        # Cross-capacitance map: virtual gate -> {physical gate: coupling}.
        # Rows double as the rows of the cross-capacitance matrix below.
        self.crosscap_map = OrderedDict((
            ('VP1', OrderedDict((('P1', 1), ('P2', 0.6), ('P3', 0)))),
            ('VP2', OrderedDict((('P1', 0.3), ('P2', 1), ('P3', 0.3)))),
            ('VP3', OrderedDict((('P1', 0), ('P2', 0), ('P3', 1))))
        ))

    def test_VirtualGates(self, verbose=0):
        """ Test for virtual gates object """
        # Simulated IVVI rack supplying the physical gates.
        gates = VirtualIVVI(name=instrumentName('testivvi'),
                            model=None, gates=['P1', 'P2', 'P3', 'P4'])
        vgates = VirtualGates(instrumentName('testvgates'), gates, self.crosscap_map)

        # Setting the same value twice should be a harmless no-op-style update.
        vp1 = vgates.VP1()
        if verbose:
            print('before set: VP1 {}'.format(vp1))
        vgates.VP1.set(10)
        vp1 = vgates.VP1()
        if verbose:
            print('after set: VP1 {}'.format(vp1))
        vgates.VP1.set(10)
        vp1 = vgates.VP1()
        if verbose:
            print('after second set: VP1 {}'.format(vp1))

        # Map <-> matrix conversions (round-trip smoke test).
        vgates_matrix = vgates.convert_map_to_matrix(self.crosscap_map)
        _ = vgates.convert_matrix_to_map(vgates_matrix)

        # Set several virtual gates at once and read all values back.
        vgates.multi_set({'VP1': 10, 'VP2': 20, 'VP3': 30})
        all_values = vgates.allvalues()
        self.assertTrue(isinstance(all_values, dict))

        # Matrix entries should match the couplings from setUp.
        crosscap_matrix = vgates.get_crosscap_matrix()
        self.assertEqual(1.0, crosscap_matrix[0][0])
        self.assertEqual(0.6, crosscap_matrix[0][1])

        vgates.set_distances(1.0 / np.arange(1, 5))

        # Serialization round-trip preserves the physical gate list.
        vgates_dictionary = vgates.to_dictionary()
        vgates_new = VirtualGates.from_dictionary(vgates_dictionary, gates, 'new_vgates')
        self.assertEqual(vgates_new.name, 'new_vgates')
        self.assertEqual(vgates_new.pgates(), vgates.pgates())
        vgates_new.close()

        # Extend the virtual gate set with one extra gate pair (vP4/P4).
        v_gates = vgates.vgates() + ['vP4']
        p_gates = vgates.pgates() + ['P4']
        extended_vgates = extend_virtual_gates(v_gates, p_gates, vgates, name='vgates')
        if verbose:
            extended_vgates.print_matrix()
        extended_vgates.close()

        # Identity update should leave the cross-capacitance unchanged.
        newvg, _, _ = update_cc_matrix(vgates, update_cc=np.eye(3), verbose=verbose)
        newvg.close()

        # Random near-identity update matrix (diagonal forced to 1).
        update_matrix = 0.1 * np.random.rand(3, 3)
        np.fill_diagonal(update_matrix, 1)
        # test normalization of virtual gate matrix
        extended_vgates, _, _ = update_cc_matrix(vgates, update_cc=update_matrix, verbose=verbose)
        np.testing.assert_almost_equal(extended_vgates.get_crosscap_matrix(),
                                       update_matrix.dot(vgates.get_crosscap_matrix()))

        # test normalization of virtual gate matrix
        # After normalize_matrix each row is divided by its diagonal element,
        # so the diagonal becomes exactly 1.
        serialized_matrix = extended_vgates.get_crosscap_matrix()
        extended_vgates.normalize_matrix()
        crosscap_matrix = extended_vgates.get_crosscap_matrix()
        for row in range(serialized_matrix.shape[0]):
            np.testing.assert_almost_equal(serialized_matrix[row] / serialized_matrix[row][row], crosscap_matrix[row])
        cc_matrix_diagonal = crosscap_matrix.diagonal()
        np.testing.assert_almost_equal(cc_matrix_diagonal, 1.)

        # Clean up instruments so names can be reused by other tests.
        vgates.close()
        extended_vgates.close()
        gates.close()

    def test_VirtualGates_serialization(self):
        """ Test for virtual gates object """
        # Round-trip through to_dictionary/from_dictionary must preserve
        # the inverse cross-capacitance matrix and the physical gate list.
        gates = VirtualIVVI(
            name=instrumentName('ivvi_dummy_serialization_test'), model=None,
            gates=['P1', 'P2', 'P3', 'P4'])
        virts = VirtualGates(instrumentName('testvgates'), gates, self.crosscap_map)
        vgdict = virts.to_dictionary()
        vx = VirtualGates.from_dictionary(vgdict, gates, name=instrumentName('vgdummy'))
        np.testing.assert_almost_equal(vx.get_crosscap_matrix_inv(), virts.get_crosscap_matrix_inv())
        self.assertTrue(vx.pgates() == ['P%d' % i for i in range(1, 4)])
        vx.close()
        gates.close()
        virts.close()
|
{"hexsha": "b1dd020868463db39829fe2473d865480a7264d6", "size": 4182, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tests/unittests/instrument_drivers/test_virtual_gates.py", "max_stars_repo_name": "codecrap/qtt", "max_stars_repo_head_hexsha": "39a8bf21f7bcab94940a66f4d553a14bf34f82b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2018-09-13T14:14:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T22:02:29.000Z", "max_issues_repo_path": "src/tests/unittests/instrument_drivers/test_virtual_gates.py", "max_issues_repo_name": "codecrap/qtt", "max_issues_repo_head_hexsha": "39a8bf21f7bcab94940a66f4d553a14bf34f82b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 136, "max_issues_repo_issues_event_min_datetime": "2018-08-30T19:38:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:05:29.000Z", "max_forks_repo_path": "src/tests/unittests/instrument_drivers/test_virtual_gates.py", "max_forks_repo_name": "codecrap/qtt", "max_forks_repo_head_hexsha": "39a8bf21f7bcab94940a66f4d553a14bf34f82b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2018-11-04T09:00:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T01:40:08.000Z", "avg_line_length": 38.7222222222, "max_line_length": 118, "alphanum_fraction": 0.6398852224, "include": true, "reason": "import numpy", "num_tokens": 1058}
|
! Apply a block 7-point stencil operator to a vector field:
! at each grid point, g3 = (5x5 diagonal block g2) * g1 at the point,
! plus the six off-diagonal blocks (f0,f9,f8,f7,f6,f5) applied to g1 at
! the periodically wrapped forward/backward neighbours in each of the
! three grid directions.  Argument f4 is accepted but unused here.
      subroutine foo(f1,f2,f3,f4,f5,f6,f7,f8,f9,f0,g1,g2,g3)
      implicit none
      integer f4,f3,f2,f1
      integer g4,g5,g6,g7,g8,g9
      integer i1,i2,i3,i4,i5
      real*8 g1(5,f3,f2,f1),g2(5,5,f3,f2,f1),g3(5,f3,f2,f1)
      real*8 f0(5,5,f3,f2,f1),f9(5,5,f3,f2,f1),f8(5,5,f3,f2,f1)
      real*8 f7(5,5,f3,f2,f1),f6(5,5,f3,f2,f1),f5(5,5,f3,f2,f1)

      do i3=1,f1
! g8/g9: backward/forward neighbour index in direction 3 (periodic wrap)
         g8=mod(i3+f1-2,f1)+1
         g9=mod(i3,f1)+1
         do i4=1,f2
! g6/g7: backward/forward neighbour index in direction 2 (periodic wrap)
            g6=mod(i4+f2-2,f2)+1
            g7=mod(i4,f2)+1
            do i5=1,f3
! g4/g5: backward/forward neighbour index in direction 1 (periodic wrap)
               g4=mod(i5+f3-2,f3)+1
               g5=mod(i5,f3)+1
               do i1=1,5
                  g3(i1,i5,i4,i3)=0.0d0
! Accumulate the 5x5 block matrix-vector products over the stencil.
                  do i2=1,5
                     g3(i1,i5,i4,i3)=g3(i1,i5,i4,i3)+
     1                    g2(i1,i2,i5,i4,i3)*g1(i2,i5,i4,i3)+
     2                    f0(i1,i2,i5,i4,i3)*g1(i2,g5,i4,i3)+
     3                    f9(i1,i2,i5,i4,i3)*g1(i2,i5,g7,i3)+
     4                    f8(i1,i2,i5,i4,i3)*g1(i2,i5,i4,g9)+
     5                    f7(i1,i2,i5,i4,i3)*g1(i2,g4,i4,i3)+
     6                    f6(i1,i2,i5,i4,i3)*g1(i2,i5,g6,i3)+
     7                    f5(i1,i2,i5,i4,i3)*g1(i2,i5,i4,g8)
                  enddo
               enddo
            enddo
         enddo
      enddo
      return
      end
! We should be able to interchange this as the number of iterations is
! known to be 4 in the inner two loops. See interchange-2.f for the
! kernel from bwaves.
! { dg-final { scan-tree-dump-times "will be interchanged" 1 "graphite" { xfail *-*-* } } }
|
{"hexsha": "d19cf70c1661d9a1189aa2f40d381acd8729eb70", "size": 1551, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/graphite/interchange-1.f", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/graphite/interchange-1.f", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/graphite/interchange-1.f", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 34.4666666667, "max_line_length": 91, "alphanum_fraction": 0.4603481625, "num_tokens": 663}
|
% -----------------------------------------------------------------------------
% Author : Bimalka Piyaruwan Thalagala
% GitHub : https://github.com/bimalka98
% Date Created : 11/8/2021
% Last Modified :
% -----------------------------------------------------------------------------
\documentclass[a4paper,11pt]{article}%,twocolumn
\input{settings/packages}
\input{settings/page}
\input{settings/macros}
\usepackage[ framed, numbered]{matlab-prettifier}%framed,%
\usepackage{listings}
\usepackage{pythonhighlight}
\usepackage{pdfpages}
\begin{document}
\input{content/title_page}
\pagebreak
%%-----------------------------------------------------------------------
\includepdf[pages=-, width=\textwidth]{code/answers.pdf}
\section*{Question 4}
\subsection*{(a) The sequence of signals (waveforms) corresponding to the binary sequence}
Please note that when plotting the below figure following values were assumed for the parameters.\\
\begin{tabular}{l l}
Bit interval ($T_b$) & 1 $s$ \\
Symbol interval (T) &$2 \times T_b = $ 2 $s$
\end{tabular}
\begin{figure}[!h]
\centering
\includegraphics[scale=0.35]{figures/fig4a}
\end{figure}
\lstinputlisting[basicstyle = \mlttfamily\scriptsize , style = Matlab-editor]{code/codea.m}
\pagebreak
\subsection*{(b) The Quaternary MSK phase trajectory}
Please note that when plotting the below figure following values were assumed for the parameters.\\
\begin{tabular}{l l}
Bit interval ($T_b$) & 1 $s$ \\
Symbol interval (T) &$2 \times T_b = $ 2 $s$
\end{tabular}
\begin{figure}[!h]
\centering
\includegraphics[scale=0.35]{figures/fig4b}
\end{figure}
\lstinputlisting[basicstyle = \mlttfamily\scriptsize , style = Matlab-editor]{code/codeb.m}
\pagebreak
\subsection*{(c) The Quaternary MSK waveform corresponding to the binary sequence}
Please note that when plotting the below figure following values were assumed for the parameters.\\
\begin{tabular}{l l}
Bit interval ($T_b$) & 1 $s$ \\
Symbol interval (T) &$2 \times T_b = $ 2 $s$
\end{tabular}
\begin{figure}[!h]
\centering
\includegraphics[scale=0.35]{figures/fig4c}
\end{figure}
\lstinputlisting[basicstyle = \mlttfamily\scriptsize , style = Matlab-editor]{code/codec.m}
%---------------------------------------------------------------------------
\end{document}
|
{"hexsha": "b3a2801386ef2ae79ba230ee8667246707e0fed5", "size": 2325, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Assignment 01/LaTeX Report/EN3053_180631J_A1.tex", "max_stars_repo_name": "bimalka98/EN3053-Digital-Communications-I", "max_stars_repo_head_hexsha": "723d984fc12e2d27743855a2a791f999c2148426", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment 01/LaTeX Report/EN3053_180631J_A1.tex", "max_issues_repo_name": "bimalka98/EN3053-Digital-Communications-I", "max_issues_repo_head_hexsha": "723d984fc12e2d27743855a2a791f999c2148426", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment 01/LaTeX Report/EN3053_180631J_A1.tex", "max_forks_repo_name": "bimalka98/EN3053-Digital-Communications-I", "max_forks_repo_head_hexsha": "723d984fc12e2d27743855a2a791f999c2148426", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0625, "max_line_length": 99, "alphanum_fraction": 0.6288172043, "num_tokens": 639}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
class MNISTCNNModel(nn.Module):
    """Single-conv-layer CNN for 28x28 grayscale MNIST digits.

    Architecture: Conv2d(1 -> 16, kernel 5, stride 1, padding 4) -> ReLU ->
    MaxPool2d(1) -> flatten -> Linear(16*32*32 -> 10 logits).  With a 28x28
    input, padding 4 makes the conv output 32x32; MaxPool2d(1) is effectively
    an identity kept for architectural symmetry.
    """

    # Parameter shapes in the order they are flattened (conv weight,
    # conv bias, fc weight, fc bias) — used by `reshape` below.
    _PARAM_SHAPES = [(16, 1, 5, 5), (16,), (10, 16 * 32 * 32), (10,)]

    def __init__(self):
        super(MNISTCNNModel, self).__init__()
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 16, 5, 1, 4),  # (N, 16, 32, 32) from 28x28 input
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(1),
        )
        self.fc1 = nn.Linear(16 * 32 * 32, 10)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input x of shape
        (batch, 1, H, W)."""
        features = self.layer1(x)
        flat = features.reshape(features.size(0), -1)
        return self.fc1(flat)

    def reshape(self, flat_gradient):
        """Unflatten a 1-D gradient vector into per-parameter float tensors.

        Returns a list of torch.FloatTensor in model-parameter order:
        [conv weight (16,1,5,5), conv bias (16,), fc weight (10, 16*32*32),
        fc bias (10,)].
        """
        layers = []
        offset = 0
        for shape in self._PARAM_SHAPES:
            size = int(np.prod(shape))
            chunk = flat_gradient[offset:offset + size]
            layers.append(
                torch.from_numpy(np.reshape(chunk, shape)).type(torch.FloatTensor))
            offset += size
        return layers
|
{"hexsha": "b4c806201d2093d6a0098d322cb60904df6a9015", "size": 2950, "ext": "py", "lang": "Python", "max_stars_repo_path": "ML/Pytorch/mnist_cnn_model.py", "max_stars_repo_name": "DistributedML/Biscotti", "max_stars_repo_head_hexsha": "dfba71b3924e1bafd2ab2545881fb741193f224e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2019-01-13T22:07:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T16:53:13.000Z", "max_issues_repo_path": "ML/Pytorch/mnist_cnn_model.py", "max_issues_repo_name": "cm20210602/Biscotti", "max_issues_repo_head_hexsha": "dfba71b3924e1bafd2ab2545881fb741193f224e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ML/Pytorch/mnist_cnn_model.py", "max_forks_repo_name": "cm20210602/Biscotti", "max_forks_repo_head_hexsha": "dfba71b3924e1bafd2ab2545881fb741193f224e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2019-05-26T15:11:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T16:10:24.000Z", "avg_line_length": 43.3823529412, "max_line_length": 138, "alphanum_fraction": 0.5311864407, "include": true, "reason": "import numpy", "num_tokens": 892}
|
import cv2
import numpy as np
def test_transform(fnames):
    """Load images from disk, resize to 224x224, and normalise to [-1, 1).

    Each pixel value v is mapped to (v - 128) / 128 as float32.  Returns a
    numpy array of shape (len(fnames), 224, 224, channels) where channels
    follows cv2.imread's output.
    """
    resized = [cv2.resize(cv2.imread(path), (224, 224)) for path in fnames]
    return (np.float32(resized) - 128.) / 128.
|
{"hexsha": "99d5d673f05b7934b94ab297dc296df56db8d3e5", "size": 221, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepface/datasets/augmentation_policies.py", "max_stars_repo_name": "MatheusAD95/fg2020-faceunderstanding", "max_stars_repo_head_hexsha": "95a3d04f68c2c3207137a9f3b9fb3f8e2134fe8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deepface/datasets/augmentation_policies.py", "max_issues_repo_name": "MatheusAD95/fg2020-faceunderstanding", "max_issues_repo_head_hexsha": "95a3d04f68c2c3207137a9f3b9fb3f8e2134fe8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deepface/datasets/augmentation_policies.py", "max_forks_repo_name": "MatheusAD95/fg2020-faceunderstanding", "max_forks_repo_head_hexsha": "95a3d04f68c2c3207137a9f3b9fb3f8e2134fe8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1, "max_line_length": 48, "alphanum_fraction": 0.6199095023, "include": true, "reason": "import numpy", "num_tokens": 64}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorReduction, TensorReductionMixin
class TensorNanSum(TensorReduction, TensorReductionMixin):
    """Tensor operand implementing the ``nansum`` reduction (NaNs treated as zero)."""
    _op_type_ = OperandDef.NANSUM
    _func_name = 'nansum'
    def __init__(self, axis=None, dtype=None, keepdims=None, combine_size=None, stage=None, **kw):
        # normalize the execution stage before handing everything to the base class
        super().__init__(_axis=axis, _dtype=dtype, _keepdims=keepdims,
                         _combine_size=combine_size,
                         _stage=self._rewrite_stage(stage), **kw)
def nansum(a, axis=None, dtype=None, out=None, keepdims=None, combine_size=None):
    """
    Return the sum of array elements over a given axis treating Not a
    Numbers (NaNs) as zero.
    Zero is returned for slices that are all-NaN or
    empty.
    Parameters
    ----------
    a : array_like
        Tensor containing numbers whose sum is desired. If `a` is not an
        tensor, a conversion is attempted.
    axis : int, optional
        Axis along which the sum is computed. The default is to compute the
        sum of the flattened array.
    dtype : data-type, optional
        The type of the returned tensor and of the accumulator in which the
        elements are summed.  By default, the dtype of `a` is used.  An
        exception is when `a` has an integer type with less precision than
        the platform (u)intp. In that case, the default will be either
        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
        bits. For inexact inputs, dtype must be inexact.
    out : Tensor, optional
        Alternate output tensor in which to place the result.  The default
        is ``None``. If provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.  See
        `doc.ufuncs` for details. The casting of NaN to integer can yield
        unexpected results.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
        If the value is anything but the default, then
        `keepdims` will be passed through to the `mean` or `sum` methods
        of sub-classes of `Tensor`.  If the sub-classes methods
        does not implement `keepdims` any exceptions will be raised.
    combine_size: int, optional
        The number of chunks to combine.
    Returns
    -------
    nansum : Tensor.
        A new tensor holding the result is returned unless `out` is
        specified, in which it is returned. The result has the same
        size as `a`, and the same shape as `a` if `axis` is not None
        or `a` is a 1-d array.
    See Also
    --------
    mt.sum : Sum across tensor propagating NaNs.
    isnan : Show which elements are NaN.
    isfinite: Show which elements are not NaN or +/-inf.
    Notes
    -----
    If both positive and negative infinity are present, the sum will be Not
    A Number (NaN).
    Examples
    --------
    >>> import mars.tensor as mt
    >>> mt.nansum(1).execute()
    1
    >>> mt.nansum([1]).execute()
    1
    >>> mt.nansum([1, mt.nan]).execute()
    1.0
    >>> a = mt.array([[1, 1], [1, mt.nan]])
    >>> mt.nansum(a).execute()
    3.0
    >>> mt.nansum(a, axis=0).execute()
    array([ 2.,  1.])
    >>> mt.nansum([1, mt.nan, mt.inf]).execute()
    inf
    >>> mt.nansum([1, mt.nan, mt.NINF]).execute()
    -inf
    >>> mt.nansum([1, mt.nan, mt.inf, -mt.inf]).execute() # both +/- infinity present
    nan
    """
    tensor = astensor(a)
    if dtype is None:
        # infer the accumulator dtype exactly the way numpy's nansum would
        dtype = np.nansum(np.empty((1,), dtype=tensor.dtype)).dtype
    reduction = TensorNanSum(axis=axis, dtype=dtype, keepdims=keepdims,
                             combine_size=combine_size)
    return reduction(tensor, out=out)
|
{"hexsha": "c3623019992e0bd4fd3a19033d5eddb5bfe13b81", "size": 4485, "ext": "py", "lang": "Python", "max_stars_repo_path": "mars/tensor/reduction/nansum.py", "max_stars_repo_name": "tomzhang/mars-1", "max_stars_repo_head_hexsha": "6f1d85e37eb1b383251314cb0ba13e06288af03d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-29T04:11:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-08T10:19:54.000Z", "max_issues_repo_path": "mars/tensor/reduction/nansum.py", "max_issues_repo_name": "tomzhang/mars-1", "max_issues_repo_head_hexsha": "6f1d85e37eb1b383251314cb0ba13e06288af03d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mars/tensor/reduction/nansum.py", "max_forks_repo_name": "tomzhang/mars-1", "max_forks_repo_head_hexsha": "6f1d85e37eb1b383251314cb0ba13e06288af03d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4634146341, "max_line_length": 98, "alphanum_fraction": 0.6550724638, "include": true, "reason": "import numpy", "num_tokens": 1159}
|
#include "Util.h"
#include "platform.h"
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/filesystem/operations.hpp>
namespace fs = boost::filesystem;
// Return an upper-cased copy of the NUL-terminated string `from`.
std::string strToUpper(const char* from)
{
	std::string str(from);
	for(unsigned int i = 0; i < str.size(); i++)
		// cast through unsigned char: passing a negative char to toupper is UB
		str[i] = (char)toupper((unsigned char)str[i]);

	return str;
}
// Upper-case `str` in place and return a reference to it.
std::string& strToUpper(std::string& str)
{
	for(unsigned int i = 0; i < str.size(); i++)
		// cast through unsigned char: passing a negative char to toupper is UB
		str[i] = (char)toupper((unsigned char)str[i]);

	return str;
}
// Upper-case overload for std::string; delegates to the C-string overload
// (note: this therefore stops at the first embedded NUL, if any).
std::string strToUpper(const std::string& str)
{
	return strToUpper(str.c_str());
}
// embedded resources, e.g. ":/font.ttf", need to be properly handled too
std::string getCanonicalPath(const std::string& path)
{
	// only canonicalize paths that actually exist; everything else
	// (including the empty string) is returned untouched
	if(!path.empty() && boost::filesystem::exists(path))
		return boost::filesystem::canonical(path).generic_string();

	return path;
}
// expands "./my/path.sfc" to "[relativeTo]/my/path.sfc"
// if allowHome is true, also expands "~/my/path.sfc" to "/home/pi/my/path.sfc"
fs::path resolvePath(const fs::path& path, const fs::path& relativeTo, bool allowHome)
{
	// empty path: nothing to resolve
	if(path.begin() == path.end())
		return path;

	// append every component of `path` after the first onto `base`
	const auto rebase = [&path](fs::path base) {
		for(auto component = ++path.begin(); component != path.end(); ++component)
			base /= *component;
		return base;
	};

	const fs::path head = *path.begin();
	if(head == ".")
		return rebase(relativeTo);
	if(allowHome && head == "~")
		return rebase(getHomePath());

	return path;
}
// String-based variant of removeCommonPath: strips `relativeTo` (plus the
// following separator) from the front of `path`. Sets `contains` accordingly.
fs::path removeCommonPathUsingStrings(const fs::path& path, const fs::path& relativeTo, bool& contains)
{
#ifdef WIN32
	std::wstring pathStr = path.c_str();
	std::wstring relativeToStr = relativeTo.c_str();
#else
	std::string pathStr = path.c_str();
	std::string relativeToStr = relativeTo.c_str();
#endif
	// Fixed: the original used find_first_of(), which matches ANY single
	// character from `relativeToStr`, not the substring prefix. Use find()
	// for a substring match, and require pathStr to actually be longer so
	// substr(size() + 1) cannot throw when the two paths are identical.
	if (pathStr.size() > relativeToStr.size() && pathStr.find(relativeToStr) == 0) {
		contains = true;
		return pathStr.substr(relativeToStr.size() + 1);
	}

	contains = false;
	return path;
}
// example: removeCommonPath("/home/pi/roms/nes/foo/bar.nes", "/home/pi/roms/nes/") returns "foo/bar.nes"
fs::path removeCommonPath(const fs::path& path, const fs::path& relativeTo, bool& contains)
{
	// if either of these doesn't exist, fs::canonical() is going to throw an error
	if(!fs::exists(path) || !fs::exists(relativeTo))
	{
		contains = false;
		return path;
	}

	// if it's a symlink we don't want to apply fs::canonical on it, otherwise we'll lose the current parent_path
	fs::path p = (fs::is_symlink(path) ? fs::canonical(path.parent_path()) / path.filename() : fs::canonical(path));
	fs::path r = fs::canonical(relativeTo);

	// paths on different roots can never share a common prefix
	if(p.root_path() != r.root_path())
	{
		contains = false;
		return p;
	}

	// find point of divergence.
	// Fixed: check the end iterators BEFORE dereferencing — the original
	// dereferenced first, which is undefined behavior once either range is
	// exhausted (e.g. when one path is a prefix of the other).
	auto itr_path = p.begin();
	auto itr_relative_to = r.begin();
	while(itr_path != p.end() && itr_relative_to != r.end() && *itr_path == *itr_relative_to)
	{
		++itr_path;
		++itr_relative_to;
	}

	// relativeTo was not fully consumed, so p does not live under it
	if(itr_relative_to != r.end())
	{
		contains = false;
		return p;
	}

	// the remainder of p (minus any "." components) is the relative part
	fs::path result;
	while(itr_path != p.end())
	{
		if(*itr_path != fs::path("."))
			result = result / *itr_path;

		++itr_path;
	}

	contains = true;
	return result;
}
// usage: makeRelativePath("/path/to/my/thing.sfc", "/path/to") -> "./my/thing.sfc"
// usage: makeRelativePath("/home/pi/my/thing.sfc", "/path/to", true) -> "~/my/thing.sfc"
fs::path makeRelativePath(const fs::path& path, const fs::path& relativeTo, bool allowHome)
{
	bool contains = false;

	// first try to express the path relative to the supplied base
	fs::path trimmed = removeCommonPath(path, relativeTo, contains);
	if(contains)
		return "." / trimmed;

	// optionally retry relative to the user's home directory
	if(allowHome)
	{
		contains = false;
		trimmed = removeCommonPath(path, getHomePath(), contains);
		if(contains)
			return "~" / trimmed;
	}

	// nothing could be resolved
	return path;
}
// Replace every occurrence of `replace` in `str` with `with` and return the
// result. Rescans from the beginning after each substitution, so substitutions
// whose results form new matches are themselves replaced (original behavior).
std::string strreplace(std::string str, const std::string& replace, const std::string& with)
{
	// guard: an empty needle matches at position 0 forever (infinite loop)
	if(replace.empty())
		return str;

	size_t pos;
	while((pos = str.find(replace)) != std::string::npos)
		str.replace(pos, replace.length(), with);

	return str;
}
// platform-specific escape path function
// on windows: just puts the path in quotes
// everything else: assume bash and escape special characters with backslashes
std::string escapePath(const boost::filesystem::path& path)
{
#ifdef WIN32
	// windows escapes stuff by just putting everything in quotes
	return '"' + fs::path(path).make_preferred().string() + '"';
#else
	// insert a backslash before every character bash would treat specially
	std::string escaped = path.string();
	const char* specials = " '\"\\!$^&*(){}[]?;<>";

	size_t i = 0;
	while(i < escaped.length())
	{
		bool needsEscape = false;
		for(const char* c = specials; ; c++)
		{
			if(escaped[i] == *c)
			{
				needsEscape = true;
				break;
			}
			if(*c == '\0')
				break;
		}

		if(needsEscape)
		{
			escaped.insert(i, "\\");
			i += 2; // skip over the inserted backslash and the escaped char
		}
		else
		{
			i++;
		}
	}

	return escaped;
#endif
}
// Strip every "(...)" and "[...]" group from `str`, then trim trailing spaces.
// Roughly equivalent to regex-replacing "\((.*)\)|\[(.*)\]" with "" — done by
// hand to avoid pulling in another boost lib for a single call site.
std::string removeParenthesis(const std::string& str)
{
	static const char openers[] = { '(', '[' };
	static const char closers[] = { ')', ']' };

	std::string ret = str;
	bool changed = true;
	while(changed)
	{
		changed = false;
		for(size_t i = 0; i < sizeof(openers); i++)
		{
			// erase from the last opener before the first closer, inclusive
			const size_t close = ret.find_first_of(closers[i]);
			const size_t open = ret.find_last_of(openers[i], close);
			if(open != std::string::npos && close != std::string::npos)
			{
				ret.erase(open, close - open + 1);
				changed = true;
			}
		}
	}

	// also strip trailing spaces (matching the original's behavior exactly:
	// a string that is ALL spaces is returned unchanged)
	const size_t last = ret.find_last_not_of(' ');
	return ret.substr(0, last == std::string::npos ? std::string::npos : last + 1);
}
// From a comma separated string, get a sorted vector of strings.
// Matches boost::split semantics: an empty input yields one empty token,
// and consecutive commas yield empty tokens.
std::vector<std::string> commaStringToVector(std::string commaString)
{
	std::vector<std::string> tokens;
	size_t begin = 0;
	while(true)
	{
		const size_t comma = commaString.find(',', begin);
		tokens.push_back(commaString.substr(begin, comma - begin));
		if(comma == std::string::npos)
			break;
		begin = comma + 1;
	}

	std::sort(tokens.begin(), tokens.end());
	return tokens;
}
// From a vector of names, build a sorted comma separated string.
std::string vectorToCommaString(std::vector<std::string> stringVector)
{
	std::sort(stringVector.begin(), stringVector.end());

	std::string joined;
	for(const std::string& name : stringVector)
	{
		// same separator rule as the original: no comma while the
		// accumulated string is still empty
		if(!joined.empty())
			joined += ",";
		joined += name;
	}
	return joined;
}
|
{"hexsha": "3a52d484324c9ca1acb1415b7d30f67951089bba", "size": 6520, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "es-core/src/Util.cpp", "max_stars_repo_name": "Odroid-RetroArena/EmulationStation", "max_stars_repo_head_hexsha": "62c92b5ec76561d543a213acf51495220189b8e6", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "es-core/src/Util.cpp", "max_issues_repo_name": "Odroid-RetroArena/EmulationStation", "max_issues_repo_head_hexsha": "62c92b5ec76561d543a213acf51495220189b8e6", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "es-core/src/Util.cpp", "max_forks_repo_name": "Odroid-RetroArena/EmulationStation", "max_forks_repo_head_hexsha": "62c92b5ec76561d543a213acf51495220189b8e6", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-12-02T03:32:16.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-07T16:29:49.000Z", "avg_line_length": 24.5112781955, "max_line_length": 118, "alphanum_fraction": 0.6521472393, "num_tokens": 1821}
|
/**
* @license BSD 3-Clause
* @copyright Pawel Okas
* @version $Id$
* @brief
*
* @authors Pawel Okas
* created on: 30-03-2019
*
* @copyright Copyright (c) 2019, Pawel Okas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _AT24MAC_H_
#define _AT24MAC_H_
#include <algorithm>
#include <charconv>
#include <chrono>
#include <optional>
#include <string_view>
#include <thread>

#include <gsl/span>

#include "I2CDevice/I2CDevice.h"
#include "microhal.h"
/**
* \addtogroup Devices
* @{
* @class AT24MAC
* @}
*/
// Driver for the Atmel AT24MAC serial EEPROM with factory-programmed EUI and
// 128-bit serial number, accessed over two I2C addresses (memory + MAC area).
class AT24MAC {
 public:
    using I2C = microhal::I2C;

    // I2C error codes mirrored one-to-one; the last two are driver-specific.
    // 'Addres' (sic) is kept as-is for API compatibility.
    enum class Error {
        AcknowledgeFailure = static_cast<int>(I2C::Error::AcknowledgeFailure),
        ArbitrationLost = static_cast<int>(I2C::Error::ArbitrationLost),
        Bus = static_cast<int>(I2C::Error::Bus),
        None = static_cast<int>(I2C::Error::None),
        Overrun = static_cast<int>(I2C::Error::Overrun),
        Timeout = static_cast<int>(I2C::Error::Timeout),
        Unknown = static_cast<int>(I2C::Error::Unknown),
        DataOverflow,
        Addres
    };

    // Factory-programmed 128-bit unique serial number.
    struct SerialNumber {
        uint8_t serial[128 / 8];
        // Fixed: the original returned std::equal(...) un-negated, i.e. it
        // reported "not equal" exactly when the serials WERE equal.
        bool operator!=(const SerialNumber &b) const {
            return !std::equal(std::begin(serial), std::begin(serial) + sizeof(serial), std::begin(b.serial));
        }
        // Render as colon-separated hex, e.g. "a3:5:ff:...".
        // Note: bytes < 0x10 print with a single digit (no zero padding).
        std::string toString() const {
            std::string str;
            str.reserve(sizeof(serial) * 3 - 1);  // up to 2 hex digits + ':' per byte
            for (uint_fast8_t i = 0; i < sizeof(serial); i++) {
                std::array<char, 2> buff;
                auto [p, ec] = std::to_chars(buff.data(), buff.data() + buff.size(), serial[i], 16);
                // Fixed: length was computed as 'p - str.data()' (a pointer
                // difference across unrelated buffers); must be 'p - buff.data()'.
                str.append(std::string_view(buff.data(), static_cast<size_t>(p - buff.data())));
                if (i + 1 != sizeof(serial)) {
                    str.append(":");
                }
            }
            return str;
        }
    };
    static constexpr const size_t pageSize = 16;
    static constexpr const size_t memorySizeInBytes = 256;

 private:
    using Endianness = microhal::Endianness;
    using Access = microhal::Access;
    using span = gsl::span<uint8_t>;  // Todo change to std::span when it will be available in gcc
    // create alias to microhal::Address, we just want to type less
    template <typename T, T i>
    using Address = microhal::Address<T, i>;

    struct Register {
        static constexpr auto SerialNumberReg =
            microhal::makeRegister<SerialNumber, Access::ReadOnly, Endianness::Little>(Address<uint8_t, 0b1000'0000>{});
        static constexpr auto EUIAddress = microhal::makeRegister<uint64_t, Access::ReadOnly, Endianness::Big>(Address<uint8_t, 0b1001'1000>{});
    };

    // The EUI/serial-number area answers on a separate I2C address derived
    // from the memory address.
    static constexpr uint8_t getMACi2cAddrFromMemoryI2cAddr(uint8_t memoryAddress) { return (memoryAddress & 0x0F) | 0xB0; }

 public:
    AT24MAC(I2C &i2c, uint8_t address) : memory(i2c, address), mac(i2c, getMACi2cAddrFromMemoryI2cAddr(address)) {}

    static std::string_view toString(Error error);

    Error readEUI(uint64_t &eui) { return static_cast<Error>(mac.readRegister(Register::EUIAddress, eui)); }
    Error readSerialNumber(SerialNumber &serial) { return static_cast<Error>(mac.readRegister(Register::SerialNumberReg, serial)); }

    // Memory access functions
    Error readByte(uint8_t address, uint8_t &data) { return static_cast<Error>(memory.read(address, data)); }
    Error read(uint8_t address, span data) { return static_cast<Error>(memory.read(address, data)); }
    Error writeByte(uint8_t address, uint8_t data) { return static_cast<Error>(memory.write(address, data)); }
    // Write at most one page; 'pageAddress' must be page aligned.
    Error writePage(uint8_t pageAddress, span data) {
        if (data.size_bytes() > pageSize) return Error::DataOverflow;
        if ((pageAddress % pageSize) != 0) return Error::Addres;
        return static_cast<Error>(memory.write(pageAddress, data));
    }
    // Write an arbitrary span, splitting it into page-sized chunks.
    Error write(uint8_t address, span data) {
        if (data.size_bytes() > memorySizeInBytes) return Error::DataOverflow;
        if (data.size_bytes() == 0) return Error::None;
        // Bytes that fit in the (possibly partially used) first page, clamped
        // so short writes never read past the caller's buffer — the original
        // could build an oversized span here.
        size_t bytesInFirstPage = std::min(pageSize - (address % pageSize), static_cast<size_t>(data.size_bytes()));
        // The first chunk may start mid-page, so write it through the device
        // directly; writePage() would reject the unaligned address.
        Error error = static_cast<Error>(memory.write(address, span(data.data(), bytesInFirstPage)));
        if (error != Error::None) return error;
        address += bytesInFirstPage;
        size_t bytesToWrite = data.size_bytes() - bytesInFirstPage;
        uint8_t *dataPtr = data.data() + bytesInFirstPage;
        // NOTE(review): the device needs a write-cycle delay between page
        // writes (see writeWait); confirm the I2CDevice layer handles this.
        while (bytesToWrite >= pageSize) {
            error = writePage(address, span(dataPtr, pageSize));
            if (error != Error::None) return error;  // propagate instead of silently ignoring
            address += pageSize;
            dataPtr += pageSize;
            bytesToWrite -= pageSize;
        }
        if (bytesToWrite) {
            error = writePage(address, span(dataPtr, bytesToWrite));
            if (error != Error::None) return error;
        }
        return Error::None;
    }
    // Block for the device's internal write cycle (5 ms — TODO confirm the
    // worst-case write-cycle time against the AT24MAC datasheet).
    void writeWait() { std::this_thread::sleep_for(std::chrono::milliseconds{5}); }

 private:
    microhal::I2CDevice memory;  // EEPROM array
    microhal::I2CDevice mac;     // EUI / serial-number area
};
#endif /* _AT24MAC_H_ */
|
{"hexsha": "0ceeeaac071404bfc8510dc5f19319464124766f", "size": 6291, "ext": "h", "lang": "C", "max_stars_repo_path": "drivers/Atmel/AT24MAC/driver/at24mac.h", "max_stars_repo_name": "microHAL/microhal-drivers", "max_stars_repo_head_hexsha": "09925a9696e4794f9ca0b2e9b5e61908ac99b84b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "drivers/Atmel/AT24MAC/driver/at24mac.h", "max_issues_repo_name": "microHAL/microhal-drivers", "max_issues_repo_head_hexsha": "09925a9696e4794f9ca0b2e9b5e61908ac99b84b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "drivers/Atmel/AT24MAC/driver/at24mac.h", "max_forks_repo_name": "microHAL/microhal-drivers", "max_forks_repo_head_hexsha": "09925a9696e4794f9ca0b2e9b5e61908ac99b84b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6622516556, "max_line_length": 148, "alphanum_fraction": 0.6719122556, "num_tokens": 1531}
|
# coding: utf-8
import os
import numpy as np
def shuffle_array(*args):
    """
    Shuffle the given arrays in unison along their first axis.

    Keeps the relative associations arr_j[i] <-> arr_k[i].

    Params
    ------
    args: (numpy arrays tuple) arr_1, arr_2, ..., arr_n to be shuffled.

    Return
    ------
    tuple of the shuffled arrays, in the same order as given.

    Raises
    ------
    ValueError : if no array is given, or the arrays' first dimensions differ.
    """
    if len(args) == 0:
        raise ValueError('shuffle must take at least one array')
    length = args[0].shape[0]
    # Validate with a real exception: the original used `assert`, which is
    # silently stripped when Python runs with -O.
    for i, arr in enumerate(args):
        if arr.shape[0] != length:
            raise ValueError(
                "Every array should have the same shape: "
                " array {} length = {} array 1 length = {} ".format(i + 1, arr.shape[0], length))
    # One shared permutation keeps rows associated across all arrays
    indices = np.arange(length)
    np.random.shuffle(indices)
    return tuple(arr[indices] for arr in args)
def make_pizza_slice(n_samples=500, radius_sep=0.5, radius_max=1, start_angle=0, end_angle=1, shuffle=True):
    """
    Make the "pizza slice" toy dataset: 2D points inside an angular sector,
    split into two classes by the circle of radius `radius_sep`.

    Parameters
    ----------
    n_samples : (int, default=500) the total number of samples in the dataset
    radius_sep : (float, default=0.5) the radius of the frontier between the 2 classes
    radius_max : (float, default=1) the radius of the complete data
    start_angle : the start angle
    end_angle : the end angle
    shuffle : (bool, default=True) whether to shuffle the samples

    Return
    ------
    X: (numpy.ndarray, [n_samples, 2]) the data
    y: (numpy.ndarray, [n_samples]) the labels (0 = inner class, 1 = outer class)
    """
    assert radius_sep < radius_max, "radius_sep should be strictly smaller than radius_max."
    half = n_samples // 2
    # radial coordinates: first half inside the frontier, second half outside
    # (the RNG draw order is kept identical for seed reproducibility)
    inner_rho = np.random.uniform(high=radius_sep, size=half)
    outer_rho = np.random.uniform(low=radius_sep, high=radius_max, size=half)
    rho = np.concatenate((inner_rho, outer_rho), axis=0)
    theta = np.random.uniform(low=start_angle, high=end_angle, size=n_samples)
    # polar -> cartesian
    X = np.empty(shape=(n_samples, 2))
    X[:, 0] = rho * np.cos(theta)
    X[:, 1] = rho * np.sin(theta)
    y = np.zeros(n_samples)
    y[half:] = 1
    if shuffle:
        X, y = shuffle_array(X, y)
    return X, y
|
{"hexsha": "4811bd35f72bea00390c0f5578e5d68a5762fc44", "size": 2193, "ext": "py", "lang": "Python", "max_stars_repo_path": "datawarehouse/pizza.py", "max_stars_repo_name": "victor-estrade/datawarehouse", "max_stars_repo_head_hexsha": "9ae342bf6f9c3622eb841c2ee770519b12cde1c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datawarehouse/pizza.py", "max_issues_repo_name": "victor-estrade/datawarehouse", "max_issues_repo_head_hexsha": "9ae342bf6f9c3622eb841c2ee770519b12cde1c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datawarehouse/pizza.py", "max_forks_repo_name": "victor-estrade/datawarehouse", "max_forks_repo_head_hexsha": "9ae342bf6f9c3622eb841c2ee770519b12cde1c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8095238095, "max_line_length": 108, "alphanum_fraction": 0.6219790242, "include": true, "reason": "import numpy", "num_tokens": 587}
|
import numpy as np
from src.compute_corr_coef import compute_corr_coef
from utils.plotting import plot_similarities
def compute_trust_values(dsk, do_plot=False):
    """
    Compute trust values following formula 6

    k:= number of blendshapes
    n:= num_features (num_markers*3)

    :param dsk: delta_sk vector (k, n)
    :param do_plot: decide if we want to plot the between-correlation matrix
    :return: trust values vector (k,)
    """
    if len(np.shape(dsk)) != 2:
        raise ValueError("[COMPUTE TRUST VALUE] dsk dimensions not supported ({}) instead of 2".format(len(np.shape(dsk))))
    # compute between-blendshape correlation, clipped to non-negative values
    ckl = compute_corr_coef(dsk, dsk)
    ckl = np.maximum(ckl, 0.0)
    if do_plot:
        plot_similarities(ckl, "Between blendshapes correlation", vmin=0, vmax=1)
    # row sums of the strictly lower triangle: low_trig[k] = sum_{l<k} ckl[k, l]
    # (vectorized replacement of the original O(k^2) Python loops)
    low_trig = np.sum(np.tril(ckl, k=-1), axis=1)
    max_low_trig = np.max(low_trig)
    # trust values (formula 6)
    return 1.0 - low_trig / max_low_trig
if __name__ == '__main__':
    """
    test compute_trust_values function
    run: python -m src.compute_trust_values
    """
    # fixed seed for reproducible demo output
    np.random.seed(0)
    from utils.re_order_delta import re_order_delta
    # test compute trust values on a small random delta_sk matrix,
    # re-ordered first (semantics of the ordering live in utils.re_order_delta)
    sk = np.random.rand(6, 3)  # (k, n)
    sorted_sk = re_order_delta(sk)
    tk = compute_trust_values(sorted_sk, do_plot=False)
    print("tk")
    print(tk)
|
{"hexsha": "f1c021de79d124febfa8a831e976cd4dc12aeed9", "size": 1647, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/compute_trust_values.py", "max_stars_repo_name": "johndpope/FacialRetargeting", "max_stars_repo_head_hexsha": "5fb0c1da6af6c3d59aef264f567bfa7a244d0764", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2020-08-19T02:52:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T12:35:04.000Z", "max_issues_repo_path": "src/compute_trust_values.py", "max_issues_repo_name": "johndpope/FacialRetargeting", "max_issues_repo_head_hexsha": "5fb0c1da6af6c3d59aef264f567bfa7a244d0764", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-16T07:11:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-30T10:26:04.000Z", "max_forks_repo_path": "src/compute_trust_values.py", "max_forks_repo_name": "johndpope/FacialRetargeting", "max_forks_repo_head_hexsha": "5fb0c1da6af6c3d59aef264f567bfa7a244d0764", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-08-24T08:30:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T15:55:24.000Z", "avg_line_length": 26.564516129, "max_line_length": 123, "alphanum_fraction": 0.6514875531, "include": true, "reason": "import numpy", "num_tokens": 456}
|
"""
Test for nested class Parent
This file contains a discussion, examples, and tests about nested
classes and parents. It is kept in a separate file to avoid import
loops.
EXAMPLES:
Currently pickling fails for parents using nested classes (typically
for categories), but deriving only from Parent::
sage: from sage.misc.nested_class_test import TestParent1, TestParent2, TestParent3, TestParent4
sage: P = TestParent1()
sage: TestSuite(P).run()
Failure ...
The following tests failed: _test_elements, _test_pickling
They actually need to be in the NestedClassMetaclass. However, due to
a technical detail, this is currently not directly supported::
sage: P = TestParent2()
Traceback (most recent call last):
...
TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
sage: TestSuite(P).run() # not tested
Instead, the easiest is to inherit from UniqueRepresentation, which is
what you want to do anyway most of the time::
sage: P = TestParent3()
sage: TestSuite(P).run()
This is what all Sage's parents using categories currently do. An
alternative is to use ClasscallMetaclass as metaclass::
sage: P = TestParent4()
sage: TestSuite(P).run()
"""
#*****************************************************************************
# Copyright (C) 2009 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from __future__ import print_function, absolute_import
from six import add_metaclass
__all__ = [] # Don't document any parents
from sage.structure.parent import Parent
from sage.structure.element_wrapper import ElementWrapper
from sage.structure.unique_representation import UniqueRepresentation
from sage.misc.classcall_metaclass import ClasscallMetaclass
from sage.misc.nested_class import NestedClassMetaclass
class TestParent1(Parent):
    # Plain Parent subclass with a genuinely nested Element class; per the
    # module docstring its TestSuite is expected to FAIL on _test_elements
    # and _test_pickling.
    def __init__(self):
        """
        EXAMPLES::
            sage: sage.misc.nested_class_test.TestParent1()
            <sage.misc.nested_class_test.TestParent1_with_category object at ...>
        """
        from sage.categories.all import Sets
        Parent.__init__(self, category = Sets())
    class Element(ElementWrapper):
        pass
@add_metaclass(NestedClassMetaclass)
class TestParent2(Parent):
    # Demonstrates that NestedClassMetaclass currently cannot be combined
    # with Parent: instantiation raises a metaclass conflict (see the module
    # docstring and the doctest below).
    def __init__(self):
        """
        EXAMPLES::
            sage: sage.misc.nested_class_test.TestParent2()
            Traceback (most recent call last):
            ...
            TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
        """
        from sage.categories.all import Sets
        Parent.__init__(self, category = Sets())
    class Element(ElementWrapper):
        pass
class TestParent3(UniqueRepresentation, Parent):
    # Inheriting from UniqueRepresentation makes the TestSuite (including
    # pickling) pass; the module docstring recommends this pattern.
    def __init__(self):
        """
        EXAMPLES::
            sage: sage.misc.nested_class_test.TestParent3()
            <sage.misc.nested_class_test.TestParent3_with_category object at ...>
        """
        from sage.categories.all import Sets
        Parent.__init__(self, category = Sets())
    class Element(ElementWrapper):
        pass
@add_metaclass(ClasscallMetaclass)
class TestParent4(Parent):
    # Alternative to UniqueRepresentation: use ClasscallMetaclass as the
    # metaclass (see module docstring). __eq__/__ne__/__hash__ compare only
    # the class, so all instances are equal and share a single hash value.
    def __init__(self):
        """
        EXAMPLES::
            sage: sage.misc.nested_class_test.TestParent4()
            <sage.misc.nested_class_test.TestParent4_with_category object at ...>
        """
        from sage.categories.all import Sets
        Parent.__init__(self, category=Sets())
    def __eq__(self, other):
        """
        EXAMPLES::
            sage: from sage.misc.nested_class_test import TestParent4
            sage: TestParent4() == TestParent4()
            True
        """
        return self.__class__ == other.__class__
    def __ne__(self, other):
        """
        EXAMPLES::
            sage: from sage.misc.nested_class_test import TestParent4
            sage: TestParent4() != TestParent4()
            False
        """
        return self.__class__ != other.__class__
    def __hash__(self):
        """
        Return the hash of ``self``.
        EXAMPLES::
            sage: from sage.misc.nested_class_test import TestParent4
            sage: hash(TestParent4()) == hash(TestParent4())
            True
        """
        # arbitrary fixed constant: equal objects (all instances, per __eq__)
        # must have equal hashes
        return hash(8960522744683456048)
    class Element(ElementWrapper):
        pass
# Class for tests:
class B(object):
    """
    A normal external class.
    """
    pass
class ABB(object):
    # outer class with a genuinely nested B definition
    class B(object):
        """
        This class is broken and can't be pickled.
        A warning is emitted during compilation.
        """
        pass
class ABL(object):
    """
    There is no problem here.
    """
    # B here is a *reference* to the external class, not a nested definition
    B = B
class ALB(object):
    """
    There is a nested class just below. Which can't be properly sphinxed.
    """
    class C(object):
        """
        Internal C class.
        Thanks to the links below this class is pickled ok.
        But it is sphinxed wrong: It is typeset as a link to an outer class.
        """
        pass
C = ALB.C  # module-level alias; this is the "link below" that makes ALB.C picklable
@add_metaclass(NestedClassMetaclass)
class ABBMeta(object):
    # same shape as ABB, with NestedClassMetaclass applied
    class B(object):
        """
        Internal B class.
        """
        pass
@add_metaclass(NestedClassMetaclass)
class ABLMeta(object):
    # same shape as ABL (attribute referencing the external B), with
    # NestedClassMetaclass applied
    B = B
@add_metaclass(NestedClassMetaclass)
class ALBMeta(object):
    """
    There is a nested class just below which is properly sphinxed.
    """
    class CMeta(object):
        """
        Internal B class.
        """
        pass
CMeta = ALBMeta.CMeta  # module-level alias, mirroring ``C = ALB.C`` above
# NOTE(review): sage.misc.sageinspect.sage_getsourcelines introspects this
# class's source; keep edits here minimal so that test remains meaningful.
class TestNestedParent(UniqueRepresentation, Parent):
    """
    This is a dummy for testing source inspection of nested classes.
    See the test in ``sage.misc.sageinspect.sage_getsourcelines``.
    """
    class Element(object):
        "This is a dummy element class"
        pass
|
{"hexsha": "9c714f13889b710c0ac1d7fc887f0e20868825a7", "size": 6057, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/misc/nested_class_test.py", "max_stars_repo_name": "fchapoton/sage", "max_stars_repo_head_hexsha": "765c5cb3e24dd134708eca97e4c52e0221cd94ba", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-30T04:27:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-30T04:27:27.000Z", "max_issues_repo_path": "src/sage/misc/nested_class_test.py", "max_issues_repo_name": "fchapoton/sage", "max_issues_repo_head_hexsha": "765c5cb3e24dd134708eca97e4c52e0221cd94ba", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sage/misc/nested_class_test.py", "max_forks_repo_name": "fchapoton/sage", "max_forks_repo_head_hexsha": "765c5cb3e24dd134708eca97e4c52e0221cd94ba", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-23T10:40:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-23T10:40:14.000Z", "avg_line_length": 25.7744680851, "max_line_length": 143, "alphanum_fraction": 0.6311705465, "include": true, "reason": "from sage", "num_tokens": 1345}
|
export PrioritizedSweepingSamplingModel
using DataStructures: PriorityQueue, dequeue!
import StatsBase: sample
"""
PrioritizedSweepingSamplingModel(θ::Float64=1e-4)
See more details at Section (8.4) on Page 168 of the book *Sutton, Richard S., and Andrew G. Barto. Reinforcement learning: An introduction. MIT press, 2018.*
"""
mutable struct PrioritizedSweepingSamplingModel <: AbstractEnvironmentModel
experiences::Dict{Tuple{Any,Any},Tuple{Float64,Bool,Any}}
PQueue::PriorityQueue{Tuple{Any,Any},Float64}
predecessors::Dict{Any,Set{Tuple{Any,Any,Float64,Bool}}}
θ::Float64
sample_count::Int
PrioritizedSweepingSamplingModel(θ::Float64 = 1e-4) = new(
Dict{Tuple{Any,Any},Tuple{Float64,Bool,Any}}(),
PriorityQueue{Tuple{Any,Any},Float64}(Base.Order.Reverse),
Dict{Any,Set{Tuple{Any,Any,Float64,Bool}}}(),
θ,
0,
)
end
"""
Extract the most recent transition from the trajectory `t`, compute its
priority under policy `p`, and feed both into the model `m`. Does nothing
until the trajectory contains at least one terminal flag.
"""
function RLBase.update!(
    m::PrioritizedSweepingSamplingModel,
    t::AbstractTrajectory,
    p::AbstractPolicy,
    ::AbstractEnv,
    ::Union{PreActStage,PostEpisodeStage},
)
    isempty(t[:terminal]) && return
    # (s, a) at the previous step; (r, d, s′) observed after acting.
    s = t[:state][end-1]
    a = t[:action][end-1]
    r = t[:reward][end]
    d = t[:terminal][end]
    s′ = t[:state][end]
    transition = (s, a, r, d, s′)
    pri = RLBase.priority(p, transition)
    update!(m, (transition..., pri))
end
"""
Store a `(s, a, r, d, s′, P)` transition in the model: record the observed
outcome, enqueue the (s, a) pair if its priority `P` clears the threshold
`m.θ` (Sutton & Barto, Sec. 8.4), and register (s, a, r, d) as a
predecessor of `s′` for backward priority propagation.
"""
function RLBase.update!(m::PrioritizedSweepingSamplingModel, transition::Tuple)
    s, a, r, d, s′, P = transition
    m.experiences[(s, a)] = (r, d, s′)
    # Only queue updates worth doing; keeps the priority queue small.
    if P >= m.θ
        m.PQueue[(s, a)] = P
    end
    # get! replaces the manual haskey/create-then-push dance: it returns the
    # existing set for s′ or inserts (and returns) a fresh empty one.
    push!(
        get!(() -> Set{Tuple{Any,Any,Float64,Bool}}(), m.predecessors, s′),
        (s, a, r, d),
    )
end
"""
Pop the highest-priority (s, a) pair from the model's queue and return the
stored transition `(s, a, r, d, s′)`, bumping `sample_count`. Returns
`nothing` when the queue is empty.
"""
function sample(m::PrioritizedSweepingSamplingModel)
    isempty(m.PQueue) && return nothing
    (s, a) = dequeue!(m.PQueue)
    (r, d, s′) = m.experiences[(s, a)]
    m.sample_count += 1
    return (s, a, r, d, s′)
end
|
{"hexsha": "d8852283a1de1aaaff8ae6980b8f6717245a319f", "size": 1994, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ReinforcementLearningZoo/src/algorithms/tabular/dyna_agents/env_models/prioritized_sweeping_sampling_model.jl", "max_stars_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_stars_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 352, "max_stars_repo_stars_event_min_datetime": "2018-08-30T18:41:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:41:22.000Z", "max_issues_repo_path": "src/ReinforcementLearningZoo/src/algorithms/tabular/dyna_agents/env_models/prioritized_sweeping_sampling_model.jl", "max_issues_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_issues_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 325, "max_issues_repo_issues_event_min_datetime": "2018-08-24T12:41:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:29:23.000Z", "max_forks_repo_path": "src/ReinforcementLearningZoo/src/algorithms/tabular/dyna_agents/env_models/prioritized_sweeping_sampling_model.jl", "max_forks_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_forks_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 62, "max_forks_repo_forks_event_min_datetime": "2018-09-02T03:40:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T12:35:19.000Z", "avg_line_length": 29.3235294118, "max_line_length": 158, "alphanum_fraction": 0.6248746239, "num_tokens": 608}
|
using StatsBase
# Puzzle input: one vent line per file line, formatted "x1,y1 -> x2,y2".
input = joinpath(@__DIR__, "input")
lines = readlines(input)
"""
Parse a line of the form `"x1,y1 -> x2,y2"` into a pair of integer
coordinate tuples `((x1, y1), (x2, y2))`.
"""
function endpoints(line)
    x1, y1, x2, y2 = parse.(Int, split(line, r",| -> "))
    return ((x1, y1), (x2, y2))
end
"""
For each `((x1, y1), (x2, y2))` segment, return the vector of grid points it
covers when it is horizontal or vertical, and `nothing` otherwise.
"""
function orthogonals(coords)
    map(coords) do (p, q)
        if p[1] == q[1]
            # Vertical segment: x fixed, sweep y in increasing order.
            [(p[1], y) for y in min(p[2], q[2]):max(p[2], q[2])]
        elseif p[2] == q[2]
            # Horizontal segment: y fixed, sweep x in increasing order.
            [(x, p[2]) for x in min(p[1], q[1]):max(p[1], q[1])]
        else
            nothing
        end
    end
end
"""
For each `((x1, y1), (x2, y2))` segment, return the vector of grid points it
covers when it is diagonal (both coordinates differ), and `nothing`
otherwise (horizontal, vertical, or degenerate segments).
"""
function diagonals(coords)
    map(coords) do (p, q)
        # One signed unit step per axis replaces the original four
        # copy-pasted direction branches.
        dx = sign(q[1] - p[1])
        dy = sign(q[2] - p[2])
        if dx != 0 && dy != 0
            collect(zip(p[1]:dx:q[1], p[2]:dy:q[2]))
        else
            nothing
        end
    end
end
coords = map(endpoints, lines)
# Keep only segments of the relevant orientation; `nothing` marks the rest.
orthos = filter(!isnothing, orthogonals(coords))
dias = filter(!isnothing, diagonals(coords))
# Part one: grid points covered by at least two horizontal/vertical vents.
p1 = length(filter(p -> last(p) > 1, countmap(Iterators.flatten(orthos))))
# Part two: also count diagonal vents. NOTE(review): append! mutates
# `orthos` in place — harmless here only because `orthos` is not used again.
p2 = length(filter(p -> last(p) > 1, countmap(Iterators.flatten(append!(orthos, dias)))))
println("-----------------------------------------------------------------------")
println("hydrothermal venture -- part one :: $p1")
println("hydrothermal venture -- part two :: $p2")
println("-----------------------------------------------------------------------")
# Regression pins for this author's specific puzzle input.
@assert(p1 == 6005)
@assert(p2 == 23864)
|
{"hexsha": "021f603ed4d69b0da528e48290fed27ebeaf29a4", "size": 1944, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "bin/five/run.jl", "max_stars_repo_name": "talentdeficit/aoc2021", "max_stars_repo_head_hexsha": "6dbc52d2ad096584641aab629b29a0cdadedd5a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/five/run.jl", "max_issues_repo_name": "talentdeficit/aoc2021", "max_issues_repo_head_hexsha": "6dbc52d2ad096584641aab629b29a0cdadedd5a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/five/run.jl", "max_forks_repo_name": "talentdeficit/aoc2021", "max_forks_repo_head_hexsha": "6dbc52d2ad096584641aab629b29a0cdadedd5a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3454545455, "max_line_length": 98, "alphanum_fraction": 0.5447530864, "num_tokens": 545}
|
subroutine axial
!
! to obtain the axial distributiun of velocity and/or mach number
!
use kinddefine
use fg, only:gc,gd,ge,gf,gh,gi,hb,hc,he
use gg, only:gam,gm,g2,g4,g5,g6,g7,g8,g9,ga,rga,qt
use cline, only:wip,x1,frip,zonk,seo,cse,axis,taxi
use prop, only:sfoa,conv
use param, only: etad,rc,amach,bmach,cmach,emach,gmach,frc,sf,wwo,
&wwop,qm,we,cbet,xe,eta,epsi,bpsi,xo,yo,rrc,sdo,xb,xc,ah,pp,se,tye,
&xa
use contr,only:itle,ie,lr,it,jb,jq,jx,lv,nocon,in,mc,mcp,ip,iq,ise
&,jc,m,mp,mq,n,np,nf,nr,lc,md,mf,mt,nd,nt
implicit none
!
interface
function cubic(ea,eb,ec,ed)
use kinddefine
implicit none
real(kind=K8) :: cubic
real(kind=K8),intent(in) :: ea,eb,ec,ed
end function cubic
!
function fmv(psi)
use kinddefine
implicit none
real(kind=K8) :: fmv
real(kind=K8), intent(in) :: psi
end function fmv
!
function toric(wip,se)
use kinddefine
implicit none
real(kind=K8) :: toric
real(kind=K8),intent(in) :: se,wip
end function toric
!
subroutine conic(xm,b)
use kinddefine
implicit none
real(kind=K8),dimension(4),intent(out) :: b
real(kind=K8),intent(in) :: xm
end subroutine conic
!
subroutine scond(a,b,c,king)
use kinddefine
implicit none
integer(kind=K4),intent(in) :: king
real(kind=K8),dimension(150),intent(in) :: a,b
real(kind=K8),dimension(150),intent(out) :: c
end subroutine scond
!
subroutine sorce(w,b)
use kinddefine
implicit none
real(kind=K8),intent(in) :: w
real(kind=K8),dimension(4),intent(out) :: b
end subroutine sorce
!
subroutine trans(rto,tk,wo,amn,amp,ampp,w,awp,awpp,cwoppp,axn)
use kinddefine
implicit none
real(kind=K8) :: rto,tk,wo,amn,amp,ampp,w,awp,awpp,cwoppp,axn
end subroutine trans
end interface
!
integer(kind=K4) :: ix,j,k,l,n0,n3,n4,n5,nx
real(kind=K8) :: aa,ab,abcm,aem,am,amp,ampp,amsq
real(kind=K8) :: apsi,awp,awpp,awppp,bbet,bmp,bmpp,bppp
real(kind=K8) :: c2,c3,c4,cm,cbm,cmc,cmp,cpp,cppp,dw,dx
real(kind=K8) :: ea,eb,ebet,ec,ed,eit,eoe,ew
real(kind=K8) :: fbet,fftn,fiv,fmach,fn,four,fpsi,fq,fxw
real(kind=K8) :: gj,gk,gmm,gmp,gpsi,gq,gr,gs,gw,gwp,gww
real(kind=K8) :: h,half,hh,om,one,q
real(kind=K8) :: ra,rg,rmach,rt,sev,six,sm,smpp,smppp,sxty
real(kind=K8) :: ten,thr,tk,tlv,trty,two,tyin
real(kind=K8) :: w,wap,wapp,wb,xbc,wbp,wbpp,wc,wcb
real(kind=K8) :: wcp,wep,wepp,weppp,whp,whpp
real(kind=K8) :: wi,wipp,wippp,wm,wo,woppp,wp,wpp,wppp
real(kind=K8) :: wrppp,wspp,wsppp
real(kind=K8) :: x1in,xain,xbcm,xbcmn,xbcmx,xbcn,xbet,xbin
real(kind=K8) :: xcin,xd,xdin,xi,xie
real(kind=K8) :: xiin,xinch,xj,xm,xmp,xmpp,xmppp,xmw
real(kind=K8) :: xo1,xoi,xoin,yain,zro
real(kind=K8),dimension(4) :: d
real(kind=K8),dimension(6) :: c
real(kind=K8),dimension(150) :: ax,axm,axmp
character(len=4,kind=K3) :: iaxis,m1,m2
data zro/0.0d+0/,one/1.d+0/,two/2.d+0/,six/6.d+0/,half/5.d-1/
data thr/3.d+0/,four/4.d+0/,fiv/5.d+0/,ten/1.d+1/,tlv/1.2d+1/
data sev/7.d+0/,eit/8.d+0/,fftn/1.5d+1/,trty/3.d+1/,sxty/6.d+1/
data m1/'GMAC'/,m2/'2-D '/,iaxis/'AXIS'/
data n3/4h 3RD/,n4/4h 4TH/,n5/4h 5TH/,n0/4h-DEG/
! npi=9.d+1/conv
! if (jq.eq.0.and.jx.eq.0) call orez(axis,2*750)
if (jq.eq.0.and.jx.eq.0) then
axis(:,:)=0.0d0
taxi(:,:)=0.0d0
endif
if (jq .gt. 0) goto 50
if (jx .eq. 0) goto 2
!
! card used to obtain internal streamlines (jx > 0)
!
read (1,93,end=91) etad,qm,xj
!
jx=int(xj,K4)
if (etad .eq. sxty) goto 1
eta=etad/conv
if (ie .eq. 0) se=eta
if (ie .eq. 1) se=two*dsin(half*eta)
cse=dcos(eta)
apsi=bpsi-eta/qt
amach=fmv(apsi)
ra=((g6+g5*amach**2)**ga/amach)**qt
gpsi=epsi+eta/qt
gmach=fmv(gpsi)
rg=((g6+g5*gmach**2)**ga/gmach)**qt
mp=one+thr*(ra-rg)
goto 14
1 se=qm*seo
goto 14
!
! constants used in transonic solution
2 gc=(two*gam/qt-thr)/six/(3+ie)
ge=(thr*(8+ie)-four*gam/qt)/thr/(7+ie)
gh=(fftn+(2-6*ie)*gam)/tlv/(5+ie)
gj=(gam*(gam+9.25d+0*ie-26.5d+0)+.75d+0*(6-ie))/tlv/(3-ie)
gk=(gam*(gam+2.25d+0*ie-16.5d+0)+2.25d+0*(2+ie))/six
gr=(fftn-(1+9*ie)*gam)/(15+ie)/18.d+0
hb=(14.d+0*gam-75.d+0+18*ie)/(270.d+0+18*ie)
if (ie .eq. 0) goto 3
gd=(gm*(652.d+0*gm+1319.d+0)+1000.d+0)/6912.d+0
gf=(3612.d+0+gm*(751.d+0+gm*754.d+0))/2880.d+0
gi=(909.d+0+gam*(270.d+0+gam*412.d+0))/10368.d+0
gs=(gam*(gam*2708.d+0+2079.d+0)+2115.d+0)/82944.d+0
hc=(gam*(2364.d+0*gam-3915.d+0)+14337.d+0)/82944.d+0
he=(gam*(64.d+0*gam+117.d+0)-1026.d+0)/1152.d+0
goto 4
!
! axisym flow, ie=1, qt=0.5, gam=1.4, gc=0.10833333, gd=0.236099537,
! ge=0.65833333, gf=1.40036111, gh=0.13055556, gi=0.2020177469,
! gj=0.76833333, gk=-1.87333333, gr=0.003472222, gs=0.1245814043,
! hb=0.12986111, hc=0.1626331019, he=-0.6395486111
!
3 gd=(gm*(32.d+0*gm-14.d+0)+221.d+0)/1080.d+0
gf=(4230.d+0+gm*(211.d+0+gm*334.d+0))/3780.d+0
gi=(738.d+0+gam*(273.d+0-gam*82.d+0))/7560.d+0
gs=(gam*(gam*782.d+0+3507.d+0)+7767.d+0)/272160.d+0
hc=(gam*(274.d+0*gam-861.d+0)+4464.d+0)/17010.d+0
he=(gam*(32.d+0*gam+87.d+0)-561.d+0)/540.d+0
!
! planar flow, ie=0, qt=1.0,gam=1.4, gc=-0.011111, gd=0.2041851852,
! ge=0.8761904762, gf=1.155513228, gh=0.29666667, gi=0.1269153439,
! gj=-0.85111111, gk=-2.7733333, gr=0.05037037037, gs=0.05221017049,
! hb=-0.2051851852, hc=0.2231416814, he=-0.6971851852
!
! card used to establish inviscid parameters
!
4 read (1,93,end=91) etad,rc,fmach,bmach,cmc,sf,pp,xc
!
! card used to control calculations
!
read (1,92) mt,nt,ix,in,iq,md,nd,nf,mp,mq,jb,jx,jc,it,lr,nx
!
lc=int(xc,K4)
if (xc .gt. one) lc=int(xc+one,K4)
nr=six*rc
mf=fmach
if (ie .eq. 1) mc=m1
if (ie .eq. 0) mc=m2
nocon=0
eta=etad/conv
if (ie .eq. 0) se=eta
if (ie .eq. 1) se=two*dsin(half*eta)
if (etad .eq. sxty) se=one
seo=se
ise=int(se,K4)
cse=dcos(eta)
rt=rc+one
am=one
wi=one
wipp=zro
mcp=cmc
cmach=dabs(cmc)
cbet=dsqrt(cmach*cmach-one)
frc=((g6+g5*cmach**2)**ga/cmach)**qt
tye=frc*se
if (sf .lt. zro) sf=-sf/tye
if (ise .eq. 0) goto 5
!
! non-radial flow at inflection point
iq=1
amach=cmach
bmach=cmach
emach=cmach
fmach=cmach
gmach=cmach
if (ie .eq. 1) am=gmach
we=g2*emach/dsqrt(emach**2+g9)
dw=we-wi
xo=zro
eoe=zro
goto 15
!
! radial flow at inflection point
5 if (in .eq. 0) goto 6
if ((lc .lt. 0) .and. (in .lt. 0)) in=-1
if ((lc .eq. 0) .or. (mcp .lt. 0)) in=isign(10,in)
6 bbet=dsqrt(bmach*bmach-one)
bpsi=g2*datan(g4*bbet)-datan(bbet)
if (fmach) 9,8,7
7 fbet=dsqrt(fmach*fmach-one)
fpsi=g2*datan(g4*fbet)-datan(fbet)
goto 10
8 fmach=-bpsi/eta
if (bpsi/eta .gt. 7.5d+0) fmach=-7.5d+0
9 fpsi=-fmach*eta
fmach=fmv(fpsi)
10 epsi=fpsi-two*eta/qt
emach=fmv(epsi)
we=g2*emach/dsqrt(emach*emach+g9)
dw=we-wi
call sorce(we,d)
xe=d(1)
wep=d(2)
wepp=d(3)
wrppp=d(4)
if (nr .ne. 0) goto 15
if ((lr .ne. 0) .or. (iq .lt. 0)) goto 11
if (ix .eq. 0) write (2,106) itle,n3
if (ix .ne. 0) write (2,106) itle,n4
!
! iteration to determine rc if not specified (nr = 0)
11 ea=wrppp
eb=-fiv*wepp-wipp
ec=tlv*wep
ed=-tlv*dw
xie=cubic(ea,eb,ec,ed)
if (xie .le. zro) goto 89
12 wip=two*(we-one)/xie-wep+(wepp-wipp)*xie/six
13 nocon=nocon+1
if (nocon .gt. 100) goto 90
14 rt=toric(wip,se)
rc=rt-one
15 tk=(one-g7*(one+(ge+gf/rt)/rt)/rt**2/(15+ie)/thr)**qt
yo=se/tk
aa=dsqrt(qt*(gam+one)*rt)
if (qm .ne. one) goto 19
whpp=(one-gam/1.5d+0+gj/rt)/(aa*yo)**2
if ((nr .ne. 0) .or. (ise.eq. 1)) goto 18
if (dabs(whpp-wipp) .lt. 1.d-10) goto 18
wipp=whpp
if (ix) 11,17,16
16 ea=gk/(aa*yo)**3
eb=thr*(wipp+wepp)
ec=-tlv*wep
ed=tlv*dw
xie=cubic(ea,eb,ec,ed)
if (xie .le. zro) goto 89
goto 12
17 h=(eit*wip+sev*wep)/(thr*wipp-two*wepp)
hh=trty*dw/(thr*wipp-two*wepp)
xie=hh/(dsqrt(h*h+hh)+h)
wip=wep-half*xie*(wepp+wipp)
goto 13
!
! iteration for rc completed, remainder of transonic values computed
18 wip=(one-(gc-gd/rt)/rt)/yo/aa
whp=wip
wipp=whpp
amp=g7*wip
ampp=g7*(whpp+thr*g8*wip**2)
19 xoi=yo*dsqrt(g7/two/(9-ie)/rt)*(one+(gh+gi/rt)/rt)
if (qm .ne. one) goto 21
if (ise .eq. 1) xi=xoi
xo1=xoi
wo=one-(half/(3-ie)+(gr+gs/rt)/rt)/rt
om=wo/dsqrt(g7-g8*wo**2)
woppp=gk/(aa*yo)**3
if (lr .eq. 0) goto 21
!
! call for throat characteristic values
call trans (rt,tk,wo,am,amp,ampp,wi,awp,awpp,awppp,xi)
if ((nx .lt. 0) .and. (nt.lt.0)) goto 87
if (nx .lt. 0) goto 4
amp=amp/se
ampp=ampp/se**2
wap=awp/se
wapp=awpp/se**2
woppp=awppp/se**3
if (ise .eq. 1) goto 21
dw=we-wi
xoi=xi*se
if (nr .gt. 0) goto 20
x1=xe-xie
xo=xe-xie-xo1
c2=xie*wip
c3=half*wipp*xie**2
c4=we-one-c2-c3
if (ix .ne. 0) c4=four*c4+two*c3+c2-xie*wep
if (iq .lt. 0) goto 20
write (2,110) itle,n4,lr
write (2,96) xie,c2,c3,c4,x1
20 wip=wap
wipp=wapp
21 wwo=one+(one/(ie+3)-(hb-hc/rt)/rt)/rt
wwop=(one+(one-ie/eit-he/rt)/rt)/yo/aa
rrc=one/rc
sdo=rrc/yo
zonk=qm+1.0d-03
np=zonk*(iabs(nf)-1)+1
if (sf .gt. zro) goto 22
sf=one/yo
22 if (iq .lt. 0) goto 31
ip=0
jq=0
m=zonk*(mt-1)+1
n=nt
if (qm .eq. one) goto 23
xo=x1-xoi
return
!23 call orez (c,6)
23 c(:)=0.0d0
if (ise .eq. 0) goto 31
!
! length of axial distribution for non-radial flow
x1=xoi
aem=emach-am
c(1)=am
if (lc) 25,24,27
24 amsq=amp**2+aem*ampp*four/thr
if (lr .eq. 0) write (2,122) itle,n4,n0
if (lr .ne. 0) write (2,107) itle,n4,n0,lr
if (amsq .lt. zro) goto 28
xie=four*aem/(dsqrt(amsq)+amp)
xe=xie+xi
c(5)=thr*aem-amp*xie
goto 26
25 xie=thr*aem/amp
xe=xie+xi
if (lr .eq. 0) write (2,122) itle,n3,n0
if (lr .ne. 0) write (2,107) itle,n3,n0,lr
26 c(2)=amp*xie
c(3)=six*aem-thr*c(2)
c(4)=thr*c(2)-eit*aem
goto 46
27 if (lc .eq. 1) goto 29
xe=xc/tk
xie=fiv*aem/(dsqrt(amp**2+in*aem*ampp/eit)+amp)
if (xe .gt. xi+xie) xe=xi+xie
xie=xe-xi
c(2)=amp*xie
c(3)=half*in*ampp*xie**2/ten
c(4)=ten*aem-six*c(2)-thr*c(3)
c(5)=-fftn*aem+eit*c(2)+thr*c(3)
c(6)=six*aem-thr*c(2)-c(3)
if (lr .eq. 0) write (2,122) itle,n5,n0
if (lr .ne. 0) write (2,107) itle,n5,n0,lr
goto 46
28 c(2)=two*aem
c(4)=-c(2)
c(5)=aem
xie=two*aem/amp
xe=xie+xi
goto 46
! if xc=1 then read the centerline mach distribution from point b
! to point c. see explanatory note on xc page 54 in adec-tr-87-63
29 do j=1,nt
k=nt+1-j
read(9) ax(k),axm(k),axmp(k)
if (j .eq. 1) dx=xi-ax(k)
axis(1,k)=ax(k)+dx
enddo
axm(nt)=am
axmp(nt)=amp
xe=axis(1,1)
xie=xe-xi
if (lr .eq. 0) write (2,122) itle,n5,n0
if (lr .ne. 0) write (2,107) itle,n5,n0,lr
goto 46
!
! length of upstream axial distribution for radial flow
31 if (sfoa .eq. zro) goto 32
if (lr .eq. 0) write (2,106) itle,n5
if (lr .ne. 0) write (2,110) itle,n5,lr
goto 44
32 if (lr .eq. 0) goto 33
if ((nr .eq. 0) .and. (ix .eq. 0)) goto 41
if ((nr .eq. 0) .and. (ix .ne. 0)) mf=0
if (mf .ne. 0) goto 40
if ((iq .lt. 0) .or. (nr .eq. 0)) goto 35
if (ix .eq. 0) write (2,110) itle,n3,lr
if (ix .ne. 0) write (2,110) itle,n4,lr
goto 35
33 if (mf .eq. 0) goto 34
if (nr .eq. 0) goto 45
if (iq .ge. 0) write (2,106) itle,n4
goto 41
!
! iteration for emach if not specified (mf = 0)
34 if (iq .lt. 0) goto 35
if (ix .eq. 0) write (2,106) itle,n3
if (ix .ne. 0) write (2,106) itle,n4
35 if (nocon .gt. 100) goto 90
if (ix) 41,36,37
36 xie=six*dw/(dsqrt((wip+wep+wep)**2-six*dw*wepp)+wip+wep+wep)
fxw=half*xie*(wepp+wipp)/(wep-wip)
if (fxw .le. zro) ew=we+.1d+0
if (fxw .le. zro) goto 39
if (fxw .lt. one) ew=wi+dw*(four+fxw**2)/fiv
if ((fxw .gt. one) .or. (ie .eq. 0)) ew=wi+dw*(9.d+0+fxw)/ten
goto 39
37 ea=woppp
eb=fiv*wipp+wepp
ec=tlv*wip
ed=-tlv*dw
xie=cubic(ea,eb,ec,ed)
if (xie .gt. zro) goto 38
ew=we-.1d+0
if (ew .gt. wi) goto 39
write (2,113)
goto 4
38 ew=wi+half*xie*(wip+wep+xie*(wipp-wepp)/six)
39 we=ew
! if (we .gt. g2) go to 79
if (we .gt. g2) then
write (2,119)
write (2,126)
stop
endif
if (dabs(ew-dw-wi) .lt. 1.d-9) goto 43
dw=we-wi
call sorce(we,d)
xe=d(1)
wep=d(2)
wepp=d(3)
wrppp=d(4)
nocon=nocon+1
goto 35
40 if (iq .lt. 0) goto 41
write (2,110) itle,n4,lr
41 h=thr*(wep+wip)/(wipp-wepp)
hh=tlv*dw/(wipp-wepp)
xie=hh/(dsqrt(h*h+hh)+h)
if (mf) 44,42,45
42 ew=wi+xie*(wip+thr*wep-xie*(wepp-xie*wrppp/six))/four
goto 39
43 emach=we/dsqrt(g7-g8*we*we)
!
! iteration for emach completed
ebet=dsqrt(emach*emach-one)
epsi=g2*datan(g4*ebet)-datan(ebet)
fpsi=epsi+two*eta/qt
fmach=fmv(fpsi)
44 if (bmach .gt. fmach) goto 45
bmach=fmach
bpsi=fpsi
mp=0
45 gpsi=fpsi-eta/qt
gmach=fmv(gpsi)
if (ie .eq. 1) ah=gmach
rg=((g6+g5*gmach**2)**ga/gmach)**qt
apsi=bpsi-eta/qt
amach=fmv(apsi)
ra=((g6+g5*amach**2)**ga/amach)**qt
xa=ra*cse
if (sfoa .gt. zro) xie=sfoa/sf+xe-xa-xoi
if (sfoa .lt. zro) xie=xe-sfoa/sf-rg*cse-xoi
xi=xe-xie
xo=xi-xoi
x1=xo+xo1
if (iq .lt. 0) goto 48
xb=((g6+g5*bmach**2)**ga/bmach)**qt
if (lc .lt. 2) xc=((g6+g5*cmach**2)**ga/cmach)**qt
c(1)=wi
c(2)=xie*wip
c(3)=half*wipp*xie*xie
c(4)=ten*dw-xie*(four*wep-half*xie*wepp)-six*c(2)-thr*c(3)
c(5)=xie*(sev*wep+eit*wip-xie*(wepp-thr*wipp/two))-fftn*dw
c(6)=six*dw-thr*xie*(wep+wip)+half*xie*xie*(wepp-wipp)
if (mf .eq. 0 .and. ix .eq. 0) c(5)=zro
if (nr .eq. 0 .and. ix .eq. 0 .and. lr .eq. 0) c(5)=zro
if (sfoa .eq. zro) c(6)=zro
eoe=epsi/eta
wippp=six*c(4)/xie/xie/xie
weppp=six*(c(4)+four*c(5)+ten*c(6))/xie/xie/xie
46 write (2,99) m,n,eoe,bmach,cmach,gam,etad,rc,sf
write (2,102) se,tk,wwo,wwop,emach,fmach,mc,ah
if (lr .ne. 0) write (2,123) wi,wap,wapp,am,amp,ampp
if (ise.eq.1 .and. lr.eq.0) write (2,123) wi,wip,whpp,am,amp,ampp
if (ise .eq. 1) goto 47
write (2,101) wi,wip,wipp,wippp,woppp
write (2,98) we,wep,wepp,weppp,wrppp
47 write (2,94) c(1),c(2),c(3),c(4),c(5),c(6)
write (2,95) xoi,xi,xo,yo,xie,xe,nocon
if (ise .eq. 1) xc=xe
if (ise .eq. 1) xa=xe+tye*cbet
48 nocon=0
wip=whp
if (qm .ne. one) goto 49
if (pp .lt. zro) frip=zro
if (pp .eq. zro) frip=-xo*sf
if (pp .gt. zro) frip=pp-sf*xa
if (iq .lt. 0) goto 50
xoin=sf*xo+frip
x1in=sf*x1+frip
xiin=sf*xi+frip
write (2,125) om,xoin,x1in,am,xiin
if (iq .gt. 0) goto 67
49 if (n) 87,50,68
50 m=zonk*(md-1)+1
jq=1
n=nd
ip=in
if (qm .ne. one) return
! call orez(c,6)
c(:)=0.0d0
if (iq .lt. 0) goto 51
if (mq .ge. 0 .and. n .gt. 0) goto 51
write (2,104)
goto 52
51 write (2,105)
52 if (ip) 53,67,58
!
! length of downstream velocity distribution, radial flow
53 wc=g2*cmach/dsqrt(cmach*cmach+g9)
wb=g2*bmach/dsqrt(bmach*bmach+g9)
wcb=wc-wb
call sorce(wb,d)
xb=d(1)
wbp=d(2)
wspp=d(3)
wsppp=d(4)
c(1)=wb
wcp=zro
if (lc) 54,55,56
54 xbc=thr*wcb/wbp
wbpp=-two*wbp/xbc
write (2,109) itle,n3
goto 57
55 wbpp=wspp
if (mcp .lt. 0) write (2,109) itle,n3
if (mcp .lt. 0) xbcn=thr*wcb/wbp
if (mcp .lt. 0) xbcm=-two*wbp/wbpp
if (mcp .gt. 0) write (2,109) itle,n4
if (mcp .gt. 0) xbcn=four*wcb/wbp
if (mcp .gt. 0) xbcm=-thr*wbp/wbpp
abcm=one-xbcn/xbcm
if (abcm .lt. zro) goto 88
xbc=xbcn/(dsqrt(abcm)+one)
goto 57
56 wbpp=-wspp*ip/ten
if (mcp.gt.0) xbcmn=cubic(wsppp/thr,thr*wbpp,tlv*wbp,-two*ten*wcb)
if (mcp.lt.0) xbcmn=cubic(wsppp/six,wbpp,thr*wbp,-four*wcb)
xbcmx=fiv*wcb/(dsqrt(wbp**2-ip*wcb*wspp/eit)+wbp)
if (xc .gt. xb+xbcmx) xc=xb+xbcmx
if (xc .lt. xb+xbcmn) xc=xb+xbcmn
xbc=xc-xb
if (mcp .lt. 0) write (2,109) itle,n4
if (mcp .gt. 0) write (2,109) itle,n5
57 c(2)=xbc*wbp
c(3)=half*xbc*xbc*wbpp
if (mcp .lt. 0) c(4)=four*wcb-thr*c(2)-two*c(3)
if (mcp .lt. 0) c(5)=-thr*wcb+two*c(2)+c(3)
if (mcp .gt. 0) c(4)=ten*wcb-six*c(2)-thr*c(3)
if (mcp .gt. 0) c(5)=-fftn*wcb+eit*c(2)+thr*c(3)
if (mcp .gt. 0) c(6)=six*wcb-thr*c(2)-c(3)
if (lc .lt. 0) c(5)=zro
if (lc .le. 0) c(6)=zro
xc=xb+xbc
goto 63
!
! length of downstream mach no. distribution, radial flow
58 call conic(bmach,d)
xb=d(1)
bmp=d(2)
smpp=d(3)
smppp=d(4)
cbm=cmach-bmach
c(1)=bmach
bmpp=smpp*ip/ten
if (lc .ne. 0) goto 59
if (mcp .lt. 0) write (2,108) itle,n3
if (mcp .lt. 0) xbcn=thr*cbm/bmp
if (mcp .lt. 0) xbcm=-two*bmp/bmpp
if (mcp .gt. 0) write (2,108) itle,n4
if (mcp .gt. 0) xbcn=four*cbm/bmp
if (mcp .gt. 0) xbcm=-thr*bmp/bmpp
abcm=one-xbcn/xbcm
if (abcm .lt. zro) goto 88
xbc=xbcn/(dsqrt(abcm)+one)
xc=xb+xbc
goto 62
59 if (lc .ne. 1) goto 61
do k=1,nd
read (9) ax(k),axm(k),axmp(k)
if (k .eq. 1) dx=xb-ax(1)
axis(1,k)=ax(k)+dx
enddo
if (axmp(2) .eq. zro) call scond(ax,axm,axmp,nd)
axm(1)=bmach
axmp(1)=bmp
xc=axis(1,nd)
xbc=xc-xb
write (2,111) itle
goto 63
61 if (mcp.gt.0) xbcmn=cubic(smppp/thr,thr*bmpp,tlv*bmp,-two*ten*cbm)
if (mcp.lt.0) xbcmn=cubic(smppp/six,bmpp,thr*bmp,-four*cbm)
xbcmx=fiv*cbm/(dsqrt(bmp**2+ip*cbm*smpp/eit)+bmp)
if (xc .gt. xb+xbcmx) xc=xb+xbcmx
if (xc .lt. xb+xbcmn) xc=xb+xbcmn
xbc=xc-xb
if (mcp .lt. 0) write (2,108) itle,n4
if (mcp .gt. 0) write (2,108) itle,n5
62 c(2)=xbc*bmp
c(3)=half*xbc*xbc*bmpp
if (mcp .lt. 0) c(4)=four*cbm-thr*c(2)-two*c(3)
if (mcp .lt. 0) c(5)=-thr*cbm+two*c(2)+c(3)
if (mcp .gt. 0) c(4)=ten*cbm-six*c(2)-thr*c(3)
if (mcp .gt. 0) c(5)=-fftn*cbm+eit*c(2)+thr*c(3)
if (mcp .gt. 0) c(6)=six*cbm-thr*c(2)-c(3)
if (lc .le. 0) c(6)=zro
63 cpp=zro
cmp=zro
if (mcp .lt. 0) cpp=(two*c(3)+six*c(4)+tlv*c(5))/xbc**2
bppp=six*c(4)/xbc/xbc/xbc
cppp=six*(c(4)+four*c(5)+ten*c(6))/xbc/xbc/xbc
xd=xc+tye*cbet
write (2,100) m,n,np,gam,etad,rc,sf
if (ip) 64,67,65
64 write (2,116) wb,wbp,wbpp,bppp,wspp,wc,wcp,cpp,cppp,wsppp
goto 66
65 write (2,117) bmach,bmp,bmpp,bppp,smpp,cmach,cmp,cpp,cppp,smppp
66 write (2,94) c(1),c(2),c(3),c(4),c(5),c(6)
write (2,118) amach,xa,xb,xbc,xc,xd
xain=sf*xa+frip
yain=sf*xa*dtan(eta)
xbin=sf*xb+frip
xcin=sf*xc+frip
xdin=sf*xd+frip
tyin=sf*tye
write (2,120) xain,yain,xbin,xcin,xdin,tyin
! the n=0 goto 4 on next line will fail with a single input card
67 if (n) 87,4,68
68 if (mq .lt. 0) goto 69
!
! calculate axial distribution
write (2,103) iaxis
!69 fn=n-1
69 fn=real(n-1,k8)
l=int((n+40)/41,K4)
if (ip .ne. 0) xie=xbc
if (ip .ne. 0) xi=xb
q=zro
do k=1,n
if (ise .eq. 1 .and. lc .eq. 1) goto 72
if (ip .ne. 0) goto 70
if (nx .eq. 0) q=((n-k)/fn)**2
if (nx .ne. 0) q=((n-k)/fn)**(nx*1.d-1)
goto 71
70 if (lc .eq. 1) goto 72
q=(k-1)/fn
71 axis(1,k)=xie*q+xi
72 rmach=one
if (ise .eq. 1) goto 75
if (axis(1,k) .lt. one+1.d-9) goto 74
ab=axis(1,k)**(rga/qt)
if (ab .lt. two) sm=((one+dsqrt(ab*gm-gm))**ga)**2
if (ab .ge. two) sm=(ab/g5)**g7
! if (ab .ge. two) sm=(ab/gs)**g7
73 cm=sm**g5
fq=sm*(g6+g5*sm-cm*ab)/(sm-one)/g5/g6
sm=sm-fq
if (dabs(fq) .gt. 1.d-9) goto 73
rmach=dsqrt(sm)
74 if (ip .lt. 1) goto 78
75 if (lc .eq. 1) goto 76
xm=c(1)+q*(c(2)+q*(c(3)+q*(c(4)+q*(c(5)+q*c(6)))))
if (ise .eq. 1 .or. k .eq. 1) goto 77
if (rmach .lt. xm) write (2,124) k,rmach,xm
goto 77
76 xm=axm(k)
77 xmp=(c(2)+q*(two*c(3)+q*(thr*c(4)+q*(four*c(5)+q*fiv*c(6)))))/xie
if (lc .eq. 1) xmp=axmp(k)
xmpp=two*(c(3)+q*(thr*c(4)+q*(six*c(5)+q*ten*c(6))))/xie/xie
xmppp=six*(c(4)+q*(four*c(5)+ten*q*c(6)))/xie/xie/xie
gmm=xm*xm+g9
gq=dsqrt(gmm)
w=g2*xm/gq
wm=g9*g2/gq/gmm
wp=wm*xmp
wpp=wm*(xmpp-thr*xm*xmp*xmp/gmm)
gmp=fiv*xm*xm*xmp*xmp/gmm-thr*xm*xmpp-xmp*xmp
wppp=wm*(xmppp+thr*xmp*gmp/gmm)
if (mq .lt. 0) goto 83
if (mod(k-1,l) .ne. 0) goto 83
goto 82
78 w=c(1)+q*(c(2)+q*(c(3)+q*(c(4)+q*(c(5)+q*c(6)))))
wp=(c(2)+q*(two*c(3)+q*(thr*c(4)+q*(four*c(5)+q*fiv*c(6)))))/xie
wpp=two*(c(3)+q*(thr*c(4)+q*(six*c(5)+q*ten*c(6))))/xie/xie
wppp=six*(c(4)+q*(four*c(5)+ten*q*c(6)))/xie/xie/xie
gww=g7-w*w*g8
if (gww .gt. zro) goto 80
79 write (2,119)
goto 4
80 gw=dsqrt(gww)
xm=w/gw
if (k .eq. 1 .or. k .eq. n) goto 81
if (ip .eq. 0 .and. rmach .gt. xm) write (2,124) k,rmach,xm
if (ip .ne. 0 .and. rmach .lt. xm) write (2,124) k,rmach,xm
81 xmw=g7/gw/gww
xmp=xmw*wp
xmpp=xmw*(wpp+thr*g8*w*wp*wp/gww)
gwp=fiv*w*w*wp*wp*g8/gww+thr*w*wpp+wp*wp
xmppp=xmw*(wppp+thr*wp*g8*gwp/gww)
if (mq .lt. 0) goto 83
if (mod(k-1,l) .ne. 0) goto 83
82 xinch=sf*axis(1,k)+frip
write (2,97) k,axis(1,k),xinch,xm,xmp,xmpp,xmppp,w,wp,wpp,wppp
if (mod(k+l-1,10*l) .eq. 0) write (2,115)
83 axis(3,k)=xm
axis(2,k)=zro
axis(5,k)=ie*half*(xm-one/xm)*wp/w
xbet=dsqrt(xm**2-one)
axis(4,k)=g2*datan(g4*xbet)-datan(xbet)
enddo
if (iq .eq. 0 .and. ip .eq. 0 .and. m .le. 0) goto 50
! the n=0 goto 4 on next line will fail with a single input card
if (m) 87,4,85
85 if (ip .ne. 0) return
do k=1,n
do j=1,5
taxi(j,k)=axis(j,k)
enddo
enddo
return
87 lv=-1
return
88 write (2,114)
goto 4
89 write (2,112)
goto 4
90 write (2,121) nocon
goto 4
91 stop
!
92 format (16i5)
93 format (8f10.3)
94 format (1x,9x,'C1=',f11.7,3x,'C2=',f12.8,3x,'C3=',1pe15.7,3x,'C4='
&,e15.7,3x,'C5=',e15.7,3x,'C6=',e15.7/)
95 format (1x,9x,'XOI=',f12.8,3x,'XI=',f12.8,3x,'XO=',f12.8,3x,'YO=',
&f12.8,3x,'XIE=',f12.8,3x,'XE=',f12.8,i5,' ITERATIONS'/)
96 format (1x,4x,'CURVE FROM MACH 1, XIE=',f12.8,' C2=',f12.8,'
& C3=',1pe15.7,' C4=',e15.7,' X1=',0pf12.8/)
97 format (1x,i3,2f10.5,f10.6,1p3e14.6,0pf10.6,1p3e14.6)
98 format (1x,9x,'WE=',f12.8,4x,'WEP=',f12.8,4x,'WEPP=',1pe15.7,4x,'W
&EPPP=',e15.7,4x,'WRPPP=',e15.7/)
99 format (1x,4x,'NO. OF POINTS ON 1ST CHAR. (M)=',i3,5x,'NO. OF POIN
&TS ON AXIS (N)=',i3,5x,'EPSI/ETA=',f8.5,4x,'BMACH=',f9.5,4x,'CMACH
&=',f9.5//5x,'GAMMA=',f7.4,5x,'INFLECTION ANG. (ETA)=',f8.4,2x,'DEG
&REES',5x,'RAD. OF CURV. (RC)=',f11.6,5x,'SCALE FACTOR (SF)=',f13.8
&/)
100 format (1x,4x,'NO. OF POINTS ON 1ST CHAR. (M)=',i3,5x,'NO. OF POIN
&TS ON AXIS (N)=',i3,5x,'NO. OF POINTS ON LAST CHAR. (NP)=',I3//5x,
&'GAMMA=',f7.4,5x,'INFLECTION ANG. (ETA)=',f8.4,2x,'DEGREES',5x,'RA
&D. OF CURV. (RC)=',f13.8,5x,'SCALE FACTOR (SF)=',f11.6/)
101 format (1x,9x,'WI=',f12.8,4x,'WIP=',f12.8,4x,'WIPP=',1pe15.7,4x,'W
&IPPP=',e15.7,4x,'WOPPP=',e15.7/)
102 format (1x,4x,'Y*=',f10.8,4x,'RMASS=',f10.8,4x,'WWO=',f10.7,4x,'WW
&OP=',f11.8,4x,'EMACH=',f8.5,4x,'FMACH=',f10.7,4x,a4,'H=',f9.5/)
103 format (1x,1x,a4/' POINT',4x,'X',7x,'X(IN)',3x,'MACH NO.',4x,'DM/D
&X',8x,'D2M/DX2',7x,'D3M/DX3',7x,'W=Q/A*',5x,'DW/DX',8x,'D2W/DX2',7
&x,'D3W/DX3'/)
104 format ('0',//)
105 format (1x)
106 format (1x,3a4,' THROAT CONTOUR,',a4,'-DEG AXIAL VELOCITY DISTRIBU
&TION FROM SONIC POINT'/)
107 format ('1',3a4,' INVISCID CONTOUR,',a4,a4,'AXIAL MACH NUMBER DIST
&RIBUTION FROM THROAT CHARACTERISTIC WHICH HAS',i4,' POINTS'/)
108 format (1x,3a4,' DOWNSTREAM CONTOUR,',a4,'-DEG AXIAL MACH NUMBER D
&ISTRIBUTION'/)
109 format (1x,3a4,' DOWNSTREAM CONTOUR,',a4,'-DEG AXIAL VELOCITY DIST
&RIBUTION'/)
110 format (1x,3a4,' THROAT CONTOUR,',a4,'-DEG AXIAL VELOCITY DISTRIBU
&TION FROM THROAT CHARACTERISTIC WHICH HAS',i4,' POINTS'/)
111 format (1x,3a4,' DOWNSTREAM CONTOUR'/)
112 format (1x,'SOLUTION TO CUBIC EQUATION IS NEGATIVE')
113 format (1x,'RC IS TOO LARGE TO ALLOW A SOLUTION')
114 format (1x,'BMACH IS TOO SMALL TO ALLOW A SOLUTION')
115 format (1x)
116 format ('0',9x,'WB=',f12.8,4x,'WBP=',f12.8,4x,'WBPP=',1pe15.7,4x,'
&WBPPP=',e15.7,5x,'WSPP=',e15.7//10x,'WC=',0pf12.8,4x,'WCP=',f12.8,
&4x,'WCPP=',1pe15.7,4x,'WCPPP=',e15.7,4x,'WSPPP=',e15.7)
117 format (1x,9x,'BMACH=',f9.5,4x,'BMP=',f12.8,4x,'BMPP=',1pe15.7,4x,
&'BMPPP=',e15.7,5x,'SMPP=',e15.7//10x,'CMACH=',0pf9.5,4x,'CMP=',f12
&.8,4x,'CMPP=',1pe15.7,4x,'CMPPP=',e15.7,4x,'SMPPP=',e15.7/)
118 format (1x,9x,'AMACH=',f11.7,4x,'XA=',f11.7,4x,'XB=',f11.7,4x,'XBC
&=',f11.7,4x,'XC=',f12.7,4x,'XD=',f12.7/)
119 format ('0','VELOCITY GREATER THAN THEORETICAL MAXIMUM VALUE')
120 format (1x,9x,'XA(IN)=',f11.7,', YA(IN)=',f11.7,', XB(IN)=',f12.7,
&', XC(IN)=',f12.7,', XD(IN)=',f12.7,', YD(IN)=',f11.7/)
121 format ('1','NO CONVERGENCE IN',i4,'ITERATIONS')
122 format ('1',3a4,' INVISCID CONTOUR,',a4,a4,' AXIAL MACH NUMBER DIS
&TRIBUTION FROM SONIC POINT'/)
123 format (1x,9x,'WI=',f12.8,4x,'WIP=',f12.8,4x,'WIPP=',1pe15.7,4x,'M
&I=',0pf12.8,4x,'MIP=',f12.8,4x,'MIPP=',1pe15.7/)
124 format (1x,i3,' RMACH=',2f12.8)
125 format (1x,9x,'MACH',f11.8,' AT',f11.7,' IN., MACH 1 AT',f11.7,'
& IN., MACH',f11.8,' AT',f11.7,' IN.'/)
126 format (1x,'CHANGE CARD 3 INPUT MACH NUMBER'/)
end subroutine axial
|
{"hexsha": "462c4c4cc015ad41166d3a78582491b7550778dd", "size": 28001, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/axial.f", "max_stars_repo_name": "aldorona/contur", "max_stars_repo_head_hexsha": "d4197b55e28b20f905f9418f0473b2c39fadb0fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-03-03T10:30:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-25T22:20:59.000Z", "max_issues_repo_path": "src/axial.f", "max_issues_repo_name": "aldorona/contur", "max_issues_repo_head_hexsha": "d4197b55e28b20f905f9418f0473b2c39fadb0fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-08-01T20:33:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-05T13:46:04.000Z", "max_forks_repo_path": "src/axial.f", "max_forks_repo_name": "aldorona/contur", "max_forks_repo_head_hexsha": "d4197b55e28b20f905f9418f0473b2c39fadb0fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-25T16:14:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T12:14:43.000Z", "avg_line_length": 34.3992628993, "max_line_length": 73, "alphanum_fraction": 0.5258026499, "num_tokens": 12706}
|
import gym
import numpy as np
from marathon_envs.envs import MarathonEnvs
from timeit import default_timer as timer
from datetime import timedelta
import os
# Environments to smoke-test; all but Hopper are commented out, presumably
# to keep the run short — uncomment to exercise the rest.
env_names = [
    'Hopper-v0',
    # 'Walker2d-v0',
    # 'Ant-v0',
    # 'MarathonMan-v0',
    # 'MarathonManSparse-v0'
]
# Run 5 episodes of a random policy in each environment as a smoke test,
# printing the score and length of every finished episode.
for env_name in env_names:
    print ('-------', env_name, '-------')
    # Second argument is the number of agents (here a single agent).
    env = MarathonEnvs(env_name, 1)
    obs = env.reset()
    episode_score = 0.
    episode_steps = 0
    episodes = 0
    while episodes < 5:
        # action, _states = model.predict(obs)
        # Random policy: one sampled action per agent.
        action = [env.action_space.sample() for _ in range(env.number_agents)]
        obs, rewards, dones, info = env.step(action)
        # NOTE(review): rewards/dones look like per-agent collections; the
        # accumulation and the truthiness test below only make sense with a
        # single agent — confirm before raising the agent count.
        episode_score += rewards
        episode_steps += 1
        env.render()
        if dones:
            print ('episode_score', episode_score, 'episode_steps', episode_steps)
            episode_score = 0.
            episode_steps = 0
            episodes += 1
    env.close()
|
{"hexsha": "8517336346fc59391fcc9bce2989fcf6a3a8071f", "size": 973, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_marathon_envs.py", "max_stars_repo_name": "Sohojoe/plan2exploreMarathonEnvs", "max_stars_repo_head_hexsha": "d5ea00e0d24d5bf2447df4921681a5e3bfc398c1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_marathon_envs.py", "max_issues_repo_name": "Sohojoe/plan2exploreMarathonEnvs", "max_issues_repo_head_hexsha": "d5ea00e0d24d5bf2447df4921681a5e3bfc398c1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_marathon_envs.py", "max_forks_repo_name": "Sohojoe/plan2exploreMarathonEnvs", "max_forks_repo_head_hexsha": "d5ea00e0d24d5bf2447df4921681a5e3bfc398c1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2972972973, "max_line_length": 82, "alphanum_fraction": 0.6012332991, "include": true, "reason": "import numpy", "num_tokens": 256}
|
module GeoCost
using FillArrays
using StaticArrays
using Distances
using OffsetArrays
using DataStructures
using Statistics
# √2: length of a diagonal step on a unit grid.
const sqrt2 = sqrt(2.0)
# 8-neighbour weight mask; the centre is excluded via the 0.
const neib_8 = @SMatrix[1. 1 1; 1 0 1; 1 1 1]
# Step distances to the 8 neighbours; Inf at the centre so it never wins a minimum.
const distance_8 = @SMatrix[sqrt2 1 sqrt2; 1 Inf 1; sqrt2 1 sqrt2]
# Step distances for 4-connectivity; diagonals and centre disabled with Inf.
const distance_4 = @SMatrix[Inf 1 Inf; 1 Inf 1; Inf 1 Inf]
# Offset used to build a 3×3 window `I - Δ:I + Δ` around a cell index `I`.
const Δ = CartesianIndex(1, 1)
# Write your package code here.
"""Total friction distance spread from `points`."""
function spread(points::Matrix{<:Real}, initial::Matrix{<:AbstractFloat}, friction::Matrix{<:AbstractFloat}; res=1, limit=Inf)
ofriction = OffsetMatrix(fill(Inf, size(friction) .+ 2), UnitRange.(0, size(points) .+ 1))
ofriction[begin + 1:end - 1,begin + 1:end - 1] .= friction
result = OffsetMatrix(fill(limit, size(friction) .+ 2), UnitRange.(0, size(points) .+ 1))
r = @view result[1:end - 1,1:end - 1]
locations = points .> 0
r[locations] .= initial[locations]
# Construct stack for locations
mask = OffsetMatrix(trues(size(points) .+ 2), UnitRange.(0, size(points) .+ 1))
mask[begin + 1:end - 1,begin + 1:end - 1] .= false
II = CartesianIndices(size(points))
stack = Deque{CartesianIndex}()
for I in II[locations]
push!(stack, I)
end
# Step 1: Set the distance of the starting node to 0 and the distances of all other nodes to the highest value possible.
sdata = zeros(MMatrix{3,3})
mcell = MMatrix{3,3}(false, false, false, false, false, false, false, false, false)
# fcdata = zeros(MMatrix{3,3})
# rcdata = zeros(MMatrix{3,3})
# Step 3: For each of the active node’s adjacent neighbors, set its distance to whichever is
# less: its current distance value or the sum of the distance of the active node plus the
# weight of the arc from the active node to that neighbor.
while !isempty(stack)
spread!(stack, mask, result, ofriction, sdata, mcell, res)
end
r
end
"""
    spread2(points, initial, friction; res=1, limit=Inf)

Friction-weighted distance spread computed with full-grid sweeps
(forward, backward, forward) instead of a work stack. Nonzero cells of
`points` are seeded with their `initial` cost; all costs are capped at
`limit`. Returns a view on the interior of the padded cost grid.
"""
function spread2(points::Matrix{<:Real}, initial::Matrix{<:AbstractFloat}, friction::Matrix{<:AbstractFloat}; res=1, limit=Inf)
    # Friction padded with an Inf border so 3×3 windows stay in bounds.
    ofriction = OffsetMatrix(fill(Inf, size(friction) .+ 2), UnitRange.(0, size(points) .+ 1))
    ofriction[begin + 1:end - 1,begin + 1:end - 1] .= friction
    # Cost grid, initialised to `limit`; seeded with `initial` at source cells.
    result = OffsetMatrix(fill(limit, size(friction) .+ 2), UnitRange.(0, size(points) .+ 1))
    r = @view result[1:end - 1,1:end - 1]
    locations = points .> 0
    r[locations] .= initial[locations]
    # Scratch buffers reused by `findmin!` so the hot loop does not allocate.
    # (The unused `mask` allocation of the previous version was removed.)
    minval, minidx = [0.], [CartesianIndex(1, 1)]
    x = @MMatrix zeros(3, 3)
    II = CartesianIndices(size(points))
    # Relax one cell: the candidate cost via each neighbour is that
    # neighbour's cost plus the mean of the two frictions times the step
    # distance; keep the cheapest. distance_8 is Inf at the centre, so the
    # cell never "updates itself".
    function relax!(I)
        patch = I - Δ:I + Δ
        rdata = view(result, patch)
        fdata = view(ofriction, patch)
        x .= (fdata .+ fdata[2,2]) .* res ./ 2 .* distance_8 .+ rdata
        findmin!(minval, minidx, x)
        rdata[2,2] = min(rdata[2,2], minval[1])
    end
    # Forward, backward, forward sweeps propagate costs in both directions.
    foreach(relax!, II)
    foreach(relax!, reverse(II))
    foreach(relax!, II)
    r
end
"""
    spread3(points, initial, friction; res=1, limit=Inf)

Sweep-based friction spread like `spread2`, except that *every* cell — not
only the nonzero cells of `points` — is seeded with its `initial` value
before the sweeps. Returns a view on the interior of the padded cost grid.
"""
function spread3(points::Matrix{<:Real}, initial::Matrix{<:AbstractFloat}, friction::Matrix{<:AbstractFloat}; res=1, limit=Inf)
    # Friction padded with an Inf border so 3×3 windows stay in bounds.
    ofriction = OffsetMatrix(fill(Inf, size(friction) .+ 2), UnitRange.(0, size(points) .+ 1))
    ofriction[begin + 1:end - 1,begin + 1:end - 1] .= friction
    result = OffsetMatrix(fill(limit, size(friction) .+ 2), UnitRange.(0, size(points) .+ 1))
    r = @view result[1:end - 1,1:end - 1]
    r .= initial  # every cell starts at its own initial cost
    # Scratch buffers reused by `findmin!` so the hot loop does not allocate.
    # (The unused `locations` and `mask` allocations of the previous version
    # were removed.)
    minval, minidx = [0.], [CartesianIndex(1, 1)]
    x = @MMatrix zeros(3, 3)
    II = CartesianIndices(size(points))
    # Relax one cell: cheapest way in via any of the 8 neighbours.
    function relax!(I)
        patch = I - Δ:I + Δ
        rdata = view(result, patch)
        fdata = view(ofriction, patch)
        x .= (fdata .+ fdata[2,2]) .* res ./ 2 .* distance_8 .+ rdata
        findmin!(minval, minidx, x)
        rdata[2,2] = min(rdata[2,2], minval[1])
    end
    # Forward, backward, forward sweeps.
    foreach(relax!, II)
    foreach(relax!, reverse(II))
    foreach(relax!, II)
    r
end
"""
    spread!(stack, mask, result, ofriction, sdata, mcell, res)

Process one cell popped from `stack`: relax its 8 neighbours and queue any
neighbour whose cost was lowered. `sdata` and `mcell` are 3×3 scratch
buffers reused across calls; `mask` marks cells that must not be re-queued.
"""
function spread!(stack, mask, result, ofriction, sdata, mcell, res)
    I = popfirst!(stack)
    mask[I] = true  # mark as processed so this cell is not queued again
    patch = I - Δ:I + Δ
    rdata = view(result, patch)
    fdata = view(ofriction, patch)
    # Candidate cost through the centre: centre cost + mean friction × step
    # distance. distance_8 is Inf at the centre, so the centre never updates
    # itself.
    for i ∈ eachindex(sdata)
        sdata[i] = muladd(fdata[i] + fdata[2,2], res / 2 * distance_8[i], rdata[2,2])
        mcell[i] = sdata[i] < rdata[i] # cells where new distance is lower
    end
    # `rdata` is a view of `result`, so this writes through directly.
    # (The old `result[patch] .= rdata` that followed was a no-op self-copy
    # through the aliasing view and has been removed.)
    rdata[mcell] .= sdata[mcell]
    # Queue improved neighbours that have not been finalised yet.
    # NOTE(review): a cell already waiting on the stack may be pushed again;
    # harmless for correctness but causes re-processing — confirm intended.
    for I in patch[mcell]
        mask[I] || push!(stack, I)
    end
end
"""Optimized (and more accurate) function based on the same friction everywhere."""
function spread(points::Matrix{<:AbstractFloat}, initial::AbstractFloat, friction::AbstractFloat; distance=Euclidean(), res=1.0)
locations = points .> 0
I = CartesianIndices(size(points))
result = fill(Inf, size(points))
for location ∈ I[locations]
for cell ∈ I
result[cell] = min(evaluate(distance, location.I, cell.I) * res * friction + initial, result[cell])
end
end
# result .+ initial
m = .~isfinite.(points)
result[m] = points[m]
return result
end
"""Optimized (and more accurate) function based on the same friction everywhere."""
function spread(points::Matrix{<:AbstractFloat}, initial::Matrix{<:AbstractFloat}, friction::Real; distance=Euclidean(), res=1.0)
locations = points .> 0
I = CartesianIndices(size(points))
result = fill(Inf, size(points))
for location ∈ I[locations]
for cell ∈ I
result[cell] = min(evaluate(distance, location.I, cell.I) * res * friction + initial[location], result[cell])
end
end
m = .~isfinite.(points)
result[m] .= points[m]
return result
end
"""
roughness(dem::Matrix{<:AbstractFloat})
Roughness is the largest inter-cell difference of a central pixel and its surrounding cell, as defined in Wilson et al (2007, Marine Geodesy 30:3-35).
"""
function roughness(dem::Matrix{<:AbstractFloat})
ex_dem = OffsetMatrix(fill(Inf, size(dem) .+ 2), UnitRange.(0, size(dem) .+ 1))
# Update center
ex_dem[begin + 1:end - 1,begin + 1:end - 1] .= dem
# Set edges to mirror center
ex_dem[begin, begin + 1:end - 1] .= dem[begin, :]
ex_dem[end, begin + 1:end - 1] .= dem[end, :]
ex_dem[begin + 1:end - 1, begin] .= dem[:, begin]
ex_dem[begin + 1:end - 1, end] .= dem[:, end]
# Set corners to mirror corners of center
ex_dem[begin, begin] = dem[begin, begin]
ex_dem[begin, end] = dem[begin, end]
ex_dem[end, begin] = dem[end, begin]
ex_dem[end, end] = dem[end, end]
roughness = similar(dem)
x = @MMatrix zeros(3, 3)
@inbounds for I ∈ CartesianIndices(size(roughness))
patch = I - Δ:I + Δ
rdata = view(ex_dem, patch)
x .= rdata .- rdata[2,2]
roughness[I] = maximum(abs.(x))
end
roughness
end
"""
TPI(dem::Matrix{<:AbstractFloat})
TPI stands for Topographic Position Index, which is defined as the difference between a central pixel and the mean of its surrounding cells (see Wilson et al 2007, Marine Geodesy 30:3-35).
"""
function TPI(dem::Matrix{<:AbstractFloat})
ex_dem = OffsetMatrix(fill(Inf, size(dem) .+ 2), UnitRange.(0, size(dem) .+ 1))
# Update center
ex_dem[begin + 1:end - 1,begin + 1:end - 1] .= dem
# Set edges to mirror center
ex_dem[begin, begin + 1:end - 1] .= dem[begin, :]
ex_dem[end, begin + 1:end - 1] .= dem[end, :]
ex_dem[begin + 1:end - 1, begin] .= dem[:, begin]
ex_dem[begin + 1:end - 1, end] .= dem[:, end]
# Set corners to mirror corners of center
ex_dem[begin, begin] = dem[begin, begin]
ex_dem[begin, end] = dem[begin, end]
ex_dem[end, begin] = dem[end, begin]
ex_dem[end, end] = dem[end, end]
tpi = similar(dem)
x = @MMatrix zeros(3, 3)
@inbounds for I ∈ CartesianIndices(size(tpi))
patch = I - Δ:I + Δ
rdata = view(ex_dem, patch)
x .= rdata .* neib_8
tpi[I] = rdata[2,2] - mean(x)
end
tpi
end
"""
TRI(dem::Matrix{<:AbstractFloat})
TRI stands for Terrain Ruggedness Index, which measures the difference between a central pixel and its surrounding cells.
This algorithm uses the square root of the sum of the square of the difference between a central pixel and its surrounding cells.
This is recommended for terrestrial use cases.
"""
function TRI(dem::Matrix{<:AbstractFloat})
ex_dem = OffsetMatrix(fill(Inf, size(dem) .+ 2), UnitRange.(0, size(dem) .+ 1))
# Update center
ex_dem[begin + 1:end - 1,begin + 1:end - 1] .= dem
# Set edges to mirror center
ex_dem[begin, begin + 1:end - 1] .= dem[begin, :]
ex_dem[end, begin + 1:end - 1] .= dem[end, :]
ex_dem[begin + 1:end - 1, begin] .= dem[:, begin]
ex_dem[begin + 1:end - 1, end] .= dem[:, end]
# Set corners to mirror corners of center
ex_dem[begin, begin] = dem[begin, begin]
ex_dem[begin, end] = dem[begin, end]
ex_dem[end, begin] = dem[end, begin]
ex_dem[end, end] = dem[end, end]
tri = similar(dem)
x = @MMatrix zeros(3, 3)
@inbounds for I ∈ CartesianIndices(size(tri))
patch = I - Δ:I + Δ
rdata = view(ex_dem, patch)
x .= (rdata .- rdata[2,2]).^2
tri[I] = sqrt(sum(x))
end
tri
end
"""
```
B, flags = pmf(A; ωₘ, slope, dhₘ, dh₀, cellsize)
```
Applies the progressive morphological filter by [Zhang et al. (2003)] to `A`.
# Output
- `B::Array{T,2}` Maximum allowable values
- `flags::Array{Float64,2}` A sized array with window sizes if filtered, zero if not filtered.
Afterwards, one can retrieve the resulting mask for `A` by `A .<= B` or `flags .== 0.`.
# Arguments
- `A::Array{T,2}` Input Array
- `ωₘ::Float64=20.` Maximum window size [m]
- `slope::Float64=0.01` Terrain slope [m/m]
- `dhₘ::Float64=2.5` Maximum elevation threshold [m]
- `dh₀::Float64=0.2` Initial elevation threshold [m]
- `cellsize::Float64=1.` Cellsize in [m]
[Zhang et al. (2003)] Zhang, Keqi, Shu-Ching Chen, Dean Whitman, Mei-Ling Shyu, Jianhua Yan, and Chengcui Zhang. “A Progressive Morphological Filter for Removing Nonground Measurements from Airborne LIDAR Data.” IEEE Transactions on Geoscience and Remote Sensing 41, no. 4 (2003): 872–82. [https://doi.org/10.1109/TGRS.2003.810682].
"""
function pmf(A::Array{T,2};
ωₘ::Float64=20.,
slope::Float64=0.01,
dhₘ::Float64=2.5,
dh₀::Float64=0.2,
cellsize::Float64=1.0) where T <: Real
# Compute windowsizes and thresholds
ωₘ = round(Int, ωₘ / cellsize)
κ_max = floor(Int, log2(ωₘ - 1)) # determine # iterations based on exp growth
windowsizes = Int.(exp2.(1:κ_max)) .+ 1
# Compute tresholds
dwindows = vcat(windowsizes[1], windowsizes) # prepend first element so we get 0 as diff
window_diffs = [dwindows[i] - dwindows[i - 1] for i in 2:length(dwindows)]
height_tresholds = [min(dhₘ, slope * window_diff * cellsize + dh₀) for window_diff in window_diffs]
# Set up arrays
Af = copy(A) # array to be morphed
nan_mask = isnan.(Af)
Af[nan_mask] .= Inf # Replace NaN with Inf, as to always filter these
B = copy(A) # max_elevation raster
out = copy(A) # max_elevation raster
flags = zeros(size(A)) # 0 = ground, other values indicate window size
flags[nan_mask] .= NaN
mask = falses(size(A))
# Iterate over window sizes and height tresholds
for (ωₖ, dhₜ) in zip(windowsizes, height_tresholds)
opening!(Af, ωₖ, out)
for I in eachindex(A)
mask[I] = (A[I] - Af[I]) > dhₜ
end
for I in eachindex(flags)
if mask[I] && flags[I] == 0
flags[I] = ωₖ
end
end
# @info "PMF with window size $ωₖ and threshold $dhₜ filters $(sum(mask)) cells."
B .= min.(B, Af .+ dhₜ)
end
B, flags
end
# First discussed here https://github.com/JuliaImages/ImageFiltering.jl/issues/179
"""
    mapwindow!(f, img, window, out)

Write `f` applied to the `window`-sized neighbourhood of every cell of `img`
into `out`; neighbourhoods are clipped at the array borders. Returns `out`.
"""
function mapwindow!(f, img, window, out)
    inds = CartesianIndices(img)
    lo, hi = first(inds), last(inds)
    # Half-width of the window in every dimension.
    halo = CartesianIndex(ntuple(_ -> window ÷ 2, ndims(img)))
    @inbounds @simd for I in inds
        neighbourhood = max(lo, I - halo):min(hi, I + halo)
        out[I] = f(view(img, neighbourhood))
    end
    return out
end
"""Apply the opening operation to `A` with window size `ω`."""
function opening!(A::Array{T,2}, ω::Integer, out::Array{T,2}) where T <: Real
mapwindow!(minimum, A, ω, out) # erosion
mapwindow!(maximum, out, ω, A) # dilation
A
end
export spread, spread2, spread3, roughness, TPI, TRI, pmf
end # module
|
{"hexsha": "12668fc838a29c680589c766301ffb9dd1a41abf", "size": 13829, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/GeoCost.jl", "max_stars_repo_name": "evetion/GeoCost.jl", "max_stars_repo_head_hexsha": "98b39c6fc5808d686cbfc2c5ef5738b7a35c3e5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/GeoCost.jl", "max_issues_repo_name": "evetion/GeoCost.jl", "max_issues_repo_head_hexsha": "98b39c6fc5808d686cbfc2c5ef5738b7a35c3e5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GeoCost.jl", "max_forks_repo_name": "evetion/GeoCost.jl", "max_forks_repo_head_hexsha": "98b39c6fc5808d686cbfc2c5ef5738b7a35c3e5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7292682927, "max_line_length": 332, "alphanum_fraction": 0.6158073613, "num_tokens": 4476}
|
'''
This script divides copies of the focal plane into individual rafts
This script is designed to be called from the command line as:
python hp2fp_tiler.py [fpID] [chunkSize]
- fpID refers to the index of the focal plane we are writing in the list stored in utils/pointingList.obj
- chunkSize is not required, and is used to reduce memory usage. The lower chunkSize is, the less memory will
be used, but the process will also be slower
The new file created stores: RA, Dec, Half Light Radius ('SIZE'), Ellipticity ('EPSILON'), and magnitude ('LMAG').
It also stores the pointing position of this "telescope" in the 'TRA' and 'TDEC' slots.
'''
import numpy as np
import os
import lsst.geom as geom
from lsst.obs.lsst import LsstCamMapper as camMapper
from lsst.obs.lsst.lsstCamMapper import getWcsFromDetector
import fitsio
from fitsio import FITS
import sys
import pickle
# Parse command-line arguments: a required focal-plane index and an optional
# chunk size (smaller chunks use less memory but run slower).
argc = len(sys.argv)
if argc == 1:
    raise Exception('Must supply a fpIndex')
if argc > 3:
    raise Exception('Too many arguments')
fpID = int(sys.argv[1])
chunkSize = int(sys.argv[2]) if argc == 3 else 100000
### Utility functions
def getRaftNo(ra, dec, wcsList):
    """Return the raft number (detector id // 9) whose detector contains the
    sky position (ra, dec) in degrees, or -1 if no detector contains it."""
    target = geom.SpherePoint(ra, dec, geom.degrees)
    for detector, wcs in wcsList.items():
        pixel = geom.Point2I(wcs.skyToPixel(target))
        if detector.getBBox().contains(pixel):
            return detector.getId() // 9
    return -1
# Get pointing list from file
with open('utils/pointingList.obj', 'rb') as pl:
    pointingList = pickle.load(pl)
### Write focal plane copies to fits files
# Input: one FITS catalogue per focal-plane copy, selected by fpID.
inRoot = '/nfs/slac/g/ki/ki19/lsst/jrovee/outputs/fpCopies'
fNamePattern = 'fpCopy{}.fits'
fpFile = os.path.join(inRoot,fNamePattern.format(fpID))
# Build the camera model and a per-detector WCS for this pointing.
camera = camMapper._makeCamera()
boresight = pointingList[fpID]
wcsList = {detector : getWcsFromDetector(detector, boresight) for detector in camera}
det2raft = lambda det : det.getId() // 9  # NOTE(review): unused; getRaftNo duplicates this mapping
outRoot = '/nfs/slac/g/ki/ki19/lsst/jrovee/outputs/raftCopies'
# Open one output FITS file per raft (21 rafts) up front, clobbering old files.
outFiles = []
for raftNo in range(21):
    fname = os.path.join(outRoot, 'fpCopy{}'.format(fpID), 'raft{}.fits'.format(raftNo))
    outFiles.append(FITS(fname, 'rw', clobber=True))
writtenIn = np.zeros(21, dtype=bool)  # whether each raft file already holds a table
# Total row count of the input catalogue (read zero columns, just the length).
length = len(fitsio.read(fpFile, columns=[], ext=1))
nChunks = -(-length // chunkSize) # Ceiling integer division
for i in range(nChunks):
    print(i)
    if i != nChunks - 1:
        span = range(chunkSize*i, chunkSize*(i+1))
    else: # We treat the last chunk slightly different because it is a different size
        span = range(chunkSize*i, length)
    # Read only the coordinate columns for this chunk to limit memory use.
    ra = fitsio.read(fpFile, columns='RA', rows=span, ext=1)
    dec = fitsio.read(fpFile, columns='DEC', rows=span, ext=1)
    # Bucket catalogue row indices by the raft their sky position falls on.
    # NOTE(review): getRaftNo returns -1 when a position is on no detector,
    # and usefulRows[-1] aliases usefulRows[20] — off-detector objects end up
    # in raft 20's file. Confirm this is intended.
    usefulRows = [[] for _ in range(21)]
    for j, k in enumerate(span):
        usefulRows[getRaftNo(ra[j],dec[j],wcsList)].append(k)
    for raftNo in range(21):
        if usefulRows[raftNo]:
            # Re-read the full record set for just the rows on this raft.
            data = fitsio.read(fpFile, rows=usefulRows[raftNo], columns=['RA', 'DEC', 'SIZE', 'EPSILON', 'LMAG', 'TRA', 'TDEC'], ext=1)
            data['TRA'] = boresight.getRa().asDegrees()
            data['TDEC'] = boresight.getDec().asDegrees()
            # First write creates the table; subsequent writes append to it.
            if writtenIn[raftNo]:
                outFiles[raftNo][1].append(data)
            else:
                outFiles[raftNo].write(data)
                writtenIn[raftNo] = True
|
{"hexsha": "fef76ca1db26261ec9d9122eed7981f853a69c8d", "size": 3357, "ext": "py", "lang": "Python", "max_stars_repo_path": "fp2raft_tiler.py", "max_stars_repo_name": "cahebert/read-noise", "max_stars_repo_head_hexsha": "c8c7972bb9dcdd8758a4f48313f73011fe533937", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-17T21:01:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-17T21:01:02.000Z", "max_issues_repo_path": "fp2raft_tiler.py", "max_issues_repo_name": "cahebert/read-noise", "max_issues_repo_head_hexsha": "c8c7972bb9dcdd8758a4f48313f73011fe533937", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fp2raft_tiler.py", "max_forks_repo_name": "cahebert/read-noise", "max_forks_repo_head_hexsha": "c8c7972bb9dcdd8758a4f48313f73011fe533937", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8901098901, "max_line_length": 135, "alphanum_fraction": 0.6747095621, "include": true, "reason": "import numpy", "num_tokens": 952}
|
import pickle
import numpy as np
import sys
# Load the pickled dict of meteorological fields (input path in argv[1]).
# NOTE(review): pickle.load executes arbitrary code from the file — only
# feed trusted inputs.
with open(sys.argv[1], 'rb') as handle:
    dict_out = pickle.load(handle)
TD = np.array(dict_out['2 metre dewpoint temperature']['values'])
T = np.array(dict_out['2 metre temperature']['values'])
# Magnus-type relative-humidity formula: RH = 100 * e_s(TD) / e_s(T), with
# coefficients 17.625 / 243.04 — assumes temperatures are in °C; TODO confirm.
RH = 100*(np.exp((17.625*TD)/(243.04+TD))/np.exp((17.625*T)/(243.04+T)))
dict_out['Relative Humidity']={'values':RH,'units':'NA'}
# Write the augmented dict back out (output path in argv[2]).
with open(sys.argv[2],"wb") as handle:
    pickle.dump(dict_out, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
{"hexsha": "f8fb0d4bd3607a9e82c93825da02b5744d4414bb", "size": 487, "ext": "py", "lang": "Python", "max_stars_repo_path": "WINGSWorkflowComponents/GeneralDataPreparation/deprecated/netCDF_simple/code/library/calculateRH/calculateRH.py", "max_stars_repo_name": "mintproject/MINT-WorkflowDomain", "max_stars_repo_head_hexsha": "aea1a3c10a8249a481ea8dabcf9e1fb5e306e427", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "WINGSWorkflowComponents/GeneralDataPreparation/deprecated/netCDF_simple/code/library/calculateRH/calculateRH.py", "max_issues_repo_name": "mintproject/MINT-WorkflowDomain", "max_issues_repo_head_hexsha": "aea1a3c10a8249a481ea8dabcf9e1fb5e306e427", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2019-08-14T18:08:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-23T14:31:28.000Z", "max_forks_repo_path": "WINGSWorkflowComponents/GeneralDataPreparation/deprecated/netCDF_simple/code/library/calculateRH/calculateRH.py", "max_forks_repo_name": "KnowledgeCaptureAndDiscovery/MINT-WorkflowDomain", "max_forks_repo_head_hexsha": "aea1a3c10a8249a481ea8dabcf9e1fb5e306e427", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-05-17T14:13:39.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-11T23:03:31.000Z", "avg_line_length": 30.4375, "max_line_length": 72, "alphanum_fraction": 0.6837782341, "include": true, "reason": "import numpy", "num_tokens": 140}
|
#AIXPM - AIX Package Manager, by Michael Felt aka aixtools
# Copyright 2020
# An Ansible 'project' that is to evolve from a role to a module
## History and Motivation to develop AIXPM
AIX has included, since roughly the year 2000 and the development of AIX 5.0 (alpha test), the concept
of the 'geninstall' generic installer. This was the period of Project Monterey, during which IBM AIX was involved
because IBM was considering porting AIX to the IA64 processor. The port was completed, but was never
made commercially available (GA release).
The concept was to support installation of packages from three philosophies: AIX native installer;
Linux (via RPM - Redhat Package Manager) and Install Anywhere (from Windows?).
As part of AIX 5L (the L for Linux affinity) geninstall became part of AIX. For better or worse,
the convention became to package OSS (aka GNU dependencies) using a GCC compiler and RPM.
Nearly 18 years later - to address the complaint known as "RPM-hell" - the IBM AIX OSS team added support
for yum (a Python module used in Red Hat environments for years to automate dependency installation).
This parallels the introduction of AIX 7.2.
While yum (which, officially is no longer developed) alleviates many issues - it continues the other
issue: namely the unintended collision of software packages both wanting to own a file-system-path,
e.g., /usr/lib/libiconv.a and incompatible contents.
AIXPM is not going to try and solve that issue. My first goal is to make it easier to get OSS software
packaged in `installp` aka `bff` (backup-file-format) installed using direct/proxied downloads
from the internet and/or internal servers - without requiring a sys-admin to manually download all the dependencies.
## Basics
There will be one directory {{ give_it_a_name }} where one or more packages are `copied` to.
Once all the files are present (if more than one is needed), the appropriate install program (e.g., installp or rpm)
will be called to install the software.
## Data
The first draft will define package requirements via a YAML file that describes the variable: {{ packages }}.
This will be either a list - perhaps with only one item.
This item will describe the source type {{ XXX }}, source location {{ YYY }}, and
filename {{ ZZZ }}.
The source type will determine how the file gets copied to the install directory {{ give it a name }}.
Once all the package files are copied, the installer will be called to install them.
Additionally, to support Ansible idempotency an attribute {{ creates }} may be specified.
When it already exists - the copy and install process can be skipped.
|
{"hexsha": "cb25c05def3e3b857cfe6f3210a640c1df99e5e4", "size": 2559, "ext": "rd", "lang": "R", "max_stars_repo_path": "AIXPM.rd", "max_stars_repo_name": "aixtools/aixpm", "max_stars_repo_head_hexsha": "8c00c5241b38b16d7f6bfe8a16699e288c0c4642", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AIXPM.rd", "max_issues_repo_name": "aixtools/aixpm", "max_issues_repo_head_hexsha": "8c00c5241b38b16d7f6bfe8a16699e288c0c4642", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AIXPM.rd", "max_forks_repo_name": "aixtools/aixpm", "max_forks_repo_head_hexsha": "8c00c5241b38b16d7f6bfe8a16699e288c0c4642", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.2244897959, "max_line_length": 116, "alphanum_fraction": 0.7753028527, "num_tokens": 589}
|
import pickle
import numpy as np
import os
def export_linear(x, weight, bias):
    """One dense layer: affine transform followed by a sigmoid activation."""
    # A ReLU could be substituted for the sigmoid here instead.
    return sigmoid(x @ weight + bias)
def sigmoid(x):
    """Logistic function 1 / (1 + exp(-x)); element-wise on arrays."""
    return 1 / (np.exp(-x) + 1)
#weights
# Load the pickled list of ten arrays saved by training: five layer weight
# matrices (indices 0-4) followed by their five bias vectors (indices 5-9).
# NOTE(review): the file handle passed to pickle.load is never closed, and the
# path resolves against the current working directory — confirm the script is
# always launched from the repository root.
model_weights = pickle.load(open(os.getcwd() + "/MLP_scratch/model_weights.pickle", "rb" ))
fc0_weight = model_weights[0]
fc1_weight = model_weights[1]
fc2_weight = model_weights[2]
fc3_weight = model_weights[3]
fc4_weight = model_weights[4]
fc0_bias = model_weights[5]
fc1_bias = model_weights[6]
fc2_bias = model_weights[7]
fc3_bias = model_weights[8]
fc4_bias = model_weights[9]
def model(input_string):
    """Run the 5-layer MLP on a stringified 784-element pixel list.

    `input_string` looks like '[12, 0, ..., 255]'; pixel values are scaled
    to [0, 1] before the forward pass. Returns the index of the largest
    output, i.e. the predicted class.
    """
    # Parse "[a, b, ...]" into ints, then normalise the 0-255 pixel range.
    # NOTE(review): np.fromstring (text mode) is deprecated; consider
    # np.array(input_string[1:-1].split(','), dtype=float) eventually.
    x = np.fromstring(input_string[1:-1], sep=',').astype(int) / 255
    for weight, bias in ((fc0_weight, fc0_bias),
                         (fc1_weight, fc1_bias),
                         (fc2_weight, fc2_bias),
                         (fc3_weight, fc3_bias),
                         (fc4_weight, fc4_bias)):
        x = export_linear(x, weight, bias)
    # Bug fix: the original `return prediction = np.argmax(x)` was a
    # SyntaxError (assignment inside a return statement).
    return np.argmax(x)
print(model(input_string))
|
{"hexsha": "ea1271774c6066773111349dd372fd65eebe1148", "size": 1121, "ext": "py", "lang": "Python", "max_stars_repo_path": "MLP_scratch/model_export.py", "max_stars_repo_name": "christopherjgan/fsdl-text-recognizer-2021-labs", "max_stars_repo_head_hexsha": "c0ecdbc579094f802bfddab30206699d71a50d9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MLP_scratch/model_export.py", "max_issues_repo_name": "christopherjgan/fsdl-text-recognizer-2021-labs", "max_issues_repo_head_hexsha": "c0ecdbc579094f802bfddab30206699d71a50d9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MLP_scratch/model_export.py", "max_forks_repo_name": "christopherjgan/fsdl-text-recognizer-2021-labs", "max_forks_repo_head_hexsha": "c0ecdbc579094f802bfddab30206699d71a50d9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5, "max_line_length": 112, "alphanum_fraction": 0.7145405888, "include": true, "reason": "import numpy", "num_tokens": 329}
|
[STATEMENT]
lemma fundamental_theorem_of_algebra:
assumes nc: "\<not> constant (poly p)"
shows "\<exists>z::complex. poly p z = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>z. poly p z = 0
[PROOF STEP]
using nc
[PROOF STATE]
proof (prove)
using this:
\<not> constant (poly p)
goal (1 subgoal):
1. \<exists>z. poly p z = 0
[PROOF STEP]
proof (induct "psize p" arbitrary: p rule: less_induct)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. \<lbrakk>\<And>pa. \<lbrakk>psize pa < psize p; \<not> constant (poly pa)\<rbrakk> \<Longrightarrow> \<exists>z. poly pa z = 0; \<not> constant (poly p)\<rbrakk> \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
case less
[PROOF STATE]
proof (state)
this:
\<lbrakk>psize ?p < psize p; \<not> constant (poly ?p)\<rbrakk> \<Longrightarrow> \<exists>z. poly ?p z = 0
\<not> constant (poly p)
goal (1 subgoal):
1. \<And>p. \<lbrakk>\<And>pa. \<lbrakk>psize pa < psize p; \<not> constant (poly pa)\<rbrakk> \<Longrightarrow> \<exists>z. poly pa z = 0; \<not> constant (poly p)\<rbrakk> \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
let ?p = "poly p"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. \<lbrakk>\<And>pa. \<lbrakk>psize pa < psize p; \<not> constant (poly pa)\<rbrakk> \<Longrightarrow> \<exists>z. poly pa z = 0; \<not> constant (poly p)\<rbrakk> \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
let ?ths = "\<exists>z. ?p z = 0"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. \<lbrakk>\<And>pa. \<lbrakk>psize pa < psize p; \<not> constant (poly pa)\<rbrakk> \<Longrightarrow> \<exists>z. poly pa z = 0; \<not> constant (poly p)\<rbrakk> \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from nonconstant_length[OF less(2)]
[PROOF STATE]
proof (chain)
picking this:
2 \<le> psize p
[PROOF STEP]
have n2: "psize p \<ge> 2"
[PROOF STATE]
proof (prove)
using this:
2 \<le> psize p
goal (1 subgoal):
1. 2 \<le> psize p
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
2 \<le> psize p
goal (1 subgoal):
1. \<And>p. \<lbrakk>\<And>pa. \<lbrakk>psize pa < psize p; \<not> constant (poly pa)\<rbrakk> \<Longrightarrow> \<exists>z. poly pa z = 0; \<not> constant (poly p)\<rbrakk> \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from poly_minimum_modulus
[PROOF STATE]
proof (chain)
picking this:
\<exists>z. \<forall>w. cmod (poly ?p z) \<le> cmod (poly ?p w)
[PROOF STEP]
obtain c where c: "\<forall>w. cmod (?p c) \<le> cmod (?p w)"
[PROOF STATE]
proof (prove)
using this:
\<exists>z. \<forall>w. cmod (poly ?p z) \<le> cmod (poly ?p w)
goal (1 subgoal):
1. (\<And>c. \<forall>w. cmod (poly p c) \<le> cmod (poly p w) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<forall>w. cmod (poly p c) \<le> cmod (poly p w)
goal (1 subgoal):
1. \<And>p. \<lbrakk>\<And>pa. \<lbrakk>psize pa < psize p; \<not> constant (poly pa)\<rbrakk> \<Longrightarrow> \<exists>z. poly pa z = 0; \<not> constant (poly p)\<rbrakk> \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
show ?ths
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>z. poly p z = 0
[PROOF STEP]
proof (cases "?p c = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. poly p c = 0 \<Longrightarrow> \<exists>z. poly p z = 0
2. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
poly p c = 0
goal (2 subgoals):
1. poly p c = 0 \<Longrightarrow> \<exists>z. poly p z = 0
2. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
poly p c = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
poly p c = 0
goal (1 subgoal):
1. \<exists>z. poly p z = 0
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>z. poly p z = 0
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
poly p c \<noteq> 0
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from poly_offset[of p c]
[PROOF STATE]
proof (chain)
picking this:
\<exists>q. psize q = psize p \<and> (\<forall>x. poly q x = poly p (c + x))
[PROOF STEP]
obtain q where q: "psize q = psize p" "\<forall>x. poly q x = ?p (c + x)"
[PROOF STATE]
proof (prove)
using this:
\<exists>q. psize q = psize p \<and> (\<forall>x. poly q x = poly p (c + x))
goal (1 subgoal):
1. (\<And>q. \<lbrakk>psize q = psize p; \<forall>x. poly q x = poly p (c + x)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
psize q = psize p
\<forall>x. poly q x = poly p (c + x)
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
have False if h: "constant (poly q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. False
[PROOF STEP]
from q(2)
[PROOF STATE]
proof (chain)
picking this:
\<forall>x. poly q x = poly p (c + x)
[PROOF STEP]
have th: "\<forall>x. poly q (x - c) = ?p x"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. poly q x = poly p (c + x)
goal (1 subgoal):
1. \<forall>x. poly q (x - c) = poly p x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>x. poly q (x - c) = poly p x
goal (1 subgoal):
1. False
[PROOF STEP]
have "?p x = ?p y" for x y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
from th
[PROOF STATE]
proof (chain)
picking this:
\<forall>x. poly q (x - c) = poly p x
[PROOF STEP]
have "?p x = poly q (x - c)"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. poly q (x - c) = poly p x
goal (1 subgoal):
1. poly p x = poly q (x - c)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
poly p x = poly q (x - c)
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
poly p x = poly q (x - c)
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
have "\<dots> = poly q (y - c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. poly q (x - c) = poly q (y - c)
[PROOF STEP]
using h
[PROOF STATE]
proof (prove)
using this:
constant (poly q)
goal (1 subgoal):
1. poly q (x - c) = poly q (y - c)
[PROOF STEP]
unfolding constant_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x y. poly q x = poly q y
goal (1 subgoal):
1. poly q (x - c) = poly q (y - c)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
poly q (x - c) = poly q (y - c)
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
poly q (x - c) = poly q (y - c)
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
have "\<dots> = ?p y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. poly q (y - c) = poly p y
[PROOF STEP]
using th
[PROOF STATE]
proof (prove)
using this:
\<forall>x. poly q (x - c) = poly p x
goal (1 subgoal):
1. poly q (y - c) = poly p y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
poly q (y - c) = poly p y
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
poly p x = poly p y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
poly p x = poly p y
goal (1 subgoal):
1. poly p x = poly p y
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
poly p x = poly p y
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
poly p ?x = poly p ?y
goal (1 subgoal):
1. False
[PROOF STEP]
with less(2)
[PROOF STATE]
proof (chain)
picking this:
\<not> constant (poly p)
poly p ?x = poly p ?y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<not> constant (poly p)
poly p ?x = poly p ?y
goal (1 subgoal):
1. False
[PROOF STEP]
unfolding constant_def
[PROOF STATE]
proof (prove)
using this:
\<not> (\<forall>x y. poly p x = poly p y)
poly p ?x = poly p ?y
goal (1 subgoal):
1. False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
constant (poly q) \<Longrightarrow> False
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
constant (poly q) \<Longrightarrow> False
[PROOF STEP]
have qnc: "\<not> constant (poly q)"
[PROOF STATE]
proof (prove)
using this:
constant (poly q) \<Longrightarrow> False
goal (1 subgoal):
1. \<not> constant (poly q)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<not> constant (poly q)
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from q(2)
[PROOF STATE]
proof (chain)
picking this:
\<forall>x. poly q x = poly p (c + x)
[PROOF STEP]
have pqc0: "?p c = poly q 0"
[PROOF STATE]
proof (prove)
using this:
\<forall>x. poly q x = poly p (c + x)
goal (1 subgoal):
1. poly p c = poly q 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
poly p c = poly q 0
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from c pqc0
[PROOF STATE]
proof (chain)
picking this:
\<forall>w. cmod (poly p c) \<le> cmod (poly p w)
poly p c = poly q 0
[PROOF STEP]
have cq0: "\<forall>w. cmod (poly q 0) \<le> cmod (?p w)"
[PROOF STATE]
proof (prove)
using this:
\<forall>w. cmod (poly p c) \<le> cmod (poly p w)
poly p c = poly q 0
goal (1 subgoal):
1. \<forall>w. cmod (poly q 0) \<le> cmod (poly p w)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>w. cmod (poly q 0) \<le> cmod (poly p w)
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
let ?a0 = "poly q 0"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from False pqc0
[PROOF STATE]
proof (chain)
picking this:
poly p c \<noteq> 0
poly p c = poly q 0
[PROOF STEP]
have a00: "?a0 \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
poly p c \<noteq> 0
poly p c = poly q 0
goal (1 subgoal):
1. poly q 0 \<noteq> 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
poly q 0 \<noteq> 0
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from a00
[PROOF STATE]
proof (chain)
picking this:
poly q 0 \<noteq> 0
[PROOF STEP]
have qr: "\<forall>z. poly q z = poly (smult (inverse ?a0) q) z * ?a0"
[PROOF STATE]
proof (prove)
using this:
poly q 0 \<noteq> 0
goal (1 subgoal):
1. \<forall>z. poly q z = poly (smult (inverse (poly q 0)) q) z * poly q 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>z. poly q z = poly (smult (inverse (poly q 0)) q) z * poly q 0
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
let ?r = "smult (inverse ?a0) q"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
have lgqr: "psize q = psize ?r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. psize q = psize (smult (inverse (poly q 0)) q)
[PROOF STEP]
using a00
[PROOF STATE]
proof (prove)
using this:
poly q 0 \<noteq> 0
goal (1 subgoal):
1. psize q = psize (smult (inverse (poly q 0)) q)
[PROOF STEP]
unfolding psize_def degree_def
[PROOF STATE]
proof (prove)
using this:
poly q 0 \<noteq> 0
goal (1 subgoal):
1. (if q = 0 then 0 else Suc (LEAST n. \<forall>i>n. coeff q i = 0)) = (if smult (inverse (poly q 0)) q = 0 then 0 else Suc (LEAST n. \<forall>i>n. coeff (smult (inverse (poly q 0)) q) i = 0))
[PROOF STEP]
by (simp add: poly_eq_iff)
[PROOF STATE]
proof (state)
this:
psize q = psize (smult (inverse (poly q 0)) q)
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
have False if h: "\<And>x y. poly ?r x = poly ?r y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. False
[PROOF STEP]
have "poly q x = poly q y" for x y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
from qr[rule_format, of x]
[PROOF STATE]
proof (chain)
picking this:
poly q x = poly (smult (inverse (poly q 0)) q) x * poly q 0
[PROOF STEP]
have "poly q x = poly ?r x * ?a0"
[PROOF STATE]
proof (prove)
using this:
poly q x = poly (smult (inverse (poly q 0)) q) x * poly q 0
goal (1 subgoal):
1. poly q x = poly (smult (inverse (poly q 0)) q) x * poly q 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
poly q x = poly (smult (inverse (poly q 0)) q) x * poly q 0
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
poly q x = poly (smult (inverse (poly q 0)) q) x * poly q 0
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
have "\<dots> = poly ?r y * ?a0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. poly (smult (inverse (poly q 0)) q) x * poly q 0 = poly (smult (inverse (poly q 0)) q) y * poly q 0
[PROOF STEP]
using h
[PROOF STATE]
proof (prove)
using this:
poly (smult (inverse (poly q 0)) q) ?x = poly (smult (inverse (poly q 0)) q) ?y
goal (1 subgoal):
1. poly (smult (inverse (poly q 0)) q) x * poly q 0 = poly (smult (inverse (poly q 0)) q) y * poly q 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
poly (smult (inverse (poly q 0)) q) x * poly q 0 = poly (smult (inverse (poly q 0)) q) y * poly q 0
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
poly (smult (inverse (poly q 0)) q) x * poly q 0 = poly (smult (inverse (poly q 0)) q) y * poly q 0
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
have "\<dots> = poly q y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. poly (smult (inverse (poly q 0)) q) y * poly q 0 = poly q y
[PROOF STEP]
using qr[rule_format, of y]
[PROOF STATE]
proof (prove)
using this:
poly q y = poly (smult (inverse (poly q 0)) q) y * poly q 0
goal (1 subgoal):
1. poly (smult (inverse (poly q 0)) q) y * poly q 0 = poly q y
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
poly (smult (inverse (poly q 0)) q) y * poly q 0 = poly q y
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
poly q x = poly q y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
poly q x = poly q y
goal (1 subgoal):
1. poly q x = poly q y
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
poly q x = poly q y
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
poly q ?x = poly q ?y
goal (1 subgoal):
1. False
[PROOF STEP]
with qnc
[PROOF STATE]
proof (chain)
picking this:
\<not> constant (poly q)
poly q ?x = poly q ?y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<not> constant (poly q)
poly q ?x = poly q ?y
goal (1 subgoal):
1. False
[PROOF STEP]
unfolding constant_def
[PROOF STATE]
proof (prove)
using this:
\<not> (\<forall>x y. poly q x = poly q y)
poly q ?x = poly q ?y
goal (1 subgoal):
1. False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<And>x y. poly (smult (inverse (poly q 0)) q) x = poly (smult (inverse (poly q 0)) q) y) \<Longrightarrow> False
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<And>x y. poly (smult (inverse (poly q 0)) q) x = poly (smult (inverse (poly q 0)) q) y) \<Longrightarrow> False
[PROOF STEP]
have rnc: "\<not> constant (poly ?r)"
[PROOF STATE]
proof (prove)
using this:
(\<And>x y. poly (smult (inverse (poly q 0)) q) x = poly (smult (inverse (poly q 0)) q) y) \<Longrightarrow> False
goal (1 subgoal):
1. \<not> constant (poly (smult (inverse (poly q 0)) q))
[PROOF STEP]
unfolding constant_def
[PROOF STATE]
proof (prove)
using this:
(\<And>x y. poly (smult (inverse (poly q 0)) q) x = poly (smult (inverse (poly q 0)) q) y) \<Longrightarrow> False
goal (1 subgoal):
1. \<not> (\<forall>x y. poly (smult (inverse (poly q 0)) q) x = poly (smult (inverse (poly q 0)) q) y)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<not> constant (poly (smult (inverse (poly q 0)) q))
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from qr[rule_format, of 0] a00
[PROOF STATE]
proof (chain)
picking this:
poly q 0 = poly (smult (inverse (poly q 0)) q) 0 * poly q 0
poly q 0 \<noteq> 0
[PROOF STEP]
have r01: "poly ?r 0 = 1"
[PROOF STATE]
proof (prove)
using this:
poly q 0 = poly (smult (inverse (poly q 0)) q) 0 * poly q 0
poly q 0 \<noteq> 0
goal (1 subgoal):
1. poly (smult (inverse (poly q 0)) q) 0 = 1
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
poly (smult (inverse (poly q 0)) q) 0 = 1
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
have mrmq_eq: "cmod (poly ?r w) < 1 \<longleftrightarrow> cmod (poly q w) < cmod ?a0" for w
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
have "cmod (poly ?r w) < 1 \<longleftrightarrow> cmod (poly q w / ?a0) < 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w / poly q 0) < 1)
[PROOF STEP]
using qr[rule_format, of w] a00
[PROOF STATE]
proof (prove)
using this:
poly q w = poly (smult (inverse (poly q 0)) q) w * poly q 0
poly q 0 \<noteq> 0
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w / poly q 0) < 1)
[PROOF STEP]
by (simp add: divide_inverse ac_simps)
[PROOF STATE]
proof (state)
this:
(cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w / poly q 0) < 1)
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w / poly q 0) < 1)
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
have "\<dots> \<longleftrightarrow> cmod (poly q w) < cmod ?a0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (cmod (poly q w / poly q 0) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
using a00
[PROOF STATE]
proof (prove)
using this:
poly q 0 \<noteq> 0
goal (1 subgoal):
1. (cmod (poly q w / poly q 0) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
unfolding norm_divide
[PROOF STATE]
proof (prove)
using this:
poly q 0 \<noteq> 0
goal (1 subgoal):
1. (cmod (poly q w) / cmod (poly q 0) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
by (simp add: field_simps)
[PROOF STATE]
proof (state)
this:
(cmod (poly q w / poly q 0) < 1) = (cmod (poly q w) < cmod (poly q 0))
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
goal (1 subgoal):
1. (cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(cmod (poly (smult (inverse (poly q 0)) q) w) < 1) = (cmod (poly q w) < cmod (poly q 0))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(cmod (poly (smult (inverse (poly q 0)) q) ?w) < 1) = (cmod (poly q ?w) < cmod (poly q 0))
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
from poly_decompose[OF rnc]
[PROOF STATE]
proof (chain)
picking this:
\<exists>k a qa. a \<noteq> 0 \<and> k \<noteq> 0 \<and> psize qa + k + 1 = psize (smult (inverse (poly q 0)) q) \<and> (\<forall>z. poly (smult (inverse (poly q 0)) q) z = poly (smult (inverse (poly q 0)) q) 0 + z ^ k * poly (pCons a qa) z)
[PROOF STEP]
obtain k a s where
kas: "a \<noteq> 0" "k \<noteq> 0" "psize s + k + 1 = psize ?r"
"\<forall>z. poly ?r z = poly ?r 0 + z^k* poly (pCons a s) z"
[PROOF STATE]
proof (prove)
using this:
\<exists>k a qa. a \<noteq> 0 \<and> k \<noteq> 0 \<and> psize qa + k + 1 = psize (smult (inverse (poly q 0)) q) \<and> (\<forall>z. poly (smult (inverse (poly q 0)) q) z = poly (smult (inverse (poly q 0)) q) 0 + z ^ k * poly (pCons a qa) z)
goal (1 subgoal):
1. (\<And>a k s. \<lbrakk>a \<noteq> 0; k \<noteq> 0; psize s + k + 1 = psize (smult (inverse (poly q 0)) q); \<forall>z. poly (smult (inverse (poly q 0)) q) z = poly (smult (inverse (poly q 0)) q) 0 + z ^ k * poly (pCons a s) z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
a \<noteq> 0
k \<noteq> 0
psize s + k + 1 = psize (smult (inverse (poly q 0)) q)
\<forall>z. poly (smult (inverse (poly q 0)) q) z = poly (smult (inverse (poly q 0)) q) 0 + z ^ k * poly (pCons a s) z
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
have "\<exists>w. cmod (poly ?r w) < 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
proof (cases "psize p = k + 1")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. psize p = k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
2. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
psize p = k + 1
goal (2 subgoals):
1. psize p = k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
2. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
with kas(3) lgqr[symmetric] q(1)
[PROOF STATE]
proof (chain)
picking this:
psize s + k + 1 = psize (smult (inverse (poly q 0)) q)
psize (smult (inverse (poly q 0)) q) = psize q
psize q = psize p
psize p = k + 1
[PROOF STEP]
have s0: "s = 0"
[PROOF STATE]
proof (prove)
using this:
psize s + k + 1 = psize (smult (inverse (poly q 0)) q)
psize (smult (inverse (poly q 0)) q) = psize q
psize q = psize p
psize p = k + 1
goal (1 subgoal):
1. s = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
s = 0
goal (2 subgoals):
1. psize p = k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
2. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have hth[symmetric]: "cmod (poly ?r w) = cmod (1 + a * w ^ k)" for w
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cmod (poly (smult (inverse (poly q 0)) q) w) = cmod (1 + a * w ^ k)
[PROOF STEP]
using kas(4)[rule_format, of w] s0 r01
[PROOF STATE]
proof (prove)
using this:
poly (smult (inverse (poly q 0)) q) w = poly (smult (inverse (poly q 0)) q) 0 + w ^ k * poly (pCons a s) w
s = 0
poly (smult (inverse (poly q 0)) q) 0 = 1
goal (1 subgoal):
1. cmod (poly (smult (inverse (poly q 0)) q) w) = cmod (1 + a * w ^ k)
[PROOF STEP]
by (simp add: algebra_simps)
[PROOF STATE]
proof (state)
this:
cmod (1 + a * ?w ^ k) = cmod (poly (smult (inverse (poly q 0)) q) ?w)
goal (2 subgoals):
1. psize p = k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
2. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from reduce_poly_simple[OF kas(1,2)]
[PROOF STATE]
proof (chain)
picking this:
\<exists>z. cmod (1 + a * z ^ k) < 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<exists>z. cmod (1 + a * z ^ k) < 1
goal (1 subgoal):
1. \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
unfolding hth
[PROOF STATE]
proof (prove)
using this:
\<exists>z. cmod (poly (smult (inverse (poly q 0)) q) z) < 1
goal (1 subgoal):
1. \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
psize p \<noteq> k + 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
note kn = this
[PROOF STATE]
proof (state)
this:
psize p \<noteq> k + 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from kn kas(3) q(1) lgqr
[PROOF STATE]
proof (chain)
picking this:
psize p \<noteq> k + 1
psize s + k + 1 = psize (smult (inverse (poly q 0)) q)
psize q = psize p
psize q = psize (smult (inverse (poly q 0)) q)
[PROOF STEP]
have k1n: "k + 1 < psize p"
[PROOF STATE]
proof (prove)
using this:
psize p \<noteq> k + 1
psize s + k + 1 = psize (smult (inverse (poly q 0)) q)
psize q = psize p
psize q = psize (smult (inverse (poly q 0)) q)
goal (1 subgoal):
1. k + 1 < psize p
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
k + 1 < psize p
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have th01: "\<not> constant (poly (pCons 1 (monom a (k - 1))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> constant (poly (pCons 1 (monom a (k - 1))))
[PROOF STEP]
unfolding constant_def poly_pCons poly_monom
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> (\<forall>x y. 1 + x * (a * x ^ (k - 1)) = 1 + y * (a * y ^ (k - 1)))
[PROOF STEP]
using kas(1)
[PROOF STATE]
proof (prove)
using this:
a \<noteq> 0
goal (1 subgoal):
1. \<not> (\<forall>x y. 1 + x * (a * x ^ (k - 1)) = 1 + y * (a * y ^ (k - 1)))
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<noteq> 0 \<Longrightarrow> \<exists>x y. x * x ^ (k - Suc 0) \<noteq> y * y ^ (k - Suc 0)
[PROOF STEP]
apply (rule exI[where x=0])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<noteq> 0 \<Longrightarrow> \<exists>y. 0 * 0 ^ (k - Suc 0) \<noteq> y * y ^ (k - Suc 0)
[PROOF STEP]
apply (rule exI[where x=1])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<noteq> 0 \<Longrightarrow> 0 * 0 ^ (k - Suc 0) \<noteq> 1 * 1 ^ (k - Suc 0)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
\<not> constant (poly (pCons 1 (monom a (k - 1))))
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from kas(1) kas(2)
[PROOF STATE]
proof (chain)
picking this:
a \<noteq> 0
k \<noteq> 0
[PROOF STEP]
have th02: "k + 1 = psize (pCons 1 (monom a (k - 1)))"
[PROOF STATE]
proof (prove)
using this:
a \<noteq> 0
k \<noteq> 0
goal (1 subgoal):
1. k + 1 = psize (pCons 1 (monom a (k - 1)))
[PROOF STEP]
by (simp add: psize_def degree_monom_eq)
[PROOF STATE]
proof (state)
this:
k + 1 = psize (pCons 1 (monom a (k - 1)))
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from less(1) [OF k1n [simplified th02] th01]
[PROOF STATE]
proof (chain)
picking this:
\<exists>z. poly (pCons 1 (monom a (k - 1))) z = 0
[PROOF STEP]
obtain w where w: "1 + w^k * a = 0"
[PROOF STATE]
proof (prove)
using this:
\<exists>z. poly (pCons 1 (monom a (k - 1))) z = 0
goal (1 subgoal):
1. (\<And>w. 1 + w ^ k * a = 0 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding poly_pCons poly_monom
[PROOF STATE]
proof (prove)
using this:
\<exists>z. 1 + z * (a * z ^ (k - 1)) = 0
goal (1 subgoal):
1. (\<And>w. 1 + w ^ k * a = 0 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using kas(2)
[PROOF STATE]
proof (prove)
using this:
\<exists>z. 1 + z * (a * z ^ (k - 1)) = 0
k \<noteq> 0
goal (1 subgoal):
1. (\<And>w. 1 + w ^ k * a = 0 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases k) (auto simp add: algebra_simps)
[PROOF STATE]
proof (state)
this:
1 + w ^ k * a = 0
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from poly_bound_exists[of "cmod w" s]
[PROOF STATE]
proof (chain)
picking this:
\<exists>m>0. \<forall>z. cmod z \<le> cmod w \<longrightarrow> cmod (poly s z) \<le> m
[PROOF STEP]
obtain m where
m: "m > 0" "\<forall>z. cmod z \<le> cmod w \<longrightarrow> cmod (poly s z) \<le> m"
[PROOF STATE]
proof (prove)
using this:
\<exists>m>0. \<forall>z. cmod z \<le> cmod w \<longrightarrow> cmod (poly s z) \<le> m
goal (1 subgoal):
1. (\<And>m. \<lbrakk>0 < m; \<forall>z. cmod z \<le> cmod w \<longrightarrow> cmod (poly s z) \<le> m\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
0 < m
\<forall>z. cmod z \<le> cmod w \<longrightarrow> cmod (poly s z) \<le> m
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have w0: "w \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w \<noteq> 0
[PROOF STEP]
using kas(2) w
[PROOF STATE]
proof (prove)
using this:
k \<noteq> 0
1 + w ^ k * a = 0
goal (1 subgoal):
1. w \<noteq> 0
[PROOF STEP]
by (auto simp add: power_0_left)
[PROOF STATE]
proof (state)
this:
w \<noteq> 0
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from w
[PROOF STATE]
proof (chain)
picking this:
1 + w ^ k * a = 0
[PROOF STEP]
have "(1 + w ^ k * a) - 1 = 0 - 1"
[PROOF STATE]
proof (prove)
using this:
1 + w ^ k * a = 0
goal (1 subgoal):
1. 1 + w ^ k * a - 1 = 0 - 1
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
1 + w ^ k * a - 1 = 0 - 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
1 + w ^ k * a - 1 = 0 - 1
[PROOF STEP]
have wm1: "w^k * a = - 1"
[PROOF STATE]
proof (prove)
using this:
1 + w ^ k * a - 1 = 0 - 1
goal (1 subgoal):
1. w ^ k * a = - 1
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
w ^ k * a = - 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have inv0: "0 < inverse (cmod w ^ (k + 1) * m)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < inverse (cmod w ^ (k + 1) * m)
[PROOF STEP]
using norm_ge_zero[of w] w0 m(1)
[PROOF STATE]
proof (prove)
using this:
0 \<le> cmod w
w \<noteq> 0
0 < m
goal (1 subgoal):
1. 0 < inverse (cmod w ^ (k + 1) * m)
[PROOF STEP]
by (simp add: inverse_eq_divide zero_less_mult_iff)
[PROOF STATE]
proof (state)
this:
0 < inverse (cmod w ^ (k + 1) * m)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
with field_lbound_gt_zero[OF zero_less_one]
[PROOF STATE]
proof (chain)
picking this:
(0::?'a) < ?d2.0 \<Longrightarrow> \<exists>e>0::?'a. e < (1::?'a) \<and> e < ?d2.0
0 < inverse (cmod w ^ (k + 1) * m)
[PROOF STEP]
obtain t where
t: "t > 0" "t < 1" "t < inverse (cmod w ^ (k + 1) * m)"
[PROOF STATE]
proof (prove)
using this:
(0::?'a) < ?d2.0 \<Longrightarrow> \<exists>e>0::?'a. e < (1::?'a) \<and> e < ?d2.0
0 < inverse (cmod w ^ (k + 1) * m)
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 < t; t < 1; t < inverse (cmod w ^ (k + 1) * m)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
0 < t
t < 1
t < inverse (cmod w ^ (k + 1) * m)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
let ?ct = "complex_of_real t"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
let ?w = "?ct * w"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have "1 + ?w^k * (a + ?w * poly s ?w) = 1 + ?ct^k * (w^k * a) + ?w^k * ?w * poly s ?w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w)) = 1 + complex_of_real t ^ k * (w ^ k * a) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
[PROOF STEP]
using kas(1)
[PROOF STATE]
proof (prove)
using this:
a \<noteq> 0
goal (1 subgoal):
1. 1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w)) = 1 + complex_of_real t ^ k * (w ^ k * a) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
[PROOF STEP]
by (simp add: algebra_simps power_mult_distrib)
[PROOF STATE]
proof (state)
this:
1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w)) = 1 + complex_of_real t ^ k * (w ^ k * a) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w)) = 1 + complex_of_real t ^ k * (w ^ k * a) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have "\<dots> = complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 + complex_of_real t ^ k * (w ^ k * a) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w) = complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
[PROOF STEP]
unfolding wm1
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 + complex_of_real t ^ k * - 1 + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w) = complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
1 + complex_of_real t ^ k * (w ^ k * a) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w) = complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w)) = complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
[PROOF STEP]
have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) =
cmod (complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w)"
[PROOF STATE]
proof (prove)
using this:
1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w)) = complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)
goal (1 subgoal):
1. cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) = cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) = cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
with norm_triangle_ineq[of "complex_of_real (1 - t^k)" "?w^k * ?w * poly s ?w"]
[PROOF STATE]
proof (chain)
picking this:
cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> cmod (complex_of_real (1 - t ^ k)) + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) = cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
[PROOF STEP]
have th11: "cmod (1 + ?w^k * (a + ?w * poly s ?w)) \<le> \<bar>1 - t^k\<bar> + cmod (?w^k * ?w * poly s ?w)"
[PROOF STATE]
proof (prove)
using this:
cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> cmod (complex_of_real (1 - t ^ k)) + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) = cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
goal (1 subgoal):
1. cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) \<le> \<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
[PROOF STEP]
unfolding norm_of_real
[PROOF STATE]
proof (prove)
using this:
cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> \<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) = cmod (complex_of_real (1 - t ^ k) + (complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
goal (1 subgoal):
1. cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) \<le> \<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) \<le> \<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have ath: "\<And>x t::real. 0 \<le> x \<Longrightarrow> x < t \<Longrightarrow> t \<le> 1 \<Longrightarrow> \<bar>1 - t\<bar> + x < 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x t. \<lbrakk>0 \<le> x; x < t; t \<le> 1\<rbrakk> \<Longrightarrow> \<bar>1 - t\<bar> + x < 1
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
\<lbrakk>0 \<le> ?x; ?x < ?t; ?t \<le> 1\<rbrakk> \<Longrightarrow> \<bar>1 - ?t\<bar> + ?x < 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have "t * cmod w \<le> 1 * cmod w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t * cmod w \<le> 1 * cmod w
[PROOF STEP]
apply (rule mult_mono)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. t \<le> 1
2. cmod w \<le> cmod w
3. 0 \<le> 1
4. 0 \<le> cmod w
[PROOF STEP]
using t(1,2)
[PROOF STATE]
proof (prove)
using this:
0 < t
t < 1
goal (4 subgoals):
1. t \<le> 1
2. cmod w \<le> cmod w
3. 0 \<le> 1
4. 0 \<le> cmod w
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
t * cmod w \<le> 1 * cmod w
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t * cmod w \<le> 1 * cmod w
[PROOF STEP]
have tw: "cmod ?w \<le> cmod w"
[PROOF STATE]
proof (prove)
using this:
t * cmod w \<le> 1 * cmod w
goal (1 subgoal):
1. cmod (complex_of_real t * w) \<le> cmod w
[PROOF STEP]
using t(1)
[PROOF STATE]
proof (prove)
using this:
t * cmod w \<le> 1 * cmod w
0 < t
goal (1 subgoal):
1. cmod (complex_of_real t * w) \<le> cmod w
[PROOF STEP]
by (simp add: norm_mult)
[PROOF STATE]
proof (state)
this:
cmod (complex_of_real t * w) \<le> cmod w
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from t inv0
[PROOF STATE]
proof (chain)
picking this:
0 < t
t < 1
t < inverse (cmod w ^ (k + 1) * m)
0 < inverse (cmod w ^ (k + 1) * m)
[PROOF STEP]
have "t * (cmod w ^ (k + 1) * m) < 1"
[PROOF STATE]
proof (prove)
using this:
0 < t
t < 1
t < inverse (cmod w ^ (k + 1) * m)
0 < inverse (cmod w ^ (k + 1) * m)
goal (1 subgoal):
1. t * (cmod w ^ (k + 1) * m) < 1
[PROOF STEP]
by (simp add: field_simps)
[PROOF STATE]
proof (state)
this:
t * (cmod w ^ (k + 1) * m) < 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
with zero_less_power[OF t(1), of k]
[PROOF STATE]
proof (chain)
picking this:
0 < t ^ k
t * (cmod w ^ (k + 1) * m) < 1
[PROOF STEP]
have th30: "t^k * (t* (cmod w ^ (k + 1) * m)) < t^k * 1"
[PROOF STATE]
proof (prove)
using this:
0 < t ^ k
t * (cmod w ^ (k + 1) * m) < 1
goal (1 subgoal):
1. t ^ k * (t * (cmod w ^ (k + 1) * m)) < t ^ k * 1
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
t ^ k * (t * (cmod w ^ (k + 1) * m)) < t ^ k * 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
have "cmod (?w^k * ?w * poly s ?w) = t^k * (t* (cmod w ^ (k + 1) * cmod (poly s ?w)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) = t ^ k * (t * (cmod w ^ (k + 1) * cmod (poly s (complex_of_real t * w))))
[PROOF STEP]
using w0 t(1)
[PROOF STATE]
proof (prove)
using this:
w \<noteq> 0
0 < t
goal (1 subgoal):
1. cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) = t ^ k * (t * (cmod w ^ (k + 1) * cmod (poly s (complex_of_real t * w))))
[PROOF STEP]
by (simp add: algebra_simps power_mult_distrib norm_power norm_mult)
[PROOF STATE]
proof (state)
this:
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) = t ^ k * (t * (cmod w ^ (k + 1) * cmod (poly s (complex_of_real t * w))))
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) = t ^ k * (t * (cmod w ^ (k + 1) * cmod (poly s (complex_of_real t * w))))
[PROOF STEP]
have "cmod (?w^k * ?w * poly s ?w) \<le> t^k * (t* (cmod w ^ (k + 1) * m))"
[PROOF STATE]
proof (prove)
using this:
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) = t ^ k * (t * (cmod w ^ (k + 1) * cmod (poly s (complex_of_real t * w))))
goal (1 subgoal):
1. cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> t ^ k * (t * (cmod w ^ (k + 1) * m))
[PROOF STEP]
using t(1,2) m(2)[rule_format, OF tw] w0
[PROOF STATE]
proof (prove)
using this:
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) = t ^ k * (t * (cmod w ^ (k + 1) * cmod (poly s (complex_of_real t * w))))
0 < t
t < 1
cmod (poly s (complex_of_real t * w)) \<le> m
w \<noteq> 0
goal (1 subgoal):
1. cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> t ^ k * (t * (cmod w ^ (k + 1) * m))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> t ^ k * (t * (cmod w ^ (k + 1) * m))
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
with th30
[PROOF STATE]
proof (chain)
picking this:
t ^ k * (t * (cmod w ^ (k + 1) * m)) < t ^ k * 1
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> t ^ k * (t * (cmod w ^ (k + 1) * m))
[PROOF STEP]
have th120: "cmod (?w^k * ?w * poly s ?w) < t^k"
[PROOF STATE]
proof (prove)
using this:
t ^ k * (t * (cmod w ^ (k + 1) * m)) < t ^ k * 1
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) \<le> t ^ k * (t * (cmod w ^ (k + 1) * m))
goal (1 subgoal):
1. cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < t ^ k
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < t ^ k
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from power_strict_mono[OF t(2), of k] t(1) kas(2)
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>0 \<le> t; 0 < k\<rbrakk> \<Longrightarrow> t ^ k < 1 ^ k
0 < t
k \<noteq> 0
[PROOF STEP]
have th121: "t^k \<le> 1"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>0 \<le> t; 0 < k\<rbrakk> \<Longrightarrow> t ^ k < 1 ^ k
0 < t
k \<noteq> 0
goal (1 subgoal):
1. t ^ k \<le> 1
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t ^ k \<le> 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from ath[OF norm_ge_zero[of "?w^k * ?w * poly s ?w"] th120 th121]
[PROOF STATE]
proof (chain)
picking this:
\<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < 1
[PROOF STEP]
have th12: "\<bar>1 - t^k\<bar> + cmod (?w^k * ?w * poly s ?w) < 1"
[PROOF STATE]
proof (prove)
using this:
\<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < 1
goal (1 subgoal):
1. \<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < 1
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
\<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
from th11 th12
[PROOF STATE]
proof (chain)
picking this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) \<le> \<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
\<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < 1
[PROOF STEP]
have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) < 1"
[PROOF STATE]
proof (prove)
using this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) \<le> \<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w))
\<bar>1 - t ^ k\<bar> + cmod ((complex_of_real t * w) ^ k * (complex_of_real t * w) * poly s (complex_of_real t * w)) < 1
goal (1 subgoal):
1. cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) < 1
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) < 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) < 1
[PROOF STEP]
have "cmod (poly ?r ?w) < 1"
[PROOF STATE]
proof (prove)
using this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) < 1
goal (1 subgoal):
1. cmod (poly (smult (inverse (poly q 0)) q) (complex_of_real t * w)) < 1
[PROOF STEP]
unfolding kas(4)[rule_format, of ?w] r01
[PROOF STATE]
proof (prove)
using this:
cmod (1 + (complex_of_real t * w) ^ k * (a + complex_of_real t * w * poly s (complex_of_real t * w))) < 1
goal (1 subgoal):
1. cmod (1 + (complex_of_real t * w) ^ k * poly (pCons a s) (complex_of_real t * w)) < 1
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cmod (poly (smult (inverse (poly q 0)) q) (complex_of_real t * w)) < 1
goal (1 subgoal):
1. psize p \<noteq> k + 1 \<Longrightarrow> \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
cmod (poly (smult (inverse (poly q 0)) q) (complex_of_real t * w)) < 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cmod (poly (smult (inverse (poly q 0)) q) (complex_of_real t * w)) < 1
goal (1 subgoal):
1. \<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
goal (1 subgoal):
1. poly p c \<noteq> 0 \<Longrightarrow> \<exists>z. poly p z = 0
[PROOF STEP]
with cq0 q(2)
[PROOF STATE]
proof (chain)
picking this:
\<forall>w. cmod (poly q 0) \<le> cmod (poly p w)
\<forall>x. poly q x = poly p (c + x)
\<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>w. cmod (poly q 0) \<le> cmod (poly p w)
\<forall>x. poly q x = poly p (c + x)
\<exists>w. cmod (poly (smult (inverse (poly q 0)) q) w) < 1
goal (1 subgoal):
1. \<exists>z. poly p z = 0
[PROOF STEP]
unfolding mrmq_eq not_less[symmetric]
[PROOF STATE]
proof (prove)
using this:
\<forall>w. \<not> cmod (poly p w) < cmod (poly q 0)
\<forall>x. poly q x = poly p (c + x)
\<exists>w. cmod (poly q w) < cmod (poly q 0)
goal (1 subgoal):
1. \<exists>z. poly p z = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>z. poly p z = 0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>z. poly p z = 0
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 23721, "file": null, "length": 238}
|
\chapter{Future developments}
\label{chap:futuredevelopments}
Here we list briefly some of the developments that we are working on. We also discuss a few suboptimal features of the current version of the toolbox.
\section{Load balancer}
Experiments are currently distributed among the workers in a fully random way. We plan to implement a more efficient load balancer, which should take into consideration the average training time of the algorithms, together with the characteristics of the hardware.
\section{Web service}
We would like to implement a web service to expose additional functionality developed by users, which could be downloaded automatically from within the toolbox itself.
|
{"hexsha": "35e94891dc786050a64e083509093cbf15df1d5a", "size": 695, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "manual/Manual Source Code/chapters/appendixB.tex", "max_stars_repo_name": "ispamm/Lynx-Toolbox", "max_stars_repo_head_hexsha": "c018ee3dbad4bfc75315732a883ccfd44e15f18a", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2015-02-02T09:09:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T08:38:25.000Z", "max_issues_repo_path": "manual/Manual Source Code/chapters/appendixB.tex", "max_issues_repo_name": "ispamm/Lynx-Toolbox", "max_issues_repo_head_hexsha": "c018ee3dbad4bfc75315732a883ccfd44e15f18a", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manual/Manual Source Code/chapters/appendixB.tex", "max_forks_repo_name": "ispamm/Lynx-Toolbox", "max_forks_repo_head_hexsha": "c018ee3dbad4bfc75315732a883ccfd44e15f18a", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2015-02-02T09:09:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-01T07:23:32.000Z", "avg_line_length": 57.9166666667, "max_line_length": 261, "alphanum_fraction": 0.8215827338, "num_tokens": 132}
|
"""
    run!(model::elmod3d)

Advance the 3D elastic finite-difference simulation by one step via the
OpenMP-parallel C kernel `el3d_openmp`.

Mutates `model` in place: the wavefield arrays (`model.wf.*`) and the PML
auxiliary arrays (`model.pml.*`) are updated by the C code.

NOTE(review): the shared-library path is hard-coded to a user-specific
absolute path; it should be derived from the package location (e.g. via
`joinpath(@__DIR__, ...)`) — confirm against the build setup.
"""
function run!(model::elmod3d)
path="/home/lzh/Dropbox/Zhenhua/Ongoing/Seisimu/deps/builds/el3d_openmp.so"
# BUG FIX: the C kernel returns nothing, so the ccall return type is
# `Cvoid` (the pre-0.7 name `Void` was removed in Julia 1.0).
ccall((:el3d_openmp,path),
Cvoid,
(Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cint, Cint, Cint,
Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cdouble, Cdouble, Cdouble, Cdouble, Cint,
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}),
model.wf.vx, model.nwf.BDnvx[2], model.nwf.BDnvx[3], model.nwf.BDnvx[1],
model.pml.PVxBTxx, model.pml.PVxBTxy, model.pml.PVxBTxz,
model.wf.vy, model.nwf.BDnvy[2], model.nwf.BDnvy[3], model.nwf.BDnvy[1],
model.pml.PVyBTxy, model.pml.PVyBTyy, model.pml.PVyBTyz,
model.wf.vz, model.nwf.BDnvz[2], model.nwf.BDnvz[3], model.nwf.BDnvz[1],
model.pml.PVzBTxz, model.pml.PVzBTyz, model.pml.PVzBTzz,
model.wf.txx, model.pml.PTxxBVx, model.pml.PTxxBVy, model.pml.PTxxBVz,
model.wf.tyy, model.pml.PTyyBVx, model.pml.PTyyBVy, model.pml.PTyyBVz,
model.wf.tzz, model.pml.PTzzBVx, model.pml.PTzzBVy, model.pml.PTzzBVz,
model.nwf.BDntpp[2], model.nwf.BDntpp[3], model.nwf.BDntpp[1],
model.wf.txy, model.nwf.BDntxy[2], model.nwf.BDntxy[3], model.nwf.BDntxy[1],
model.pml.PTxyBVx, model.pml.PTxyBVy,
model.wf.tyz, model.nwf.BDntyz[2], model.nwf.BDntyz[3], model.nwf.BDntyz[1],
model.pml.PTyzBVy, model.pml.PTyzBVz,
model.wf.txz, model.nwf.BDntxz[2], model.nwf.BDntxz[3], model.nwf.BDntxz[1],
model.pml.PTxzBVx, model.pml.PTxzBVz,
model.medium.rho, model.medium.lambda, model.medium.mu, model.fdc,
model.medium.dt, model.medium.dx, model.medium.dy, model.medium.dz, model.medium.ext,
model.pml.bhalf, model.pml.ahalf, model.pml.bfull, model.pml.afull)
end
"""
    run!(model::acmod3d)

Advance the 3D acoustic finite-difference simulation by one step via the
OpenMP-parallel C kernel `ac3d_openmp`.

Mutates `model` in place: the wavefield arrays (`model.wf.*`) and the PML
auxiliary arrays (`model.pml.*`) are updated by the C code.

NOTE(review): the shared-library path is hard-coded to a user-specific
absolute path; it should be derived from the package location — confirm
against the build setup.
"""
function run!(model::acmod3d)
path="/home/lzh/Dropbox/Zhenhua/Ongoing/Seisimu/deps/builds/ac3d_openmp.so"
# BUG FIX: the C kernel returns nothing, so the ccall return type is
# `Cvoid` (the pre-0.7 name `Void` was removed in Julia 1.0).
ccall((:ac3d_openmp,path),
Cvoid,
(Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Cint, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cint, Cint, Cint,
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cdouble, Cdouble, Cdouble, Cdouble, Cint,
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}),
model.wf.vx, model.nwf.BDnvx[2], model.nwf.BDnvx[3], model.nwf.BDnvx[1],
model.pml.PVxBTpp,
model.wf.vy, model.nwf.BDnvy[2], model.nwf.BDnvy[3], model.nwf.BDnvy[1],
model.pml.PVyBTpp,
model.wf.vz, model.nwf.BDnvz[2], model.nwf.BDnvz[3], model.nwf.BDnvz[1],
model.pml.PVzBTpp,
model.wf.tpp, model.pml.PTppBVx, model.pml.PTppBVy, model.pml.PTppBVz,
model.nwf.BDntpp[2], model.nwf.BDntpp[3], model.nwf.BDntpp[1],
model.medium.rho, model.medium.lambda, model.fdc,
model.medium.dt, model.medium.dx, model.medium.dy, model.medium.dz, model.medium.ext,
model.pml.bhalf, model.pml.ahalf, model.pml.bfull, model.pml.afull)
end
"""
    run!(model::elmod2d)

Advance the 2D elastic finite-difference simulation by one step via the
OpenMP-parallel C kernel `el2d_openmp`.

Mutates `model` in place: the wavefield arrays (`model.wf.*`) and the PML
auxiliary arrays (`model.pml.*`) are updated by the C code.

NOTE(review): the shared-library path is hard-coded to a user-specific
absolute path; it should be derived from the package location — confirm
against the build setup.
"""
function run!(model::elmod2d)
path="/home/lzh/Dropbox/Zhenhua/Ongoing/Seisimu/deps/builds/el2d_openmp.so"
# BUG FIX: the C kernel returns nothing, so the ccall return type is
# `Cvoid` (the pre-0.7 name `Void` was removed in Julia 1.0).
ccall((:el2d_openmp,path),
Cvoid,
(Ptr{Cdouble}, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cint, Cint,
Ptr{Cdouble}, Cint, Cint, Ptr{Cdouble}, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cdouble, Cdouble, Cdouble, Cint,
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}),
model.wf.vx, model.nwf.BDnvx[2], model.nwf.BDnvx[1],
model.pml.PVxBTxx, model.pml.PVxBTxz,
model.wf.vz, model.nwf.BDnvz[2], model.nwf.BDnvz[1],
model.pml.PVzBTxz, model.pml.PVzBTzz,
model.wf.txx, model.pml.PTxxBVx, model.pml.PTxxBVz,
model.wf.tzz, model.pml.PTzzBVx, model.pml.PTzzBVz,
model.nwf.BDntpp[2], model.nwf.BDntpp[1],
model.wf.txz, model.nwf.BDntxz[2], model.nwf.BDntxz[1],
model.pml.PTxzBVx, model.pml.PTxzBVz,
model.medium.rho, model.medium.lambda, model.medium.mu, model.fdc,
model.medium.dt, model.medium.dx, model.medium.dz, model.medium.ext,
model.pml.bhalf, model.pml.ahalf, model.pml.bfull, model.pml.afull)
end
"""
    run!(model::acmod2d)

Advance the 2D acoustic finite-difference simulation by one step via the
OpenMP-parallel C kernel `ac2d_openmp`.

Mutates `model` in place: the wavefield arrays (`model.wf.*`) and the PML
auxiliary arrays (`model.pml.*`) are updated by the C code.

NOTE(review): the shared-library path is hard-coded to a user-specific
absolute path; it should be derived from the package location — confirm
against the build setup.
"""
function run!(model::acmod2d)
path="/home/lzh/Dropbox/Zhenhua/Ongoing/Seisimu/deps/builds/ac2d_openmp.so"
# BUG FIX: the C kernel returns nothing, so the ccall return type is
# `Cvoid` (the pre-0.7 name `Void` was removed in Julia 1.0).
ccall((:ac2d_openmp,path),
Cvoid,
(Ptr{Cdouble}, Cint, Cint, Ptr{Cdouble},
Ptr{Cdouble}, Cint, Cint, Ptr{Cdouble},
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cint, Cint,
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble},
Cdouble, Cdouble, Cdouble, Cint,
Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}, Ptr{Cdouble}),
model.wf.vx, model.nwf.BDnvx[2], model.nwf.BDnvx[1],
model.pml.PVxBTpp,
model.wf.vz, model.nwf.BDnvz[2], model.nwf.BDnvz[1],
model.pml.PVzBTpp,
model.wf.tpp, model.pml.PTppBVx, model.pml.PTppBVz,
model.nwf.BDntpp[2], model.nwf.BDntpp[1],
model.medium.rho, model.medium.lambda, model.fdc,
model.medium.dt, model.medium.dx, model.medium.dz, model.medium.ext,
model.pml.bhalf, model.pml.ahalf, model.pml.bfull, model.pml.afull)
end
|
{"hexsha": "95d64cce3d0c06a017660030e65c0af77da6cc20", "size": 5468, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/extrap/run_base.jl", "max_stars_repo_name": "zhenhua3/Seisimu", "max_stars_repo_head_hexsha": "357b6a5c1ecfe8e6afd390fd3b295e878044a21d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-17T11:52:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-10T22:25:23.000Z", "max_issues_repo_path": "src/extrap/run_base.jl", "max_issues_repo_name": "zhenhua3/Seisimu", "max_issues_repo_head_hexsha": "357b6a5c1ecfe8e6afd390fd3b295e878044a21d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/extrap/run_base.jl", "max_forks_repo_name": "zhenhua3/Seisimu", "max_forks_repo_head_hexsha": "357b6a5c1ecfe8e6afd390fd3b295e878044a21d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-30T04:53:31.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-30T04:53:31.000Z", "avg_line_length": 45.5666666667, "max_line_length": 88, "alphanum_fraction": 0.7072055596, "num_tokens": 2194}
|
import numpy as np
from dnn_utils import sigmoid,sigmoid_backward,relu,relu_backward
def initialize_two_layer(n_x, n_h, n_y):
    """
    Initialize parameters for a two-layer neural network.

    Arguments:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    param -- dict with keys "W1", "b1", "W2", "b2"
    """
    # Small random weights break symmetry; biases start at zero.
    W1 = np.random.randn(n_h, n_x) * 0.01
    # BUG FIX: np.zeros takes the shape as one tuple argument;
    # np.zeros(n_h, 1) raises TypeError (1 is not a valid dtype).
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))
    param = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return param
def initialize_l_layer(layer_dims):
    """
    Initialize parameters for an L-layer neural network.

    Arguments:
    layer_dims -- list of layer sizes, starting with the input dimension

    Returns:
    param -- dict with keys "W1".."W(L-1)" and "b1".."b(L-1)",
             where W{l} has shape (layer_dims[l], layer_dims[l-1])
             and b{l} has shape (layer_dims[l], 1)
    """
    param = {}
    L = len(layer_dims)
    for l in range(1, L):
        param['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
        # BUG FIX: np.zeros takes the shape as one tuple argument;
        # np.zeros(n, 1) raises TypeError (1 is not a valid dtype).
        param['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return param
def linear_forward(W, A, b):
    """
    Compute the affine (pre-activation) output of a layer: Z = W·A + b.

    Arguments:
    W -- weight matrix, shape (n_curr, n_prev)
    A -- activations of the previous layer, shape (n_prev, m)
    b -- bias column vector, shape (n_curr, 1), broadcast over the batch

    Returns:
    Z -- linear output, shape (n_curr, m)
    """
    return np.dot(W, A) + b
def linear_activation_forward(A_pre, W, b, activation):
    """
    Forward step for one layer: affine transform followed by an activation.

    Arguments:
    A_pre -- activations from the previous layer, shape (n_prev, m)
    W -- weight matrix, shape (n_curr, n_prev)
    b -- bias vector, shape (n_curr, 1)
    activation -- "Relu" or "sigmoid"

    Returns:
    A -- post-activation output
    cache -- ((A_pre, W, b), activation_cache), saved for backprop

    Raises:
    ValueError -- if activation is neither "Relu" nor "sigmoid"
    """
    # The affine part is the same for either activation; compute it once.
    Z = linear_forward(W, A_pre, b)
    if activation == "Relu":
        A, activation_cache = relu(Z)
    elif activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    else:
        # Fail fast with a clear message instead of an UnboundLocalError
        # on `A` further down.
        raise ValueError("activation must be 'Relu' or 'sigmoid', got %r" % (activation,))
    backward_used_cache = (A_pre, W, b)
    cache = (backward_used_cache, activation_cache)
    return A, cache
def L_model_forward(X, param):
    """
    Full forward pass: (L-1) Relu hidden layers followed by a sigmoid
    output layer.

    Arguments:
    X -- input data, shape (n_x, m)
    param -- dict of parameters "W1".."WL" and "b1".."bL"

    Returns:
    Al -- final (sigmoid) activations
    caches -- list of per-layer caches for backpropagation
    """
    caches = []
    L = len(param) // 2  # two entries (W, b) per layer
    A = X
    # Hidden layers 1..L-1 use Relu.
    for l in range(1, L):
        # BUG FIX: the activation must be passed as the string "Relu";
        # the bare name `Relu` raised NameError.
        A, cache = linear_activation_forward(A, param['W' + str(l)], param['b' + str(l)], "Relu")
        caches.append(cache)
    # BUG FIX: the output layer must use W_L/b_L with sigmoid; the
    # original reused the leftover loop index (L-1) with Relu, so layer L
    # was never applied.
    Al, cache = linear_activation_forward(A, param['W' + str(L)], param['b' + str(L)], "sigmoid")
    caches.append(cache)
    return Al, caches
def linear_backward(dz,cache):
"""
Implement the backward propagation of linear part
"""
m = dz.shape[1]
dw = np.dot(dz,cache[0]) / m
db = np.sum(dz) / m
dA_pre = np.dot(cache[1],dz)
return dw,db,dA_pre
def linear_activation_backward(dA, cache, activation):
    """
    Backward pass for one layer: undo the activation, then the affine part.

    Arguments:
    dA -- gradient of the cost w.r.t. this layer's activations
    cache -- ((A_pre, W, b), activation_cache) from the forward pass
    activation -- "Relu" or "sigmoid"

    Returns:
    dw -- gradient w.r.t. W
    db -- gradient w.r.t. b
    dA_pre -- gradient w.r.t. the previous layer's activations

    Raises:
    ValueError -- if activation is neither "Relu" nor "sigmoid"
    """
    if activation == "Relu":
        dz = relu_backward(dA, cache[1])
    elif activation == "sigmoid":
        dz = sigmoid_backward(dA, cache[1])
    else:
        # Fail fast with a clear message instead of an UnboundLocalError
        # on `dz` below.
        raise ValueError("activation must be 'Relu' or 'sigmoid', got %r" % (activation,))
    dw, db, dA_pre = linear_backward(dz, cache[0])
    return dw, db, dA_pre
def L_model_backward(AL, Y, caches):
    """
    Backward pass for the whole L-layer model (sigmoid output layer,
    Relu hidden layers).

    Arguments:
    AL -- output of the forward pass (final sigmoid activations)
    Y -- true labels, same shape as AL
    caches -- list of per-layer caches from L_model_forward

    Returns:
    grads -- dict of gradients keyed "dw1".."dwL", "db1".."dbL",
             "dA2".."dA(L)" (dA{l+1} is the gradient flowing into layer l)
    """
    grads = {}
    L = len(caches)
    # Derivative of the cross-entropy cost w.r.t. the sigmoid output.
    # BUG FIX: the variable was assigned as `dAl` but used as `dAL`
    # below (NameError); use one consistent name.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    grads['dw' + str(L)], grads['db' + str(L)], grads['dA' + str(L)] = \
        linear_activation_backward(dAL, caches[-1], "sigmoid")
    # Propagate backwards through the Relu hidden layers L-1 .. 1.
    for l in reversed(range(L - 1)):
        cache = caches[l]
        grads['dw' + str(l + 1)], grads['db' + str(l + 1)], grads['dA' + str(l + 1)] = \
            linear_activation_backward(grads['dA' + str(l + 2)], cache, "Relu")
    return grads
def update_param(param, grads, learning_rate):
    """
    Apply one gradient-descent step to every parameter.

    Arguments:
    param -- dict of parameters "W1".."WL", "b1".."bL"
    grads -- dict of gradients keyed "dw1".."dwL", "db1".."dbL"
             (as produced by L_model_backward)
    learning_rate -- step size

    Returns:
    param -- the same dict with updated entries
    """
    L = len(param) // 2
    for l in range(L):
        # BUG FIX: L_model_backward stores gradients under "dw"/"db"
        # keys; the original looked up "W"/"b" and raised KeyError.
        param['W' + str(l + 1)] = param['W' + str(l + 1)] - learning_rate * grads['dw' + str(l + 1)]
        param['b' + str(l + 1)] = param['b' + str(l + 1)] - learning_rate * grads['db' + str(l + 1)]
    return param
|
{"hexsha": "f727406dcaa18843458f6c479462d8f14bb82493", "size": 2802, "ext": "py", "lang": "Python", "max_stars_repo_path": "DLCoursera_part1_week4_1.py", "max_stars_repo_name": "zhouhan921001/DeepLearning-homework", "max_stars_repo_head_hexsha": "20562dc49ca5898b531a678c0e54c8d985fcc72f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DLCoursera_part1_week4_1.py", "max_issues_repo_name": "zhouhan921001/DeepLearning-homework", "max_issues_repo_head_hexsha": "20562dc49ca5898b531a678c0e54c8d985fcc72f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DLCoursera_part1_week4_1.py", "max_forks_repo_name": "zhouhan921001/DeepLearning-homework", "max_forks_repo_head_hexsha": "20562dc49ca5898b531a678c0e54c8d985fcc72f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.062992126, "max_line_length": 115, "alphanum_fraction": 0.6623840114, "include": true, "reason": "import numpy", "num_tokens": 876}
|
import os
import numpy as np
import torch
import torch.autograd
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
class ReplayBuffer:
    """
    Fixed-capacity FIFO buffer of environment transitions.

    Each transition is the tuple (state, action, reward, next_state);
    once `max_size` entries are held, the oldest entry is evicted to make
    room for the newest one.
    """

    def __init__(self, size):
        # Parallel lists, one per transition component.
        self.state_buf = []
        self.act_buf = []
        self.rew_buf = []
        self.next_state_buf = []
        # `ptr` counts stored entries; `max_size` caps the buffer length.
        self.ptr, self.max_size = 0, size

    def store(self, state, act, rew, next_state):
        """
        Append a single timestep to the buffer. Called at each environment
        update to record the observed outcome; evicts the oldest entry
        when the buffer is full.
        """
        if self.ptr == self.max_size:
            # Drop the oldest transition from every parallel list.
            for buf in (self.state_buf, self.act_buf, self.rew_buf, self.next_state_buf):
                buf.pop(0)
            self.ptr -= 1
        # Entries are stored exactly as given (no reshaping) — best
        # suited for the decentralized setting.
        self.state_buf.append(state)
        self.act_buf.append(act)
        self.rew_buf.append(rew)
        self.next_state_buf.append(next_state)
        self.ptr += 1

    def get(self):
        """
        Concatenate the whole buffer and return it as float32 torch
        tensors keyed 'state', 'act', 'rew', 'next_state'. Called when
        updating the agent networks.
        """
        arrays = {
            'state': np.concatenate(self.state_buf),
            'act': np.concatenate(self.act_buf),
            'rew': np.concatenate(self.rew_buf),
            'next_state': np.concatenate(self.next_state_buf),
        }
        return {key: torch.as_tensor(val, dtype=torch.float32) for key, val in arrays.items()}

    def parse_observation(self,):
        # Placeholder: observation parsing is not implemented yet.
        pass
class Critic(nn.Module):
    """
    Centralized critic network: maps a joint (state, action) vector to a
    single scalar Q-value estimate.
    """

    def __init__(self, state_size, action_size, hidden_size=[500, 500]):
        """
        Constructor

        Arguments:
        - state_size : dimension of the critic's state input
        - action_size : total action dimension of the critic's action input
        - hidden_size : list of hidden-layer widths
        """
        super(Critic, self).__init__()
        # State and action are concatenated before the first layer.
        self.input_size = state_size + action_size
        self.hidden_size = hidden_size
        self.output_size = 1
        # Build input -> hidden ... -> scalar output as a chain of
        # double-precision linear layers.
        widths = [self.input_size] + list(hidden_size) + [self.output_size]
        self.layers = nn.ModuleList(
            nn.Linear(n_in, n_out).double()
            for n_in, n_out in zip(widths[:-1], widths[1:])
        )
        # Xavier-uniform initialization of every weight matrix.
        for layer in self.layers:
            torch.nn.init.xavier_uniform_(layer.weight)

    def forward(self, state, action):
        """
        Forward pass. `state` and `action` are torch tensors; they are
        concatenated along dim 1, passed through ReLU hidden layers, and
        a final linear (unbounded) output layer.
        """
        x = torch.cat([state, action], 1)
        for layer in self.layers[:-1]:
            x = nn.ReLU()(layer(x))
        return self.layers[-1](x)
class Actor(nn.Module):
    """
    Actor network: maps a single agent's state to an action vector whose
    components lie in [-1, 1] (tanh output layer).
    """

    def __init__(self, state_dim=4, act_dim=2, hidden_size=[100, 100]):
        super(Actor, self).__init__()
        self.input_size = state_dim
        self.output_size = act_dim
        self.hidden_size = hidden_size
        # Build input -> hidden ... -> action output as a chain of
        # double-precision linear layers.
        widths = [self.input_size] + list(hidden_size) + [self.output_size]
        self.layers = nn.ModuleList(
            nn.Linear(n_in, n_out).double()
            for n_in, n_out in zip(widths[:-1], widths[1:])
        )
        # Xavier-uniform initialization of every weight matrix.
        for layer in self.layers:
            torch.nn.init.xavier_uniform_(layer.weight)

    def forward(self, state):
        """
        Forward pass. `state` is a torch tensor; ReLU hidden layers are
        followed by a tanh output squashing the action into [-1, 1].
        """
        x = state
        for layer in self.layers[:-1]:
            x = nn.ReLU()(layer(x))
        return nn.Tanh()(self.layers[-1](x))
# Note: Buffer size is changed
class MADDPGagent:
    """Multi-Agent DDPG (MADDPG) trainer.

    Holds, per agent: one decentralized actor (sees only that agent's own
    observation), one centralized critic (sees the masked joint state and the
    joint action of all agents), and target copies of both. All agents share
    one replay buffer.

    Parameters
    ----------
    N_agents : number of agents.
    state_dim : per-agent observation dimension (actor input size).
    act_dim : per-agent action dimension.
    critic_state_mask : indices of each agent's observation that are kept for
        the critic input (avoids feeding duplicated state entries twice).
    actor_learning_rate, critic_learning_rate : Adam learning rates.
    gamma : discount factor for the TD target.
    tau : Polyak averaging coefficient for the target-network updates.
    max_memory_size : replay buffer capacity.
    hidden_size_critic, hidden_size_actor : hidden layer sizes.
    batch_size : minibatch size drawn on every `update()` call.
    """

    # NOTE: the mutable default arguments (lists) are kept for interface
    # compatibility; they are never mutated inside the class.
    def __init__(self, N_agents, state_dim, act_dim, critic_state_mask=[0, 1, 2, 3, -1, -2], actor_learning_rate=1e-4,
                 critic_learning_rate=1e-3, gamma=0.99, tau=1e-2, max_memory_size=30000,
                 hidden_size_critic=[500, 500], hidden_size_actor=[100, 100],
                 batch_size=128):
        # Params
        self.N_agents = N_agents
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size

        # Critics: centralized — input is the masked joint state plus the
        # joint action of all agents.
        self.critic_state_mask = critic_state_mask
        self.critic_state_dim = N_agents * len(self.critic_state_mask)
        self.critic_act_dim = N_agents * self.act_dim
        self.critics = [Critic(self.critic_state_dim,
                               self.critic_act_dim,
                               hidden_size_critic) for i in range(N_agents)]
        self.critics_target = [Critic(self.critic_state_dim,
                                      self.critic_act_dim,
                                      hidden_size_critic) for i in range(N_agents)]

        # Actors: decentralized — input is a single agent's observation.
        self.actors = [Actor(state_dim, act_dim, hidden_size_actor) for i in range(N_agents)]
        self.actors_target = [Actor(state_dim, act_dim, hidden_size_actor) for i in range(N_agents)]

        # Initialize the target networks as exact copies of the online ones.
        for critic, critic_target in zip(self.critics, self.critics_target):
            for target_param, param in zip(critic_target.parameters(), critic.parameters()):
                target_param.data.copy_(param.data)
        for actor, actor_target in zip(self.actors, self.actors_target):
            for target_param, param in zip(actor_target.parameters(), actor.parameters()):
                target_param.data.copy_(param.data)

        # Shared replay buffer
        self.memory = ReplayBuffer(max_memory_size)

        # Loss function and one optimizer per network
        self.critic_criterion = nn.MSELoss(reduction='mean')
        self.actor_optimizers = [optim.Adam(self.actors[i].parameters(), lr=actor_learning_rate)
                                 for i in range(N_agents)]
        self.critic_optimizers = [optim.Adam(self.critics[i].parameters(), lr=critic_learning_rate)
                                  for i in range(N_agents)]

    @torch.no_grad()
    def get_action(self, state, *args):
        """Return each agent's action for its own observation.

        :param state: indexable per-agent observations; ``state[i]`` feeds actor i.
        :returns: list of flattened numpy action arrays, one per agent.
        """
        actions = []
        for i in range(self.N_agents):
            s = torch.tensor(state[i], dtype=torch.float64)
            action = self.actors[i](s).detach().numpy().flatten()
            actions.append(action)
        return actions

    def update(self):
        """Run one MADDPG training step on a random minibatch.

        Per agent: compute the critic TD loss against the target networks and
        the policy loss ``-Q_i(joint state, joint action)``, then apply the
        optimizer steps and Polyak-average the target networks.
        """
        # Sample a minibatch (np.random.choice samples WITH replacement).
        data_size = self.memory.ptr
        sample_idx = np.random.choice(np.arange(data_size), self.batch_size)
        states = [self.memory.state_buf[i] for i in sample_idx]
        actions = [self.memory.act_buf[i] for i in sample_idx]
        rewards = [self.memory.rew_buf[i] for i in sample_idx]
        next_states = [self.memory.next_state_buf[i] for i in sample_idx]
        # Convert to "correct" input format for Pytorch NNs
        batch = {'states': states,
                 'actions': actions,
                 'rewards': rewards,
                 'next_states': next_states}
        # Joint (masked) state and joint action tensors for the critics
        x_crit, a_crit = self.get_Q_state(batch)
        reward_mat = np.array(rewards)
        critics_loss = []
        policy_losses = []
        for idx, (actor, critic, critic_target) in enumerate(zip(self.actors,
                                                                 self.critics,
                                                                 self.critics_target)):
            # Q-values for the current joint state/action
            Q_vals = critic(x_crit, a_crit).squeeze(1)
            # Next joint action: each *target* actor evaluates its own agent's
            # next observation.
            next_actions = []
            next_state_mat = np.array(next_states)
            state_mat = np.array(states)
            for idx_, actor_ in enumerate(self.actors_target):
                next_actions.append(
                    actor_(torch.tensor(next_state_mat[:, idx_, :], dtype=torch.float64)))
            # merge next actions
            A_prime = torch.cat(next_actions, axis=1)
            # Q-values for the next joint state (drop duplicated state entries)
            next_state_mat = next_state_mat[:, :, self.critic_state_mask]
            S_prime = torch.tensor(next_state_mat.reshape(self.batch_size, self.critic_state_dim),
                                   dtype=torch.float64)
            next_Q = critic_target(S_prime, A_prime.detach()).squeeze(1).detach()
            # Compute the TD target Q_prime and the critic loss
            Q_prime = torch.tensor(reward_mat[:, idx], dtype=torch.float64) + self.gamma * next_Q
            critics_loss.append(self.critic_criterion(Q_vals, Q_prime))
            # Policy loss: rebuild the joint action so that only agent `idx`
            # keeps its autograd graph.
            a_tot = []
            for i in range(self.N_agents):
                # BUG FIX: each actor must receive its *own* observation
                # state_mat[:, i, :]; the original passed state_mat[:, idx, :],
                # feeding every actor the same agent's observation.
                action_i = self.actors[i](torch.tensor(state_mat[:, i, :], dtype=torch.float64))
                if i != idx:
                    # detach the computational graph from all other agents
                    a_tot.append(action_i.detach())
                else:
                    a_tot.append(action_i)
            a_tot = torch.cat(a_tot, 1)
            policy_losses.append(-critic(x_crit, a_tot).mean())

        for i in range(self.N_agents):
            # Actor update
            self.actor_optimizers[i].zero_grad()
            policy_losses[i].backward()
            self.actor_optimizers[i].step()
            # Critic update (zero_grad also clears gradients the policy
            # backward pass deposited in the critic, as in the original code)
            self.critic_optimizers[i].zero_grad()
            critics_loss[i].backward()
            self.critic_optimizers[i].step()
            # Polyak-average the target networks towards the online networks
            for target_param, param in zip(self.actors_target[i].parameters(), self.actors[i].parameters()):
                target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))
            for target_param, param in zip(self.critics_target[i].parameters(), self.critics[i].parameters()):
                target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))

    def get_Q_state(self, batch):
        """Build the centralized critic inputs from a sampled batch.

        :param batch: dict with 'states' and 'actions', each indexed
            ``[sample][agent]``.
        :returns: ``(x_input, a_input)`` — (batch, critic_state_dim) masked
            joint states and (batch, critic_act_dim) joint actions.
        """
        x_input = torch.zeros((self.batch_size, self.critic_state_dim), dtype=torch.float64)
        a_input = torch.zeros((self.batch_size, self.critic_act_dim), dtype=torch.float64)
        for t in range(self.batch_size):
            # Concatenate every agent's masked observation
            x = []
            for agent in range(self.N_agents):
                x += [torch.tensor(batch['states'][t][agent][self.critic_state_mask], dtype=torch.float64)]
            x_input[t, :] = torch.cat(x)
            # Concatenate every agent's action
            a = []
            for agent in range(self.N_agents):
                a += [torch.tensor(batch['actions'][t][agent], dtype=torch.float64)]
            a_input[t, :] = torch.cat(a)
        return x_input, a_input

    def save_params(self, directory):
        """Save all actor and critic networks under `directory`
        (created if missing)."""
        if not os.path.exists(directory):
            os.makedirs(directory)
        for i in range(self.N_agents):
            # Save Actor Network i
            torch.save(self.actors[i].state_dict(), directory + 'actor_net_' + str(i) + '.pkl')
            # Save Critic Network i
            torch.save(self.critics[i].state_dict(), directory + 'critic_net_' + str(i) + '.pkl')

    def load_params(self, directory):
        """Load all actor and critic networks from `directory`.

        :raises Exception: if `directory` does not exist.
        """
        if not os.path.exists(directory):
            raise Exception('There exists no such directory.')
        for i in range(self.N_agents):
            self.actors[i].load_state_dict(torch.load(directory + 'actor_net_' + str(i) + '.pkl'))
            self.critics[i].load_state_dict(torch.load(directory + 'critic_net_' + str(i) + '.pkl'))
|
{"hexsha": "7a31acae5dea23028a3229c2f053afa69e4d2fbd", "size": 12631, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/MADDPG.py", "max_stars_repo_name": "zisikons/deep-rl", "max_stars_repo_head_hexsha": "3c39a194d048618a2a3962cdf5f4b1825e789a22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-01-17T13:44:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:47:46.000Z", "max_issues_repo_path": "core/MADDPG.py", "max_issues_repo_name": "zisikons/deep-rl", "max_issues_repo_head_hexsha": "3c39a194d048618a2a3962cdf5f4b1825e789a22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/MADDPG.py", "max_forks_repo_name": "zisikons/deep-rl", "max_forks_repo_head_hexsha": "3c39a194d048618a2a3962cdf5f4b1825e789a22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-15T15:15:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T03:47:32.000Z", "avg_line_length": 36.1919770774, "max_line_length": 115, "alphanum_fraction": 0.586335207, "include": true, "reason": "import numpy", "num_tokens": 2785}
|
from sklearn.grid_search import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint as sp_randint
from time import time
import numpy as np
def grid_search_parameter(clf, X, y):
    """Tune `max_depth` for `clf` with a 5-fold cross-validated grid search
    and print the elapsed time and the best results found.

    :param clf: scikit-learn estimator exposing a `max_depth` parameter
    :param X: feature matrix
    :param y: target labels
    """
    param = {'max_depth': [3, 4, 5, 6, 7, 8]}
    grid_search = GridSearchCV(clf, param, cv=5, scoring='accuracy')
    start = time()
    grid_search.fit(X, y)
    print("GridSearchCV took %.3f seconds for parameter settings." % (time() - start))
    # BUG FIX: output said "Beat Estimator" instead of "Best Estimator".
    print("\n", "Best Estimator : ", grid_search.best_estimator_)
    print("\n", "Grid Score : ", grid_search.grid_scores_)
    print("\n", "Best Score : ", grid_search.best_score_)
    print("\n", "Best Param : ", grid_search.best_params_)
def random_search_parameter(clf, X, y):
    """Run a 5-fold cross-validated randomized hyper-parameter search on
    `clf`, then print the elapsed time and the top-ranked candidates.

    :param clf: scikit-learn tree-style estimator
    :param X: feature matrix
    :param y: target labels
    """
    search_space = {
        "max_depth": [3, 4, 5],
        "min_samples_split": sp_randint(2, 11),
        "min_samples_leaf": sp_randint(1, 11),
        "criterion": ['gini', 'entropy'],
    }
    random_search = RandomizedSearchCV(clf, search_space, cv=5, scoring='accuracy')
    t0 = time()
    random_search.fit(X, y)
    print("RandomizedSearchCV took %.3f seconds for parameter settings." % (time() - t0))
    # Report every candidate holding one of the top ranks (1 through 9).
    results = random_search.cv_results_
    for rank in range(1, 10):
        for candidate in np.flatnonzero(results['rank_test_score'] == rank):
            print("Model with rank:{0}".format(rank))
            print("Mean validation score:{0:.3f}(std:{1:.3f})".format(
                results['mean_test_score'][candidate],
                results['std_test_score'][candidate]))
            print("Parameters:{0}".format(results['params'][candidate]), "\n")
|
{"hexsha": "6e786f458516627733d69955381ae85d3777bbd9", "size": 1693, "ext": "py", "lang": "Python", "max_stars_repo_path": "activity_recognition/parameter.py", "max_stars_repo_name": "linw7/Activity-Recognition", "max_stars_repo_head_hexsha": "f76a327268c48f6e3cbe5ff25576f49d8c4927cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2017-12-08T07:24:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T08:16:56.000Z", "max_issues_repo_path": "activity_recognition/parameter.py", "max_issues_repo_name": "linw7/Activity-Recognition", "max_issues_repo_head_hexsha": "f76a327268c48f6e3cbe5ff25576f49d8c4927cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-17T14:33:21.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-17T14:33:21.000Z", "max_forks_repo_path": "activity_recognition/parameter.py", "max_forks_repo_name": "linw7/Activity-Recognition", "max_forks_repo_head_hexsha": "f76a327268c48f6e3cbe5ff25576f49d8c4927cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2018-04-10T08:45:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T12:29:02.000Z", "avg_line_length": 47.0277777778, "max_line_length": 96, "alphanum_fraction": 0.6503248671, "include": true, "reason": "import numpy,from scipy", "num_tokens": 432}
|
from scipy.spatial import cKDTree as KDTree
import numpy as np
class NormalVectorEstimator(object):
    """Estimate outward unit normal vectors on a closed triangulated surface.

    :param simplices: sequence of (3,) vertex-index triplets, one per facet
    :param points: (num_points, 3) array of vertex coordinates
    """

    def __init__(self, simplices, points):
        self.simplices = simplices
        self.points = points
        self.centroids = self._facet_centroids()
        # KD-tree over facet centroids for fast radius queries in get_normal
        self.centroid_tree = KDTree(self.centroids)
        self.normal_vectors = self._facet_normals()
        # Number of facets averaged on each get_normal call (diagnostics)
        self.num_average_history = []

    def reset(self):
        """Reset the normal vector estimator."""
        self.num_average_history = []

    def _facet_centroids(self):
        """Calculate the centroids of all facets."""
        centroids = np.zeros((len(self.simplices), 3))
        for i, facet in enumerate(self.simplices):
            v1 = self.points[facet[0], :]
            v2 = self.points[facet[1], :]
            v3 = self.points[facet[2], :]
            centroids[i, :] = (v1 + v2 + v3) / 3
        return centroids

    def _facet_normals(self):
        """Calculate the outward-pointing unit normal of every facet."""
        com = np.mean(self.points, axis=0)
        normals = np.zeros((len(self.simplices), 3))
        for i, facet in enumerate(self.simplices):
            v1 = self.points[facet[0], :]
            v2 = self.points[facet[1], :]
            v3 = self.points[facet[2], :]
            vec1 = v2 - v1
            vec2 = v3 - v1
            n = np.cross(vec1, vec2)
            n /= np.sqrt(n.dot(n))
            # We have to make sure that we store the outwards normal vector:
            # flip n if it points towards the centre of mass.
            orig_to_centroid = self.centroids[i, :] - com
            dist = orig_to_centroid.dot(n)
            if dist < 0.0:
                normals[i, :] = -n
            else:
                normals[i, :] = n
        return normals

    def get_normal(self, x, cutoff=1.0):
        """Calculate the normal vector at a point on the surface.

        :param np.ndarray x: Position at which to calculate the normal
        :param float cutoff: Local averaging cutoff. The returned
            vector is the normalized mean of the normals of all facets whose
            centroid is less than this value away from x.
        :raises ValueError: if no facet centroid lies within `cutoff` of x
        """
        facet_indx = self.centroid_tree.query_ball_point(x, cutoff)
        if not facet_indx:
            # BUG FIX: message was garbled ("There are now facets with {} from
            # {}Are you sure...").
            raise ValueError("There are no facets within {} of {}. "
                             "Are you sure you specified a point on the "
                             "surface?".format(cutoff, x))
        normal = np.zeros(3)
        for indx in facet_indx:
            normal += self.normal_vectors[indx, :]
        # Track how many facets were used in the averaging
        # for later reference
        self.num_average_history.append(len(facet_indx))
        return normal / np.sqrt(normal.dot(normal))

    def show_statistics(self):
        """Plots the statistics of averaging."""
        from matplotlib import pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(self.num_average_history, "o", mfc="none")
        ax.set_xlabel("Call number")
        ax.set_ylabel("Number of facet used in averaging")
        plt.show()
|
{"hexsha": "0ca022637e1696d62eb92392ada5da768ef58dc9", "size": 3084, "ext": "py", "lang": "Python", "max_stars_repo_path": "cemc/tools/normal_vector.py", "max_stars_repo_name": "davidkleiven/WangLandau", "max_stars_repo_head_hexsha": "0b253dd98033c53560fe95c76f5e38257834bdf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-10T00:38:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T22:08:40.000Z", "max_issues_repo_path": "cemc/tools/normal_vector.py", "max_issues_repo_name": "davidkleiven/CEMC", "max_issues_repo_head_hexsha": "0b253dd98033c53560fe95c76f5e38257834bdf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2018-05-21T14:52:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-24T07:45:09.000Z", "max_forks_repo_path": "cemc/tools/normal_vector.py", "max_forks_repo_name": "davidkleiven/WangLandau", "max_forks_repo_head_hexsha": "0b253dd98033c53560fe95c76f5e38257834bdf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-10-09T14:03:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T05:36:05.000Z", "avg_line_length": 38.0740740741, "max_line_length": 75, "alphanum_fraction": 0.567769131, "include": true, "reason": "import numpy,from scipy", "num_tokens": 706}
|
import argparse
import json
from pathlib import Path
import skimage.transform
import torch
import visdom
from skimage.io import imread
from torch.nn import functional as F
import numpy as np
from terial import models
from terial.classifier.inference.utils import compute_weighted_scores_single
from terial.classifier.network import RendNet3
from terial.database import session_scope
from terial.classifier import transforms
from terial.config import SUBSTANCES
# Visdom dashboard used to display the input image, mask and predictions.
vis = visdom.Visdom(env='classifier-infer-one')
# CLI: a trained checkpoint plus one image/mask pair; --cuda enables GPU.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint-path', type=Path)
parser.add_argument(dest='image_path', type=Path)
parser.add_argument(dest='mask_path', type=Path)
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
# Network input resolution (square images).
input_size = 224
input_shape = (224, 224)
def main():
    """Classify the material of one masked image region and visualize the
    top predictions in Visdom.

    Reads `args` (image_path, mask_path, checkpoint_path, cuda) from module
    scope; exits early with a message if any path is missing.
    """
    if not args.image_path.exists():
        print(f"{args.image_path!s} does not exist")
        return
    if not args.mask_path.exists():
        print(f"{args.mask_path!s} does not exist")
        return
    if not args.checkpoint_path.exists():
        print(f"{args.checkpoint_path!s} does not exist")
        return
    checkpoint_path = args.checkpoint_path
    # Load the image, drop any alpha channel, and resize to the network input
    # shape (bicubic, padded with white: cval=1 on a float image).
    image = imread(args.image_path)
    if len(image.shape) > 2 and image.shape[2] > 3:
        image = image[:, :, :3]
    image = skimage.transform.resize(
        image, input_shape, anti_aliasing=True, order=3,
        mode='constant', cval=1)
    image = skimage.img_as_ubyte(image)
    # Load the mask, keep a single channel, and resize with nearest-neighbour
    # (order=0) so the mask stays binary.
    mask = imread(args.mask_path)
    if len(mask.shape) > 2:
        mask = mask[:, :, 0]
    mask = skimage.transform.resize(
        mask, input_shape, anti_aliasing=False, order=0,
        mode='constant', cval=0).astype(bool)
    print(image.dtype, image.shape, mask.dtype)
    # meta.json maps material ids to class labels; invert it for lookup.
    with (checkpoint_path.parent / 'meta.json').open('r') as f:
        meta_dict = json.load(f)
    mat_id_to_label = meta_dict['mat_id_to_label']
    label_to_mat_id = {v: k for k, v in mat_id_to_label.items()}
    # NOTE(review): model_params is loaded but not used below -- confirm
    # whether it is needed.
    with (checkpoint_path.parent / 'model_params.json').open('r') as f:
        model_params = json.load(f)
    print(f'Loading checkpoint from {checkpoint_path!s}')
    if args.cuda:
        checkpoint = torch.load(checkpoint_path)
    else:
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model = RendNet3.from_checkpoint(checkpoint)
    model.train(False)  # evaluation mode
    if args.cuda:
        model = model.cuda()
    # Fetch all materials from the database for id -> material lookups.
    with session_scope() as sess:
        materials = sess.query(models.Material).all()
        mat_by_id = {m.id: m for m in materials}
    # Show the inputs in Visdom (CHW layout for images).
    vis.image(image.transpose((2, 0, 1)), win='image')
    vis.image((mask * 255).astype(np.uint8), win='mask')
    topk_dict = compute_topk(
        label_to_mat_id, model, image, mask,
        mat_by_id=mat_by_id)
    # Re-rank material scores using substance weighting.
    compute_weighted_scores_single(topk_dict, mat_by_id, sort=True,
                                   force_substances=False,
                                   weight_substances=True)
    # Display previews of the top-k predicted materials.
    k = 5
    topk_mat_ids = [p['id'] for p in topk_dict['material'][:k]]
    for i, mat_id in enumerate(topk_mat_ids):
        material = mat_by_id[mat_id]
        preview = imread(material.get_data_path('previews/bmps.png')).transpose((2, 0, 1))
        vis.images(preview, win=f'pred-{i}', opts={
            'title': f'pred-{i} (mat_id={mat_id})',
            'width': 200,
            'height': 200,
        })
    vis.text(json.dumps((topk_dict['substance']), indent=2),
             win='substance-prediction')
    print(topk_dict)
def compute_topk(label_to_mat_id,
                 model: RendNet3,
                 image,
                 seg_mask,
                 *,
                 mat_by_id):
    """Run `model` on one image + segmentation mask and return ranked
    predictions.

    Returns a dict with key 'material' (list of {score, id, pred_substance}
    sorted by descending softmax score, label 0 skipped as background), plus
    'substance' and 'roughness' lists when the model emits those heads.
    """
    if image.dtype != np.uint8:
        image = (image * 255).astype(np.uint8)
    # Nearest-neighbour resize keeps the mask binary; expand to HxWx1 uint8.
    seg_mask = skimage.transform.resize(
        seg_mask, (224, 224), order=0, anti_aliasing=False, mode='constant')
    seg_mask = seg_mask[:, :, np.newaxis].astype(dtype=np.uint8) * 255
    # Image and mask go through the project's inference transforms, then are
    # stacked channel-wise into a single 1-sample batch.
    image_tensor = transforms.inference_image_transform(
        input_size=224, output_size=224, pad=0, to_pil=True)(image)
    mask_tensor = transforms.inference_mask_transform(
        input_size=224, output_size=224, pad=0)(seg_mask)
    input_tensor = torch.cat((image_tensor, mask_tensor), dim=0).unsqueeze(0)
    if args.cuda:  # module-level CLI args
        input_tensor = input_tensor.cuda()
    output = model.forward(input_tensor)
    # Rank ALL material classes (k = number of classes).
    topk_mat_scores, topk_mat_labels = torch.topk(
        F.softmax(output['material'], dim=1), k=output['material'].size(1))
    topk_dict = {'material': list()}
    for score, label in zip(topk_mat_scores.squeeze().tolist(),
                            topk_mat_labels.squeeze().tolist()):
        # Label 0 is skipped (presumably the background/null class -- TODO
        # confirm against the training label mapping).
        if int(label) == 0:
            continue
        mat_id = int(label_to_mat_id[int(label)])
        material = mat_by_id[mat_id]
        topk_dict['material'].append({
            'score': score,
            'id': mat_id,
            'pred_substance': material.substance,
        })
    if 'substance' in output:
        topk_subst_scores, topk_subst_labels = torch.topk(
            F.softmax(output['substance'].cpu(), dim=1),
            k=output['substance'].size(1))
        topk_dict['substance'] = \
            [
                {
                    'score': score,
                    'id': label,
                    'name': SUBSTANCES[int(label)],
                } for score, label in zip(topk_subst_scores.squeeze().tolist(),
                                          topk_subst_labels.squeeze().tolist())
            ]
    if 'roughness' in output:
        # Roughness classes are bins over [0, 1]; report each bin's midpoint.
        nrc = model.num_roughness_classes
        roughness_midpoints = np.linspace(1/nrc/2, 1 - 1/nrc/2, nrc)
        topk_roughness_scores, topk_roughness_labels = torch.topk(
            F.softmax(output['roughness'].cpu(), dim=1), k=5)
        topk_dict['roughness'] = \
            [
                {
                    'score': score,
                    'value': roughness_midpoints[int(label)],
                } for score, label in zip(topk_roughness_scores.squeeze().tolist(),
                                          topk_roughness_labels.squeeze().tolist())
            ]
    return topk_dict


if __name__ == '__main__':
    main()
|
{"hexsha": "bf6dca1a07d9b5a7e6b4c91dbd4a0b612a10c84e", "size": 6191, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/terial/classifier/inference/infer_one.py", "max_stars_repo_name": "keunhong/photoshape", "max_stars_repo_head_hexsha": "6e795512e059bc5a6bdac748fda961f66d51c6f6", "max_stars_repo_licenses": ["PostgreSQL"], "max_stars_count": 81, "max_stars_repo_stars_event_min_datetime": "2018-10-10T06:55:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T04:18:23.000Z", "max_issues_repo_path": "src/terial/classifier/inference/infer_one.py", "max_issues_repo_name": "keunhong/photoshape", "max_issues_repo_head_hexsha": "6e795512e059bc5a6bdac748fda961f66d51c6f6", "max_issues_repo_licenses": ["PostgreSQL"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2018-10-22T04:50:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T00:29:11.000Z", "max_forks_repo_path": "src/terial/classifier/inference/infer_one.py", "max_forks_repo_name": "keunhong/photoshape", "max_forks_repo_head_hexsha": "6e795512e059bc5a6bdac748fda961f66d51c6f6", "max_forks_repo_licenses": ["PostgreSQL"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2018-11-20T06:57:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-24T07:09:37.000Z", "avg_line_length": 32.5842105263, "max_line_length": 90, "alphanum_fraction": 0.6112098207, "include": true, "reason": "import numpy", "num_tokens": 1497}
|
Artists and performers from the campus, the Davis community, and beyond showcase their original work in a huge, welcoming, and free open forum for artistic expression.
Like the name says, Fridays @ 4 happens every Friday at (you guessed it) 4:00 PM at Cafe Roma, on the corner of 3rd and University, next to Navins.
This is a rare opportunity for an eager audience to discover fresh new artists, and for those artists and performers to be seen.
Upcoming Shows
???????
Notable Past Shows
January 6, 2006 Opening show
featuring acts such as The Spokes a capella group, Adrian Saint, mystery entertainer, local Davis bands, and more.
Performing or Watching
Get additional information by contacting the Fridays @ 4 team (via KelseyWagner) at MailTo(fridaysat4 AT gmail DOT com)
20070724 19:39:53 nbsp Has this been moved since Roma went byebye? Users/WillJobe
|
{"hexsha": "17e5743cc496ed43cb886a9494d7d8ef0b4cf30e", "size": 850, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Fridays_at_Four.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Fridays_at_Four.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Fridays_at_Four.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6363636364, "max_line_length": 170, "alphanum_fraction": 0.7776470588, "num_tokens": 207}
|
# Example script: compare low-dimensional embeddings with flameplot.
# The "# %%" markers delimit notebook-style cells; imports are repeated so
# each cell can be run standalone.
import flameplot as flameplot
from sklearn import (manifold, decomposition)
import numpy as np

# %%
# Load libraries
from sklearn import (manifold, decomposition)
import pandas as pd  # NOTE(review): pandas is imported but unused here
import numpy as np
# Import library
import flameplot as flameplot

# Load mnist example data
X, y = flameplot.import_example()
# PCA: 50 PCs
X_pca_50 = decomposition.TruncatedSVD(n_components=50).fit_transform(X)
# tSNE: 2D
X_tsne = manifold.TSNE(n_components=2, init='pca').fit_transform(X)
# Compare PCA(50) vs. tSNE
scores = flameplot.compare(X_pca_50, X_tsne, n_steps=5)
# Plot
fig = flameplot.plot(scores, xlabel='PCA (50d)', ylabel='tSNE (2d)')

# %%
# Load data
X, y = flameplot.import_example()
# Compute embeddings
embed_pca = decomposition.TruncatedSVD(n_components=50).fit_transform(X)
embed_tsne = manifold.TSNE(n_components=2, init='pca').fit_transform(X)
# Compare PCA vs. tSNE
scores = flameplot.compare(embed_pca, embed_tsne, n_steps=25)
# plot PCA vs. tSNE
fig = flameplot.plot(scores, xlabel='PCA', ylabel='tSNE')

# %%
# Baseline: permute the tSNE coordinates to build a "random" embedding.
X_rand = np.append([np.random.permutation(embed_tsne[:, 0])], [np.random.permutation(embed_tsne[:, 1])], axis=0).reshape(-1, 2)
# Compare random vs. tSNE
scores = flameplot.compare(X_rand, embed_tsne, n_steps=25)
fig = flameplot.plot(scores, xlabel='Random', ylabel='tSNE')
scores = flameplot.compare(X_rand, embed_pca, n_steps=25)
fig = flameplot.plot(scores, xlabel='Random', ylabel='PCA')
# Scatter plots of each embedding, colored by label
flameplot.scatter(embed_pca[:, 0], embed_pca[:, 1], label=y, title='PCA')
flameplot.scatter(embed_tsne[:, 0], embed_tsne[:, 1], label=y, title='tSNE')
flameplot.scatter(X_rand[:, 0], X_rand[:, 1], label=y, title='Random')
|
{"hexsha": "f11bc45bb9cd4eb81b7f0146d419fc663b409e98", "size": 1666, "ext": "py", "lang": "Python", "max_stars_repo_path": "flameplot/examples.py", "max_stars_repo_name": "rohankumardubey/flameplot", "max_stars_repo_head_hexsha": "fe24f0e47ea721222112a765d8955d10b4491a86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flameplot/examples.py", "max_issues_repo_name": "rohankumardubey/flameplot", "max_issues_repo_head_hexsha": "fe24f0e47ea721222112a765d8955d10b4491a86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flameplot/examples.py", "max_forks_repo_name": "rohankumardubey/flameplot", "max_forks_repo_head_hexsha": "fe24f0e47ea721222112a765d8955d10b4491a86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.724137931, "max_line_length": 123, "alphanum_fraction": 0.7382953181, "include": true, "reason": "import numpy", "num_tokens": 494}
|
theory NP4_Simple_Action_Values
imports
NP4_Simple_Action_Syntax
"~~/src/HOL/Word/Word"
"~~/src/HOL/Word/Word_Bitwise"
(* These files contain a minimalistic semantics of P4's action constructs. More complex concepts
like switch statements are left out. The purpose of this verification effort is to showcase
the viability of using Isabelle/HOL to verify P4 applications. These files focus on the
action constructs.
P4 actions are the code fragments that can read and write data being processed. The action
constructs are where sequential code resides in P4. To this end the action constructs are the
main way by which the control-plane can influence the behaviour of the data-plane.
To this end these files define a small-step semantics of the P4 actions. Then a typing
environment is built upon this, and the statements are extended with a termination-counter.
These are used to prove properties like termination, determinism, progression,
preservation, and more. The semantics can also be used to analyse reachability properties.
Well-defined and well-typed P4 programs will yield a derivation tree, where as ill-defined
or ill-typed P4 programs will yield no such tree. *)
begin
(* ============================================================================================================== *)
(* VALUE MAPPINGS *)
(* ============================================================================================================== *)
(* Contains the value of a single bit; can be cast to/from bool. *)
datatype sbit = ZERO | ONE

(* Mapping of P4 base types to concrete runtime values. *)
datatype val = SBIT sbit            (* single bit *)
  | UINT nat                        (* unsigned fixed-width integer *)
  | SINT int                        (* signed fixed-width integer *)
  | IINT int                        (* infinite-precision integer *)
  | VINT nat                        (* variable-width bitstring *)
  | BOOL bool
  | STRING string
  | ERROR "identifier list"
  | MATCH "identifier list"

(* State is a mapping from variable names to values *)
type_synonym state = "vname \<Rightarrow> val"

(* ============================================================================================================== *)
(* HELPER FUNCTIONS *)
(* ============================================================================================================== *)
(* Convert a basetype to a concrete value used for converting the entries of derived types.
   Note: BSBIT maps 0 to ZERO and every positive nat to ONE. *)
fun baseToVal :: "baseType \<Rightarrow> val" where
  "baseToVal (BBOOL b) = (BOOL b)"
| "baseToVal (BSBIT 0) = (SBIT ZERO)"
| "baseToVal (BSBIT (Suc n)) = (SBIT ONE)"
| "baseToVal (BIINT n) = (IINT n)"
| "baseToVal (BUINT n) = (UINT n)"
| "baseToVal (BSINT n) = (SINT n)"
| "baseToVal (BVINT n) = (VINT n)"
| "baseToVal (BERROR e) = (ERROR e)"
| "baseToVal (BMATCH m) = (MATCH m)"
| "baseToVal (BSTRING s) = (STRING s)"
(* ============================================================================================================== *)
(* CONCRETE VALUE EVALUATION FUNCTIONS *)
(* ============================================================================================================== *)
(* Big-step evaluation relation: "eval e s v" relates an expression e and a
   state s to the value v the expression evaluates to. Rules are grouped by
   the type of value they produce. *)
inductive eval :: "expression \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool" where
(* =============== Base types =============== *)
  RBOOL: "eval (BASE (BBOOL b)) s (BOOL b)"
| RBIT0: "eval (BASE (BSBIT 0)) s (SBIT ZERO)"
| RBIT1: "eval (BASE (BSBIT (Suc n))) s (SBIT ONE)"
| RIINT: "eval (BASE (BIINT n)) s (IINT n)"
| RUINT: "eval (BASE (BUINT n)) s (UINT n)"
| RSINT: "eval (BASE (BSINT n)) s (SINT n)"
| RVINT: "eval (BASE (BVINT n)) s (VINT n)"
| RERROR: "eval (BASE (BERROR e)) s (ERROR e)"
| RMATCH: "eval (BASE (BMATCH m)) s (MATCH m)"
| RSTRING: "eval (BASE (BSTRING str)) s (STRING str)"
(* =============== Miscellaneous expressions =============== *)
| TERNTRUE: "eval e1 s (BOOL b) \<Longrightarrow> b = True \<Longrightarrow> eval e2 s v \<Longrightarrow> eval (TernExpr e1 e2 e3) s v"
| TERNFALSE: "eval e1 s (BOOL b) \<Longrightarrow> b = False \<Longrightarrow> eval e3 s v \<Longrightarrow> eval (TernExpr e1 e2 e3) s v"
(* =============== Variable mapping =============== *)
| NAMEDVAR: "eval (NamedVar varName) s (s varName)"
(* =============== Operations that yield a single bit (SBIT) =============== *)
(* Empty for now *)
(* =============== Operations that yield a boolean (BOOL) =============== *)
| ULNEB: "eval e1 s (BOOL b) \<Longrightarrow> eval (UNA_LNE e1) s (BOOL (\<not>b))"
(* Boolean operations *)
| BEQUB: "eval e1 s (BOOL b1) \<Longrightarrow> eval e2 s (BOOL b2) \<Longrightarrow> eval (BIN_EQU e1 e2) s (BOOL (b1 = b2))"
(* Boolean equality check yields derivation error; code cannot be generated for inductive predicate eval *)
(* | BNEQB: "eval e1 s (BOOL b1) \<Longrightarrow> eval e2 s (BOOL b2) \<Longrightarrow> eval (BIN_NEQ e1 e2) s (BOOL (b1 \<noteq> b2))" *)
| BFANB: "eval e1 s (BOOL b1) \<Longrightarrow> eval e2 s (BOOL b2) \<Longrightarrow> eval (BIN_FAN e1 e2) s (BOOL (b1 \<and> b2))"
| BFORB: "eval e1 s (BOOL b1) \<Longrightarrow> eval e2 s (BOOL b2) \<Longrightarrow> eval (BIN_FOR e1 e2) s (BOOL (b1 \<or> b2))"
(* Signed integer operations *)
| BEQUS: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_EQU e1 e2) s (BOOL (n1 = n2))"
| BNEQS: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_NEQ e1 e2) s (BOOL (n1 \<noteq> n2))"
| BLEQS: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_LEQ e1 e2) s (BOOL (n1 \<le> n2))"
| BGEQS: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_GEQ e1 e2) s (BOOL (n1 \<ge> n2))"
| BLESS: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_LES e1 e2) s (BOOL (n1 < n2))"
| BGRES: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_GRE e1 e2) s (BOOL (n1 > n2))"
(* Unsigned integer operations *)
| BEQUU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_EQU e1 e2) s (BOOL (n1 = n2))"
| BNEQU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_NEQ e1 e2) s (BOOL (n1 \<noteq> n2))"
| BLEQU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_LEQ e1 e2) s (BOOL (n1 \<le> n2))"
| BGEQU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_GEQ e1 e2) s (BOOL (n1 \<ge> n2))"
| BLESU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_LES e1 e2) s (BOOL (n1 < n2))"
| BGREU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_GRE e1 e2) s (BOOL (n1 > n2))"
(* Infinite precision integer operations *)
| BEQUI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_EQU e1 e2) s (BOOL (n1 = n2))"
| BNEQI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_NEQ e1 e2) s (BOOL (n1 \<noteq> n2))"
| BLEQI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_LEQ e1 e2) s (BOOL (n1 \<le> n2))"
| BGEQI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_GEQ e1 e2) s (BOOL (n1 \<ge> n2))"
| BLESI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_LES e1 e2) s (BOOL (n1 < n2))"
| BGREI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_GRE e1 e2) s (BOOL (n1 > n2))"
(* Variable size bitstring operations *)
| BEQUV: "eval e1 s (VINT n1) \<Longrightarrow> eval e2 s (VINT n2) \<Longrightarrow> eval (BIN_EQU e1 e2) s (BOOL (n1 = n2))"
| BNEQV: "eval e1 s (VINT n1) \<Longrightarrow> eval e2 s (VINT n2) \<Longrightarrow> eval (BIN_NEQ e1 e2) s (BOOL (n1 \<noteq> n2))"
(* =============== Operations that yield an unsigned integer (UINT) =============== *)
| UNEGU: "eval e1 s (UINT n1) \<Longrightarrow> eval (UNA_NEG e1) s (UINT n1)" (* Incorrect but w/e for now *)
| UPOSU: "eval e1 s (UINT n1) \<Longrightarrow> eval (UNA_POS e1) s (UINT n1)"
| UCOMU: "eval e1 s (UINT n1) \<Longrightarrow> eval (UNA_COM e1) s (UINT n1)" (* (nat (NOT (int n1))))" Incorrect but w/e *)
| BADDU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_ADD e1 e2) s (UINT (n1 + n2))"
| BMINU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_MIN e1 e2) s (UINT (n1 - n2))" (* nat subtraction truncates at 0 *)
| BANDU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_AND e1 e2) s (UINT (nat ((int n1) AND (int n2))))"
| BXORU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_XOR e1 e2) s (UINT (nat ((int n1) XOR (int n2))))"
| BLORU: "eval e1 s (UINT n1) \<Longrightarrow> eval e2 s (UINT n2) \<Longrightarrow> eval (BIN_LOR e1 e2) s (UINT (nat ((int n1) OR (int n2))))"
(* =============== Operations that yield a signed integer (SINT) =============== *)
| UNEGS: "eval e1 s (SINT n1) \<Longrightarrow> eval (UNA_NEG e1) s (SINT (-n1))"
| UPOSS: "eval e1 s (SINT n1) \<Longrightarrow> eval (UNA_POS e1) s (SINT n1)"
| BADDS: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_ADD e1 e2) s (SINT (n1 + n2))"
| BMINS: "eval e1 s (SINT n1) \<Longrightarrow> eval e2 s (SINT n2) \<Longrightarrow> eval (BIN_MIN e1 e2) s (SINT (n1 - n2))"
(* =============== Operations that yield an infinite-precision integer (IINT) =============== *)
| UNEGI: "eval e1 s (IINT n1) \<Longrightarrow> eval (UNA_NEG e1) s (IINT (-n1))"
| UPOSI: "eval e1 s (IINT n1) \<Longrightarrow> eval (UNA_POS e1) s (IINT n1)"
| BADDI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_ADD e1 e2) s (IINT (n1 + n2))"
| BMINI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_MIN e1 e2) s (IINT (n1 - n2))"
| BMULI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_MUL e1 e2) s (IINT (n1 * n2))"
| BDIVI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_DIV e1 e2) s (IINT (n1 div n2))"
| BMODI: "eval e1 s (IINT n1) \<Longrightarrow> eval e2 s (IINT n2) \<Longrightarrow> eval (BIN_MOD e1 e2) s (IINT (n1 mod n2))"
(* =============== Operations that yield a variable-width integer (VINT) =============== *)
(* Empty for now *)

(* Elimination (inversion) rules for every expression constructor, used by
   automated proof methods. *)
inductive_cases [elim!]: "eval (BASE b) s v" "eval (TernExpr e1 e2 e3) s v" "eval (NamedVar i) s v"
  "eval (UNA_LNE e) s v" "eval (UNA_COM e) s v" "eval (UNA_NEG e) s v" "eval (UNA_POS e) s v" "eval (BIN_MUL e1 e2) s v"
  "eval (BIN_DIV e1 e2) s v" "eval (BIN_MOD e1 e2) s v" "eval (BIN_ADD e1 e2) s v" "eval (BIN_MIN e1 e2) s v" "eval (BIN_AND e1 e2) s v"
  "eval (BIN_XOR e1 e2) s v" "eval (BIN_LOR e1 e2) s v" "eval (BIN_LEQ e1 e2) s v" "eval (BIN_GEQ e1 e2) s v" "eval (BIN_LES e1 e2) s v"
  "eval (BIN_GRE e1 e2) s v" "eval (BIN_NEQ e1 e2) s v" "eval (BIN_EQU e1 e2) s v" "eval (BIN_FAN e1 e2) s v" "eval (BIN_FOR e1 e2) s v"

(* Evaluation is deterministic: an expression evaluates to at most one value
   in a given state. *)
lemma eval_deterministic: "(eval e s v) \<Longrightarrow> (eval e s v') \<Longrightarrow> (v = v')"
  apply (induction arbitrary: v' rule: eval.induct)
  apply (blast+)
  done

(* Enable code generation / execution of the eval predicate. *)
code_pred eval .

(* Default state: every variable maps to (UINT 0). *)
definition null_state ("<>") where
  "null_state \<equiv> \<lambda>x. (UINT 0)"
(* Concrete syntax <x := v, ...> for states built from the null state. *)
syntax
  "_State" :: "updbinds \<Rightarrow> 'a" ("<_>")
translations
  "_State ms" == "_Update <> ms"
  "_State (_updbinds b bs)" <= "_Update (_State b) bs"
end
|
{"author": "Johanmyst", "repo": "Nano-P4", "sha": "fc3720d7115d0bac5d719cfe6c73a024aae7f9c4", "save_path": "github-repos/isabelle/Johanmyst-Nano-P4", "path": "github-repos/isabelle/Johanmyst-Nano-P4/Nano-P4-fc3720d7115d0bac5d719cfe6c73a024aae7f9c4/Theory_Files/Simple_Action_Verification/NP4_Simple_Action_Values.thy"}
|
import pytest
from solo import hashsolo
from anndata import AnnData
import numpy as np
def test_cell_demultiplexing():
    """End-to-end check of hashsolo demultiplexing on synthetic count data.

    Builds a 1000x10 counts matrix in which rows 0-9 carry two strong
    barcode signals (doublets), rows 10-989 carry exactly one (singlets)
    and rows 990-999 carry only background (negatives), then checks both
    the high-level classification and the internal likelihood helpers.
    """
    from scipy import stats
    import random

    # NOTE(review): this seeds the stdlib `random` module, not numpy/scipy's
    # RNG, so the Poisson draws below are not made deterministic by it —
    # confirm whether seeding np.random was intended.
    random.seed(52)
    # stats.poisson.rvs(mu, loc, size): 990 singlet signals, 10 doublet signals.
    signal = stats.poisson.rvs(1000, 1, 990)
    doublet_signal = stats.poisson.rvs(1000, 1, 10)
    # Background counts ~ Poisson(5) for 1000 cells x 10 hashing barcodes.
    x = np.reshape(stats.poisson.rvs(5, 1, 10000), (1000, 10))
    # Give each of the first 990 cells one strong barcode (column cycles
    # with the row index).
    for idx, signal_count in enumerate(signal):
        col_pos = idx % 10
        x[idx, col_pos] = signal_count
    # Give the first 10 cells a second strong barcode -> doublets.
    for idx, signal_count in enumerate(doublet_signal):
        col_pos = (idx % 10) - 1
        x[idx, col_pos] = signal_count
    test_data = AnnData(x)
    hashsolo.hashsolo(test_data)
    doublets = ["Doublet"] * 10
    # Expected singlet labels 0..9 repeating down the 980 singlet rows.
    classes = list(np.repeat(np.arange(10), 98).reshape(98, 10, order="F").ravel())
    negatives = ["Negative"] * 10
    classification = doublets + classes + negatives
    assert all(test_data.obs["Classification"] == classification)
    # Hypothesis indices: 0 = negative, 1 = singlet, 2 = doublet.
    doublets = [2] * 10
    classes = [1] * 980
    negatives = [0] * 10
    classification = doublets + classes + negatives
    ll_results = np.argmax(hashsolo._calculate_log_likelihoods(x, 8)[0], axis=1)
    assert all(ll_results == classification)
    bayes_results = hashsolo._calculate_bayes_rule(x, [0.1, 0.8, 0.1], 8)
    assert all(bayes_results["most_likely_hypothesis"] == classification)
    # With an overwhelming singlet prior every cell should be called a singlet.
    singlet_prior = 0.99999999999999999
    other_prior = (1 - singlet_prior) / 2
    bayes_results = hashsolo._calculate_bayes_rule(
        x, [other_prior, singlet_prior, other_prior], 8
    )
    assert all(bayes_results["most_likely_hypothesis"] == 1)
|
{"hexsha": "f4487674f55eefee01895a830355161e8c91775d", "size": 1582, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/hashsolo_tests.py", "max_stars_repo_name": "Elhl93/solo", "max_stars_repo_head_hexsha": "76b158f203ac9af6704304d1d21543fa561d3ed2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 63, "max_stars_repo_stars_event_min_datetime": "2019-11-15T08:05:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:53:54.000Z", "max_issues_repo_path": "tests/hashsolo_tests.py", "max_issues_repo_name": "Elhl93/solo", "max_issues_repo_head_hexsha": "76b158f203ac9af6704304d1d21543fa561d3ed2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2019-12-17T21:54:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T15:41:16.000Z", "max_forks_repo_path": "tests/hashsolo_tests.py", "max_forks_repo_name": "Elhl93/solo", "max_forks_repo_head_hexsha": "76b158f203ac9af6704304d1d21543fa561d3ed2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-03-04T23:44:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T04:07:46.000Z", "avg_line_length": 31.64, "max_line_length": 83, "alphanum_fraction": 0.6750948167, "include": true, "reason": "import numpy,from scipy", "num_tokens": 480}
|
import cv2
import mediapipe as mp
import math
# FOR CHECKING THE FRAME RATE
import time
import numpy as np
class HandDetector():
    """MediaPipe-based hand detector.

    Wraps ``mp.solutions.hands`` to detect up to ``maxHands`` hands in a BGR
    frame, expose pixel-space landmark positions, and provide simple gesture
    helpers (fingers-up test, distance between two landmarks).
    """

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        """Store configuration and build the MediaPipe Hands pipeline.

        mode: static_image_mode flag; False = detect once, then track.
        maxHands: maximum number of hands to detect per frame.
        detectionCon / trackCon: minimum detection / tracking confidence.
        """
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        # TO DETECT HAND
        self.mpHands = mp.solutions.hands
        # WE HAVE CREATED A MEDIAPIPE 'HANDS' OBJECT, THUS DETECTING HAND WITH HELP OF THE 21 GIVEN POINTS)
        # PARAMS :-
        # static_image_mode = false means DETECTION + TRACKING (if tracking confidence is above some threshold)
        # SINCE DEFAULT PARAMS USED, WE HAVE NOT PASSED ANYTHING TO Hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
        # Landmark ids of the five fingertips (thumb, index, middle, ring, pinky).
        self.tipIds=[4,8,12,16,20]

    def findHands(self, img, draw=True):
        """Run landmark detection on a BGR frame; optionally draw the skeleton.

        Stores the raw results on ``self.results`` (used by findPosition) and
        returns the (possibly annotated) frame.
        """
        # CONVERT IMAGE TO RGB
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # THIS METHOD PERFORMS HAND LANDMARK ESTIMATION AND THIS METHOD EXPECTS RGB FORMAT IMAGE
        self.results = self.hands.process(imgRGB)
        # IF WE WANT TO GET THE LANDMARK OF OUR HANDS
        # print(self.results.multi_hand_landmarks);
        # CHECK IF MULTIPLE HANDS ARE THERE ,AND IF YES, EXTRACT THEM
        if self.results.multi_hand_landmarks:
            for handlms in self.results.multi_hand_landmarks:
                if draw:
                    # FOR DRAWING LANDMARKS (HAND_CONNECTIONS HELP TO JOIN THE 21 POINTS TO THE RESPECTIVE POINTS)
                    self.mpDraw.draw_landmarks(img, handlms, self.mpHands.HAND_CONNECTIONS);
        return img

    def findPosition(self,img, handNo=0, draw=True):
        """Return ``(lmList, bbox)`` for hand ``handNo`` of the last findHands() call.

        lmList is a list of ``[id, x_px, y_px]`` for the 21 landmarks; bbox is
        ``(xmin, ymin, xmax, ymax)`` in pixels, or ``[]`` when no hand is seen.
        Must be called after findHands() (reads ``self.results``).
        """
        xList=[]
        yList=[]
        bbox=[]
        # Initialize a List to append id with location of landmarks
        self.lmList = []
        if self.results.multi_hand_landmarks:
            myhand= self.results.multi_hand_landmarks[handNo]
            # HERE WE ARE LOCATING THE 21(0-20) POINTS OF OUR HAND WITH X AND Y COORDINATES FOR EACH HAND FRAME
            for id, lm in enumerate(myhand.landmark):
                # By default the landmarks given by WebCam contains height, Width and Channel that is ratio of Image
                # But We Need Location in Pixel
                # Get The shape
                h, w, c= img.shape
                # we are taking height, width and channel so that we can convert the landmark which was in ratio to pixles
                cx , cy = int(lm.x* w), int(lm.y* h)
                xList.append(cx)
                yList.append(cy)
                self.lmList.append([id,cx,cy])
                # now we will draw a circle for id 8
                # if id == 8:
                if id==8:
                    cv2.circle(img, (cx , cy), 10, (255,0,0), cv2.FILLED)
                if id==12:
                    cv2.circle(img,(cx,cy),10,(255,0,255),cv2.FILLED)
            xmin,xmax=min(xList),max(xList)
            ymin,ymax=min(yList),max(yList)
            bbox=xmin,ymin,xmax,ymax
        return self.lmList,bbox

    def fingersUp(self):
        """Return a 5-element list (thumb..pinky): 1 = finger raised, 0 = down.

        NOTE(review): the thumb test compares x coordinates, which assumes a
        specific hand orientation (e.g. a mirrored right hand) — confirm with
        callers before relying on it for the left hand.
        """
        fingers=[]
        #Thumb
        #print(self.lmList,self.tipIds)
        if self.lmList[self.tipIds[0]][1]>self.lmList[self.tipIds[0]-1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # Fingers: fingertip above (smaller y than) the joint two landmarks
        # below it means the finger is raised.
        for id in range(1,5):
            if self.lmList[self.tipIds[id]][2]<self.lmList[self.tipIds[id]-2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        return fingers

    # Define a Method to find the distance between two landmarks
    def findDistance(self,p1,p2,img,draw=True,r=15,t=3):
        """Return ``(length_px, img, [x1, y1, x2, y2, cx, cy])`` for landmarks
        ``p1`` and ``p2`` of the last detected hand; optionally annotate img."""
        x1,y1=self.lmList[p1][1:]
        x2,y2=self.lmList[p2][1:]
        # Midpoint between the two landmarks (integer pixel coordinates).
        cx=(x1+x2)//2
        cy=(y1+y2)//2
        if draw:
            cv2.line(img,(x1,y1),(x2,y2),(255,0,255),t)
            cv2.circle(img,(x1,y1),r,(255,0,255),cv2.FILLED)
            cv2.circle(img,(x2,y2),r,(255,0,255),cv2.FILLED)
            cv2.circle(img,(cx,cy),r,(0,0,255),cv2.FILLED)
        length=math.hypot(x2-x1,y2-y1)
        return length,img,[x1,y1,x2,y2,cx,cy]
def main():
    """Demo loop: capture webcam frames, detect hand landmarks, display FPS.

    Runs until the camera stops delivering frames or the user presses 'q'.
    """
    # Timestamp of the previous frame, used to compute FPS.
    ptime = 0
    # CREATE A VIDEOCAPTURE OBJECT
    cap = cv2.VideoCapture(0)
    # in case camera is not open
    if not cap.isOpened():
        print("Camera is not started")
    # Create object of class HandDetector
    detector = HandDetector()
    while True:
        # CAPTURE IMAGE FRAME BY FRAME
        # RETURNS BOOL AND FRAME , TRUE IF FRAME IS READ CORRECTLY IN BGR FORMAT
        success, img = cap.read()
        # BUG FIX: bail out BEFORE using the frame.  Previously the success
        # flag was only checked at the end of the loop, so a failed read
        # (img is None) crashed inside findHands/imshow first.
        if not success:
            break
        # call findHands method to get hand drawn
        img = detector.findHands(img)
        # Call findPosition() method to get different landmarks of hand
        lmlist, bbox = detector.findPosition(img)
        if len(lmlist) != 0:
            # Print the index fingertip landmark as a liveness indicator.
            print(lmlist[8])
        else:
            print("No Hand Detected")
        # FPS = 1 / time elapsed since the previous frame.
        ctime = time.time()
        fps = 1 / (ctime - ptime)
        ptime = ctime
        # HERE WE ARE DISPLAYING THE FPS ALONG WITH THE VIDEO
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        # TO DISPLAY THE FRAME
        cv2.imshow("Hand Ditector WebCame", img)
        # IF USER PRESS Q THEN WE HAVE TO QUIT
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
|
{"hexsha": "550720200c37ba69bed9bf1fb0beb26127d75282", "size": 6700, "ext": "py", "lang": "Python", "max_stars_repo_path": "Virtual Mouse/handtrackingmodule.py", "max_stars_repo_name": "yadavujwal/virtual-mouse", "max_stars_repo_head_hexsha": "548a4ef74239006ac3cd2edbe0655f46961ec0ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Virtual Mouse/handtrackingmodule.py", "max_issues_repo_name": "yadavujwal/virtual-mouse", "max_issues_repo_head_hexsha": "548a4ef74239006ac3cd2edbe0655f46961ec0ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Virtual Mouse/handtrackingmodule.py", "max_forks_repo_name": "yadavujwal/virtual-mouse", "max_forks_repo_head_hexsha": "548a4ef74239006ac3cd2edbe0655f46961ec0ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6037735849, "max_line_length": 123, "alphanum_fraction": 0.5665671642, "include": true, "reason": "import numpy", "num_tokens": 1733}
|
"""Specify the jobs to run via config file.
A simple experiment comparing Thompson sampling to greedy algorithm. Finite
armed bandit with 3 arms. Greedy algorithm premature and suboptimal
exploitation.
See Figure 3 from https://arxiv.org/abs/1707.02038
"""
import collections
import functools
from base.config_lib import Config
from base.experiment import BaseExperiment
from finite_arm.agent_finite import FiniteBernoulliBanditEpsilonGreedy
from finite_arm.agent_finite import FiniteBernoulliBanditTS
from finite_arm.env_finite import FiniteArmedBernoulliBandit
import numpy as np
def get_config():
  """Build the experiment config: greedy vs Thompson sampling on 100 random
  3-armed Bernoulli bandits, 1000 steps, 100 seeds."""
  name = 'finite_simple_rand'
  n_arm = 3
  agents = collections.OrderedDict(
      [('greedy',
        functools.partial(FiniteBernoulliBanditEpsilonGreedy, n_arm)),
       ('ts', functools.partial(FiniteBernoulliBanditTS, n_arm))]
  )
  n_env = 100
  # One environment per draw of random arm probabilities.
  environments = collections.OrderedDict(
      (env_id, functools.partial(FiniteArmedBernoulliBandit,
                                 np.random.rand(n_arm)))
      for env_id in range(n_env)
  )
  experiments = collections.OrderedDict(
      [(name, BaseExperiment)]
  )
  n_steps = 1000
  n_seeds = 100
  return Config(name, agents, environments, experiments, n_steps, n_seeds)
|
{"hexsha": "4e7cd583a4cbeac1066e483a44c1a3c9db8f68d3", "size": 1291, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/finite_arm/config_simple_rand.py", "max_stars_repo_name": "AbhinavGopal/ts_tutorial", "max_stars_repo_head_hexsha": "147ff28dc507172774693f225071f8e244e5994e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 290, "max_stars_repo_stars_event_min_datetime": "2017-12-29T01:55:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T10:00:32.000Z", "max_issues_repo_path": "src/finite_arm/config_simple_rand.py", "max_issues_repo_name": "AbhinavGopal/ts_tutorial", "max_issues_repo_head_hexsha": "147ff28dc507172774693f225071f8e244e5994e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-08-02T11:45:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-24T14:34:58.000Z", "max_forks_repo_path": "src/finite_arm/config_simple_rand.py", "max_forks_repo_name": "AbhinavGopal/ts_tutorial", "max_forks_repo_head_hexsha": "147ff28dc507172774693f225071f8e244e5994e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 76, "max_forks_repo_forks_event_min_datetime": "2018-01-17T06:19:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-10T06:18:20.000Z", "avg_line_length": 29.3409090909, "max_line_length": 76, "alphanum_fraction": 0.7699457785, "include": true, "reason": "import numpy", "num_tokens": 325}
|
import os
import cv2
import string
from tqdm import tqdm
import click
import numpy as np
import editdistance
import glob
import torch
from torch.autograd import Variable
import utils
import dataset
from PIL import Image
import models.crnn as crnn
#model_path = './data/crnn.pth'
#img_path = '../TextBoxes_plusplus/docker_custom/crops/demo2/1.jpg'
@click.command()
@click.option('--image-path', type=str, default=None, help='Path to image')
@click.option('--alphabet', type=str, default="0123456789abcdefghijklmnopqrstuvwxyz?,:-()'", help='Alphabet to recognize')
@click.option('--snapshot', type=str, default="data/crnn.pth", help='Pre-trained weights')
@click.option('--gpu', type=str, default='0', help='List of GPUs for parallel training, e.g. 0,1,2,3')
@click.option('--visualize', type=bool, default=False, help='Visualize output')
def main(image_path, alphabet, snapshot, gpu, visualize):
    """Recognize text in a single image, or in every .jpg of a directory,
    and print the concatenated result."""
    # BUG FIX: --image-path defaults to None and os.path.isfile(None) raises
    # TypeError; fail with a clear CLI error instead.
    if image_path is None:
        raise click.UsageError('--image-path is required')
    text = []
    if os.path.isfile(image_path):
        text.append(recognize(image_path, alphabet, snapshot, gpu))
    elif os.path.isdir(image_path):
        # loop over image directory (non-recursive, sorted for stable output)
        print("is dir", image_path)
        for fname in sorted(os.listdir(image_path)):
            if fname.endswith(".jpg"):
                fpath = os.path.join(image_path, fname)
                text.append(recognize(fpath, alphabet, snapshot, gpu))
    if len(text) > 0:
        print("recognized text: ", " ".join(text))
# Cache of loaded CRNN models keyed by snapshot path: main() calls
# recognize() once per image, and reloading the weights from disk for every
# image in a directory was the dominant cost.
_MODEL_CACHE = {}


def _load_model(snapshot):
    """Build the CRNN, move it to GPU if available, load weights, set eval mode."""
    model = crnn.CRNN(32, 1, 37, 256)
    if torch.cuda.is_available():
        model = model.cuda()
    print('loading pretrained model from %s' % snapshot)
    model.load_state_dict(torch.load(snapshot))
    model.eval()
    return model


def recognize(image_path, alphabet, snapshot, gpu):
    """Run CRNN text recognition on a single image file.

    Parameters
    ----------
    image_path : str
        Path to the image to recognize.
    alphabet : str
        Characters the label converter decodes to.
    snapshot : str
        Path to the pre-trained weights (model is loaded once and cached).
    gpu : str
        Unused; device placement follows torch.cuda.is_available().

    Returns
    -------
    str
        The decoded (collapsed) prediction.
    """
    model = _MODEL_CACHE.get(snapshot)
    if model is None:
        model = _MODEL_CACHE[snapshot] = _load_model(snapshot)

    converter = utils.strLabelConverter(alphabet)
    transformer = dataset.resizeNormalize((100, 32))
    # Grayscale, resize/normalize, then add the batch dimension.
    image = Image.open(image_path).convert('L')
    image = transformer(image)
    if torch.cuda.is_available():
        image = image.cuda()
    image = image.view(1, *image.size())
    image = Variable(image)

    preds = model(image)
    # Greedy decoding: take the argmax over the class dimension per time step.
    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    preds_size = Variable(torch.IntTensor([preds.size(0)]))
    raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
    print('%-20s => %-20s' % (raw_pred, sim_pred))
    return sim_pred
# Entry point: click parses the CLI options from sys.argv.
if __name__ == '__main__':
    main()
|
{"hexsha": "88b36528453c1ad9dcf0b5880ea4c9542eaa3749", "size": 2526, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval.py", "max_stars_repo_name": "moskiteau/crnn.pytorch", "max_stars_repo_head_hexsha": "934667baf19d89bc593fc859bfedf90c8bc2c1eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eval.py", "max_issues_repo_name": "moskiteau/crnn.pytorch", "max_issues_repo_head_hexsha": "934667baf19d89bc593fc859bfedf90c8bc2c1eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eval.py", "max_forks_repo_name": "moskiteau/crnn.pytorch", "max_forks_repo_head_hexsha": "934667baf19d89bc593fc859bfedf90c8bc2c1eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9746835443, "max_line_length": 122, "alphanum_fraction": 0.670229612, "include": true, "reason": "import numpy", "num_tokens": 614}
|
import numpy as np
from shap_fork.utils import MaskedModel
from shap_fork import links
from shap_fork.models import Model
from .._explainer import Explainer
class Random(Explainer):
    """ Simply returns random (normally distributed) feature attributions.

    This is only for benchmark comparisons. It supports both fully random attributions and random
    attributions that are constant across all explainations.
    """

    def __init__(self, model, masker, link=links.identity, feature_names=None, linearize_link=True, constant=False, **call_args):
        """Build the explainer.

        Parameters mirror ``Explainer``; ``constant=True`` requests
        attributions that are identical across all rows, and any extra
        ``call_args`` become default keyword arguments of ``__call__``.
        """
        super().__init__(model, masker, link=link, linearize_link=linearize_link, feature_names=feature_names)

        # Wrap bare callables so downstream code can rely on the Model API.
        if not isinstance(model, Model):
            self.model = Model(model)

        # Pin caller-supplied defaults onto __call__'s keyword arguments.
        for arg in call_args:
            self.__call__.__kwdefaults__[arg] = call_args[arg]

        self.constant = constant
        self.constant_attributions = None

    def explain_row(self, *row_args, max_evals, main_effects, error_bounds, batch_size, outputs, silent):
        """ Explains a single row with random attributions.

        Returns the standard explain_row dict (values, expected_values,
        mask_shapes, ...); `values` are small N(0, 0.001^2) draws so the
        explanation error is comparable to a constant baseline.
        """

        # build a masked version of the model for the current input sample
        fm = MaskedModel(self.model, self.masker, self.link, self.linearize_link, *row_args)

        # compute any custom clustering for this row
        row_clustering = None
        if getattr(self.masker, "clustering", None) is not None:
            if isinstance(self.masker.clustering, np.ndarray):
                row_clustering = self.masker.clustering
            elif callable(self.masker.clustering):
                row_clustering = self.masker.clustering(*row_args)
            else:
                raise Exception("The masker passed has a .clustering attribute that is not yet supported by the Permutation explainer!")

        # compute the correct expected value (all-masked evaluation)
        # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the exact replacement for the old alias.
        masks = np.zeros(1, dtype=int)
        # NOTE: deliberately shadows the `outputs` parameter with the model output.
        outputs = fm(masks, zero_index=0, batch_size=1)
        expected_value = outputs[0]

        # generate random feature attributions
        # we produce small values so our explanation errors are similar to a constant function
        row_values = np.random.randn(*((len(fm),) + outputs.shape[1:])) * 0.001

        return {
            "values": row_values,
            "expected_values": expected_value,
            "mask_shapes": fm.mask_shapes,
            "main_effects": None,
            "clustering": row_clustering,
            "error_std": None,
            "output_names": self.model.output_names if hasattr(self.model, "output_names") else None
        }
|
{"hexsha": "c58be50a0154b03dece9dfadb355962fc3cce7cb", "size": 3337, "ext": "py", "lang": "Python", "max_stars_repo_path": "shap_fork/explainers/other/_random.py", "max_stars_repo_name": "thbuerg/shap_fork", "max_stars_repo_head_hexsha": "bb82becb3295f31a8eed5dedc47d515ecf16e503", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-17T10:27:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T14:02:29.000Z", "max_issues_repo_path": "shap_fork/explainers/other/_random.py", "max_issues_repo_name": "thbuerg/shap_fork", "max_issues_repo_head_hexsha": "bb82becb3295f31a8eed5dedc47d515ecf16e503", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shap_fork/explainers/other/_random.py", "max_forks_repo_name": "thbuerg/shap_fork", "max_forks_repo_head_hexsha": "bb82becb3295f31a8eed5dedc47d515ecf16e503", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-25T10:55:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T10:55:56.000Z", "avg_line_length": 43.3376623377, "max_line_length": 136, "alphanum_fraction": 0.6502846868, "include": true, "reason": "import numpy", "num_tokens": 749}
|
"""
    defaultfigure(; kwargs...)

Create a `Figure` with the package defaults: 800×800 resolution and a fully
transparent background. Keyword arguments are forwarded to `Figure` and
override the defaults.
"""
function defaultfigure(; kwargs...)
    return Figure(;
        resolution = (800, 800),
        background = RGBA(0, 0, 0, 0),
        kwargs...)
end
# ## Plotting interface definition
"""
    plotsample(method, sample)

Visualize a single `sample` of `method` in a freshly created 300×150 figure
and return that figure. Dispatches to [`plotsample!`](@ref) for the actual
drawing.
"""
function plotsample(method, sample)
    fig = defaultfigure(resolution = (300, 150))
    plotsample!(fig, method, sample)
    return fig
end
"""
    plotsample!(f, method, sample)

Plot `sample` of `method` into the figure (or figure position) `f`, mutating
it. Concrete methods provide the implementation; there is no generic fallback.
"""
function plotsample! end
"""
    plotxy(method, (x, y))

Visualize an encoded input/target pair `(x, y)` of `method` in a freshly
created 300×150 figure and return that figure. Dispatches to
[`plotxy!`](@ref) for the actual drawing.
"""
function plotxy(method, xy)
    fig = defaultfigure(resolution = (300, 150))
    plotxy!(fig, method, xy)
    return fig
end
"""
    plotxy!(f, method, (x, y))

Plot the encoded pair `(x, y)` of `method` into the figure (or figure
position) `f`, mutating it. Concrete methods provide the implementation.
"""
function plotxy! end
"""
    plotbatch(method, (xs, ys))

Plot every observation of the collated batch `(xs, ys)` on a near-square grid
in a new figure and return it. The batch dimension is assumed to be the last
array axis.
"""
function plotbatch(method, (xs, ys))
    nobs = size(xs)[end]
    side = Int(ceil(sqrt(nobs)))
    fig = Figure()
    gridpositions = Iterators.product(1:side, 1:side)
    for (pos, xy) in zip(gridpositions, DataLoaders.obsslices((xs, ys)))
        plotxy!(fig[pos...], method, xy)
    end
    return fig
end
# ## Utilities
"""
    imageaxis(f; kwargs...)

Create an `Axis` on `f` configured for image display: square data aspect,
all zoom/pan interactions locked, spines and decorations hidden, and limits
tight around the content.
"""
function imageaxis(f; kwargs...)
    axis = AbstractPlotting.Axis(f; kwargs...)
    axis.aspect = DataAspect()
    # Lock every interaction so the image cannot be zoomed or panned.
    axis.xzoomlock = true
    axis.yzoomlock = true
    axis.xrectzoom = false
    axis.yrectzoom = false
    axis.panbutton = nothing
    axis.xpanlock = true
    axis.ypanlock = true
    # Hide the frame around the image.
    axis.bottomspinevisible = false
    axis.leftspinevisible = false
    axis.rightspinevisible = false
    axis.topspinevisible = false
    MakieLayout.tightlimits!(axis)
    hidedecorations!(axis)
    return axis
end
imageaxis(f::AbstractPlotting.FigurePosition; kwargs...) = imageaxis(f.fig; kwargs...)
# ## Plot recipes
# Recipe `plotimage(image)`: draws an image rotated into its natural screen
# orientation (see the `plot!` method below). No extra attributes.
@recipe(PlotImage, image) do scene
    Attributes()
end
function AbstractPlotting.plot!(plot::PlotImage)
    # Rotate 90° clockwise so the image appears upright: `image!` maps the
    # first array axis to x. `@lift` keeps the rotation reactive.
    rotated = @lift rotr90($(plot[:image]))
    image!(plot, rotated; plot.attributes...)
    return plot
end
# Recipe `plotmask(mask, classes)`: renders a segmentation mask as a color
# image, one distinguishable color per class (see the `plot!` method below).
@recipe(PlotMask, mask, classes) do scene
    Attributes()
end
function AbstractPlotting.plot!(plot::PlotMask; kwargs...)
    mask = plot[:mask]
    # `classes` is optional: when the attribute was not supplied, indexing
    # throws, and we fall back to the unique values present in the mask.
    classes = try
        classes = plot[:classes]
    catch
        classes = @lift unique($mask)
    end
    # Convert class indices to a color image and draw it via the
    # plotimage recipe (handles rotation).
    im = @lift maskimage($mask, $classes)
    plotimage!(plot, im; plot.attributes...)
    return plot
end
"""
    maskimage(mask, classes)

Map each class value in `mask` to one of `length(classes)` maximally
distinguishable colors (deuteranopia-safe palette) and return the resulting
color image. Mask values are used directly as palette indices.
"""
function maskimage(mask, classes)
    palette = distinguishable_colors(length(classes), transform = deuteranopic)
    return map(class -> palette[class], mask)
end
# Convenience methods: unwrap `Gray`/`Normed` element types to their raw
# numeric representation (via `reinterpret`) before building the mask image,
# so integer-coded masks stored as image types also work.
maskimage(mask::AbstractArray{<:Gray{T}}, args...) where T =
maskimage(reinterpret(T, mask), args...)
maskimage(mask::AbstractArray{<:Normed{T}}, args...) where T =
maskimage(reinterpret(T, mask), args...)
|
{"hexsha": "488d59983a8e5f8c4c34209af8f86fb253d4c854", "size": 2458, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/plotting.jl", "max_stars_repo_name": "dave7895/FastAI.jl", "max_stars_repo_head_hexsha": "8246d165ab4f9e0830ea9288564503d5d37fcfec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-27T15:53:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-03T22:06:17.000Z", "max_issues_repo_path": "src/plotting.jl", "max_issues_repo_name": "dave7895/FastAI.jl", "max_issues_repo_head_hexsha": "8246d165ab4f9e0830ea9288564503d5d37fcfec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plotting.jl", "max_forks_repo_name": "dave7895/FastAI.jl", "max_forks_repo_head_hexsha": "8246d165ab4f9e0830ea9288564503d5d37fcfec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-23T18:20:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T23:49:49.000Z", "avg_line_length": 21.3739130435, "max_line_length": 86, "alphanum_fraction": 0.6375101709, "num_tokens": 693}
|
# Version: 2020.02.21
#
# MIT License
#
# Copyright (c) 2018 Jiankang Deng and Jia Guo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import os
import sys
import mxnet as mx
import numpy as np
from mxnet import ndarray as nd
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_image
def ch_dev(arg_params, aux_params, ctx):
    """Copy every parameter array in both dicts onto device ``ctx``.

    Returns a new ``(arg_params, aux_params)`` pair of dicts whose values are
    the ``as_in_context(ctx)`` copies; the input dicts are left untouched.
    """
    moved_args = {name: value.as_in_context(ctx)
                  for name, value in arg_params.items()}
    moved_auxs = {name: value.as_in_context(ctx)
                  for name, value in aux_params.items()}
    return moved_args, moved_auxs
def main(args):
    """Benchmark single-image inference latency of a face-embedding model.

    Loads the checkpoint named by ``args.model`` ('prefix,epoch'), truncates
    the network at the 'fc1' embedding layer, then times forward passes over
    the first ``args.param1`` records of ``args.data``/train.rec and prints
    the mean per-image latency in seconds.
    """
    ctx = mx.gpu(args.gpu)
    args.ctx_num = 1
    prop = face_image.load_property(args.data)
    image_size = prop.image_size
    print('image_size', image_size)
    # args.model is 'prefix,epoch' (e.g. '../model/softmax,50').
    vec = args.model.split(',')
    prefix = vec[0]
    epoch = int(vec[1])
    print('loading', prefix, epoch)
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
    all_layers = sym.get_internals()
    # Truncate at the embedding output; classification head is not timed.
    sym = all_layers['fc1_output']
    # model = mx.mod.Module.load(prefix, epoch, context = ctx)
    model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
    # model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
    model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))])
    model.set_params(arg_params, aux_params)
    path_imgrec = os.path.join(args.data, 'train.rec')
    path_imgidx = os.path.join(args.data, 'train.idx')
    imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
    # Record 0 is a header: label[0] holds one past the last record index.
    s = imgrec.read_idx(0)
    header, _ = mx.recordio.unpack(s)
    assert header.flag > 0
    print('header0 label', header.label)
    header0 = (int(header.label[0]), int(header.label[1]))
    # assert(header.flag==1)
    imgidx = range(1, int(header.label[0]))
    stat = []
    count = 0
    # Reusable input buffers (batch of 1).
    data = nd.zeros((1, 3, image_size[0], image_size[1]))
    label = nd.zeros((1,))
    for idx in imgidx:
        if len(stat) % 100 == 0:
            print('processing', len(stat))
        s = imgrec.read_idx(idx)
        header, img = mx.recordio.unpack(s)
        img = mx.image.imdecode(img)
        # HWC -> CHW for the network input layout.
        img = nd.transpose(img, axes=(2, 0, 1))
        data[0][:] = img
        # input_blob = np.expand_dims(img.asnumpy(), axis=0)
        # arg_params["data"] = mx.nd.array(input_blob, ctx)
        # arg_params["softmax_label"] = mx.nd.empty((1,), ctx)
        time_now = datetime.datetime.now()
        # exe = sym.bind(ctx, arg_params ,args_grad=None, grad_req="null", aux_states=aux_params)
        # exe.forward(is_train=False)
        # _embedding = exe.outputs[0].asnumpy().flatten()
        # db = mx.io.DataBatch(data=(data,), label=(label,))
        db = mx.io.DataBatch(data=(data,))
        model.forward(db, is_train=False)
        # asnumpy() blocks until the async forward completes, so the timing
        # below covers the full forward pass.
        net_out = model.get_outputs()[0].asnumpy()
        time_now2 = datetime.datetime.now()
        diff = time_now2 - time_now
        stat.append(diff.total_seconds())
        if len(stat) == args.param1:
            break
    # Drop the first 10 measurements (warm-up noise) before averaging.
    stat = stat[10:]
    print('avg infer time', np.mean(stat))
if __name__ == '__main__':
    # CLI: --gpu device id, --data record-file directory, --model
    # 'prefix,epoch' checkpoint spec, --batch-size inference batch size,
    # --param1 number of timed iterations.
    parser = argparse.ArgumentParser(description='do network benchmark')
    # general
    parser.add_argument('--gpu', default=0, type=int, help='')
    parser.add_argument('--data', default='', type=str, help='')
    parser.add_argument('--model', default='../model/softmax,50', help='path to load model.')
    parser.add_argument('--batch-size', default=1, type=int, help='')
    parser.add_argument('--param1', default=1010, type=int, help='')
    args = parser.parse_args()
    main(args)
|
{"hexsha": "e8229e9de0f7f070bf93091922899f8cd665818c", "size": 4959, "ext": "py", "lang": "Python", "max_stars_repo_path": "embedding-calculator/srcext/insightface/src/utils/benchmark.py", "max_stars_repo_name": "drawdy/CompreFace", "max_stars_repo_head_hexsha": "143b7955536f406a622248fad2d2108dfb5dd4f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "embedding-calculator/srcext/insightface/src/utils/benchmark.py", "max_issues_repo_name": "drawdy/CompreFace", "max_issues_repo_head_hexsha": "143b7955536f406a622248fad2d2108dfb5dd4f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "embedding-calculator/srcext/insightface/src/utils/benchmark.py", "max_forks_repo_name": "drawdy/CompreFace", "max_forks_repo_head_hexsha": "143b7955536f406a622248fad2d2108dfb5dd4f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9919354839, "max_line_length": 146, "alphanum_fraction": 0.6739261948, "include": true, "reason": "import numpy", "num_tokens": 1267}
|
# -*- coding: utf-8 -*-
"""
Wind Setbacks tests
"""
from click.testing import CliRunner
import json
import numpy as np
import os
import pytest
import shutil
import tempfile
import traceback
from rex.utilities.loggers import LOGGERS
from reVX import TESTDATADIR
from reVX.handlers.geotiff import Geotiff
from reVX.wind_setbacks import (StructureWindSetbacks,
RailWindSetbacks)
from reVX.wind_setbacks.wind_setbacks_cli import main
# Shared fixtures for all setback tests: Rhode Island exclusions layer,
# turbine geometry, and the county wind-regulation lookups (CSV + GeoPackage).
EXCL_H5 = os.path.join(TESTDATADIR, 'setbacks', 'ri_setbacks.h5')
HUB_HEIGHT = 135  # turbine hub height
ROTOR_DIAMETER = 200  # turbine rotor diameter
MULTIPLIER = 3  # generic setback multiplier — semantics defined by reVX setbacks
REGS_FPATH = os.path.join(TESTDATADIR, 'setbacks', 'ri_wind_regs_fips.csv')
REGS_GPKG = os.path.join(TESTDATADIR, 'setbacks', 'ri_wind_regs_fips.gpkg')
@pytest.fixture(scope="module")
def runner():
    """
    Module-scoped Click ``CliRunner`` used to invoke the setbacks CLI.
    """
    return CliRunner()
def test_generic_structure():
    """
    Setbacks computed from the generic multiplier (no local regulations)
    must match the stored baseline raster for structures.
    """
    baseline_fpath = os.path.join(TESTDATADIR, 'setbacks',
                                  'generic_structures.tif')
    with Geotiff(baseline_fpath) as tif:
        expected = tif.values

    setbacks = StructureWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
                                     regs_fpath=None, multiplier=MULTIPLIER)
    structures = os.path.join(TESTDATADIR, 'setbacks',
                              'RhodeIsland.geojson')
    computed = setbacks.compute_setbacks(structures)

    assert np.allclose(expected, computed)
@pytest.mark.parametrize('max_workers', [None, 1])
def test_local_structures(max_workers):
    """
    Test local structures setbacks

    County-regulation (GeoPackage) driven setbacks must match the baseline
    raster both in serial (max_workers=1) and parallel (None) execution.
    """
    baseline = os.path.join(TESTDATADIR, 'setbacks',
                            'existing_structures.tif')
    with Geotiff(baseline) as tif:
        baseline = tif.values

    setbacks = StructureWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
                                     regs_fpath=REGS_GPKG, multiplier=None)
    structure_path = os.path.join(TESTDATADIR, 'setbacks',
                                  'RhodeIsland.geojson')
    test = setbacks.compute_setbacks(structure_path, max_workers=max_workers)

    assert np.allclose(baseline, test)
def test_generic_railroads():
    """
    Setbacks computed from the generic multiplier (no local regulations)
    must match the stored baseline raster for railroads.
    """
    baseline_fpath = os.path.join(TESTDATADIR, 'setbacks', 'generic_rails.tif')
    with Geotiff(baseline_fpath) as tif:
        expected = tif.values

    setbacks = RailWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
                                regs_fpath=None, multiplier=MULTIPLIER)
    rails = os.path.join(TESTDATADIR, 'setbacks', 'RI_Railroads',
                         'RI_Railroads.shp')
    computed = setbacks.compute_setbacks(rails)

    assert np.allclose(expected, computed)
@pytest.mark.parametrize('max_workers', [None, 1])
def test_local_railroads(max_workers):
    """
    Test local rail setbacks

    County-regulation (GeoPackage) driven rail setbacks must match the
    baseline raster both in serial (max_workers=1) and parallel (None).
    """
    baseline = os.path.join(TESTDATADIR, 'setbacks', 'existing_rails.tif')
    with Geotiff(baseline) as tif:
        baseline = tif.values

    setbacks = RailWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
                                regs_fpath=REGS_GPKG, multiplier=None)
    rail_path = os.path.join(TESTDATADIR, 'setbacks', 'RI_Railroads',
                             'RI_Railroads.shp')
    test = setbacks.compute_setbacks(rail_path, max_workers=max_workers)

    assert np.allclose(baseline, test)
def test_setback_preflight_check():
    """
    Test BaseWindSetbacks preflight_checks

    Supplying neither a regulations file nor a generic multiplier must raise.
    """
    with pytest.raises(RuntimeError):
        StructureWindSetbacks(EXCL_H5, HUB_HEIGHT, ROTOR_DIAMETER,
                              regs_fpath=None, multiplier=None)
def test_cli(runner):
    """
    Test CLI

    Runs the setbacks CLI in "from-config" mode inside a temporary
    directory and compares the produced GeoTiff to the generic baseline.
    """
    structure_dir = os.path.join(TESTDATADIR, 'setbacks')
    with tempfile.TemporaryDirectory() as td:
        regs_fpath = os.path.basename(REGS_FPATH)
        regs_fpath = os.path.join(td, regs_fpath)
        shutil.copy(REGS_FPATH, regs_fpath)

        config = {
            "directories": {
                "log_directory": td,
                "output_directory": td
            },
            "execution_control": {
                "option": "local"
            },
            "excl_fpath": EXCL_H5,
            "feature_type": "structure",
            "features_path": structure_dir,
            "hub_height": HUB_HEIGHT,
            "log_level": "INFO",
            "regs_fpath": regs_fpath,
            "replace": True,
            "rotor_diameter": ROTOR_DIAMETER
        }
        config_path = os.path.join(td, 'config.json')
        with open(config_path, 'w') as f:
            json.dump(config, f)

        result = runner.invoke(main, ['from-config',
                                      '-c', config_path])
        # Bug fix: traceback.print_exception() writes to stderr and returns
        # None, so the original message was always "Failed with error None".
        # format_exception() returns the formatted traceback as a string list.
        msg = ('Failed with error {}'
               .format(''.join(traceback.format_exception(*result.exc_info))))
        assert result.exit_code == 0, msg

        baseline = os.path.join(TESTDATADIR, 'setbacks',
                                'generic_structures.tif')
        with Geotiff(baseline) as tif:
            baseline = tif.values

        test = os.path.join(td, 'RhodeIsland.tif')
        with Geotiff(test) as tif:
            test = tif.values

        # Bug fix: the comparison result was discarded (no assert), so this
        # test could never fail on mismatching rasters.
        assert np.allclose(baseline, test)

    LOGGERS.clear()
def execute_pytest(capture='all', flags='-rapP'):
    """Execute module as pytest with detailed summary report.

    Parameters
    ----------
    capture : str
        Log or stdout/stderr capture option. ex: log (only logger),
        all (includes stdout/stderr)
    flags : str
        Which tests to show logs and results for.
    """
    fname = os.path.basename(__file__)
    pytest_args = ['-q', '--show-capture={}'.format(capture), fname, flags]
    pytest.main(pytest_args)
# Allow running this test module directly (instead of via the pytest CLI)
if __name__ == '__main__':
    execute_pytest()
|
{"hexsha": "04ee87126e47cba05ba80f95f7584975f8f6488e", "size": 5739, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_setbacks.py", "max_stars_repo_name": "NREL/reVX", "max_stars_repo_head_hexsha": "4d62eb2c003c3b53b959f7a58bdc342d18098884", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-04-06T00:29:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T20:00:14.000Z", "max_issues_repo_path": "tests/test_setbacks.py", "max_issues_repo_name": "NREL/reVX", "max_issues_repo_head_hexsha": "4d62eb2c003c3b53b959f7a58bdc342d18098884", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 67, "max_issues_repo_issues_event_min_datetime": "2020-02-28T20:15:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:34:52.000Z", "max_forks_repo_path": "tests/test_setbacks.py", "max_forks_repo_name": "NREL/reVX", "max_forks_repo_head_hexsha": "4d62eb2c003c3b53b959f7a58bdc342d18098884", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6898395722, "max_line_length": 77, "alphanum_fraction": 0.6180519254, "include": true, "reason": "import numpy", "num_tokens": 1376}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function # Required for stderr output, must be the first import
import os
import random
import math
import argparse
import multiprocessing as mp
import networkx as nx
import numpy as np
import igraph as ig
import community as cm # python-louvain
def check_consensus_graph(G, n_p, delta):
    '''
    Report whether the weighted consensus graph has converged.

    Input:
        G: networkx graph whose edges carry a 'weight' attribute
        n_p: number of partitions used while creating G
        delta: tolerated fraction of edges whose weight is neither 0 nor n_p

    Returns False as soon as more than delta * |E| edge weights disagree,
    True otherwise.
    '''
    budget = delta * G.number_of_edges()
    mismatched = 0
    for weight in nx.get_edge_attributes(G, 'weight').values():
        if weight not in (0, n_p):
            mismatched += 1
            if mismatched > budget:
                return False
    return True
def nx_to_igraph(Gnx, G=None):
    '''
    Function takes in a network Graph, Gnx, copies to it weights from G and
    returns the equivalent igraph graph g.

    If G is None, every edge weight is set to 1.0 instead.

    NOTE(review): igraph numbers vertices 0..max_node_id, so node ids that do
    not start at 0 (or are non-contiguous) produce placeholder vertices --
    presumably handled downstream via `placeholder_nds`; confirm against
    fast_consensus().
    '''
    # g = ig.Graph(n=Gnx.number_of_nodes())
    # # graph.vs["name"] = Gnx.nodes()
    # g.add_edges(sorted(Gnx.edges()))
    g = ig.Graph(sorted(Gnx.edges()))
    if G is not None:
        for es, ed in Gnx.edges():
            # NOTE(review): igraph indexing with a vertex pair reads/writes
            # the 'weight' edge attribute -- confirm against the python-igraph
            # Graph.__setitem__ docs for the installed version
            g[es, ed] = G[es][ed]['weight']
    else:
        g.es['weight'] = 1.0
    return g
def group_to_partition(partition):
    '''
    Takes in a partition, dictionary in the format {node: community_membership}
    Returns a nested list of communities [[comm1], [comm2], ...... [comm_n]]
    (as a dict view of lists, one list per community).
    '''
    part_dict = {}
    for node, comm in partition.items():
        # setdefault replaces the original explicit membership branch
        part_dict.setdefault(comm, []).append(node)
    return part_dict.values()
def validate_arguments(args, algorithms):
    '''
    Validate parsed command-line arguments, raising ValueError on bad input.

    args       - namespace with delta, alg, tau, procs, parts, outp_parts
    algorithms - collection of supported algorithm names
    '''
    if args.delta < 0.02:
        raise ValueError('delta is too low. Allowed values are between 0.02 and 0.2')
    if args.delta > 0.2:
        raise ValueError('delta is too high. Allowed values are between 0.02 and 0.2')
    if args.alg not in algorithms:
        raise ValueError('Incorrect algorithm entered. run with -h for help')
    if args.tau < 0 or args.tau > 1:
        raise ValueError('Incorrect tau. run with -h for help')
    if args.procs < 1:
        # Typo fix: "shuould" -> "should"
        raise ValueError('The number of worker processes should be positive')
    if args.parts <= 0 or args.outp_parts > args.parts:
        # Typo fix: "partitons" -> "partitions"
        raise ValueError('Invalid number of the output/input partitions is specified: {}/{}'.format(args.outp_parts, args.parts))
def louvain_community_detection(networkx_graph):
    """
    Run one randomized, weighted louvain pass over the graph.

    :param networkx_graph: graph whose edges carry a 'weight' attribute
    :return: finest-level partition of the louvain dendrogram
    """
    dendrogram = cm.generate_dendrogram(networkx_graph, randomize=True, weight='weight')
    return cm.partition_at_level(dendrogram, 0)
def get_yielded_graph(graph, times):
    """
    Lazily yield the same graph object `times` times.
    Can be used for applying multiprocessing map to an identical argument.
    """
    emitted = 0
    while emitted < times:
        yield graph
        emitted += 1
def fast_consensus(G, algorithm='louvain', n_p=20, thresh=0.2, delta=0.02, procs=mp.cpu_count()):
    """Fast consensus algorithm

    Iteratively re-clusters `G` with the chosen base algorithm, reweighting
    edges by how many of the n_p partitions place both endpoints in the same
    community, pruning weak edges, and repeating until the weights converge
    (see check_consensus_graph).

    return communities - resulting communities, in the base algorithm's format
        placeholder_nds - whether placeholder nodes are used by the igraph, which happens for
            the non-contiguous node range or node ids not starting from 0

    NOTE(review): the default `procs=mp.cpu_count()` is evaluated once at
    import time, not per call.
    """
    for u,v in G.edges():
        G[u][v].setdefault('weight', 1.0) # Set weights if have not been initialized
    graph = G.copy()
    L = G.number_of_edges()
    N = G.number_of_nodes()
    while(True):
        if (algorithm == 'louvain'):
            nextgraph = graph.copy()
            # NOTE(review): L is recomputed each iteration although
            # G.number_of_edges() never changes inside this function
            L = G.number_of_edges()
            # Reset consensus weights; they are re-accumulated below
            for u,v in nextgraph.edges():
                nextgraph[u][v]['weight'] = 0.0
            # Run n_p randomized louvain partitions in parallel
            with mp.Pool(processes=procs) as pool:
                communities_all = pool.map(louvain_community_detection, get_yielded_graph(graph, n_p))
            # Edge weight := number of partitions agreeing on the edge's endpoints
            for node,nbr in graph.edges():
                # NOTE(review): this membership test is redundant -- the pair
                # (node, nbr) comes from graph.edges() itself
                if (node,nbr) in graph.edges() or (nbr, node) in graph.edges():
                    if graph[node][nbr]['weight'] not in (0,n_p):
                        for i in range(n_p):
                            communities = communities_all[i]
                            if communities[node] == communities[nbr]:
                                nextgraph[node][nbr]['weight'] += 1
            # Prune edges with weak consensus (< thresh fraction of partitions)
            remove_edges = []
            for u,v in nextgraph.edges():
                if nextgraph[u][v]['weight'] < thresh*n_p:
                    remove_edges.append((u, v))
            nextgraph.remove_edges_from(remove_edges)
            if check_consensus_graph(nextgraph, n_p=n_p, delta=delta):
                break
            # Random triadic closure: connect pairs of neighbors of a common
            # node, weighted by their co-membership across partitions
            for _ in range(L):
                node = np.random.choice(nextgraph.nodes())
                neighbors = [a[1] for a in nextgraph.edges(node)]
                if (len(neighbors) >= 2):
                    a, b = random.sample(set(neighbors), 2)
                    if not nextgraph.has_edge(a, b):
                        nextgraph.add_edge(a, b, weight = 0)
                        for i in range(n_p):
                            communities = communities_all[i]
                            if communities[a] == communities[b]:
                                nextgraph[a][b]['weight'] += 1
            # Reconnect nodes isolated by the pruning step.
            # NOTE(review): sorting ascending picks the *lowest*-weight
            # neighbor of the previous graph -- confirm whether the strongest
            # neighbor was intended
            for node in nx.isolates(nextgraph):
                nbr, weight = sorted(graph[node].items(), key=lambda edge: edge[1]['weight'])[0]
                nextgraph.add_edge(node, nbr, weight=weight['weight'])
            graph = nextgraph.copy()
            if check_consensus_graph(nextgraph, n_p=n_p, delta=delta):
                break
        elif (algorithm in ('infomap', 'lpm')):
            nextgraph = graph.copy()
            for u,v in nextgraph.edges():
                nextgraph[u][v]['weight'] = 0.0
            # n_p independent partitions, each a set of frozensets of nodes
            if algorithm == 'infomap':
                communities = [{frozenset(c) for c in nx_to_igraph(graph, G).community_infomap().as_cover()} for _ in range(n_p)]
            if algorithm == 'lpm':
                communities = [{frozenset(c) for c in nx_to_igraph(graph, G).community_label_propagation().as_cover()} for _ in range(n_p)]
            for node, nbr in graph.edges():
                for i in range(n_p):
                    for c in communities[i]:
                        if node in c and nbr in c:
                            if not nextgraph.has_edge(node,nbr):
                                nextgraph.add_edge(node, nbr, weight = 0)
                            nextgraph[node][nbr]['weight'] += 1
            remove_edges = []
            for u,v in nextgraph.edges():
                if nextgraph[u][v]['weight'] < thresh*n_p:
                    remove_edges.append((u, v))
            nextgraph.remove_edges_from(remove_edges)
            for _ in range(L):
                node = np.random.choice(nextgraph.nodes())
                neighbors = [a[1] for a in nextgraph.edges(node)]
                if (len(neighbors) >= 2):
                    a, b = random.sample(set(neighbors), 2)
                    if not nextgraph.has_edge(a, b):
                        nextgraph.add_edge(a, b, weight = 0)
                        # NOTE(review): `a in communities[i]` tests whether
                        # node a *is itself* one of the frozensets -- probably
                        # "some c in communities[i] contains both a and b"
                        # was intended
                        for i in range(n_p):
                            if a in communities[i] and b in communities[i]:
                                nextgraph[a][b]['weight'] += 1
            graph = nextgraph.copy()
            if check_consensus_graph(nextgraph, n_p=n_p, delta=delta):
                break
        elif (algorithm == 'cnm'):
            nextgraph = graph.copy()
            for u,v in nextgraph.edges():
                nextgraph[u][v]['weight'] = 0.0
            communities = []
            mapping = []
            inv_map = []
            # fastgreedy is order-sensitive, so cluster n_p random node
            # relabelings of the graph
            for _ in range(n_p):
                order = list(graph.nodes())
                random.shuffle(order)
                maps = dict(zip(graph.nodes(), order))
                mapping.append(maps)
                inv_map.append({v: k for k, v in maps.items()})
                G_c = nx.relabel_nodes(graph, mapping = maps, copy = True)
                G_igraph = nx_to_igraph(G_c, G)
                communities.append(G_igraph.community_fastgreedy(weights = 'weight').as_clustering())
            for i in range(n_p):
                edge_list = [(mapping[i][j], mapping[i][k]) for j,k in graph.edges()]
                for node,nbr in edge_list:
                    a, b = inv_map[i][node], inv_map[i][nbr]
                    # NOTE(review): graph[a][b] is the edge-attribute dict, so
                    # this comparison with (0, n_p) is always True -- probably
                    # graph[a][b]['weight'] was intended
                    if graph[a][b] not in (0, n_p):
                        for c in communities[i]:
                            if node in c and nbr in c:
                                nextgraph[a][b]['weight'] += 1
            remove_edges = []
            for u,v in nextgraph.edges():
                if nextgraph[u][v]['weight'] < thresh*n_p:
                    remove_edges.append((u, v))
            nextgraph.remove_edges_from(remove_edges)
            for _ in range(L):
                node = np.random.choice(nextgraph.nodes())
                neighbors = [a[1] for a in nextgraph.edges(node)]
                if (len(neighbors) >= 2):
                    a, b = random.sample(set(neighbors), 2)
                    if not nextgraph.has_edge(a, b):
                        nextgraph.add_edge(a, b, weight = 0)
                        for i in range(n_p):
                            for c in communities[i]:
                                if mapping[i][a] in c and mapping[i][b] in c:
                                    nextgraph[a][b]['weight'] += 1
            # NOTE(review): unlike the other branches, `graph` is never
            # replaced by `nextgraph` here, so a non-converging cnm run loops
            # on the same graph forever -- confirm whether a
            # `graph = nextgraph.copy()` is missing
            if check_consensus_graph(nextgraph, n_p, delta):
                break
        else:
            break
    # Final clustering pass on the converged graph
    communities = None
    placeholder_nds = False
    if (algorithm == 'louvain'):
        with mp.Pool(processes=procs) as pool:
            communities = pool.map(louvain_community_detection, get_yielded_graph(graph, n_p))
    elif algorithm == 'cnm':
        communities = []
        mapping = []
        inv_map = []
        for _ in range(n_p):
            order = list(range(N))
            random.shuffle(order)
            maps = dict(zip(range(N), order))
            mapping.append(maps)
            inv_map.append({v: k for k, v in maps.items()})
            G_c = nx.relabel_nodes(graph, mapping=maps, copy=True)
            G_igraph = nx_to_igraph(G_c, G)
            if len(G_igraph.vs) != graph.number_of_nodes():
                placeholder_nds = True
            communities.append(G_igraph.community_fastgreedy(weights = 'weight').as_clustering())
    else:
        ig_graph = nx_to_igraph(graph, G)
        # Placeholder vertices appear when node ids are non-contiguous or do
        # not start from 0 (igraph numbers vertices 0..max_id)
        if len(ig_graph.vs) != graph.number_of_nodes():
            placeholder_nds = True
        if algorithm == 'infomap':
            communities = [{frozenset(c) for c in ig_graph.community_infomap().as_cover()} for _ in range(n_p)]
        if algorithm == 'lpm':
            communities = [{frozenset(c) for c in ig_graph.community_label_propagation().as_cover()} for _ in range(n_p)]
    return communities, placeholder_nds
# Command-line entry point: parse arguments, run the consensus clustering and
# write one .cnl (community-per-line) file per requested output partition.
if __name__ == "__main__":
    algorithms = ('louvain', 'lpm', 'cnm', 'infomap')  # Clustering algorithms
    parser = argparse.ArgumentParser(description='Fast consensus clustering algorithm.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    nparts = 10  # 20; Default number of partitions
    parser.add_argument('-f', '--network-file', dest='inpfile', type=str, required=True, help='file with edgelist')
    parser.add_argument('-a', '--algorithm', dest='alg', type=str, default='louvain' , help='underlying clustering algorithm: {}. Note: CNM is slow'.format(', '.join(algorithms)))
    parser.add_argument('-p', '--partitions', dest='parts', type=int, default=nparts, help='number of input partitions for the algorithm')
    parser.add_argument('--outp-parts', dest='outp_parts', type=int, default=1, help='number of partitions to be outputted, <= input partitions')
    parser.add_argument('-t', '--tau', dest='tau', type=float, help='used for filtering weak edges')
    parser.add_argument('-d', '--delta', dest='delta', type=float, default=0.02, help='convergence parameter. Converges when less than delta proportion of the edges are with wt = 1')
    parser.add_argument('-w', '--worker-procs', dest='procs', type=int, default=nparts, help='number of parallel worker processes for the clustering,'
                        ' it is automatically decreased to min(input_partitions, cpu_num)')
    parser.add_argument('-o', '--output-dir', dest='outdir', type=str, default='out_partitions', help='output directory')
    args = parser.parse_args()
    # Per-algorithm default edge-filtering threshold
    default_tau = {'louvain': 0.2, 'cnm': 0.7 ,'infomap': 0.6, 'lpm': 0.8}
    if args.tau is None:
        args.tau = default_tau.get(args.alg, 0.2)
    # NOTE(review): outp_parts and procs have argparse defaults and are never
    # None here; these two guards are purely defensive
    if args.outp_parts is None:
        args.outp_parts = args.parts
    if args.procs is None:
        args.procs = mp.cpu_count()
    if args.procs > args.parts:
        args.procs = args.parts
    validate_arguments(args, algorithms)
    G = nx.read_edgelist(args.inpfile, nodetype=int, data=(('weight',float),))
    output, placeholder_nds = fast_consensus(G, algorithm=args.alg, n_p=args.parts, thresh=args.tau, delta=args.delta, procs=args.procs)
    # Louvain yields {node: community} dicts; convert them to nested lists
    if(args.alg == 'louvain'):
        for i in range(len(output)):
            output[i] = group_to_partition(output[i])
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    if not args.outdir.endswith('/'):
        args.outdir += '/'
    # Output file name template: <base>_d<delta>_p<parts>_t<tau>_<index>.cnl
    ofbase = args.outdir + os.path.splitext(os.path.split(args.inpfile)[1])[0]
    oftpl = '{{}}_d{:.2}_p{:02}_t{:.2}_{{:0{}d}}.cnl'.format(args.delta, args.parts, args.tau, int(math.ceil(math.log10(len(output)))))
    for i, partition in enumerate(output):
        if i >= args.outp_parts:
            break
        with open(oftpl.format(ofbase, i), 'w') as f:
            # One community per line (CNL format)
            for community in partition:
                # Placeholder nodes of igraph form disconnected clusters, filter them out.
                # Typically it happens when node ids in the edges file start from 1+ instead of 0
                if placeholder_nds and len(community) == 1:
                    continue
                print(*community, file=f)
|
{"hexsha": "d0f39dce18068f8b156786149ddbba5693d37639", "size": 14112, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithms/fast_consensus.py", "max_stars_repo_name": "eXascaleInfolab/clubmark", "max_stars_repo_head_hexsha": "5c329a5308a39d53f77db790a31d621245a7c693", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2018-11-20T08:32:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T02:46:35.000Z", "max_issues_repo_path": "algorithms/fast_consensus.py", "max_issues_repo_name": "eXascaleInfolab/clubmark", "max_issues_repo_head_hexsha": "5c329a5308a39d53f77db790a31d621245a7c693", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithms/fast_consensus.py", "max_forks_repo_name": "eXascaleInfolab/clubmark", "max_forks_repo_head_hexsha": "5c329a5308a39d53f77db790a31d621245a7c693", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-22T08:39:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-22T08:39:00.000Z", "avg_line_length": 41.8753709199, "max_line_length": 182, "alphanum_fraction": 0.5711451247, "include": true, "reason": "import numpy,import networkx", "num_tokens": 3291}
|
# Exercises independent_set(g, DegreeIndependentSet()) on edge cases: empty
# graph, single vertex, self-loops, star, complete and (augmented) path graphs.
@testset "Degree Independent Set" begin
    # Empty graph: no vertices, hence an empty independent set
    g0 = SimpleGraph(0)
    for g in testgraphs(g0)
        c = @inferred(independent_set(g, DegreeIndependentSet()))
        @test isempty(c)
    end
    # Single isolated vertex is trivially independent
    g1 = SimpleGraph(1)
    for g in testgraphs(g1)
        c = @inferred(independent_set(g, DegreeIndependentSet()))
        @test (c == [1,])
    end
    # A self-loop makes vertex 1 adjacent to itself -> excluded
    add_edge!(g1, 1, 1)
    for g in testgraphs(g1)
        c = @inferred(independent_set(g, DegreeIndependentSet()))
        @test isempty(c)
    end
    # Star: all leaves are mutually non-adjacent; only the hub is excluded
    g3 = StarGraph(5)
    for g in testgraphs(g3)
        c = @inferred(independent_set(g, DegreeIndependentSet()))
        @test sort(c) == [2, 3, 4, 5]
    end
    # Complete graph: every pair is adjacent
    g4 = CompleteGraph(5)
    for g in testgraphs(g4)
        c = @inferred(independent_set(g, DegreeIndependentSet()))
        @test length(c)== 1 # Exactly one vertex
    end
    # PathGraph(5) with additional edge 2-5
    g5 = PathGraph(5)
    add_edge!(g5, 2, 5)
    for g in testgraphs(g5)
        c = @inferred(independent_set(g, DegreeIndependentSet()))
        @test sort(c) == [1, 3, 5]
    end
    # Self-loops on 2 and 3 remove them from consideration as well
    add_edge!(g5, 2, 2)
    add_edge!(g5, 3, 3)
    for g in testgraphs(g5)
        c = @inferred(independent_set(g, DegreeIndependentSet()))
        @test sort(c) == [1, 5]
    end
end
|
{"hexsha": "77e1c8d7b739de120398d53caa133195dbdb907c", "size": 1261, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/independentset/degree_ind_set.jl", "max_stars_repo_name": "SyxP/LightGraphs.jl", "max_stars_repo_head_hexsha": "6c488a872b991d99cc794f59d0ae617d5bf204a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-24T03:15:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-24T03:15:48.000Z", "max_issues_repo_path": "test/independentset/degree_ind_set.jl", "max_issues_repo_name": "SyxP/LightGraphs.jl", "max_issues_repo_head_hexsha": "6c488a872b991d99cc794f59d0ae617d5bf204a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/independentset/degree_ind_set.jl", "max_forks_repo_name": "SyxP/LightGraphs.jl", "max_forks_repo_head_hexsha": "6c488a872b991d99cc794f59d0ae617d5bf204a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-02-24T03:16:27.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-24T18:41:12.000Z", "avg_line_length": 26.829787234, "max_line_length": 65, "alphanum_fraction": 0.5844567803, "num_tokens": 394}
|
import os
import time
import itertools
import numpy as np
from matplotlib import colors as mcolors
from PyQt5 import QtWidgets
from otk.sdb import lookat, projection
from otk import zemax, trains
from otk import ri
from otk.sdb import npscalar
from otk.sdb import numba as sdb_numba
from otk.rt2 import rt2_scalar_qt as rt2
# Load Zemax file.
train_full = zemax.read_train(os.path.join(os.path.dirname(__file__), 'conic_telecentric_lens.zmx'), encoding='ascii')
# Remove object, stop and image surfaces.
train = train_full.subset(2, -1)
# Convert to a sequence of axisymmetric singlet lenses.
singlet_sequence = trains.SingletSequence.from_train(train)
# Convert to rt2 Elements. For fun make the lenses square.
elements = rt2.make_elements(singlet_sequence, 'square')
# Create assembly object for ray tracing.
assembly = rt2.Assembly.make(elements, ri.air)
# Wavelength in meters (850 nm).
lamb = 850e-9
# Get paraxial focal length.
f = train.get_effective_focal_length(lamb)
# NOTE(review): dividing the radius by 2**0.5 looks like a circle-to-square
# half-width conversion -- confirm intent.
stop_half_width = train_full.interfaces[1].radius/2**0.5
field_half_width = train_full.interfaces[-1].radius/2**0.5
traced_rays = []  # NOTE(review): never appended to below -- leftover?
num_field_points = 3
num_rays_side = 3
times = []  # one [npscalar_seconds, numba_seconds] pair per traced ray
# Loop over field positions.
# NOTE(review): `color` is unused; the zip with TABLEAU_COLORS only caps the
# number of field points at the number of named colors.
for xy, color in zip(np.linspace(0, field_half_width, num_field_points), mcolors.TABLEAU_COLORS):
    # Loop over entrance pupil.
    for epx, epy in itertools.product(np.linspace(-stop_half_width, stop_half_width, num_rays_side), repeat=2):
        start_ray = rt2.make_ray(assembly, epx, epy, 0, xy, xy, f, 1, 0, 0, lamb)
        # Trace ray and convert to sequence of points for plotting.
        times.append([])
        # Benchmark the pure-numpy backend against the numba-accelerated one.
        for backend in (npscalar, sdb_numba):
            rt2.set_backend(backend)
            t0 = time.time()
            rt2.nonseq_trace(assembly, start_ray, dict(epsilon=1e-10))
            times[-1].append(time.time() - t0)
print(times)
|
{"hexsha": "46dbdae8a26a39b7ce7f98b2bf2a7f4a24298c10", "size": 1828, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/rt2/benchmark.py", "max_stars_repo_name": "draustin/otk", "max_stars_repo_head_hexsha": "c6e91423ec79b85b380ee9385f6d27c91f92503d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-05-17T14:26:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T04:52:54.000Z", "max_issues_repo_path": "examples/rt2/benchmark.py", "max_issues_repo_name": "uamhforever/otk", "max_issues_repo_head_hexsha": "c6e91423ec79b85b380ee9385f6d27c91f92503d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2020-04-10T22:50:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T04:54:19.000Z", "max_forks_repo_path": "examples/rt2/benchmark.py", "max_forks_repo_name": "uamhforever/otk", "max_forks_repo_head_hexsha": "c6e91423ec79b85b380ee9385f6d27c91f92503d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-14T04:52:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T04:52:45.000Z", "avg_line_length": 32.0701754386, "max_line_length": 118, "alphanum_fraction": 0.7357768053, "include": true, "reason": "import numpy,import numba", "num_tokens": 502}
|
//
// Copyright (c) 2017 Michele Segata <msegata@disi.unitn.it>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program. If not, see http://www.gnu.org/licenses/.
//
#ifndef MATRIX_UTILS_H
#define MATRIX_UTILS_H
#include "Array.hh"
#include <vector>
// GSL is needed only for the gsl_matrix conversion helpers at the bottom
#ifdef ENABLE_DISCRETIZATION
#include <gsl/gsl_matrix.h>
#endif
/**
 * Copies the values of a matrix over a portion of another
 * @param dst destination matrix
 * @param src source matrix
 * @param row row index of the destination matrix
 * @param col column index of the destination matrix
 */
void set_submatrix(Matrix<double> &dst, const Matrix<double> &src, int row,
                   int col);
/**
 * Copies the values of a vector over a portion of another
 * @param dst destination vector
 * @param src source vector
 * @param pos index of the destination vector
 */
void set_subvector(Vector<double> &dst, const Vector<double> &src, int pos);
/**
 * Generates a diagonal matrix by using a vector as the diagonal
 * @param src vector to use as the diagonal
 * @return the diagonal matrix
 */
Matrix<double> diag(const Vector<double> &src);
/**
 * Returns a diagonal square matrix
 * @param n square matrix size
 * @param v value to put on the diagonal (defaults to 1, i.e. identity)
 * @return the diagonal matrix
 */
Matrix<double> diag(int n, double v = 1);
/**
 * Returns an identity matrix
 * @param n square matrix size
 * @return the identity matrix
 */
Matrix<double> identity(int n);
/**
 * Given a list of comma separated values, returns a vector
 * @param v string of comma separated values
 * @param length number of elements in the string
 * @return the vector including the parsed values
 */
Vector<double> get_vector(const std::string &v, int length);
/**
 * Returns a vector of all zeros
 * @param length length of the vector
 * @return a zero vector of the specified length
 */
Vector<double> zero_vector(int length);
/**
 * Given a string of comma separated values, returns a matrix of the parsed
 * values
 * @param m string of comma separated values. the number of elements must be
 * nrow * ncol
 * @param nrow number of rows
 * @param ncol number of columns
 * @return the matrix of parsed values
 */
Matrix<double> get_matrix(const std::string &m, int nrow, int ncol);
/**
 * Matrix transpose
 * @param m the matrix to be transposed
 * @return the transposed matrix
 */
Matrix<double> transpose(const Matrix<double> &m);
/**
 * Given a matrix of a particular size, changes the size of the matrix
 * without touching the existing values. New matrix values are initialized
 * with zeros
 * @param m matrix to be enlarged in place
 * @param r new number of rows
 * @param c new number of columns
 */
void extend_matrix(Matrix<double> &m, int r, int c);
/**
 * Given a vector of a particular size, changes the size of the vector
 * without touching the existing values. New vector values are initialized
 * with zeros
 * @param v vector to be enlarged in place
 * @param n new size
 */
void extend_vector(Vector<double> &v, int n);
/**
 * Merges two matrices together by appending the rows of the second matrix to
 * the rows of the first
 * @param a first matrix
 * @param b second matrix
 * @return a matrix where a and b are merged by rows
 */
Matrix<double> bind_matrices(const Matrix<double> &a, const Matrix<double> &b);
/**
 * Merges two vectors together
 * @param a first vector
 * @param b second vector
 * @return a vector composed by a and b
 */
Vector<double> bind_vectors(const Vector<double> &a, const Vector<double> &b);
/**
 * Performs the product of two matrices
 * @param a first matrix
 * @param b second matrix
 * @return the matrix product between a and b if the matrices are compatible,
 * an empty matrix otherwise
 */
Matrix<double> multiply(const Matrix<double> &a, const Matrix<double> &b);
/**
 * Performs the product of a matrix and a vector
 * @param a matrix
 * @param b vector
 * @return the product between a and b if the matrices are compatible,
 * an empty matrix otherwise
 */
Matrix<double> multiply(const Matrix<double> &a, const Vector<double> &b);
/**
 * Performs the product of a matrix and a vector
 * @param a matrix
 * @param b vector given as a raw pointer
 * NOTE(review): the pointed-to length cannot be validated here; callers must
 * pass at least as many elements as a has columns -- confirm in the .cpp
 * @return the product between a and b if the matrices are compatible,
 * an empty matrix otherwise
 */
Matrix<double> multiply(const Matrix<double> &a, const double *b);
/**
 * Given a matrix, the function computes the powers of such a matrix and
 * stores it in a vector of matrices, where the index is the exponent. The
 * results is thus a vector v[i] = A^i for i = 0 ... n
 * @param a input matrix
 * @param n maximum exponent
 * @return the vector of matrices A^i
 */
std::vector<Matrix<double> > get_powers(const Matrix<double> &a, int n);
/**
 * Pretty prints a matrix to stdout
 * @param a the matrix to print
 * @param name optional name to print
 */
// Fixed: parameter type was Matrix<double_t>, inconsistent with every other
// declaration in this header. double_t (from <math.h>) may be wider than
// double depending on FLT_EVAL_METHOD, which would make this a distinct,
// mismatching overload on such platforms.
void pretty_print_matrix(const Matrix<double> &a, const std::string &name="");
/**
 * Copies a vector inside another. If the sizes don't match, then the source
 * vector is repeated multiple times in the destination vector. For example,
 * if the source vector is [1, 2, 3] and the destination vector has length 8,
 * then the destination will be filled with [1, 2, 3, 1, 2, 3, 1, 2]
 * @param dst destination vector
 * @param src source vector
 */
void copy_vector(Vector<double>& dst, const Vector<double> &src);
/**
 * Copies a vector inside another. The size of the destination vector must be
 * larger or equal than the one of the source vector
 * @param dst destination vector (raw pointer)
 * @param src source vector
 */
void copy_vector(double *dst, const Vector<double> &src);
/**
 * Copies a vector inside another. The size of the destination vector must be
 * larger or equal than the one of the source vector
 * NOTE(review): src is actually a Matrix here although the doc says vector --
 * presumably the matrix data is copied element-wise; confirm in the .cpp
 * @param dst destination vector (raw pointer)
 * @param src source matrix
 */
void copy_vector(double *dst, const Matrix<double> &src);
/**
 * Returns a portion of a vector
 * @param src source vector
 * @param from start index
 * @param length elements to copy
 * @return the subvector of elements going from src[from] to src[from+length-1]
 */
Vector<double> subvector(const Vector<double> &src, int from, int length);
#ifdef ENABLE_DISCRETIZATION
/**
 * Converts a gsl matrix into a Matrix<double> object
 * @param m pointer to gsl_matrix
 * @return converted matrix
 */
Matrix<double> from_gsl(const gsl_matrix *m);
/**
 * Converts a Matrix<double> object into a gsl matrix. This function allocates
 * the memory for the gsl matrix and it is a duty of the user to free the
 * memory via the gsl_matrix_free() method
 * @param m matrix object
 * @return converted matrix
 */
gsl_matrix *to_gsl(const Matrix<double> &m);
#endif // ENABLE_DISCRETIZATION
#endif //MATRIX_UTILS_H
|
{"hexsha": "7d471a1b8658b95ca2a618f1f4d435de9234a108", "size": 7231, "ext": "h", "lang": "C", "max_stars_repo_path": "src/matrix_utils.h", "max_stars_repo_name": "michele-segata/mpclib", "max_stars_repo_head_hexsha": "a030421cbbcca8d3eb5e50b7cd0335bac0057fb0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22.0, "max_stars_repo_stars_event_min_datetime": "2018-07-17T10:20:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T17:50:54.000Z", "max_issues_repo_path": "src/matrix_utils.h", "max_issues_repo_name": "michele-segata/mpclib", "max_issues_repo_head_hexsha": "a030421cbbcca8d3eb5e50b7cd0335bac0057fb0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2018-01-03T15:23:49.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-22T13:19:19.000Z", "max_forks_repo_path": "src/matrix_utils.h", "max_forks_repo_name": "michele-segata/mpclib", "max_forks_repo_head_hexsha": "a030421cbbcca8d3eb5e50b7cd0335bac0057fb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2018-02-03T15:22:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:16:32.000Z", "avg_line_length": 30.6398305085, "max_line_length": 80, "alphanum_fraction": 0.7203706265, "num_tokens": 1734}
|
// program-options.cpp - application options
// written by Elijah Zarezky
// GNU libc headers
#include <limits.h>
#include <unistd.h>
// STL headers
#include <exception>
#include <iostream>
#include <string>
// Boost headers
#include <boost/program_options.hpp>
// our headers
#include "common-defs.h"
// shortcuts
namespace bpo = boost::program_options;
// public variables (bound to command-line options in prepareProgramOptions)
std::string workingDir;  // working directory to scan ("--directory,-d")
bool recurseSubdirs = false;  // scan directories recursively ("--recursive,-r")
std::string cppExtensions("h,hh,hpp,c,cc,cpp,cxx");  // comma-separated extensions ("--extensions,-e")
std::string includeDirs;  // colon-separated additional include dirs ("--includes,-i")
std::string outputDir;  // output directory for the report ("--output,-o")
// implementation helpers
// Stores the process's current working directory in workingDir.
// Returns true on success; on failure workingDir is cleared instead.
static bool getCurrentWorkingDirectory(std::string& workingDir)
{
    char buffer[PATH_MAX] = { 0 };
    if (getcwd(buffer, sizeof(buffer)) == nullptr)
    {
        workingDir.clear();
        return (false);
    }
    workingDir.assign(buffer);
    return (true);
}
// Populates bpoDescription with the full set of command-line options the
// application understands. The process's current working directory becomes
// the default for --directory.
static void prepareProgramOptions(bpo::options_description& bpoDescription)
{
    bpo::options_description genericArgs("Generic options");

    getCurrentWorkingDirectory(workingDir);
    genericArgs.add_options()
        ("directory,d",
            bpo::value<std::string>(&workingDir)->default_value(workingDir),
            "working directory to scan")
        ("recursive,r", "scan directories recursively")
        ("extensions,e",
            bpo::value<std::string>(&cppExtensions)->default_value(cppExtensions),
            "comma-separated list of file extensions to search for")
        ("includes,i",
            bpo::value<std::string>(&includeDirs)->default_value(includeDirs),
            "colon-separated list of the additional include directories")
        ("output,o",
            bpo::value<std::string>(&outputDir)->default_value(outputDir),
            "output directory to write report")
        ("help,h", "display this help and exit")
        ("version,v", "print version string");
    bpoDescription.add(genericArgs);
}
// public functions
// Parses the command line into the public option variables above.
// Returns false when the program should exit immediately (--help/--version
// was requested, with the corresponding text printed), true otherwise.
bool parseProgramOptions(int argc, char* argv[])
{
    bpo::options_description bpoDescription;
    prepareProgramOptions(bpoDescription);

    bpo::variables_map vmOptions;
    bpo::store(bpo::parse_command_line(argc, argv, bpoDescription), vmOptions);
    bpo::notify(vmOptions);

    if (vmOptions.count("help") > 0)
    {
        std::cout << bpoDescription << std::endl;
        return (false);
    }
    if (vmOptions.count("version") > 0)
    {
        std::cout << "Confident version 0.1.1" << std::endl;
        return (false);
    }
    recurseSubdirs = vmOptions.count("recursive") > 0;
    return (true);
}
|
{"hexsha": "5f5ba64609e8ee32b358fb69e9dbf1c94a12eb45", "size": 2470, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/program-options.cpp", "max_stars_repo_name": "SchweinDeBurg/Confident", "max_stars_repo_head_hexsha": "541ebb6d3d72b576b1bd0853f49585cc705d0006", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/program-options.cpp", "max_issues_repo_name": "SchweinDeBurg/Confident", "max_issues_repo_head_hexsha": "541ebb6d3d72b576b1bd0853f49585cc705d0006", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/program-options.cpp", "max_forks_repo_name": "SchweinDeBurg/Confident", "max_forks_repo_head_hexsha": "541ebb6d3d72b576b1bd0853f49585cc705d0006", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2040816327, "max_line_length": 76, "alphanum_fraction": 0.7311740891, "num_tokens": 628}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 16 05:43:41 2016
@author: aman

Detect up to 100 Shi-Tomasi corners in an image and draw them as circles.
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt

filename = '/home/aman/Pictures/Computer_Vision/Project/1.jpg'
img = cv2.imread(filename)
# cv2.imread returns None (instead of raising) when the file is missing/unreadable
if img is None:
    raise IOError("could not read image: {}".format(filename))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# up to 100 corners, quality level 0.01, min distance 10 px between corners
corners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)
if corners is not None:
    # np.int0 was removed in NumPy 2.0; np.intp is the equivalent alias
    corners = np.intp(corners)
    for i in corners:
        x, y = i.ravel()
        cv2.circle(img, (x, y), 3, 255, 3)
cv2.imshow('Corners', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
{"hexsha": "0868890bafe17d12d748ba8403866563ed9f58fa", "size": 528, "ext": "py", "lang": "Python", "max_stars_repo_path": "Testing Code/goodFeatures.py", "max_stars_repo_name": "amanwalia92/VisionChess", "max_stars_repo_head_hexsha": "c57219b3b7ce1fd98b27573aa0a8658ceabd0593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Testing Code/goodFeatures.py", "max_issues_repo_name": "amanwalia92/VisionChess", "max_issues_repo_head_hexsha": "c57219b3b7ce1fd98b27573aa0a8658ceabd0593", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Testing Code/goodFeatures.py", "max_forks_repo_name": "amanwalia92/VisionChess", "max_forks_repo_head_hexsha": "c57219b3b7ce1fd98b27573aa0a8658ceabd0593", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.5555555556, "max_line_length": 62, "alphanum_fraction": 0.7083333333, "include": true, "reason": "import numpy", "num_tokens": 166}
|
# Transform images with multiprocessing
# Author: David Young, 2019, 2020
"""Transform large images with multiprocessing, including up/downsampling
and image transposition.
"""
from time import time
from typing import Sequence
import numpy as np
from skimage import transform
from magmap.cv import chunking, cv_nd
from magmap.settings import config
from magmap.io import importer
from magmap.io import libmag
from magmap.plot import plot_3d
_logger = config.logger.getChild(__name__)
class Downsampler(object):
    """Rescale or resize a large image chunk-by-chunk across processes.

    The full image is stored as a class attribute so forked worker
    processes can read it without pickling (no global variables needed).

    Attributes:
        img (:obj:`np.ndarray`): Full image array.
    """
    img = None

    @classmethod
    def set_data(cls, img):
        """Store the full image for later use by :meth:`rescale_sub_roi`.

        Args:
            img (:obj:`np.ndarray`): See attributes.
        """
        cls.img = img

    @classmethod
    def rescale_sub_roi(cls, coord, slices, rescale, target_size, multichannel,
                        sub_roi=None):
        """Rescale or resize a single sub-ROI.

        Args:
            coord: (z, y, x) position of the sub-ROI within the chunked ROI.
            slices (Tuple[slice]): Slices defining the sub-ROI within
                :attr:`img`.
            rescale: Rescaling factor; when None, ``target_size`` is used
                instead.
            target_size: Target size of the sub-ROI in (z, y, x); ignored
                when ``rescale`` is not None.
            multichannel: True if the final dimension holds channels.
            sub_roi (:obj:`np.ndarray`): Array chunk to rescale/resize;
                defaults to None to extract it from :attr:`img` if available.

        Returns:
            Tuple of ``coord`` (echoed back so multiprocessing callers can
            identify the chunk) and the transformed sub-ROI.
        """
        if sub_roi is None and cls.img is not None:
            sub_roi = cls.img[slices]
        result = None
        if rescale is not None:
            result = transform.rescale(
                sub_roi, rescale, mode="reflect", multichannel=multichannel)
        elif target_size is not None:
            result = transform.resize(
                sub_roi, target_size, mode="reflect", anti_aliasing=True)
        return coord, result
def make_modifier_plane(plane):
    """Build the filename modifier for a planar orthogonal transformation.

    Args:
        plane: Plane to which the image was transposed.

    Returns:
        str: Modifier string, e.g. ``"planeXZ"`` for ``"xz"``.
    """
    return "plane" + plane.upper()
def make_modifier_scale(scale):
    """Build the filename modifier for a scaling transformation.

    Args:
        scale (float): Scale to which the image was rescaled. Any decimal
            point is replaced with "pt" to avoid confusion with path
            extensions.

    Returns:
        str: Modifier string, e.g. ``"scale0pt5"`` for ``0.5``.
    """
    return "scale{}".format(scale).replace(".", "pt")
def make_modifier_resized(target_size):
    """Build the filename modifier for a resize transformation.

    Note that the final image size may differ slightly from this size as
    it only reflects the size that was targeted.

    Args:
        target_size: Target size of rescaling in x,y,z.

    Returns:
        str: Modifier string, e.g. ``"resized(100,200,50)"``.
    """
    return "resized({},{},{})".format(
        target_size[0], target_size[1], target_size[2])
def get_transposed_image_path(
    img_path: str, scale: float = None, target_size: Sequence[int] = None
) -> str:
    """Get the image path modified for any prior transposition.

    Args:
        img_path: Unmodified image path.
        scale: Scaling factor, which takes precedence over ``target_size``;
            defaults to None.
        target_size: Target size in ``x, y, z``, typically given by an atlas
            profile; defaults to None.

    Returns:
        Path with the transposition modifier inserted before the extension,
        or ``img_path`` unmodified if all transposition factors are None.
    """
    if scale is None and target_size is None:
        return img_path
    # use the scaled image for pixel comparison, retrieving the saved
    # scaling as of v.0.6.0
    if scale is not None:
        # scale takes priority as a command-line argument
        modifier = make_modifier_scale(scale)
        print("loading scaled file with {} modifier".format(modifier))
    else:
        # otherwise assume a target size was set
        modifier = make_modifier_resized(target_size)
        print("loading resized file with {} modifier".format(modifier))
    return libmag.insert_before_ext(img_path, "_" + modifier)
def transpose_img(filename, series, plane=None, rescale=None, target_size=None):
    """Transpose Numpy NPY saved arrays into new planar orientations and
    rescaling or resizing.

    Rescaling/resizing take place in multiprocessing. Files are saved
    through memmap-based arrays to minimize RAM usage. Output filenames
    are based on the ``make_modifer_[task]`` functions. Currently transposes
    all channels, ignoring :attr:``config.channel`` parameter.

    Args:
        filename: Full file path in :attribute:cli:`filename` format.
        series: Series within multi-series file.
        plane: Planar orientation (see :attribute:plot_2d:`PLANES`). Defaults
            to None, in which case no planar transformation will occur.
        rescale: Rescaling factor; defaults to None. Takes precedence over
            ``target_size``.
        target_size (List[int]): Target shape in x,y,z; defaults to None,
            in which case the target size will be extracted from the register
            profile if available.
    """
    if target_size is None:
        target_size = config.atlas_profile["target_size"]
    if plane is None and rescale is None and target_size is None:
        print("No transposition to perform, skipping")
        return
    time_start = time()
    # even if loaded already, reread to get image metadata
    # TODO: consider saving metadata in config and retrieving from there
    img5d, info = importer.read_file(filename, series, return_info=True)
    image5d = img5d.img
    sizes = info["sizes"]
    # make filenames based on transpositions
    modifier = ""
    if plane is not None:
        modifier = make_modifier_plane(plane)
    # either rescaling or resizing
    if rescale is not None:
        modifier += make_modifier_scale(rescale)
    elif target_size:
        # target size may differ from final output size but allows a known
        # size to be used for finding the file later
        modifier += make_modifier_resized(target_size)
    filename_image5d_npz, filename_info_npz = importer.make_filenames(
        filename, series, modifier=modifier)
    # TODO: image5d should assume 4/5 dimensions
    offset = 0 if image5d.ndim <= 3 else 1
    multichannel = image5d.ndim >= 5
    image5d_swapped = image5d
    if plane is not None and plane != config.PLANE[0]:
        # swap z-y to get (y, z, x) order for xz orientation
        image5d_swapped = np.swapaxes(image5d_swapped, offset, offset + 1)
        config.resolutions[0] = libmag.swap_elements(
            config.resolutions[0], 0, 1)
        if plane == config.PLANE[2]:
            # swap new y-x to get (x, z, y) order for yz orientation
            image5d_swapped = np.swapaxes(image5d_swapped, offset, offset + 2)
            config.resolutions[0] = libmag.swap_elements(
                config.resolutions[0], 0, 2)
    scaling = None
    if rescale is not None or target_size is not None:
        # rescale based on scaling factor or target specific size
        rescaled = image5d_swapped
        # TODO: generalize for more than 1 preceding dimension?
        if offset > 0:
            rescaled = rescaled[0]
        max_pixels = [100, 500, 500]
        sub_roi_size = None
        if target_size:
            # to avoid artifacts from thin chunks, fit image into even
            # number of pixels per chunk by rounding up number of chunks
            # and resizing each chunk by ratio of total size to chunk num
            target_size = target_size[::-1]  # change to z,y,x
            shape = rescaled.shape[:3]
            num_chunks = np.ceil(np.divide(shape, max_pixels))
            # np.int was removed in NumPy 1.24; the builtin int is the
            # equivalent platform-default integer dtype
            max_pixels = np.ceil(
                np.divide(shape, num_chunks)).astype(int)
            sub_roi_size = np.floor(
                np.divide(target_size, num_chunks)).astype(int)
            print("Resizing image of shape {} to target_size: {}, using "
                  "num_chunks: {}, max_pixels: {}, sub_roi_size: {}"
                  .format(rescaled.shape, target_size, num_chunks, max_pixels,
                          sub_roi_size))
        else:
            print("Rescaling image of shape {} by factor of {}"
                  .format(rescaled.shape, rescale))
        # rescale in chunks with multiprocessing
        sub_roi_slices, _ = chunking.stack_splitter(rescaled.shape, max_pixels)
        is_fork = chunking.is_fork()
        if is_fork:
            Downsampler.set_data(rescaled)
        sub_rois = np.zeros_like(sub_roi_slices)
        pool = chunking.get_mp_pool()
        pool_results = []
        for z in range(sub_roi_slices.shape[0]):
            for y in range(sub_roi_slices.shape[1]):
                for x in range(sub_roi_slices.shape[2]):
                    coord = (z, y, x)
                    slices = sub_roi_slices[coord]
                    args = [coord, slices, rescale, sub_roi_size,
                            multichannel]
                    if not is_fork:
                        # pickle chunk if img not directly available
                        args.append(rescaled[slices])
                    pool_results.append(pool.apply_async(
                        Downsampler.rescale_sub_roi, args=args))
        for result in pool_results:
            coord, sub_roi = result.get()
            print("replacing sub_roi at {} of {}"
                  .format(coord, np.add(sub_roi_slices.shape, -1)))
            sub_rois[coord] = sub_roi
        pool.close()
        pool.join()
        rescaled_shape = chunking.get_split_stack_total_shape(sub_rois)
        if offset > 0:
            rescaled_shape = np.concatenate(([1], rescaled_shape))
        print("rescaled_shape: {}".format(rescaled_shape))
        # rescale chunks directly into memmap-backed array to minimize RAM usage
        image5d_transposed = np.lib.format.open_memmap(
            filename_image5d_npz, mode="w+", dtype=sub_rois[0, 0, 0].dtype,
            shape=tuple(rescaled_shape))
        chunking.merge_split_stack2(sub_rois, None, offset, image5d_transposed)
        if rescale is not None:
            # scale resolutions based on single rescaling factor
            config.resolutions = np.multiply(
                config.resolutions, 1 / rescale)
        else:
            # scale resolutions based on size ratio for each dimension
            config.resolutions = np.multiply(
                config.resolutions,
                (image5d_swapped.shape / rescaled_shape)[1:4])
        sizes[0] = rescaled_shape
        scaling = importer.calc_scaling(image5d_swapped, image5d_transposed)
    else:
        # transfer directly to memmap-backed array
        image5d_transposed = np.lib.format.open_memmap(
            filename_image5d_npz, mode="w+", dtype=image5d_swapped.dtype,
            shape=image5d_swapped.shape)
        if plane == config.PLANE[1] or plane == config.PLANE[2]:
            # flip upside-down if re-orienting planes
            if offset:
                image5d_transposed[0, :] = np.fliplr(image5d_swapped[0, :])
            else:
                image5d_transposed[:] = np.fliplr(image5d_swapped[:])
        else:
            image5d_transposed[:] = image5d_swapped[:]
        sizes[0] = image5d_swapped.shape
    # save image metadata
    print("detector.resolutions: {}".format(config.resolutions))
    print("sizes: {}".format(sizes))
    image5d.flush()
    importer.save_image_info(
        filename_info_npz, info["names"], sizes, config.resolutions,
        info["magnification"], info["zoom"],
        *importer.calc_intensity_bounds(image5d_transposed), scaling, plane)
    print("saved transposed file to {} with shape {}".format(
        filename_image5d_npz, image5d_transposed.shape))
    print("time elapsed (s): {}".format(time() - time_start))
def rotate_img(roi, rotate=None, order=None):
    """Rotate an ROI according to atlas profile settings.

    Args:
        roi (:obj:`np.ndarray`): Region of interest array (z,y,x[,c]).
        rotate (dict): Dictionary of rotation settings in
            :class:`magmap.settings.atlas_profile`. Defaults to None
            to take the value from :attr:`config.atlas_profile`.
        order (int): Spline interpolation order; defaults to None to use
            the value from within ``rotate``. Should be 0 for labels.

    Returns:
        :obj:`np.ndarray`: The rotated image array; the input array is
        left unmodified.
    """
    if rotate is None:
        rotate = config.atlas_profile["rotate"]
    if order is None:
        order = rotate["order"]
    # work on a copy so the caller's array is untouched
    rotated = np.copy(roi)
    for rot in rotate["rotation"]:
        print("rotating by", rot)
        rotated = cv_nd.rotate_nd(
            rotated, rot[0], rot[1], order=order, resize=rotate["resize"])
    return rotated
def preprocess_img(image5d, preprocs, channel, out_path):
    """Pre-process an image in 3D.

    Args:
        image5d (:obj:`np.ndarray`): 5D array in t,z,y,x[,c].
        preprocs (Union[str, list[str]]): Pre-processing tasks that will be
            converted to enums in :class:`config.PreProcessKeys` to perform
            in the order given.
        channel (int): Channel to preprocess, or None for all channels.
        out_path (str): Output base path.

    Returns:
        :obj:`np.ndarray`: The pre-processed image array, or None if
        ``preprocs`` is None.
    """
    if preprocs is None:
        print("No preprocessing tasks to perform, skipping")
        return
    if not libmag.is_seq(preprocs):
        preprocs = [preprocs]
    # operate on the first time point only
    roi = image5d[0]
    for preproc in preprocs:
        # perform global pre-processing task
        task = libmag.get_enum(preproc, config.PreProcessKeys)
        _logger.info("Pre-processing task: %s", task)
        if task is config.PreProcessKeys.SATURATE:
            roi = plot_3d.saturate_roi(roi, channel=channel)
        elif task is config.PreProcessKeys.DENOISE:
            roi = plot_3d.denoise_roi(roi, channel)
        elif task is config.PreProcessKeys.REMAP:
            roi = plot_3d.remap_intensity(roi, channel)
        elif task is config.PreProcessKeys.ROTATE:
            roi = rotate_img(roi)
        else:
            # Logger.warn is deprecated in favor of Logger.warning
            _logger.warning("No preprocessing task found for: %s", preproc)
    # save to new file
    image5d = importer.roi_to_image5d(roi)
    importer.save_np_image(image5d, out_path)
    return image5d
|
{"hexsha": "5b11c8750cd5f32b3c68f8cd560b0364324ea203", "size": 15443, "ext": "py", "lang": "Python", "max_stars_repo_path": "magmap/atlas/transformer.py", "max_stars_repo_name": "sanderslab/magellanmapper", "max_stars_repo_head_hexsha": "16d55df6dc1f0e5baf3938a30edcdd634e0ffd85", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-04-14T12:49:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-10T13:08:52.000Z", "max_issues_repo_path": "magmap/atlas/transformer.py", "max_issues_repo_name": "sanderslab/magellanmapper", "max_issues_repo_head_hexsha": "16d55df6dc1f0e5baf3938a30edcdd634e0ffd85", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2020-10-20T03:40:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T11:13:45.000Z", "max_forks_repo_path": "magmap/atlas/transformer.py", "max_forks_repo_name": "sanderslab/magellanmapper", "max_forks_repo_head_hexsha": "16d55df6dc1f0e5baf3938a30edcdd634e0ffd85", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-20T03:27:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-07T21:16:59.000Z", "avg_line_length": 39.5974358974, "max_line_length": 81, "alphanum_fraction": 0.6267564592, "include": true, "reason": "import numpy", "num_tokens": 3523}
|
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
import logging
import argparse
import os
import pandas as pd
import datetime
# Timestamp used to name this run's experiment directory and log file
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# Command-line configuration for data paths, model sizes, and training schedule
parser = argparse.ArgumentParser(description='speech_recognition')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--batch_size', default=512, type=int, help='batch size')
parser.add_argument('--context_size', default=12, type=int, help='context size')
parser.add_argument('--input_size', default=1000, type=int, help='input size')
parser.add_argument('--output_size', default=138, type=int, help='output size')
parser.add_argument('--num_epochs', default=18, type=int, help='epoch number')
parser.add_argument('--decay_steps', default='7, 12', type=str,
                    help='The step where learning rate decay by 0.1')
parser.add_argument('--save_step', default=5, type=int, help='step for saving model')
parser.add_argument('--eval_step', default=1, type=int, help='step for validation')
parser.add_argument('--train_data_path', default='../data/train.npy', type=str)
parser.add_argument('--train_label_path', default='../data/train_labels.npy', type=str)
parser.add_argument('--val_data_path', default='../data/dev.npy', type=str)
parser.add_argument('--val_label_path', default='../data/dev_labels.npy', type=str)
parser.add_argument('--test_data_path', default='../data/test.npy', type=str)
parser.add_argument('--checkpoint_dir', default='../checkpoints/', help='checkpoint folder root')
parser.add_argument('--result_file_name', default='result.csv', type=str, help='testing result save path')
args = parser.parse_args()
# Per-run experiment directory under the checkpoint root
# NOTE(review): os.makedirs raises if the directory already exists (two runs
# within the same second) -- confirm whether exist_ok=True is wanted
args.expr_dir = os.path.join(args.checkpoint_dir, current_time)
os.makedirs(args.expr_dir)
# Create the log
log_path = os.path.join(args.expr_dir, 'speech_recognition_{}.log'.format(current_time))
logging.basicConfig(filename=log_path, level=logging.INFO)
# Modify the result save path
args.result_file_name = os.path.join(args.expr_dir, args.result_file_name)
# Use the first GPU when available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
def save_log(message):
    """Record ``message`` in the run's log file and echo it to stdout."""
    logging.info(message)
    print(message)
class load_dataset(Dataset):
    """Frame-level speech dataset.

    Flattens every (utterance, frame) pair into a single global index so
    the DataLoader can sample individual frames; each item is a flattened
    context window of ``2 * args.context_size + 1`` frames around the
    target frame, paired with that frame's phoneme-state label.
    """
    def __init__(self, data_path, label_path=None):
        # Both data and label has the same time length for one utterrance
        # Data shape: (utterance, seq_len, 40), Label shape: (utterance, seq_len)
        self.data = np.load(data_path, encoding='bytes', allow_pickle=True)
        if label_path:
            self.label = np.load(label_path, allow_pickle=True)
        else:
            # no labels, e.g. for the test set; __getitem__ returns -1 instead
            self.label = None
        # map a flat sample index to (utterance index, frame index)
        self.idx_map = []
        for i, xs in enumerate(self.data):
            for j in range(xs.shape[0]):
                self.idx_map.append((i, j))
    def __getitem__(self, index):
        i, j = self.idx_map[index]
        # Select the context_size before and after the current frame.
        # mode='clip' clamps out-of-range indices, so edge frames repeat the
        # boundary frame rather than zero-padding
        # (NOTE(review): the header comment mentions zero padding -- confirm)
        x = self.data[i].take(range(j - args.context_size, j + args.context_size + 1), mode='clip', axis=0).flatten()
        # Normalize
        # x = (x - x.mean()) / x.std()
        # Select the phoneme state label for the current frame
        y = np.int32(self.label[i][j]).reshape(1) if self.label is not None else np.int32(-1).reshape(1)
        return torch.from_numpy(x).float(), torch.LongTensor(y)
    def __len__(self):
        # one sample per frame across all utterances
        return len(self.idx_map)
###
# * Layers -> [input_size, 2048, 2048, 1024, 1024, output_size]
# * ReLU activations
# * Context size k = 12 frames on both sides
# * Adam optimizer, with the default learning rate 1e-3
# * Zero padding of k frames on both sides of each utterance
###
class MLP(nn.Module):
    """Feed-forward phoneme-state classifier.

    Ten hidden Linear layers (3x2048, 4x1024, 3x512), each followed by an
    in-place ReLU and BatchNorm1d, then a final Linear projection to
    ``output_size`` logits.
    """

    def __init__(self, input_size, output_size):
        super(MLP, self).__init__()
        # hidden layer widths, from the input side to the output side
        hidden = [2048, 2048, 2048, 1024, 1024, 1024, 1024, 512, 512, 512]
        layers = []
        prev = input_size
        for width in hidden:
            layers.append(nn.Linear(prev, width))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.BatchNorm1d(width))
            prev = width
        layers.append(nn.Linear(prev, output_size))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return raw (unnormalized) class logits for a batch of frames."""
        return self.net(x)
def train(net, loader, optimizer, criterion, epoch):
    """Run one training epoch over ``loader``, logging running loss and
    accuracy every 20 mini-batches via ``save_log``."""
    net.train()
    running_batch = 0
    running_loss = 0.0
    running_corrects = 0
    # Iterate over images.
    for i, (data, label) in enumerate(loader):
        data = data.to(device)
        label = label.to(device)
        output = net(data)
        # predicted class = argmax over the logits
        _, label_pred = torch.max(output, 1)
        loss = criterion(output, label.view(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_batch += label.size(0)
        # NOTE(review): loss.item() is the per-batch mean, yet it is later
        # divided by the total sample count, scaling the printed loss down
        # by the batch size -- confirm whether per-sample loss was intended
        running_loss += loss.item()
        running_corrects += torch.sum(label_pred == label.view(-1)).item()
        if (i + 1) % 20 == 0: # log every 20 mini-batches
            message = '[%d, %5d] loss: %.3f accuracy: %.3f' % (
                epoch, i + 1, running_loss / running_batch, running_corrects / running_batch)
            save_log(message)
def validate(net, loader, criterion, epoch):
    """Evaluate ``net`` on ``loader`` without gradients and return the
    overall classification accuracy as a float in [0, 1]."""
    net.eval()
    running_batch = 0
    running_loss = 0.0
    running_corrects = 0
    with torch.no_grad():
        # visual separator in the log
        message = '*' * 40
        save_log(message)
        for i, (data, label) in enumerate(loader):
            data = data.to(device)
            label = label.to(device)
            output = net(data)
            # label_pred = torch.nn.functional.softmax(output, dim=1)
            _, label_pred = torch.max(output, 1)
            loss = criterion(output, label.view(-1))
            running_batch += label.size(0)
            running_loss += loss.item()
            running_corrects += torch.sum(label_pred == label.view(-1)).item()
        # average the accumulated per-batch mean losses over all samples seen
        running_loss /= running_batch
        acc = running_corrects / running_batch
        message = 'Epoch: %d, testing Loss %.3f, testing accuracy: %.3f' % (epoch, running_loss, acc)
        save_log(message)
        message = '*' * 40
        save_log(message)
    return acc
def test(net, loader):
    """Predict class labels for every batch in ``loader``.

    Returns:
        Tuple of (number of samples seen, list of predicted class indices).
    """
    net.eval()
    predictions = []
    seen = 0
    with torch.no_grad():
        for data, _ in loader:
            data = data.to(device)
            logits = net(data)
            _, batch_pred = torch.max(logits, 1)
            predictions.extend(batch_pred.cpu().numpy())
            seen += data.size(0)
    return seen, predictions
def save_networks(net, which_epoch):
    """Save the model's state dict to ``<expr_dir>/<which_epoch>_net.pth``.

    Note: the model is moved to the CPU as a side effect; callers must move
    it back to the target device before further training or inference.
    """
    save_filename = '%s_net.pth' % (which_epoch)
    save_path = os.path.join(args.expr_dir, save_filename)
    if torch.cuda.is_available():
        # a DataParallel wrapper stores the real model under .module; a bare
        # module raises AttributeError instead. The original bare `except:`
        # also swallowed genuine save failures (e.g. a full disk), so only
        # the attribute lookup is caught here.
        try:
            torch.save(net.module.cpu().state_dict(), save_path)
        except AttributeError:
            torch.save(net.cpu().state_dict(), save_path)
    else:
        torch.save(net.cpu().state_dict(), save_path)
def weights_init(m, type='kaiming'):
    """Initialize a module's parameters in place.

    Linear/Conv2d layers get the scheme named by ``type`` (``'kaiming'`` by
    default; also ``'xavier'``, ``'orthogonal'``, ``'gaussian'``) with a
    zeroed bias; BatchNorm layers get unit weight and zero bias. Intended
    for use with ``net.apply(weights_init)``.
    """
    name = m.__class__.__name__
    if 'Linear' in name or 'Conv2d' in name:
        if type == 'xavier':
            nn.init.xavier_normal_(m.weight)
        elif type == 'kaiming':
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif type == 'orthogonal':
            nn.init.orthogonal_(m.weight)
        elif type == 'gaussian':
            m.weight.data.normal_(0, 0.01)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
if __name__ == '__main__':
    # Build the model and move loss computation to the training device
    net = MLP(input_size=args.input_size, output_size=args.output_size)
    net.apply(weights_init)
    criterion = nn.CrossEntropyLoss()
    criterion.to(device)
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # Set learning rate decay steps (parse "7, 12" -> [7, 12])
    str_steps = args.decay_steps.split(',')
    args.decay_steps = []
    for str_step in str_steps:
        str_step = int(str_step)
        args.decay_steps.append(str_step)
    scheduler = MultiStepLR(optimizer, milestones=args.decay_steps, gamma=0.1)
    save_log('Logging data')
    train_data = load_dataset(args.train_data_path, args.train_label_path)
    train_loader = DataLoader(dataset=train_data, num_workers=4, batch_size=args.batch_size, pin_memory=True,
                              shuffle=True)
    val_data = load_dataset(args.val_data_path, args.val_label_path)
    val_loader = DataLoader(dataset=val_data, num_workers=4, batch_size=args.batch_size, pin_memory=True,
                            shuffle=False)
    save_log('Data is loaded')
    # ------------------------
    # Start Training and Validating
    # ------------------------
    cur_acc = 0
    for epoch in range(1, args.num_epochs + 1):
        # save_networks moves the model to the CPU, so move it back each epoch
        net.to(device)
        # NOTE(review): scheduler.step() before training follows older
        # PyTorch semantics; newer releases expect it after optimizer.step()
        scheduler.step()
        lr = optimizer.param_groups[0]['lr']
        message = '{}: {}/{} , {}: {:.4f}'.format('epoch', epoch, args.num_epochs, 'lr', lr)
        save_log(message)
        save_log('-' * 10)
        train(net, train_loader, optimizer, criterion, epoch)
        if epoch % args.eval_step == 0:
            val_acc = validate(net, val_loader, criterion, epoch)
            if val_acc > cur_acc:
                # keep a checkpoint of the best-so-far validation accuracy
                save_networks(net, epoch)
                cur_acc = val_acc
        # if epoch % args.save_step == 0:
        #     save_networks(epoch)
        save_networks(net, epoch)
    # ------------------------
    # Start Testing
    # ------------------------
    save_log('Loading test data')
    test_data = load_dataset(args.test_data_path)
    test_loader = DataLoader(dataset=test_data, num_workers=4, batch_size=args.batch_size, pin_memory=True, shuffle=False)
    save_log('Test data is loaded')
    net.to(device)
    test_num, test_label = test(net, test_loader)
    d = {'id': list(range(test_num)), 'label': test_label}
    df = pd.DataFrame(data=d)
    # BUG FIX: args.file_name does not exist (AttributeError at runtime);
    # the configured output path is args.result_file_name
    df.to_csv(args.result_file_name, header=True, index=False)
    save_log('Testing is done, result is saved to {}'.format(args.result_file_name))
|
{"hexsha": "ac5b18893303f368ee989ca13d7de32f5ce43ed8", "size": 11943, "ext": "py", "lang": "Python", "max_stars_repo_path": "Frame_Level_Speech_recognition/Frame_Level_Speech_Recognition/src/train.py", "max_stars_repo_name": "MonitSharma/Data-Science-Projects", "max_stars_repo_head_hexsha": "b78df36061a9877240763bf3e71ec797f53b4450", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Frame_Level_Speech_recognition/Frame_Level_Speech_Recognition/src/train.py", "max_issues_repo_name": "MonitSharma/Data-Science-Projects", "max_issues_repo_head_hexsha": "b78df36061a9877240763bf3e71ec797f53b4450", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Frame_Level_Speech_recognition/Frame_Level_Speech_Recognition/src/train.py", "max_forks_repo_name": "MonitSharma/Data-Science-Projects", "max_forks_repo_head_hexsha": "b78df36061a9877240763bf3e71ec797f53b4450", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4158415842, "max_line_length": 123, "alphanum_fraction": 0.574227581, "include": true, "reason": "import numpy", "num_tokens": 2631}
|
"""
AbstractMenu
The supertype for all Menu types.
See AbstractMenu.jl for descriptions of functions mentioned in this
doc string.
# Functions
The following functions can be called on all <:AbstractMenu types.
Details can be found in
## Exported
- `request(m::AbstractMenu)`
- `request(msg::AbstractString, m::AbstractMenu)`
## Hidden
- `printMenu(m::AbstractMenu, cursor::Int; init::Bool=false)`
# Subtypes
All subtypes must contain the feilds `pagesize::Int` and
`pageoffset::Int`. They must also implement the following functions.
## Necessary Functions
These functions must be implemented for all subtypes of AbstractMenu.
- `pick(m::AbstractMenu, cursor::Int)`
- `cancel(m::AbstractMenu)`
- `options(m::AbstractMenu)`
- `writeLine(buf::IOBuffer, m::AbstractMenu, idx::Int, cur::Bool)`
## Optional Functions
These functions do not need to be implemented for all AbstractMenu
subtypes.
- `header(m::AbstractMenu)`
- `keypress(m::AbstractMenu, i::UInt32)`
"""
abstract type AbstractMenu end
# NECESSARY FUNCTIONS
# These functions must be implemented for all subtypes of AbstractMenu
######################################################################
# This function must be implemented for all menu types. It defines what
# happens when a user presses the Enter key while the menu is open.
# If this function returns true, `request()` will exit.
pick(m::AbstractMenu, cursor::Int) = error("unimplemented")
# This function must be implemented for all menu types. It defines what
# happens when a user cancels ('q' or ctrl-c) a menu. `request()` will
# always exit after calling this function.
cancel(m::AbstractMenu) = error("unimplemented")
# This function must be implemented for all menu types. It should return
# a list of strings to be displayed as options in the current page.
options(m::AbstractMenu) = error("unimplemented")
# This function must be implemented for all menu types. It should write
# the option at index `idx` to the buffer. If cursor is `true` it
# should also display the cursor marker.
function writeLine(buf::IOBuffer, m::AbstractMenu, idx::Int, cur::Bool)
    error("unimplemented")
end
# OPTIONAL FUNCTIONS
# These functions do not need to be implemented for all Menu types
##################################################################
# If `header()` is defined for a specific menu type, display the header
# above the menu when it is rendered to the screen.
header(m::AbstractMenu) = ""
# If `keypress()` is defined for a specific menu type, send any
# non-standard keypress event to this function. If the function returns
# true, `request()` will exit.
keypress(m::AbstractMenu, i::UInt32) = false
"""
request(m::AbstractMenu)
Display the menu and enter interactive mode. Returns `m.selected` which
varies based on menu type.
"""
request(m::AbstractMenu) = request(terminal, m)
# Interactive event loop: draws the menu, then processes keys until `pick`
# or `keypress` returns true, or the user cancels. Returns `m.selected`.
# Fix applied: `m.pageoffset` is clamped at zero in the wrap/PAGE_DOWN/END
# paths, which otherwise went negative for menus with fewer options than
# `m.pagesize`.
function request(term::Base.Terminals.TTYTerminal, m::AbstractMenu)
    cursor = 1
    menu_header = header(m)
    if !CONFIG[:supress_output] && menu_header != ""
        println(term.out_stream, menu_header)
    end
    printMenu(term.out_stream, m, cursor, init=true)
    raw_mode_enabled = enableRawMode(term)
    raw_mode_enabled && print(term.out_stream, "\x1b[?25l") # hide the cursor
    try
        while true
            c = readKey(term.in_stream)
            if c == Int(ARROW_UP)
                if cursor > 1
                    # move selection up
                    cursor -= 1
                    # scroll the page
                    if cursor < (2+m.pageoffset) && m.pageoffset > 0
                        m.pageoffset -= 1
                    end
                elseif CONFIG[:scroll_wrap]
                    # wrap to bottom; clamp the offset at zero for menus
                    # shorter than one page
                    cursor = length(options(m))
                    m.pageoffset = max(length(options(m)) - m.pagesize, 0)
                end
            elseif c == Int(ARROW_DOWN)
                if cursor < length(options(m))
                    # move selection down
                    cursor += 1
                    # scroll page
                    if cursor >= m.pagesize + m.pageoffset && m.pagesize + m.pageoffset < length(options(m))
                        m.pageoffset += 1
                    end
                elseif CONFIG[:scroll_wrap]
                    # wrap to top
                    cursor = 1
                    m.pageoffset = 0
                end
            elseif c == Int(PAGE_UP)
                # If we're at the bottom, move the page 1 less to move the cursor up from
                # the bottom entry, since we try to avoid putting the cursor at bounds.
                m.pageoffset -= m.pagesize - (cursor == length(options(m)) ? 1 : 0)
                m.pageoffset = max(m.pageoffset, 0)
                cursor -= m.pagesize
                cursor = max(cursor, 1)
            elseif c == Int(PAGE_DOWN)
                m.pageoffset += m.pagesize - (cursor == 1 ? 1 : 0)
                # bound above by the last page start and below by zero
                m.pageoffset = max(min(m.pageoffset, length(options(m)) - m.pagesize), 0)
                cursor += m.pagesize
                cursor = min(cursor, length(options(m)))
            elseif c == Int(HOME_KEY)
                cursor = 1
                m.pageoffset = 0
            elseif c == Int(END_KEY)
                cursor = length(options(m))
                # clamp at zero for menus shorter than one page
                m.pageoffset = max(length(options(m)) - m.pagesize, 0)
            elseif c == 13 # <enter>
                # will break if pick returns true
                pick(m, cursor) && break
            elseif c == UInt32('q')
                cancel(m)
                break
            elseif c == 3 # ctrl-c
                cancel(m)
                if CONFIG[:ctrl_c_interrupt]
                    throw(InterruptException())
                else
                    break
                end
            else
                # will break if keypress returns true
                keypress(m, c) && break
            end
            printMenu(term.out_stream, m, cursor)
        end
    finally
        # always disable raw mode even if there is an
        # exception in the above loop
        if raw_mode_enabled
            print(term.out_stream, "\x1b[?25h") # unhide cursor
            disableRawMode(term)
        end
    end
    println(term.out_stream)
    return m.selected
end
"""
request([term,] msg::AbstractString, m::AbstractMenu)
Shorthand for `println(msg); request(m)`.
"""
request(msg::AbstractString, m::AbstractMenu) =
request(terminal, msg, m)
function request(term::Base.Terminals.TTYTerminal,
msg::AbstractString, m::AbstractMenu)
println(term.out_stream, msg)
request(term, m)
end
# The generic printMenu function is used for displaying the state of a
# menu to the screen. Menus must implement `writeLine` and `options`
# and have fields `pagesize::Int` and `pageoffset::Int` as part of
# their type definition. The page is assembled in a buffer and written
# in a single `print` to minimize flicker.
function printMenu(out, m::AbstractMenu, cursor::Int; init::Bool=false)
    CONFIG[:supress_output] && return
    buf = IOBuffer()
    # Move the cursor to the beginning of where it should print
    # Don't do this on the initial print
    lines = m.pagesize-1
    if init
        m.pageoffset = 0
    else
        # ESC[999D moves the cursor far left; ESC[<lines>A moves it up over
        # the previously printed page so it can be redrawn in place
        print(buf, "\x1b[999D\x1b[$(lines)A")
    end
    for i in (m.pageoffset+1):(m.pageoffset + m.pagesize)
        # ESC[2K erases the current terminal line before redrawing it
        print(buf, "\x1b[2K")
        if i == m.pageoffset+1 && m.pageoffset > 0
            # first line && scrolled past first entry
            print(buf, CONFIG[:up_arrow])
        elseif i == m.pagesize+m.pageoffset && i != length(options(m))
            # last line && not last option
            print(buf, CONFIG[:down_arrow])
        else
            # non special line
            print(buf, " ")
        end
        writeLine(buf, m, i, i == cursor)
        # dont print an \r\n on the last line
        i != (m.pagesize+m.pageoffset) && print(buf, "\r\n")
    end
    print(out, String(take!(buf)))
end
|
{"hexsha": "508178f50f8a6b2361216a25506f8ca1f31d674a", "size": 7947, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "ext/TerminalMenus/src/AbstractMenu.jl", "max_stars_repo_name": "KristofferC/Pkg.jl", "max_stars_repo_head_hexsha": "c5785d4293dd4fc26b5e03205424d7155342e7ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ext/TerminalMenus/src/AbstractMenu.jl", "max_issues_repo_name": "KristofferC/Pkg.jl", "max_issues_repo_head_hexsha": "c5785d4293dd4fc26b5e03205424d7155342e7ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-11-24T09:16:37.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-24T16:38:34.000Z", "max_forks_repo_path": "ext/TerminalMenus/src/AbstractMenu.jl", "max_forks_repo_name": "KristofferC/Pkg.jl", "max_forks_repo_head_hexsha": "c5785d4293dd4fc26b5e03205424d7155342e7ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.216730038, "max_line_length": 108, "alphanum_fraction": 0.5832389581, "num_tokens": 1847}
|
/*
* channel_element_base.hpp - micros base channel element
* Copyright (C) 2015 Zaile Jiang
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef MICROSRTT_CHANNEL_BASE_HPP
#define MICROSRTT_CHANNEL_BASE_HPP
#include <utility>
#include <boost/intrusive_ptr.hpp>
#include <boost/call_traits.hpp>
#include "micros_rtt/oro/oro_arch.h"
namespace micros_rtt
{
/** In the data flow implementation, a channel is created by chaining
* ChannelElementBase objects.
*
* ChannelElementBase objects are refcounted. In the chain, an element
* maintains the refcount for its successor, and holds a simple pointer to
* its predecessor.
*/
class ChannelElementBase
{
public:
typedef boost::intrusive_ptr<ChannelElementBase> shared_ptr;
private:
// Atomic reference counter manipulated via ref()/deref() and the
// intrusive_ptr hooks below.
oro_atomic_t refcount;
friend void intrusive_ptr_add_ref( ChannelElementBase* e );
friend void intrusive_ptr_release( ChannelElementBase* e );
// Neighbouring elements in the chain; both held here as shared_ptr.
shared_ptr input;
shared_ptr output;
protected:
/** Increases the reference count */
void ref();
/** Decreases the reference count, and deletes the object if it is zero
*/
void deref();
public:
/**
* A default constructed ChannelElementBase has no input nor output
* configured. The only way to set an input or output is to use
* setOutput().
*/
ChannelElementBase();
virtual ~ChannelElementBase();
/**
* Removes the input channel (if any).
* This call may delete channels from memory.
*/
void removeInput();
/**
* Returns the current input channel element.
* This will only return a valid channel element if
* another element has received this object as an argument
* to setOutput().
* @return the element preceding this one, or null if none.
*/
ChannelElementBase::shared_ptr getInput();
/**
* Returns the first input channel element of this connection.
* Will return the channel element the furthest away from the input port,
* or \a this if none.
* @return getInput() ? getInput()->getInputEndPoint() : this
*/
ChannelElementBase::shared_ptr getInputEndPoint();
/** Returns the next channel element in the channel's propagation
* direction
*/
ChannelElementBase::shared_ptr getOutput();
/**
* Returns the last output channel element of this connection.
* Will return the channel element the furthest away from the output port,
* or \a this if none.
* @return getOutput() ? getOutput()->getOutputEndPoint() : this
*/
ChannelElementBase::shared_ptr getOutputEndPoint();
/**
* Sets the output of this channel element to \a output and sets the input of \a output to this.
* This implies that this channel element becomes the input of \a output.
* There is no setInput function since this function does both setting input and output of
* \a this and \a output.
* @param output the next element in chain.
*/
void setOutput(shared_ptr output);
/** Signals that there is new data available on this channel
* By default, the channel element forwards the call to its output
*
* @returns false if an error occured that requires the channel to be invalidated. In no ways it indicates that the sample has been received by the other side of the channel.
*/
virtual bool signal();
/**
* This is called by an input port when it is ready to receive data.
* Each channel element has the responsibility to pass this notification
* on to the next, in the direction of the output.
* @return false if a fatal connection failure was encountered and
* the channel needs to be destroyed.
*/
virtual bool inputReady();
/** Clears any data stored by the channel. It means that
* ChannelElement::read() will return false afterwards (provided that no
* new data has been written on the meantime of course)
*
* By default, the channel element forwards the calls to its input
*/
virtual void clear();
/** Performs a disconnection of this channel's endpoints. If
* \a forward is true, then the disconnection is initiated by the input
* endpoint. Otherwise, it has been initiated by the output endpoint.
*/
virtual void disconnect(bool forward);
};
void intrusive_ptr_add_ref( ChannelElementBase* e );
void intrusive_ptr_release( ChannelElementBase* e );
}
#endif
|
{"hexsha": "ef58e723ccdf7fd025359c53fbd7dfc7e0487f73", "size": 4906, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/micros_rtt/oro/channel_element_base.hpp", "max_stars_repo_name": "sukha-cn/hpcl_rtt", "max_stars_repo_head_hexsha": "2fc67fa103011c7683762f26bbeb9a2937087ef5", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-03-15T12:12:55.000Z", "max_stars_repo_stars_event_max_datetime": "2017-02-09T04:12:14.000Z", "max_issues_repo_path": "include/micros_rtt/oro/channel_element_base.hpp", "max_issues_repo_name": "sukha-cn/hpcl_rtt", "max_issues_repo_head_hexsha": "2fc67fa103011c7683762f26bbeb9a2937087ef5", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2016-03-15T12:12:51.000Z", "max_issues_repo_issues_event_max_datetime": "2016-03-15T12:12:51.000Z", "max_forks_repo_path": "include/micros_rtt/oro/channel_element_base.hpp", "max_forks_repo_name": "sukha-cn/hpcl_rtt", "max_forks_repo_head_hexsha": "2fc67fa103011c7683762f26bbeb9a2937087ef5", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2016-11-03T05:23:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T15:11:35.000Z", "avg_line_length": 32.7066666667, "max_line_length": 176, "alphanum_fraction": 0.7236037505, "num_tokens": 1129}
|
import numpy as np
from numpy.testing import assert_equal
from h5py._hl.selections import Selection
from ..slicetools import spaceid_to_slice
def test_spaceid_to_slice(h5file):
    """Exhaustively check spaceid_to_slice against HDF5 hyperslab selections.

    For every valid (start, count, stride, block) 1-D hyperslab on a
    length-10 dataset, the slice reconstructed by spaceid_to_slice must
    select the same elements as the original selection.
    """
    shape = 10
    a = h5file.create_dataset('a', data=np.arange(shape))
    for start in range(0, shape):
        for count in range(0, shape):
            for stride in range(1, shape):
                for block in range(0, shape):
                    if count != 1 and block != 1:
                        # Not yet supported. Doesn't seem to be supported
                        # by HDF5 either (?)
                        continue
                    spaceid = a.id.get_space()
                    spaceid.select_hyperslab((start,), (count,),
                                             (stride,), (block,))
                    sel = Selection((shape,), spaceid)
                    try:
                        a[sel]
                    except ValueError:
                        # HDF5 doesn't allow stride/count combinations
                        # that are impossible (the count must be the exact
                        # number of elements in the selected block).
                        # Rather than trying to enumerate those here, we
                        # just check what doesn't give an error.
                        continue
                    try:
                        s = spaceid_to_slice(spaceid)
                    # A bare `except:` would also intercept KeyboardInterrupt
                    # and SystemExit; only real errors should be reported.
                    except Exception:
                        print(start, count, stride, block)
                        raise
                    assert_equal(a[s.raw], a[sel], f"{(start, count, stride, block)}")
|
{"hexsha": "48fa33f218315e8c05e63fd74a995545bc23b2c7", "size": 1615, "ext": "py", "lang": "Python", "max_stars_repo_path": "versioned_hdf5/tests/test_slicetools.py", "max_stars_repo_name": "takluyver/versioned-hdf5", "max_stars_repo_head_hexsha": "6fda5c803346e9be1dff459080566863f71cdc78", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "versioned_hdf5/tests/test_slicetools.py", "max_issues_repo_name": "takluyver/versioned-hdf5", "max_issues_repo_head_hexsha": "6fda5c803346e9be1dff459080566863f71cdc78", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "versioned_hdf5/tests/test_slicetools.py", "max_forks_repo_name": "takluyver/versioned-hdf5", "max_forks_repo_head_hexsha": "6fda5c803346e9be1dff459080566863f71cdc78", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.375, "max_line_length": 86, "alphanum_fraction": 0.4712074303, "include": true, "reason": "import numpy,from numpy", "num_tokens": 307}
|
\documentclass{article}
\def\COMM{0}
\usepackage[nottoc]{tocbibind}
\usepackage{verbatim}
\usepackage{fullpage}
\usepackage{times}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{multirow}
\usepackage{xcolor}
\usepackage{fancyhdr}
\usepackage{float}
\usepackage{hyperref}
\usepackage{framed}
\usepackage{graphicx}
\usepackage{varwidth}
\usepackage{color}
\usepackage{url}
\usepackage{cite}
\usepackage{tikz}
\usepackage{listings}
\usepackage{titlesec}
\usepackage{underscore}
\input{symbols}
\begin{document}
\newcommand{\mainsection}[1]{\newpage \section{#1}}
\newcommand{\msubsection}[1]{\newpage \subsection{#1}}
\newcommand{\msubsubsection}[1]{\subsubsection{#1}}
\title{SCALE--MAMBA v1.5 : Documentation}
\author{
A. Aly
\and D. Cozzo
\and M. Keller
\and E. Orsini
\and D. Rotaru
\and P. Scholl
\and N.P. Smart
\and T. Wood}
\maketitle
\tableofcontents
\input{Changes}
\input{Introduction}
\input{Installation}
\input{Example}
\input{ByteCodes}
\input{Compiler}
\input{IO}
\input{Restart}
\input{GC}
\input{Local}
\input{FHE}
\input{Advanced}
\bibliographystyle{alpha}
\bibliography{SCALE}
\end{document}
|
{"hexsha": "48b9845fa0acb67093e9d59a803e422667a19fe7", "size": 1142, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Documentation/Documentation.tex", "max_stars_repo_name": "vlaskinvlad/SCALE-MAMBA", "max_stars_repo_head_hexsha": "7d318088bfe9110e50b22d7155b9f1775ef3df80", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Documentation/Documentation.tex", "max_issues_repo_name": "vlaskinvlad/SCALE-MAMBA", "max_issues_repo_head_hexsha": "7d318088bfe9110e50b22d7155b9f1775ef3df80", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Documentation/Documentation.tex", "max_forks_repo_name": "vlaskinvlad/SCALE-MAMBA", "max_forks_repo_head_hexsha": "7d318088bfe9110e50b22d7155b9f1775ef3df80", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.641025641, "max_line_length": 54, "alphanum_fraction": 0.7460595447, "num_tokens": 386}
|
import pyvisa as visa
import json
import time
from decimal import Decimal
from threading import Lock
import numpy as np
import math
class generic_driver_visa_gpib(object):
    """Generic VISA/GPIB instrument driver configured from a JSON spec.

    ``spec`` supplies the VISA resource name (``port``), the read/write
    terminations and a table of named ``operations``.  Read results are
    cached in ``self.store`` for ``store_timeout`` seconds, and all
    instrument I/O is serialized through ``self.lock``.
    """
    def __init__(self, spec):
        self.spec = spec
        self.operations = spec['operations']
        port = spec["port"]
        w_term = spec.get("write_termination", '\r')
        r_term = spec.get("read_termination", '\r\n')
        doOpen = spec.get("doOpen", True)
        rm = visa.ResourceManager()
        # Result cache keyed by operation id:
        # {op_id: ((data, data_trans), timestamp)}
        self.store = {}
        # Cache lifetime in seconds (the instrument timeout below is in ms).
        self.timeout = spec.get("store_timeout", 2)
        self.instrument = rm.open_resource(port,
                                           write_termination=w_term,
                                           read_termination=r_term)
        if doOpen:
            self.instrument.open()
        self.instrument.timeout = 2000
        # Serializes instrument access across threads.
        self.lock = Lock()
    def read_instrument(self, operation_id):
        """Read the value(s) for ``operation_id``, reusing the cache when fresh.

        Returns a ``(data, data_trans)`` pair of raw and transformed values,
        or ``(-1, -1)`` when a 'read_store' entry fails to refresh.
        """
        operation = self.operations[operation_id]
        # datatype = operation['data_type']
        type = operation['type']
        echo = self.spec.get('echo', False)
        # Fall back to an already-expired timestamp so a cache miss
        # triggers a real instrument read.
        stored = self.store.get(operation_id, (None, time.time()-(self.timeout+1)))
        if time.time() - stored[1] < self.timeout:
            data, data_trans = stored[0]
        else:
            if type == 'read_store':
                # The value is produced as a side effect of reading another
                # ('read_multiple') operation; refresh that one first.
                self.read_instrument(operation.get("store_id"))
                stored = self.store.get(operation_id)
                if time.time() - stored[1] > self.timeout:
                    print('store not updating')
                    return -1, -1
                data, data_trans = stored[0]
                return data, data_trans
            elif type == 'read_multiple':
                with self.lock:
                    # print("locK")
                    self.instrument.write(operation['command'])
                    if echo:
                        self.instrument.read()
                    data = self.instrument.read()
                    # Keep reading until the instrument read times out,
                    # joining the lines with the configured split token.
                    try:
                        while True:
                            data = data + operation.get("split") + self.instrument.read()
                    except visa.errors.VisaIOError:
                        pass
                    data = data.split(operation.get("split"))
                    for i, d in enumerate(data):
                        if self.isfloat(d):
                            data[i] = self.decimals(d, operation)
                        else:
                            pass
                    # Cache any sub-operations that index into this
                    # multi-value read (by their "store_index").
                    o_ops = operation.get("operations")
                    if o_ops is not None:
                        for on in o_ops:
                            o = self.operations.get(on)
                            oi = o.get("store_index")
                            d = data[oi]
                            dt = self.transform(d, o)
                            self.store[on] = ((d, dt), time.time())
                    data_trans = [self.transform(d, operation) for d in data]
                    # print('unlocK')
            elif type == 'read_single':
                with self.lock:
                    # print("locK")
                    self.instrument.write(operation['command'])
                    data = 0
                    # Keep the last line received before the read times out.
                    try:
                        while True:
                            data = self.instrument.read()
                    except visa.errors.VisaIOError:
                        pass
                    data = float(data)
                    data_trans = self.transform(data, operation)
                    # print('unlocK')
            else:
                # Unknown operation type: send the command, echo whatever
                # comes back, and return empty results.
                with self.lock:
                    # print("lock")
                    print(operation['command'])
                    self.instrument.write(operation['command'])
                    try:
                        while True:
                            data = self.instrument.read()
                            print(data)
                    except visa.errors.VisaIOError:
                        pass
                    data = []
                    data_trans = []
                    # print('unlock')
        self.store[operation_id] = ((data, data_trans), time.time())
        return data, data_trans
    def write_instrument(self, operation_id, values):
        """Format the operation's command with ``values``, send it, and
        return the (comma-joined) response string, which may be empty."""
        with self.lock:
            """
            write instrument
            """
            # todo: check valid values for sending to instrument
            op = self.operations[operation_id]
            command = op.get("command", "")
            command = command.format(*values)
            # response = self.instrument.query(command)
            response = ""
            self.instrument.write(command)
            # Collect response lines until the read times out.
            try:
                while True:
                    if response == "":
                        response = self.instrument.read()
                    else:
                        response = response + ", " + self.instrument.read()
            except visa.errors.VisaIOError:
                pass
            if response != "":
                print(response)
            else:
                print("No response")
            return response
    def action_instrument(self, operation_id):
        """Trigger a (possibly slow) instrument action with a 10 s timeout
        and return the collected response string."""
        with self.lock:
            self.instrument.timeout = 10000
            op = self.operations[operation_id]
            command = op.get("command", "")
            # response = self.instrument.query(command,delay=1)
            response = ""
            self.instrument.write(command)
            try:
                while True:
                    if response == "":
                        response = self.instrument.read()
                    else:
                        response = response + ", " + self.instrument.read()
            except visa.errors.VisaIOError:
                pass
            # NOTE(review): this overwrites the *store* timeout (seconds)
            # with 2000, not self.instrument.timeout (ms) — looks like a
            # bug; confirm intended behavior.
            self.timeout = 2000
            if response != "":
                print(response)
            else:
                print("No response")
            return response
    def decimals(self, data, operation):
        """Apply the operation's 'decimal_shift' (a power-of-ten exponent)
        to ``data`` and return the result as a numpy float64."""
        d_shift = operation.get('decimal_shift', 0)
        d = Decimal(data).scaleb(d_shift)
        f = np.float64(d)
        return f
    def transform(self, data, operation):
        """Transform a raw reading using the operation's 'transform_eq'.

        Supported forms: 'T' (Callendar-Van Dusen resistance-to-temperature,
        solved by finding polynomial roots) and 'V'/'P' (cubic polynomial in
        the reading).  Returns NaN when the data or equation is unusable.
        """
        x = data
        eq = operation.get("transform_eq", ['V', 0, 1, 0, 0])
        if self.isfloat(data): # Check that the data can be transformed
            if eq[0] == 'T': # Callendar-Van Dusen equation
                if np.isnan(eq[1:4]).any() or np.isinf(eq[1:4]).any() or np.isnan(x) or np.isinf(x):
                    print("{} with transform {} is out of range.".format(x,eq))
                    transformed = float("NaN")
                else:
                    # Below R0 (eq[4]) the quartic form is used; above it,
                    # the quadratic form.
                    if x < eq[4]:
                        fulltransformed = np.roots([eq[3], -100 * eq[3], eq[2], eq[1], (1 - (x / eq[4]))])
                    else:
                        fulltransformed = np.roots([eq[2], eq[1], (1 - (x / eq[4]))])
                    transformed = float("inf") # Create a maximum value
                    for j in fulltransformed:
                        if np.imag(j) == 0: # Remove imaginary roots
                            if abs(j) < transformed:
                                transformed = np.real(j) # Find most reasonable real root
                            elif abs(j) == transformed and j > transformed:
                                transformed = np.real(j) # If the roots are same magnitude, give positive root
                    if math.isinf(transformed):
                        print("Invalid Callendar–Van Dusen equation: No real solutions for")
                        print("R = {}, R0 = {}, A = {}, B = {}, C = {}".format(x, eq[4], eq[1], eq[2], eq[3]))
                        transformed = float("NaN")
            elif eq[0] == 'V' or eq[0] == 'P':
                transformed = eq[1] + eq[2]*x + eq[3]*x**2 + eq[4]*x**3 # V and P both use cubic equations. P is
                # listed purely for record keeping purposes
            else:
                print("Transform form not recognised: {}".format(eq[0]))
                transformed = float("NaN")
        else:
            transformed = float("NaN") # The data can't be transformed
        # c = operation.get("transform_coeff", None)
        # transformed = eval(eq)
        return transformed
    def convert_to(self, data, datatype):
        """Cast ``data`` to 'int' or 'float'; other datatypes return it unchanged."""
        if datatype == 'int':
            return int(data)
        elif datatype == 'float':
            return float(data)
        else:
            return data
    def isfloat(self, data):
        """Return True if ``data`` can be parsed as a float."""
        try:
            float(data) # Checks if the string can be made into a float.
            return True
        except ValueError:
            return False
# testing
def main():
    """Manual smoke test: read two operations from a Vaisala HMT337 spec.

    Requires the JSON spec file and a connected instrument.
    """
    # Use a context manager so the spec file handle is closed (the
    # original left `open(...)` unclosed).
    with open('../instruments/Vaisala_HMT337.json') as spec_file:
        spec = json.load(spec_file)
    instr = generic_driver_visa_gpib(spec)
    print(instr.read_instrument('read_default'))
    print(instr.read_instrument('read_rh'))
if __name__ == '__main__':
    main()
# import visa
# import json
# import time
# from decimal import Decimal
#
# class generic_driver_visa_gpib(object):
#
# def __init__(self, spec):
# # self.spec =spec
# # self.operations = spec['operations']
# #self.operations =[ ]
# port = spec["port"]
# # baud = spec["baudrate"]
# w_term = spec.get("write_termination", '\r')
# r_term = spec.get("read_termination", '\r\n')
# rm = visa.ResourceManager()
# # self.store = {}
# # self.timeout = 2
# self.instrument = rm.open_resource(port,
# write_termination=w_term,
# read_termination=r_term)
# #self.instrument.open() is needed?
#
# def read_instrument(self,command):
# """
# read instrument
# """
# return self.instrument.q
#
# return data, data_trans
#
# def query(self,request):
# return self.instrument.query(request)
#
# def write(self):
# pass
# #todo writing to instruments
# def write_instrument(self,operation_id,values):
# """
# write instrument
# """
# return "not working yet"
#
# def decimals(self,data,operation):
# d_shift = operation.get('decimal_shift',0)
# return Decimal(data).scaleb(d_shift)
#
# def transform(self,data,operation):
# x = data
# eq = operation.get("transform_eq",'x')
# c = operation.get("transform_coeff",None)
# result = eval(eq)
# return result
#
# def convert_to(self,data,datatype):
# if datatype == 'int':
# return int(data)
# elif datatype == 'float':
# return float(data)
# else:
# return data
#
# # #testing
# # def main():
# # instr = generic_driver_visa(json.load(open('../instruments/HG3900_visa.json')))
# # print (instr.read_instrument('read_default'))
# # print (instr.read_instrument('read_default'))
# # time.sleep(2)
# # print (instr.read_instrument('read_default'))
# #
# #
# # if __name__ == '__main__':
# # main()
|
{"hexsha": "545dd3da14343a7c3a6e49bfbda05b2c72414ddf", "size": 11115, "ext": "py", "lang": "Python", "max_stars_repo_path": "hs-logger/drivers/generic_driver_visa_gpib.py", "max_stars_repo_name": "b-sherson/hs-logger", "max_stars_repo_head_hexsha": "537865e44c93a4d234c9a96e9ad784a735869bcc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hs-logger/drivers/generic_driver_visa_gpib.py", "max_issues_repo_name": "b-sherson/hs-logger", "max_issues_repo_head_hexsha": "537865e44c93a4d234c9a96e9ad784a735869bcc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hs-logger/drivers/generic_driver_visa_gpib.py", "max_forks_repo_name": "b-sherson/hs-logger", "max_forks_repo_head_hexsha": "537865e44c93a4d234c9a96e9ad784a735869bcc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.926910299, "max_line_length": 113, "alphanum_fraction": 0.4817813765, "include": true, "reason": "import numpy", "num_tokens": 2270}
|
[STATEMENT]
lemma obs_a_extTA2J_eq_obs_a_extTA2J0 [simp]: "\<lbrace>extTA2J P ta\<rbrace>\<^bsub>o\<^esub> = \<lbrace>extTA2J0 P ta\<rbrace>\<^bsub>o\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrace>extTA2J P ta\<rbrace>\<^bsub>o\<^esub> = \<lbrace>extTA2J0 P ta\<rbrace>\<^bsub>o\<^esub>
[PROOF STEP]
by(cases ta)(simp add: ta_upd_simps)
|
{"llama_tokens": 170, "file": "JinjaThreads_Compiler_J0", "length": 1}
|
From Coq Require Import Lists.List.
Import ListNotations.
(* [sublist sub i j lst] holds when [sub] is the contiguous segment of
   [lst] from position [i] (inclusive) to position [j] (exclusive). *)
Inductive sublist {T : Type} : list T -> nat -> nat -> list T -> Prop :=
| SHeadIncluded head tail j subtail : sublist subtail 0 j tail ->
sublist (head::subtail) 0 (S j) (head::tail)
| SHeadExcluded head tail i j sub : sublist sub i j tail ->
sublist sub (S i) (S j) (head::tail)
| SNil i lst : sublist [] i i lst.
(* The empty list is only a sublist between equal endpoints. *)
Lemma nil_sublist {X : Type} (i j : nat) {xs : list X} :
sublist [] i j xs -> i = j.
Proof. Admitted.
(* Transitivity: a sublist of a sublist is a sublist of the whole.
   NOTE(review): the stated bounds start at [k] regardless of [i];
   confirm this matches the intended offset arithmetic before proving. *)
Lemma sublist_trans {X : Type} {xs ys zs : list X} {i j k l : nat} :
sublist xs i j ys ->
sublist ys k l zs ->
sublist xs k (k + length xs) zs.
Proof. Admitted.
(* A list is an initial sublist of itself followed by anything. *)
Lemma sublist_app {X : Type} {xs ys : list X} :
sublist xs 0 (length xs) (xs ++ ys).
Proof. Admitted.
(* Left component of an appended sublist is itself a sublist. *)
Lemma sublist_of_app_sublist_left {X : Type} {xs ys zs : list X} {i j : nat} :
sublist (xs ++ ys) i j zs ->
sublist xs i (i + length xs) zs.
Proof. Admitted.
(* Right component of an appended sublist is itself a sublist. *)
Lemma sublist_of_app_sublist_right {X : Type} {xs ys zs : list X} {i j : nat} :
sublist (xs ++ ys) i j zs ->
sublist ys (i + length xs) j zs.
Proof. Admitted.
|
{"author": "astOwOlfo", "repo": "PetitC", "sha": "449bc594f698eaf476faac0943e65fb34e36a63f", "save_path": "github-repos/coq/astOwOlfo-PetitC", "path": "github-repos/coq/astOwOlfo-PetitC/PetitC-449bc594f698eaf476faac0943e65fb34e36a63f/Sublist.v"}
|
# coding: utf-8
import numpy as np
import torch
import os
import pickle
from PIL import Image, ImageOps, ImageEnhance
from argparse import ArgumentParser
from torch.optim import SGD, Adam
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Normalize
from torchvision.transforms import ToTensor, ToPILImage
import torchvision
import torchvision.transforms as T
import torch.nn.functional as F
import torch.nn as nn
import collections
import numbers
import random
import math
flip_index = ['16', '15', '14', '13', '12', '11', '10']
def find_bound(img):
    """Return the bounding box (xmin, ymin, xmax, ymax) of non-zero pixels.

    Parameters
    ----------
    img : 2-D numpy array (e.g. a segmentation mask).

    Returns
    -------
    tuple of int
        (xmin, ymin, xmax, ymax), inclusive indices of the first/last
        non-zero column and row sums; all zeros when the array has no
        non-zero row/column sums (matching the original behavior).
    """
    # Vectorized replacement for four near-identical Python scanning
    # loops; np.flatnonzero uses the same `!= 0` condition the loops did.
    sum_x = img.sum(axis=0)
    sum_y = img.sum(axis=1)
    nz_x = np.flatnonzero(sum_x)
    nz_y = np.flatnonzero(sum_y)
    xmin, xmax = (int(nz_x[0]), int(nz_x[-1])) if nz_x.size else (0, 0)
    ymin, ymax = (int(nz_y[0]), int(nz_y[-1])) if nz_y.size else (0, 0)
    return xmin, ymin, xmax, ymax
class Random_Rotate_Crop(object):
    """Randomly rotate an (image, mask) pair, then crop around the mask's
    bounding box, padded and enlarged to at least 512x512.
    """
    def __init__(self, maxAngle = 10):
        # Maximum absolute rotation angle in degrees.
        self.maxAngle = maxAngle
    def __call__(self, img_and_label):
        img, label = img_and_label
        angle = random.uniform(-self.maxAngle, self.maxAngle)
        img = img.rotate(angle)
        label = label.rotate(angle)
        # Bounding box of the rotated mask's non-zero pixels.
        xmin, ymin, xmax, ymax = find_bound(np.array(label))
        xcenter = (xmin + xmax) / 2  # NOTE(review): computed but unused
        ycenter = (ymin + ymax) / 2  # NOTE(review): computed but unused
        # Pad the box; 1918x1280 is assumed to be the full image size
        # (Carvana dimensions) -- TODO confirm.
        xmin = max(0, xmin - 50)
        ymin = max(0, ymin - 100)
        xmax = min(1918, xmax + 50)
        ymax = min(1280, ymax + 50)
        # Grow the box to at least 512 on each side, keeping it in bounds.
        if ymax - ymin <= 512:
            if ymin + 512 < 1280:
                ymax = ymin + 512
            else: ymin = ymax - 512
        if xmax - xmin <= 512:
            if xmin + 512 < 1918:
                xmax = xmin + 512
            else: xmin = xmax - 512
        img = img.crop((xmin, ymin, xmax, ymax))
        label = label.crop((xmin, ymin, xmax, ymax))
        return img, label
class RandomColor(object):
    """Randomly jitter channel balance, brightness and saturation of the
    image; the label passes through untouched.
    """
    def __init__(self, colorRange = (0.7, 1.3), brightnessRange = (0.5, 1.5), sturaRange = (0.2, 2)):
        # Ranges for per-channel scaling, brightness and saturation factors.
        self.colorRange = colorRange
        self.brightnessRange = brightnessRange
        self.sturaRange = sturaRange
    def __call__(self, img_and_label):
        img, label = img_and_label
        l, h = self.colorRange
        r,g,b = img.split()
        # Scale each channel by an independent random factor, then shuffle
        # the channel order for extra color augmentation.
        ratio = np.random.uniform(l, h, 3)
        r = r.point(lambda i: i * ratio[0])
        g = g.point(lambda i: i * ratio[1])
        b = b.point(lambda i: i * ratio[2])
        rgb = [r, g, b]
        random.shuffle(rgb)
        img = Image.merge("RGB", tuple(rgb))
        brightness = ImageEnhance.Brightness(img)
        b = random.uniform(self.brightnessRange[0], self.brightnessRange[1])
        s = random.uniform(self.sturaRange[0], self.sturaRange[1])
        img = brightness.enhance(b)
        img = ImageEnhance.Color(img).enhance(s)
        return img, label
def min_random(x):
    """Pick a random start coordinate for a 512-wide crop along an axis of
    length ``x``.

    Candidate starts cluster near 0, near each interior multiple of 512,
    and near the right-aligned position ``x - 512``, so consecutive
    512-wide crops tile the axis with small random jitter.  The result is
    clamped so the crop never runs past ``x``.

    The original handled only ceil(x/512) in {1, 2, 3, 4} and raised
    UnboundLocalError for larger x; this builds the candidate ranges
    programmatically for any x, reproducing the original ranges exactly
    for n <= 4.
    """
    n = int(math.ceil(x / 512.))
    if n == 1:
        # Original special case: inclusive randint over [0, 20].
        xmin = random.randint(0, 20)
    else:
        candidates = list(range(0, 20))
        # Jitter windows around each interior multiple of 512.
        for k in range(1, n - 1):
            candidates += list(range(512 * k - 20, 512 * k + 20))
        # Window just left of the right-aligned crop position.
        candidates += list(range(x - 512 - 20, x - 512))
        xmin = random.choice(candidates)
    # Clamp so the crop stays inside the axis.
    if xmin + 512 > x:
        xmin = x - 512
    return xmin
class RandomCrop(object):
    """Crop the same random square patch from both image and label."""

    def __init__(self, crop_size=512):
        # Side length of the square crop, in pixels.
        self.crop_size = crop_size

    def __call__(self, img_and_label):
        image, mask = img_and_label
        width, height = image.size
        # Uniform random top-left corner keeping the crop fully inside.
        left = random.randint(0, width - self.crop_size)
        top = random.randint(0, height - self.crop_size)
        box = (left, top, left + self.crop_size, top + self.crop_size)
        return image.crop(box), mask.crop(box)
class RandomCrop_different_size_for_image_and_label(object):
    """Crop a larger patch from the image and a smaller, concentric patch
    from the label (defaults 572/388 — presumably for valid-convolution
    U-Net training; confirm).
    """
    def __init__(self, image_size=572, label_size=388):
        self.image_size = image_size
        self.label_size = label_size
        # Half-difference between the two crop sizes.
        # NOTE(review): currently unused.
        self.bound = (self.image_size - self.label_size) // 2
    def __call__(self, img_and_label):
        img, label = img_and_label
        w, h = img.size
        # Center chosen so the *label* crop stays inside the image; the
        # larger image crop can extend past the borders near an edge
        # (PIL pads out-of-bounds regions) -- confirm intended.
        xcenter = random.randint(self.label_size // 2, w - self.label_size // 2)
        ycenter = random.randint(self.label_size // 2, h - self.label_size // 2)
        img = img.crop((xcenter - self.image_size // 2, ycenter - self.image_size // 2, xcenter + self.image_size // 2, ycenter + self.image_size // 2))
        label = label.crop((xcenter - self.label_size // 2, ycenter - self.label_size // 2, xcenter + self.label_size // 2, ycenter + self.label_size // 2))
        return img, label
class ToTensor_Label(object):
    """Convert a (PIL image, PIL label) pair into tensors."""
    def __call__(self, img_and_label):
        """Return (float image tensor via torchvision ToTensor,
        label as a 1xHxW LongTensor of class ids)."""
        img, label = img_and_label
        img_tensor = ToTensor()(img)
        # Keep integer class ids; unsqueeze adds a channel dimension.
        label_tensor = torch.from_numpy(np.array(label)).long().unsqueeze(0)
        return img_tensor, label_tensor
class ImageNormalize(object):
    """Normalize an image tensor in place with per-channel mean/std; the
    label is returned unchanged."""

    def __init__(self, mean, std):
        # Per-channel normalization statistics, one entry per channel.
        self.mean = mean
        self.std = std

    def __call__(self, img_and_label):
        image, label = img_and_label
        # In-place (channel - mean) / std on each channel slice.
        for channel, mu, sigma in zip(image, self.mean, self.std):
            channel.sub_(mu).div_(sigma)
        return image, label
class Compose(object):
    """Chain several transforms: each one receives the previous output."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, x):
        result = x
        for transform in self.transforms:
            result = transform(result)
        return result
class Car_dataset(Dataset):
    """Image/mask dataset: each image file pairs with '<stem>_mask.gif'
    under ``labels_root``.

    With ``ifFlip`` set, samples whose trailing angle index is listed in
    the module-level ``flip_index`` are mirrored (presumably so all views
    face the same direction — confirm against training setup).
    """
    def __init__(self, images_root, labels_root, filenames_img, transforms=None, ifFlip=False):
        self.images_root = images_root
        self.labels_root = labels_root
        self.filenames_img = filenames_img
        # Sort in place for a deterministic sample order.
        self.filenames_img.sort()
        self.transforms = transforms
        self.ifFlip = ifFlip
    def __getitem__(self, index):
        filename_img = self.filenames_img[index]
        # Mask filename is derived from the image stem.
        filename_mask = os.path.splitext(filename_img)[0]+'_mask.gif'
        # Filenames end in '_<angle index>'.
        angle_idx = os.path.splitext(filename_img)[0].split('_')[-1]
        with open(os.path.join(self.images_root, filename_img), 'rb') as f:
            image = Image.open(f).convert('RGB')
        with open(os.path.join(self.labels_root, filename_mask), 'rb') as f:
            label = Image.open(f).convert('P')
        if self.ifFlip and angle_idx in flip_index:
            image = ImageOps.mirror(image)
            label = ImageOps.mirror(label)
        if self.transforms is not None:
            [image, label] = self.transforms([image, label])
        return image, label
    def __len__(self):
        return len(self.filenames_img)
|
{"hexsha": "fef0826663ddc10dfea739393e2688b73f8c3059", "size": 7406, "ext": "py", "lang": "Python", "max_stars_repo_path": "train/tool.py", "max_stars_repo_name": "ydxb7/graduate", "max_stars_repo_head_hexsha": "836c47f881ff6c4edfdf1a0ee23bd04602788ca3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train/tool.py", "max_issues_repo_name": "ydxb7/graduate", "max_issues_repo_head_hexsha": "836c47f881ff6c4edfdf1a0ee23bd04602788ca3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train/tool.py", "max_forks_repo_name": "ydxb7/graduate", "max_forks_repo_head_hexsha": "836c47f881ff6c4edfdf1a0ee23bd04602788ca3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1176470588, "max_line_length": 156, "alphanum_fraction": 0.5906022144, "include": true, "reason": "import numpy", "num_tokens": 1961}
|
import validation_tool.server.data_request as rs_data
import pandas as pd
import numpy as np
import cStringIO
import os
import json
from pytesmo.validation_framework.validation import Validation
from pytesmo.validation_framework.metric_calculators import BasicMetricsPlusMSE
from pytesmo.validation_framework.temporal_matchers import BasicTemporalMatching
from pytesmo.validation_framework.data_manager import DataManager
from pytesmo.validation_framework.adapters import MaskingAdapter
from pytesmo.validation_framework.adapters import AnomalyAdapter, AnomalyClimAdapter
from validation_tool import app
from flask import request
from flask import jsonify
from flask import make_response
from flask import render_template
png_buffer = cStringIO.StringIO()
from validation_tool.server.ismn import ismn_metadata
from validation_tool.server.ismn import variable_list
from validation_tool.server.ismn import get_station_data
from validation_tool.server.ismn import prepare_station_interface
from validation_tool.server.ismn import get_station_lonlat
from validation_tool.server.ismn import get_station_first_sm_layer
from validation_tool.server.ismn import get_station_start_end
from validation_tool.server.data_request import get_validation_ds_dict
from validation_tool.server.data_request import get_validation_metadata
from validation_tool.server.data_request import get_masking_metadata
from validation_tool.server.data_request import get_masking_data
from validation_tool.server.data_request import get_masking_ds_dict
@app.route('/')
def validation_tool():
    """Render the landing page (index.html) of the validation tool."""
    # Validation is only offered when at least one validation dataset
    # is configured.
    activate_validation = len(app.config['VALIDATION_DS']) > 0
    return render_template('index.html',
                           validation=activate_validation,
                           dataset_name="CCI SM",
                           scaling_options=app.config['SCALING_OPTIONS'],
                           default_scaling=app.config['DEFAULT_SCALING'],
                           val_ds=get_validation_metadata(),
                           masking_ds=get_masking_metadata(),
                           default_val_ds=app.config['DEFAULT_VAL_DS'])
@app.route('/getoptions')
def getoptions():
    """Send the available scaling options, the default scaling method and
    the available validation datasets to the client as JSON."""
    payload = {'scaling': app.config['SCALING_OPTIONS'],
               'default_scaling': app.config['DEFAULT_SCALING'],
               'validation_datasets': get_validation_metadata()}
    response = make_response(jsonify(payload))
    # Allow cross-origin requests from any host.
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
@app.route('/getlatlon')
def getlatlon():
    """
    Placeholder endpoint for point queries; currently returns an empty
    JSON object.

    Parameters
    ----------
    lat: float
        latitude of point
    lon: float
        longitude of point
    """
    # Bug fix: the original body started with a dead `pass` statement,
    # which turned the intended docstring into a discarded string literal.
    # The coordinates are parsed (raising on missing/invalid input, as
    # before) but not otherwise used yet.
    lat = float(request.args.get('lat'))
    lon = float(request.args.get('lon'))
    resp = make_response(jsonify({}))
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
@app.route('/getdata')
def getdata():
    """
    Handle the /getdata GET request: run a pytesmo validation of the
    configured datasets against one ISMN station and return the matched
    time series, statistics and applied settings as a JSON response.

    Parameters (query string)
    -------------------------
    station_id: int
        id of station in database
    scaling: string
        chosen scaling method; 'noscale' disables scaling
    masking_ds[]: list of str
        ids of the masking datasets to apply
    masking_op[]: list of str
        comparison operator per masking dataset
    masking_values[]: list of float
        threshold value per masking dataset
    anomaly: string
        'climatology', 'average' or 'none'
    """
    # --- parse request arguments --------------------------------------
    station_id = request.args.get('station_id')
    scaling = request.args.get('scaling')
    if scaling == 'noscale':
        scaling = None
    masking_ids = request.args.getlist('masking_ds[]')
    masking_ops = request.args.getlist('masking_op[]')
    masking_values = request.args.getlist('masking_values[]')
    masking_values = [float(x) for x in masking_values]
    anomaly = request.args.get('anomaly')
    if anomaly == 'none':
        anomaly = None
    # --- station metadata: first soil-moisture layer, location, period
    (depth_from,
     depth_to,
     sensor_id) = get_station_first_sm_layer(app.config['ISMN_PATH'],
                                             station_id)
    lon, lat = get_station_lonlat(app.config['ISMN_PATH'],
                                  station_id)
    start, end = get_station_start_end(app.config['ISMN_PATH'],
                                       station_id, "soil moisture",
                                       depth_from, depth_to)
    period = [start, end]
    # --- masking datasets ---------------------------------------------
    masking_data = {'labels': [], 'data': []}
    masking_meta = get_masking_metadata()
    masking_masked_dict = None
    if len(masking_ids) > 0:
        # prepare masking datasets
        masking_ds_dict = get_masking_ds_dict(masking_ids)
        masking_masked_dict = {}
        for masking_ds, masking_op, masking_value in zip(masking_ids,
                                                         masking_ops,
                                                         masking_values):
            # Shallow-copy the dataset entry and wrap its reader class in
            # a MaskingAdapter applying (operator, threshold).
            masking_masked_dict[masking_ds] = dict(masking_ds_dict[masking_ds])
            new_cls = MaskingAdapter(masking_masked_dict[masking_ds]['class'],
                                     masking_op,
                                     masking_value)
            masking_masked_dict[masking_ds]['class'] = new_cls
        # use DataManager for reading masking datasets
        masking_dm = DataManager(masking_ds_dict, masking_ids[0],
                                 period=period)
        masking_data = {}
        valid_masking_ids = []
        for mds in masking_ids:
            mdata = masking_dm.read_ds(mds, lon, lat)
            if mdata is not None:
                masking_data[mds] = mdata
                valid_masking_ids.append(mds)
            else:
                # Keep an empty frame so the dataset id stays in the dict.
                masking_data[mds] = pd.DataFrame()
        if len(valid_masking_ids) > 1:
            # Temporally match all masking datasets against the first one.
            masking_data = BasicTemporalMatching(window=1.0).combinatory_matcher(
                masking_data, masking_ids[0], n=len(masking_ids))
            if len(masking_data) > 0:
                # NOTE(review): dict.keys()[0] works only on Python 2; on
                # Python 3 this needs list(masking_data.keys())[0].
                labels, values = masking_data[
                    masking_data.keys()[0]].to_dygraph_format()
        elif len(valid_masking_ids) == 1:
            masking_data = masking_data[valid_masking_ids[0]]
            labels, values = masking_data.to_dygraph_format()
        else:
            labels = [None]
            values = None
        # Replace variable names in the labels by the datasets' long names.
        for i, label in enumerate(labels):
            for mid in masking_meta:
                if masking_meta[mid]['variable']['name'] in label:
                    labels[i] = masking_meta[mid]['long_name']
        masking_data = {'labels': labels, 'data': values}
    # --- build validation dataset dict with ISMN as reference ---------
    ismn_iface = prepare_station_interface(app.config['ISMN_PATH'],
                                           station_id,
                                           "soil moisture",
                                           depth_from, depth_to, sensor_id)
    validation_ds_dict = get_validation_ds_dict()
    validation_ds_dict.update({'ISMN': {'class': ismn_iface,
                                        'columns': ['soil moisture']}})
    if anomaly is not None:
        # Wrap every reader so anomalies (w.r.t. climatology or a moving
        # average) are compared instead of absolute values.
        adapter = {'climatology': AnomalyClimAdapter,
                   'average': AnomalyAdapter}
        for dataset in validation_ds_dict:
            validation_ds_dict[dataset]['class'] = adapter[
                anomaly](validation_ds_dict[dataset]['class'],
                         columns=validation_ds_dict[dataset]['columns'])
    # --- run the validation -------------------------------------------
    mcalc = BasicMetricsPlusMSE(other_name='k1',
                                calc_tau=True).calc_metrics
    process = Validation(validation_ds_dict, 'ISMN',
                         temporal_ref='cci',
                         scaling=scaling,
                         metrics_calculators={(2, 2): mcalc},
                         masking_datasets=masking_masked_dict,
                         period=period,
                         temporal_window=1)
    df_dict = process.data_manager.get_data(1,
                                            lon,
                                            lat)
    matched_data, result, used_data = process.perform_validation(
        df_dict, (1, lon, lat))
    # Only one dataset combination is validated, so take the first key.
    res_key = list(result)[0]
    data = used_data[res_key]
    result = result[res_key][0]
    # rename data to original names
    rename_dict = {}
    f = lambda x: "k{}".format(x) if x > 0 else 'ref'
    for i, r in enumerate(res_key):
        rename_dict[f(i)] = " ".join(r)
    data.rename(columns=rename_dict, inplace=True)
    labels, values = data.to_dygraph_format()
    validation_datasets = {'labels': labels, 'data': values}
    # Pre-format the metric values as strings for client-side display.
    statistics = {'kendall': {'v': '%.2f' % result['tau'], 'p': '%.4f' % result['p_tau']},
                  'spearman': {'v': '%.2f' % result['rho'], 'p': '%.4f' % result['p_rho']},
                  'pearson': {'v': '%.2f' % result['R'], 'p': '%.4f' % result['p_R']},
                  'bias': '%.4f' % result['BIAS'],
                  'rmsd': {'rmsd': '%.4f' % np.sqrt(result['mse']),
                           'rmsd_corr': '%.4f' % np.sqrt(result['mse_corr']),
                           'rmsd_bias': '%.4f' % np.sqrt(result['mse_bias']),
                           'rmsd_var': '%.4f' % np.sqrt(result['mse_var'])},
                  'mse': {'mse': '%.4f' % result['mse'],
                          'mse_corr': '%.4f' % result['mse_corr'],
                          'mse_bias': '%.4f' % result['mse_bias'],
                          'mse_var': '%.4f' % result['mse_var']}}
    scaling_options = {'noscale': 'No scaling',
                       'porosity': 'Scale using porosity',
                       'linreg': 'Linear Regression',
                       'mean_std': 'Mean - standard deviation',
                       'min_max': 'Minimum,maximum',
                       'lin_cdf_match': 'Piecewise <br> linear CDF matching',
                       'cdf_match': 'CDF matching'}
    if scaling is None:
        scaling = 'noscale'
    # Echo the applied masking settings back to the client.
    masking_option_return = {}
    for mid, mops, mval in zip(masking_ids,
                               masking_ops,
                               masking_values):
        masking_option_return[mid] = {'op': mops,
                                      'val': mval,
                                      'name': masking_meta[mid]['long_name']}
    settings = {'scaling': scaling_options[scaling],
                'masking': masking_option_return}
    output_data = {'validation_data': validation_datasets, 'masking_data': masking_data,
                   'statistics': statistics, 'settings': settings}
    # NOTE(review): status is hard-coded to 1, so the error branch below
    # is unreachable dead code.
    status = 1
    if status == -1:
        data = 'Error'
    else:
        data = jsonify(output_data)
    resp = make_response(data)
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
@app.route('/get_station_details')
def get_station_details():
    """Return the station details of the used ISMN data as JSON.

    The metadata is generated on the first request and cached as a JSON
    file in the ISMN data path; later requests read the cached file.
    """
    ismn_path = app.config['ISMN_PATH']
    metadata_file = os.path.join(ismn_path, 'ismn_metadata.json')
    # Build the metadata cache if it does not exist yet.
    if not os.path.exists(metadata_file):
        with open(metadata_file, 'w') as out:
            json.dump(ismn_metadata(ismn_path), out)
    with open(metadata_file, 'r') as src:
        body = jsonify(json.load(src))
    response = make_response(body)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
@app.route('/dataviewer_get_variable_list')
def get_variable_list():
    """Return the list of variables available for a given station id."""
    station = request.args.get('station_id')
    payload = jsonify(variable_list(app.config['ISMN_PATH'], station))
    response = make_response(payload)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
@app.route('/dataviewer_load_possible_timestamps')
def get_possible_timestamps():
    """
    Get possible timestamps between `start` and `end` (query args).

    This defaults to returning hourly timestamps at the moment.
    """
    # station_id is read but not used yet; kept for API symmetry with the
    # other dataviewer endpoints.
    stationname = request.args.get('station_id')
    start = request.args.get('start')
    end = request.args.get('end')
    dr = pd.date_range(start, end, freq='H').to_pydatetime()
    # Use a list comprehension instead of bare map(): on Python 3 map()
    # returns a lazy iterator, which jsonify cannot serialize; the
    # comprehension yields a list on both Python 2 and 3.
    dates = [d.strftime('%Y-%m-%dT%H:00:00.000Z') for d in dr]
    resp = make_response(jsonify(dates=dates))
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
@app.route('/dataviewer_load_variable')
def get_ismn_variable():
    """Return the time series of one ISMN variable as JSON (split orient,
    ISO dates)."""
    args = request.args
    # start/end are sent by the client but not used by get_station_data.
    start = args.get('start')
    end = args.get('end')
    ts = get_station_data(app.config['ISMN_PATH'],
                          args.get('station_id'),
                          args.get('variable'),
                          args.get('depth_from'),
                          args.get('depth_to'),
                          args.get('sensor_id'))
    response = make_response(ts.to_json(orient='split',
                                        date_format='iso'))
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
|
{"hexsha": "f8a8705ccb0a16fa4bf609cb498f101e0c17de9f", "size": 13318, "ext": "py", "lang": "Python", "max_stars_repo_path": "validation_tool/views.py", "max_stars_repo_name": "TUW-GEO/web-validation-tool", "max_stars_repo_head_hexsha": "e73faeeda0a5abe4366f1dd39c77d2e63d8bae93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "validation_tool/views.py", "max_issues_repo_name": "TUW-GEO/web-validation-tool", "max_issues_repo_head_hexsha": "e73faeeda0a5abe4366f1dd39c77d2e63d8bae93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "validation_tool/views.py", "max_forks_repo_name": "TUW-GEO/web-validation-tool", "max_forks_repo_head_hexsha": "e73faeeda0a5abe4366f1dd39c77d2e63d8bae93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0974930362, "max_line_length": 91, "alphanum_fraction": 0.5949091455, "include": true, "reason": "import numpy", "num_tokens": 2820}
|
# attr_list = []
# for func in (atomics..., contour)
# Typ = to_type(func)
# attr = keys(default_theme(nothing, Typ))
# push!(attr_list, attr...)
# end
# attr_list = string.(sort!(unique(attr_list)))
# # filter out fxaa attribute
# attr_list = filter!(x -> x ≠ "fxaa", attr_list)
"""
    plot_attr_desc

Maps plot attribute names (as `Symbol`s) to human-readable description
strings for the supported plot attributes.
"""
const plot_attr_desc = Dict(
    :absorption => "Float32. Sets the absorption value for `volume` plots.",
    :algorithm => "Algorithm to be used for `volume` plots. Can be one of `:iso`, `:absorption`, `:mip`, `:absorptionrgba`, or `:indexedabsorption`.",
    :align => "`(:pos, :pos)`. Specify the text alignment, where `:pos` can be `:left`, `:center`, or `:right`.",
    :alpha => "Float in [0,1]. The alpha value (transparency).",
    :color => "Symbol or Colorant. The color of the main plot element (markers, lines, etc.). Can be a color symbol/string like :red, or a Colorant.",
    :colormap => "The color map of the main plot. Call available_gradients() to see what gradients are available. Can also be used with any Vector{<: Colorant}, or e.g. [:red, :black]",
    :colorrange => "A tuple `(min, max)` where `min` and `max` specify the data range to be used for indexing the colormap. E.g. color = [-2, 4] with colorrange = (-2, 4) will map to the lowest and highest color value of the colormap.",
    :fillrange => "Bool. Toggles range filling in `contour` plots.",
    :font => "String. Specifies the font, and can choose any font available on the system.",
    :glowcolor => "Color Type. Color of the marker glow (outside the border) in `scatter` plots.",
    :glowwidth => "Number. Width of the marker glow in `scatter` plots.",
    :image => "The image to be plotted on the plot.",
    :interpolate => "Bool. For `heatmap` and `images`. Toggles color interpolation between nearby pixels.",
    :isorange => "Float32. Sets the isorange for `volume` plots.",
    :isovalue => "Float32. Sets the isovalue for `volume` plots.",
    :levels => "Integer. Number of levels for a `contour`-type plot.",
    :linestyle => "Symbol. Style of the line (for `line` and `linesegments` plots). Available styles are `:dash`, `:dot`, `:dashdot`, and `:dashdotdot`. You can also supply an array describing the length of each gap/fill.",
    :linewidth => "Number. Width of the line in `line` and `linesegments` plots.",
    :marker => "Symbol, Shape, or AbstractVector.",
    :marker_offset => "Array of `GeometryTypes.Point`'s. Specifies the offset coordinates for the markers. See the [Marker offset](@ref) example.",
    :markersize => "Number or AbstractVector. Specifies size (radius pixels) of the markers.",
    :position => "NTuple{2,Float}, `(x, y)`. Specify the coordinates to position text at.",
    :rotation => "Float32. Specifies the rotation in radians.",
    :rotations => "AbstractVector{Float32}. Similar to `:rotation`, except it specifies the rotations for each element in the plot.",
    :shading => "Bool. Specifies if shading should be on or not (for meshes).",
    :strokecolor => "Color Type. Color of the marker stroke (border).",
    :strokewidth => "Number. Width of the marker stroke (in pixels).",
    :textsize => "Integer. Font pointsize for text.",
    :transformation => "`(:plane, location)`. Transforms the `:plane` to the specified location. Possible `:plane`'s are `:xy`, `:yz`, and `:xz`.",
    :visible => "Bool. Toggle visibility of plot."
)
|
{"hexsha": "b5baca7c80c39299654bec8fc93286f3e7561ee2", "size": 3375, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/plot_attr_desc.jl", "max_stars_repo_name": "MaximeRivest/Makie.jl", "max_stars_repo_head_hexsha": "331f183c024b031a1ec425a4ccb3c25583f130b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/plot_attr_desc.jl", "max_issues_repo_name": "MaximeRivest/Makie.jl", "max_issues_repo_head_hexsha": "331f183c024b031a1ec425a4ccb3c25583f130b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_attr_desc.jl", "max_forks_repo_name": "MaximeRivest/Makie.jl", "max_forks_repo_head_hexsha": "331f183c024b031a1ec425a4ccb3c25583f130b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 78.488372093, "max_line_length": 236, "alphanum_fraction": 0.6708148148, "num_tokens": 909}
|
import json
import csv
from music21 import note, chord, stream, instrument
import numpy as np
from model import lstm_model
# Length of the input sequence fed to the LSTM for each prediction step.
time_steps = 32
# load notes dict (token -> integer id mapping created during training)
with open('notes.json', 'r') as file:
    notes_dict = json.load(file)
############
# Load music data from csv produced by mid_to_csv.py
with open('mozart.csv', newline='') as file:
    reader = csv.reader(file)
    notes_array = list(reader)
notes_array = list(map(lambda x: x + [""], notes_array)) # empty string to represent the end
# NOTE(review): unique_notes and max_length are computed but never used
# below — presumably copied from the training script; confirm before removing.
unique_notes = list(set(sum(notes_array ,[])))
max_length = np.max(list(map(lambda x: len(x), notes_array))) + 10
# save notes dict for prediction
# NOTE(review): this re-saves notes_dict unchanged (it was just loaded
# from the same file) — looks like a leftover; confirm.
with open('notes.json', 'w') as file:
    json.dump(notes_dict, file)
# Map every token to its integer id via the notes dict.
notes_array = [np.vectorize(notes_dict.get)(x) for x in notes_array]
# Build (input sequence, next token) pairs; only x[1] is actually used
# below as the generation seed.  (The comprehension variable x does not
# leak into the outer list x on Python 3.)
x, y = [], []
for notes in notes_array:
    for i in range(0, len(notes) - time_steps, 1):
        # Preparing input and output sequences
        input = [[x] for x in notes[i:i + time_steps]]
        output = notes[i + time_steps]
        x.append(input)
        y.append(output)
x = np.array(x)
y = np.array(y)
#################
# Seed generation with the second training sequence.
random_music = x[1]
# random_music = np.random.randint(1, len(notes_dict), size=(time_steps, 1))
model = lstm_model(len(notes_dict))
model.build((None, time_steps, 1))
model.summary()
model.load_weights('best_model.h5')
# Autoregressively generate up to 100 tokens; token id 0 (the "" padding
# token) marks the end of the piece.
predictions = []
y_pred = 1
for i in range(100):
    prob = model.predict(random_music, batch_size=1)[0]
    y_pred = np.argmax(prob,axis=0)
    predictions.append(y_pred)
    if y_pred == 0:
        break
    # Slide the window: drop the oldest step, append the new prediction.
    random_music = np.concatenate((random_music[1:, :], [[y_pred]]), axis=0)
# Translate ids back to note tokens.
# NOTE(review): this assumes the dict's key order matches the integer ids
# stored in its values — confirm; an explicit reverse lookup would be safer.
predictions = [list(notes_dict.keys())[i] for i in predictions]
def convert_to_midi(prediction_output):
    """Write a sequence of note/chord tokens to 'music.mid'.

    A token containing '.' or consisting only of digits is treated as a
    chord of dot-separated pitch numbers (e.g. '60.64.67'); any other
    token is treated as a single note name.  Each pattern is placed one
    offset unit after the previous one so notes do not stack.
    """
    output_notes = []
    for offset, pattern in enumerate(prediction_output):
        if ('.' in pattern) or pattern.isdigit():
            # Chord: build a Note for every dot-separated pitch number.
            chord_members = []
            for pitch_token in pattern.split('.'):
                member = note.Note(int(pitch_token))
                member.storedInstrument = instrument.Piano()
                chord_members.append(member)
            new_chord = chord.Chord(chord_members)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            # Single note identified by its name.
            single = note.Note(pattern)
            single.offset = offset
            single.storedInstrument = instrument.Piano()
            output_notes.append(single)
    stream.Stream(output_notes).write('midi', fp='music.mid')
# Render the generated token sequence to 'music.mid'.
convert_to_midi(predictions)
|
{"hexsha": "21f24528d701542aeb535d3b7fb4c72051cf6d10", "size": 2932, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate.py", "max_stars_repo_name": "g95wang/music-generator", "max_stars_repo_head_hexsha": "528ce6d6dfd1c5aac15749f9f4eac8735078ab46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate.py", "max_issues_repo_name": "g95wang/music-generator", "max_issues_repo_head_hexsha": "528ce6d6dfd1c5aac15749f9f4eac8735078ab46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate.py", "max_forks_repo_name": "g95wang/music-generator", "max_forks_repo_head_hexsha": "528ce6d6dfd1c5aac15749f9f4eac8735078ab46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6603773585, "max_line_length": 92, "alphanum_fraction": 0.6169849932, "include": true, "reason": "import numpy", "num_tokens": 694}
|
Require Utf8.
Require Import S00_setoid_basics S01_Wty S02_PolyFun S10_Wstd_Obj
S30_IdType S31_DiscStd S32_IdWty S33_PtwEq S34_Eqvr.
(* The setoid of extensional trees is isomorphic to
the subsetoid of pointwise equal trees on the equivariant ones. *)
Section Extensional_as_Equivariant.
(* Throughout: X is a set, B a setoid family over the discrete setoid on X,
   and isdscrsf witnesses that B is a discrete setoid family. *)
Context {X : Set}{B : setoidfamily (dscrStd X)}(isdscrsf : isDscrStdFam B).
(* Lemma 5.6(i) *)
(* Trees related by WstdPER_aux (extensional equality) are pointwise equal. *)
Lemma WPER2EqWty_aux : ∀ z, WstdPER_aux B z → EqWty (|B|) (fst z) (snd z).
Proof.
intros z r. induction r as [[w w'] p br IH]. simpl in p. simpl in br. simpl in IH. simpl.
apply supd with (i := (w , w'))
(a := p).
intros [[b b'] q]. simpl. simpl in *|-.
(* Transport b along p using discreteness, then combine with q to get a
   branch pair to which the induction hypothesis applies. *)
set (l := projT1 (trspDscrSF isdscrsf p b)).
set (qq := projT2 (trspDscrSF isdscrsf p b) ⊙ᵐ q).
set (qqq := snd (isdscrsf _ (B • p b) b') (existT _ l qq)).
set (H := existT _ (b , b') qqq : TotStdfamEq B p).
apply (IH H).
Qed.
(* Curried form of the previous lemma, stated on a pair of trees. *)
Lemma WPER2EqWty {w w' : Wty (|B|)}
: WstdPER B w w' → EqWty (|B|) w w'.
Proof.
intro r. apply (WPER2EqWty_aux (w , w') r).
Qed.
(* Lemma 5.6(ii) *)
(* Extensional trees (WstdPER-reflexive) are equivariant. *)
Lemma ExtIsEqvr {w : Wty (|B|)}
: WstdPER B w w → isEquivariant (|B|) w.
Proof.
induction w as [a f IH]. set (w := sup (|B|) a f).
intro wext. set (u := (existT (λ x, WstdPER B x x) w wext) : Wstd B).
refine (supd _ _ _ _ _).
intros l b. cbn. cbn in *|-.
apply WPER2EqWty.
apply (WstdPER_ist wext). apply setoidfamilyrefgeneral_tactical.
apply isdscrsf.
exists (l ⁻¹). apply (transpIsEqv2 (|B|) l).
intro b. specialize (IH b (ist u ↱ setoidrefl (B a) b)).
apply IH.
Qed.
(* Underlying function of the map from extensional trees to equivariant
   pointwise-equal trees. *)
Definition jmap_fnc : Wstd B → EqvrTrees (|B|)
:= λ u, existT _ (projT1 u) (ExtIsEqvr (projT2 u)).
(* jmap_fnc packaged as a setoid map. *)
Definition jmap : Wstd B ⇒ EqvrTrees (|B|).
apply (Build_setoidmap (Wstd B) (EqvrTrees (|B|)) jmap_fnc).
intros u u' r. apply (WPER2EqWty r).
Defined.
(* Lemma 5.7 *)
(* Conversely: equivariant, pointwise equal trees are extensionally equal. *)
Lemma eqvrEqWty2WPER {w : Wty (|B|)}(E : isEquivariant (|B|) w)
: ∀ w', isEquivariant (|B|) w'
→ EqWty (|B|) w w' → WstdPER B w w'.
Proof.
induction E as [w eqvr br IH]. intros w' E' eq.
set (p := nodeEqWty eq).
apply (isMatch (A := dscrStd X) p).
intros b b' q. cbn in *|-.
specialize (IH b (Wty_ist w' b')).
apply IH. apply (DWty_ist _ _ w' E' b').
(* Chain of EqWty steps relating the two subtrees via the transports l
   and l' obtained from discreteness. *)
set (l := projT1 (trspDscrSF isdscrsf p b)).
set (rr := Id2EqWty (|B|) (ap (f := Wty_ist w') (projT2 (trspDscrSF isdscrsf p b)) ⁻¹)).
(* : EqWty (|B|) (Wty_ist (|B|) w' (|B|•ᵐp b)) (Wty_ist (|B|) w' (|B|•ᵐl (B•p b)))*)
set (l' := projT1 (fst (isdscrsf _ (B•p b) b') q)).
set (rr' := Id2EqWty (|B|) (ap (f := Wty_ist w') (projT2 (fst (isdscrsf _ (B•p b) b') q)))).
(* : Wty_ist (|B|) w' (|B|•ᵐl' (B•p b)) == Wty_ist (|B|) w' b'*)
cbn in *|-.
refine (EqWty_trans _ _ _ (subtEqWty eq rfl) _).
refine (EqWty_trans _ _ _ rr _).
refine (EqWty_trans _ _ _ (eqvrBr E' l (B•p b)) _).
apply (EqWty_trans _ _ _ (EqWty_symm _ _ (eqvrBr E' l' (B•p b))) rr').
Qed.
(* Underlying function of the inverse map: an equivariant tree is
   extensional by Lemma 5.7 applied to its pointwise reflexivity. *)
Definition jinv_fnc : EqvrTrees (|B|) → Wstd B
:= λ ew, existT (λ w, WstdPER B w w)
(projT1 ew)
(eqvrEqWty2WPER (projT2 ew) (projT1 ew) (projT2 ew)
(setoidrefl (PtwTrees (|B|)) (projT1 ew))).
(* jinv_fnc packaged as a setoid map. *)
Definition jinv : EqvrTrees (|B|) ⇒ Wstd B.
apply (Build_setoidmap (EqvrTrees (|B|)) (Wstd B) jinv_fnc).
intros ew ew' ptw. apply (eqvrEqWty2WPER (projT2 ew) (projT1 ew') (projT2 ew') ptw).
Defined.
(* Theorem 5.8 *)
(* jmap and jinv are mutually inverse: both composites are the identity
   (both maps leave the underlying tree unchanged). *)
Theorem jiso : jmap ∘ jinv ≈ idmap ∧ jinv ∘ jmap ≈ idmap.
Proof.
split.
intro ew. apply (setoidrefl (EqvrTrees (|B|)) ew).
intro u. apply (setoidrefl (Wstd B) u).
Qed.
End Extensional_as_Equivariant.
|
{"author": "j-emmen", "repo": "W-types-in-setoids", "sha": "d1d8028217532c77308227453fa5be0867407120", "save_path": "github-repos/coq/j-emmen-W-types-in-setoids", "path": "github-repos/coq/j-emmen-W-types-in-setoids/W-types-in-setoids-d1d8028217532c77308227453fa5be0867407120/S35_freeWstd.v"}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.