text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
from dataclasses import dataclass
from abc import ABC, abstractmethod
from schema import Schema
from typing import Dict, Any, Type, List, Sequence
import numpy as np
REGISTERED_TRANSFORM_CLASSES = {}
class Transform(ABC):
@property
def name(self) -> str:
return type(self).__name__
@property
@abstractmethod
def input_schema(self) -> Schema:
raise NotImplementedError
def dry_run(self, items: Dict[str, Any]):
self.input_schema.validate(items)
return self._dry_run(items)
def __call__(self, items: Dict[str, Any]):
self.input_schema.validate(items)
return self.forward(items)
@abstractmethod
def _dry_run(self, items: Dict[str, Any]):
raise NotImplementedError
@abstractmethod
def forward(self, items: Dict[str, Any]):
raise NotImplementedError
@classmethod
@abstractmethod
def from_dict(cls, d: Dict[str, Any]):
raise NotImplementedError
@abstractmethod
def to_dict(self) -> Dict[str, Any]:
raise NotImplementedError
def register_transform(cls: Type[Transform]):
global REGISTERED_TRANSFORM_CLASSES
name = cls.__name__
assert name not in REGISTERED_TRANSFORM_CLASSES, f"exists class: {REGISTERED_TRANSFORM_CLASSES}"
REGISTERED_TRANSFORM_CLASSES[name] = cls
return cls
def get_transform(name: str) -> Type[Transform]:
global REGISTERED_TRANSFORM_CLASSES
assert name in REGISTERED_TRANSFORM_CLASSES, f"available class: {REGISTERED_TRANSFORM_CLASSES}"
return REGISTERED_TRANSFORM_CLASSES[name]
def is_valid_points(x: Any) -> bool:
return isinstance(x, np.ndarray) and x.ndim == 2 and x.shape[1] == 3
@dataclass
@register_transform
class Compose(Transform):
transforms: List[Transform]
def forward(self, items: Dict[str, Any]):
for t in self.transforms:
items = t(items)
if items is None:
print(f"Transform {t.name} returned None")
return None
return items
def _dry_run(self, items: Dict[str, Any]):
for t in self.transforms:
items = t.dry_run(items)
if items is None:
return None
return items
@property
def input_schema(self) -> Schema:
if len(self.transforms) > 0:
return self.transforms[0].input_schema
else:
return Schema({}, ignore_extra_keys=True)
@classmethod
def from_dict(cls, d: Dict[str, Any]):
transforms = []
for name, kwargs in d.items():
transforms.append(get_transform(name).from_dict(kwargs))
# noinspection PyArgumentList
return cls(transforms)
def to_dict(self) -> Dict[str, Any]:
d = {}
for t in self.transforms:
d[t.name] = t.to_dict()
return d
@dataclass
class NumpyTransform(Transform, ABC):
@classmethod
def from_dict(cls, d: Dict[str, Any]):
kwargs = {}
for name, param in d.items():
if isinstance(param, Sequence):
kwargs[name] = np.array(param)
else:
kwargs[name] = param
# noinspection PyArgumentList
return cls(**kwargs)
def to_dict(self) -> Dict[str, Any]:
d = {}
for name, param in self.__dict__.items():
if isinstance(param, np.ndarray) or isinstance(param, np.number):
d[name] = param.tolist()
else:
d[name] = param
return d
|
{"hexsha": "2bf2d2ef9acf3cdb7da2e88e650515fc8ed42318", "size": 3521, "ext": "py", "lang": "Python", "max_stars_repo_path": "datafeed/transforms.py", "max_stars_repo_name": "jacobbieker/3dml", "max_stars_repo_head_hexsha": "f4b0e49343a18b4935c1502112e7bef0ff448986", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-14T15:11:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T15:11:34.000Z", "max_issues_repo_path": "datafeed/transforms.py", "max_issues_repo_name": "jacobbieker/3dml", "max_issues_repo_head_hexsha": "f4b0e49343a18b4935c1502112e7bef0ff448986", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-07-01T19:28:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-01T19:32:57.000Z", "max_forks_repo_path": "datafeed/transforms.py", "max_forks_repo_name": "jacobbieker/3dml", "max_forks_repo_head_hexsha": "f4b0e49343a18b4935c1502112e7bef0ff448986", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7244094488, "max_line_length": 100, "alphanum_fraction": 0.6273785856, "include": true, "reason": "import numpy", "num_tokens": 769}
|
import random
import numpy as np
import os
import torch
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + "/agent.pkl")
def act(self, state):
with torch.no_grad():
state = torch.tensor(np.array(state)).float()
a, _, _ = self.model.act(state)
return a
def reset(self):
pass
|
{"hexsha": "be20fd409dd26815109d1dfc70dd8a0ad753a48c", "size": 380, "ext": "py", "lang": "Python", "max_stars_repo_path": "PPO/agent.py", "max_stars_repo_name": "mahkons/RL-algorithms", "max_stars_repo_head_hexsha": "bc5da6734263184e6229d34cd68f092feb94e9a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PPO/agent.py", "max_issues_repo_name": "mahkons/RL-algorithms", "max_issues_repo_head_hexsha": "bc5da6734263184e6229d34cd68f092feb94e9a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PPO/agent.py", "max_forks_repo_name": "mahkons/RL-algorithms", "max_forks_repo_head_hexsha": "bc5da6734263184e6229d34cd68f092feb94e9a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.0, "max_line_length": 61, "alphanum_fraction": 0.5605263158, "include": true, "reason": "import numpy", "num_tokens": 89}
|
# load the data for time-series
import numpy as np
from scipy import signal
from load_time_series import load_data
np.random.seed(231)
x = np.array([1, 2, 3, 4])
# print("train_set_x[0]: ", x)
print("len of x: ", len(x))
filter_size = 2
corr_filter = np.array([1, 2])
standard_corr = signal.correlate(x, corr_filter, 'valid')
print("len of standard corr: ", len(standard_corr))
# print("standard_corr:", standard_corr)
def fft_cross_correlation(x, output_len, preserve_energy_rate=0.95):
xfft = np.fft.fft(x)
squared_abs = np.abs(xfft) ** 2
full_energy = np.sum(squared_abs)
current_energy = 0.0
preserve_energy = full_energy * preserve_energy_rate
index = 0
while current_energy < preserve_energy and index < len(squared_abs):
current_energy += squared_abs[index]
index += 1
xfft = xfft[:index]
filterfft = np.conj(np.fft.fft(corr_filter, len(xfft)))
# element-wise multiplication in the frequency domain
out = xfft * filterfft
# take the inverse of the output from the frequency domain and return the modules of the complex numbers
out = np.fft.ifft(out)
output = np.array(out, np.double)
# output = np.absolute(out)
output = output[:output_len]
return output
# print("output of cross-correlation via fft: ", output)
output = fft_cross_correlation(x, len(standard_corr), preserve_energy_rate=0.95)
print("is the fft cross_correlation correct: ", np.allclose(output, standard_corr, atol=1e-12))
print("absolute error: ", np.sum(np.abs(output - standard_corr)))
|
{"hexsha": "32b358bf07ed7c32a04000018d897c690e012c2a", "size": 1557, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnns/nnlib/test/CorrDirectFFTReduceEnergySimple.py", "max_stars_repo_name": "adam-dziedzic/time-series-ml", "max_stars_repo_head_hexsha": "81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-25T13:19:46.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-25T13:19:46.000Z", "max_issues_repo_path": "cnns/nnlib/test/CorrDirectFFTReduceEnergySimple.py", "max_issues_repo_name": "adam-dziedzic/time-series-ml", "max_issues_repo_head_hexsha": "81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cnns/nnlib/test/CorrDirectFFTReduceEnergySimple.py", "max_forks_repo_name": "adam-dziedzic/time-series-ml", "max_forks_repo_head_hexsha": "81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7755102041, "max_line_length": 108, "alphanum_fraction": 0.7071290944, "include": true, "reason": "import numpy,from scipy", "num_tokens": 411}
|
[STATEMENT]
lemma continuous_blinfun_matrix:
fixes f:: "'b::t2_space \<Rightarrow> 'a::real_normed_vector \<Rightarrow>\<^sub>L 'c::real_inner"
assumes "continuous F f"
shows "continuous F (\<lambda>x. (f x) j \<bullet> i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous F (\<lambda>x. blinfun_apply (f x) j \<bullet> i)
[PROOF STEP]
by (rule bounded_linear.continuous[OF bounded_linear_blinfun_matrix assms])
|
{"llama_tokens": 160, "file": null, "length": 1}
|
function [P,N,check]=plane_intersect(N1,A1,N2,A2)
%plane_intersect computes the intersection of two planes(if any)
% Inputs:
% N1: normal vector to Plane 1
% A1: any point that belongs to Plane 1
% N2: normal vector to Plane 2
% A2: any point that belongs to Plane 2
%
%Outputs:
% P is a point that lies on the interection straight line.
% N is the direction vector of the straight line
% check is an integer (0:Plane 1 and Plane 2 are parallel'
% 1:Plane 1 and Plane 2 coincide
% 2:Plane 1 and Plane 2 intersect)
%
% Example:
% Determine the intersection of these two planes:
% 2x - 5y + 3z = 12 and 3x + 4y - 3z = 6
% The first plane is represented by the normal vector N1=[2 -5 3]
% and any arbitrary point that lies on the plane, ex: A1=[0 0 4]
% The second plane is represented by the normal vector N2=[3 4 -3]
% and any arbitrary point that lies on the plane, ex: A2=[0 0 -2]
%[P,N,check]=plane_intersect([2 -5 3],[0 0 4],[3 4 -3],[0 0 -2]);
%This function is written by :
% Nassim Khaled
% Wayne State University
% Research Assistant and Phd candidate
%If you have any comments or face any problems, please feel free to leave
%your comments and i will try to reply to you as fast as possible.
P=[0 0 0];
N=cross(N1,N2);
% test if the two planes are parallel
if norm(N) < 10^-7 % Plane 1 and Plane 2 are near parallel
V=A1-A2;
if (dot(N1,V) == 0)
check=1; % Plane 1 and Plane 2 coincide
return
else
check=0; %Plane 1 and Plane 2 are disjoint
return
end
end
check=2;
% Plane 1 and Plane 2 intersect in a line
%first determine max abs coordinate of cross product
maxc=find(abs(N)==max(abs(N)));
%next, to get a point on the intersection line and
%zero the max coord, and solve for the other two
d1 = -dot(N1,A1); %the constants in the Plane 1 equations
d2 = -dot(N2, A2); %the constants in the Plane 2 equations
switch maxc
case 1 % intersect with x=0
P(1)= 0;
P(2) = (d2*N1(3) - d1*N2(3))/ N(1);
P(3) = (d1*N2(2) - d2*N1(2))/ N(1);
case 2 %intersect with y=0
P(1) = (d1*N2(3) - d2*N1(3))/ N(2);
P(2) = 0;
P(3) = (d2*N1(1) - d1*N2(1))/ N(2);
case 3 %intersect with z=0
P(1) = (d2*N1(2) - d1*N2(2))/ N(3);
P(2) = (d1*N2(1) - d2*N1(1))/ N(3);
P(3) = 0;
end
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/17618-plane-intersection/plane_intersect.m"}
|
%!TEX root = labo.tex
\chapter{Single-Segment IP Networks}
What you will learn in this lab:
\begin{itemize}
\item How to capture and filter network traffic
\item How to configure a network interface for IP networking
\item How to access IP statistics and settings with the netstat command
\item How ARP works
\item How hackers snoop passwords from the network
\end{itemize}
\newpage
\setsession{prelab2}
\section{Prelab 2}\label{sec:prelab2}
\input{prelab2.tex}
\newpage
\setsession{lab2}
\section{Lab 2}\label{sec:lab2}
In Lab 2 you become acquainted with IP configuration issues on a single Ethernet segment. The lab also exposes you to advanced use of \cmd{tcpdump} and Wireshark.
\begin{figure}[h!t]
\centering
\includegraphics{graphics/lab1-network.pdf}
\caption{Network configuration for Lab 2.}
\label{fig:lab2-network}
\end{figure}
The setup for this lab is identical as in Lab 1. All Linux PCs are connected to the same Ethernet segment by an Ethernet hub as shown in Figure \ref{fig:lab2-network}.
The IP addresses for the Linux PCs are configured as shown in Table \ref{tab:lab2-ip-addresses} below. Configure \iface{eth0} for each of the PCs. e.g. for PC1 use the following command.
\begin{cmdblock}
PC1% ifconfig eth0 10.0.1.11 netmask 255.255.255.0 broadcast 10.0.255 up
\end{cmdblock}
As an alternative, you can use th \cmd{ip} command:
\begin{cmdblock}
PC1% ip addr add 10.0.1.11/24 dev eth0
PC1% ip link set dev eth0 up
\end{cmdblock}
\begin{table}[h!t]
\centering
\begin{tabular}{| c | c | c |}
\hline
\textbf{Linux PC} & \textbf{IP Addresses of Ethernet Interface eth0} \\ \hline
PC1 & 10.0.1.11/24 \\
PC2 & 10.0.1.12/24 \\
PC3 & 10.0.1.13/24 \\
PC3 & 10.0.1.14/24 \\ \hline
\end{tabular}
\caption{IPv4 addresses for Lab 2}
\label{tab:lab2-ip-addresses}
\end{table}
\newpage
\subsection{Using filters in tcpdump}
In the first part of the lab, you explore \cmd{tcpdump} in more detail. In particular, you learn how to write filter expressions so that \cmd{tcpdump} monitors only selected traffic flows on the network. See \ref{sec:prelab2} for more details on the use of filters in \cmd{tcpdump}.
\subsubsection*{Exercise 1. Writing filter expressions for tcpdump}
In this exercise, you explore the use of simple filter expressions with the \cmd{tcpdump} command. Save the output for your lab report.
\begin{enumerate}
\item On PC1, execute a \cmd{tcpdump} command with a filter that prints all packets with PC2 as source or destination. This command is the answer to Question 2 from the Prelab. Save the output of this \cmd{tcpdump} session to a file using the tee or tail commands discussed in Lab 1.\hspace*{\fill}
\boxinfo{As in Lab 1, always use the \cmd{-n} option (i.e. \cmd{tcpdump -n}) to avoid that \cmd{tcpdump} tries to resolve hostnames.}
\item In another terminal, issue a ping command to PC2 by typing \cmd{ping -c 5 10.0.1.12} on PC1 and observe the output. Recall that the ping command to a host triggers the transmission of an ICMP Echo Request. The destination host responds with an ICMP Echo Reply message.
\item Repeat steps 1 - 2 above. In addition to the existing filter, set the filter so that only ICMP messages are captured. This command is the answer to Question 3 from the Prelab.
\end{enumerate}
\boxwarning{\textbf{Make sure to include the saved data in your lab report as they are part of your evaluation!}}
\newpage
\subsection{Using filters in Wireshark}
In this part of the lab, you experiment with filter expressions using the \cmd{wireshark} command. Recall that Wireshark has two types of filters: capture filters and display filters.
\boxinfo{There are several command line options that can be assigned when starting the \cmd{wireshark} command:\hspace*{\fill}
}
\begin{itemize}
\item{\textbf{Capture Filters:}} A capture filter specifies the traffic to be captured by the Wireshark tool. A capture filter expression can be specified from the command line using the \cmd{-f} option or using the Wireshark GUI, under the ``Capture:Start'' menu. The syntax for specifying the filter expression is the same syntax as used by \cmd{tcpdump}.
\item{\textbf{Display Filters:}} By default, Wireshark displays all captured packets. With a display filter, just those packets, which meet the requirements of the filter, are displayed. The display filter cannot be set from the command line. It must be entered in the ``Filter'' window at the bottom of the GUI. The syntax for setting the display filter is different from the syntax for setting a capture filter.
\item{\textbf{Setting an interface:}} When you run Wireshark on a host with multiple network interfaces, you may specify the interface with the \cmd{-i} argument. For example, to start Wireshark to capture traffic on interface \iface{eth1}, type
\begin{cmdblock}
wireshark -i eth1
\end{cmdblock}
If you do not specify an interface, the default is \iface{eth0}. Alternatively, you can change the interface using the Wireshark GUI, under the ``Capture:Start'' menu.
\end{itemize}
\subsubsection*{Exercise 2-A. Setting capture filters in Wireshark}
This exercise is a review of the traffic capture capabilities of Wireshark. As a new feature, you are introduced to the notion of capture filters.
\begin{enumerate}
\item Start Wireshark on PC1 and set the same capture preferences as in Lab 1 and as shown again in Figure \ref{fig:lab2-capture-options} for your convenience. You should always set these same preferences for all your experiments.\par
\begin{minipage}{\linewidth}
\begin{framed}
\centering
\textbf{Selecting capture preferences in wireshark} \\
\includegraphics[width=\linewidth]{graphics/capture-options-updated.png}
\begin{itemize}
\item Select \iface{eth0} in ``Interface''.
\item Select ``Capture packets in promiscuous mode''.
\item Select ``Update list of packets in real time''.
\item Select ``Automatic scrolling in live capture''.
\item Unselect ``Enable MAC name resolution''.
\item Unselect ``Enable network name resolution''.
\item Unselect ``Enable transport name resolution''.
\end{itemize}
\end{framed}
\captionof{figure}{General capture settings for Wireshark}
\label{fig:lab2-capture-options}
\end{minipage}
\item Setting a capture filter: In the window ``Capture Preferences'', set a filter so that all packets that contain the IP address of PC2 are recorded. The filter is set in the ``Filter'' box under ``Capture Preferences'' (see Figure \ref{fig:lab2-capture-options}). The required filter expression is the answer to Question 7 from the Prelab.
\item Start the capture by clicking ``OK'' in the ``Capture Preferences'' window.
\item In another terminal window of PC1, issue a \cmd{ping} command to PC2
\begin{cmdblock}
PC1% ping -c 2 10.0.1.12
\end{cmdblock}
\item Stop the capture process of Wireshark.
\item Save the results of the capture. This is done by selecting ``Print'' in the ``File'' menu as described in Lab 1. (As instructed in Lab 1, unless asked to save the details of captured frames, selecting the summary option is usually sufficient.)
\end{enumerate}
\boxwarning{\textbf{Make sure to include the saved data in your lab report as they are part of your evaluation!}}
\subsubsection*{Exercise 2-B. Working with display filters}
Next you set display filters, which allow you to select a subset of the captured data for display in the main window of Wireshark.
\begin{enumerate}
\item In the Wireshark main window on PC1 from Exercise 2-A, set the display options as listed below. You can find the display options in the ``Capture Options'' window (select "Start" in the "Capture" menu)(see Figure \ref{fig:lab2-capture-options}):\par
\begin{itemize}
\item Select ``Update list of packets in real time''.
\item Select ``Automatic Scrolling in live capture''.
\item Unselect ``Enable MAC name resolution''.
\item Unselect ``Enable network name resolution''.
\item Unselect ``Enable transport name resolution''.
\end{itemize}
\item Setting a display filter: Type the desired display filter in the field next to the ``Filter'' box, which is located at the bottom of the Wireshark main window, as shown in Figure \ref{fig:lab2-display-filter}. Click the ``Reset'' button next to the ``Filter'' box to clear any existing filter:\par
\begin{minipage}{\linewidth}
\centering
\includegraphics[width=\linewidth]{graphics/display-filter-updated.png}
\captionof{figure}{Filter box for setting display filters.}
\label{fig:lab2-display-filter}
\end{minipage}
Enter a display filter so that all IP datagrams with destination IP address 10.0.1.12 are shown. Refer to Question 8 from the Prelab.
\item Observe the changes in the display panel of Wireshark. Only packets with 10.0.1.12 in the IP destination address field are now being displayed.
\item Save the displayed data, by selecting ``File:Print''. Note that the ``Print'' command only saves packets that are currently being displayed. If a display filter is used, the saved data is limited to the packets that match the display filter.
\item Repeat the above exercise with a display filter that lists only IP datagrams with source IP address equal to 10.0.1.12. Save the results.
\end{enumerate}
\boxwarning{\textbf{Make sure to include the saved data in your lab report as they are part of your evaluation!}}
\subsubsection{Exercise 2-C. More complex capture and display filters}
In this exercise, you learn how to use more sophisticated filters to restrict the packets being captured and displayed.
\begin{enumerate}
\item Start Wireshark on PC1 and start to capture traffic using the same settings as in Exercise 2-A. Do not set any capture or display filters!
\item From a new terminal on PC1, execute the \cmd{ping} command for PC2
\begin{cmdblock}
PC1% ping -c 5 10.0.1.12
\end{cmdblock}
\item At the same time, start a Telnet session from PC1 to PC2 in another terminal by typing
\begin{cmdblock}
PC1% telnet 10.0.1.12
\end{cmdblock}
and log in as \textit{telecomlabo}. After you logged in successfully to PC2, logout with the command \cmd{exit}.
\item Stop the traffic capture of wireshark.
\item Apply a set of display filters to the captured traffic and save the output to a text file. Select the option ``Print summary'' in the ``Print'' window.
\begin{itemize}
\item Display packets that contain ICMP messages with the IP address of PC2 either in the IP destination address or IP source address. Refer to Question 9 from the Prelab. Save the output.
\item Display packets that contain TCP traffic with the IP address of PC2 either in the IP destination address or IP source address. Refer to Question 10 from the Prelab. Save the output.
\item Display packets that, in addition to the constraints in the previous filter expression, use the port number 23. Refer to Question 10 from the Prelab. Save the output.
\end{itemize}
\end{enumerate}
\boxwarning{\textbf{Make sure to include the saved data in your lab report as they are part of your evaluation!}}
\newpage
\subsection{ARP - Address Resolution Protocol}
This part of the lab explores the operation of the Address Resolution Protocol (ARP) which resolves a MAC address for a given IP address. The lab exercises use the Linux command \cmd{arp} for displaying and manipulating the contents of the ARP cache. The ARP cache is a table that holds entries of the form \textless IP address, MAC address\textgreater.
\boxinfo{The most common uses of the \cmd{arp} command are as follows:
\begin{description}
\item[\texttt{arp -a}]\hfill \\
Displays the content of the ARP cache.
\item[\texttt{arp -d \textless IPAddress\textgreater}] \hfill \\
Deletes the entry with IP address IPAddress.
\item[\texttt{arp -s \textless IPAddress\textgreater \textless MAC\_Address\textgreater}] \hfill \\
Adds a static entry to the ARP cache which is never overwritten by network events. The MAC address is entered as a 6 hexadecimal bytes separated by colons.\\
Example: \texttt{arp -s 00:02:2D:0D:68:C1}
\end{description}
}
\boxinfo{\textbf{Time-outs in the ARP cache:}\hfill \\
The entries in an ARP cache have a limited lifetime. Entries are deleted unless they are refreshed. The typical lifetime of an ARP entry is 2 minutes, but much longer lifetimes (up to 20 minutes) have been observed. You may want to verify when your Linux system does remove ARP entries automatically after a certain amount of time.}
\boxinfo{\textbf{Refreshing the ARP cache:}\hfill \\
In Linux, you will observe that occasionally, a host sends out ARP requests to interfaces that are already in the ARP cache. Example: Suppose that a host with IP address 10.0.1.12 has an ARP cache entry: ``\textless10.0.1.11\textgreater is-at \textless00:02:83:39:2C:42\textgreater''. Then, this host occasionally sends a unicast ARP Request to MAC address 00:02:83:39:2C:42 of the form ``Who has 10.0.1.11? Tell 10.0.1.12'' to verify that the IP address 10.0.1.11 is still present before deleting the entry from the ARP cache.}
\subsubsection*{Exercise 3-A. A simple experiment with ARP}
\begin{enumerate}
\item On PC1, view the ARP cache with \cmd{arp -a} and delete all entries with the \cmd{-d} option.
\item Start Wireshark on PC1 with a capture filter set to the IP address of PC2.
\item Issue a ping command from PC1 to PC2:
\begin{cmdblock}
PC1% ping -c 2 10.0.1.12
\end{cmdblock}
Observe the ARP packets in the Wireshark window. Explore the MAC addresses in the Ethernet headers of the captured packets. Direct your attention to the following fields:
\begin{itemize}
\item The destination MAC address of the ARP Request packets.
\item The Type field in the Ethernet headers of ARP packets and ICMP messages.
\end{itemize}
\item View the ARP cache again with the command \cmd{arp -a}. Note that ARP cache entries get refreshed/ deleted fairly quickly (approx. 2 minutes).
\item Save the results of Wireshark to a pcap dump file.
\end{enumerate}
Use the saved data to answer to the following questions:
\begin{questions}
\q{3.A.1}{What is the destination MAC address of an ARP Request packet?}
\q{3.A.2}{What are the different values of the Type field in the Ethernet headers that you observed?}
\q{3.A.3}{Use the captured data to discuss the process in which ARP acquires the MAC address for IP address 10.0.1.12.}
\end{questions}
\subsubsection*{Exercise 3-B. Matching IP addresses and MAC addresses}
Identify the MAC addresses of all interfaces connected to the network, and enter them in Table \ref{tab:lab2-ip-to-mac}. You can obtain the MAC addresses from the ARP cache of each PC. You can fill up the ARP cache at a host, by issuing a ping command from that host to every other host on the network. Alternatively, you can obtain the MAC addresses from the output of the \cmd{ifconfig -a} command explained in Part 5.
\begin{table}[h!t]
\centering
\begin{tabular}{| c | c | c |}
\hline
\textbf{Linux PC} & \textbf{IP Address of eth0} & \textbf{MAC Address of eth0} \\ \hline
PC1 & 10.0.1.11/24 & \\
PC2 & 10.0.1.12/24 & \\
PC3 & 10.0.1.13/24 & \\
PC3 & 10.0.1.14/24 & \\ \hline
\end{tabular}
\caption{ IP and MAC addresses.}
\label{tab:lab2-ip-to-mac}
\end{table}
\begin{questions}
\q{3.B.1}{Include the completed Table \ref{tab:lab2-ip-to-mac} in your lab report.}
\end{questions}
\subsubsection*{Exercise 3-C. ARP requests for a non-existing address}
Observe what happens when an ARP Request is issued for an IP address that does not exist.
\begin{enumerate}
\item On PC1, start wireshark with a capture filter set to capture packets that contain the IP address of PC1:
\begin{cmdblock}
PC1% wireshark -f 'host 10.0.1.11'
\end{cmdblock}
\item Establish a Telnet session from PC1 to 10.0.1.10 (Note that this address does not exist on this network)
\begin{cmdblock}
PC1% telnet 10.0.1.10
\end{cmdblock}
Observe the time interval and the frequency with which PC1 transmits ARP Request packets. Repeat the experiment a number of times to discover the pattern.
\item Save the captured output.
\end{enumerate}
\begin{questions}
\q{3.C.1}{Using the saved output, describe the time interval between each ARP Request packet issued by PC1. Describe the method used by ARP to determine the time between retransmissions of an unsuccessful ARP Request. Include relevant data to support your answer.}
\q{3.C.2}{Why are ARP Request packets not transmitted (i.e. not encapsulated) as IP packets? Explain your answer.}
\end{questions}
\newpage
\subsection{The netstat command}
The Linux command \cmd{netstat} displays information on the network configuration and activity of a Linux system, including network connections, routing tables, interface statistics, masquerade connections, and multicast memberships. The following exercise explores how to use the \cmd{netstat} command to extract different types of information about the network configuration of a host.
\boxinfo{The most common uses of the \cmd{netstat} command are as follows:
\begin{description}
\item[\texttt{netstat -i}]\hfill \\
Displays a table with statistics of the currently configured network interfaces.
\item[\texttt{netstat -rn}] \hfill \\
Displays the kernel routing table. The \cmd{-n} option forces \cmd{netstat} to print the IP addresses. Without this option, \cmd{netstat} attempts to display the hostnames.
\item[\texttt{netstat -an; netstat -tan; netstat -uan}] \hfill \\
Displays the active network connections. The \cmd{-a} option displays all active network connections, the \cmd{-ta} option displays only information on TCP connections, and the \cmd{-ua} option displays only information on UDP traffic. Omitting the \cmd{-n} options prints hostnames and names of servers, instead of IP addresses and ports numbers.
\item[\texttt{netstat -s}] \hfill \\
Displays summary statistics for each protocol that is currently running on the host.
\end{description}
}
\subsubsection*{Exercise 4. Basic usage of the netstat command}
On PC1, try the different variations of the \cmd{netstat} command listed above and save the output to a file.
\begin{enumerate}
\item Display information on the network interfaces by typing
\begin{cmdblock}
PC1% netstat -in
\end{cmdblock}
\item Display the content of the IP routing table by typing
\begin{cmdblock}
PC1% netstat -rn
\end{cmdblock}
\item Display information on TCP and UDP ports that are currently in use by typing
\begin{cmdblock}
PC1% netstat -a
\end{cmdblock}
\item Display the statistics of various networking protocols by typing
\begin{cmdblock}
PC1% netstat -s
\end{cmdblock}
\end{enumerate}
\boxinfo{The values of the statistics displayed by some of the \cmd{netstat} commands are reset each time a host is rebooted.}
\begin{questions}
\q{4.1}{Attach the saved output to your report. Using the saved output, answer the following questions.}
\q{4.1.a}{What are the network interfaces of PC1 and what are the MTU (Maximum Transmission Unit) values of the interfaces?}
\q{4.1.b}{How many IP datagrams, ICMP messages, UDP datagrams, and TCP segments has PC1 transmitted and received since it was last rebooted?}
\q{4.2}{Explain the role of interface lo, the loopback interface. In the output of "netstat -in", why are the values of RX-OK (packets received) and TX-OK (packets transmitted) different for interface "eth0" but identical for interface lo?}
\end{questions}
\newpage
\subsection{Configuring IP interfaces in Linux}
The \cmd{ifconfig} command is used to configure parameters of network interfaces on a Linux system, such as enabling and disabling of interfaces and setting the IP address. The \cmd{ifconfig} command is usually run when a system boots up. In this case, the parameters of the commands are read from a file. Once the Linux system is running, the \cmd{ifconfig} command can be used to modify the network configuration parameters.
\boxinfo{The most common uses of the \cmd{ifconfig} command to query the status of network interfaces are as follows:
\begin{description}
\item[\texttt{ifconfig}]\hfill \\
Displays the configuration parameters of all active interfaces.
\item[\texttt{ifconfig -a}] \hfill \\
Displays the configuration parameters of all network interfaces, including the inactive interfaces.
\item[\texttt{ifconfig \textless interface>\textgreater}] \hfill \\
Displays the configuration parameters of a single interface. For example, \cmd{ifconfig eth0} displays information on interface \iface{eth0}.
\end{description}
}
\boxinfo{There are numerous options for configuring a network interface with \cmd{ifconfig}. The following example shows how to enable and disable an interface and how to change the IP configuration.
\begin{description}
\item[\texttt{ifconfig eth0 down}]\hfill \\
Disables the eth0 interface. No traffic is sent or received on a disabled interface.
\item[\texttt{ifconfig eth0 up}]\hfill \\
Enables the eth0 interface.
\item[\texttt{ifconfig eth0 10.0.1.8 netmask 255.255.255.0 broadcast 10.0.1.255}]\hfill \\
Assigns interface \iface{eth0} the IP address 10.0.1.8/24 and a broadcast address of 10.0.1.255. The interface should be disabled before a new IP address is assigned, and should be enabled after the IP address has been modified.
\item[\texttt{ifconfig eth0 down 10.0.1.8 netmask 255.255.255.0 broadcast 10.0.1.255 up}]\hfill \\
Performs all three commands above in sequence. Interface \iface{eth0} is disabled, an IP address and a broadcast address are assigned, and the interface is enabled.
\item[\texttt{ifconfig eth0 mtu 500}]\hfill \\
Sets the MTU of interface \iface{eth0} to 500 bytes.
\end{description}
}
\subsubsection*{Exercise 5. Changing the IP address of an interface}
Use the \cmd{ifconfig} command to modify the IP address of the \iface{eth0} interface of PC4.
\begin{enumerate}
\item On PC4, run \cmd{ifconfig -a} and save the output.
\item Change the IP address of interface \iface{eth0} of PC4 to 10.0.1.11/24.
\item Run \cmd{ifconfig -a} again and save the output.
\end{enumerate}
\begin{questions}
\q{5.1}{Attach the saved files to your report and explain the fields of the \cmd{ifconfig} output.}
\end{questions}
\newpage
\subsection{Duplicate IP addresses}
In this part of the lab, you observe what happens when two hosts have identical IP addresses.
\subsubsection*{Exercise 6. Duplicate IP addresses}
\begin{enumerate}
\item After completing Exercise 5, the IP addresses of the Ethernet interfaces on the four PCs are as shown in the Table \ref{tab:lab2-ip-addresses-part-6}. Note that PC1 and P4 are assigned the same IP address.
\begin{table}[h!t]
\centering
\begin{tabular}{| c | c | c |}
\hline
\textbf{Linux PC} & \textbf{IP Addresses of Ethernet Interface eth0} \\ \hline
PC1 & 10.0.1.11/24 \\
PC2 & 10.0.1.12/24 \\
PC3 & 10.0.1.13/24 \\
PC3 & 10.0.1.11/24 \\ \hline
\end{tabular}
\caption{IP addresses for Part 6}
\label{tab:lab2-ip-addresses-part-6}
\end{table}
\item Delete all entries in the ARP cache on all PCs.
\item Run wireshark on PC3 and capture the network traffic to and from the duplicate IP address 10.0.1.11
\item From PC3, start a Telnet session to the duplicate IP address, 10.0.1.11, by typing
\begin{cmdblock}
PC3% telnet 10.0.1.11
\end{cmdblock}
and log in as \textit{telecomlabo} user, using the \textit{mvkbj1n} password.
\item Once you have logged in, determine the name of the host to which you are connected. The name of the host can be determined in several ways: (1) issue the command \cmd{hostname}, (2) inspect the ARP cache on PC3, or (3) interpret the captured Wireshark packets.
\item Stop the traffic capture in Wireshark.
\item Save all ARP packets and the first few TCP packets captured by Wireshark. Also save the ARP cache of PC3 using the \cmd{arp -a} command.
\item When you are done with the exercise, reset the IP address of PC4 to its original value as given in Table \ref{tab:lab2-ip-addresses}.
\end{enumerate}
\begin{questions}
\q{6.1}{Explain why the Telnet session was established to one of the hosts with the duplicate address and not the other. Explain why the Telnet session was established at all, and did not result in an error message. Use the ARP cache and the captured packets to support your explanation.}
\end{questions}
\newpage
\subsection{Changing netmasks}
In this part of the lab you test the effects of changing the netmask of a network configuration. In the table below, two hosts (PC2 and PC4) have been assigned different network prefixes.
\boxwarning{If you are having difficulties understanding the concept of netmasks, try using a subnet calculator tool. You can find one at \url{http://subnet-calculator.com} or most operating systems have native tools or widgets that can do the same.}
\subsubsection*{Exercise 7.}
\begin{enumerate}
\item Setup the interfaces of the hosts as shown in Table \ref{tab:lab2-ip-addresses-part-7}. Note that the netmasks of the hosts are different.
\begin{table}[h!t]
\centering
\begin{tabular}{| c | c | c |}
\hline
\textbf{Linux PC} & \textbf{IP Addresses of eth0} & \textbf{Netmask} \\ \hline
PC1 & 10.0.1.100/24 & 255.255.255.0 \\
PC2 & 10.0.1.101/28 & 255.255.255.240 \\
PC3 & 10.0.1.120/24 & 255.255.255.0 \\
PC3 & 10.0.1.121/28 & 255.255.255.240 \\ \hline
\end{tabular}
\caption{IP addresses for Part 7}
\label{tab:lab2-ip-addresses-part-7}
\end{table}
\item Run Wireshark on PC1 and capture the packets for the following ping commands
\begin{enumerate}[label=\alph*.]
\item From PC1 to PC3:
\begin{cmdblock}
PC1% ping -c 1 10.0.1.120
\end{cmdblock}
\item From PC1 to PC2:
\begin{cmdblock}
PC1% ping -c 1 10.0.1.101
\end{cmdblock}
\item From PC1 to PC4:
\begin{cmdblock}
PC1% ping -c 1 10.0.1.121
\end{cmdblock}
\item From PC4 to PC1:
\begin{cmdblock}
PC1% ping -c 1 10.0.1.100
\end{cmdblock}
\item From PC2 to PC4:
\begin{cmdblock}
PC1% ping -c 1 10.0.1.121
\end{cmdblock}
\item From PC2 to PC3:
\begin{cmdblock}
PC1% ping -c 1 10.0.1.120
\end{cmdblock}
\end{enumerate}
\item Save the Wireshark output, and save the output of the ping commands. Note that not all of the above scenarios are successful. Save all output, including any error messages.
\item When you are done with the exercise, reset the interfaces to their original values as given in Table \ref{tab:lab2-ip-addresses}.
\end{enumerate}
\begin{questions}
\q{7.1}{Use your output data and ping results to explain what happened in each of the \cmd{ping} commands. Which ping operations were successful and which were unsuccessful? Why?
\boxwarning{\textbf{You get credits for answering the 'why?' question, not for stating the obvious.}}}
\end{questions}
\newpage
\subsection{Static mapping of IP addresses and hostnames}
Since it is easier to memorize names than IP addresses, there are mechanisms to associate a symbolic name, called \emph{hostname}, with an IP address. On the Internet, the resolution between hostnames and IP addresses is generally done by the Domain Name System (DNS), which will not be discussed in this course. This experiment illustrates another, simpler method to map IP addresses and domain names using the host file \path{/etc/hosts}.
Before DNS became available, the \path{/etc/hosts} file was the only method to resolve hostnames in the Internet. All hosts on the Internet had to occasionally synchronize with the content of other \path{/etc/hosts} files.
\subsubsection*{Exercise 8. Associating names with IP addresses}
In this exercise, you manipulate the static mapping of hostnames and IP addresses using the \path{/etc/hosts} file.
\begin{enumerate}
\item On PC1, inspect the content of file \path{/etc/hosts} with a text editor.
\item On PC1, issue a \cmd{ping} command to PC2
\begin{cmdblock}
PC1% ping 10.0.1.12
\end{cmdblock}
\item Repeat Step 2, but use symbolic names instead of IP addresses (e.g., PC2 instead of 10.0.1.12). You should see that the symbolic name is unreachable at this point.
\item On PC1, edit the file \path{/etc/hosts} and associate hostnames with the IP addresses and save the changes. Use the names PC1, PC2, etc., as used throughout this lab to refer to the PCs.
\item Repeat Step 3. You should now be able to ping directly using the hostnames ``PC2'', ``PC3'', ``PC4'', as in:
\begin{cmdblock}
PC1% ping PC2
PC1% ping PC3
PC1% ping PC4
\end{cmdblock}
\item Reset the \path{/etc/hosts} file to its original state. That is, remove the changes you have made in this exercise, and save the file.
\end{enumerate}
\begin{questions}
\q{8.1}{Explain why a static mapping of names and IP addresses is impractical when the number of hosts is large.}
\q{8.2}{What will be the result of the hostname resolution when multiple IP addresses are associated with the same hostname in the \path{/etc/hosts} file?}
\end{questions}
\newpage
\subsection{Experiments with FTP and Telnet}
A severe security problem with the file transfer protocol (FTP) is that the login and password information are transmitted as plain text (not encrypted). Sometimes malicious users exploit this by snooping passwords on the network.
Here you learn how easy it is to crack passwords by snooping traffic from FTP and Telnet sessions.
\boxerror{\textbf{The use of applications that do not encrypt passwords, such as FTP and Telnet, is strongly discouraged. On the Internet, you should use protocols such as Secure Shell (SSH) tools for file transfers and remote login.}}
\subsubsection*{Exercise 9-A. Snoop Passwords from an FTP session}
Capture traffic from an FTP session between two hosts.
The ftp server installed on the lab PC's is vsftpd, which is not started by default. Use the following command to start it on PC2:
\begin{cmdblock}
PC2% service vsftpd start
\end{cmdblock}
\begin{enumerate}
\item On PC1, run the Wireshark command with capture filters set to capture traffic between PC1 and PC2. The capture filter is
\begin{cmdblock}
host 10.0.1.11 and host 10.0.1.12
\end{cmdblock}
\item On PC1, initiate a FTP session to PC2 by typing
\begin{cmdblock}
PC1% ftp 10.0.1.12
\end{cmdblock}
\item Log in as \textit{telecomlabo} user.
\item Inspect the payload of packets with FTP payload that are sent from PC1 to PC2. FTP sessions use TCP connections for data transfer.
\boxinfo{In Wireshark, there is a simple method to view the payload sent in a TCP connection. Simply select a packet that contains a TCP segment in the main window of Wireshark, and then click on ``Follow TCP Stream'' in the ``Tools'' menu of the Wireshark window. This will create a new window that displays only the payload of the selected TCP connection.}
\item Save the details of the packets, i.e., select ``Print details'' in the ``Print'' window of Wireshark, which transmit the login name and password. As a hint, you can set the display filter in Wireshark to show only the desired packet(s). Refer to Question 9 from the Prelab.
\end{enumerate}
\begin{questions}
\q{9.A.1}{Using the saved output, identify the port numbers of the FTP client and the FTP server.}
\q{9.A.2}{Identify the login name and the password, shown in plain text in the payload of the packets that you captured.}
\end{questions}
\subsubsection*{Exercise 9-B. Snoop Passwords from a Telnet session}
Repeat the above exercise with the \cmd{telnet} command instead of ftp. On PC1, establish a Telnet session to PC2, and save the Wireshark output of packets used to transmit the login name and password.
\begin{questions}
\q{9.B}{Does Telnet have the same security flaws as FTP? Support your answer using the saved output.}
\end{questions}
\subsubsection*{Exercise 9-C. Observing traffic from a Telnet session}
This exercise uses the Telnet session established in the previous exercise.
\begin{enumerate}
\item Run Wireshark on PC1, and start to capture traffic. If the Wireshark window from the previous exercise is still open, make sure that Wireshark is capturing traffic.
\item If the Telnet session from the previous exercise is still in place, skip to the next step. Otherwise, follow the steps from the previous exercise and log in from PC1 to PC2 with the \cmd{telnet} command.
\item Once you are logged in, type a few characters. Observe the number of packets, captured by Wireshark, for each character typed. Observe that for each key you type, there are three packets transmitted. Determine why this occurs.
\item Save the Wireshark output to a text file (using the ``Print Summary'' option).
\end{enumerate}
\begin{questions}
\q{9.C}{Attach the saved output to your report. Explain why three packets are sent in a telnet session for each character typed on the terminal.}
\end{questions}
|
{"hexsha": "9362b143016ae83e7eb3e9accf55e2b094900676", "size": 32910, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Lab 2/lab2.tex", "max_stars_repo_name": "arminnh/lab-computer-networks", "max_stars_repo_head_hexsha": "f900d3e74e5e225791a537c3a4e7bbc5afb1d93b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Lab 2/lab2.tex", "max_issues_repo_name": "arminnh/lab-computer-networks", "max_issues_repo_head_hexsha": "f900d3e74e5e225791a537c3a4e7bbc5afb1d93b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Lab 2/lab2.tex", "max_forks_repo_name": "arminnh/lab-computer-networks", "max_forks_repo_head_hexsha": "f900d3e74e5e225791a537c3a4e7bbc5afb1d93b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.8729874776, "max_line_length": 529, "alphanum_fraction": 0.7543907627, "num_tokens": 9174}
|
//
// MongoDBHAConnection.cpp
// CHAOSFramework
//
// Created by Claudio Bisegni on 22/04/14.
// Copyright (c) 2014 INFN. All rights reserved.
//
#include "MongoDBHAConnectionManager.h"
#include <chaos/common/utility/TimingUtil.h>
#include <boost/format.hpp>
#define RETRIVE_MIN_TIME 500
#define RETRIVE_MAX_TIME 1500
#define MongoDBHAConnection_LOG_HEAD "[MongoDBHAConnection] - "
#define MDBHAC_LAPP_ LAPP_ << MongoDBHAConnection_LOG_HEAD
#define MDBHAC_LDBG_ LDBG_ << MongoDBHAConnection_LOG_HEAD << __FUNCTION__ << " - "
#define MDBHAC_LERR_ LERR_ << MongoDBHAConnection_LOG_HEAD << __FUNCTION__ << " - "
#define MONGO_DB_CHECK_ERROR_CODE(b) b["code"].numberInt()
#define MONGO_DB_GET_ERROR(c, e) \
mongo::BSONObj _error = c->conn().getLastErrorDetailed(); \
e = MONGO_DB_CHECK_ERROR_CODE(_error);
#define CONTINUE_ON_NEXT_CONNECTION(x) \
switch(x) { \
case 13328: \
case 10276: \
case 13386: \
case 16090: \
case 13127: \
case 13348: \
case 13678: \
continue; \
break; \
default: \
break; \
}
namespace chaos_data = chaos::common::data;
using namespace chaos::data_service::db_system;
//-----------------------------------------------------------------------------------------------------------
MongoAuthHook::MongoAuthHook(std::map<string,string>& key_value_custom_param):
has_autentication(false) {
if(key_value_custom_param.count("user") &&
key_value_custom_param.count("pwd") &&
key_value_custom_param.count("db") ) {
//configura for autentication
user = key_value_custom_param["user"];
pwd = key_value_custom_param["pwd"];
db = key_value_custom_param["db"];
has_autentication = true;
}
}
void MongoAuthHook::onCreate( mongo::DBClientBase * conn ) {
std::string err;
if(has_autentication){
MDBHAC_LDBG_ << "Autenticate on - " << conn->getServerAddress();
if(!conn->auth(db, user, pwd, err)) {
MDBHAC_LERR_ << conn->getServerAddress() << " -> " << err;
}
} else {
MDBHAC_LDBG_ << "No Autenticate on - " << conn->getServerAddress();
}
}
void MongoAuthHook::onHandedOut( mongo::DBClientBase * conn ) {
//MDBHAC_LAPP_ << "MongoDBHAConnectionManager::onHandedOut - " << conn->getServerAddress();
}
void MongoAuthHook::onDestroy( mongo::DBClientBase * conn ) {
MDBHAC_LAPP_ << "MongoDBHAConnectionManager::onDestroy - " << conn->getServerAddress();
}
DriverScopedConnection::DriverScopedConnection(mongo::ConnectionString _conn):
ScopedDbConnection(_conn) {}
DriverScopedConnection::~DriverScopedConnection() {
ScopedDbConnection::done();
}
//-----------------------------------------------------------------------------------------------------------
MongoDBHAConnectionManager::MongoDBHAConnectionManager(std::vector<std::string> monogs_routers_list,
std::map<string,string>& key_value_custom_param):
server_number((uint32_t)monogs_routers_list.size()),
next_retrive_intervall(0){
std::string errmsg;
std::string complete_url;
for (std::vector<std::string>::iterator iter = monogs_routers_list.begin();
iter != monogs_routers_list.end();
iter++){
complete_url = boost::str(boost::format("%1%") % *iter);
MDBHAC_LAPP_ << "Register mongo server address " << complete_url;
boost::shared_ptr<mongo::ConnectionString> cs_ptr(new mongo::ConnectionString(mongo::HostAndPort(complete_url)));
valid_connection_queue.push(cs_ptr);
}
mongo::pool.addHook(new MongoAuthHook(key_value_custom_param));
mongo::Status status = mongo::client::initialize();
if (!status.isOK()) {
std::cout << "failed to initialize the client driver: " << status.toString() << endl;
}
}
MongoDBHAConnectionManager::~MongoDBHAConnectionManager() {
std::queue< boost::shared_ptr<mongo::ConnectionString> > empty_queue;
std::swap(valid_connection_queue, empty_queue);
std::swap(offline_connection_queue, empty_queue);
}
inline bool MongoDBHAConnectionManager::canRetry() {
bool retry = false;
uint64_t cur_ts = chaos::TimingUtil::getTimeStamp();
if((retry = cur_ts > next_retrive_intervall)) {
next_retrive_intervall = cur_ts + RETRIVE_MAX_TIME;
}
return retry;
}
bool MongoDBHAConnectionManager::getConnection(MongoDBHAConnection *connection_sptr) {
uint32_t cur_index = 0;
bool connection_is_good = false;
bool result = false;
boost::shared_ptr<mongo::ConnectionString> nextCS;
//lock the mutex for access to the queue
boost::unique_lock<boost::shared_mutex> lock(mutext_queue);
if(canRetry() && !offline_connection_queue.empty()) {
// ad an invalid conneciton string to the end of valid queue
valid_connection_queue.push(offline_connection_queue.front());
//remove invalid connection string to his queue
offline_connection_queue.pop();
}
//get the number of valid server
uint32_t valid_server_num = (uint32_t)valid_connection_queue.size();
//try fo find a good conncetion
while(!nextCS && cur_index < valid_server_num) {
cur_index++;
// get next available server connection string
if((nextCS = valid_connection_queue.front())) {
//remove invalid connection string form queue and put it into the offline one
valid_connection_queue.pop();
try {
DriverScopedConnection c(*nextCS);
connection_is_good = c.ok();
c.get()->setWriteConcern(mongo::WriteConcern::journaled);
} catch (std::exception &ex) {
// in any case of error put the current conneciton string into offline queue
offline_connection_queue.push(nextCS);
//check nex string if there is one
nextCS.reset();
continue;
}
if(connection_is_good) {
//put the used description at the end of the queue
valid_connection_queue.push(nextCS);
} else {
// push into offline queue
offline_connection_queue.push(nextCS);
//check nex string if there is one
nextCS.reset();
continue;
}
}
}
if((result = (nextCS.get() != NULL))) {
//i can create the connection
*connection_sptr = new DriverScopedConnection(*nextCS.get());
}
return result;
}
int MongoDBHAConnectionManager::insert( const std::string &ns,
mongo::BSONObj obj,
int flags) {
int err = -1;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
conn->conn().insert(ns, obj, flags);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return err;
}
int MongoDBHAConnectionManager::findOne(mongo::BSONObj& result,
const std::string &ns,
const mongo::Query& query,
const mongo::BSONObj *fieldsToReturn,
int queryOptions) {
int err = -1;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
result = conn->conn().findOne(ns, query, fieldsToReturn, queryOptions);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return err;
}
void MongoDBHAConnectionManager::findN(std::vector<mongo::BSONObj>& out,
const std::string& ns,
mongo::Query query,
int nToReturn,
int nToSkip,
const mongo::BSONObj *fieldsToReturn,
int queryOptions) {
int err = -1;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
conn->conn().findN(out, ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
}
std::auto_ptr<mongo::DBClientCursor> MongoDBHAConnectionManager::query(const std::string &ns,
mongo::Query query,
int nToReturn,
int nToSkip,
const mongo::BSONObj *fieldsToReturn,
int queryOptions,
int batchSize) {
int err = -1;
MongoDBHAConnection conn = NULL;
std::auto_ptr<mongo::DBClientCursor> result;
while (getConnection(&conn)) {
try {
result = conn->conn().query(ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return result;
}
int MongoDBHAConnectionManager::runCommand(mongo::BSONObj& result,
const std::string &ns,
const mongo::BSONObj& command,
int queryOptions) {
int err = -1;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
if(!conn->conn().runCommand(ns, command, result, queryOptions)) {
MDBHAC_LERR_ << "Error executing MongoDBHAConnectionManager::runCommand" << " -> " << command.toString();
}
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::runCommand" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return err;
}
int MongoDBHAConnectionManager::update( const std::string &ns,
mongo::Query query,
mongo::BSONObj obj,
bool upsert,
bool multi,
const mongo::WriteConcern* wc) {
int err = -1;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
conn->conn().update(ns, query, obj, upsert, multi, wc);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return err;
}
int MongoDBHAConnectionManager::remove( const std::string &ns , mongo::Query q , bool justOne, const mongo::WriteConcern* wc) {
int err = -1;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
conn->conn().remove(ns, q, justOne, wc);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return err;
}
int MongoDBHAConnectionManager::count(unsigned long long & result,
const std::string &ns,
const mongo::Query& query,
int options,
int limit,
int skip) {
int err = 0;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
result = conn->conn().count(ns, query, options, limit, skip);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return err;
}
int MongoDBHAConnectionManager::ensureIndex( const std::string &database,
const std::string &collection,
mongo::BSONObj keys,
bool unique,
const std::string &name,
bool dropDup,
bool background,
int v,
int ttl) {
int err = -1;
MongoDBHAConnection conn = NULL;
while (getConnection(&conn)) {
try {
mongo::BSONObjBuilder toSave;
toSave.append( "ns" , database+"."+collection );
toSave.append( "key" , keys );
if ( name != "" ) {
toSave.append( "name" , name );
}
else {
string nn = conn->conn().genIndexName( keys );
toSave.append( "name" , nn );
}
if( v >= 0 )
toSave.append("v", v);
if ( unique )
toSave.appendBool( "unique", unique );
if( background )
toSave.appendBool( "background", true );
if( dropDup )
toSave.appendBool( "dropDups", dropDup );
if ( ttl > 0 )
toSave.append( "expireAfterSeconds", ttl );
err = insert(database+".system.indexes", toSave.obj());
//err = conn->conn().ensureIndex(database+"."+collection, keys, unique, name, cache, background, v, ttl);
MONGO_DB_GET_ERROR(conn, err);
} catch (std::exception& ex) {
MDBHAC_LERR_ << "MongoDBHAConnectionManager::insert" << " -> " << ex.what();
MONGO_DB_GET_ERROR(conn, err);
DELETE_OBJ_POINTER(conn)
CONTINUE_ON_NEXT_CONNECTION(err)
}
break;
}
if(conn) delete(conn);
return err;
}
|
{"hexsha": "246ac0c3b8bb87a647cf0664c7225fb12d264e13", "size": 12887, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ChaosDataService/db_system/MongoDBHAConnectionManager.cpp", "max_stars_repo_name": "fast01/chaosframework", "max_stars_repo_head_hexsha": "28194bcca5f976fd5cf61448ca84ce545e94d822", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-04-16T13:20:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-24T02:05:25.000Z", "max_issues_repo_path": "ChaosDataService/db_system/MongoDBHAConnectionManager.cpp", "max_issues_repo_name": "fast01/chaosframework", "max_issues_repo_head_hexsha": "28194bcca5f976fd5cf61448ca84ce545e94d822", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ChaosDataService/db_system/MongoDBHAConnectionManager.cpp", "max_forks_repo_name": "fast01/chaosframework", "max_forks_repo_head_hexsha": "28194bcca5f976fd5cf61448ca84ce545e94d822", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9002320186, "max_line_length": 127, "alphanum_fraction": 0.6659424226, "num_tokens": 3429}
|
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow import DAG
from airflow.utils.dates import days_ago
import airflow.hooks.S3_hook
from airflow.hooks.base_hook import BaseHook
from datetime import timedelta
from datetime import datetime
from datetime import date
import sqlalchemy
from sqlalchemy import create_engine
import pymysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
import requests
import os
from newsapi import NewsApiClient
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import nltk
from newsapi import NewsApiClient
from dotenv import load_dotenv
import os
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash_table import DataTable
from dash_table.FormatTemplate import Format
from matplotlib import rcParams
from plotly.subplots import make_subplots
from wordcloud import WordCloud, STOPWORDS
import collections
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.express as px
import re
load_dotenv()
analyser = SentimentIntensityAnalyzer()
stop_words = list(set(stopwords.words('english')))
nltk.download('stopwords')
nltk.download('punkt')
today = date.today()
today = today.isoformat()
news_api_key = os.getenv("NEWS_API_KEY")
newsapi = NewsApiClient(api_key = news_api_key)
jupyter_location_string = "/Users/jkocher/Documents/projects/DataZCW-Final-Project/Final_Project/Dashboard/getting_df.ipynb"
executed_location_string = "/Users/jkocher/Documents/projects/DataZCW-Final-Project/Final_Project/Dashboard/report.ipynb"
default_args = {
'owner': 'James Kocher',
'depends_on_past': False,
'start_date': datetime.now(),
'retries': 0
}
dag = DAG(
"Final_Project_Pipeline",
default_args=default_args,
description = "this dag will retrieve new News articles and update our visualization",
schedule_interval = timedelta(hours = 1)
)
engine = create_engine('mysql+pymysql://root:zipcoder@localhost/News')
Base = declarative_base()
class articles(Base):
__tablename__ = 'news'
author = Column(String(250))
title = Column(String(500))
content = Column(String(4294967294))
date = Column(Integer)
sentiment = Column(String(50))
score = Column(String(50))
unique_identify = Column(String(200), primary_key = True)
def add_sentiment(news):
news = re.sub(r'[^\w\s]', '', news)
words = word_tokenize(news, "english", True)
filtered = [w for w in words if not w in stop_words]
score = analyser.polarity_scores(" ".join(filtered))
if 0.0 < score['compound'] <= 1.0:
return 'Positive'
elif 0.0 > score['compound'] >= -1.0:
return 'Negative'
elif score['compound'] == 0.0:
return 'Neutral'
def add_score(tweets):
tweet = re.sub(r'[^\w\s]', '', tweets)
words = word_tokenize(tweet, "english", True)
filtered = [w for w in words if not w in stop_words]
score = analyser.polarity_scores(" ".join(filtered))
return (score['compound'])
def clean_news():
now = datetime.now()
start_date = datetime.now()-timedelta(hours=1)
all_articles = newsapi.get_everything(q='covid-19',
from_param=start_date.isoformat()[0:19],
to=now.isoformat()[0:19],
language='en',
sort_by='relevancy',
page=1,
page_size=100)
for x in range(len(all_articles.get("articles"))):
article = all_articles.get("articles")[x]
if article.get("content") != None:
author = str(article.get('author'))
title = str(article.get('title'))
content = str(article.get('content'))
published_date = str(article.get('publishedAt'))
published_date = datetime.strptime(published_date,"%Y-%m-%dT%H:%M:%SZ")
published_date = datetime.timestamp(published_date)*1000
author = str(article.get('author'))
sentiment = str(add_sentiment(content))
score = str(add_score(content))
aut_title = str(str(author)+ " " + str(title))
message_sql = articles(author=author, title=title, content=content, date=published_date, sentiment = sentiment, score= score, unique_identify = aut_title)
Session = sessionmaker(bind=engine)
session = Session()
session.add(message_sql)
session.commit()
t1 = PythonOperator(task_id = "collect_news",
python_callable = clean_news,
dag = dag)
#Dashboard
t2 = BashOperator(
task_id="run_jupyter_notebook",
bash_command = "papermill /Users/jkocher/Documents/projects/DataZCW-Final-Project/Final_Project/Dashboard/getting_df.ipynb /Users/jkocher/Documents/projects/DataZCW-Final-Project/Final_Project/Dashboard/report.ipynb",
dag = dag)
t1 >> t2
|
{"hexsha": "461e5a5672d438c012a20c89ea4fe585a1030606", "size": 5006, "ext": "py", "lang": "Python", "max_stars_repo_path": "Final_Project/Airflow_Dag/final_project_dag.py", "max_stars_repo_name": "JKocher13/DataZCW-Final-Project", "max_stars_repo_head_hexsha": "9749825a3b106879e1e1536172d6adafc888b843", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Final_Project/Airflow_Dag/final_project_dag.py", "max_issues_repo_name": "JKocher13/DataZCW-Final-Project", "max_issues_repo_head_hexsha": "9749825a3b106879e1e1536172d6adafc888b843", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Final_Project/Airflow_Dag/final_project_dag.py", "max_forks_repo_name": "JKocher13/DataZCW-Final-Project", "max_forks_repo_head_hexsha": "9749825a3b106879e1e1536172d6adafc888b843", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5064935065, "max_line_length": 218, "alphanum_fraction": 0.754894127, "include": true, "reason": "import numpy", "num_tokens": 1223}
|
import numpy as np
import pandas as pd
dates = pd.date_range('20130101',periods=6)
df = pd.DataFrame(np.arange(24).reshape((6,4)),index=dates,columns=['A','B','C','D'])
print(df)
df.loc['20130102','B'] = 222
df.iloc[2,2] = 111
df.A[df.A<10] = 0
# df.F=np.nan 这种不能加新列
df['F'] = 0 # 这种可以加新列 np.nan
df['E'] = pd.Series([1,2,3,4,5,6],index=pd.date_range('20130101',periods=6)) #index应该对齐
print(df)
|
{"hexsha": "08b10eddbe22b7400f04f4fc6c9a69a14452a9b5", "size": 407, "ext": "py", "lang": "Python", "max_stars_repo_path": "01-python/source code/04/01.py", "max_stars_repo_name": "lizhangjie316/ComputerVision", "max_stars_repo_head_hexsha": "86d82358bd160074d154773df0284e1154a6d077", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-05T08:38:03.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-05T08:38:03.000Z", "max_issues_repo_path": "01-python/source code/04/01.py", "max_issues_repo_name": "lizhangjie316/ComputerVision", "max_issues_repo_head_hexsha": "86d82358bd160074d154773df0284e1154a6d077", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-11-18T22:13:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:04:02.000Z", "max_forks_repo_path": "01-python/source code/04/01.py", "max_forks_repo_name": "lizhangjie316/ComputerVision", "max_forks_repo_head_hexsha": "86d82358bd160074d154773df0284e1154a6d077", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.380952381, "max_line_length": 87, "alphanum_fraction": 0.6339066339, "include": true, "reason": "import numpy", "num_tokens": 172}
|
import numpy as np
import torch.nn as nn
import torch
import pickle
from datetime import datetime
import os
import glob
class BaseLayer(nn.Module):
def __init__(self):
super(BaseLayer, self).__init__()
self.cuda = True if torch.cuda.is_available() else False
self.Tensor = torch.cuda.FloatTensor if self.cuda else torch.FloatTensor
self.fmap_base = 16 << 10
self.fmap_min = 1
self.fmap_max = 512
self.fmap_decay = 1
def cliped_features(self, stage):
before_clip_value = int(self.fmap_base / (2.0 ** (stage * self.fmap_decay)))
cliped_value = np.clip(before_clip_value, self.fmap_min, self.fmap_max)
return cliped_value
def load_pkl(self, filename):
with open(filename, 'rb') as file:
return pickle.load(file, encoding='latin1')
def save_pkl(self, obj, filename):
with open(filename, 'wb') as file:
pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
def restore(self, model_path, dir_name):
ret = False
restore_model_list = sorted(glob.glob(os.path.join(model_path, dir_name, '*.pth')), reverse=True)
if 0 < len(restore_model_list):
restore_model_file = restore_model_list[0]
self.load_state_dict(torch.load(restore_model_file))
ret = True
return ret
def save(self, model_path, dir_name):
now = datetime.now().strftime("%Y%m%d_%H%M%S")
save_path = os.path.join(model_path, dir_name)
if not os.path.isdir(save_path):
os.makedirs(save_path, exist_ok=True)
torch.save(self.state_dict(), os.path.join(save_path, '{}.pth'.format(now)))
@classmethod
def save_model(cls, model_path, obj):
now = datetime.now().strftime("%Y%m%d_%H%M%S")
if not os.path.isdir(model_path):
os.makedirs(model_path, exist_ok=True)
filename = os.path.join(model_path, '{}.pkl'.format(now))
with open(filename, 'wb') as file:
pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def restore_model(cls, model_path):
ret = None
restore_model_list = sorted(glob.glob(os.path.join(model_path, '*.pkl')), reverse=True)
if len(restore_model_list) == 0:
return ret
restore_model_file = restore_model_list[0]
with open(restore_model_file, 'rb') as file:
ret = pickle.load(file, encoding='latin1')
return ret
@classmethod
def print_model_parameters(cls, model, model_name):
print('---- {} ----'.format(model_name))
for index, key in enumerate(model.state_dict().keys()):
print('index: {}, name: {}, shape: {}'.format(index, key, model.state_dict()[key].shape))
@classmethod
def set_model_parameter_requires_grad_all(cls, model, flag=True):
for p in model.parameters():
p.requires_grad = flag
|
{"hexsha": "127004f4b88f1b6a3be9972c1a8870e4f512d9bc", "size": 2949, "ext": "py", "lang": "Python", "max_stars_repo_path": "chapter7/stylegan2_pytorch/base_layer.py", "max_stars_repo_name": "tms-byte/gan_sample", "max_stars_repo_head_hexsha": "1ff723cf37af902b400dbb68777a52e6e3dfcc89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2021-02-11T12:25:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T11:47:21.000Z", "max_issues_repo_path": "chapter7/stylegan2_pytorch/base_layer.py", "max_issues_repo_name": "tms-byte/gan_sample", "max_issues_repo_head_hexsha": "1ff723cf37af902b400dbb68777a52e6e3dfcc89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-02-22T01:38:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-29T15:55:04.000Z", "max_forks_repo_path": "chapter7/stylegan2_pytorch/base_layer.py", "max_forks_repo_name": "tms-byte/gan_sample", "max_forks_repo_head_hexsha": "1ff723cf37af902b400dbb68777a52e6e3dfcc89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2021-02-11T14:49:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T04:18:11.000Z", "avg_line_length": 37.3291139241, "max_line_length": 105, "alphanum_fraction": 0.6364869447, "include": true, "reason": "import numpy", "num_tokens": 695}
|
import os
from PIL import ImageGrab
import time
import win32api, win32con
from PIL import ImageOps
from numpy import *
import pyautogui
import random
from ctypes import windll
user32 = windll.user32
user32.SetProcessDPIAware()
#some sort of DPS problem unrelated to project
#this stops the images from being cut off while using screengrab
# ------------------
x_pad = 475 #These pads is so it works for different resolutions. Instead of
y_pad = 699 #changing all the coordinates, other users of the bot would just
#have to adjust the pads using screenGrab() defined further below
class Cord: #All important coordinates that are checked often are stored here
mainmenu = (835, 893)
attack = (922, 806)
scratch = (1106, 835)
shred = (919, 950)
attacker1 = (974, 177)
hpattacker1 = (924, 13)
attacker2 = (1091, 331)
hpattacker2 = (1044, 147)
attacker3 = (1223, 477)
hpattacker3 = (1164, 305)
attacker4 = (1031, 265)
hpattacker4 = (984, 67)
attacker5 = (1145, 433)
hpattacker5 = (1104, 227)
boss = (1007, 292)
hpboss = (893, 67)
def screenGrab(): #Originally used as a tool to get x_pad and y_pad
#Currently used to scan the for RGB values in startGame(). See previous versions in journal
box = (x_pad+1,y_pad+1,x_pad+1371,y_pad+1220)
im = ImageGrab.grab(box)
hm = im.getpixel(Cord.hpboss) #put any coordinate u want
print(hm)
return im
def leftClick(): #just for clicking
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0) #Press left click
time.sleep(.1) #delays
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0) #Release left click
print('Click')
def mousePos(cord): #Moves the mouse to the given coordinates. This changed a lot, see previous versions in journal
pyautogui.moveTo(x_pad + cord[0] + random.randint(0, 20), y_pad + cord[1] + random.randint(0, 20), duration=0.25)
#Receives coordinates given in startGame(), goes to location taking into account the pads
#random.randint(0,20) randomizes the coordinates a bit to avoid bot detection
def get_cords(): #Tool that was used to get the coordinates of all the buttons and attackers in the game.
#No longer used now that the bot is completed
x,y = win32api.GetCursorPos()
x = x - x_pad #Takes into account pads, like all the other functions
y = y - y_pad
print(x,y)
# ------------------
def startGame(): #Start of the main function
wait = 0 #Used and explained further below
while x_pad == 475: #Just needed this to loop forever so picked random variable
#location of first menu
mousePos((257, 559))
leftClick()
leftClick()
time.sleep(1.5)
#location of second menu
mousePos((489, 771))
leftClick()
time.sleep(3.5)
while x_pad == 475: #Loop for the actual game once past menus
x = round(random.uniform(0, 0.2),2) #Generates random float that'll be added to randomize wait intervals
screenGrab()
s = screenGrab() #Takes picture of the screen and assigns it to s
if s.getpixel((205, 57)) == (93, 94, 134): #Checks if bot got past the menu, good for checking 'camping' (explained in journal)
wait = 0 #Resets the counter for amount of times 'waiting', used farther below
if s.getpixel(Cord.mainmenu) == (222, 214, 202):
#Checks if coordinates of mainmenu match RGB value. If so, that means this menu popped up, and level is complete
#The coordinates & RGB values are from using get_cords() & screenGrab() as tools. Check journal for how
print('level complete')
mousePos((811, 822)) #Goes to the button that sends us back to the mainmenu
leftClick()
time.sleep(1.4 + x) #Pauses after clicking for 1.4 + (randomized number) seconds
break #Breaks out of this loop to go back to the menu loop
#All the other if statements have the same idea as the above if statement
if s.getpixel(Cord.attack) == (236, 234, 231):
wait=0
print('attacking')
mousePos(Cord.attack)
leftClick()
time.sleep(0.1 + x)
screenGrab()
s = screenGrab() #Important screen change here, picture of screen taken again
if s.getpixel(Cord.shred) == (214, 172, 99): #Special attack option
mousePos(Cord.shred)
leftClick()
time.sleep(0.4 + x)
else:
mousePos(Cord.scratch) #Normal attack option
leftClick()
time.sleep(0.4 + x)
if s.getpixel(Cord.hpattacker1) == (49, 61, 48):
mousePos(Cord.attacker1)
leftClick()
time.sleep(1.2+ x)
elif s.getpixel(Cord.hpattacker2) == (49, 61, 48):
mousePos(Cord.attacker2)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpattacker3) == (49, 61, 48):
mousePos(Cord.attacker3)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpattacker4) == (49, 61, 48):
mousePos(Cord.attacker4)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpattacker5) == (49, 61, 48):
mousePos(Cord.attacker5)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpboss) == (10, 10, 13):
mousePos(Cord.boss)
leftClick()
time.sleep(1.2 + x)
else: #If no hp bars or attack buttons are detected, page is probably loading or enemies are attacking
wait = wait+1 #Wait counter goes up 1 every loop
print('waiting')
if wait == 15: #If computer waited 15 consecutive times, something must've went wrong. So, program exits
exit()
time.sleep(2) #Pauses for 2 seconds to wait, then loops back to recheck if they're hp bars or attack buttons
|
{"hexsha": "2764e8cf2af125cde1e1dea98f00be38d0e21369", "size": 6205, "ext": "py", "lang": "Python", "max_stars_repo_path": "FlightRisingColiseum/Bot_FR.py", "max_stars_repo_name": "Eternal05/Flightrising-Coliseum-Bot", "max_stars_repo_head_hexsha": "8f4895ff8a2d5533fe6a6546e09361738fd54910", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-17T02:52:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-17T02:52:40.000Z", "max_issues_repo_path": "FlightRisingColiseum/Bot_FR.py", "max_issues_repo_name": "Eternal05/Flightrising-Coliseum-Bot", "max_issues_repo_head_hexsha": "8f4895ff8a2d5533fe6a6546e09361738fd54910", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FlightRisingColiseum/Bot_FR.py", "max_forks_repo_name": "Eternal05/Flightrising-Coliseum-Bot", "max_forks_repo_head_hexsha": "8f4895ff8a2d5533fe6a6546e09361738fd54910", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8595505618, "max_line_length": 134, "alphanum_fraction": 0.6046736503, "include": true, "reason": "from numpy", "num_tokens": 1655}
|
#!/usr/bin/env python
import os, sys
import numpy as np
import IO
def read_OBJ(filename):
'''Read an OBJ file from disk. Returns a geom dict.'''
return decode_OBJ(parse_OBJ(open(filename,'r').readlines()))
def parse_OBJ(obj_strings):
'''Parse an OBJ file into a dict of group:dict of str:list one of (str,list of dict(str:str),dict(str:str)).'''
ret = {'#':{}}
groups = ['#']
ret['groups'] = groups
group = ret['#']
obj_strings = map(str.split, obj_strings)
for line in obj_strings:
if len(line) == 0 or line[0].startswith('#'): continue # ignore blank lines and comments
if line[0] == 'mtllib':
ret[line[0]] = line[1:]
elif line[0] == 'g':
groupname = '/'.join(line[1:])
while ret.has_key(groupname): groupname += '+'
groups.append(groupname)
group = ret[groupname] = {}
else:
if not group.has_key(line[0]): group[line[0]] = []
group[line[0]].append(line[1:])
return ret
def faces_to_triangles(faces):
'''given a list of faces (each vertex might contain multiple coordinates), generate a list of triangles and splits for those triangles
that recreate the faces.'''
tris,tris_splits = [],[0]
for f in faces:
for j,k in zip(f[1:-1],f[2:]): tris.append([f[0],j,k])
tris_splits.append(len(tris))
return np.array(tris,dtype=np.int32),np.array(tris_splits,dtype=np.int32)
def decode_OBJ(geomDict):
'''extract the interesting data from the raw OBJ. also generate some triangles for convenience.'''
ret = { 'groups':geomDict['groups'], 'v':[[0,0,0]], 'vt':[[0,0]], 'vn':[[0,0,0]], 'f':[], 'v_splits':[1], 'vt_splits':[1], 'vn_splits':[1], 'f_splits':[0] }
for groupName in geomDict['groups']:
group = geomDict[groupName]
for k,v in ret.iteritems():
if group.has_key(k):
v.extend(group[k])
if ret.has_key(k+'_splits'):
ret[k+'_splits'].append(len(v))
for k in ['v','vt','vn']: ret[k] = np.array(ret[k],dtype=np.float32)
for k in ['v_splits','vt_splits','vn_splits']: ret[k] = np.array(ret[k],dtype=np.int32)
fs = []
f_splits = ret['f_splits']
for fi in range(len(f_splits)-1):
for face in ret['f'][f_splits[fi]:f_splits[fi+1]]:
f = map(lambda x: map(int,x.split('/')), face)
fs.append(f)
ret['fs'] = fs
ret['tris'],ret['tris_splits'] = faces_to_triangles(fs)
return ret
def flatten_OBJ_and_x10(geomDict, out=None):
'''
convert an OBJ geometry to a flat geometry.
when the geometry was read, the facets were triangulated into the 'tris' field.
those triangles index into vertices, texture vertices and normal lists.
we find all unique vertices (same indices for all three) and flatten the lists.
this causes an unfortunate renumbering of the vertex indices.
however, the resulting data can be directly rendered.
the vertices are also scaled by 10, to convert from cm (obj standard) to mm (our standard).
'''
if out is None: out = geomDict
vs = geomDict['v']*10.# convert to mm
vts = geomDict['vt']
vns = geomDict['vn']
tris = geomDict['tris']
numVerts = vs.shape[0]
numTris = tris.shape[0]
verts = {}
out_vs = []
out_vts = []
out_vns = []
out_fs = []
out_fs_splits = [0]
out_tris = []
# TODO reserve the first N vertex indices to match the original vertices
for face in geomDict['fs']:
for t in face:
v = tuple(t)
if v not in verts:
verts[v] = len(verts)
out_vs.append(vs[v[0]])
if len(v)>1: out_vts.append(vts[v[1]]) # we assume there is a texture index
if len(v)>2: out_vns.append(vns[v[2]]) # we assume there is a normal index
out_fs.append(verts[v])
out_fs_splits.append(len(out_fs))
for tri in tris:
for t in tri:
v = tuple(t)
out_tris.append(verts[v])
out['v'] = np.array(out_vs,dtype=np.float32).reshape(-1,3)
out['vt'] = np.array(out_vts,dtype=np.float32).reshape(-1,2)
out['vn'] = np.array(out_vns,dtype=np.float32).reshape(-1,3)
out['tris'] = np.array(out_tris,dtype=np.int32).reshape(-1,3)
out['fs'] = np.array(out_fs,dtype=np.int32)
out['fs_splits'] = np.array(out_fs_splits,dtype=np.int32)
return out
def poseGeometry(geom, Gs):
print geom.keys()
tv = geom['v'].copy()
tvn = geom['vn'].copy()
for ji,ran in geom['gd']:
ran = np.unique(ran.ravel())
G = Gs[ji]
tv[ran] = np.dot(tv[ran], G[:,:3].T) + G[:,3]
tvn[ran] = np.dot(tvn[ran], G[:,:3].T)
return {'v':tv,'vt':geom['vt'],'vn':tvn,'gd':geom['gd'],'geomMapping':geom['geomMapping']} #,'tris':np.arange(len(tv),dtype=np.int32).reshape(1,-1,3) }
#@profile
def trianglesToNearVerts(triangles, steps = 25):
'''Given the triangles, generate the disc of all the vertices reachable in a number of steps.'''
numVerts = np.max(triangles)+1
# generate N, the vertices neighbouring a given vertex
N = [set() for vi in xrange(numVerts)]
for t in triangles:
N[t[0]].update(set([t[1],t[2]]))
N[t[1]].update(set([t[2],t[0]]))
N[t[2]].update(set([t[0],t[1]]))
D = [set(Di) for Di in N] # disc of surrounding vertices
R = [set(Di) for Di in N] # ring of vertices being added
for step in range(steps):
print step,'/',steps
# make R be a one-ring expansion of R (via N)
R2 = [set() for vi in xrange(numVerts)]
for Ri,R2i in zip(R,R2):
for vi in Ri: R2i.update(N[vi])
R = R2
# thin out R to not include D; update D
for Di,Ri in zip(D,R): Ri.difference_update(Di); Di.update(Ri)
for vi,Di in enumerate(D): Di.discard(vi); D[vi] = np.array(list(Di),dtype=np.int32)
# corners of mouth are 3382, 6922 I think
return D
#@profile
def findCloseVerts(xs, threshold = 80.0):
import ISCV
cloud = ISCV.HashCloud3D(xs, threshold)
scores,matches,matches_splits = cloud.score(xs)
good = (scores < (threshold**2))
D = [matches[m0:m1][np.where(good[m0:m1])[0]] for m0,m1 in zip(matches_splits[:-1],matches_splits[1:])]
print 'avg verts',np.mean(map(len,D))
#for vi,Di in enumerate(D): Di.discard(vi); D[vi] = np.array(list(Di),dtype=np.int32)
return D
def myNormal(x,y):
c = np.cross(x,y)
s = list(c.shape)
s[-1] = 1
return c / (1e-8+(np.sum(c*c,axis=-1))**0.25).reshape(s)
#@profile
def computeLocalCoordinates(xs, uvs, nearVerts):
'''uvs are assumed in correspondence with xs. this won't work at seams (take care).'''
print xs.shape, uvs.shape
numVerts = xs.shape[0]
assert(numVerts == uvs.shape[0])
assert(len(nearVerts) == numVerts)
D = np.zeros((numVerts,3,3), dtype=np.float32)
for vi,nvs in enumerate(nearVerts):
if len(nvs) < 2: print 'skipping',vi; D[vi] = np.eye(3); continue
dxs = xs[nvs] - xs[vi] # N,3
duvs = uvs[nvs] - uvs[vi] # N,2
D[vi,:,:2] = np.linalg.lstsq(duvs,dxs)[0].T
#[[a,b],[c,d]] = np.dot(duvs.T,duvs); scLLT = 1.0/(1e-8+a*d-b*c)
#D[vi,:,:2] = np.dot(np.dot(dxs.T, duvs), [[d*scLLT,-b*scLLT],[-c*scLLT,a*scLLT]])
D[vi,:,2] = myNormal(D[vi,:,0],D[vi,:,1])
return D
#@profile
def computeLocalCoordinates2(xs, x2s, nearVerts):
'''Compute direct mapping from vertices.'''
print xs.shape, x2s.shape
numVerts = xs.shape[0]
assert(numVerts == x2s.shape[0])
assert(len(nearVerts) == numVerts)
D = np.zeros((numVerts,3,3), dtype=np.float32)
for vi,nvs in enumerate(nearVerts):
if len(nvs) < 2: print 'skipping',vi; D[vi] = np.eye(3); continue
dxs = xs[nvs] - xs[vi] # N,3
dx2s = x2s[nvs] - x2s[vi] # N,3
M = np.linalg.lstsq(dxs,dx2s,rcond=1e-6)[0].T # dxs M.T = dx2s; -> M dxs[i] = dx2s[i]
u,s,vt = np.linalg.svd(M, full_matrices=True)
s = (s+np.mean(s))*0.5 # try to stabilize the solution
D[vi] = np.dot(u,np.dot(np.diag(s),vt))
if np.linalg.det(D[vi]) < 0:
s[2] *= -1.0
D[vi] = np.dot(u,np.dot(np.diag(s),vt))
return D
#@profile
def renderMotion(D, motion):
numVerts = motion.shape[0]
ret = np.zeros((numVerts,3),dtype=np.float32)
for vi in range(numVerts):
ret[vi] = np.dot(D[vi], motion[vi])
return ret
#@profile
def computeMotion(xs, D):
numVerts = xs.shape[0]
ret = np.zeros((numVerts,3),dtype=np.float32)
for vi in range(numVerts):
try:
ret[vi] = np.linalg.solve(D[vi], xs[vi])
except:
print 'singular vertex',vi
return ret
#@profile
def lunterp2D(M,s):
'''Given a triangle of uv coordinates, compute the barycenrtic coordinates of the uv coordinate.'''
[a,b],[c,d],[e,f] = M[1]-M[0],M[2]-M[0],s-M[0]
det = a*d-b*c
if not det: return (-10,-10,-10) # triangle is a straight line.. (-inf,inf,inf) maybe
w1 = (e*d - f*c)/det
w2 = (f*a - e*b)/det
return (1-w1-w2,w1,w2)
def lunterp3D(M,s):
'''Given a triangle of 3D coordinates, compute the barycenrtic coordinates of the 3D vector s.'''
w1,w2 = np.linalg.lstsq((M[1:]-M[0]).T,s-M[0],rcond=1e-6)[0]
#[a,b,c],[d,e,f],[g,h,i] = M[1]-M[0],M[2]-M[0],s-M[0]
## [w1 w2] [a b c] = [g h i]
## [d e f]
## TODO
#det = a*d-b*c
#if not det: return (-10,-10,-10) # triangle is a straight line.. (-inf,inf,inf) maybe
#w1 = (e*d - f*c)/det
#w2 = (f*a - e*b)/det
return (1-w1-w2,w1,w2)
def getMapping(hi_geo, triangles, lo_geo, threshold = 20.0):
'''given a hi-res geometry and topology, and a lo-res geometry, find the triangles and barycentric weights that
when applied to the hi-res geometry, best fit the lo-res geometry.
The mapping is returned as a list of weight triples and a list of index triples, per vertex.
The output vertex is the weighted sum of the extracted indicated source vertices.'''
is3D = (hi_geo.shape[1] == 3)
lunterp = lunterp3D if is3D else lunterp2D
numVertsHi = hi_geo.shape[0]
numVertsLo = lo_geo.shape[0]
weights = np.zeros((numVertsLo,3),dtype=np.float32)
indices = -np.ones((numVertsLo,3),dtype=np.int32)
import ISCV
cloud = ISCV.HashCloud3D(hi_geo, threshold) if is3D else ISCV.HashCloud2D(hi_geo, threshold)
scores,matches,matches_splits = cloud.score(lo_geo.copy())
# the indices of the closest 3 hi verts to each lo vert
D = [matches[m0+np.argsort(scores[m0:m1])[:3]] if m0 != m1 else [] for m0,m1 in zip(matches_splits[:-1],matches_splits[1:])]
# for speed-up, compute all the triangles involving each hi vertex.
T = [[] for x in xrange(numVertsHi)]
for ti,tri in enumerate(triangles):
for tj in tri: T[tj].append(tri)
bads = []
for vi,(lo_x,nearIndices,ws,xis) in enumerate(zip(lo_geo,D,weights,indices)):
best = -10
for ni in nearIndices:
for tri in T[ni]:
xws = lunterp(hi_geo[tri], lo_x)
sc = np.min(xws)
if sc > best: # pick the best triangle (the one that it's closest to being inside)
best = sc
xis[:] = tri
ws[:] = xws
if best >= 0: break
if best >= 0: break
# the vertex *might* not be inside any of these triangles
if best < -0.1:
bads.append(vi)
ws[:] = 0.0 # ensure there's no weight
xis[:] = -1 # and no label
if len(bads):
print 'vertices outside',len(bads)
print bads[:10],'...'
which = np.where(indices[:,0] != -1)[0]
print len(which), 'vertices inside'
return which,weights[which],indices[which]
def getMapping2(hi_geo, triangles, lo_geo, threshold = 20.0):
'''given a hi-res geometry and topology, and a lo-res geometry, find the triangles and barycentric weights that
when applied to the hi-res geometry, best fit the lo-res geometry.
The mapping is returned as a list of weight triples and a list of index triples, per vertex.
The output vertex is the weighted sum of the extracted indicated source vertices.'''
is3D = (hi_geo.shape[1] == 3)
lunterp = lunterp3D if is3D else lunterp2D
numVertsHi = hi_geo.shape[0]
which = np.where((lo_geo[:,0] < 1e10) * (lo_geo[:,0] > -1e10))[0]
numVertsLo = len(lo_geo)
weights = np.zeros((numVertsLo,3),dtype=np.float32)
indices = -np.ones((numVertsLo,3),dtype=np.int32)
import ISCV
cloud = ISCV.HashCloud3D(hi_geo, threshold) if is3D else ISCV.HashCloud2D(hi_geo, threshold)
scores,matches,matches_splits = cloud.score(lo_geo[which].copy())
# the indices of the closest 3 hi verts to each lo vert
D = [matches[m0+np.argsort(scores[m0:m1])[:3]] if m0 != m1 else [] for m0,m1 in zip(matches_splits[:-1],matches_splits[1:])]
# for speed-up, compute all the triangles involving each hi vertex.
T = [[] for x in xrange(numVertsHi)]
for ti,tri in enumerate(triangles):
for tj in tri: T[tj].append(tri)
bads = []
for (vi,lo_x,nearIndices,ws,xis) in zip(which,lo_geo[which],D,weights[which],indices[which]):
best = -10
for ni in nearIndices:
for tri in T[ni]:
xws = lunterp(hi_geo[tri], lo_x)
sc = np.min(xws)
if sc > best: # pick the best triangle (the one that it's closest to being inside)
best = sc
xis[:] = tri
ws[:] = xws
if best >= 0: break
if best >= 0: break
# the vertex *might* not be inside any of these triangles
if best < -0.1:
bads.append(vi)
if len(bads):
print 'vertices outside',len(bads)
print bads[:10],'...'
return weights,indices
def topologicalMappings(uvs, triangles, uv2s, triangles2, threshold = 0.02):
a2b = getMapping(uvs, triangles, uv2s, threshold)
b2a = getMapping(uv2s, triangles2, uvs, threshold)
return a2b,b2a
def computeTopoMap(ted_obj, tony_obj, ted_vts_copy = None, tony_vts_copy = None):
'''Compute the direct mapping between geometries. Take into account differences in topology
using the (possibly adjusted to make them conform better) texture coordinates.'''
if ted_vts_copy is None: ted_vts_copy = ted_obj['vt']
if tony_vts_copy is None: tony_vts_copy = tony_obj['vt']
#assert(np.all(ted_obj['tris'] == tony_obj['tris'])) # TODO WHY? see 633
(mw,mws,mis),(mw2,mw2s,mi2s) = topologicalMappings(ted_vts_copy, ted_obj['tris'], tony_vts_copy, tony_obj['tris'])
x2s = renderGeo(tony_obj['v'],mw2s,mi2s)
nearVerts = findCloseVerts(ted_obj['v'][mw])
D = computeLocalCoordinates2(ted_obj['v'][mw], x2s, nearVerts)
return (mw,mws,mis),(mw2,mw2s,mi2s),x2s,D
def renderGeo(xs,weights,indices,out=None):
numVerts = weights.shape[0]
if out is None: out = np.zeros((numVerts,3),dtype=np.float32)
np.sum(weights.reshape(numVerts,-1,1)*xs[indices],axis=1,out=out)
#for rs,xws,xis in zip(out,weights,indices): np.dot(xws,xs[xis],out=rs) # equivalent code
return out
'''Given multiple shapes, each vertex can be expressed as some function of its neighbouring vertices.'''
'''Spectral decomposition might help'''
def flipMouth(obj):
uvs = obj['vt']
which = np.where((uvs[:,1] < 0.182) * (uvs[:,0] > 0.45) * (uvs[:,0] < 0.55))[0]
uvs[which,0] = 1.0 - uvs[which,0]
#@profile
def connectedComponents(triangles):
numVerts = np.max(triangles)+1
T = np.arange(numVerts,dtype=np.int32)
while True:
tmp = T.copy()
for tri in triangles: T[tri] = np.min(T[tri])
if np.all(T == tmp): break
groups = np.unique(T)
components = []
for gval in groups:
components.append(np.where(T == gval)[0])
reorder = np.argsort(map(len,components))[::-1]
return [components[i] for i in reorder]
def rotate90(obj, t=90.0):
t = np.radians(t)
vs = obj['v']
vns = obj['vn']
rot = np.array([[np.cos(t),-np.sin(t),0],[np.sin(t),np.cos(t),0],[0,0,1]],dtype=np.float32)
vs[:] = np.dot(vs,rot)*0.1
vns[:] = np.dot(vns,rot)
#vs[:] *= (np.max(vs[:,1]) - vs[:,1].reshape(-1,1) + 1)/(np.mean(vs[:,1]**2)**0.5)
#@profile
def readFlatObjFlipMouth(obj_filename):
obj = read_OBJ(obj_filename)
flatten_OBJ_and_x10(obj)
flipMouth(obj)
return obj
def makeLoResShapeMat(hi_rest, hi_mat, weights, indices):
'''given a hi-res shape matrix and points derived from it by barycentric triangle weights, compute the lo-res shape matrix.'''
lo_rest = renderGeo(hi_rest, weights, indices)
lo_mat = np.zeros((hi_mat.shape[0],lo_rest.shape[0],3),dtype=np.float32)
print 'makeLoResShapeMat shapes',hi_mat.shape,lo_mat.shape
for hi,lo in zip(hi_mat,lo_mat):
renderGeo(hi, weights, indices, out=lo)
print 'done'
return lo_rest, lo_mat
def fitLoResShapeMat_old(lo_rest, lo_mat, pts, indices=None, bounds=None):
'''given a lo-res shape matrix and points, find the vector that minimises the rms error.
to solve only a subset of the points, provide the indices.
this is simplistic for now!'''
if indices is None: indices = np.arange((lo_rest.shape[0]),dtype=np.int32)
M = lo_mat[:,indices,:].reshape(-1,len(indices)*3)
v = (pts - lo_rest[indices]).reshape(-1)
#ret = np.linalg.lstsq(M.T,v,rcond = 1e-2)[0]
ret = np.linalg.lstsq(np.dot(M,M.T)+1e0*np.eye(M.shape[0]),np.dot(M,v),rcond = 1e-2)[0]
print 'stats',np.min(ret),np.mean(ret),np.max(ret)
return ret
def fitLoResShapeMat(lo_rest, lo_mat, pts, Aoffset=10.0, Boffset=3.0, x_0=None, indices=None, bounds=None):
'''Solve for blendshape weights that give the best constrained fit of model to target.
M_verts*bshapes x_bshapes + neutral_verts = target_verts.
M x + (N-T) = 0
x^T M^T M x + 2 x^T M^T (N-T) + (N-T)^T (N-T) = 0. # ignore third term, which is strictly positive
Add regularising terms: x ~ x_0 and sum(x).
Aoffset * (x^T x - 2 x^T x_0 + x_0^T x_0). # ignore third term, which is strictly positive
Boffset * sum(x).'''
# Objective function and Jacobian for quadratic solver
def ObjectiveFunction(x,A,B): return np.dot(x.T, (np.dot(A,x) + B))
def Jacobian(x,A,B): return np.dot(A,2*x) + B
if x_0 is None or len(x_0) != lo_mat.shape[0]: x_0 = np.zeros(lo_mat.shape[0],dtype=np.float64)
if indices is None: indices = np.arange((lo_rest.shape[0]),dtype=np.int32)
if len(indices) == 0: return x_0 # argh!
M = lo_mat[:,indices,:].reshape(-1,len(indices)*3)
v = (lo_rest[indices] - pts).reshape(-1)
A = np.dot(M, M.T).astype(np.float64)
A[np.diag_indices_from(A)] += (Aoffset + 1e-8)
B = np.array(2*np.dot(M, v) + Boffset - (2*Aoffset)*x_0,dtype=np.float64)
x = x_0.copy() #np.ones(M.shape[0],dtype=np.float64)*0.5
from scipy.optimize import minimize
res = minimize(ObjectiveFunction, x, args=(A,B), method='TNC',bounds=bounds,jac=Jacobian)
#print '\r',res['fun'],len(np.where(res.x)[0]); sys.stdout.flush()
return res.x
def animateHead(newFrame):
global ted_geom,ted_geom2,ted_shape,tony_geom,tony_shape,tony_geom2,tony_obj,ted_obj,diff_geom,c3d_frames
global tony_shape_vector,tony_shape_mat,ted_lo_rest,ted_lo_mat,ted_lo_which,c3d_points
global md,movies
tony_geom.image,tony_geom.bindImage,tony_geom.bindId = ted_geom.image,ted_geom.bindImage,ted_geom.bindId # reuse the texture!
fo = 55
MovieReader.readFrame(md, seekFrame=((newFrame+fo)/2))
view = QApp.view()
frac = (newFrame % 200) / 100.
if (frac > 1.0): frac = 2.0 - frac
fi = newFrame%len(c3d_frames)
frame = c3d_frames[fi][ted_lo_which]
which = np.where(frame[:,3] == 0)[0]
x3ds = frame[which,:3]
#print which,x3ds.shape,ted_lo_rest.shape,ted_lo_mat.shape
bnds = np.array([[0,1]]*ted_lo_mat.shape[0],dtype=np.float32)
#print len(ted_lo_which),len(which),ted_lo_which,which
tony_shape_vector[:] = fitLoResShapeMat(ted_lo_rest, ted_lo_mat, x3ds, Aoffset=10.0, Boffset=3.0, x_0=tony_shape_vector, indices=which, bounds = bnds)
#global tony_shape_vectors; tony_shape_vector[:] = tony_shape_vectors[newFrame%len(tony_shape_vectors)]
#tony_shape_vector *= 0.
#tony_shape_vector += (np.random.random(len(tony_shape_vector)) - 0.5)*0.2
if 1:
ted_shape_v = np.dot(ted_shape_mat_T, tony_shape_vector).reshape(-1,3)
else:
import ISCV
ted_shape_v = np.zeros_like(ted_obj['v'])
ISCV.dot(ted_shape_mat_T, tony_shape_vector, ted_shape_v.reshape(-1))
tony_shape_v = ted_shape_v
#tony_shape_v = tony_shape['v']*frac
ted_geom.setVs(ted_obj['v'] + ted_shape_v) #ted_shape['v'] * frac)
tony_geom.setVs(tony_obj['v'] + tony_shape_v - np.array([200,0,0],dtype=np.float32))
ted_geom2.setVs(ted_obj['v'] * (1.0 - frac) + tony_tedtopo_obj['v'] * frac + np.array([200,0,0],dtype=np.float32))
#if len(ted_shape_v) == len(tony_shape_v):
# tony_geom2.setVs(tony_obj['v'] + ted_shape_v - [400,0,0])
# diff_geom.setVs(ted_obj['v'] + tony_shape_v - ted_shape_v - [600,0,0])
#print [c3d_labels[i] for i in which]
surface_points.vertices = np.dot(ted_lo_mat.T, tony_shape_vector).T + ted_lo_rest
surface_points.colour = [0,1,0,1] # green
c3d_points.vertices = x3ds
c3d_points.colour = [1,0,0,1] # red
QApp.app.updateGL()
if __name__ == '__main__':
import UI
from UI import QApp, QGLViewer
from UI import GLMeshes
import os, sys
if len(sys.argv) > 1:
filename = sys.argv[1]
geom_dict = flatten_OBJ_and_x10(read_OBJ(filename))
ted_geom = GLMeshes(['ted'],[geom_dict['v']], [geom_dict['tris']], vts = [geom_dict['vt']], transforms = [np.eye(3,4)])
primitives = [ted_geom]
QGLViewer.makeViewer(primitives = primitives)
exit()
from GCore import Calibrate
import MovieReader
import C3D
global ted_geom,ted_geom2,ted_shape,tony_geom,tony_shape,tony_geom2,tony_obj,ted_obj,diff_geom,c3d_frames
global tony_shape_vector,tony_shape_mat,ted_lo_rest,ted_lo_mat,c3d_points
global md,movies
ted_dir = os.path.join(os.environ['GRIP_DATA'],'ted')
wavFilename = os.path.join(ted_dir,'32T01.WAV')
md = MovieReader.open_file(wavFilename)
c3d_filename = os.path.join(ted_dir,'201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
c3d_dict = C3D.read(c3d_filename)
c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'],c3d_dict['fps'],c3d_dict['labels']
if False: # only for cleaned-up data
c3d_subject = 'TedFace'
which = np.where([s.startswith(c3d_subject) for s in c3d_labels])[0]
c3d_frames = c3d_frames[:,which,:]
c3d_labels = [c3d_labels[i] for i in which]
print c3d_labels
if False: # this is for the cleaned-up data (don't apply the other offset...)
offset = Calibrate.composeRT(Calibrate.composeR( (0.0,0.0, 0)),(0,0,-8),0) # 0.902
c3d_frames[:,:,:3] = np.dot(c3d_frames[:,:,:3] - offset[:3,3],offset[:3,:3])[:,:,:3]
offset = Calibrate.composeRT(Calibrate.composeR( (3.9,-38.7, 0)),(-159.6,188.8,123-12),0) # 0.902
c3d_frames[:,:,:3] = np.dot(c3d_frames[:,:,:3] - offset[:3,3],offset[:3,:3])[:,:,:3]
geos = []
dat_directory = os.path.join(os.environ['GRIP_DATA'],'dat')
if False: # experiments involving deformation transfer
geos_filename = 'geos'
if not os.path.exists(geos_filename):
ted_dir = os.environ['GRIP_DATA']
ted_obj = readFlatObjFlipMouth(os.path.join(ted_dir,'ted.obj'))
ted_shape = readFlatObjFlipMouth(os.path.join(ted_dir,'tedopen.obj'))
ted_shape['v'] -= ted_obj['v']
tony_obj = readFlatObjFlipMouth(os.path.join(ted_dir,'tony.obj'))
nearVerts = trianglesToNearVerts(ted_obj['tris'], steps = 15)
IO.save(geos_filename,(ted_obj,ted_shape,tony_obj,nearVerts))
else:
_,(ted_obj,ted_shape,tony_obj,nearVerts) = IO.load(geos_filename)
for target in ['ape']: #['andy','avatar','baboon','bigdog','evilelf','fatbat','feline','fishman','kadel','lizardman','mannequin','shaman','ted','tony','troll','wolf']:
if True:
#target = 'baboon'
target_filename = os.path.join(dat_directory,target+'.dat')
if True: #not os.path.exists(target_filename):
ted_dir = os.path.join(os.environ['GRIP_DATA'],'ted')
tony_obj = readFlatObjFlipMouth(os.path.join(ted_dir,target+'.obj'))
if target == 'ape' or target == 'apenew': flipMouth(tony_obj) # the ape's mouth is already flipped!
print tony_obj['v'].shape, ted_obj['v'].shape
print np.mean(map(len,nearVerts))
vts = ted_obj['vt']
tony_shape = {'v':0*tony_obj['v']}
if True:
print 'computing x-to-x scheme'
ted_ccs = connectedComponents(ted_obj['tris'])
print len(ted_ccs),map(len,ted_ccs)
tony_ccs = connectedComponents(tony_obj['tris'])
print len(tony_ccs),map(len,tony_ccs)
for gp in range(7):
print gp,np.mean(ted_obj['vt'][ted_ccs[gp]],axis=0) - np.mean(tony_obj['vt'][tony_ccs[gp]],axis=0)
ted_vts_copy = ted_obj['vt'].copy()
tony_vts_copy = tony_obj['vt'].copy()
tony_vts_copy[tony_ccs[0]] += np.array([-0.0029, 0],dtype=np.float32)
tony_vts_copy[tony_ccs[3],0] = 0.715+ tony_vts_copy[tony_ccs[3],0]
tony_vts_copy[tony_ccs[4],0] = 0.715+ tony_vts_copy[tony_ccs[4],0]
(mws,mis),(mw2s,mi2s),x2s,D = computeTopoMap(ted_obj, tony_obj, ted_vts_copy, tony_vts_copy)
print len(np.where(mws > 0.98)[0])
tony_tedtopo_obj = { 'v':x2s,'vt':ted_obj['vt'], 'tris':ted_obj['tris'] }
tony_shape = {'v':renderGeo(renderMotion(D, ted_shape['v']), mws,mis)} # reuse everything
elif True:
Dsrc = computeLocalCoordinates(ted_obj['v'], vts, nearVerts)
Dtgt = computeLocalCoordinates(tony_obj['v'], vts, nearVerts)
localMotion = computeMotion(ted_shape['v'], Dsrc)
tony_shape['v']= renderMotion(Dtgt, localMotion)
else:
steps = 3
tony_incr = tony_obj['v'].copy()
ted_incr = ted_obj['v'].copy()
ted_step = ted_shape['v'] * (1.0/steps)
for it in xrange(steps):
Dtgt = computeLocalCoordinates(tony_incr, vts, nearVerts)
Dsrc = computeLocalCoordinates(ted_incr, vts, nearVerts)
localMotion = computeMotion(ted_step, Dsrc)
tony_incr += renderMotion(Dtgt, localMotion)
ted_incr += ted_step
tony_shape['v'][:] = tony_incr - tony_obj['v']
IO.save(target_filename,(tony_obj,tony_shape))
else:
_,(tony_obj,tony_shape) = IO.load(target_filename)
else: #except Exception, e:
print 'oops',target,e
if True:
geos_filename = os.path.join(dat_directory,'ted_new.dat')
if not os.path.exists(geos_filename):
ted_obj = readFlatObjFlipMouth(os.path.join(ted_dir,'Ted_NEWRIG_Neutral_Moved.obj'))
else:
_,(ted_obj,nearVerts) = IO.load(geos_filename)
target = 'andy'
tony_obj = readFlatObjFlipMouth(os.path.join(ted_dir,target+'.obj'))
_,tony_shapes = IO.load(os.path.join(dat_directory,target+'_shapes.dat'))
num_shapes = len(tony_shapes)
print num_shapes
tony_shape_mat = np.zeros((num_shapes,tony_shapes[0]['v'].shape[0],3),dtype=np.float32)
for t,ts in zip(tony_shape_mat, tony_shapes): t[:] = ts['v']
tony_shape_vector = 0.2*np.ones(num_shapes,dtype=np.float32)
tony_shape_v = np.dot(tony_shape_mat.T, tony_shape_vector).T
tony_tedtopo_obj = {'v': tony_obj['v'].copy() }
ted_shape = {'v':tony_shape_v.copy()}
tony_shape = {'v':tony_shape_v.copy()}
if True: # ted_shape_mat
try:
ted_shape_mat = IO.load('ted_shape_mat')[1]
except:
geos_filename = os.path.join(dat_directory,'ted_new.dat')
if not os.path.exists(geos_filename):
ted_obj = readFlatObjFlipMouth(os.path.join(ted_dir,'Ted_NEWRIG_Neutral_Moved.obj'))
else:
_,(ted_obj,nearVerts) = IO.load(geos_filename)
_,ted_shapes = IO.load(os.path.join(dat_directory,'ted_shapes.dat'))
num_shapes = len(ted_shapes)
ted_shape_mat = np.zeros((num_shapes,ted_shapes[0]['v'].shape[0],3),dtype=np.float32)
for t,ts in zip(ted_shape_mat, ted_shapes): t[:] = ts['v']
IO.save('ted_shape_mat',ted_shape_mat)
# HACK scale ted... it looks like the correct value is 0.90197, which Shridhar introduced
ted_obj['v'] *= 0.902
ted_shape_mat *= 0.902
lo_geo = c3d_frames[0,:,:3]
ted_lo_which,weights,indices = getMapping(ted_obj['v'], ted_obj['tris'], lo_geo, threshold = 20.0)
#for it,v in enumerate(zip(weights,indices)): print it,v
ted_lo_rest, ted_lo_mat = makeLoResShapeMat(ted_obj['v'], ted_shape_mat, weights, indices)
print np.sum(ted_shape_mat),np.sum(ted_lo_mat)
ted_shape_mat_T = ted_shape_mat.reshape(ted_shape_mat.shape[0],-1).T.copy()
if 0: # diagnostic
tmp = np.sort(np.sum((ted_shape_mat_T!=0), axis=1))
dtmp = tmp[1:]-tmp[:-1]
diff = np.where(dtmp)[0]
print dtmp[diff]
print 'sort',diff[1:]-diff[:-1]
u,s,vt = np.linalg.svd(ted_shape_mat_T, full_matrices=False)
print s/s[0]
#ted_obj['v'] -= np.mean(ted_obj['v'],axis=0)
#tony_obj['v'] -= np.mean(tony_obj['v'],axis=0)
#tony_tedtopo_obj['v'] -= np.mean(tony_tedtopo_obj['v'],axis=0)
#rotate90(tony_obj,10)
tony_obj['v'] -= np.array([0, 1750, 0],dtype=np.float32)
tony_tedtopo_obj['v'] -= np.array([0, 1750, 0],dtype=np.float32)
display_offset = np.array([0,1000,0],dtype=np.float32) # show above the ground plane
tony_obj['v'] += display_offset
tony_tedtopo_obj['v'] += display_offset
ted_obj['v'] += display_offset
ted_lo_rest += display_offset
c3d_frames[:,:,:3] += display_offset
offset[:3,3] -= np.dot(offset[:3,:3],display_offset)
draw_normals = False
if draw_normals:
geos.append(UI.GLGeometry(vs=zip(tony_obj['v'],tony_obj['v']+Dtgt[:,:,0]*0.005), tris=range(Dtgt.shape[0]*2), drawStyle = 'wire',colour=[1,0,0,1]))
geos.append(UI.GLGeometry(vs=zip(tony_obj['v'],tony_obj['v']+Dtgt[:,:,1]*0.005), tris=range(Dtgt.shape[0]*2), drawStyle = 'wire',colour=[0,1,0,1]))
geos.append(UI.GLGeometry(vs=zip(tony_obj['v'],tony_obj['v']+Dtgt[:,:,2]*0.005), tris=range(Dtgt.shape[0]*2), drawStyle = 'wire',colour=[0,0,1,1]))
#Dsrc = computeLocalCoordinates(ted_obj['v'], vts, nearVerts)
#Dtgt = computeLocalCoordinates(ted_obj['v'] + ted_shape['v'], vts, nearVerts)
#localMotion = computeMotion((tony_obj['v'] + [200,0,0])-ted_obj['v'], Dsrc)
#tony_shape['v']= renderMotion(Dtgt, localMotion)+ (ted_obj['v'] + ted_shape['v']) - (tony_obj['v'] + [200,0,0])
drawStyle='smooth'#'wire_over_smooth'
ted_geom = GLMeshes(['ted'],[ted_obj['v']], [ted_obj['tris']], vts = [ted_obj['vt']], transforms = [np.eye(3,4)])
#ted_geom = UI.GLGeometry(vs = ted_obj['v'], vts = ted_obj['vt'], tris = ted_obj['tris'], transformData=None, drawStyle=drawStyle)
xspacer = np.array([200,0,0],dtype=np.float32)
ted_geom2 = UI.GLGeometry(vs = ted_obj['v'] + xspacer, vts = ted_obj['vt'], tris = ted_obj['tris'], transformData=None, drawStyle=drawStyle)
tony_geom = GLMeshes(['tony'], [tony_obj['v'] - xspacer], [tony_obj['tris']], vts=[tony_obj['vt']], transforms=[np.eye(3,4)]) #UI.GLGeometry(vs = tony_obj['v'] - xspacer, vts = tony_obj['vt'], tris = tony_obj['tris'], transformData=None, drawStyle=drawStyle)
#tony_geom2 = UI.GLGeometry(vs = tony_obj['v'] + [-400,0,0], vts = tony_obj['vt'], tris = tony_obj['tris'], transformData=None, drawStyle=drawStyle)
#diff_geom = UI.GLGeometry(vs = ted_obj['v'] + [-600,0,0], vts = ted_obj['vt'], tris = ted_obj['tris'], transformData=None, drawStyle=drawStyle)
# the tiara points are defined in an svg file, in units of bogons
# in the file there is a matrix scale of 0.95723882 (dots per bogon) and a diameter of 14.06605 bogons = 3.8mm
# 25.4 mmpi / 90 dpi * 0.95723882 dpb_from_svg = 3.8mm / 14.06605 bogon_diameter_from_svg = 0.270 mm_per_bogon
mm_per_bogon = 0.270154067
head_pts_bogons = np.array([
[ 23.24843216, -273.46289062],
[ 53.5888443 , -290.25338745],
[ 65.81463623, -341.46832275],
[ 101.07259369, -361.53491211],
[ 122.78975677, -391.83300781],
[ 136.22935486, -352.81604004],
[ 114.80623627, -318.71374512],
[ 167.69758606, -335.17553711],
[ 214.97885132, -337.76928711],
[ 268.80731201, -338.53485107],
[ 316.49282837, -331.34683228],
[ 350.15072632, -349.76928711],
[ 363.43197632, -315.17553711],
[ 170.24447632, -407.59741211],
[ 221.22885132, -405.47241211],
[ 270.54135132, -409.98803711],
[ 325.93197632, -398.12866211],
[ 362.99447632, -379.26928711],
[ 395.51010132, -350.64428711],
[ 426.2131958 , -314.92553711],
[ 417.29135132, -288.36303711],
[ 447.74447632, -274.65991211]], dtype=np.float32)
head_pts = np.zeros((head_pts_bogons.shape[0],3),dtype=np.float32)
head_pts[:,0] = mm_per_bogon*head_pts_bogons[:,0]
head_pts[:,1] = -mm_per_bogon*head_pts_bogons[:,1] # our y-axis is up
#print head_pts
head_pts += [150,-100,0] - np.mean(head_pts,axis=0)
#head_pts += [85,-193,0]
head_pts = np.dot(head_pts - offset[:3,3],offset[:3,:3])
c3d_points = UI.GLPoints3D([])
surface_points = UI.GLPoints3D([])
head_points = UI.GLPoints3D(head_pts); head_points.colour = (0,1,1,1.0)
# generate the animation
if False:
tsv_filename = 'tony_shape_vectors6'
try:
tony_shape_vectors = IO.load(tsv_filename)[1]
except:
tony_shape_vectors = np.zeros((len(c3d_frames), ted_lo_mat.shape[0]),dtype=np.float32)
bnds = np.array([[0,1]]*ted_lo_mat.shape[0],dtype=np.float32)
x_0 = np.zeros(ted_lo_mat.shape[0],dtype=np.float32)
for fi, frame in enumerate(c3d_frames):
which = np.where(frame[:,3] == 0)[0]
x3ds = frame[which,:3]
#print which,x3ds.shape,ted_lo_rest.shape,ted_lo_mat.shape
x_0[:] = tony_shape_vectors[fi] = fitLoResShapeMat(ted_lo_rest, ted_lo_mat, x3ds, indices=which, bounds=bnds, x_0=x_0)
print '\rfitting',fi,; sys.stdout.flush()
IO.save(tsv_filename,tony_shape_vectors)
primitives = [head_points,c3d_points,surface_points,ted_geom,ted_geom2,tony_geom]
primitives.extend(geos)
QGLViewer.makeViewer(timeRange = (0,len(c3d_frames)-1), callback = animateHead, primitives = primitives)
exit()
|
{"hexsha": "813894db8b8d69a2f9b922451f207c10c66a81a2", "size": 32131, "ext": "py", "lang": "Python", "max_stars_repo_path": "IO/OBJReader.py", "max_stars_repo_name": "davidsoncolin/IMS", "max_stars_repo_head_hexsha": "7a9c44275b4ebf5b16c04338628425ec876e3a0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "IO/OBJReader.py", "max_issues_repo_name": "davidsoncolin/IMS", "max_issues_repo_head_hexsha": "7a9c44275b4ebf5b16c04338628425ec876e3a0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IO/OBJReader.py", "max_forks_repo_name": "davidsoncolin/IMS", "max_forks_repo_head_hexsha": "7a9c44275b4ebf5b16c04338628425ec876e3a0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-18T12:11:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-18T12:11:53.000Z", "avg_line_length": 42.389182058, "max_line_length": 260, "alphanum_fraction": 0.6810245557, "include": true, "reason": "import numpy,from scipy", "num_tokens": 10890}
|
// Copyright (c) 2010 by BBNT Solutions LLC
// All Rights Reserved.
#include <boost/algorithm/string.hpp>
#include "Generic/common/leak_detection.h" // This must be the first #include
#include "Generic/theories/Parse.h"
#include "Generic/theories/RelMention.h"
#include "Generic/theories/RelMentionSet.h"
#include "Generic/theories/SentenceTheory.h"
#include "Generic/theories/SynNode.h"
#include "Generic/theories/Value.h"
#include "Generic/theories/ValueMentionSet.h"
#include "Generic/theories/ValueSet.h"
#include "Generic/theories/Proposition.h"
#include "Generic/theories/PropositionSet.h"
#include "Generic/theories/DocTheory.h"
#include "Generic/patterns/features/PatternFeature.h"
#include "Generic/patterns/features/PatternFeatureSet.h"
#include "Generic/patterns/features/PropPFeature.h"
#include "Generic/patterns/features/MentionPFeature.h"
#include "Generic/icews/TenseDetection.h"
#include "Generic/icews/Stories.h"
// copied from QueryDate
// these are expressions for specifically parsing TIMEX normalizations of dates
// YYYY-MM-DD
const boost::wregex TenseDetection::_timex_regex_ymd(L"^([12][0-9][0-9][0-9])-([0123]?[0-9])-([0123]?[0-9]).*");
// YYYY-MM
const boost::wregex TenseDetection::_timex_regex_ym(L"^([12][0-9][0-9][0-9])-([01]?[0-9]).*");
// YYYY-W##
const boost::wregex TenseDetection::_timex_regex_yw(L"^([12][0-9][0-9][0-9])-W([012345][0-9]).*");
// ####T
const boost::wregex TenseDetection::_timex_clock_time(L"^([12][0-9][0-9][0-9])T.*");
// YYYY
const boost::wregex TenseDetection::_timex_regex_y(L"^([12][0-9][0-9][0-9]).*");
// YYY
const boost::wregex TenseDetection::_timex_regex_decade(L"^([12][0-9][0-9])$");
// past XX months or years or decades
const boost::wregex TenseDetection::_timex_regex_past_my(L"^(P[0-9]+[MYD]E?)");
const boost::wregex TenseDetection::_superlative(L"the \\S*est ");
void TenseDetection::setTense(PatternFeatureSet_ptr match, ICEWSEventMentionFinder::MatchData& matchData, const DocTheory *docTheory, SentenceTheory *sentTheory) {
Symbol tense = getTense(match, matchData, docTheory, sentTheory);
if (!tense.is_null()) {
matchData.tense = tense;
return;
}
}
Symbol TenseDetection::getTense(PatternFeatureSet_ptr match, ICEWSEventMentionFinder::MatchData& matchData, const DocTheory *docTheory, SentenceTheory *sentTheory) {
matchData.timeValueMention = NULL;
MentionSet *ms = sentTheory->getMentionSet();
ValueMentionSet *vms = sentTheory->getValueMentionSet();
PropositionSet *ps = sentTheory->getPropositionSet();
std::wstring event_code_str = matchData.eventCode.to_string();
bool is_violence = (event_code_str.find(L"18") == 0 || event_code_str.find(L"19") == 0);
// Look for dates that directly modify propositions/mentions in the PatternFeatureSet
bool is_historical = false;
Symbol tense = Symbol();
for (size_t f = 0; f < match->getNFeatures(); f++) {
const Proposition *prop = 0;
const Mention *ment = 0;
const SynNode *node = 0;
if (PropPFeature_ptr sf = boost::dynamic_pointer_cast<PropPFeature>(match->getFeature(f))) {
prop = sf->getProp();
node = prop->getPredHead();
if (prop->getNArgs() > 0 && prop->getArg(0)->getRoleSym() == Argument::REF_ROLE &&
prop->getArg(0)->getType() == Argument::MENTION_ARG)
ment = prop->getArg(0)->getMention(ms);
} else if (MentionPFeature_ptr sf = boost::dynamic_pointer_cast<MentionPFeature>(match->getFeature(f))) {
ment = sf->getMention();
node = ment->getNode();
prop = ps->getDefinition(ment->getIndex());
} else continue;
// constructions like "he was sentenced thursday for his bombing of the city last month" are dangerous
int prop_hist = getHistoricity(docTheory, sentTheory, prop, matchData.timeValueMention, 0);
if (prop_hist == OLDER_THAN_ONE_MONTH_ONGOING && (event_code_str == L"173" || event_code_str == L"175")) {
Symbol predSym = prop->getPredSymbol();
if (predSym == Symbol(L"detained") || predSym == Symbol(L"imprisoned") || predSym == Symbol(L"jailed"))
prop_hist = OLDER_THAN_ONE_MONTH;
}
if (is_violence && prop != 0 && prop_hist == WITHIN_ONE_MONTH) {
Symbol predSym = prop->getPredSymbol();
if (predSym == Symbol(L"wanted") || predSym == Symbol(L"arrested") || predSym == Symbol(L"condemned") || predSym == Symbol(L"denounced") ||
predSym == Symbol(L"vilified") || predSym == Symbol(L"admonished") || predSym == Symbol(L"blasted") || predSym == Symbol(L"assailed") ||
predSym == Symbol(L"castigated") || predSym == Symbol(L"censured") || predSym == Symbol(L"castigated") || predSym == Symbol(L"chastised") ||
predSym == Symbol(L"chided") || predSym == Symbol(L"criticized") || predSym == Symbol(L"criticised") || predSym == Symbol(L"decried") ||
predSym == Symbol(L"denigrated") || predSym == Symbol(L"deplored") || predSym == Symbol(L"derided") || predSym == Symbol(L"lambasted") ||
predSym == Symbol(L"rebuked") || predSym == Symbol(L"scolded") || predSym == Symbol(L"slammed") || predSym == Symbol(L"court-martialed") ||
predSym == Symbol(L"charged") || predSym == Symbol(L"tried") || predSym == Symbol(L"sentenced"))
continue;
}
if (prop_hist == WITHIN_ONE_MONTH)
return ICEWSEventMention::CURRENT_TENSE;
if (tense.is_null() || tense == ICEWSEventMention::ONGOING_TENSE) {
if (prop_hist == OLDER_THAN_ONE_MONTH)
tense = ICEWSEventMention::HISTORICAL_TENSE;
else if (prop_hist == OLDER_THAN_ONE_MONTH_ONGOING)
tense = ICEWSEventMention::ONGOING_TENSE;
}
if (tense.is_null() || tense == ICEWSEventMention::ONGOING_TENSE) {
Symbol temp_tense = getTense(node);
if (!temp_tense.is_null())
tense = temp_tense;
}
if (tense.is_null() || tense == ICEWSEventMention::ONGOING_TENSE) {
Symbol temp_tense = getTense(sentTheory, ment);
if (!temp_tense.is_null())
tense = temp_tense;
}
}
if (!tense.is_null())
return tense;
// Look for full-sentence modifying values (anything at the beginning of a sentence)
// Also look for "in DATE" constructions which are more likely to apply to the whole sentence context
for (int v = 0; v < vms->getNValueMentions(); v++) {
ValueMention *val = vms->getValueMention(v);
if (val->getStartToken() == 0) {
if (getHistoricity(docTheory, sentTheory, val) == OLDER_THAN_ONE_MONTH){
matchData.timeValueMention = val;
return ICEWSEventMention::HISTORICAL_TENSE;
}
} else if (val->getStartToken() < 3 && 3 < sentTheory->getTokenSequence()->getNTokens()) {
int counter = 0;
std::wstring tokStr = sentTheory->getTokenSequence()->getToken(0)->getSymbol().to_string();
boost::to_lower(tokStr);
if (tokStr == L"earlier" || tokStr == L"later" || tokStr == L"already") {
tokStr = sentTheory->getTokenSequence()->getToken(1)->getSymbol().to_string();
boost::to_lower(tokStr);
}
if ((tokStr == L"in" || tokStr == L"on" || tokStr == L"but") && getHistoricity(docTheory, sentTheory, val) == OLDER_THAN_ONE_MONTH){
matchData.timeValueMention = val;
return ICEWSEventMention::HISTORICAL_TENSE;
}
if ((tokStr == L"between" || tokStr == L"from") && getHistoricity(docTheory, sentTheory, val) == OLDER_THAN_ONE_MONTH){
matchData.timeValueMention = val;
return ICEWSEventMention::ONGOING_TENSE;
}
}
}
// A violent event is historical if any value mention in the sentence is historical
// We do not do this for 14 (Protest) and 17 (Coerce) [tested empirically]
if (is_violence) {
for (int v = 0; v < vms->getNValueMentions(); v++) {
ValueMention *val = vms->getValueMention(v);
int historicity = getHistoricity(docTheory, sentTheory, val);
if (historicity == OLDER_THAN_ONE_MONTH){
matchData.timeValueMention = val;
return ICEWSEventMention::HISTORICAL_TENSE;
}
else if (historicity == OLDER_THAN_ONE_MONTH_ONGOING){
matchData.timeValueMention = val;
return ICEWSEventMention::ONGOING_TENSE;
}
}
}
return Symbol();
}
Symbol TenseDetection::getTense(const SynNode* node) {
if (node == 0)
return Symbol();
const SynNode *dominatingSNode = node;
while (dominatingSNode != 0 && dominatingSNode->getTag() != Symbol(L"S")) {
dominatingSNode = dominatingSNode->getParent();
}
// Get constructions like "Three years after ..." ??
if (dominatingSNode != 0 && dominatingSNode->getParent() != 0) {
const SynNode *tempNode = dominatingSNode->getParent();
if (tempNode->getTag() == Symbol(L"SBAR") && tempNode->getHeadIndex() != 0 && tempNode->getHeadWord() == Symbol(L"after")) {
const SynNode *possibleTime = tempNode->getChild(0);
if (possibleTime->getHeadWord() == Symbol(L"years") ||
possibleTime->getHeadWord() == Symbol(L"months") ||
possibleTime->getHeadWord() == Symbol(L"decades") ||
possibleTime->getHeadWord() == Symbol(L"centuries") ||
possibleTime->getHeadWord() == Symbol(L"year") ||
possibleTime->getHeadWord() == Symbol(L"month") ||
possibleTime->getHeadWord() == Symbol(L"decade") ||
possibleTime->getHeadWord() == Symbol(L"century"))
{
//std::cout << "node historical\n";
return ICEWSEventMention::HISTORICAL_TENSE;
}
}
}
return Symbol();
}
Symbol TenseDetection::getTense(SentenceTheory *sentTheory, const Mention* mention) {
if (mention == 0)
return Symbol();
// look for historical modifiers on any of these mentions
PropositionSet *ps = sentTheory->getPropositionSet();
for (int p = 0; p < ps->getNPropositions(); p++) {
const Proposition *prop = ps->getProposition(p);
if (prop->getPredSymbol().is_null())
continue;
std::wstring predicate = prop->getPredSymbol().to_string();
if (prop->getPredType() == Proposition::MODIFIER_PRED &&
prop->getNArgs() > 0 && prop->getArg(0)->getType() == Argument::MENTION_ARG &&
prop->getArg(0)->getMentionIndex() == mention->getIndex())
{
// then this is a modifier for this mention
if (predicate == L"long-standing" || predicate == L"longstanding" ||
predicate == L"standing" || predicate == L"longest-standing" ||
predicate == L"long-running" || predicate == L"longrunning" ||
predicate == L"longest-running")
{
//std::cout << "mention ongoing historical\n";
return ICEWSEventMention::ONGOING_TENSE;
} else if (predicate == L"past") {
//std::cout << "mention past historical\n";
return ICEWSEventMention::HISTORICAL_TENSE;
} else if (!mention->getEntityType().matchesPER() && predicate == L"-old") {
//std::cout << "mention -old historical\n";
return ICEWSEventMention::ONGOING_TENSE;
}
}
// anniversary of X
if (prop->getPredType() == Proposition::NOUN_PRED &&
(predicate == L"decade" || predicate == L"decades" || predicate == L"year" || predicate == L"years" ||
predicate == L"months" || predicate == L"anniversary" || predicate == L"commemoration"))
{
for (int a = 0; a < prop->getNArgs(); a++) {
if (prop->getArg(a)->getRoleSym() == Symbol(L"of") &&
prop->getArg(a)->getType() == Argument::MENTION_ARG &&
prop->getArg(a)->getMentionIndex() == mention->getIndex())
{
if (predicate == L"anniversary" || predicate == L"commemoration") {
//std::cout << "mention anniversary historical\n";
return ICEWSEventMention::HISTORICAL_TENSE;
} else {
//std::cout << "months/years/decades of... historical\n";
return ICEWSEventMention::ONGOING_TENSE;
}
}
}
}
}
return Symbol();
}
int TenseDetection::getHistoricity(const DocTheory *docTheory, SentenceTheory *sentTheory, const Proposition *prop, ValueMention *&time, int depth) {
if (prop == 0){
return UNKNOWN;
}
// Avoid cycles
if (depth > 10){
return UNKNOWN;
}
MentionSet *ms = sentTheory->getMentionSet();
ValueMentionSet *vms = sentTheory->getValueMentionSet();
int referent_index = -1;
for (int a = 0; a < prop->getNArgs(); a++) {
Argument *arg = prop->getArg(a);
if (arg->getRoleSym() == Argument::REF_ROLE)
referent_index = arg->getMentionIndex();
ValueMention *val = getValueFromArgument(ms, vms, arg, false);
if (val != 0) {
int hist = getHistoricity(docTheory, sentTheory, val);
if (hist == OLDER_THAN_ONE_MONTH && isSinceValue(sentTheory, val))
hist = OLDER_THAN_ONE_MONTH_ONGOING;
if (hist != UNKNOWN){
time = val;
return hist;
}
}
if (arg->getRoleSym() == Symbol(L"following") || arg->getRoleSym() == Symbol(L"after") || arg->getRoleSym() == Symbol(L"since")) {
int hist = UNKNOWN;
if (arg->getType() == Argument::PROPOSITION_ARG) {
if (arg->getProposition() != prop)
hist = getHistoricity(docTheory, sentTheory, arg->getProposition(), time, depth + 1);
} else if (arg->getType() == Argument::MENTION_ARG) {
const Proposition *defProp = sentTheory->getPropositionSet()->getDefinition(arg->getMentionIndex());
if (defProp != 0 && defProp != prop)
hist = getHistoricity(docTheory, sentTheory, defProp, time, depth + 1);
}
if (hist == OLDER_THAN_ONE_MONTH && arg->getRoleSym() == Symbol(L"since"))
hist = OLDER_THAN_ONE_MONTH_ONGOING;
if (hist != UNKNOWN){
return hist;
}
}
if (arg->getType() == Argument::MENTION_ARG &&
prop->getPredSymbol() != Symbol(L"sentenced") &&
(arg->getRoleSym() == Symbol(L"in") || arg->getRoleSym() == Symbol(L"for") || arg->getRoleSym() == Argument::TEMP_ROLE))
{
std::wstring headword = arg->getMention(ms)->getNode()->getHeadWord().to_string();
if ((headword == L"months" || headword == L"years" || headword == L"year" || headword == L"decades" || headword == L"decade") &&
!hasSuperlative(sentTheory))
{
std::wstring mentionText = arg->getMention(ms)->getNode()->toTextString();
if (mentionText.find(L"past ") != std::wstring::npos) {
//std::cout << "in/for past months/years/decades\n";
return OLDER_THAN_ONE_MONTH_ONGOING;
} else {
//std::cout << "in/for months/years/decades\n";
return OLDER_THAN_ONE_MONTH;
}
}
}
}
// in TIME when EVENT
// did X in TIME as EVENT
PropositionSet *ps = sentTheory->getPropositionSet();
for (int p = 0; p < ps->getNPropositions(); p++) {
const Proposition *newProp = ps->getProposition(p);
ValueMention *temporalVM = 0;
bool allow_value = false;
for (int a = 0; a < newProp->getNArgs(); a++) {
Argument *arg = newProp->getArg(a);
if (arg->getRoleSym() == Argument::REF_ROLE && arg->getMentionIndex() == referent_index)
allow_value = true;
ValueMention *temp = getValueFromArgument(ms, vms, arg, newProp->getPredSymbol() == Symbol(L"when"));
if (temp != 0 && temp->getType() == Symbol(L"TIMEX2"))
temporalVM = temp;
if (arg->getType() == Argument::PROPOSITION_ARG &&
(arg->getRoleSym() == Symbol(L"when") || arg->getRoleSym() == Symbol(L"as") || arg->getRoleSym() == Symbol(L"after")) &&
arg->getProposition() == prop)
{
allow_value = true;
}
}
if (allow_value && temporalVM != 0) {
time = temporalVM;
return getHistoricity(docTheory, sentTheory, temporalVM);
}
}
return UNKNOWN;
}
bool TenseDetection::hasSuperlative(SentenceTheory *st) {
std::wstring sentString = st->getPrimaryParse()->getRoot()->toTextString();
if (sentString.find(L"the first") != std::wstring::npos)
return true;
boost::wcmatch matchResult;
if (boost::regex_search(sentString.c_str(), matchResult, _superlative))
return true;
return false;
}
ValueMention* TenseDetection::getValueFromArgument(MentionSet *ms, ValueMentionSet *vms, Argument *arg, bool is_known_temp) {
is_known_temp = is_known_temp || arg->getRoleSym() == Argument::TEMP_ROLE;
if (arg->getType() == Argument::MENTION_ARG) {
const Mention *ment = arg->getMention(ms);
ValueMention *val = getValueFromMention(vms, ment, is_known_temp);
if (val != 0)
return val;
if (ment->getMentionType() == Mention::LIST) {
const Mention *child = ment->getChild();
while (child != 0) {
ValueMention *val = getValueFromMention(vms, child, is_known_temp);
if (val != 0)
return val;
child = child->getNext();
}
}
}
return 0;
}
ValueMention* TenseDetection::getValueFromMention(ValueMentionSet *vms, const Mention *ment, bool is_known_temp) {
int stoken = ment->getNode()->getStartToken();
int etoken = ment->getNode()->getEndToken();
bool has_date_head = (ment->getNode()->getHeadPreterm()->getTag() == Symbol(L"DATE-NNP"));
// this is horrible! and copied from ValueMentionPattern!
for (int v = 0; v < vms->getNValueMentions(); v++) {
ValueMention *val = vms->getValueMention(v);
if ((val->getStartToken() == stoken && val->getEndToken() == etoken) ||
((is_known_temp || has_date_head) && val->getStartToken() >= stoken && val->getEndToken() <= etoken))
{
return val;
}
if (has_date_head && val->getStartToken() == stoken)
return val;
}
return 0;
}
bool TenseDetection::isSinceValue(SentenceTheory *st, ValueMention *val) {
if (val->getStartToken() != 0) {
std::wstring precedingTok = st->getTokenSequence()->getToken(val->getStartToken() - 1)->getSymbol().to_string();
boost::to_lower(precedingTok);
if (precedingTok == L"since")
return true;
}
return false;
}
int TenseDetection::getHistoricity(const DocTheory *docTheory, SentenceTheory *st, ValueMention *val) {
// this is somewhat copied from QueryDate
Value *fullValue = docTheory->getValueSet()->getValueByValueMention(val->getUID());
if (fullValue == 0 || fullValue->getTimexVal().is_null())
return UNKNOWN;
// doesn't count if it's preceded by "since" and there is a superlative in the sentence like "the first" or "the -est"
if (isSinceValue(st, val) && hasSuperlative(st)) {
//std::cout << "superlative since, ignoring\n";
return UNKNOWN;
}
std::wstring originalText = L"";
for (int t = val->getStartToken(); t <= val->getEndToken(); t++) {
originalText += st->getTokenSequence()->getToken(t)->getSymbol().to_string();
originalText += L" ";
}
std::transform(originalText.begin(), originalText.end(), originalText.begin(), towlower);
if (originalText.compare(L"the past ") == 0)
return OLDER_THAN_ONE_MONTH;
std::wstring valString = fullValue->getTimexVal().to_string();
//SessionLogger::info("BRANDY") << originalText << ": " << valString << "\n";
// these are often wrong, so don't rely on them here to return recent dates
// remember that originalText will have a space on its end
bool is_untrustworthy = false;
if (originalText.find(L" day ") != std::wstring::npos)
is_untrustworthy = true;
if (originalText.find(L" night") != std::wstring::npos || originalText.compare(L"night ") == 0)
is_untrustworthy = true;
std::string publicationDate = Stories::getStoryPublicationDate(docTheory->getDocument());
if (publicationDate.empty())
return UNKNOWN;
if (publicationDate.size() != 10) {
SessionLogger::info("ICEWS") << "Unexpected publication date format"
<< '"' << publicationDate << '"';
return UNKNOWN;
}
int doc_year = atoi(publicationDate.substr(0,4).c_str());
int doc_month = atoi(publicationDate.substr(5,2).c_str());
int doc_day = atoi(publicationDate.substr(8,2).c_str());
int target_month = doc_month;
int target_year = doc_year;
int target_day = doc_day;
// subtract month (crudely)
if (target_month > 1)
target_month--;
else {
target_month = 12;
target_year--;
}
// run a regex over the timex value (sadly these aren't always straight year/month/date)
boost::wcmatch matchResult;
if (boost::regex_match(valString.c_str(), matchResult, _timex_clock_time)) {
return UNKNOWN;
} else if (boost::regex_match(valString.c_str(), matchResult, _timex_regex_ymd)) {
std::wstring wstr = matchResult[1];
int year = _wtoi(wstr.c_str());
wstr = matchResult[2];
int month = _wtoi(wstr.c_str());
wstr = matchResult[3];
int day = _wtoi(wstr.c_str());
// same year/month
if (year == doc_year && month == doc_month)
return (is_untrustworthy ? UNKNOWN : WITHIN_ONE_MONTH);
// previous month, later day
if (((year == doc_year && month == doc_month - 1) || (year == doc_year - 1 && month == 12 && doc_month == 1)) && day >= doc_day)
return (is_untrustworthy ? UNKNOWN : WITHIN_ONE_MONTH);
if (year < target_year || (year == target_year && month < target_month) || (year == target_year && month == target_month && day < target_day))
return OLDER_THAN_ONE_MONTH;
} else if (boost::regex_match(valString.c_str(), matchResult, _timex_regex_ym)) {
std::wstring wstr = matchResult[1];
int year = _wtoi(wstr.c_str());
wstr = matchResult[2];
int month = _wtoi(wstr.c_str());
if (year == doc_year && month == doc_month)
return (is_untrustworthy ? UNKNOWN : WITHIN_ONE_MONTH);
if (year < target_year || (year == target_year && month <= target_month))
return OLDER_THAN_ONE_MONTH;
} else if (boost::regex_match(valString.c_str(), matchResult, _timex_regex_yw)) {
// we're going to assume this only happens for stuff like "last week", which is never historical
return WITHIN_ONE_MONTH;
} else if (boost::regex_match(valString.c_str(), matchResult, _timex_regex_y)) {
std::wstring wstr = matchResult[1];
int year = _wtoi(wstr.c_str());
// use document year here, not target year (even in January 2008, 2007 is historical)
if (year < doc_year)
return OLDER_THAN_ONE_MONTH;
// we don't return WITHIN_ONE_MONTH here
} else if (boost::regex_match(valString.c_str(), matchResult, _timex_regex_decade)) {
std::wstring wstr = matchResult[1];
int year = _wtoi(wstr.c_str());
year = year * 10 + 9; // 198 --> 1989
// use document year here, not target year (even in January 2008, 2007 is historical)
if (year < doc_year)
return OLDER_THAN_ONE_MONTH;
}
return UNKNOWN;
}
|
{"hexsha": "f537240a48dd0edf59e1a58fe0f4215d3e712dc8", "size": 22149, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Generic/icews/TenseDetection.cpp", "max_stars_repo_name": "BBN-E/serif", "max_stars_repo_head_hexsha": "1e2662d82fb1c377ec3c79355a5a9b0644606cb4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-24T19:57:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T19:57:00.000Z", "max_issues_repo_path": "src/Generic/icews/TenseDetection.cpp", "max_issues_repo_name": "BBN-E/serif", "max_issues_repo_head_hexsha": "1e2662d82fb1c377ec3c79355a5a9b0644606cb4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Generic/icews/TenseDetection.cpp", "max_forks_repo_name": "BBN-E/serif", "max_forks_repo_head_hexsha": "1e2662d82fb1c377ec3c79355a5a9b0644606cb4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7900552486, "max_line_length": 166, "alphanum_fraction": 0.6603458395, "num_tokens": 6381}
|
Require Import Coq.Arith.Peano_dec.
Require Import Coq.Structures.OrderedType.
Require Import Coq.Logic.FunctionalExtensionality.
Require Import Coq.Sets.Ensembles.
Require Import Ascii.
Require Import Coq.ZArith.Znat.
Require Import Coq.Program.Equality.
Add LoadPath "." as Top0.
Require Import Top0.Tactics.
Require Import Top0.Keys.
Require Import Top0.Definitions.
Require Import Top0.Nameless.
Require Import Top0.CorrectnessLemmas.
Require Import Top0.AdditionalLemmas.
Require Import Top0.Environment.
Require Import Top0.Heap.
Require Import Top0.Determinism.
Require Import Top0.Axioms.
Require Import Omega.
Module TypeSoundness.
Import Heap.
Import Environment.
Module RMapOrdProp := FMapFacts.OrdProperties R.
Lemma subst_rho_open_close_rgn :
forall rho n w v' rho' r r0 x,
lc_type_rgn r0 ->
find_R w rho = Some v' ->
fold_subst_rgn rho r = fold_subst_rgn rho' (closing_rgn_in_rgn2 n x r0) ->
fold_subst_rgn rho' (opening_rgn_in_rgn2 n (Rgn2_Const true true v') (closing_rgn_in_rgn2 n x r0)) =
fold_subst_rgn rho (opening_rgn_in_rgn2 n (mk_rgn_type w) r).
Proof.
intros rho n w v' rho' r r0 x Hlc1 HF H.
unfold rgn2_in_typ in r.
unfold rgn2_in_typ in r0.
unfold rgn2_in_exp in w.
dependent induction r; dependent induction Hlc1; simpl in *.
- repeat rewrite subst_rho_rgn_const in *. auto.
- destruct (RMapP.eq_dec r0 x); subst; simpl in *.
+ rewrite subst_rho_index in H. rewrite subst_rho_rgn_const in H. inversion H.
+ auto.
- auto.
- destruct (RMapP.eq_dec r x); subst; simpl in *.
+ rewrite subst_rho_index in H.
destruct (subst_rho_fvar_1 rho n0) as [[v H0] | H0];
rewrite H0 in H; inversion H.
+ auto.
- rewrite subst_rho_index in H. rewrite subst_rho_rgn_const in H. inversion H.
- destruct (RMapP.eq_dec r x); subst; simpl in *.
+ repeat rewrite subst_rho_index in H. inversion H; subst.
rewrite NPeano.Nat.eqb_refl.
rewrite subst_rho_rgn_const.
dependent induction w; simpl.
* inversion HF; subst.
rewrite subst_rho_rgn_const.
reflexivity.
* inversion HF. symmetry.
apply subst_rho_fvar_2. now simpl.
+ rewrite subst_rho_index in H.
destruct (subst_rho_fvar_1 rho' r) as [[v H0] | H0];
rewrite H0 in H; inversion H.
Qed.
Lemma subst_rho_open_close_sa:
forall rho n w v' rho' sa sa1 x,
lc_type_sa sa ->
find_R w rho = Some v' ->
fold_subst_sa rho sa1 = fold_subst_sa rho' (closing_rgn_in_sa2 n x sa) ->
fold_subst_sa rho' (opening_rgn_in_sa2 n (Rgn2_Const true true v') (closing_rgn_in_sa2 n x sa)) =
fold_subst_sa rho (opening_rgn_in_sa2 n (mk_rgn_type w) sa1).
Proof.
intros rho n w v' rho' sa sa1 x Hlc HF H.
unfold fold_subst_sa.
inversion Hlc; subst; induction sa1;
unfold fold_subst_sa in H; inversion H; simpl in *;
erewrite subst_rho_open_close_rgn; eauto.
Qed.
Lemma subst_rho_open_close_eps:
forall rho n w v' rho' e e1 x,
lc_type_eps e ->
find_R w rho = Some v' ->
fold_subst_eps rho e1 = fold_subst_eps rho' (closing_rgn_in_eps2 n x e) ->
fold_subst_eps rho' (opening_rgn_in_eps2 n (Rgn2_Const true true v') (closing_rgn_in_eps2 n x e)) =
fold_subst_eps rho (opening_rgn_in_eps2 n (mk_rgn_type w) e1).
Proof.
intros rho n w v' rho' e e1 x Hcl1 HF H.
apply Extensionality_Ensembles.
unfold Same_set, Included.
split; intros; unfold In in *.
- unfold fold_subst_eps. unfold fold_subst_eps in H0.
unfold opening_rgn_in_eps2, closing_rgn_in_eps2. unfold opening_rgn_in_eps2, closing_rgn_in_eps2 in H0.
destruct H0 as [sa [[sa' [[sa'' [H2 H3]] H4]] H5]].
rewrite <- H5. rewrite <- H4. rewrite <- H3.
inversion Hcl1. destruct (H0 sa'').
assert (fold_subst_sa rho sa = fold_subst_sa rho' (closing_rgn_in_sa2 n x sa'') /\ e1 sa /\ e sa'')
by (eapply subst_rho_eps_aux_1; eauto).
assert(H' : fold_subst_sa rho' (opening_rgn_in_sa2 n (Rgn2_Const true true v')
(closing_rgn_in_sa2 n x sa'')) =
fold_subst_sa rho (opening_rgn_in_sa2 n (mk_rgn_type w) sa))
by (apply subst_rho_open_close_sa; auto; intuition).
rewrite H'.
exists (opening_rgn_in_sa2 n (mk_rgn_type w) sa).
intuition.
exists sa.
split; [ assumption | reflexivity].
- unfold fold_subst_eps. unfold fold_subst_eps in H0.
unfold opening_rgn_in_eps2, closing_rgn_in_eps2. unfold opening_rgn_in_eps2, closing_rgn_in_eps2 in H0.
destruct H0 as [sa [[sa' [H1 H2]] H3]].
rewrite <- H3. rewrite <- H2.
exists (opening_rgn_in_sa2 n (Rgn2_Const true true v') (closing_rgn_in_sa2 n x sa)).
inversion Hcl1. destruct (H0 sa).
split.
+ exists (closing_rgn_in_sa2 n x sa). split; [ | reflexivity].
exists sa. split; [ | reflexivity].
apply subst_rho_eps_aux_1 with (sa := sa') (sa':=sa) in H; auto.
+ eapply subst_rho_open_close_sa; eauto.
apply subst_rho_eps_aux_1 with (sa := sa') (sa':=sa) in H; auto.
destruct H as [A [B C]]; auto.
Qed.
Lemma subst_rho_open_close :
forall rho w v' rho' x tyr0 tyr,
lc_type tyr0 ->
find_R w rho = Some v' ->
subst_rho rho' (close_var x tyr0) = subst_rho rho tyr ->
subst_rho rho' (open (mk_rgn_type (Rgn2_Const true false v')) (close_var x tyr0)) =
subst_rho rho (open (mk_rgn_type w) tyr).
Proof.
unfold open, close_var.
intros rho w v' rho' x tyr0 tyr Hcl1 HF.
generalize dependent 0.
generalize dependent tyr. generalize dependent tyr0.
induction tyr0; induction tyr; intros n;
simpl;
repeat (rewrite subst_rho_natural ||
rewrite subst_rho_boolean ||
rewrite subst_rho_unit ||
rewrite subst_rho_forallrgn ||
rewrite subst_rho_effect ||
rewrite subst_rho_pair
);
try (solve [intro Z; inversion Z | intro Y; reflexivity | intro X; assumption |
intros; rewrite subst_rho_tyref in H; inversion H |
intros; rewrite subst_rho_arrow in H; inversion H ]).
- inversion Hcl1; subst.
intros. f_equal; inversion H.
+ erewrite <- IHtyr0_1; eauto.
+ erewrite <- IHtyr0_2; eauto.
- intro. symmetry in H. rewrite subst_rho_tyref in H.
rewrite subst_rho_tyref in H. inversion H as [ [HR1 HR2] ].
repeat rewrite subst_rho_tyref. f_equal.
+ erewrite subst_rho_open_close_rgn; eauto. now inversion Hcl1.
+ erewrite IHtyr0; eauto. now inversion Hcl1.
- intro. symmetry in H. rewrite subst_rho_arrow in H.
rewrite subst_rho_tyref in H. now inversion H.
- intro. rewrite subst_rho_tyref in H. rewrite subst_rho_arrow in H. now inversion H.
- repeat rewrite subst_rho_arrow. intro Z. inversion Z.
f_equal.
+ rewrite <- IHtyr0_1; auto. now inversion Hcl1.
+ apply subst_rho_open_close_eps; [ now inversion Hcl1 | assumption | now inversion Z].
+ rewrite <- IHtyr0_2; auto. now inversion Hcl1.
+ apply subst_rho_open_close_eps; [ now inversion Hcl1 | assumption | now inversion Z].
+ rewrite <- IHtyr0_3; auto. now inversion Hcl1.
- repeat rewrite subst_rho_forallrgn.
intro Z; inversion Z.
f_equal.
+ apply subst_rho_open_close_eps; [ now inversion Hcl1 | assumption | now inversion Z].
+ rewrite <- IHtyr0; auto. now inversion Hcl1.
Qed.
Lemma ty_sound_var :
forall x v stty rho env ctxt t,
TcEnv (stty, rho, env, ctxt) ->
find_E x env = Some v -> find_T x ctxt = Some t ->
TcVal (stty, v, subst_rho rho t).
Proof.
intros x v stty rho env ctxt t HTcEnv FindEnv FindCtxt. (* Hclosed. *)
inversion_clear HTcEnv as [? ? ? ? HBst HFwd HBack HTc].
destruct (HFwd x v FindEnv) as [y FindEnv'].
rewrite FindEnv' in FindCtxt. inversion FindCtxt; subst.
eapply HTc; [eexact FindEnv | eexact FindEnv' ]. (*| assumption]. *)
Qed.
Lemma ty_sound_closure:
forall stty rgns env rho ctxt f x ec ee tyx tyc effc effe,
TcRho (rho, rgns) ->
TcInc (ctxt, rgns)->
TcEnv (stty, rho, env, ctxt) ->
TcExp (ctxt, rgns, Mu f x ec ee, Ty2_Arrow tyx effc tyc effe Ty2_Effect, Empty_Static_Action) ->
TcVal (stty, Cls (env, rho, Mu f x ec ee), subst_rho rho (Ty2_Arrow tyx effc tyc effe Ty2_Effect)).
Proof.
intros; econstructor; eauto.
Qed.
Lemma ty_sound_region_closure:
forall stty rgns env rho ctxt x er tyr effr,
TcRho (rho, rgns) ->
TcInc (ctxt, rgns) ->
TcEnv (stty, rho, env,ctxt) ->
TcExp (ctxt, rgns, Lambda x er, Ty2_ForallRgn (close_var_eff x effr) (close_var x tyr), Empty_Static_Action) ->
TcVal (stty, Cls (env, rho, Lambda x er), subst_rho rho (Ty2_ForallRgn (close_var_eff x effr) (close_var x tyr))).
Proof.
intros. econstructor; eauto.
Qed.
Lemma weakening_trans :
forall stty stty' stty'',
(forall (l : ST.key) (t : tau),
ST.find (elt:=tau) l stty = Some t -> ST.find (elt:=tau) l stty' = Some t) ->
(forall (l : ST.key) (t : tau),
ST.find (elt:=tau) l stty' = Some t -> ST.find (elt:=tau) l stty'' = Some t) ->
(forall (l : ST.key) (t : tau),
ST.find (elt:=tau) l stty = Some t -> ST.find (elt:=tau) l stty'' = Some t).
Proof.
intros stty stty' stty'' Weak Weak'.
intros l t ?. apply Weak'. now apply Weak.
Qed.
Lemma bound_var_is_fresh :
forall rho rgns x,
TcRho (rho, rgns) -> not_set_elem rgns x -> ~ R.In (elt:=Region) x rho.
Proof.
intros rho rgns x H1 H2.
inversion H1; subst.
unfold not_set_elem in H2. unfold Ensembles.Complement in H2.
unfold not. intro.
apply RMapP.in_find_iff in H.
apply H2.
eapply H0; eassumption.
Qed.
Lemma ty_sound:
forall e env rho hp hp' v dynamic_eff,
(hp, env, rho, e) ⇓ (hp', v, dynamic_eff) ->
forall stty ctxt rgns t static_eff,
TcHeap (hp, stty) ->
TcRho (rho, rgns) ->
TcEnv (stty, rho, env, ctxt) ->
TcExp (ctxt, rgns, e, t, static_eff) ->
exists stty',
(forall l t', ST.find l stty = Some t' -> ST.find l stty' = Some t')
/\ TcHeap (hp', stty')
/\ TcVal (stty', v, subst_rho rho t).
Proof.
intros e env rho hp hp' v dynamic_eff D.
dynamic_cases (dependent induction D) Case;
intros stty ctxt rgns t static_eff Hhp Hrho Henv Hexp;
inversion Hexp; subst.
Case "cnt n".
exists stty; (split; [| split]; auto). rewrite subst_rho_natural.
econstructor; eassumption.
Case "bool b".
exists stty; (split; [| split]; auto). rewrite subst_rho_boolean.
econstructor; eassumption.
Case "var x".
exists stty; (split; [| split]; auto).
eapply ty_sound_var; eassumption.
Case "mu_abs".
exists stty; (split; [| split]; auto).
eapply ty_sound_closure; try (solve [eassumption]). auto.
assert (TcInc (ctxt, rgns)) by admit.
auto.
Case "rgn_abs".
exists stty; (split; [| split]; auto).
eapply ty_sound_region_closure; try (solve [eassumption]). auto.
assert (TcInc (ctxt, rgns)) by admit.
auto.
Case "mu_app".
edestruct IHD1 as [sttym [Weak1 [TcHeap1 TcVal_mu]]]; eauto.
edestruct IHD2 as [sttya [Weaka [TcHeapa TcVal_arg]]]; eauto.
eapply ext_stores__env; eauto.
inversion TcVal_mu as [ | | | ? ? ? ? ? ? ? ? TcRho_rho' TcEnv_env' TcExp_abs | | |] ; subst.
inversion TcExp_abs as [ | | | ? ? ? ? ? ? ? ? ? ? ? ? TcExp_ec TcExp_ee | | | | | | | | | | | | | | | | | | | | | ]; subst.
rewrite <- H5 in TcVal_mu.
do 2 rewrite subst_rho_arrow in H5. inversion H5.
assert (SubstEq1: subst_rho rho' tyx = subst_rho rho tya) by assumption.
assert (SubstEq2: subst_rho rho' tyc = subst_rho rho t) by assumption.
rewrite <- SubstEq1 in TcVal_arg.
unfold update_rec_E, update_rec_T in *.
edestruct IHD3 as [sttyb [Weakb [TcHeapb TcVal_res]]]; eauto.
SCase "TcEnv".
apply update_env. apply update_env. eapply ext_stores__env; eauto.
eapply ext_stores__val; eauto. eassumption.
exists sttyb; intuition.
Case "rgn_app".
edestruct IHD1 as [sttyl [Weak1 [TcHeap1 TcVal_lam]]]; eauto.
inversion TcVal_lam as [ | | | ? ? ? ? ? ? ? TcRho_rho' TcInc' TcEnv_env' TcExp_lam | | |]; subst.
inversion TcExp_lam as [ | | | | ? ? ? ? ? ? ? ? ? TcExp_eb | | | | | | | | | | | | | | | | | | | | ]; subst.
edestruct IHD2 as [sttyr [Weak2 [TcHeap2 TcVal_res]]]; eauto using update_env, ext_stores__env.
apply update_rho. assumption. assumption. eapply extended_rho; eauto.
exists sttyr; intuition.
rewrite subst_rho_forallrgn in H5.
rewrite subst_rho_forallrgn in H5.
inversion H5.
unfold update_R in TcVal_res.
simpl in TcVal_res. rewrite subst_add_comm in TcVal_res.
SCase "abstraction body is well typed".
unfold subst_in_type in TcVal_res.
rewrite SUBST_AS_CLOSE_OPEN in TcVal_res; auto.
erewrite subst_rho_open_close in TcVal_res; eauto.
SCase "bound variable is free".
eapply bound_var_is_fresh; eauto.
Case "eff_app".
edestruct IHD1 as [sttym [Weak1 [TcHeap1 TcVal_mu]]]; eauto.
edestruct IHD2 as [sttya [Weaka [TcHeapa TcVal_arg]]]; eauto using ext_stores__env.
inversion TcVal_mu as [ | | | ? ? ? ? ? ? ? TcRho_rho' TcInc' TcEnv_env' TcExp_abs | | |]; subst.
inversion TcExp_abs as [ | | | | ? ? ? ? ? ? ? ? ? TcExp_eb | | | | | | | | | | | | | | | | | | | | ]; subst.
edestruct IHD3 as [sttyb [Weakb [TcHeapb TcVal_res]]]; eauto.
SCase "Extended Env".
apply update_env.
SSCase "TcEnv". apply update_env.
SSSCase "Extended". eapply ext_stores__env; eauto.
SSSCase "Extended TcVal". rewrite <- H4 in TcVal_mu. eapply ext_stores__val; eauto.
SSCase "TcVal". do 2 rewrite subst_rho_arrow in H4.
inversion H4.
assert (SubstEq: subst_rho rho' tyx = subst_rho rho tya) by assumption.
rewrite <- SubstEq in TcVal_arg. eassumption.
exists sttyb. intuition.
rewrite subst_rho_effect. rewrite subst_rho_effect in TcVal_res.
assumption.
Case "par_pair".
edestruct IHD3 as [sttym [Weak1 [TcHeap1 TcVal_app1]]]; eauto.
edestruct IHD4 as [sttya [Weaka [TcHeapa TcVal_app2]]]; eauto.
(*inversion TcVal_app1 as [A B [C D HRApp1] | | | | | |]; subst.
inversion TcVal_app2 as [A B [C D HRApp2] | | | | | |]; subst.*)
exists (Functional_Map_Union sttya sttym). intuition.
SCase "Weakening".
apply UnionStoreTyping; [apply Weaka | apply Weak1]; auto.
SCase "TcHeap".
eapply UnionTcHeap with (theta1:=theta1) (theta2:=theta2); eauto.
SCase "TcVal".
rewrite subst_rho_pair.
econstructor; [eapply TcValExtended_2 | eapply TcValExtended_1]; eauto.
Case "cond_true".
edestruct IHD1 as [sttyb [Weakb [TcHeapvb TcVal_e0]]]; eauto.
edestruct IHD2 as [stty1 [Weak1 [TcHeapv1 TcVal_e1]]];
eauto using ext_stores__env.
exists stty1. intuition.
Case "cond_false".
edestruct IHD1 as [sttyb [Weakb [TcHeapvb TcVal_e0]]]; eauto.
edestruct IHD2 as [stty2 [Weak2 [TcHeapv2 TcVal_e2]]];
eauto using ext_stores__env.
exists stty2. intuition.
Case "new_ref e".
destruct IHD with (stty := stty)
(ctxt := ctxt)
(rgns := rgns)
(t := t0)
(static_eff := veff)
as [sttyv [Weakv [TcHeapv TcVal_v]]]; eauto.
assert (find_H (r, allocate_H heap' r) heap' = None)
by (apply allocate_H_fresh).
exists (update_ST ((r, allocate_H heap' r), subst_rho rho t0) sttyv); split; [ | split].
SCase "Extended stores".
intros k' t' STfind. destruct k' as [r' l'].
destruct (eq_nat_dec r r'); destruct (eq_nat_dec (allocate_H heap' r) l'); subst.
SSCase "New address must be fresh, prove by contradiction".
apply Weakv in STfind.
inversion_clear TcHeapv as [? ? ? STfind_Hfind ?].
destruct (STfind_Hfind (r', allocate_H heap' r') t' STfind) as [x F].
assert (C : None = Some x) by (rewrite <- F; rewrite <- H0; reflexivity).
discriminate.
SSCase "Existing addresses are well-typed 1".
apply ST_diff_key_2; [ simpl; intuition; apply n; congruence | now apply Weakv in STfind ].
SSCase "Existing addresses are well-typed 2".
apply ST_diff_key_2; [ simpl; intuition; apply n; congruence | now apply Weakv in STfind ].
SSCase "Existing addresses are well-typed 3".
apply ST_diff_key_2; [simpl; intuition; apply n; congruence | now apply Weakv ].
SCase "Heap typeness".
apply update_heap_fresh; eauto.
remember (find_ST (r, allocate_H heap' r) sttyv) as to; symmetry in Heqto.
destruct to as [ t | ].
SSCase "New address must be fresh, prove by contradiction".
inversion_clear TcHeapv as [? ? ? STfind_Hfind ?].
destruct (STfind_Hfind (r, allocate_H heap' r) t Heqto) as [? ex].
rewrite H0 in ex. discriminate.
SSCase "Heap typeness is preserved".
reflexivity.
SCase "Loc is well-typed".
simpl in H; inversion H; subst.
rewrite subst_rho_tyref. unfold mk_rgn_type. rewrite subst_rho_rgn_const.
econstructor. apply ST_same_key_1.
intro.
eapply TcVal_implies_closed in TcVal_v; eauto.
Case "get_ref e".
destruct IHD with (hp'0 := hp')
(v := Loc (Rgn2_Const true false s) l)
(stty := stty)
(rgns := rgns)
(ctxt := ctxt)
(t := Ty2_Ref (mk_rgn_type ((Rgn2_Const true false s))) t)
(static_eff := aeff)
(dynamic_eff := aacts)
as [sttyv [Weakv [TcHeapv TcVal_v]]]; eauto.
exists sttyv. split; [ | split].
SCase "HeapTyping extends".
apply Weakv.
SCase "Heap is well typed".
apply TcHeapv.
SCase "Value is well-typed".
inversion_clear TcHeapv as [? ? ? ? HeapTcVal]. eapply HeapTcVal; eauto.
inversion TcVal_v; subst; simpl in H; inversion H; subst.
rewrite subst_rho_tyref in H7. inversion H7. subst.
assumption.
Case "set_ref e1 e2".
destruct IHD1 with (hp' := heap')
(v := Loc (Rgn2_Const true false s) l)
(stty := stty)
(ctxt := ctxt)
(rgns := rgns)
(t := Ty2_Ref (mk_rgn_type ((Rgn2_Const true false s))) t0)
(static_eff := aeff)
(dynamic_eff := aacts)
as [sttya [Weaka [TcHeapa TcVal_a]]]; eauto.
destruct IHD2 with (stty := sttya)
(ctxt := ctxt)
(rgns := rgns)
(t := t0)
(static_eff := veff)
as [sttyv [Weakv [TcHeapv TcVal_v]]]; eauto using ext_stores__env.
exists sttyv. split; [ | split].
SCase "HeapTyping extends".
eapply weakening_trans; eauto.
SCase "New heap is well typed".
apply update_heap_exists with (t:= subst_rho rho t0).
{ assumption. }
{ apply Weakv. inversion TcVal_a; subst.
simpl in H; inversion H; subst.
rewrite subst_rho_tyref in H4. inversion H4. subst.
assumption. }
{ assumption. }
SCase "Result value is well-typed".
rewrite subst_rho_unit. constructor.
Case "nat_plus x y".
edestruct IHD1 as [sttyx [Weakx [TcHeapvx TcVal_x]]]; eauto.
edestruct IHD2 as [sttyy [Weaky [TcHeapvy TcVal_y]]];
eauto using ext_stores__env.
exists sttyy. intuition. rewrite subst_rho_natural. constructor.
Case "nat_minus x y".
edestruct IHD1 as [sttyx [Weakx [TcHeapvx TcVal_x]]]; eauto.
edestruct IHD2 as [sttyy [Weaky [TcHeapvy TcVal_y]]];
eauto using ext_stores__env.
exists sttyy. intuition. rewrite subst_rho_natural. constructor.
Case "nat_times x y".
edestruct IHD1 as [sttyx [Weakx [TcHeapvx TcVal_x]]]; eauto.
edestruct IHD2 as [sttyy [Weaky [TcHeapvy TcVal_y]]];
eauto using ext_stores__env.
exists sttyy. intuition. rewrite subst_rho_natural. constructor.
Case "bool_eq x y".
edestruct IHD1 as [sttyx [Weakx [TcHeapvx TcVal_x]]]; eauto.
edestruct IHD2 as [sttyy [Weaky [TcHeapvy TcVal_y]]];
eauto using ext_stores__env.
exists sttyy. intuition. rewrite subst_rho_boolean. constructor.
Case "alloc_abs".
exists stty. intuition. rewrite subst_rho_effect. constructor.
Case "read_abs".
exists stty. intuition. rewrite subst_rho_effect. constructor.
Case "write_abs".
exists stty. intuition. rewrite subst_rho_effect. constructor.
Case "read_conc".
exists stty. intuition.
assert (hp = hp') by (eapply EmptyTracePreservesHeap_1; eauto; reflexivity); now subst.
rewrite subst_rho_effect. constructor.
Case "write_conc".
exists stty. intuition.
assert (hp = hp') by (eapply EmptyTracePreservesHeap_1; eauto; reflexivity); now subst.
rewrite subst_rho_effect. constructor.
Case "eff_concat". exists stty. intuition. rewrite subst_rho_effect. constructor.
Case "eff_top". exists stty. intuition. rewrite subst_rho_effect. constructor.
Case "eff_empty". exists stty. intuition. rewrite subst_rho_effect. constructor.
Admitted.
End TypeSoundness.
|
{"author": "esmifro", "repo": "SurfaceEffects", "sha": "3450e4b771de4062ab73ee20947adf3f9de579ba", "save_path": "github-repos/coq/esmifro-SurfaceEffects", "path": "github-repos/coq/esmifro-SurfaceEffects/SurfaceEffects-3450e4b771de4062ab73ee20947adf3f9de579ba/TypeSystem.v"}
|
# -*- coding: utf-8 -*-
import json
import os
from typing import List
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import user_events as uev
from user_events import UserEvents
import event_type as et
def retention_by_period(events: UserEvents):
"""
Args:
events - UserEvents instance with user events data and metadata
Returns:
retention_list - for each user contains list of periods where user participated. Each period in the list
reduced to minimum period
"""
grouped = events.data.groupby(events.user_id_column)[events.period_id_column].apply(np.array)
grouped = grouped.dropna()
grouped = grouped.apply(lambda x: x - np.amin(x))
retention_list = []
for periods_list in grouped:
retention_list.extend(periods_list)
return retention_list
def plot_retention_by_period(retention_data: List):
periods_label = 'Retention Period'
retention_label = 'Retention %'
users_num_label = 'Number of users'
unique, counts = np.unique(retention_data, return_counts=True)
retention_graph = pd.DataFrame(
{periods_label: unique,
users_num_label: counts,
retention_label: counts/counts.max()
})
retention_graph = retention_graph.sort_index().copy()
print(retention_graph)
retention_graph.plot(kind='line', x=periods_label, y=retention_label, figsize=(15, 10), marker='o')
plt.title('Retention by time period', fontsize=35)
plt.rcParams.update({'font.size': 25})
plt.ylim([0, 1])
plt.xlim([0, retention_graph[retention_label].max()])
plt.xticks(np.arange(0, retention_graph[periods_label].max()+1), fontsize=20)
plt.yticks(fontsize=20)
plt.grid()
for index, row in retention_graph.iterrows():
plt.text(row[periods_label], row[retention_label], round(row[retention_label]*100, 2), fontsize=25)
if __name__ == '__main__':
events = uev.UserEvents.init_from_files(events_metadata_path='../data/events_metadata.json',
periods_metadata_path='../data/periods_metadata.json')
events.add_period_to_data()
config_path = '../retention/retention_config.json'
with open(os.path.abspath(config_path), 'r') as f:
config = json.load(f)
config_events = config['events']
# filter by event types from config
event_types = et.load_event_types_from_json(config_events)
if len(event_types) > 0:
events.filter_by_event_type(event_types, add_event_type_name=True)
events.data = events.data[[events.user_id_column, events.period_id_column]]
events.data.drop_duplicates(inplace=True)
events.data.set_index(events.user_id_column, inplace=True)
retention = retention_by_period(events)
plot_retention_by_period(retention)
plt.show()
|
{"hexsha": "6a95a0b102fcaca38ee23ff5cc7c564bc4a9dd0d", "size": 2868, "ext": "py", "lang": "Python", "max_stars_repo_path": "retention/retention_by_period.py", "max_stars_repo_name": "bibamur/prodoct-analytics-suite", "max_stars_repo_head_hexsha": "4c65124809a754ff2013a1a709d7e67aaaeb3346", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "retention/retention_by_period.py", "max_issues_repo_name": "bibamur/prodoct-analytics-suite", "max_issues_repo_head_hexsha": "4c65124809a754ff2013a1a709d7e67aaaeb3346", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "retention/retention_by_period.py", "max_forks_repo_name": "bibamur/prodoct-analytics-suite", "max_forks_repo_head_hexsha": "4c65124809a754ff2013a1a709d7e67aaaeb3346", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5909090909, "max_line_length": 114, "alphanum_fraction": 0.6963040446, "include": true, "reason": "import numpy", "num_tokens": 636}
|
import sys
import numpy as np
from Keyword import Keyword
from Utterance import Utterance
import Distance
import log
def spoken_term_detection_truncated(keywords, utterances, left_encode_num, right_encode_num, distance_type, output_dir):
for i in range(len(keywords)):
keyword_sampling_feature = keywords[i].sampling_feature
keyword_phone_num = keyword_sampling_feature.shape[0]
(left_position, right_position) = get_truncated_position(keyword_phone_num, left_encode_num, right_encode_num)
keyword_indexes_selected = get_real_indexes(range(keyword_phone_num), left_encode_num, right_encode_num)
keyword_sampling_feature_selected = keyword_sampling_feature[keyword_indexes_selected, :]
output_file = output_dir + keywords[i].getFileId() + ".RESULT"
fid = open(output_file, "w")
for j in range(len(utterances)):
utterance_sampling_feature = utterances[j].sampling_feature
utterance_phone_num = utterance_sampling_feature.shape[0]
utterance_indexes_selected = get_real_indexes(range(utterance_phone_num), left_encode_num, right_encode_num)
utterance_sampling_feature_selected = utterance_sampling_feature[utterance_indexes_selected, :]
distance = Distance.distance(keyword_sampling_feature_selected[:, left_position:right_position], utterance_sampling_feature_selected[:, left_position:right_position], distance_type, sub_num=40)
fid.writelines(str(distance.min()) + "\n")
fid.close()
log.Log("finished the std for keyword " + str(keywords[i].getFileId()))\
def spoken_term_detection(keywords, utterances, left_encode_num, right_encode_num, distance_type, output_dir):
for i in range(len(keywords)):
keyword_sampling_feature = keywords[i].sampling_feature
keyword_phone_num = keyword_sampling_feature.shape[0]
keyword_indexes_selected = get_real_indexes(range(keyword_phone_num), left_encode_num, right_encode_num)
keyword_sampling_feature_selected = keyword_sampling_feature[keyword_indexes_selected, :]
output_file = output_dir + keywords[i].getFileId() + ".RESULT"
fid = open(output_file, "w")
for j in range(len(utterances)):
utterance_sampling_feature = utterances[j].sampling_feature
utterance_phone_num = utterance_sampling_feature.shape[0]
utterance_indexes_selected = get_real_indexes(range(utterance_phone_num), left_encode_num, right_encode_num)
utterance_sampling_feature_selected = utterance_sampling_feature[utterance_indexes_selected, :]
distance = Distance.distance(keyword_sampling_feature_selected, utterance_sampling_feature_selected, distance_type, sub_num=40)
fid.writelines(str(distance.min()) + "\n")
fid.close()
log.Log("finished the std for keyword " + str(keywords[i].getFileId()))\
def get_truncated_position(phone_num, left_encode_num, right_encode_num):
encode_num = left_encode_num + right_encode_num
if phone_num < encode_num:
if right_encode_num == 0:
left_phone_num = phone_num
right_phone_num = 0
else:
left_phone_num = phone_num//2 + 1
right_phone_num = phone_num//2 - 1 + phone_num%2
left_jump = left_encode_num - left_phone_num
right_jump = right_encode_num - right_phone_num
left_position = left_jump * PHONE_LEN
right_position = (encode_num - right_jump) * PHONE_LEN
else:
left_position = 0
right_position = encode_num * PHONE_LEN
return (left_position, right_position)
def get_real_indexes(indexes, left_encode_num, right_encode_num):
if len(indexes) > left_encode_num + right_encode_num:
return indexes[left_encode_num-1 : len(indexes)-right_encode_num]
elif right_encode_num == 0:
return [indexes[-1]]
else:
return [indexes[len(indexes)//2]]
def sampling(entities, sampling_type):
for i in range(len(entities)):
entities[i].downSampling(sampling_type)
if __name__=='__main__':
if len(sys.argv) < 13:
print("USAGE: python " + sys.argv[0] + " keyword_dir keyword_list utterance_dir utterance_list left_encode_num, right_encode_num, feature_type distance_type keyword_sampling_type utterance_sampling_type output_dir truncated_mod")
exit(1)
keyword_dir = sys.argv[1]
keyword_list_file = sys.argv[2]
utterance_dir = sys.argv[3]
utterance_list_file = sys.argv[4]
left_encode_num = int(sys.argv[5])
right_encode_num = int(sys.argv[6])
feature_type = sys.argv[7]
distance_type = sys.argv[8]
keyword_sampling_type = sys.argv[9]
utterance_sampling_type = sys.argv[10]
output_dir = sys.argv[11]
truncated_mod = int(sys.argv[12])
# read keyword and do downsampling
keyword_lists = open(keyword_list_file).readlines()
keywords = []
for i in range(len(keyword_lists)):
keyword_id = keyword_lists[i].strip()
new_entity = Keyword(keyword_dir, keyword_id, feature_type, phone_type="PHN39", wav_sampling_rate=16000)
keywords.append(new_entity)
# read utterance and do downsampling
utterance_lists = open(utterance_list_file).readlines()
utterances = []
for i in range(len(utterance_lists)):
utterance_id = utterance_lists[i].strip()
new_entity = Utterance(utterance_dir, utterance_id, feature_type, phone_type="PHN39", wav_sampling_rate=16000)
utterances.append(new_entity)
# down sampling of keyword and utterance
sampling(keywords, keyword_sampling_type)
sampling(utterances, utterance_sampling_type)
if (truncated_mod==False):
spoken_term_detection(keywords, utterances, left_encode_num, right_encode_num, distance_type, output_dir)
else:
spoken_term_detection_truncated(keywords, utterances, left_encode_num, right_encode_num, distance_type, output_dir)
|
{"hexsha": "e77cd51f3f4f5f61940dd55b0e12f4534101f14e", "size": 6004, "ext": "py", "lang": "Python", "max_stars_repo_path": "Encode_STD_v2/std.py", "max_stars_repo_name": "jingyonghou/TIMIT_STD", "max_stars_repo_head_hexsha": "743112e79115ddc31ed3ebd7c4f7d1d361dfd7e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-12-12T07:28:39.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-12T03:07:42.000Z", "max_issues_repo_path": "Encode_STD_v2/std.py", "max_issues_repo_name": "jingyonghou/TIMIT_STD", "max_issues_repo_head_hexsha": "743112e79115ddc31ed3ebd7c4f7d1d361dfd7e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Encode_STD_v2/std.py", "max_forks_repo_name": "jingyonghou/TIMIT_STD", "max_forks_repo_head_hexsha": "743112e79115ddc31ed3ebd7c4f7d1d361dfd7e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.1327433628, "max_line_length": 238, "alphanum_fraction": 0.7250166556, "include": true, "reason": "import numpy", "num_tokens": 1313}
|
! This module provides the burner for the dvode test problem. This burner
! should not be used in any real MAESTRO run.
!
! More information in the README file
!
module burner_module
use bl_types
use bl_constants_module
use network
use bl_error_module
contains
subroutine burner(Xin, dt, tol, Xout)
implicit none
real(kind=dp_t), intent(in ) :: Xin(nspec), dt, tol(2)
real(kind=dp_t), intent( out) :: Xout(nspec)
! allocate storage for the input state
real(kind=dp_t), dimension(nspec) :: y, ydot
! our problem is stiff, tell ODEPACK that. 21 means stiff, jacobian
! function is supplied, 22 means stiff, figure out my jacobian through
! differencing
integer, parameter :: MF_ANALYTIC_JAC = 21, MF_NUMERICAL_JAC = 22
! tolerance parameters:
!
! itol specifies whether to use an single absolute tolerance for
! all variables (1), or to pass an array of absolute tolerances, one
! for each variable with a scalar relative tol (2), a scalar absolute
! and array of relative tolerances (3), or arrays for both (4)
!
! The error is determined as e(i) = rtol*abs(y(i)) + atol, and must
! be > 0. Since we have some compositions that may be 0 initially,
! we will specify both an absolute and a relative tolerance.
!
integer, parameter :: ITOL = 4
real(kind=dp_t), dimension(nspec) :: atol, rtol
real(kind=dp_t) :: time
! we want to do a normal computation, and get the output values of y(t)
! after stepping though dt
integer, PARAMETER :: ITASK = 1
! istate determines the state of the calculation. A value of 1 meeans
! this is the first call to the problem -- this is what we will want.
! Note, istate is changed over the course of the calculation, so it
! cannot be a parameter
integer :: istate
! we will override the maximum number of steps, so turn on the
! optional arguments flag
integer, parameter :: IOPT = 1
! declare a real work array of size 22 + 9*nspec + 2*nspec**2 and an
! integer work array of since 30 + nspec
integer, parameter :: LRW = 22 + 9*nspec + 2*nspec**2
real(kind=dp_t), dimension(LRW) :: rwork
integer, parameter :: LIW = 30 + nspec
integer, dimension(LIW) :: iwork
real(kind=dp_t) :: rpar
integer :: ipar
EXTERNAL jac, f_rhs, dvode
integer :: i
! set the tolerances.
atol(:) = tol(1)
rtol(:) = tol(2)
! we want VODE to re-initialize each time we call it
istate = 1
rwork(:) = ZERO
iwork(:) = 0
! set the maximum number of steps allowed (the VODE default is 500)
iwork(6) = 15000
! initialize the integration time
time = ZERO
! abundances
y = Xin
! call the integration routine
call dvode(f_rhs, nspec, y, time, dt, ITOL, rtol, atol, ITASK, &
istate, IOPT, rwork, LRW, iwork, LIW, jac, MF_NUMERICAL_JAC,&
rpar, ipar)
if (istate < 0) then
print *, 'ERROR: integration failed in net'
print *, 'istate = ', istate
print *, 'time = ', time
call bl_error("ERROR in burner: integration failed")
endif
! store the new mass fractions
Xout(:) = max(min(y(:), ONE), ZERO)
end subroutine burner
end module burner_module
|
{"hexsha": "3fd89340c49fb4755d6195205d52f836a829cf3a", "size": 3380, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Microphysics/networks/dvode_test/burner.f90", "max_stars_repo_name": "sailoridy/MAESTRO", "max_stars_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-05-15T15:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T08:13:32.000Z", "max_issues_repo_path": "Microphysics/networks/dvode_test/burner.f90", "max_issues_repo_name": "sailoridy/MAESTRO", "max_issues_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2017-06-14T23:05:00.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-28T16:40:42.000Z", "max_forks_repo_path": "Microphysics/networks/dvode_test/burner.f90", "max_forks_repo_name": "sailoridy/MAESTRO", "max_forks_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2017-06-14T14:52:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T07:16:09.000Z", "avg_line_length": 26.6141732283, "max_line_length": 76, "alphanum_fraction": 0.6313609467, "num_tokens": 964}
|
#!/usr/bin/env python
###########################
# Required Pacakges
##########################
import math
import random
import os
import shutil
import sys
import warnings
import argparse
import collections
import csv
from timeit import default_timer as timer
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
import scipy.stats as stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import spams
class staNMF:
'''
2016 Amy E Campbell
Python 2.7 implementation of Siqi Wu's 03/2016 Stability NMF (staNMF)
Solves non-negative matrix factorization over a range of principal patterns
(PPs) with randomly sampled initial NMF "guesses" for an n x m matrix using
the SPArse Modeling Software (J. Mairal, F. Bach, J. Ponce and G. Sapiro,
2010)
USAGE:
Can be called and used by a script, imported as "staNMF"
(see requirements.txt for required packages)
To read in Wu et al's 2016 drosophila spatial expression data, set:
filename = 'example'
INPUT:
:param: filename (str, required) : Specify full file extension to a ".csv"
containing a table with columns and rows labeled, or set filename='example'
:param: folderID (str, optional with default ""): allows user to specify
a unique (to the user's working directory) identifier for the 'staNMFDicts'
folder that the runNMF() method creates
:param: K1 (int, optional with default 15): lowest # PP's (K) tested
:param: K2 (int, optional with default 30): highest # PP's (K) tested
:param: sample_weights (bool or list, optional, default False):
performs weighting step on full data matrix to account for multiple columns
of the same name (filename='example'defaults to special case of weighting
(delimited by ".")) If sample_weights is a list of custom weights, it must
be equal in length to the number of columns in the matrix, and this list of
weights will be applied across columns
:param: seed (int, optional with default 123): sets numpy random seed
:param: replicates(int or tuple of ints of length 2, optional with default
int 100):
Specifies which/ how many bootstrapped repetitions to be performed on each
value of K, for use in stability analysis; if a list of length 2 is given,
self.replicates is set to a list of ints between the first and second
elements of this tuple. If it is set to an integer, self.replicates is set
to a list of ints between 0 and the given integer
Number of bootstrapped
repetitions of NMF to be performed on each value of K, for use in stability
analysis
:param: NMF_finished (bool, optional with default False): True if runNMF
has been completed for the dataset and k range for which you wish to
calculate instability. To surpass NMF step if fileID file already contains
factorization solutions for X in your range [K1, K2], set to True
:param: parallel (bool, optional with default False): True if NMF is to be
run in parallel such that the instability calculation should write a file
for each K containing its instability index
'''
def __init__(self, filename, folderID="", K1=15, K2=30,
sample_weights=False, seed=123, replicates=100,
NMF_finished=False, parallel=False):
warnings.filterwarnings("ignore")
self.K1 = K1
self.K2 = K2
self.sample_weights = sample_weights
self.seed = seed
self.guess = np.array([])
self.guessdict = {}
self.parallel = parallel
if isinstance(replicates, int):
self.replicates = range(replicates)
elif isinstance(replicates, tuple):
start, stop = replicates
self.replicates = range(replicates[0], replicates[1])
self.X = []
if filename == 'example':
self.fn = os.path.join("data", "WuExampleExpression.csv")
self.sample_weights = True
else:
self.fn = filename
self.folderID = folderID
self.rowidmatrix = []
self.NMF_finished = NMF_finished
self.instabilitydict = {}
self.load_data()
self.instabilityarray = []
self.stability_finished = False
random.seed(self.seed)
def initialguess(self, X, K, i):
'''
Randomly samples K columns from X; sets input matrix guess to be a
fortran array, and sets 'guesslist', a list of the column indices
sampled from X for this replication of NMF;
Arguments:
:param: X(numpy array, required): full matrix
:param: K(int, required): Number of columns to select at random to be
used as the 'initial guess' for the K PPs in the current simulation
of NMF
:param: i(int, required): Key at which indexlist will be stored(current
replicate of NMF)
Usage:
Called by runNMF
'''
indexlist = random.sample(np.arange(1, X.shape[1]), K)
self.guess = np.asfortranarray(X[:, indexlist])
self.guessdict[i] = indexlist
def load_data(self):
'''
Loads full data matrix from .csv file into numpy array; if the
self.sample_weights variable is True, weights column names by their
number of replicate occurances
Usage:
Called by constructor
'''
if not self.NMF_finished:
csvfile = open(self.fn, "r")
workingmatrix = pd.read_csv(csvfile, index_col=0)
self.rowidmatrix = workingmatrix.index.values
colnames = workingmatrix.columns.values
if self.sample_weights is not False:
if isinstance(self.sample_weights, list):
if len(self.sample_weights) != len(colnames):
raise ValueError("sample_weights length must equal the"
" number of columns.")
else:
weight = self.sample_weights
else:
# Special formatting case for Wu et al. expression data
if self.fn == os.path.join("data",
"WuExampleExpression.csv"):
colnames = [(str(x).split('.'))[0] for x in colnames]
colUnique = np.unique(colnames)
colNum = np.zeros(len(colUnique))
weight = np.zeros(len(colnames))
for i in range(len(colUnique)):
colNum[i] = list(colnames).count(colUnique[i])
weight[i] = 1/(colNum[i])
workingmatrix = workingmatrix.apply(lambda x: weight * x,
axis=1)
workingmatrix = workingmatrix.applymap(lambda x: math.sqrt(x))
X1 = (np.array(workingmatrix).astype(float))
self.X = np.asfortranarray(X1)
def runNMF(self, **kwargs):
'''
Iterates through range of integers between the K1 and K2 provided (By
default, K1=15 and K2=30), runs NMF using SPAMS package; outputs
NMF matrix files (.csv form) and updates self.guessdict containing the
columns selected for the initial guess input(as calculated by
staNMF.initialguess())
Usage: Called by user (ex: '$ instance.runNMF()')
Arguments: Optional **kwargs allows user to update spams.trainDL()
parameters
Return: None
Output:
(k2-k1) folders, each containing files for every replicate
(labeled factorization_<replicate>.csv) , and each containing
a 'selectedcolumns.txt' file, which prints 'self.guessdict', a
dictionary with keys <factorzation #>, values <columns selected>
'''
self.NMF_finished = False
numPatterns = np.arange(self.K1, self.K2+1)
for k in range(len(numPatterns)):
K = numPatterns[k]
path = str("./staNMFDicts" + str(self.folderID) + "/K=" + str(K) +
"/")
try:
os.makedirs(path)
except OSError:
if not (os.path.isdir(path)):
raise
m, n = np.shape(self.X)
print("Working on " + str(K) + "...\n")
param = {"numThreads": -1,
# minibatch size
"batchsize": min(1024, n),
# Number of columns in solution
"K": K,
"lambda1": 0,
# Number of iterations to go into this round of NMF
"iter": 500,
# Specify optimization problem to solve
"mode": 2,
# Specify convex set
"modeD": 0,
# Positivity constraint on coefficients
"posAlpha": True,
# Positivity constraint on solution
"posD": True,
# Limited information about progress
"verbose": False,
"gamma1": 0}
for p in param:
if p not in kwargs:
kwargs[p] = param[p]
for l in self.replicates:
self.initialguess(self.X, K, l)
Dsolution = spams.trainDL(
# Matrix
self.X,
# Initial guess as provided by initialguess()
D=self.guess,
**kwargs)
# write solution to a csv file in the staNMFDicts/k=K/ folder
outputfilename = "factorization_" + str(l) + ".csv"
outputfilepath = os.path.join(path, outputfilename)
Dsolution1 = pd.DataFrame(Dsolution, index=self.rowidmatrix)
Dsolution1.to_csv(outputfilepath, header=None)
indexoutputstring = "selectedcolumns" + str(K) + ".csv"
indexoutputpath = os.path.join(path, indexoutputstring)
with open(indexoutputpath, "w") as indexoutputfile:
for m in sorted(self.guessdict):
indexoutputfile.write(str(m) + '\t' +
str(self.guessdict[m]) + '\n')
self.NMF_finished = True
def amariMaxError(self, correlation):
'''
Computes what Wu et al. (2016) described as a 'amari-type error'
based on average distance between factorization solutions
Return:
Amari distance distM
Arguments:
:param: correlation: k by k matrix of pearson correlations
Usage: Called by instability()
'''
n, m = correlation.shape
maxCol = np.absolute(correlation).max(0)
colTemp = np.mean((1-maxCol))
maxRow = np.absolute(correlation).max(1)
rowTemp = np.mean((1-maxRow))
distM = (rowTemp + colTemp)/(2)
return distM
def findcorrelation(self, A, B, k):
'''
Construct k by k matrix of Pearson product-moment correlation
coefficients for every combination of two columns in A and B
:param: A : first NMF solution matrix
:param: B : second NMF solution matrix, of same dimensions as A
:param: k : number of columns in each matrix A and B
Return: numpy array of dimensions k by k, where array[a][b] is the
correlation between column 'a' of X and column 'b'
Usage:
Called by instability()
'''
corrmatrix = []
for a in range(k):
for b in range(k):
c = np.corrcoef(A[:, a], B[:, b])
corrmatrix.append(c[0][1])
return np.asarray(corrmatrix).reshape(k, k)
def instability(self, k1=0, k2=0):
'''
Performs instability calculation for NMF factorizations for each K
within the range entered
Arguments:
:param: k1 (int, optional, default self.K1): lower bound of K to
plot against stability
:param: k2 (int, optional, default self.K2): upper bound of K to
plot against instability
Return:
"instability.csv" containing instability index
for each K between and including k1 and k2; updates
self.instabilitydict (required for makeplot())
'''
if k1 == 0:
k1 = self.K1
if k2 == 0:
k2 = self.K2
numReplicates = len(self.replicates)
if self.NMF_finished is False:
("staNMF Error: runNMF is not complete\n")
else:
numPatterns = np.arange(k1, k2+1)
modelK = numPatterns[0]
path = str("./staNMFDicts" + str(self.folderID) + "/K=" +
str(modelK)+"/")
inputfilename = "factorization_0.csv"
inputfilepath = os.path.join(path, inputfilename)
inputfile = open(inputfilepath, "rb")
reader = csv.reader(inputfile, delimiter=',')
matrix1 = np.array(list(reader))
firstmatrix = matrix1[:, 1:]
inputfile.close()
d = np.size(firstmatrix, 0)
for k in numPatterns:
("Calculating instability for " + str(k))
path = str("./staNMFDicts" + str(self.folderID) + "/K=" +
str(k)+"/")
Dhat = np.zeros((numReplicates, d, k))
for replicate in range(numReplicates):
inputfilename = "factorization_" + str(replicate) + ".csv"
inputfilepath = os.path.join(path, inputfilename)
with open(inputfilepath, "rb") as inputfile:
matrix1 = pd.read_csv(inputfile, header=None)
inputmatrix = matrix1.drop(0, axis=1)
inputmatrix.columns = np.arange(0, matrix1.shape[1]-1)
Dhat[replicate] = inputmatrix
distMat = np.zeros(shape=(numReplicates, numReplicates))
for i in range(numReplicates):
for j in range(i, numReplicates):
x = Dhat[i]
y = Dhat[j]
CORR = self.findcorrelation(x, y, k)
distMat[i][j] = self.amariMaxError(CORR)
distMat[j][i] = distMat[i][j]
self.instabilitydict[k] = (np.sum(distMat) / (numReplicates *
(numReplicates-1)))
if self.parallel:
outputfile = open(str(path + "instability.csv"), "w")
outputwriter = csv.writer(outputfile)
outputwriter.writerow([k, self.instabilitydict[k]])
outputfile.close()
if not self.parallel:
outputfile = open("instability.csv", "w")
outputwriter = csv.writer(outputfile)
for i in sorted(self.instabilitydict):
outputwriter.writerow([i, self.instabilitydict[i]])
def get_instability(self):
'''
Retrieves instability values calculated in this instance of staNMF
Returns:
dictionary with keys K, values instability index
Usage: Called by user (not required for output of 'instablity.csv', but
returns usable python dictionary of these calculations)
'''
if self.stability_finished:
return self.instabilitydict
else:
print("Instability has not yet been calculated for your NMF"
"results. Use staNMF.instability() to continue.")
def plot(self, dataset_title="Drosophila Spatial Expression Data", xmax=0,
xmin=-1, ymin=0, ymax=0, xlab="K", ylab="Instability Index"):
'''
Plots instability results for all K's between and including K1 and K2
with K on the X axis and instability on the Y axis
Arguments:
:param: dataset_title (str, optional, default "Drosophila
Expression Data")
:param: ymax (float, optional, default
largest Y + (largest Y/ # of points)
:param: xmax (float, optional, default K2+1)
:param: xlab (string, default "K") x-axis label
:param: ylab (string, default "Instability Index") y-axis label
Returns: None, saves plot as <dataset_title>.png
Usage: Called by user to generate plot
'''
kArray = []
if self.parallel:
for K in range(self.K1, self.K2):
kpath = "./staNMFDicts{}/K={}/instability.csv".format(
self.folderID, K)
df = pd.read_csv(kpath)
kArray.append(int(df.columns[0]))
self.instabilityarray.append(float(df.columns[1]))
else:
for i in sorted(self.instabilitydict):
kArray.append(i)
self.instabilityarray.append(self.instabilitydict[i])
if xmax == 0:
xmax = self.K2 + 1
if xmin == -1:
xmin = self.K1
ymin = 0
ymax = max(self.instabilityarray) + (max(self.instabilityarray) /
len(self.instabilityarray))
plt.plot(kArray, self.instabilityarray)
plt.axis([xmin, xmax, ymin, ymax])
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.axes.titlesize = 'smaller'
plt.title(str('Stability NMF Results: Principal Patterns vs.'
'Instability in ' + dataset_title))
plotname = str(dataset_title + ".png")
plt.savefig(plotname)
def ClearDirectory(self, k_list):
'''
A storage-saving option that clears the entire directory of each K
requested, including the instability.csv file in each folder
:param: k_list (list, required) list of K's to delete corresponding
directories of
NOTE: this should only be used after stability has been calculated for
each K you wish to delete.
'''
for K in k_list:
path = "./staNMFDicts{}/K={}/".format(self.folderID, K)
shutil.rmtree(path)
|
{"hexsha": "16630b9f3bb0332bfe92da7e7b81e871be1e404d", "size": 18309, "ext": "py", "lang": "Python", "max_stars_repo_path": "staNMF/staNMF.py", "max_stars_repo_name": "greenelab/staNMF", "max_stars_repo_head_hexsha": "2e5a8ed322d4221a5907ce5a479cbbe5ff8653ad", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-10-21T16:16:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-27T19:29:13.000Z", "max_issues_repo_path": "staNMF/staNMF.py", "max_issues_repo_name": "greenelab/staNMF", "max_issues_repo_head_hexsha": "2e5a8ed322d4221a5907ce5a479cbbe5ff8653ad", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2016-09-09T22:08:39.000Z", "max_issues_repo_issues_event_max_datetime": "2016-11-10T15:29:50.000Z", "max_forks_repo_path": "staNMF/staNMF.py", "max_forks_repo_name": "greenelab/staNMF", "max_forks_repo_head_hexsha": "2e5a8ed322d4221a5907ce5a479cbbe5ff8653ad", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-09-09T20:30:55.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-06T21:57:50.000Z", "avg_line_length": 37.2892057026, "max_line_length": 79, "alphanum_fraction": 0.5684089792, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 4076}
|
"""
This file implements the class for Burgers equation.
"""
import numpy
class Burgers(object):
def __init__(self):
pass
def flux(self, q):
return q**2 / 2
def max_lambda(self, q):
return numpy.max(numpy.abs(q))
|
{"hexsha": "d9267c397e7656ef627624a99db2cee60612fbb1", "size": 271, "ext": "py", "lang": "Python", "max_stars_repo_path": "coding_exercises/solutions/systems/burgers.py", "max_stars_repo_name": "IanHawke/icts-2020", "max_stars_repo_head_hexsha": "531d0d505fc83b709223f1c924b5e7e08f8c08a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-05-19T12:00:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-08T09:23:20.000Z", "max_issues_repo_path": "coding_exercises/solutions/systems/burgers.py", "max_issues_repo_name": "IanHawke/icts-2020", "max_issues_repo_head_hexsha": "531d0d505fc83b709223f1c924b5e7e08f8c08a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coding_exercises/solutions/systems/burgers.py", "max_forks_repo_name": "IanHawke/icts-2020", "max_forks_repo_head_hexsha": "531d0d505fc83b709223f1c924b5e7e08f8c08a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-05-08T18:26:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-01T13:17:54.000Z", "avg_line_length": 15.0555555556, "max_line_length": 52, "alphanum_fraction": 0.5682656827, "include": true, "reason": "import numpy", "num_tokens": 63}
|
from .. import get_endpoint
from .cases_func import f_3p_1im_dep
import math
import numpy as np
import unittest
method = "CICO_ONE_PASS"
class getEndpointTest(unittest.TestCase):
def test_default_options(self):
res0 = [get_endpoint(
[3., 2., 2.1],
i,
lambda x: f_3p_1im_dep(x),
method,
loss_crit=9
) for i in range(3)]
self.assertTrue(math.isclose(res0[0].value, 5.0, abs_tol=1e-2))
self.assertTrue(len(res0[0].profilePoints) > 0)
self.assertTrue(res0[0].status == "BORDER_FOUND_BY_SCAN_TOL")
self.assertTrue(res0[0].direction == "right")
self.assertTrue(math.isclose(res0[1].value, 2.0+2.0*math.sqrt(2.), abs_tol=1e-2))
self.assertTrue(len(res0[1].profilePoints) > 0)
self.assertTrue(res0[1].status == "BORDER_FOUND_BY_SCAN_TOL")
self.assertTrue(res0[1].direction == "right")
self.assertTrue(len(res0[2].profilePoints) == 0)
self.assertTrue(res0[2].status == "SCAN_BOUND_REACHED")
self.assertTrue(res0[2].direction == "right")
def test_left_direction(self):
res0 = [get_endpoint(
[3., 2., 2.1],
i,
lambda x: f_3p_1im_dep(x),
method,
direction="left",
loss_crit=9
) for i in range(3)]
self.assertTrue(math.isclose(res0[0].value, 1.0, abs_tol=1e-2))
self.assertTrue(len(res0[0].profilePoints) > 0)
self.assertTrue(res0[0].status == "BORDER_FOUND_BY_SCAN_TOL")
self.assertTrue(res0[0].direction == "left")
self.assertTrue(math.isclose(res0[1].value, 2.0 - 2.0 * math.sqrt(2.), abs_tol=1e-2))
self.assertTrue(len(res0[1].profilePoints) > 0)
self.assertTrue(res0[1].status == "BORDER_FOUND_BY_SCAN_TOL")
self.assertTrue(res0[1].direction == "left")
self.assertTrue(len(res0[2].profilePoints) == 0)
self.assertTrue(res0[2].status == "SCAN_BOUND_REACHED")
self.assertTrue(res0[2].direction == "left")
def test_log(self):
res0 = [get_endpoint(
[3., 2., 2.1],
i,
lambda x: f_3p_1im_dep(x),
method,
loss_crit=9,
scale=["log","direct", "log"]
) for i in range(3)]
self.assertTrue(math.isclose(np.log10(res0[0].value), np.log10(5.), abs_tol=1e-2))
self.assertTrue(len(res0[0].profilePoints) > 0)
self.assertTrue(res0[0].status == "BORDER_FOUND_BY_SCAN_TOL")
self.assertTrue(res0[0].direction == "right")
self.assertTrue(math.isclose(res0[1].value, 2.0 + 2.0 * math.sqrt(2.), abs_tol=1e-2))
self.assertTrue(len(res0[1].profilePoints) > 0)
self.assertTrue(res0[1].status == "BORDER_FOUND_BY_SCAN_TOL")
self.assertTrue(res0[1].direction == "right")
self.assertTrue(len(res0[2].profilePoints) == 0)
self.assertTrue(res0[2].status == "SCAN_BOUND_REACHED")
self.assertTrue(res0[2].direction == "right")
#unittest.main(argv=['first-arg-is-ignored'], exit=False)
|
{"hexsha": "69f59b051def9686d4205db451a108aabab1520b", "size": 3109, "ext": "py", "lang": "Python", "max_stars_repo_path": "likelihoodprofiler/tests/test_get_endpoint.py", "max_stars_repo_name": "vetedde/lhp.py", "max_stars_repo_head_hexsha": "fd73c1cd24ae66f2be89833ab3f6c9c7bae68a72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-19T08:42:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-20T09:23:22.000Z", "max_issues_repo_path": "likelihoodprofiler/tests/test_get_endpoint.py", "max_issues_repo_name": "vetedde/lhp.py", "max_issues_repo_head_hexsha": "fd73c1cd24ae66f2be89833ab3f6c9c7bae68a72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-12-26T17:31:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T22:17:42.000Z", "max_forks_repo_path": "likelihoodprofiler/tests/test_get_endpoint.py", "max_forks_repo_name": "vetedde/lhp.py", "max_forks_repo_head_hexsha": "fd73c1cd24ae66f2be89833ab3f6c9c7bae68a72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3827160494, "max_line_length": 93, "alphanum_fraction": 0.5995496944, "include": true, "reason": "import numpy", "num_tokens": 872}
|
#!/usr/bin/env python
# marker_track.py: Code to track AR marker with respect to Kinect and Baxter
# Author: Nishanth Koganti
# Date: 2016/06/15
# Source: https://github.com/osrf/baxter_demos/blob/master/scripts/get_ar_calib.py
import tf
import yaml
import math
import rospy
import numpy as np
from math import pi
# get tf details from transformation matrix
def getTfFromMatrix(matrix):
scale, shear, angles, trans, persp = tf.transformations.decompose_matrix(matrix)
return trans, tf.transformations.quaternion_from_euler(*angles), angles
# lookup tf transform between two frames
def lookupTransform(tf_listener, target, source):
tf_listener.waitForTransform(target, source, rospy.Time(), rospy.Duration(4.0))
trans, rot = tf_listener.lookupTransform(target, source, rospy.Time())
euler = tf.transformations.euler_from_quaternion(rot)
source_target = tf.transformations.compose_matrix(translate = trans, angles = euler)
return source_target
def main():
# initialize ros node
rospy.init_node('marker_track')
# load calibration file
savePath = rospy.get_param('~save_path')
paramFile = rospy.get_param('~parameters_file')
with open(paramFile, 'r') as f:
params = yaml.load(f)
# parameter initialization
frame = params['frame']
markernum = params['markernum']
# create tf listener and broadcaster
tf_listener = tf.TransformListener()
# loop rate
rate = rospy.Rate(30)
def saveData():
# write to yaml file
print 'write tracking data to files'
np.savetxt('%sbaxter_trajectory' % (savePath), baxterTraj, delimiter=',', fmt='%.4f')
np.savetxt('%skinect_trajectory' % (savePath), kinectTraj, delimiter=',', fmt='%.4f')
rospy.on_shutdown(saveData)
# create empty matrices to save tracked data
kinectTraj = np.empty(shape=[0,3])
baxterTraj = np.empty(shape=[0,3])
# Publish transform and marker
while not rospy.is_shutdown():
# base to refernce
base_marker = lookupTransform(tf_listener, frame, '/base')
trans_baxter, rot, rot_euler = getTfFromMatrix(np.linalg.inv(base_marker))
# marker to camera
marker_camera = lookupTransform(tf_listener, '/ar_marker_'+str(markernum), '/kinect2_link')
trans_camera, rot, rot_euler = getTfFromMatrix(np.linalg.inv(marker_camera))
kinectTraj = np.vstack([kinectTraj,np.asarray(trans_camera)])
baxterTraj = np.vstack([baxterTraj,np.asarray(trans_baxter)])
rate.sleep()
if __name__=='__main__':
main()
|
{"hexsha": "0f85550572a815ec57628f7ce1d2c235c6e87380", "size": 2571, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/marker_track.py", "max_stars_repo_name": "ShibataLabPrivate/kinect_baxter_calibration", "max_stars_repo_head_hexsha": "f969e6bfdab691da928d4ea1b7512b19c66a20b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2017-07-21T19:46:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-16T02:59:08.000Z", "max_issues_repo_path": "scripts/marker_track.py", "max_issues_repo_name": "ShibataLabPrivate/kinect_baxter_calibration", "max_issues_repo_head_hexsha": "f969e6bfdab691da928d4ea1b7512b19c66a20b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2016-06-15T09:46:41.000Z", "max_issues_repo_issues_event_max_datetime": "2016-06-16T06:19:14.000Z", "max_forks_repo_path": "scripts/marker_track.py", "max_forks_repo_name": "ShibataLab/kinect_baxter_calibration", "max_forks_repo_head_hexsha": "f969e6bfdab691da928d4ea1b7512b19c66a20b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-11-04T09:16:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-01T20:28:13.000Z", "avg_line_length": 32.1375, "max_line_length": 99, "alphanum_fraction": 0.7032283158, "include": true, "reason": "import numpy", "num_tokens": 627}
|
[STATEMENT]
lemma univ_basic_semialg_set_to_semialg_set:
assumes "P \<in> carrier Q\<^sub>p_x"
assumes "m \<noteq> 0"
shows "to_R1 ` (univ_basic_semialg_set m P) = basic_semialg_set 1 m (from_Qp_x P)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>a. [a]) ` univ_basic_semialg_set m P = basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (\<lambda>a. [a]) ` univ_basic_semialg_set m P \<subseteq> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
2. basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<subseteq> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
show "(\<lambda>a. [a]) ` univ_basic_semialg_set m P \<subseteq> basic_semialg_set 1 m (from_Qp_x P)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>a. [a]) ` univ_basic_semialg_set m P \<subseteq> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P \<Longrightarrow> x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P \<Longrightarrow> x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
assume A: "x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P"
[PROOF STATE]
proof (state)
this:
x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
goal (1 subgoal):
1. \<And>x. x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P \<Longrightarrow> x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
obtain b y where by_def:"b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> (P \<bullet> b) = (y[^]m) \<and> x = [b]"
[PROOF STATE]
proof (prove)
using this:
x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
goal (1 subgoal):
1. (\<And>b y. b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b] \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding univ_basic_semialg_set_def
[PROOF STATE]
proof (prove)
using this:
x \<in> (\<lambda>a. [a]) ` {a \<in> carrier Q\<^sub>p. \<exists>y\<in>carrier Q\<^sub>p. P \<bullet> a = y [^] m}
goal (1 subgoal):
1. (\<And>b y. b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b] \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b]
goal (1 subgoal):
1. \<And>x. x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P \<Longrightarrow> x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b]
[PROOF STEP]
have "x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)"
[PROOF STATE]
proof (prove)
using this:
b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b]
goal (1 subgoal):
1. x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
[PROOF STEP]
using A Qp.to_R1_closed[of b]
[PROOF STATE]
proof (prove)
using this:
b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b]
x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
b \<in> carrier Q\<^sub>p \<Longrightarrow> [b] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
goal (1 subgoal):
1. x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
[PROOF STEP]
unfolding univ_basic_semialg_set_def
[PROOF STATE]
proof (prove)
using this:
b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b]
x \<in> (\<lambda>a. [a]) ` {a \<in> carrier Q\<^sub>p. \<exists>y\<in>carrier Q\<^sub>p. P \<bullet> a = y [^] m}
b \<in> carrier Q\<^sub>p \<Longrightarrow> [b] \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
goal (1 subgoal):
1. x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
goal (1 subgoal):
1. \<And>x. x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P \<Longrightarrow> x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
[PROOF STEP]
show "x \<in> basic_semialg_set 1 m (from_Qp_x P)"
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
goal (1 subgoal):
1. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
using by_def Qp_x_Qp_poly_eval assms
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b]
\<lbrakk>?P \<in> carrier (UP Q\<^sub>p); ?a \<in> carrier Q\<^sub>p\<rbrakk> \<Longrightarrow> ?P \<bullet> ?a = eval_at_point Q\<^sub>p [?a] (UP_to_IP Q\<^sub>p 0 ?P)
P \<in> carrier (UP Q\<^sub>p)
m \<noteq> 0
goal (1 subgoal):
1. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
unfolding basic_semialg_set_def
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>)
b \<in> carrier Q\<^sub>p \<and> y \<in> carrier Q\<^sub>p \<and> P \<bullet> b = y [^] m \<and> x = [b]
\<lbrakk>?P \<in> carrier (UP Q\<^sub>p); ?a \<in> carrier Q\<^sub>p\<rbrakk> \<Longrightarrow> ?P \<bullet> ?a = eval_at_point Q\<^sub>p [?a] (UP_to_IP Q\<^sub>p 0 ?P)
P \<in> carrier (UP Q\<^sub>p)
m \<noteq> 0
goal (1 subgoal):
1. x \<in> {q \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>). \<exists>y\<in>carrier Q\<^sub>p. eval_at_point Q\<^sub>p q (UP_to_IP Q\<^sub>p 0 P) = y [^] m}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>a. [a]) ` univ_basic_semialg_set m P \<subseteq> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
goal (1 subgoal):
1. basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<subseteq> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
show "basic_semialg_set 1 m (from_Qp_x P) \<subseteq> (\<lambda>a. [a]) ` univ_basic_semialg_set m P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<subseteq> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<Longrightarrow> x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<Longrightarrow> x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
assume A: "x \<in> basic_semialg_set 1 m (from_Qp_x P)"
[PROOF STATE]
proof (state)
this:
x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
goal (1 subgoal):
1. \<And>x. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<Longrightarrow> x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
[PROOF STEP]
obtain b where b_def: "b \<in> carrier Q\<^sub>p \<and> x = [b]"
[PROOF STATE]
proof (prove)
using this:
x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
goal (1 subgoal):
1. (\<And>b. b \<in> carrier Q\<^sub>p \<and> x = [b] \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding basic_semialg_set_def
[PROOF STATE]
proof (prove)
using this:
x \<in> {q \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>). \<exists>y\<in>carrier Q\<^sub>p. eval_at_point Q\<^sub>p q (UP_to_IP Q\<^sub>p 0 P) = y [^] m}
goal (1 subgoal):
1. (\<And>b. b \<in> carrier Q\<^sub>p \<and> x = [b] \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis (mono_tags, lifting) mem_Collect_eq Qp.to_R1_to_R Qp.to_R_pow_closed)
[PROOF STATE]
proof (state)
this:
b \<in> carrier Q\<^sub>p \<and> x = [b]
goal (1 subgoal):
1. \<And>x. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<Longrightarrow> x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
obtain y where y_def: "y \<in> carrier Q\<^sub>p \<and> (Qp_ev (from_Qp_x P) [b] = (y[^]m))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>y. y \<in> carrier Q\<^sub>p \<and> eval_at_point Q\<^sub>p [b] (UP_to_IP Q\<^sub>p 0 P) = y [^] m \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using A b_def
[PROOF STATE]
proof (prove)
using this:
x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P)
b \<in> carrier Q\<^sub>p \<and> x = [b]
goal (1 subgoal):
1. (\<And>y. y \<in> carrier Q\<^sub>p \<and> eval_at_point Q\<^sub>p [b] (UP_to_IP Q\<^sub>p 0 P) = y [^] m \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding basic_semialg_set_def
[PROOF STATE]
proof (prove)
using this:
x \<in> {q \<in> carrier (Q\<^sub>p\<^bsup>1\<^esup>). \<exists>y\<in>carrier Q\<^sub>p. eval_at_point Q\<^sub>p q (UP_to_IP Q\<^sub>p 0 P) = y [^] m}
b \<in> carrier Q\<^sub>p \<and> x = [b]
goal (1 subgoal):
1. (\<And>y. y \<in> carrier Q\<^sub>p \<and> eval_at_point Q\<^sub>p [b] (UP_to_IP Q\<^sub>p 0 P) = y [^] m \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
y \<in> carrier Q\<^sub>p \<and> eval_at_point Q\<^sub>p [b] (UP_to_IP Q\<^sub>p 0 P) = y [^] m
goal (1 subgoal):
1. \<And>x. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<Longrightarrow> x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
have " P \<bullet> b = (y[^]m)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<bullet> b = y [^] m
[PROOF STEP]
using assms y_def b_def Qp_x_Qp_poly_eval
[PROOF STATE]
proof (prove)
using this:
P \<in> carrier (UP Q\<^sub>p)
m \<noteq> 0
y \<in> carrier Q\<^sub>p \<and> eval_at_point Q\<^sub>p [b] (UP_to_IP Q\<^sub>p 0 P) = y [^] m
b \<in> carrier Q\<^sub>p \<and> x = [b]
\<lbrakk>?P \<in> carrier (UP Q\<^sub>p); ?a \<in> carrier Q\<^sub>p\<rbrakk> \<Longrightarrow> ?P \<bullet> ?a = eval_at_point Q\<^sub>p [?a] (UP_to_IP Q\<^sub>p 0 ?P)
goal (1 subgoal):
1. P \<bullet> b = y [^] m
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P \<bullet> b = y [^] m
goal (1 subgoal):
1. \<And>x. x \<in> basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<Longrightarrow> x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
P \<bullet> b = y [^] m
[PROOF STEP]
show " x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P"
[PROOF STATE]
proof (prove)
using this:
P \<bullet> b = y [^] m
goal (1 subgoal):
1. x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
using y_def b_def
[PROOF STATE]
proof (prove)
using this:
P \<bullet> b = y [^] m
y \<in> carrier Q\<^sub>p \<and> eval_at_point Q\<^sub>p [b] (UP_to_IP Q\<^sub>p 0 P) = y [^] m
b \<in> carrier Q\<^sub>p \<and> x = [b]
goal (1 subgoal):
1. x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
[PROOF STEP]
unfolding basic_semialg_set_def univ_basic_semialg_set_def
[PROOF STATE]
proof (prove)
using this:
P \<bullet> b = y [^] m
y \<in> carrier Q\<^sub>p \<and> eval_at_point Q\<^sub>p [b] (UP_to_IP Q\<^sub>p 0 P) = y [^] m
b \<in> carrier Q\<^sub>p \<and> x = [b]
goal (1 subgoal):
1. x \<in> (\<lambda>a. [a]) ` {a \<in> carrier Q\<^sub>p. \<exists>y\<in>carrier Q\<^sub>p. P \<bullet> a = y [^] m}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
basic_semialg_set 1 m (UP_to_IP Q\<^sub>p 0 P) \<subseteq> (\<lambda>a. [a]) ` univ_basic_semialg_set m P
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5695, "file": "Padic_Field_Padic_Field_Powers", "length": 42}
|
'''
This script makes a hdf5 style dataset with all images in a chosen directory.
Gram matrices computed here are never normalized by the number of channels.
Normalization is done if necessary on the training stage.
'''
import numpy as np
import h5py
import keras
import keras.backend as K
from keras.applications import vgg16
from training import get_style_features
from utils import preprocess_image_scale, config_gpu, std_input_list
import os
import argparse
if __name__ == "__main__":
def_sl = ['block1_conv2', 'block2_conv2',
'block3_conv3', 'block4_conv3']
parser = argparse.ArgumentParser()
parser.add_argument('--style_dir', type=str, default='gram_imgs',
help='Directory that contains the images.')
parser.add_argument('--gram_dataset_path', type=str, default='grams.h5',
help='Name of the output hdf5 file.')
parser.add_argument('--style_imgs', type=str, default=None, nargs='+',
help='Style image file names.')
parser.add_argument('--style_img_size', type=int, default=[None], nargs='+',
help='Largest size of the style images')
parser.add_argument('--style_layers', type=str, nargs='+', default=def_sl)
parser.add_argument('--gpu', type=str, default='')
parser.add_argument('--allow_growth', default=False, action='store_true')
args = parser.parse_args()
config_gpu(args.gpu, args.allow_growth)
loss_net = vgg16.VGG16(weights='imagenet', include_top=False)
targets_dict = dict([(layer.name, layer.output) for layer in loss_net.layers])
s_targets = get_style_features(targets_dict, args.style_layers)
get_style_target = K.function([loss_net.input], s_targets)
gm_lists = [[] for l in args.style_layers]
img_list = []
img_size_list = []
# Get style image names or get all images in the directory
if args.style_imgs is None:
args.style_imgs = os.listdir(args.style_dir)
# Check the image sizes
args.style_img_size = std_input_list(args.style_img_size, len(args.style_imgs), 'Image size')
for img_name, img_size in zip(args.style_imgs, args.style_img_size):
try:
print(img_name)
img = preprocess_image_scale(os.path.join(args.style_dir, img_name),
img_size=img_size)
s_targets = get_style_target([img])
for l, t in zip(gm_lists, s_targets):
l.append(t)
img_list.append(os.path.splitext(img_name)[0])
img_size_list.append(img_size)
except IOError as e:
print('Could not open file %s as image.' %img_name)
mtx = []
for l in gm_lists:
mtx.append(np.concatenate(l))
f = h5py.File(args.gram_dataset_path, 'w')
f.attrs['img_names'] = img_list
f.attrs['img_sizes'] = img_size_list
for name, m in zip(args.style_layers, mtx):
f.create_dataset(name, data=m)
f.flush()
f.close()
|
{"hexsha": "1693b7da3017be130519e3f413a10aba6738fcf2", "size": 3010, "ext": "py", "lang": "Python", "max_stars_repo_path": "make_gram_dataset.py", "max_stars_repo_name": "Antinomy20001/neural-style-keras", "max_stars_repo_head_hexsha": "a7fe77db3f565813c2fb8cfd35c533b52928a13e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2017-02-10T18:01:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T13:46:31.000Z", "max_issues_repo_path": "make_gram_dataset.py", "max_issues_repo_name": "Antinomy20001/neural-style-keras", "max_issues_repo_head_hexsha": "a7fe77db3f565813c2fb8cfd35c533b52928a13e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-11-29T17:04:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-08T17:40:21.000Z", "max_forks_repo_path": "make_gram_dataset.py", "max_forks_repo_name": "Antinomy20001/neural-style-keras", "max_forks_repo_head_hexsha": "a7fe77db3f565813c2fb8cfd35c533b52928a13e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2017-05-12T06:26:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-17T03:07:45.000Z", "avg_line_length": 35.8333333333, "max_line_length": 97, "alphanum_fraction": 0.6591362126, "include": true, "reason": "import numpy", "num_tokens": 672}
|
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_docs.vis import embed
import numpy as np
import cv2
# Import matplotlib libraries
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.patches as patches
# Some modules to display an animation using imageio.
import imageio
interpreter = tf.lite.Interpreter(model_path="../../models/lite-model_movenet_singlepose_lightning_3.tflite")
interpreter.allocate_tensors()
'''
Check input/output details
'''
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print("== Input details ==")
print("name:", input_details[0]['name'])
print("shape:", input_details[0]['shape'])
print("type:", input_details[0]['dtype'])
print("\n== Output details ==")
print("name:", output_details[0]['name'])
print("shape:", output_details[0]['shape'])
print("type:", output_details[0]['dtype'])
'''
This gives a list of dictionaries.
'''
tensor_details = interpreter.get_tensor_details()
for dict in tensor_details:
i = dict['index']
tensor_name = dict['name']
shape = dict['shape']
# scales = dict['quantization_parameters']['scales']
# zero_points = dict['quantization_parameters']['zero_points']
# tensor = interpreter.tensor(i)()
print(i, type, tensor_name, shape)# , scales.shape, zero_points.shape, tensor.shape)
|
{"hexsha": "6b3b0b9476fa6dfe88275cdd40ce07bcd8791a0b", "size": 1397, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tools/tflite_weight_viewer.py", "max_stars_repo_name": "flymin/movenet", "max_stars_repo_head_hexsha": "a3a74593f622370570506302b04153968abbd1ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2021-07-09T08:24:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T11:51:58.000Z", "max_issues_repo_path": "src/tools/tflite_weight_viewer.py", "max_issues_repo_name": "flymin/movenet", "max_issues_repo_head_hexsha": "a3a74593f622370570506302b04153968abbd1ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-07-14T06:28:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T02:29:28.000Z", "max_forks_repo_path": "src/tools/tflite_weight_viewer.py", "max_forks_repo_name": "flymin/movenet", "max_forks_repo_head_hexsha": "a3a74593f622370570506302b04153968abbd1ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-07-12T09:28:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T11:58:35.000Z", "avg_line_length": 28.5102040816, "max_line_length": 109, "alphanum_fraction": 0.7408732999, "include": true, "reason": "import numpy", "num_tokens": 307}
|
#include <boost/test/unit_test.hpp>
#include "../../src/shared/state.h"
BOOST_AUTO_TEST_CASE(TestStaticAssert)
{
BOOST_CHECK(1);
}
BOOST_AUTO_TEST_CASE(TestGameObject)
{
{
state::ApparitionArea apparitionArea {};
BOOST_CHECK_EQUAL(apparitionArea.getX(), 0);
BOOST_CHECK_EQUAL(apparitionArea.getY(), 0);
}
{
state::ApparitionArea apparitionArea {5, 10};
BOOST_CHECK_EQUAL(apparitionArea.getX(), 5);
BOOST_CHECK_EQUAL(apparitionArea.getY(), 10);
}
}
/* vim: set sw=2 sts=2 et : */
|
{"hexsha": "505d8af097dfe2438310ef8ae6feb2efcc39037f", "size": 519, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/shared/test_apparition_area.cpp", "max_stars_repo_name": "Welteam/Projet-IS", "max_stars_repo_head_hexsha": "4feeeb39aca9af720f22c8bb3a41f2583fb8cb9b", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/shared/test_apparition_area.cpp", "max_issues_repo_name": "Welteam/Projet-IS", "max_issues_repo_head_hexsha": "4feeeb39aca9af720f22c8bb3a41f2583fb8cb9b", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/shared/test_apparition_area.cpp", "max_forks_repo_name": "Welteam/Projet-IS", "max_forks_repo_head_hexsha": "4feeeb39aca9af720f22c8bb3a41f2583fb8cb9b", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2222222222, "max_line_length": 49, "alphanum_fraction": 0.6955684008, "num_tokens": 141}
|
### A Pluto.jl notebook ###
# v0.19.3
using Markdown
using InteractiveUtils
# ╔═╡ 19afaf4e-b19b-47a3-8c4c-31b8879f392d
using JSON, StanSample, Statistics, NamedTupleTools, Random
# ╔═╡ f19cee90-c255-4760-abdc-3c3da106ff9b
stan_chris = "
data {
int n_rows;
int<lower=1> n_cols;
matrix<lower=0>[n_rows,n_cols] x;
}
parameters {
real mu;
}
model {
mu ~ normal(0, 1);
}";
# ╔═╡ 52d5a30c-37ac-4726-abb4-e4e77f9a1871
sm = SampleModel("chris", stan_chris);
# ╔═╡ 7a0e2a02-46de-403e-9c88-f0c39c86951d
begin
seed = 65445
Random.seed!(seed)
end;
# ╔═╡ 07e20b3a-f0e9-4842-b3eb-f8016f24331d
begin
n_rows = 8
n_cols = 2
x = zeros(n_rows, n_cols)
end
# ╔═╡ 3a61ff36-66c3-4612-aef6-7c39fa8387b2
indata = (x = x, n_rows = n_rows, n_cols = n_cols)
# ╔═╡ 9fc648c2-5a17-4d4e-9f94-f03e96e39c34
size(indata.x)
# ╔═╡ a5c57d03-8bbc-41af-81b3-db4239d74037
function convert_matrices_in_nt_or_dict(d::Union{NamedTuple, Dict})
dct = typeof(d) <: NamedTuple ? dct = convert(Dict, d) : d
for key in keys(dct)
if typeof(dct[key]) <: Matrix
dct[key] = Matrix(dct[key]')
end
end
dct
end
# ╔═╡ f4e2869a-686d-4b00-958d-7d5dc957de51
data = convert_matrices_in_nt_or_dict(indata)
# ╔═╡ c92e7631-2fa3-4867-b417-c7661bbcbce8
rc = stan_sample(sm; data, seed)
# ╔═╡ 43512b07-2ea2-4f23-b6da-279202cc61b5
if success(rc)
df = read_samples(sm , :dataframe)
[mean(df.mu), std(df.mu)]
end
# ╔═╡ e7bb012a-2f39-4ce3-818c-9976f5ada1d5
function convert_matrices(d::T) where {T <: Vector}
dd = copy(d)
for i in 1:length(dd)
dct = typeof(dd[i]) <: NamedTuple ? dct = convert(Dict, dd[i]) : dd[i]
for key in keys(dct)
if typeof(dct[key]) <: Matrix
dct[key] = Matrix(dct[key]')
end
end
dd[i] = dct
end
dd
end
# ╔═╡ 80b8fde0-eeb6-4035-8ca6-042117ad9fcc
dd = [(x = x, n_rows = n_rows, n_cols = n_cols) for i in 1:4]
# ╔═╡ c26b973e-6cc9-45d6-982c-632136e049c8
# ╔═╡ 155c4b2d-b151-4212-b390-a90cfc52c579
begin
typeof(dd[1])
dct = convert(Dict, dd[1])
typeof(dct)
end
# ╔═╡ 2525e4b5-abd6-4209-8fea-6d69b939d694
let
rc = stan_sample(sm; data=dct, seed)
if success(rc)
df = read_samples(sm , :dataframe)
[mean(df.mu), std(df.mu)]
end
[mean(df.mu), std(df.mu)]
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
NamedTupleTools = "d9ec5142-1e00-5aa0-9d6a-321866360f50"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
StanSample = "c1514b29-d3a0-5178-b312-660c88baa699"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[compat]
JSON = "~0.21.3"
NamedTupleTools = "~0.14.0"
StanSample = "~6.4.0"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.9.0-DEV"
manifest_format = "2.0"
project_hash = "ff9bc0ea24be11f2223101e1a5610a25d2106397"
[[deps.ANSIColoredPrinters]]
git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c"
uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9"
version = "0.0.1"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "c933ce606f6535a7c7b98e1d86d5d1014f730596"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "5.0.7"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.BitTwiddlingConvenienceFunctions]]
deps = ["Static"]
git-tree-sha1 = "28bbdbf0354959db89358d1d79d421ff31ef0b5e"
uuid = "62783981-4cbd-42fc-bca8-16325de8dc4b"
version = "0.1.3"
[[deps.CPUSummary]]
deps = ["CpuId", "IfElse", "Static"]
git-tree-sha1 = "baaac45b4462b3b0be16726f38b789bf330fcb7a"
uuid = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9"
version = "0.1.21"
[[deps.CSV]]
deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings"]
git-tree-sha1 = "873fb188a4b9d76549b81465b1f75c82aaf59238"
uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
version = "0.10.4"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "9950387274246d08af38f6eef8cb5480862a435f"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.14.0"
[[deps.ChangesOfVariables]]
deps = ["ChainRulesCore", "LinearAlgebra", "Test"]
git-tree-sha1 = "bf98fa45a0a4cee295de98d4c1462be26345b9a1"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.2"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[deps.Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "b153278a25dd42c65abbf4e62344f9d22e59191b"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.43.0"
[[deps.CompatHelperLocal]]
deps = ["DocStringExtensions", "Pkg", "UUIDs"]
git-tree-sha1 = "908a50c2805d2571379fe1595585148c8352f63e"
uuid = "5224ae11-6099-4aaa-941d-3aab004bd678"
version = "0.1.20"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0"
[[deps.CpuId]]
deps = ["Markdown"]
git-tree-sha1 = "fcbb72b032692610bfbdb15018ac16a36cf2e406"
uuid = "adafc99b-e345-5852-983c-f28acb93d879"
version = "0.3.1"
[[deps.Crayons]]
git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.1.1"
[[deps.DataAPI]]
git-tree-sha1 = "fb5f5316dd3fd4c5e7c30a24d50643b73e37cd40"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.10.0"
[[deps.DataFrames]]
deps = ["Compat", "DataAPI", "Future", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrettyTables", "Printf", "REPL", "Reexport", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"]
git-tree-sha1 = "6c19003824cbebd804a51211fd3bbd81bf1ecad5"
uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
version = "1.3.3"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "3daef5523dd2e769dad2365274f760ff5f282c7d"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.11"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[deps.DensityInterface]]
deps = ["InverseFunctions", "Test"]
git-tree-sha1 = "80c3e8639e3353e5d2912fb3a1916b8455e2494b"
uuid = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
version = "0.4.0"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["ChainRulesCore", "DensityInterface", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns", "Test"]
git-tree-sha1 = "70f5bfdfbdc6c9d2b7a143d70ae88f4cb7b193b1"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.56"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[deps.Documenter]]
deps = ["ANSIColoredPrinters", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"]
git-tree-sha1 = "6edbf28671b4df4f692e54ae72f1e35851cfbf38"
uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
version = "0.27.16"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.FilePathsBase]]
deps = ["Compat", "Dates", "Mmap", "Printf", "Test", "UUIDs"]
git-tree-sha1 = "129b104185df66e408edd6625d480b7f9e9823a0"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.18"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "246621d23d1f43e3b9c368bf3b72b2331a27c286"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.13.2"
[[deps.Formatting]]
deps = ["Printf"]
git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8"
uuid = "59287772-0a20-5a39-b81b-1366585eb4c0"
version = "0.4.2"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.HostCPUFeatures]]
deps = ["BitTwiddlingConvenienceFunctions", "IfElse", "Libdl", "Static"]
git-tree-sha1 = "18be5268cf415b5e27f34980ed25a7d34261aa83"
uuid = "3e5b6fbb-0976-4d2c-9146-d79de83f2fb0"
version = "0.1.7"
[[deps.Hwloc]]
deps = ["Hwloc_jll"]
git-tree-sha1 = "92d99146066c5c6888d5a3abc871e6a214388b91"
uuid = "0e44f5e4-bd66-52a0-8798-143a42290a1d"
version = "2.0.0"
[[deps.Hwloc_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "303d70c961317c4c20fafaf5dbe0e6d610c38542"
uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8"
version = "2.7.1+0"
[[deps.IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2"
[[deps.IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[deps.InlineStrings]]
deps = ["Parsers"]
git-tree-sha1 = "61feba885fac3a407465726d0c330b3055df897f"
uuid = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
version = "1.1.2"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "91b5dcf362c5add98049e6c29ee756910b03051d"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.3"
[[deps.InvertedIndices]]
git-tree-sha1 = "bee5f1ef5bf65df56bdd2e40447590b272a5471f"
uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
version = "1.1.0"
[[deps.IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.3"
[[deps.LayoutPointers]]
deps = ["ArrayInterface", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static"]
git-tree-sha1 = "b651f573812d6c36c22c944dd66ef3ab2283dfa1"
uuid = "10f19ff3-798f-405d-979b-55457f8fc047"
version = "0.1.6"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.81.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "76c987446e8d555677f064aaac1145c4c17662f8"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.14"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[deps.ManualMemory]]
git-tree-sha1 = "bcaef4fc7a0cfe2cba636d84cda54b5e4e4ca3cd"
uuid = "d125e4d3-2237-4719-b19c-fa641b8a4667"
version = "0.1.8"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.0+0"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MonteCarloMeasurements]]
deps = ["Distributed", "Distributions", "LinearAlgebra", "MacroTools", "Random", "RecipesBase", "Requires", "SLEEFPirates", "StaticArrays", "Statistics", "StatsBase", "Test"]
git-tree-sha1 = "03619e255664666b352a5e5f6b45e8b00d439870"
uuid = "0987c9cc-fe09-11e8-30f0-b96dd679fdca"
version = "1.0.8"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.2.1"
[[deps.NamedTupleTools]]
git-tree-sha1 = "befc30261949849408ac945a1ebb9fa5ec5e1fd5"
uuid = "d9ec5142-1e00-5aa0-9d6a-321866360f50"
version = "0.14.0"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "3114946c67ef9925204cc024a73c9e679cebe0d7"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.8"
[[deps.Parameters]]
deps = ["OrderedCollections", "UnPack"]
git-tree-sha1 = "34c0e9ad262e5f7fc75b10a9952ca7692cfc5fbe"
uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a"
version = "0.12.3"
[[deps.Parsers]]
deps = ["Dates"]
git-tree-sha1 = "1285416549ccfcdf0c50d4997a94331e88d68413"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.3.1"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.8.0"
[[deps.PooledArrays]]
deps = ["DataAPI", "Future"]
git-tree-sha1 = "a6062fe4063cdafe78f4a0a81cfffb89721b30e7"
uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720"
version = "1.4.2"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.3.0"
[[deps.PrettyTables]]
deps = ["Crayons", "Formatting", "Markdown", "Reexport", "Tables"]
git-tree-sha1 = "dfb54c4e414caa595a1f2ed759b160f5a3ddcba5"
uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
version = "1.3.1"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "78aadffb3efd2155af139781b8a8df1ef279ea39"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.4.2"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.RecipesBase]]
git-tree-sha1 = "6bf3f380ff52ce0832ddd3a2a7b9538ed1bcca7d"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
version = "1.2.1"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "bf3188feca147ce108c76ad82c2792c57abe7b1f"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.7.0"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "68db32dff12bb6127bac73c209881191bf0efbb7"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.3.0+0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.SIMDTypes]]
git-tree-sha1 = "330289636fb8107c5f32088d2741e9fd7a061a5c"
uuid = "94e857df-77ce-4151-89e5-788b33177be4"
version = "0.1.0"
[[deps.SLEEFPirates]]
deps = ["IfElse", "Static", "VectorizationBase"]
git-tree-sha1 = "ac399b5b163b9140f9c310dfe9e9aaa225617ff6"
uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa"
version = "0.6.32"
[[deps.SentinelArrays]]
deps = ["Dates", "Random"]
git-tree-sha1 = "6a2f7d70512d205ca8c7ee31bfa9f142fe74310c"
uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c"
version = "1.3.12"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "5ba658aeecaaf96923dce0da9e703bd1fe7666f9"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.1.4"
[[deps.StanBase]]
deps = ["CSV", "DataFrames", "DelimitedFiles", "Distributed", "DocStringExtensions", "Documenter", "JSON", "Parameters", "Random", "StanDump", "Unicode"]
git-tree-sha1 = "fe0c4b9057c1e23e1f8e27666e63f71eedf7b023"
uuid = "d0ee94f6-a23d-54aa-bbe9-7f572d6da7f5"
version = "4.4.0"
[[deps.StanDump]]
deps = ["ArgCheck", "DocStringExtensions"]
git-tree-sha1 = "bfaebe19ada44a52a6c797d48473f1bb22fd0853"
uuid = "9713c8f3-0168-54b5-986e-22c526958f39"
version = "0.2.0"
[[deps.StanSample]]
deps = ["CSV", "CompatHelperLocal", "DataFrames", "DelimitedFiles", "Distributed", "DocStringExtensions", "JSON", "MonteCarloMeasurements", "NamedTupleTools", "OrderedCollections", "Parameters", "Random", "Reexport", "Requires", "StanBase", "StanDump", "TableOperations", "Tables", "Unicode"]
git-tree-sha1 = "7301a753d5e78694b0a9bd4fe7df6c366ef500ec"
uuid = "c1514b29-d3a0-5178-b312-660c88baa699"
version = "6.4.0"
[[deps.Static]]
deps = ["IfElse"]
git-tree-sha1 = "91181e5820a400d1171db4382aa36e7fd19bee27"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.6.3"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "cd56bf18ed715e8b09f06ef8c6b781e6cdc49911"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.4.4"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "c82aaa13b44ea00134f8c9c89819477bd3986ecd"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.3.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "8977b17906b0a1cc74ab2e3a05faa16cf08a8291"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.16"
[[deps.StatsFuns]]
deps = ["ChainRulesCore", "InverseFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "5950925ff997ed6fb3e985dcce8eb1ba42a0bbe7"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "0.9.18"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.0"
[[deps.TableOperations]]
deps = ["SentinelArrays", "Tables", "Test"]
git-tree-sha1 = "e383c87cf2a1dc41fa30c093b2a19877c83e1bc1"
uuid = "ab02a1b2-a7df-11e8-156e-fb1833f50b87"
version = "1.2.0"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"]
git-tree-sha1 = "5ce79ce186cc678bbb5c5681ca3379d1ddae11a1"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.7.0"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.UnPack]]
git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b"
uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"
version = "1.0.2"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.VectorizationBase]]
deps = ["ArrayInterface", "CPUSummary", "HostCPUFeatures", "Hwloc", "IfElse", "LayoutPointers", "Libdl", "LinearAlgebra", "SIMDTypes", "Static"]
git-tree-sha1 = "858e541ffc21873e45aeaf744e0d015966e0328e"
uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f"
version = "0.21.30"
[[deps.WeakRefStrings]]
deps = ["DataAPI", "InlineStrings", "Parsers"]
git-tree-sha1 = "b1be2855ed9ed8eac54e5caff2afcdb442d52c23"
uuid = "ea10d353-3f73-51f8-a26c-33c1cb351aa5"
version = "1.4.2"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.12+3"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.0+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.41.0+1"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "16.2.1+1"
"""
# ╔═╡ Cell order:
# ╠═19afaf4e-b19b-47a3-8c4c-31b8879f392d
# ╠═f19cee90-c255-4760-abdc-3c3da106ff9b
# ╠═52d5a30c-37ac-4726-abb4-e4e77f9a1871
# ╠═7a0e2a02-46de-403e-9c88-f0c39c86951d
# ╠═07e20b3a-f0e9-4842-b3eb-f8016f24331d
# ╠═3a61ff36-66c3-4612-aef6-7c39fa8387b2
# ╠═9fc648c2-5a17-4d4e-9f94-f03e96e39c34
# ╠═a5c57d03-8bbc-41af-81b3-db4239d74037
# ╠═f4e2869a-686d-4b00-958d-7d5dc957de51
# ╠═c92e7631-2fa3-4867-b417-c7661bbcbce8
# ╠═43512b07-2ea2-4f23-b6da-279202cc61b5
# ╠═e7bb012a-2f39-4ce3-818c-9976f5ada1d5
# ╠═80b8fde0-eeb6-4035-8ca6-042117ad9fcc
# ╠═c26b973e-6cc9-45d6-982c-632136e049c8
# ╠═155c4b2d-b151-4212-b390-a90cfc52c579
# ╠═2525e4b5-abd6-4209-8fea-6d69b939d694
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
|
{"hexsha": "e48002ec22f2172f9c4abbb93de72a3bea818203", "size": 24088, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Examples_Test_Cases/matrixinput_nt.jl", "max_stars_repo_name": "goedman/Stan.jl", "max_stars_repo_head_hexsha": "197c60555b14ab90b4efb9b2902e151b9944eb52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 91, "max_stars_repo_stars_event_min_datetime": "2015-01-07T03:39:29.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-06T17:46:01.000Z", "max_issues_repo_path": "test/Examples_Test_Cases/matrixinput_nt.jl", "max_issues_repo_name": "goedman/Stan.jl", "max_issues_repo_head_hexsha": "197c60555b14ab90b4efb9b2902e151b9944eb52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2015-08-17T22:27:50.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-21T13:38:40.000Z", "max_forks_repo_path": "test/Examples_Test_Cases/matrixinput_nt.jl", "max_forks_repo_name": "goedman/Stan.jl", "max_forks_repo_head_hexsha": "197c60555b14ab90b4efb9b2902e151b9944eb52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2015-01-20T05:22:27.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-06T09:48:06.000Z", "avg_line_length": 30.7244897959, "max_line_length": 292, "alphanum_fraction": 0.7321072733, "num_tokens": 10789}
|
import unittest
from rasp.model import *
from rasp.core import Primitive, get_vocab
def set_seed(seed):
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class TestTransformer(unittest.TestCase):
def test_model_string_input(self):
set_seed(4)
model = get_model()
out, loss = model("hello")
self.assertEqual(tokens(out.argmax(-1)), ['etffn'])
out, loss = model(["hello", "world"])
self.assertEqual(tokens(out.argmax(-1)), ['etffn', 'zkofk'])
out, loss = model(["hello", "wd", "sdfg"])
self.assertEqual(tokens(out.argmax(-1)), ['etffn', 'zkxxx', 'jkkdx'])
def test_train(self):
import os, sys
import random
import torch
import numpy as np
from rasp.daily import folder
sys.path.append(os.path.join(folder(folder(__file__)), "primitives"))
from primitives import functional as F
# check if the loading is working
# print(F.identity("foo"))
vocab, ivocab = get_vocab()
def identity_dataset(n = 200, m = 32):
# since our manual primitives take care of the input output
# we can batch the dataset into buckets of similar lengths
set_seed(4)
ds = []
for _ in range(n): # generate samples
x = "".join([
ivocab[_i] for _i in np.random.randint(0, len(vocab) - 1, size = (np.random.randint(m) + 1,))
])
ds.append(x)
# create the dataset
m = max([len(x) for x in ds])
for i,s in enumerate(ds):
s = s[:m]
if np.random.random() > 0.6:
_i = np.random.randint(len(s))
_j = _i + np.random.randint(5)
_v = ivocab[np.random.randint(25)]
s = s[_i] + "".join([_v for _ in range(_i, _j, 1)]) + s[_j:]
ds[i] = s[:m]
return ds
# create dataset
ds = identity_dataset()
# define the primitive
p = Primitive("identity")
# print("Test 1D:", tokens(p(ds[0])[0].argmax(-1)))
# print("Test (batch):", tokens(p(ds[:2])[0].argmax(-1)))
# train the network things
p.train(ds, F.identity)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "cbdee39dbe1ee231335a32a4e11ea8764ff3ab63", "size": 2114, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/model_test.py", "max_stars_repo_name": "evelynmitchell/rasp", "max_stars_repo_head_hexsha": "9b33bbf911e6c4ff018c9883c39eb698c0abe803", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-20T22:05:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T22:05:37.000Z", "max_issues_repo_path": "test/model_test.py", "max_issues_repo_name": "evelynmitchell/rasp", "max_issues_repo_head_hexsha": "9b33bbf911e6c4ff018c9883c39eb698c0abe803", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-24T17:20:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-24T17:30:59.000Z", "max_forks_repo_path": "test/model_test.py", "max_forks_repo_name": "evelynmitchell/rasp", "max_forks_repo_head_hexsha": "9b33bbf911e6c4ff018c9883c39eb698c0abe803", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-24T16:59:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T16:59:05.000Z", "avg_line_length": 28.9589041096, "max_line_length": 103, "alphanum_fraction": 0.600756859, "include": true, "reason": "import numpy", "num_tokens": 577}
|
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import pathlib, shutil, os
from typing import overload, Callable, Dict, Generic, Iterable, Iterator, List, Mapping, Sequence, \
Tuple, TypeVar, Union
from collections import abc
try:
from typing import Protocol
except ImportError: # Workaround for Python < 3.8.
Protocol = Generic
from .input_transforms import PGD
from . import visualization
PathLike = Union[str, os.PathLike]
TensorLike = Union[np.ndarray, torch.Tensor]
_T_co = TypeVar("_T_co", covariant=True)
class SizedIterable(Iterable[_T_co], Protocol[_T_co]):
"""A SizedIterable is any Iterable type that supports `len`."""
def __len__(self) -> int:
...
_T = TypeVar("_T")
_S = TypeVar("_S")
_V = TypeVar("_V")
class map_iterable(SizedIterable[_S], Generic[_S]):
"""Apply a function to every item of an iterable, preserving `__len__`.
`map_iterable(f, it)` works just as `map(f, it)`,
except it returns a `SizedIterable` instead of an iterator:
you can call `len()` on it and you can call `iter()` repeatedly on it.
"""
def __init__(self, func: Callable[[_T], _S], iterable: SizedIterable[_T]):
self.func = func
self.iterable = iterable
def __iter__(self) -> Iterator[_S]:
for item in self.iterable:
yield self.func(item)
def __len__(self) -> int:
return len(self.iterable)
# map_structure is hard to type properly, but here we handle the most common cases.
@overload
def map_structure(data: _V, f: Callable[[_T], _T]) -> _V: ...
@overload
def map_structure(data: Tuple[_T, ...], f: Callable[[_T], _S]) -> Tuple[_S, ...]: ...
@overload
def map_structure(data: Dict[_V, _T], f: Callable[[_T], _S]) -> Dict[_V, _S]: ...
@overload
def map_structure(data: Mapping[_V, _T], f: Callable[[_T], _S]) -> Mapping[_V, _S]: ...
@overload
def map_structure(data: List[_T], f: Callable[[_T], _S]) -> List[_S]: ...
@overload
def map_structure(data: Sequence[_T], f: Callable[[_T], _S]) -> Sequence[_S]: ...
def map_structure(data, f: Callable):
"""Apply a function to every item in a structure, recursing into lists, dicts, etc.
For example `map_structure((a, b), f)` returns `(f(a), f(b))` when a, b are tensors.
We recurse into `Mapping` and `Sequence` types.
We do not recurse into `Iterable` types in general: for example `f` is applied
to a `torch.Tensor` as a whole, not to each element individually.
This mimics how pytorch goes through dataitems when collating them into batches,
see `torch.utils.data.dataloader.default_collate`
or `torch.utils.data._utils.pin_memory.pin_memory`.
"""
if isinstance(data, tuple): # tuple or subtypes like namedtuple
return type(data)(*(map_structure(v, f) for v in data))
elif isinstance(data, dict): # dict or subtypes like OrderedDict
return type(data)(**{k: map_structure(v, f) for k, v in data.items()})
elif isinstance(data, abc.Mapping):
return {k: map_structure(v, f) for k, v in data.items()}
elif isinstance(data, list) or isinstance(data, tuple):
return [map_structure(v, f) for v in data]
else:
return f(data)
def show_structure(st):
"""Returns structure resembling given structure but with simple repr"""
ret = {}
for k, v in st.items():
if isinstance(v, dict):
ret[k] = show_structure(st[k])
elif isinstance(v, torch.Tensor):
ret[k] = v.shape
elif isinstance(v, list):
ret[k] = len(v)
return ret
class Timer():
def __init__(self) -> None:
self.started = self.time()
def time(self) -> float:
return time.monotonic()
def elapsed(self) -> float:
return self.time() - self.started
class DefaultDict(dict):
"""Same as collections.defaultdict but you can pass arguments to the factory function."""
def __init__(self, factory):
super().__init__
self.factory = factory
def __missing__(self, key):
res = self[key] = self.factory(key)
return res
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def get_device(what: Union[torch.nn.Module, torch.Tensor, torch.device]):
"""Returns the device of the first tensor parameter in `what`
WARNING: tensor's devices in the model might be inconsistent.
"""
while isinstance(what, torch.nn.Module):
what = next(iter(what.parameters()))
if isinstance(what, torch.Tensor):
what = what.device
return what
def mkdir_and_preserve_group(path: PathLike) -> str:
"""Mkdir all ancestors and set the same group owner as the first existing ancestor."""
path = pathlib.Path(path)
ancestor = path
while not ancestor.exists():
ancestor = ancestor.parent
group = ancestor.group()
path.mkdir(parents=True, exist_ok=True)
ancestor = path
while not ancestor.group() == group:
shutil.chown(ancestor, group=group)
ancestor = ancestor.parent
return group
def flatten_dict(dictionary, sep='/'):
"""Flatten nested dictionaries, compressing keys"""
if not dictionary:
return {}
df = pd.json_normalize(dictionary, sep=sep)
return df.to_dict(orient='records')[0]
def transfer_model(model, num_classes=2):
model.fc = nn.Linear(model.fc.in_features, num_classes)
def freeze_model(model):
for param in model.parameters():
param.requires_grad = False
def visualize_gradient(t):
'''
Visualize gradients of model. To transform gradient to image range [0, 1], we
subtract the mean, divide by 2*3 standard deviations, and then clip.
Args:
t (tensor): input tensor (usually gradients)
'''
mt = torch.mean(t, dim=[2, 3], keepdim=True).expand_as(t)
st = torch.std(t, dim=[2, 3], keepdim=True).expand_as(t)
return torch.clamp((t - mt) / (2*3 * st) + 0.5, 0, 1)
def get_accuracy(logits, target):
pred = logits.argmax(dim=1)
accuracy = (pred == target).sum().item() / len(target) * 100
return accuracy
def memory_summary(device = 'cuda'):
"""pass `cuda:1` for summary for cuda:1"""
return torch.cuda.memory_summary(device)
|
{"hexsha": "204d0c8d69aa4fff761073923b9b1ac4dc501081", "size": 6579, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/robbytorch/utils.py", "max_stars_repo_name": "badochov/robbytorch", "max_stars_repo_head_hexsha": "460617eace7d89e093c62051490fa05b86373d64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-05T12:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-21T12:44:21.000Z", "max_issues_repo_path": "src/robbytorch/utils.py", "max_issues_repo_name": "Swarzkopf314/robbytorch", "max_issues_repo_head_hexsha": "aac4c7b7396cd5756642fb9f9bbc52cce5ecfce2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-21T12:41:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T12:41:20.000Z", "max_forks_repo_path": "src/robbytorch/utils.py", "max_forks_repo_name": "Swarzkopf314/robbytorch", "max_forks_repo_head_hexsha": "aac4c7b7396cd5756642fb9f9bbc52cce5ecfce2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-09T12:07:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-08T16:32:07.000Z", "avg_line_length": 29.6351351351, "max_line_length": 100, "alphanum_fraction": 0.6470588235, "include": true, "reason": "import numpy", "num_tokens": 1665}
|
from itertools import *
import networkx as nx
import random
def powerset(iterable):
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
def number_of_cuts(G):
edge_list = G.edges()
count = 0
for e in powerset(range(len(edge_list))):
H = nx.Graph()
H.add_nodes_from(range(len(G.nodes())))
#print H.nodes()
for i in e:
H.add_edge(*edge_list[i])
#print H.edges()
if not nx.is_connected(H):
count += 1
#print H.edges()
return count
def number_of_rcuts(G, r1, r2):
edge_list = G.edges()
count = 0
for e in powerset(range(len(edge_list))):
H = nx.Graph()
H.add_nodes_from(range(len(G.nodes())))
#print H.nodes()
for i in e:
H.add_edge(*edge_list[i])
#print H.edges()
if not nx.has_path(H, r1, r2) and nx.number_connected_components(H) == 2:
count += 1
#print H.edges()
return count
def number_of_rforest(G, r1, r2):
edge_list = G.edges()
count = 0
for e in powerset(range(len(edge_list))):
H = nx.Graph()
H.add_nodes_from(range(len(G.nodes())))
#print H.nodes()
for i in e:
H.add_edge(*edge_list[i])
#print H.edges()
if not nx.has_path(H, r1, r2) and nx.number_connected_components(H) == 2 and nx.is_forest(H):
count += 1
#print H.edges()
return count
def number_of_pathmatching(G):
edge_list = G.edges()
count = 0
for e in powerset(range(len(edge_list))):
H = nx.Graph()
H.add_nodes_from(range(len(G.nodes())))
#print H.nodes()
for i in e:
H.add_edge(*edge_list[i])
#print H.edges()
is_pm = True
for v in G.nodes():
if nx.degree(H, v) > 2:
is_pm = False
break
if is_pm:
if nx.is_forest(H):
count += 1
#print H.edges()
return count
def number_of_partition(G):
count = 0
for n in powerset(range(len(G.nodes()))):
if len(n) >= 1 and len(n) < G.number_of_nodes():
H1 = G.subgraph(n)
if not nx.is_connected(H1):
continue
nbar = []
for i in range(0, len(G.nodes())):
if i not in n:
nbar.append(i)
H2 = G.subgraph(nbar)
if not nx.is_connected(H2):
continue
count += 1
return count / 2
def number_of_3partition(G):
edge_list = G.edges()
count = 0
for n in powerset(range(len(G.nodes()))):
if len(n) == 0:
continue
H1 = G.subgraph(n)
if not nx.is_connected(H1):
continue
nbar1 = []
for i in range(0, len(G.nodes())):
if i not in n:
nbar1.append(i)
for n2 in powerset(nbar1):
if len(n2) == 0:
continue
H2 = G.subgraph(n2)
if not nx.is_connected(H2):
continue
nbar = []
for i in range(0, len(G.nodes())):
if i not in n and i not in n2:
nbar.append(i)
if len(nbar) == 0:
continue
H3 = G.subgraph(nbar)
if not nx.is_connected(H3):
continue
count += 1
return count / 6
def output_graph_as_edge_list(G, f):
f.write(str(len(G.nodes())))
f.write('\n')
for e in G.edges():
x = '{0:d} {1:d}\n'.format(e[0] + 1, e[1] + 1)
f.write(x)
for i in range(0, 3):
G = nx.gnp_random_graph(8, 0.2 + i * 0.2, i + 1)
edge_list = G.edges()
print edge_list
print "is_connected = " + str(nx.is_connected(G)) + ", # of cuts = " + str(number_of_cuts(G))
print "# of rcuts = " + str(number_of_rcuts(G, 0, len(G.nodes()) - 1))
print "# of rforest = " + str(number_of_rforest(G, 0, len(G.nodes()) - 1))
print "# of pathmatching = " + str(number_of_pathmatching(G))
print "# of partition = " + str(number_of_partition(G))
print "# of 3partition = " + str(number_of_3partition(G))
s = 0
t = 0
while s == t:
s = random.randrange(0, len(G.nodes()))
t = random.randrange(0, len(G.nodes()))
print "# of {0:d}-{1:d} paths = {2:d}".format(s + 1, t + 1, len(list(nx.all_simple_paths(G, source = s, target = t))))
f = open("random_graph{0:d}.txt".format(i + 1), "w")
output_graph_as_edge_list(G, f)
f.close()
|
{"hexsha": "31adb7c7b24a637703fee7f0cd205a5846bcc16a", "size": 4599, "ext": "py", "lang": "Python", "max_stars_repo_path": "testdata/makegraph.py", "max_stars_repo_name": "junkawahara/frontier", "max_stars_repo_head_hexsha": "4ae3eb360c96511ec5f3592b8bc85a1d8bce3aec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2015-08-02T14:23:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-18T13:45:47.000Z", "max_issues_repo_path": "testdata/makegraph.py", "max_issues_repo_name": "junkawahara/frontier", "max_issues_repo_head_hexsha": "4ae3eb360c96511ec5f3592b8bc85a1d8bce3aec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-07-26T01:52:38.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-26T01:59:21.000Z", "max_forks_repo_path": "testdata/makegraph.py", "max_forks_repo_name": "junkawahara/frontier", "max_forks_repo_head_hexsha": "4ae3eb360c96511ec5f3592b8bc85a1d8bce3aec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-07-29T22:19:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-20T17:19:40.000Z", "avg_line_length": 30.0588235294, "max_line_length": 122, "alphanum_fraction": 0.5116329637, "include": true, "reason": "import networkx", "num_tokens": 1260}
|
#' Tidy Starting Lineups
#'
#' @param j msf object
#' @param ... additional arguments. currently unused
#' @export
tidy.msf_lineup <- function(j, ...) {
# game
game_id <- j[["game"]][["id"]]
game_time <- msf_time(j[["game"]][["startTime"]])
# lineups
team_lineups <- j[["teamLineups"]]
team1 <- parse_game_lineups_team(team_lineups[[1]])
team2 <- parse_game_lineups_team(team_lineups[[2]])
lineups <- rbind(team1, team2)
lineups[["game_id"]] <- as.character(game_id)
lineups[["game_time"]] <- game_time
lineups[c("game_id", "game_time", setdiff(colnames(lineups), c("game_id", "game_time")))]
}
#' @keywords internal
parse_game_lineups_team <- function(j) {
# team
team_id <- j[["team"]][["id"]]
team <- j[["team"]][["abbreviation"]]
# expected
expected <- j[["expected"]][["lineupPositions"]]
expected_position <- purrr::map_chr(expected, "position")
expected_id <- purrr::map_int(expected, c("player", "id"), .default = NA_integer_)
expected_fname <- purrr::map_chr(expected, c("player", "firstName"), .default = NA_character_)
expected_lname <- purrr::map_chr(expected, c("player", "lastName"), .default = NA_character_)
# actual
actual <- j[["actual"]][["lineupPositions"]]
actual_position <- purrr::map_chr(actual, "position")
actual_id <- purrr::map_int(actual, c("player", "id"), .default = NA_integer_)
actual_fname <- purrr::map_chr(actual, c("player", "firstName"), .default = NA_character_)
actual_lname <- purrr::map_chr(actual, c("player", "lastName"), .default = NA_character_)
tibble::tibble(
team_id = as.character(team_id),
team = team,
expected_position = expected_position,
expected_id = as.character(expected_id),
expected_name = paste(expected_fname, expected_lname),
actual_position = actual_position,
actual_id = as.character(actual_id),
actual_name = paste(actual_fname, actual_lname)
)
}
#' Tidy Game Boxscores
#'
#' @param j msf object
#' @param ... additional arguments. currently unused
#' @export
tidy.msf_boxscore <- function(j, ...) {
home_stats <- parse_boxscore_stats(j[[c("stats", "home", "players")]])
away_stats <- parse_boxscore_stats(j[[c("stats", "away", "players")]])
# add team_id
home_stats[["team_id"]] <- as.character(j[[c("game", "homeTeam", "id")]])
home_stats[["team"]] <- j[[c("game", "homeTeam", "abbreviation")]]
away_stats[["team_id"]] <- as.character(j[[c("game", "awayTeam", "id")]])
away_stats[["team"]] <- j[[c("game", "awayTeam", "abbreviation")]]
# combine home and away
stats <- do.call(rbind, list(home_stats, away_stats))
# add game id and time
game_id <- j[["game"]][["id"]]
game_time <- msf_time(j[["game"]][["startTime"]])
stats[["game_id"]] <- as.character(game_id)
stats[["game_time"]] <- game_time
to_front <- c("game_id", "game_time", "team_id", "team")
stats <- stats[c(to_front, setdiff(colnames(stats), to_front))]
stats
}
#' @keywords internal
parse_boxscore_stats <- function(j) {
# extract player info
player_info <- purrr::map_dfr(j, ~ purrr::compact(.x[["player"]]))
player_info[["player_id"]] <- as.character(player_info[["id"]])
player_info[["player_name"]] <- paste(player_info[["firstName"]], player_info[["lastName"]])
player_info <- player_info[c("player_id", "player_name", "position")]
# extract player stats
player_stats <- purrr::map(j, ~ purrr::flatten(.x[["playerStats"]][[1]]))
# remove PerGame columns (this is only one game)
player_stats <- purrr::map(player_stats, ~ .x[!grepl("PerGame", names(.x))])
# combine into table
player_stats <- purrr::map_dfr(player_stats, tibble::as_tibble)
tibble::as_tibble(cbind(player_info, player_stats))
}
|
{"hexsha": "180d941c9259c6f60f7d3879c19f612897e0ab40", "size": 3697, "ext": "r", "lang": "R", "max_stars_repo_path": "R/parse-msf-by-game.r", "max_stars_repo_name": "zamorarr/msf2", "max_stars_repo_head_hexsha": "afad5cedf1eb87c83795154d345b8c4860823bc1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-26T01:13:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T16:14:17.000Z", "max_issues_repo_path": "R/parse-msf-by-game.r", "max_issues_repo_name": "zamorarr/msf2", "max_issues_repo_head_hexsha": "afad5cedf1eb87c83795154d345b8c4860823bc1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/parse-msf-by-game.r", "max_forks_repo_name": "zamorarr/msf2", "max_forks_repo_head_hexsha": "afad5cedf1eb87c83795154d345b8c4860823bc1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-26T14:04:30.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-26T14:04:30.000Z", "avg_line_length": 34.8773584906, "max_line_length": 96, "alphanum_fraction": 0.6643224236, "num_tokens": 1060}
|
from pygenetic import Population, Evolution, Statistics
import random
import collections
import bisect
import math
import numpy as np
class GAEngine:
"""
This Class is the main driver program which contains and invokes the operators used in Genetic algorithm
GAEngine keeps track of specific type of operators the user has specified for running the algorithm
Methods :
---------
addCrossoverHandler(crossover_handler, weight, *args)
Sets the function to be used for crossover operation
addMutationHandler(mutation_handler, weight, *args)
Sets the function to be used for mutation operation
doCrossover(cross_func, member1, member2)
Calls crossover handler passing 2 given members as parameters
doMutation(self, mut_func, member)
Calls mutation handler passing member as parameter
setSelectionHandler(selection_handler, *args)
Sets the function to be used for selection operation
setFitnessHandler(fit_function, *args)
Sets the function to be used for calculating fitness of chromosome
calculateFitness(chromosome)
Invokes fitness function (fitness_func) to compute the fitness score of a chromosome
generateFitnessMappings()
Generates a list of tuples (individual, fitness_score) and also stores the tuple
containing fittest chromosome [best_fitness] depending on fitness_type(max/min/equal)
handle_selection(repeat_chromosome_sorting=False)
Invokes generateFitnessMappings() if repeat_chromosome_sorting=True and selection_handler called
normalizeWeights()
Normalizes crossover and mutation handler weights, result is a CDF
chooseCrossoverHandler()
Selects crossover handler from available handlers based on weightage given to handlers
chooseMutationHandler()
Selects mutation handler from available handlers based on weightage given to handlers
setEvolution(evolution)
Sets evolution instance member to the parameter passed during class initialization
addStatistic(statistic,statistic_function)
Appends a new statistic function
evolve(noOfIterations=50)
The interface provided to start evolution
continue_evolve(noOfIterations=20)
Performs the evolution by invoking the evolve method from Evolution.py module
as many times as number of iterations specified by user or terminates if optimal
solution is found.
Also invokes compute method from Statistics.py module to generate graph
Instance Members
-----------
fitness_func : A function reference
The fitness function to be used, passed as a function argument
factory : Instance of any subclass of ChromosomeFactory class
Generates and returns the initial population of candidate solutions
population_size : int
The number of candidate solutions that can exist after every iteration
cross_prob : float (0.0 to 1.0)
The Crossover probability of crossover operation which determines the extent to which crossover between parents
(default is 0.7)
mut_prob : float (0.0 to 1.0)
The mutation probability of mutation operation which determines extent to which candidates should be mutated
(default is 0.1)
fitness_type : string
Indicates the nature of fitness value (higher/lower/equal) to be considered during selection of candidates
(default is max)
adaptive_mutation : boolean
If set rate of mutation of candidates dynamically changes during execution depending on diversity in population
(default is true)
initial_mut_prob : float
If adaptive mutation is True, this stores the initial mutation probability
crossover_handlers : list
Stores all crossover handlers added to solve problem
crossover_handlers_weights : list
Stores the weights associated with crossover handlers in crossover_handlers. Sums up to 1
mutation_handlers : list
Stores all mutation handlers added to solve problem
mutation_handlers_weights : list
Stores the weights associated with mutation handlers in mutation_handlers. Sums up to 1
selection_handler : A function reference
Stores the function selected by user to perform selection
fitness_mappings : list
List containing population and associated fitness values, refreshed each generation
population : Class Instance
An instance of the class "Population" used to create initial population.
best_fitness : tuple
Stores the best fitness for each generation
evolution : Class Instance
An instance of any class that supports implementation of evolution
population_control : boolean
If true, population size is maintained. Else, population size may vary in each iteration
hall_of_fame_injection : boolean
If true, best chromosome among all past generations is injected into population after a set number of generations
efficient_iteration_halt : boolean
If true, halts evolution when the best chromosome remains the same for a consecutive number of generations
fitness_external_data : list
Stores any additional data structures required for fitness calculation
selection_external_data : list
Stores any additional data structures required to perform selection
crossover_external_data : dict
Stores any additional data required to perform crossover. Data is mapped to specific crossover function
mutation_external_data : dict
Stores any additional data required to perform mutation. Data is mapped to specific mutation function
hall_of_fame : tuple
Stores the best chromosome among all the chromosomes generated in all past generations
extra_statistics : dict
Stores any extra statistics added by user
"""
def __init__(self,factory,population_size=100,cross_prob=0.7,mut_prob=0.1,fitness_type='max',adaptive_mutation=True, population_control=False,hall_of_fame_injection=True,efficient_iteration_halt=False,use_pyspark=False):
self.fitness_func = None
self.factory = factory
self.cross_prob = cross_prob
self.adaptive_mutation = adaptive_mutation
if self.adaptive_mutation:
self.initial_mut_prob = mut_prob
else:
self.mut_prob = mut_prob
self.crossover_handlers = []
self.crossover_handlers_weights = []
self.mutation_handlers = []
self.mutation_handlers_weights = []
self.selection_handler = None
self.fitness_type = fitness_type
self.fitness_mappings = None
self.population = None
self.population_size = population_size
if type(self.fitness_type) == str:
if self.fitness_type == 'max':
self.best_fitness = None, float("-inf")
elif self.fitness_type == 'min':
self.best_fitness = None, float("inf")
else:
raise Exception('Invalid Fitness Type given to GAEngine')
elif type(self.fitness_type) == tuple or type(self.fitness_type) == list:
if self.fitness_type[0] == 'equal':
self.best_fitness = None, float("inf")
else:
raise Exception('Invalid Fitness Type given to GAEngine')
self.evolution = Evolution.StandardEvolution(pyspark=use_pyspark)
self.population_control = population_control
self.hall_of_fame_injection = hall_of_fame_injection
self.efficient_iteration_halt = efficient_iteration_halt
self.fitness_external_data = []
self.selection_external_data = []
self.crossover_external_data = {}
self.mutation_external_data = {}
self.hall_of_fame = None
self.extra_statistics = {}
def addCrossoverHandler(self,crossover_handler, weight = 1, *args):
"""
Adds crossover handler staticmethod defined in Utils.py and
appends the weightage to be given to the handler
Parameters :
----------
crossover_handler : Method defined in Utils.py or custom
weight : integer relative weightage
any number of extra data required for specific handler
"""
self.crossover_handlers.append(crossover_handler)
self.crossover_handlers_weights.append(weight)
xtra_args = []
for arg in args:
xtra_args.append(arg)
self.crossover_external_data.update({crossover_handler:tuple(xtra_args)})
def addMutationHandler(self,mutation_handler, weight = 1, *args):
"""
Adds mutation handler staticmethod defined in Utils.py and
appends the weightage to be given to the handler
Parameters :
----------
mutation_handler : Method defined in Utils.py or custom
weight : integer relative weightage
any number of extra data required for specific handler
"""
self.mutation_handlers.append(mutation_handler)
self.mutation_handlers_weights.append(weight)
xtra_args = []
for arg in args:
xtra_args.append(arg)
self.mutation_external_data.update({mutation_handler:tuple(xtra_args)})
def doCrossover(self, cross_func, member1, member2):
"""
Calls crossover handler passing 2 given members as parameters
Parameters :
----------
cross_func : Crossover handler to be used
member1 : Parent 1
member2 : Parent 2
Returns :
----------
Tuple containing two children chromosomes resulting from crossover between passed parents
"""
if cross_func in self.crossover_external_data:
return cross_func(member1, member2, *(self.crossover_external_data[cross_func]))
else:
return cross_func(member1, member2)
def doMutation(self, mut_func, member):
"""
Calls mutation handler passing a member as parameter
Parameters :
----------
mut_func : mutation handler to be used
member : The chromosome to perform mutation on
Returns :
----------
A chromosome resulting from mutation on passed member
"""
if mut_func in self.mutation_external_data:
return mut_func(member, *(self.mutation_external_data[mut_func]))
else:
return mut_func(member)
def setSelectionHandler(self,selection_handler, *args):
"""
Sets function to be used for selection_handler
Parameters:
----------
selection_handler : Function to be used to perform selection, can be custom
any extra data required to do selection
"""
self.selection_handler = selection_handler
for arg in args:
self.selection_external_data.append(arg)
def setFitnessHandler(self, fit_function, *args):
"""
Sets function to be used to calculate fitness
Parameters:
----------
fit_function : Function to be used to calculate fitness of a chromosome, can be custom
any extra data required to calculate fitness
"""
self.fitness_func = fit_function
for arg in args:
self.fitness_external_data.append(arg)
def calculateFitness(self,chromosome):
"""
Invokes fitness function (fitness_func) to compute the fitness score of a chromosome
Parameters:
----------
chromosome for which fitness is to be calculated
Returns:
--------
Fitness value of chromosome
"""
if self.fitness_external_data:
return self.fitness_func(chromosome, *(self.fitness_external_data))
else:
return self.fitness_func(chromosome)
def generateFitnessMappings(self):
"""
Generates a list of tuples (individual, fitness_score) and also stores the tuple
containing fittest chromosome [best_fitness] depending on fitness_type(max/min/equal)
"""
self.fitness_mappings = [(member, self.calculateFitness(member)) for member in self.population.members]
if type(self.fitness_type) == str:
if self.fitness_type == 'max':
self.fitness_mappings.sort(key=lambda x:x[1],reverse=True)
self.best_fitness = self.fitness_mappings[0]
if self.hall_of_fame:
if self.best_fitness[1] > self.hall_of_fame[1]:
self.hall_of_fame = self.best_fitness
else:
self.hall_of_fame = self.best_fitness
elif self.fitness_type == 'min':
self.fitness_mappings.sort(key=lambda x:x[1])
self.best_fitness = self.fitness_mappings[0]
if self.hall_of_fame:
if self.best_fitness[1] < self.hall_of_fame[1]:
self.hall_of_fame = self.best_fitness
else:
self.hall_of_fame = self.best_fitness
elif type(self.fitness_type) == tuple or type(self.fitness_type) == list:
self.fitness_mappings.sort(key=lambda x:abs(x[1]-self.fitness_type[1]))
self.best_fitness = self.fitness_mappings[0]
if self.hall_of_fame:
if abs(self.fitness_type[1] - self.best_fitness[1]) < abs(self.fitness_type[1] - self.hall_of_fame[1]):
self.hall_of_fame = self.best_fitness
else:
self.hall_of_fame = self.best_fitness
def handle_selection(self,repeat_chromosome_sorting=False):
"""
Invokes generateFitnessMappings() if repeat_chromosome_sorting=True to generate list of (chromosome,fitness)
Invokes selection_handler staticmethod defined in Utils.py module or custom
Parameters :
----------
repeat_chromosome_sorting : To sort before selection or not
Returns :
---------
List of limited number of fittest members of population
"""
if repeat_chromosome_sorting:
self.generateFitnessMappings()
if self.selection_external_data:
return self.selection_handler(self.fitness_mappings,self, *(self.selection_external_data))
else:
return self.selection_handler(self.fitness_mappings,self)
def normalizeWeights(self):
"""
Normalizes the weights of mutation and crossover handlers
"""
# Normalizing crossover and mutation handler weights, result is a CDF
total = sum(self.mutation_handlers_weights)
cumsum = 0
for i in range(len(self.mutation_handlers_weights)):
cumsum += self.mutation_handlers_weights[i]
self.mutation_handlers_weights[i] = cumsum/total
total = sum(self.crossover_handlers_weights)
cumsum = 0
for i in range(len(self.crossover_handlers_weights)):
cumsum += self.crossover_handlers_weights[i]
self.crossover_handlers_weights[i] = cumsum/total
def chooseCrossoverHandler(self):
"""
Selects crossover handler from available handlers based on weightage given to handlers
Returns :
--------
The selected crossover handler function
"""
x = random.random()
idx = bisect.bisect(self.crossover_handlers_weights, x)
return self.crossover_handlers[idx]
def chooseMutationHandler(self):
"""
Selects mutation handler from available handlers based on weightage given to handlers
Returns :
--------
The selected mutation handler function
"""
x = random.random()
idx = bisect.bisect(self.mutation_handlers_weights, x)
return self.mutation_handlers[idx]
def setEvolution(self,evolution):
"""
Sets evolution instance member to the parameter passed during class initialization
Parameters :
----------
evolution : A predefined evoluton class instance or instance of custom class
"""
self.evolution = evolution
def addStatistic(self,statistic,statistic_function):
"""
Appends a new statistic function
Parameters :
----------
statistic : The statistic for which to add the function being passed
statistic_function : The function to be added to the statistic
"""
if type(statistic) != str:
raise Exception('Invalid Statistics key')
self.extra_statistics[statistic] = statistic_function
def evolve(self,noOfIterations=50):
"""
The interface provided to start evolution
Parameters :
-----------
noOfIterations : int
default value : 50
"""
self.population = Population.Population(self.factory,self.population_size)
self.statistics = Statistics.Statistics()
self.last_20_fitnesses = collections.deque([])
self.continue_evolve(noOfIterations)
def continue_evolve(self, noOfIterations=20):
"""
Performs the evolution by invoking the evolve method from Evolution.py module
as many times as number of iterations specified by user or terminates if optimal
solution is found.
Also invokes compute method from Statistics.py module to generate graph
Parameters :
-----------
noOfIterations : int
default value : 20
"""
self.normalizeWeights()
if self.population == None:
raise Exception('Call evolve before calling continue_evolve')
print("gen\tavg\t\tbest\tworst\t")
i=0
while i<noOfIterations:
self.generateFitnessMappings()
fitnesses = [ x[1] for x in self.fitness_mappings]
self.statistics.add_statistic('best-fitness',self.fitness_mappings[0][1])
self.statistics.add_statistic('worst-fitness',self.fitness_mappings[-1][1])
self.mean_fitness = sum(fitnesses)/len(fitnesses)
self.statistics.add_statistic('avg-fitness',self.mean_fitness)
self.diversity = math.sqrt(sum((fitness - self.mean_fitness)**2 for fitness in fitnesses)) / len(fitnesses)
print("%i\t%.2f\t\t%s\t%s\t" % (len(self.statistics.statistic_dict['best-fitness']),self.mean_fitness,self.fitness_mappings[0][1],self.fitness_mappings[-1][1]))
if self.adaptive_mutation:
self.mut_prob = self.initial_mut_prob * ( 1 + ((self.best_fitness[1]-self.diversity) / (self.diversity+self.best_fitness[1]) ) )
self.mut_prob = np.clip(self.mut_prob,0.0001,0.8)
self.statistics.add_statistic('mutation_rate',self.mut_prob)
self.statistics.add_statistic('diversity',self.diversity)
for statistic in self.extra_statistics:
self.statistics.add_statistic(statistic,self.extra_statistics[statistic](self.fitness_mappings,self))
result = self.evolution.evolve(self)
if self.hall_of_fame_injection and (i+1)%20 == 0:
self.population.new_members.insert(0,self.hall_of_fame[0])
if self.population_control:
if len(self.population.new_members) > self.population_size:
self.population.new_members = self.population.new_members[:self.population_size]
elif len(self.population.new_members) < self.population_size:
self.population.new_members = self.population.new_members * int(self.population_size/len(self.population.new_members)) + self.population.new_members[:self.population_size%len(self.population.new_members)]
if self.efficient_iteration_halt:
if len(self.last_20_fitnesses)==20:
self.last_20_fitnesses.popleft()
self.last_20_fitnesses.append(self.best_fitness[1])
if all(x == self.last_20_fitnesses[0] for x in self.last_20_fitnesses):
break
else:
self.last_20_fitnesses.append(self.best_fitness[1])
# For next iteration
self.population.members = self.population.new_members
self.population.new_members = []
if result == 1:
print('GA Problem Solved')
break
i += 1
|
{"hexsha": "bc3a9675f3a1dd3e6b37cfd4eabd27e12f6ac8e7", "size": 17899, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygenetic/GAEngine.py", "max_stars_repo_name": "QuailAutomation/pygenetic", "max_stars_repo_head_hexsha": "93b0240a1942b882df30b53d856a87becca1d7ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-30T05:13:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-15T19:54:28.000Z", "max_issues_repo_path": "pygenetic/GAEngine.py", "max_issues_repo_name": "QuailAutomation/pygenetic", "max_issues_repo_head_hexsha": "93b0240a1942b882df30b53d856a87becca1d7ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-19T20:30:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-19T20:30:25.000Z", "max_forks_repo_path": "pygenetic/GAEngine.py", "max_forks_repo_name": "QuailAutomation/pygenetic", "max_forks_repo_head_hexsha": "93b0240a1942b882df30b53d856a87becca1d7ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-02T20:52:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-07T15:52:15.000Z", "avg_line_length": 33.7716981132, "max_line_length": 221, "alphanum_fraction": 0.7589809487, "include": true, "reason": "import numpy", "num_tokens": 4167}
|
[STATEMENT]
lemma map_add_restrict_comm:
"S \<inter> T = {} \<Longrightarrow> h |` S ++ h' |` T = h' |` T ++ h |` S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. S \<inter> T = {} \<Longrightarrow> h |` S ++ h' |` T = h' |` T ++ h |` S
[PROOF STEP]
apply (drule restrict_map_disj')
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ?h |` S \<bottom> ?h' |` T \<Longrightarrow> h |` S ++ h' |` T = h' |` T ++ h |` S
[PROOF STEP]
apply (erule map_add_com)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 238, "file": "Separation_Algebra_ex_capDL_Abstract_Separation_D", "length": 3}
|
"""Test the module under sampler."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
import pytest
import numpy as np
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_equal
from imblearn.over_sampling import RandomOverSampler
RND_SEED = 0
X = np.array([[0.04352327, -0.20515826], [0.92923648, 0.76103773], [
0.20792588, 1.49407907
], [0.47104475, 0.44386323], [0.22950086, 0.33367433], [0.15490546, 0.3130677],
[0.09125309, -0.85409574], [0.12372842, 0.6536186],
[0.13347175, 0.12167502], [0.094035, -2.55298982]])
Y = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
def test_ros_init():
sampling_strategy = 'auto'
ros = RandomOverSampler(
sampling_strategy=sampling_strategy, random_state=RND_SEED)
assert ros.random_state == RND_SEED
def test_ros_fit_resample():
ros = RandomOverSampler(random_state=RND_SEED)
X_resampled, y_resampled = ros.fit_resample(X, Y)
X_gt = np.array([[0.04352327, -0.20515826], [0.92923648, 0.76103773], [
0.20792588, 1.49407907
], [0.47104475, 0.44386323], [0.22950086, 0.33367433], [
0.15490546, 0.3130677
], [0.09125309, -0.85409574], [0.12372842, 0.6536186],
[0.13347175, 0.12167502], [0.094035, -2.55298982],
[0.92923648, 0.76103773], [0.47104475, 0.44386323],
[0.92923648, 0.76103773], [0.47104475, 0.44386323]])
y_gt = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0])
assert_allclose(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_ros_fit_resample_half():
sampling_strategy = {0: 3, 1: 7}
ros = RandomOverSampler(
sampling_strategy=sampling_strategy, random_state=RND_SEED)
X_resampled, y_resampled = ros.fit_resample(X, Y)
X_gt = np.array([[0.04352327, -0.20515826], [0.92923648, 0.76103773], [
0.20792588, 1.49407907
], [0.47104475, 0.44386323], [0.22950086,
0.33367433], [0.15490546, 0.3130677],
[0.09125309, -0.85409574], [0.12372842, 0.6536186],
[0.13347175, 0.12167502], [0.094035, -2.55298982]])
y_gt = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
assert_allclose(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
@pytest.mark.filterwarnings("ignore:'return_indices' is deprecated from 0.4")
def test_random_over_sampling_return_indices():
ros = RandomOverSampler(return_indices=True, random_state=RND_SEED)
X_resampled, y_resampled, sample_indices = ros.fit_resample(X, Y)
X_gt = np.array([[0.04352327, -0.20515826], [0.92923648, 0.76103773], [
0.20792588, 1.49407907
], [0.47104475, 0.44386323], [0.22950086, 0.33367433], [
0.15490546, 0.3130677
], [0.09125309, -0.85409574], [0.12372842, 0.6536186],
[0.13347175, 0.12167502], [0.094035, -2.55298982],
[0.92923648, 0.76103773], [0.47104475, 0.44386323],
[0.92923648, 0.76103773], [0.47104475, 0.44386323]])
y_gt = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0])
assert_allclose(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(np.sort(np.unique(sample_indices)), np.arange(len(X)))
def test_multiclass_fit_resample():
y = Y.copy()
y[5] = 2
y[6] = 2
ros = RandomOverSampler(random_state=RND_SEED)
X_resampled, y_resampled = ros.fit_resample(X, y)
count_y_res = Counter(y_resampled)
assert count_y_res[0] == 5
assert count_y_res[1] == 5
assert count_y_res[2] == 5
def test_random_over_sampling_heterogeneous_data():
X_hetero = np.array([['xxx', 1, 1.0], ['yyy', 2, 2.0], ['zzz', 3, 3.0]],
dtype=np.object)
y = np.array([0, 0, 1])
ros = RandomOverSampler(random_state=RND_SEED)
X_res, y_res = ros.fit_resample(X_hetero, y)
assert X_res.shape[0] == 4
assert y_res.shape[0] == 4
assert X_res.dtype == object
assert X_res[-1, 0] in X_hetero[:, 0]
|
{"hexsha": "65bbc58aebb350e85c80b48710255a65d6e2f934", "size": 4126, "ext": "py", "lang": "Python", "max_stars_repo_path": "exl_env/lib/python3.6/site-packages/imblearn/over_sampling/tests/test_random_over_sampler.py", "max_stars_repo_name": "verma-varsha/fraud-detection", "max_stars_repo_head_hexsha": "13c5b0c274dfa2b68e82a4ee317e09223b5b663f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-06-08T06:38:27.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-08T06:38:30.000Z", "max_issues_repo_path": "exl_env/lib/python3.6/site-packages/imblearn/over_sampling/tests/test_random_over_sampler.py", "max_issues_repo_name": "verma-varsha/fraud-detection", "max_issues_repo_head_hexsha": "13c5b0c274dfa2b68e82a4ee317e09223b5b663f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exl_env/lib/python3.6/site-packages/imblearn/over_sampling/tests/test_random_over_sampler.py", "max_forks_repo_name": "verma-varsha/fraud-detection", "max_forks_repo_head_hexsha": "13c5b0c274dfa2b68e82a4ee317e09223b5b663f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-02T18:03:29.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-02T21:34:08.000Z", "avg_line_length": 39.6730769231, "max_line_length": 79, "alphanum_fraction": 0.6325739215, "include": true, "reason": "import numpy", "num_tokens": 1527}
|
import numpy as np
import torch
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import json
import argparse
from collections import OrderedDict
import matplotlib.pyplot as plt
from torch import nn
from torch import optim
import torch.nn.functional as F
from PIL import Image
import glob, os
from torchvision import datasets, transforms, models
parser = argparse.ArgumentParser(description='Model parameters')
parser.add_argument('data_directory', help='data directory')
parser.add_argument('--save_dir', help='directory to save the neural network.')
parser.add_argument('--arch', help='the available models. Options are:vgg,densenet')
parser.add_argument('--learning_rate', help='learning rate')
parser.add_argument('--hidden_units', help='the number of hidden units')
parser.add_argument('--epochs', help='epochs')
parser.add_argument('--gpu',action='store_true', help='gpu')
args = parser.parse_args()
if args.arch not in ('vgg','densenet',None):
raise Exception('Please choose vgg or densenet')
if (args.gpu and not torch.cuda.is_available()):
raise Exception("You do not have GPU")
if(not os.path.isdir(args.data_directory)):
raise Exception('Directory does not exist!')
data_dir = os.listdir(args.data_directory)
data_dir = 'flowers'
train_dir = args.data_directory + '/train'
valid_dir = args.data_directory + '/valid'
test_dir = args.data_directory + '/test'
# TODO: Define your transforms for the training, validation, and testing sets
data_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# TODO: Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=data_transforms)
validation_data = datasets.ImageFolder(valid_dir, transform=data_transforms)
test_data = datasets.ImageFolder(test_dir, transform=data_transforms)
# TODO: Using the image datasets and the trainforms, define the dataloaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=32)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
if (args.arch is None):
arch_type = 'vgg'
else:
arch_type = args.arch
if(args.arch is 'densenet'):
model = models.densenet121(pretrained=True)
input_node=1024
else:
model = models.vgg19(pretrained=True)
input_node=25088
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
if (args.learning_rate is None):
lr = 0.001
else:
lr = float(args.learning_rate)
if (args.epochs is None):
epochs = 10
else:
epochs = int(args.epochs)
if (args.gpu):
device = 'cuda'
else:
device = 'cpu'
if (args.hidden_units is None):
hidden_units = 4096
else:
hidden_units = int(args.hidden_units)
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_node, hidden_units)),
('relu', nn.ReLU()),
('fc2', nn.Linear(hidden_units, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
# Implement a function for the validation pass
def validation(model, testloader, criterion):
test_loss = 0
accuracy = 0
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
output = model.forward(images)
test_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
return test_loss, accuracy
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=lr)
model.to(device)
print_every = 75
steps = 0
for e in range(epochs):
running_loss = 0
for images, labels in iter(train_loader):
steps += 1
optimizer.zero_grad()
images, labels = images.to(device), labels.to(device)
# Forward and backward passes
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
# Make sure network is in eval mode for inference
model.eval()
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
test_loss, accuracy = validation(model, validation_loader, criterion)
print("Epoch: {}/{}.. ".format(e+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Validation Loss: {:.3f}.. ".format(test_loss/len(validation_loader)),
"Validation Accuracy: {:.3f}".format(accuracy/len(validation_loader) *100))
running_loss = 0
#To make training back on
model.train()
#save checkpoint
print('saving checkpoint')
if (args.save_dir is None):
save_dir = 'check.pth'
else:
save_dir = args.save_dir
checkpoint = {'optimizer' : optimizer,
'classifier' : model.classifier,
'model' : model,
'class_to_idx' : train_data.class_to_idx,
'optimizer_dict': optimizer.state_dict(),
'state_dict': model.state_dict(),
'arch': 'densenet121'}
torch.save(checkpoint, save_dir)
print('The Model is ready now')
|
{"hexsha": "bcb9d41055c4adf9b1fd9069b10f17b7c3608835", "size": 6184, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "MostafaKhaled2017/AI-project", "max_stars_repo_head_hexsha": "1c56f2d0c7a8d99d9b7baa7505f84892aa4f88dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "MostafaKhaled2017/AI-project", "max_issues_repo_head_hexsha": "1c56f2d0c7a8d99d9b7baa7505f84892aa4f88dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "MostafaKhaled2017/AI-project", "max_forks_repo_head_hexsha": "1c56f2d0c7a8d99d9b7baa7505f84892aa4f88dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7195767196, "max_line_length": 93, "alphanum_fraction": 0.6355109961, "include": true, "reason": "import numpy", "num_tokens": 1318}
|
import os
import json
import shutil
import torch
import numpy as np
from collections import Counter, OrderedDict
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def copy_checkpoint(folder='./', filename='checkpoint.pth.tar',
copyname='copy.pth.tar'):
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, copyname))
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_json(f_path):
with open(f_path, 'r') as f:
return json.load(f)
def save_json(obj, f_path):
with open(f_path, 'w') as f:
json.dump(obj, f, ensure_ascii=False)
class OrderedCounter(Counter, OrderedDict):
"""Counter that remembers the order elements are first encountered"""
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
def l2_normalize(x, dim=1):
return x / torch.sqrt(torch.sum(x**2, dim=dim).unsqueeze(dim))
|
{"hexsha": "16da1cab525b52ea9b45765ed202b981d83ae3e6", "size": 1644, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/utils.py", "max_stars_repo_name": "mhw32/contrastive-learning-scaffold", "max_stars_repo_head_hexsha": "3173c736969da7f1a218cdbe3da039a7ddb8c541", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-29T18:44:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T18:44:51.000Z", "max_issues_repo_path": "src/utils/utils.py", "max_issues_repo_name": "mhw32/contrastive-learning-scaffold", "max_issues_repo_head_hexsha": "3173c736969da7f1a218cdbe3da039a7ddb8c541", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/utils.py", "max_forks_repo_name": "mhw32/contrastive-learning-scaffold", "max_forks_repo_head_hexsha": "3173c736969da7f1a218cdbe3da039a7ddb8c541", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6875, "max_line_length": 80, "alphanum_fraction": 0.6246958637, "include": true, "reason": "import numpy", "num_tokens": 384}
|
''' 5-statistics-error.py
=========================
AIM: Perform basic statistics on the data and gets the maximal stray light flux for one orbit
INPUT: files: - <orbit_id>_misc/orbits.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : file one stat file
in <orbit_id>_figures/ : error evolution, max. stray light evolution
CMD: python 5-statistics-error.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.figures as figures
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Orbit id
orbit_id = 1001
# Error threshold
p = 0.1
# Flux limitation [ph/(px s)]
rqmt_flux = 1
# File name for the output data file (same as in 2-statistics-step.py)
data_file = 'statistics-error.dat'
# Show plots and detailled analysis ?
show = True
# Fancy plots ?
fancy = True
###########################################################################
### INITIALISATION
# File name for the computed orbit file
error_file = 'error_evolution.dat'
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
if fancy: figures.set_fancy()
if os.path.isfile(folder_misc+data_file):
os.remove(folder_misc+data_file)
f = open(folder_misc+data_file,'w')
###########################################################################
### Load which orbits were computed
data = np.loadtxt(folder_misc+error_file, delimiter=',')
# Data type:
# ref,val,step,error,max_sl,shift
### Error evolution
print >> f, '# ERRORS'
print >> f, '# ! All errors are normalised to 1'
print >> f, '# ! ie 1.0 = 100%'
print >> f, 'error_max:', np.amax(data[:,3])
print >> f, 'error_min:', np.amin(data[:,3])
print >> f, 'error_mean:', np.mean(data[:,3])
print >> f, 'error_std:', np.std(data[:,3])
fig=plt.figure()
ax=plt.subplot(111)
ax.yaxis.set_major_locator(MultipleLocator(5))
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
xx = data[:,1]/param.last_orbits[orbit_id]*365.
xx = figures.convert_date(xx)
plt.plot(xx, data[:,3]*100, linewidth=1.5)
plt.plot([xx[0],xx[-1]], [p*100., p*100.], color='r', lw=3)
fig.autofmt_xdate()
plt.ylim([0, 15])
plt.ylabel(r'$\mathrm{Error\ to\ previous\ step\ [\%]}$')
# Saves the figure
fname = '%serror_evolution_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
############ STRAY LIGHT
print >> f, '# STRAY LIGHT'
# Get the direction of minimum stray light
id_min = find_nearest(data[:,4],np.amin(data[np.where(data[:,4]>0)]))
orbit_max = data[id_min, 1]
time_min, ra_min, dec_min, sl_min = find_direction_flux(orbit_max, orbit_id,find='min', folder=folder_flux)
print >> f, 'min:', sl_min
print >> f, 'minute_min:', time_min
print >> f, 'RA_min:', ra_min
print >> f, 'DEC_min:', dec_min
print >> f, 'mean:', np.mean(data[:,4])
print >> f, 'stddev:', np.std(data[:,4])
# Get the direction of maximum stray light
id_max = find_nearest(data[:,4],np.amax(data[:,4]))
orbit_max = data[id_max, 1]
time_max, ra_max, dec_max, sl_max = find_direction_flux(orbit_max, orbit_id, folder=folder_flux)
print >> f, 'max:', np.amax(sl_max)
print >> f, 'minute_max:', time_max
print >> f, 'RA_max:', ra_max
print >> f, 'DEC_max:', dec_max
print >> f, 'mean:', np.mean(data[:,4])
print >> f, 'stddev:', np.std(data[:,4])
print >> f, 'orbit_above_rqmt:', np.shape(data[np.where(data[:,4]>rqmt_flux)])[0]
print >> f, 'total_orbits:', np.shape(data)[0]
### Maximal sl
fig=plt.figure()
ax=plt.subplot(111)
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
plt.plot(xx, data[:,4], linewidth=3)
plt.plot([xx[0],xx[-1]], [rqmt_flux, rqmt_flux], color='r', lw=3)
fig.autofmt_xdate()
plt.ylabel(r'$\mathrm{Maximum\ stray\ light\ flux\ }\left[\frac{\mathrm{ph}}{\mathrm{px}\cdot\mathrm{s}}\right]$')
# Saves the figure
fname = '%sstray_light_flux_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
####################################################################
fig=plt.figure()
ax=plt.subplot(111)
# zooms
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.xaxis.set_major_locator(MultipleLocator(20.))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
plt.plot(xx, data[:,4], linewidth=3)
fig.autofmt_xdate()
plt.ylim([0, 0.2])
plt.ylabel(r'$\mathrm{Maximum\ stray\ light\ flux\ }\left[\frac{\mathrm{ph}}{\mathrm{px}\cdot\mathrm{s}}\right]$')
# Saves the figure
fname = '%sstray_light_flux_zoom_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
if show: plt.show()
f.close()
|
{"hexsha": "06f767275f3bdeb2f69907096e11cdb3a0ad90d0", "size": 5532, "ext": "py", "lang": "Python", "max_stars_repo_path": "5_statistics_error.py", "max_stars_repo_name": "kuntzer/SALSA-public", "max_stars_repo_head_hexsha": "79fd601d3999ac977bbc97be010b2c4ef81e4c35", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-30T09:59:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-30T09:59:41.000Z", "max_issues_repo_path": "5_statistics_error.py", "max_issues_repo_name": "kuntzer/SALSA-public", "max_issues_repo_head_hexsha": "79fd601d3999ac977bbc97be010b2c4ef81e4c35", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "5_statistics_error.py", "max_forks_repo_name": "kuntzer/SALSA-public", "max_forks_repo_head_hexsha": "79fd601d3999ac977bbc97be010b2c4ef81e4c35", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-30T10:38:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-30T10:38:54.000Z", "avg_line_length": 28.9633507853, "max_line_length": 114, "alphanum_fraction": 0.6576283442, "include": true, "reason": "import numpy", "num_tokens": 1515}
|
#!/usr/bin/env python
__date__ = '2019-March-6'
__version__ = '0.9.43a'
import sys
import numpy
import scipy
import matplotlib
import lmfit
try:
import wx
except:
wx = None
def make_banner():
authors = "M. Newville, M. Koker, B. Ravel, and others"
sysvers = sys.version
if '\n' in sysvers:
sysvers = sysvers.split('\n')[0]
lines = ["Larch %s (%s) %s" % (__version__, __date__, authors),
"Python: %s" % (sysvers)]
reqs = []
for mod in (numpy, scipy, matplotlib, lmfit, wx):
if mod is not None:
try:
vers = "%s %s" % (mod.__name__, mod.__version__)
except:
vers = "%s not available" % (mod.__name__)
reqs.append(vers)
lines.append(', '.join(reqs))
linelen = max([len(line) for line in lines])
border = '='*min(linelen, 75)
lines.insert(0, border)
lines.append(border)
return '\n'.join(lines)
|
{"hexsha": "b7091c0436f4a857b8b3f57bc1facb0bc22df692", "size": 954, "ext": "py", "lang": "Python", "max_stars_repo_path": "larch/version.py", "max_stars_repo_name": "Bob620/xraylarch", "max_stars_repo_head_hexsha": "f8d38e6122cc0e8c990b0f024db3b503a5fbf057", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "larch/version.py", "max_issues_repo_name": "Bob620/xraylarch", "max_issues_repo_head_hexsha": "f8d38e6122cc0e8c990b0f024db3b503a5fbf057", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "larch/version.py", "max_forks_repo_name": "Bob620/xraylarch", "max_forks_repo_head_hexsha": "f8d38e6122cc0e8c990b0f024db3b503a5fbf057", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.7142857143, "max_line_length": 67, "alphanum_fraction": 0.5639412998, "include": true, "reason": "import numpy,import scipy", "num_tokens": 279}
|
module zFunOriginal
contains
subroutine zfun(z,fu)
!
! routine which evaluates the plasma dispersion function. uses
! numerical integration (absolute value of the complex argument, z,
! less than 5) or asymptotic expansion.
!
complex z,fu,temp1,temp2,z2,tpiiod
dimension c(21),d(21),e(21),f(21),w(21)
data delta/5.0e-1/, yi/-1.0e+0/, n/21/, itest/0/
data zero/0.0e+0/, osqpi/5.6418958355e-1/, tpi/6.28318530718e+0/
!
if(itest .eq. 1) go to 1
itest=1
!*** define weights and store constants used for integration.
!*** weights are derived from a 3 point integration scheme.
nm3=n-3
conoi=delta*osqpi
w(1)=conoi*3.75e-1
w(2)=conoi*1.1666666667e+0
w(3)=conoi*9.5833333333e-1
do 20 i=1,3
nmip1=n-i+1
20 w(nmip1)=w(i)
do 30 i=4,nm3
30 w(i)=conoi
no2=n/2
no2p1=no2+1
x=zero
y=yi
e(no2p1)=x
f(no2p1)=y
temp1=cmplx(x,y)
temp2=cexp(-temp1*temp1)
c(no2p1)=real(temp2)*w(no2p1)
d(no2p1)=aimag(temp2)*w(no2p1)
do 200 i=1,no2
x=delta*float(i)
npi=no2p1+i
nmi=no2p1-i
temp1=cmplx(x,y)
temp2=cexp(-temp1*temp1)
c(npi)=real(temp2)*w(npi)
c(nmi)=real(temp2)*w(nmi)
d(npi)=aimag(temp2)*w(npi)
d(nmi)=aimag(temp2)*w(nmi)*(-1.0e+0)
e(nmi)=-x
e(npi)=x
f(npi)=y
f(nmi)=y
200 continue
tpiod=tpi/delta
tpiiod=cmplx(zero,tpiod)
!*** begin calculations.
1 g=real(z)
yy=aimag(z)
h=abs(yy)
za=z*conjg(z)
if(za .ge. 2.5e+1) go to 5
z2=z*z
!*** numerical integration.
!*** f=1/sqrt(pi)*sum of...w(i)*exp(-x(i)**2)/(x(i)-z)...i=1,n.
!*** integration is along a line x(i) in the complex plane, where
!*** the imaginary part of x(i)=yi and the difference between
!*** successive real parts of x(i)=delta. limits of integration
!*** are from -delta*n/2 to delta*n/2.
!*** compute the integral by taking the sum from 1 to n of the
!*** constants divided by x(i)-z. uses real arithmetic.
zr=0.0e+0
zi=0.0e+0
do 7 i=1,n
a=e(i)-g
b=f(i)-h
den=a*a+b*b
oden=1.0e+0/den
zr=zr+(a*c(i)+b*d(i))*oden
zi=zi+(a*d(i)-b*c(i))*oden
7 continue
!*** add the correction term.
fu=cmplx(zr,zi)+(0.0e+0,-3.5449077018e+0)*cexp(-z2-tpiod* &
(h-yi)+tpiiod*g)
if(yy .ge. zero) go to 6
!*** imaginary part of argument is negative.
fu=conjg(fu)+(0.0e+0,3.5449077018e+0)*cexp(-z2)
go to 6
!*** magnitude of argument is greater than 5, use
!*** asymptotic expansion.
5 call aexpan(z,g,h,yy,fu)
6 return
end subroutine zfun
subroutine aexpan(z,g,h,yy,fu)
!
! routine which computes the plasma dispersion function using
! asymptotic expansion. if the imaginary part of the argument, yy,
! is equal to zero real arithmetic is used.
!
complex z,fu,a,z2,oz2
data zero/0.0e+0/, n/8/
!
if(yy .eq. zero) go to 1
!*** complex arithmetic.
z2=z*z
oz2=1.0e+0/z2
fu=-1.0e+0/z
a=fu
en=5.0e-1
do 10 i=1,n
a=en*a*oz2
fu=fu+a
10 en=en+1.0e+0
if(yy .gt. zero) go to 30
if(h .gt. sqrt(g*g+1.72e+2)) go to 20
fu=fu+(0.0e+0,3.5449077018e+0)*cexp(-z2)
go to 30
!*** error stop to avoid overflow.
20 write (51,100) z
100 format(//1x,'*** error stop in frdcnt routine, argument is', &
' too small, arg =',1p2e14.6)
stop
!*** real arithmetic.
1 x2=g*g
ox2=1.0e+0/x2
f=-1.0e+0/g
b=f
en=5.0e-1
do 11 i=1,n
b=en*b*ox2
f=f+b
11 en=en+1.0e+0
c=1.7724538509e+0*exp(-x2)
fu=cmplx(f,c)
30 return
end subroutine aexpan
!
!
end module zfunOriginal
|
{"hexsha": "3fd2dc5643c5bb1bb1a7d55533f8aea90b129a52", "size": 3837, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/zfunOriginal.f90", "max_stars_repo_name": "efdazedo/aorsa2d", "max_stars_repo_head_hexsha": "ce0b8c930715277eeb4d23e60cc88434ffdaa583", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-02-13T21:57:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T16:47:51.000Z", "max_issues_repo_path": "src/zfunOriginal.f90", "max_issues_repo_name": "efdazedo/aorsa2d", "max_issues_repo_head_hexsha": "ce0b8c930715277eeb4d23e60cc88434ffdaa583", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-02-23T20:33:30.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-23T20:34:31.000Z", "max_forks_repo_path": "src/zfunOriginal.f90", "max_forks_repo_name": "efdazedo/aorsa2d", "max_forks_repo_head_hexsha": "ce0b8c930715277eeb4d23e60cc88434ffdaa583", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-02-15T16:50:58.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-23T14:07:59.000Z", "avg_line_length": 26.8321678322, "max_line_length": 71, "alphanum_fraction": 0.5590304926, "num_tokens": 1513}
|
import pioneer.common.constants as Constants
from pioneer.common.logging_manager import LoggingManager
from pioneer.das.api.sources.filesource import FileSource, try_all_patterns
from pioneer.das.api.loaders import pickle_loader
from ruamel.std import zipfile
import multiprocessing
import numpy as np
import pandas as pd
import os
import re
import sys
import threading
import time
import traceback
import yaml
class ZipFileSource(FileSource):
"""Loads a list of files from a zip archive."""
def __init__(self, path, pattern=None, sort=True, loader=None,
check_timestamps=True, mode="lock"):
super(ZipFileSource, self).__init__(path, pattern, sort, loader)
self.path = path
self.member_cached = None
self.lock = multiprocessing.RLock()
self.tlock = threading.RLock()
self.archive = None
self.set_mode(mode)
self.nb_data_per_pkl_file = 1
self.files, self.time_of_issues, self.timestamps, self.nb_data_per_pkl_file = self.get_files(self.access)
if check_timestamps:
self._check_timestamps_consistency()
def get_files(self, access):
try:
members = access( lambda archive: archive.infolist())
except Exception as e:
traceback.print_exc()
raise e
files = []
timestamps = None
time_of_issues = None
nb_data_per_pkl_file = 1
for m in members:
name = m.filename
if self.pattern is None:
match, self.pattern = try_all_patterns(name)
else:
match = re.match(self.pattern, name)
if match:
groups = match.groups()
if self.sort and not groups:
raise ValueError('no groups')
if self.sort and groups:
sample = (int(groups[0]), m)
else:
sample = m
files.append(sample)
# read config for multiple rows per .pkl file (high fps sensors)
elif(name == Constants.CONFIG_YML_PATTERN):
def f_CONFIG_YML_PATTERN(archive):
with archive.open(name) as stream:
data_yaml = yaml.safe_load(stream)
return data_yaml['nb_vectors_in_pkl']
nb_data_per_pkl_file = access(f_CONFIG_YML_PATTERN)
# read timestamps
elif(name == Constants.TIMESTAMPS_CSV_PATTERN):
def f_TIMESTAMPS_CSV_PATTERN(archive):
with archive.open(name) as stream:
# sensor_ts = np.loadtxt(stream, dtype='u8', delimiter=' ', ndmin=2)
sensor_ts = pd.read_csv(stream, delimiter=" ", dtype='u8', header=None).values
timestamps = sensor_ts[:,0]
# check if ts is always go up, for imu data (more than 1 data per pkl file) the test is not complet -> to improve
if len(timestamps)>2 and (np.min(np.diff(timestamps.astype(np.int64)))<0):
LoggingManager.instance().warning('Timestamps are not strictly increasing for datasource file {}'.format(self.path))
if sensor_ts.shape[1] > 1:
time_of_issues = sensor_ts[:,1]
else:
time_of_issues = timestamps
return time_of_issues, timestamps
time_of_issues, timestamps = access(f_TIMESTAMPS_CSV_PATTERN)
if self.sort:
files.sort()
files = [s[1] for s in files]
return files, time_of_issues, timestamps, nb_data_per_pkl_file
def set_mode(self, mode):
def access_file(f):
with zipfile.ZipFile(self.path, 'r') as archive:
return f(archive)
def access_raw(f):
return f(self.archive)
def access_lock(f):
exception = None
while True:
try: # workaround for BadZipFile in multiprocessing mode
with self.lock:
with self.tlock:
return f(self.archive)
except Exception as e:
if False:
sys.stdout.write(f"\rZipFileSource {os.path.basename(self.path)} read error, falling back to 'file' mode{' ' * 10}")
sys.stdout.flush()
try:
if self.archive is not None:
self.archive.close()
self.archive = None
self.files, self.time_of_issues, self.timestamps, self.nb_data_per_pkl_file = self.get_files(access_file)
rv = access_file(f)
self.archive = zipfile.ZipFile(self.path, 'r')
return rv
except Exception as access_file_e:
self.archive = zipfile.ZipFile(self.path, 'r')
print(f'\rZipFileSource fallback failed: {access_file_e}, retrying infinitely')
time.sleep(0.1)
if mode == "lock":
self.archive = zipfile.ZipFile(self.path, 'r')
self.access = access_lock
elif mode == "file":
self.archive = None
self.access = access_file
elif mode == "none":
self.archive = zipfile.ZipFile(self.path, 'r')
self.access = access_raw
def __del__(self):
if self.archive is not None:
self.archive.close()
def __len__(self):
return self.timestamps.shape[0]
def __getitem__(self, index):
if index >= len(self):
raise IndexError(f'For datasource {os.path.basename(self.path)} index {index} >= len {len(self)}')
if self.nb_data_per_pkl_file == 1:
member = self.files[index]
data_bytes = self.access( lambda archive: archive.read(member))
return self.loader(data_bytes)
# pkl containing nb_data_per_pkl_file data rows
else:
if(index<0):
index = len(self) + index
if(index < 0):
raise IndexError('index {} < 0'.format(index))
member = self.files[index//self.nb_data_per_pkl_file]
# no reload
if self.member_cached is None or self.member_cached!=member:
data_bytes = self.access( lambda archive: archive.read(member))
self.data_cached = self.loader(data_bytes)
self.member_cached=member
return self.data_cached[index%self.nb_data_per_pkl_file] # compatible with array and struct array
def _get_nb_timestamps_and_files_or_rows(self):
# nb timestamps
nts = len(self)
# nb files, or rows
if(self.nb_data_per_pkl_file == 1):
nfiles = len(self.files)
else:
# if empty no data
if len(self.files) == 0:
nfiles = 0
# else, (N-1) x nb_data_per_pkl_file + nb rows in last file
else:
data_bytes = self.access(lambda archive: archive.read(self.files[-1]))
nfiles = (len(self.files)-1)*self.nb_data_per_pkl_file + self.loader(data_bytes).shape[0]
return nts, nfiles
def _check_timestamps_consistency(self):
nts, nfiles = self._get_nb_timestamps_and_files_or_rows()
if nfiles != nts:
n = min(nts, nfiles)
LoggingManager.instance().warning('The number of timestamps and data files are '
'different for sensor %s (nfiles: %d != nts: %d). '
'Keeping the %d first timestamps and files'
%(self.path, nfiles, nts, n))
self.timestamps = self.timestamps[:n]
if(self.nb_data_per_pkl_file == 1):
self.files = self.files[:n]
else:
if n%self.nb_data_per_pkl_file == 0:
self.files = self.files[:int(n/self.nb_data_per_pkl_file)]
else:
# on va conserver un fichier à la fin qui ne sera pas utilisé en entier
self.files = self.files[:n//self.nb_data_per_pkl_file+1]
nts, nfiles = self._get_nb_timestamps_and_files_or_rows()
assert nfiles == nts
def get(self, name, loader=None):
if loader is None:
loader = pickle_loader
try:
data_bytes = self.access(lambda archive: archive.read(archive.getinfo(name)))
data = loader(data_bytes)
except KeyError:
data = None
return data
|
{"hexsha": "e691c29491733542a4eaffeff4811e6241fc7885", "size": 8861, "ext": "py", "lang": "Python", "max_stars_repo_path": "pioneer/das/api/sources/zip_filesource.py", "max_stars_repo_name": "leddartech/pioneer.das.api", "max_stars_repo_head_hexsha": "35f2c541ea8d1768d5f4612ea8d29cb2ba8345b7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-02-19T16:24:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-01T17:51:22.000Z", "max_issues_repo_path": "pioneer/das/api/sources/zip_filesource.py", "max_issues_repo_name": "leddartech/pioneer.das.api", "max_issues_repo_head_hexsha": "35f2c541ea8d1768d5f4612ea8d29cb2ba8345b7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-02-25T08:56:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-20T20:58:26.000Z", "max_forks_repo_path": "pioneer/das/api/sources/zip_filesource.py", "max_forks_repo_name": "leddartech/pioneer.das.api", "max_forks_repo_head_hexsha": "35f2c541ea8d1768d5f4612ea8d29cb2ba8345b7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-01T07:47:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-07T20:50:27.000Z", "avg_line_length": 39.0352422907, "max_line_length": 144, "alphanum_fraction": 0.5527592822, "include": true, "reason": "import numpy", "num_tokens": 1810}
|
<html><head>
<meta charset="utf-8">
<title>Dr.J</title>
<link href="style/main.css" rel="stylesheet" type="text/css">
<link rel="apple-touch-icon" sizes="57x57" href="icons/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="icons/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="icons/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="icons/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="icons/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="icons/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="icons/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="icons/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="icons/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="icons/android-icon-192x192.png">
<link rel="icon" type="image/png" sizes="32x32" href="icons/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="icons/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="icons/favicon-16x16.png">
<link rel="manifest" href="icons/manifest.json">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="icons/ms-icon-144x144.png">
<meta name="theme-color" content="#ffffff">
<link rel="apple-touch-startup-image" href="meta/apple-touch-startup-image-640x1096.png" media="(device-width: 320px) and (device-height: 568px) and (-webkit-device-pixel-ratio: 2)"> <!-- iPhone 5+ -->
<link rel="apple-touch-startup-image" href="meta/apple-touch-startup-image-640x920.png" media="(device-width: 320px) and (device-height: 480px) and (-webkit-device-pixel-ratio: 2)"> <!-- iPhone, retina -->
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<meta name="HandheldFriendly" content="True">
<meta name="MobileOptimized" content="320">
<meta name="viewport" content="width=device-width, target-densitydpi=160dpi, initial-scale=1.0, maximum-scale=1, user-scalable=no, minimal-ui">
</head>
<body>
<div class="container">
<div class="heading">
<img style="vertical-align:middle;" src="icons/apple-icon.png" align="middle">
<h1 class="title" style="line-height: 250%">Dr.J.</h1>
<div class="scores-container" style="display: none;">
<div class="score-container">0</div>
<div class="best-container">2800</div>
</div>
</div>
<div class="above-game">
<p class="game-intro">Join elements and get to the <strong>SODIUM tile!</strong></p>
<a class="restart-button">New Game</a>
<p class="game-intro"><a class="save-button" style="margin:0 5px">Save current game</a>
<a class="load-button">Load saved game</a></p>
</div>
<div class="app-notice-container">
<span class="notice-close-button">x</span>
<div class="notice-body">Game has been saved successfully :)</div>
</div>
<div class="game-container">
<div class="grid-container">
<div class="grid-row">
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
</div>
<div class="grid-row">
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
</div>
<div class="grid-row">
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
</div>
<div class="grid-row">
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
<div class="grid-cell"></div>
</div>
</div>
<div class="tile-container"><div class="tile tile-2 tile-position-2-4 tile-new"><div class="tile-inner">H</div></div><div class="tile tile-4 tile-position-4-4 tile-new"><div class="tile-inner">He</div></div></div>
</div>
<p class="game-explanation">
<strong class="important">How to play:</strong> Use your <strong>arrow keys</strong> to move the tiles. When two tiles with the same element touch, they <strong>merge into one !</strong>
</p>
<hr>
<p>
<strong class="important">Note:</strong> This site is the official version of Dr.J. You can play it on your phone via <a href="https://git.io/vo93m">https://git.io/vo93m.</a> All other apps or sites are derivatives or fakes, and should be used with caution.
</p>
<hr>
<p>
Created by Hjondi based on <a href="https://gabrielecirulli.github.io/2048/" target="_blank">2048.</a> And on <a href="https://itunes.apple.com/us/app/1024!/id823499224" target="_blank">1024 by Veewo Studio</a> and conceptually similar to <a href="http://asherv.com/threes/" target="_blank">Threes by Asher Vollmer.</a>
</p>
</div>
<script src="js/periodic_table.js"></script>
<script src="js/bind_polyfill.js"></script>
<script src="js/classlist_polyfill.js"></script>
<script src="js/animframe_polyfill.js"></script>
<script src="js/keyboard_input_manager.js"></script>
<script src="js/html_actuator.js"></script>
<script src="js/grid.js"></script>
<script src="js/tile.js"></script>
<script src="js/session_storage_manager.js"></script>
<script src="js/local_storage_manager.js"></script>
<script src="js/game_manager.js"></script>
<script src="js/application.js"></script>
</body></html>
|
{"hexsha": "921e14245e4675c0887212f04a5b07189896e357", "size": 5713, "ext": "r", "lang": "R", "max_stars_repo_path": "mrs.r", "max_stars_repo_name": "aboodmw3/mrs.r", "max_stars_repo_head_hexsha": "031417e07499a0d60daad22d95024791c3c74697", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mrs.r", "max_issues_repo_name": "aboodmw3/mrs.r", "max_issues_repo_head_hexsha": "031417e07499a0d60daad22d95024791c3c74697", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mrs.r", "max_forks_repo_name": "aboodmw3/mrs.r", "max_forks_repo_head_hexsha": "031417e07499a0d60daad22d95024791c3c74697", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6782608696, "max_line_length": 323, "alphanum_fraction": 0.6485209172, "num_tokens": 1645}
|
#!/usr/bin/env python
import numpy as np
from matplotlib import *
import matplotlib.pyplot as plt
import sys
import os
swin_hdr = np.dtype([ ('sync', 'i4'), \
('ver', 'i4'), \
('no_bl','i4'), \
('mjd', 'i4'), \
('sec', 'f8'), \
('config_idx', 'i4'), \
('src_idx', 'i4'), \
('freq_idx', 'i4'), \
('polar', 'a2'), \
('pulsarbin', 'i4'), \
('weight', 'f8'), \
('uvw', 'f8', 3)])
class DiFXScan(object):
def __init__(self):
self.mjd = -1
self.fullsec = -1
self.dur = -1
class DiFX(object):
def __init__(self):
self.path = ''
self.fmt = ''
self.stn = []
self.bl_dict= {}
self.nchan = -1
self.freq_list = []
self.nfreq = -1
self.ap = -1.0
self.bw = -1.0
self.scan = {}
def load_scan(self, scan_no):
scan = self.scan[scan_no]
swin_rec = np.dtype([ ('h', swin_hdr), \
('vis', 'c8', self.nchan)])
foldername = self.fmt % (scan_no)
filename = "%s/%s/DIFX_%5d_%06d.s0000.b0000" \
% (self.path, foldername, scan.mjd, scan.sec)
recs = np.fromfile(filename, dtype = swin_rec)
return recs
def rec2arr(self, recs, scan_no, bl_no, freq_list):
scan = self.scan[scan_no]
nfreq = len(freq_list)
fd = {}
for i, fid in enumerate(freq_list):
fd[fid] = i
nap = int(np.ceil(self.scan[scan_no].dur / self.ap))
buf = np.zeros((nap, nfreq, self.nchan), dtype = np.complex64)
head= np.zeros((nap, nfreq), dtype = swin_hdr)
arr2rec = {}
nap = 0
for i, rec in enumerate(recs):
# print rec['h']
if rec['h']['no_bl'] != bl_no:
continue
fid = rec['h']['freq_idx']
if not fd.has_key(fid):
continue
apid = int(((rec['h']['mjd'] - scan.mjd) * 86400.0 \
+ (rec['h']['sec'] - scan.sec)) / self.ap)
buf[apid, fd[fid], :] = rec['vis'][:]
head[apid, fd[fid]] = rec['h']
arr2rec[apid * nfreq + fid] = i
nap += 1
if nap % 10000 == 0:
print '%d APs. have been loaded.' % (nap)
# if nap == 1050000:
# break
return head, buf, arr2rec
|
{"hexsha": "bf10d83bbf4d6b4184c359a6c19c590c46b47790", "size": 2810, "ext": "py", "lang": "Python", "max_stars_repo_path": "difxfile.py", "max_stars_repo_name": "liulei/VOLKS", "max_stars_repo_head_hexsha": "eb459cef8f10a8f27a37eb633c5d070fa39f5279", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "difxfile.py", "max_issues_repo_name": "liulei/VOLKS", "max_issues_repo_head_hexsha": "eb459cef8f10a8f27a37eb633c5d070fa39f5279", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "difxfile.py", "max_forks_repo_name": "liulei/VOLKS", "max_forks_repo_head_hexsha": "eb459cef8f10a8f27a37eb633c5d070fa39f5279", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-12-13T21:30:39.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-06T03:06:51.000Z", "avg_line_length": 30.2150537634, "max_line_length": 72, "alphanum_fraction": 0.3918149466, "include": true, "reason": "import numpy", "num_tokens": 784}
|
from __future__ import print_function
import argparse
import os
import h5py
import numpy as np
import sys
from molecules.model import MoleculeVAE
from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, \
decode_smiles_from_indexes, load_dataset
from pylab import figure, axes, scatter, title, show
from rdkit import Chem
from rdkit.Chem import Draw
LATENT_DIM = 292
TARGET = 'autoencoder'
def get_arguments():
parser = argparse.ArgumentParser(description='Molecular autoencoder network')
parser.add_argument('data', type=str, help='File of latent representation tensors for decoding.')
parser.add_argument('model', type=str, help='Trained Keras model to use.')
parser.add_argument('--save_h5', type=str, help='Name of a file to write HDF5 output to.')
parser.add_argument('--target', type=str, default=TARGET,
help='What model to sample from: autoencoder, encoder, decoder.')
parser.add_argument('--latent_dim', type=int, metavar='N', default=LATENT_DIM,
help='Dimensionality of the latent representation.')
return parser.parse_args()
def read_latent_data(filename):
h5f = h5py.File(filename, 'r')
data = h5f['latent_vectors'][:]
charset = h5f['charset'][:]
h5f.close()
return (data, charset)
def autoencoder(args, model):
latent_dim = args.latent_dim
data, charset = load_dataset(args.data, split = False)
if os.path.isfile(args.model):
model.load(charset, args.model, latent_rep_size = latent_dim)
else:
raise ValueError("Model file %s doesn't exist" % args.model)
sampled = model.autoencoder.predict(data[0].reshape(1, 120, len(charset))).argmax(axis=2)[0]
mol = decode_smiles_from_indexes(map(from_one_hot_array, data[0]), charset)
sampled = decode_smiles_from_indexes(sampled, charset)
print(mol)
print(sampled)
def decoder(args, model):
latent_dim = args.latent_dim
data, charset = read_latent_data(args.data)
if os.path.isfile(args.model):
model.load(charset, args.model, latent_rep_size = latent_dim)
else:
raise ValueError("Model file %s doesn't exist" % args.model)
sampled = model.decoder.predict(data[0].reshape(1, latent_dim)).argmax(axis=2)[0]
sampled = decode_smiles_from_indexes(sampled, charset)
print(sampled)
def encoder(args, model):
latent_dim = args.latent_dim
data, charset = load_dataset(args.data, split = False)
if os.path.isfile(args.model):
model.load(charset, args.model, latent_rep_size = latent_dim)
else:
raise ValueError("Model file %s doesn't exist" % args.model)
x_latent = model.encoder.predict(data)
if args.save_h5:
h5f = h5py.File(args.save_h5, 'w')
h5f.create_dataset('charset', data = charset)
h5f.create_dataset('latent_vectors', data = x_latent)
h5f.close()
else:
np.savetxt(sys.stdout, x_latent, delimiter = '\t')
def main():
args = get_arguments()
model = MoleculeVAE()
if args.target == 'autoencoder':
autoencoder(args, model)
elif args.target == 'encoder':
encoder(args, model)
elif args.target == 'decoder':
decoder(args, model)
if __name__ == '__main__':
main()
|
{"hexsha": "1302c3af20250ec051789aab7f12a2b1273ff78a", "size": 3281, "ext": "py", "lang": "Python", "max_stars_repo_path": "sample.py", "max_stars_repo_name": "jeammimi/chem2", "max_stars_repo_head_hexsha": "4580f802f50b511937c40f3063d3878c509a9e62", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 537, "max_stars_repo_stars_event_min_datetime": "2016-10-24T20:13:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T01:16:30.000Z", "max_issues_repo_path": "sample.py", "max_issues_repo_name": "jeammimi/chem2", "max_issues_repo_head_hexsha": "4580f802f50b511937c40f3063d3878c509a9e62", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 66, "max_issues_repo_issues_event_min_datetime": "2016-10-25T00:04:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-30T16:46:35.000Z", "max_forks_repo_path": "sample.py", "max_forks_repo_name": "jeammimi/chem2", "max_forks_repo_head_hexsha": "4580f802f50b511937c40f3063d3878c509a9e62", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 143, "max_forks_repo_forks_event_min_datetime": "2016-10-24T23:10:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-01T14:21:41.000Z", "avg_line_length": 33.4795918367, "max_line_length": 101, "alphanum_fraction": 0.6885096007, "include": true, "reason": "import numpy", "num_tokens": 801}
|
import cv2
import mediapipe as mp
import numpy as np
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
def stack_images(scale, imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range(0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(
imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale
)
if len(imgArray[x][y].shape) == 2:
imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank] * rows
hor_con = [imageBlank] * rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)
if len(imgArray[x].shape) == 2:
imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(imgArray)
ver = hor
return ver
def get_bbox(x_values, y_values, margin=0):
return min(x_values) - margin, min(y_values) - margin, max(x_values) + margin, max(y_values) + margin
left_eye_indices = [33, 161, 160, 159, 158, 157, 173, 133, 155, 154, 153, 145, 144, 163, 7]
right_eye_indices = [362, 398, 384, 385, 386, 387, 388, 466, 263, 249, 390, 373, 374, 380, 381, 382]
left_eye_vertical_center_indices = [159, 145]
right_eye_vertical_center_indices = [386, 374]
left_eye_horizontal_center_indices = [33, 173]
right_eye_horizontal_center_indices = [398, 263]
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
cap.set(10, 200)
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 20
params.filterByArea = True
params.maxArea = 1000
detector = cv2.SimpleBlobDetector_create(params)
def blob_process(img, detector):
img = cv2.erode(img, (5, 5), iterations=2) # 1
img = cv2.dilate(img, (5, 5), iterations=4) # 2
img = cv2.medianBlur(img, 5) # 3
keypoints = detector.detect(img)
return keypoints
def empty(a):
pass
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars", (640, 70))
cv2.createTrackbar("Threshold 1", "TrackBars", 69, 255, empty)
cv2.createTrackbar("Threshold 2", "TrackBars", 127, 255, empty)
with mp_face_mesh.FaceMesh(
max_num_faces=1, refine_landmarks=True, min_detection_confidence=0.5, min_tracking_confidence=0.5
) as face_mesh:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
image = cv2.flip(image, 1)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = face_mesh.process(image)
# Draw the face mesh annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if faces := results.multi_face_landmarks:
shape = image.shape
left_eye_coords = [
(int((landmark := faces[0].landmark[i]).x * shape[1]), int(landmark.y * shape[0]), landmark.z)
for i in left_eye_indices
]
right_eye_coords = [
(int((landmark := faces[0].landmark[i]).x * shape[1]), int(landmark.y * shape[0]), landmark.z)
for i in right_eye_indices
]
bbox_offset = 3
x1, y1, x2, y2 = get_bbox(
[x for x, _, _ in left_eye_coords], [y for _, y, _ in left_eye_coords], bbox_offset
)
x3, y3, x4, y4 = get_bbox(
[x for x, _, _ in right_eye_coords], [y for _, y, _ in right_eye_coords], bbox_offset
)
z_min, z_max = min(z for _, _, z in left_eye_coords), max(z for _, _, z in left_eye_coords)
# print(z_min, z_max)
threshold1 = cv2.getTrackbarPos("Threshold 1", "TrackBars")
threshold2 = cv2.getTrackbarPos("Threshold 2", "TrackBars")
eye1, eye2 = image[y1:y2, x1:x2], image[y3:y4, x3:x4]
eye1, eye2 = cv2.cvtColor(eye1, cv2.COLOR_BGR2GRAY), cv2.cvtColor(eye2, cv2.COLOR_BGR2GRAY)
eye1, eye2 = cv2.GaussianBlur(eye1, (3, 3), 0.5), cv2.GaussianBlur(eye2, (3, 3), 0.5)
eye1, eye2 = cv2.Canny(eye1, threshold1, threshold2), cv2.Canny(eye2, threshold1, threshold2)
contours, hierarchy = cv2.findContours(eye1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
eye1 = cv2.cvtColor(eye1, cv2.COLOR_GRAY2BGR)
for cnt in contours:
area = cv2.contourArea(cnt)
if area > 10:
cv2.drawContours(eye1, cnt, -1, (255, 0, 0), 1)
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
# eye1 = cv2.erode(eye1, (5, 5), iterations=1) # 1
# eye1 = cv2.dilate(eye1, (5, 5), iterations=1) # 2
# _, eye1 = cv2.threshold(eye1, threshold1, 255, cv2.THRESH_BINARY)
# _, eye2 = cv2.threshold(eye2, threshold2, 255, cv2.THRESH_BINARY)
# keypoints1 = blob_process(eye1, detector)
# keypoints2 = blob_process(eye2, detector)
# eye1 = cv2.drawKeypoints(
# eye1, keypoints1, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
# )
# eye2 = cv2.drawKeypoints(
# eye2, keypoints2, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
# )
ratio = (y2 - y1) / (x2 - x1)
eye1, eye2 = cv2.resize(eye1, (300, int(300 * ratio))), cv2.resize(eye2, (300, int(300 * ratio)))
cv2.imshow("Eye 1", eye1)
cv2.imshow("Eye 2", eye2)
### DRAWING ON THE MAIN IMAGE ###
#################################
for x, y, z in left_eye_coords:
cv2.circle(
image, (x, y), radius=1, color=(0, int(np.interp(z, [z_min, z_max], [0, 255])), 0), thickness=1
)
for x, y, z in right_eye_coords:
cv2.circle(
image, (x, y), radius=1, color=(0, int(np.interp(z, [z_min, z_max], [0, 255])), 0), thickness=1
)
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.rectangle(image, (x3, y3), (x4, y4), (0, 0, 255), 2)
(x5, y5), (x6, y6) = [
(int((landmark := faces[0].landmark[i]).x * shape[1]), int(landmark.y * shape[0]))
for i in left_eye_vertical_center_indices
]
(x7, y7), (x8, y8) = [
(int((landmark := faces[0].landmark[i]).x * shape[1]), int(landmark.y * shape[0]))
for i in left_eye_horizontal_center_indices
]
cv2.line(image, (x5, y5), (x6, y6), (255, 0, 0), 1)
cv2.line(image, (x7, y7), (x8, y8), (255, 0, 0), 1)
(x9, y9), (x10, y10) = [
(int((landmark := faces[0].landmark[i]).x * shape[1]), int(landmark.y * shape[0]))
for i in right_eye_vertical_center_indices
]
(x11, y11), (x12, y12) = [
(int((landmark := faces[0].landmark[i]).x * shape[1]), int(landmark.y * shape[0]))
for i in right_eye_horizontal_center_indices
]
cv2.line(image, (x9, y9), (x10, y10), (255, 0, 0), 1)
cv2.line(image, (x11, y11), (x12, y12), (255, 0, 0), 1)
# for face_landmarks in faces:
# mp_drawing.draw_landmarks(
# image=image,
# landmark_list=face_landmarks,
# connections=mp_face_mesh.FACEMESH_TESSELATION,
# landmark_drawing_spec=None,
# connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style(),
# )
# # mp_drawing.draw_landmarks(
# # image=image,
# # landmark_list=face_landmarks,
# # connections=mp_face_mesh.FACEMESH_CONTOURS,
# # landmark_drawing_spec=None,
# # connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_contours_style(),
# # )
# # mp_drawing.draw_landmarks(
# # image=image,
# # landmark_list=face_landmarks,
# # connections=mp_face_mesh.FACEMESH_IRISES,
# # landmark_drawing_spec=None,
# # connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_iris_connections_style(),
# # )
# Flip the image horizontally for a selfie-view display.
cv2.imshow("MediaPipe Face Mesh", image)
if cv2.waitKey(5) & 0xFF == ord("s"):
break
cap.release()
print("stopped")
|
{"hexsha": "2b0ece6399a0f90aae257cde9b3dbda76d12c859", "size": 9877, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/OpenCV/Face Mesh/eye_tracking.py", "max_stars_repo_name": "S-c-r-a-t-c-h-y/coding-projects", "max_stars_repo_head_hexsha": "cad33aedb72720c3e3a37c7529e55abd3edb291a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/OpenCV/Face Mesh/eye_tracking.py", "max_issues_repo_name": "S-c-r-a-t-c-h-y/coding-projects", "max_issues_repo_head_hexsha": "cad33aedb72720c3e3a37c7529e55abd3edb291a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/OpenCV/Face Mesh/eye_tracking.py", "max_forks_repo_name": "S-c-r-a-t-c-h-y/coding-projects", "max_forks_repo_head_hexsha": "cad33aedb72720c3e3a37c7529e55abd3edb291a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4795081967, "max_line_length": 119, "alphanum_fraction": 0.5561405285, "include": true, "reason": "import numpy", "num_tokens": 2867}
|
import os
import torch
import torch.utils.data
from torchvision.datasets.utils import download_url
import numpy as np
from .abstract import StandardVisionDataset
from .base import print_loaded_dataset_shapes, log_call_parameters
class GermanDataset(torch.utils.data.Dataset):
base_folder = "german"
relevant_files = ["german.data-numeric",
'random_indices.txt']
# define train/test indices
def __init__(self, root, train: bool = True, transform=None, target_transform=None, download: bool = False,):
super(GermanDataset, self).__init__()
assert transform is None and target_transform is None, "transforms not implemented"
self.root = root
self.base_dir = os.path.join(self.root, self.base_folder, 'raw')
data_file = os.path.join(self.base_dir, "german.data-numeric")
if download:
self.download()
data = np.loadtxt(data_file)
random_indices_file = os.path.join(self.base_dir, "random_indices.txt")
num_entries, num_features = data.shape
if not os.path.exists(random_indices_file):
print(f"{random_indices_file} does not exist, generating random indices")
rng = np.random.default_rng(seed=6174)
random_indices = np.arange(num_entries)
rng.shuffle(random_indices)
np.savetxt(fname=random_indices_file, X=random_indices, fmt="%d")
else:
random_indices = np.loadtxt(random_indices_file, dtype=int)
# do postprocessing of previous work
data[:, 24] = data[:, 24] - 1
index = (data[:, 6] == 1) | (data[:, 6] == 3) | (data[:, 6] == 4)
data[:, 6] = (index).astype(int)
if train:
relevant_indices = random_indices[:700]
else:
relevant_indices = random_indices[700:]
self.features = torch.from_numpy(np.concatenate((data[relevant_indices, 0:8], data[relevant_indices, 9:24]), axis=1)).float()
self.target_labels = torch.from_numpy(data[relevant_indices, 24]).long()
self.sensitive_labels = torch.from_numpy(data[relevant_indices, 6]).long()
def download(self):
if not os.path.exists(self.base_dir):
os.makedirs(self.base_dir)
download_url("https://raw.githubusercontent.com/human-analysis/MaxEnt-ARL/master/data/german/german.data-numeric",
self.base_dir)
def _check_integrity(self):
if not os.path.exists(os.path.join(self.base_dir, "german.data-numeric")):
return False
return True
def __len__(self):
return self.features.shape[0]
def __getitem__(self, idx):
return [self.features[idx], self.sensitive_labels[idx]], [self.target_labels[idx], self.sensitive_labels[idx]]
# TODO: for now use standard vision dataset, down the line change to standard tabular dataset or something
class German(StandardVisionDataset):
@log_call_parameters
def __init__(self, **kwargs):
super(German, self).__init__(**kwargs)
@property
def dataset_name(self) -> str:
return "german"
@property
def means(self):
return torch.tensor([0])
@property
def stds(self):
return torch.tensor([1])
@property
def train_transforms(self):
return []
@property
def test_transforms(self):
return []
def raw_dataset(self, data_dir: str, download: bool, train: bool, transform):
return GermanDataset(data_dir, download=download, train=train)
|
{"hexsha": "415c07880420b78fadc17132abecf0ccdd3ad175", "size": 3540, "ext": "py", "lang": "Python", "max_stars_repo_path": "nnlib/data_utils/german.py", "max_stars_repo_name": "amf272/nnlib", "max_stars_repo_head_hexsha": "6a14b73cc5bb2761b41c07931ba1c66ec2c4d75b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nnlib/data_utils/german.py", "max_issues_repo_name": "amf272/nnlib", "max_issues_repo_head_hexsha": "6a14b73cc5bb2761b41c07931ba1c66ec2c4d75b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nnlib/data_utils/german.py", "max_forks_repo_name": "amf272/nnlib", "max_forks_repo_head_hexsha": "6a14b73cc5bb2761b41c07931ba1c66ec2c4d75b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7142857143, "max_line_length": 133, "alphanum_fraction": 0.6556497175, "include": true, "reason": "import numpy", "num_tokens": 797}
|
from __future__ import division
from scipy.signal import blackmanharris
from numpy.fft import rfft, irfft
from numpy import argmax, sqrt, mean, absolute, arange, log10
import numpy as np
try:
import soundfile as sf
except ImportError:
from scikits.audiolab import Sndfile
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return sqrt(mean(absolute(a)**2))
def find_range(f, x):
"""
Find range between nearest local minima from peak at index x
"""
for i in arange(x+1, len(f)):
if f[i+1] >= f[i]:
uppermin = i
break
for i in arange(x-1, 0, -1):
if f[i] <= f[i-1]:
lowermin = i + 1
break
return (lowermin, uppermin)
def THDN(signal, sample_rate):
"""
Measure the THD+N for a signal and print the results
Prints the estimated fundamental frequency and the measured THD+N. This is
calculated from the ratio of the entire signal before and after
notch-filtering.
Currently this tries to find the "skirt" around the fundamental and notch
out the entire thing. A fixed-width filter would probably be just as good,
if not better.
"""
# Get rid of DC and window the signal
# TODO: Do this in the frequency domain, and take any skirts with it?
signal -= mean(signal)
windowed = signal * blackmanharris(len(signal)) # TODO Kaiser?
# Measure the total signal before filtering but after windowing
total_rms = rms_flat(windowed)
# Find the peak of the frequency spectrum (fundamental frequency), and
# filter the signal by throwing away values between the nearest local
# minima
f = rfft(windowed)
i = argmax(abs(f))
# Not exact
# print('Frequency: %f Hz' % (sample_rate * (i / len(windowed))))
frequency = ("%f" % (sample_rate * (i / len(windowed))))
lowermin, uppermin = find_range(abs(f), i)
f[lowermin: uppermin] = 0
# Transform noise back into the signal domain and measure it
# TODO: Could probably calculate the RMS directly in the frequency domain
# instead
noise = irfft(f)
THDN = rms_flat(noise) / total_rms
# print("THD+N: %.4f%% or %.1f dB" % (THDN * 100, 20 * log10(THDN)))
thdn = ("%.4f" % (THDN * 100))
return {
'frequency': frequency,
'thdn': thdn
}
def load(filename):
"""
Load a wave file and return the signal, sample rate and number of channels.
Can be any format that libsndfile supports, like .wav, .flac, etc.
"""
try:
wave_file = sf.SoundFile(filename)
signal = wave_file.read()
except ImportError:
wave_file = Sndfile(filename, 'r')
signal = wave_file.read_frames(wave_file.nframes)
channels = wave_file.channels
sample_rate = wave_file.samplerate
return signal, sample_rate, channels
def analyze_channels(filename, function):
"""
Given a filename, run the given analyzer function on each channel of the
file
"""
signal, sample_rate, channels = load(filename)
if channels == 1:
# Monaural
response_here = function(signal, sample_rate)
return response_here
elif channels == 2:
# Stereo
if np.array_equal(signal[:, 0], signal[:, 1]):
print('-- Left and Right channels are identical --')
function(signal[:, 0], sample_rate)
else:
print('-- Left channel --')
function(signal[:, 0], sample_rate)
print('-- Right channel --')
function(signal[:, 1], sample_rate)
else:
# Multi-channel
for ch_no, channel in enumerate(signal.transpose()):
print('-- Channel %d --' % (ch_no + 1))
function(channel, sample_rate)
def execute_thdn(filename):
try:
response_here = analyze_channels(filename, THDN)
return response_here
except Exception as e:
print('Couldn\'t analyze "' + filename + '"')
print(e)
# # For Testing
# import sys
# execute_thdn(sys.argv[1])
|
{"hexsha": "13dd5085a8e0460da13fc270546c5522b0774239", "size": 3674, "ext": "py", "lang": "Python", "max_stars_repo_path": "task/thdncalculator.py", "max_stars_repo_name": "joseph9991/Milestone1", "max_stars_repo_head_hexsha": "08f95e845a743539160e9a7330ca58ea20240229", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "task/thdncalculator.py", "max_issues_repo_name": "joseph9991/Milestone1", "max_issues_repo_head_hexsha": "08f95e845a743539160e9a7330ca58ea20240229", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "task/thdncalculator.py", "max_forks_repo_name": "joseph9991/Milestone1", "max_forks_repo_head_hexsha": "08f95e845a743539160e9a7330ca58ea20240229", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8732394366, "max_line_length": 76, "alphanum_fraction": 0.6978769733, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1006}
|
from subprocess import PIPE, run
import pandas as pd
import uuid
import numpy as np
import matplotlib.pyplot as plt
from perf import PerfObj
from sys import argv
import os
def plot_barchart_against(dfs: list, y_axis="L1-dcache-load-misses", test_names=None, save_file="bars.png"):
df1 = dfs[0][1]
if test_names is not None:
df1 = df1[df1["input"].isin(test_names)]
labels = sorted(test_names)
else:
labels = []
for name, df in dfs:
labels.extend(df["input"].unique())
labels = sorted(labels)
x = np.arange(len(labels)) # the label locations
width = 1/(len(dfs))*4/5 # the width of the bars
rects = []
for df_name, df in dfs:
rects.append(df)
bar_height = [[] for i in range(len(rects))]
error_height = [[] for i in range(len(rects))]
for label in labels:
for i, rect in enumerate(rects):
try:
bar_height[i].append(rect.loc[rect["input"] == label][y_axis].mean())
error_height[i].append(rect.loc[rect["input"] == label][y_axis].std())
except:
print("plot_shart_against: failed: ", label)
fig, ax = plt.subplots(figsize=(20,10))
for i in range(len(dfs)):
label = ""
df_name = dfs[i][0]
offset = (i - int(len(dfs)/2))
if len(dfs)%2 == 0:
offset += 1/2
ax.bar(x + offset*width, bar_height[i], width, yerr=error_height[i], label=label+df_name)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel(y_axis)
# ax.set_title('')
ax.set_xticks(x)
ax.set_xticklabels(labels)
plt.xticks(rotation=90)
ax.legend()
fig.savefig(save_file)
def plot_lines_against(dfs: list, x_axis="n", y_axis="L1-dcache-load-misses", test_names=None, save_file="lines.png"):
ys = [[] for i in range(len(dfs))]
fig, ax = plt.subplots()
for i in range(len(dfs)):
label = ""
df_name, df = dfs[i]
df = df.groupby([x_axis], as_index=False).mean()
df.plot(x=x_axis, y=y_axis, ax=ax, label=df_name)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel(y_axis)
# ax.set_title('')
ax.legend()
fig.savefig(save_file)
def record_program(program_name, log_folder=None):
Ns = list(range(100000, 1000000, 100000)) + list(range(1000000, 10000000, 1000000))
Qs = [100000000]
perf = PerfObj()
for n in Ns:
for q in Qs:
parsed_data = perf.record_cache(program_name, n, q)
if log_folder:
perf.get_records().to_pickle(f"{log_folder}/{uuid.uuid4()}.log")
return perf.get_records()
def mkdir(path):
try:
os.makedirs(path)
except FileExistsError:
# directory already exists
pass
def record(programs_to_record):
dfs = []
for program in programs_to_record:
log_folder = "logs/" + program.replace("./", "")
mkdir(log_folder)
df = record_program(program, log_folder)
dfs.append((program, df))
# plot_barchart_against(dfs, y_axis="L1-dcache-load-misses")
plot_lines_against(dfs, x_axis="n", y_axis="L1-dcache-load-misses")
def get_log_df(path):
log_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(".log")]
ret_df = pd.DataFrame()
for log_file in log_files:
df = pd.read_pickle(log_file)
ret_df = ret_df.append(df, ignore_index=True)
return ret_df
def plot(programs):
names = [name for name in programs]
folders = [f"logs/{name}/" for name in names]
dfs = []
for name, folder in zip(names, folders):
df = get_log_df(folder)
dfs.append((name.replace('./perf_', ''), df))
plot_lines_against(dfs, x_axis="n", y_axis="L1-dcache-load-misses")
if __name__ == '__main__':
if len(argv) > 1:
record(argv[1:])
else:
programs = ["./perf_std_set", "./perf_co_dst"]
# programs = ["./perf_co_sst", "./perf_ca_sst", "./perf_built_co_sst", "./perf_simple_sst_recursive", "./perf_simple_sst_iterative"]
# programs = ["./co_matrix_walker", "./naive_matrix_walker"]
record(programs)
plot(programs)
|
{"hexsha": "427099b54065c95d459f974c588420ed365a7ad3", "size": 4246, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark.py", "max_stars_repo_name": "6851-2021/Cache-Oblivious-Data-Structures", "max_stars_repo_head_hexsha": "07cbddeb175f6d838ae9ebb3bc86d4820d7a21ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-02T16:17:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-02T16:17:54.000Z", "max_issues_repo_path": "benchmark.py", "max_issues_repo_name": "6851-2021/Cache-Oblivious-Data-Structures", "max_issues_repo_head_hexsha": "07cbddeb175f6d838ae9ebb3bc86d4820d7a21ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark.py", "max_forks_repo_name": "6851-2021/Cache-Oblivious-Data-Structures", "max_forks_repo_head_hexsha": "07cbddeb175f6d838ae9ebb3bc86d4820d7a21ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9248120301, "max_line_length": 140, "alphanum_fraction": 0.611163448, "include": true, "reason": "import numpy", "num_tokens": 1134}
|
import itertools
import os.path
import sys
import subprocess
import time
import fileinput
import numpy as np
import pandas as pd
# Enter 1 parameter: otu table with reads
path = sys.argv[1]
cond = path.split('/')[-1].split('.')[0]
def teach_predictor(path, params, same, job, wait):
time.sleep(1)
if not(wait is None):
wait = ' -hold_jid ' + wait + ' -cwd'
command = 'echo "bash ./teach_models.sh ' + path + ' ' + ' '.join([str(x) for x in params]) + ' ' + \
' '.join([str(x) for x in same]) +'" | qsub -N ' + job + wait
else:
command = 'echo "bash ./teach_models.sh ' + path + ' ' + ' '.join([str(x) for x in params]) + ' ' + \
' '.join([str(x) for x in same]) + '" | qsub -N ' + job + ' -cwd'
subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# input_string = "./teach_models.sh {} {} {}\n".format(*(map(pipes.quote, [path, params, same])))
# print(input_string)
# p = subprocess.Popen(['qsub', '-N', job, '-cwd'], stdin=subprocess.PIPE)
# out, err = p.communicate(input_string)
return()
# Create a df with all possible combinations of parameters
columns = ['Taxon_trim', 'SparCC_pval', 'SparCC_cor', 'Tax_adjacency',
'Pair_fstat', 'RMSE_sign', 'Specificity', 'Sensitivity']
params = pd.DataFrame(columns=columns)
values = {}
values['Taxon_trim'] = np.r_[2:6] / 10.
values['SparCC_cor'] = np.r_[2:5] / 10.
values['SparCC_pval'] = [0.001, 0.01, 0.05]
values['Tax_adjacency'] = np.r_[0:4]
values['Pair_fstat'] = [0.001]
values['RMSE_sign'] = [0.33, 0.5, 0.66, 1., 1.5, 2.0, 3.0]
# values['Taxon_trim'] = [0.3]
# values['SparCC_cor'] = [0.3]
# values['SparCC_pval'] = [0.001,0.01]
# values['Tax_adjacency'] = [2,3]
# values['Pair_fstat'] = [0.01,0.001]
# values['RMSE_sign'] = [1.]
# Make a parameter table in cases there is none
# It will be needed for test_patients.py
filename = 'PARAM_' + cond + '.txt'
if not(os.path.exists(filename)):
permutations = [x for x in itertools.product(values['Taxon_trim'],
values['SparCC_pval'],
values['SparCC_cor'],
values['Tax_adjacency'],
values['Pair_fstat'],
values['RMSE_sign'])
]
# Make a file with all parameter combinations
for set in permutations:
newline = list(set) + [None, None]
params = params.append(pd.Series(newline, index=columns), ignore_index=True)
params.to_csv(filename, sep='\t', index=False)
# Erase brackets from tax_code, because later model calling can't work with them
# sed might not work for some reason
command = "sed -ie 's/\[/BRA/g;s/\]/KET/g;s/-/SLASH/g' %s" % path
subprocess.Popen(command, shell=True)
time.sleep(5)
command = "rm %se" % path
subprocess.Popen(command, shell=True)
with fileinput.FileInput(path, inplace=True, backup='.bak') as file:
for line in file:
print(line.replace(']', 'KET').replace('[','BRA').replace('-','SLASH'), end='')
# First, create SparCC-outputs
job = "Tax_trim"
for i in values['Taxon_trim']:
teach_predictor(path, [i, min(values['SparCC_pval']), 100, 100, 100], [0,1,1,1,1], job, None)
wait = job
# time.sleep(10)
# Then one level lower
job = "Spar_pval"
Tt_Sp = [x for x in itertools.product(values['Taxon_trim'],
values['SparCC_pval'])]
# Make it so first all SparCC with lowest P-value are calculated
# Otherwise
Tt_Sp = sorted(Tt_Sp, key=lambda x: x[1])
for i in Tt_Sp:
teach_predictor(path, [i[0], i[1], 100, 100, 100], [1,0,1,1,1], job, wait)
time.sleep(5)
wait = job
time.sleep(3600)
# And we go all the way down
job = "Filter_sig"
Tt_Sp_Sc_Ta = [x for x in itertools.product(values['Taxon_trim'],
values['SparCC_pval'],
values['SparCC_cor'],
values['Tax_adjacency'])]
for i in Tt_Sp_Sc_Ta:
teach_predictor(path, [i[0], i[1], i[2], int(i[3]), -1], [1,1,0,0,1], job, wait)
time.sleep(3)
wait = job
time.sleep(1000)
job = 'Calc_models'
for i in values['Taxon_trim']:
fstat_min = str(min(values['SparCC_pval']))
fstat_max = str(max(values['SparCC_pval']))
cor_thr = str(min(values['SparCC_cor']))
p_sigcor = './{}/trim_{}/pval_{}/sig_cor_{}.txt'.format(cond,str(i),fstat_max,fstat_max)
p_counts = './{}/trim_{}/train.txt'.format(cond,str(i))
p_taxcode = './{}/trim_{}/tax_code.txt'.format(cond,str(i))
out = './{}/trim_{}/'.format(cond,str(i))
command = '''echo 'R CMD BATCH "--args {} {} {} {} {} {} {}" ./Scripts/calculate_all.models.R' '''.format(p_sigcor, p_counts, p_taxcode, fstat_min, fstat_max, cor_thr, out)
command = command + '| qsub -N ' + job + ' -hold_jid ' + wait + ' -cwd'
subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(5)
time.sleep(3600)
wait = job
job = 'pair_Fstat'
Tt_Sp_Sc_Ta_Pf = [x for x in itertools.product(values['Taxon_trim'],
values['SparCC_pval'],
values['SparCC_cor'],
values['Tax_adjacency'],
values['Pair_fstat'])]
for i in Tt_Sp_Sc_Ta_Pf:
teach_predictor(path, [i[0], i[1], i[2], i[3], i[4]], [1,1,1,1,0], job, wait)
time.sleep(1)
for f in ["Rplots.pdf", "cov_mat_SparCC.out", "get_pair_fstats.Rout"]:
command = 'echo "rm %s" | qsub -N CleanUp -hold_jid pair_Fstat -cwd' % f
subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
command = 'echo "find . -maxdepth 1 -type f -size 0 | xargs -d"\n" rm -f" | qsub -N Finish -hold_jid CleanUp -cwd'
subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# ls | grep -P '\.o' | xargs -d"\n" rm -f
# find . -maxdepth 1 -type f -size 0 | xargs -d"\n" rm -f
|
{"hexsha": "0de5fba780ebd3ffebaffd61672cd928716c2d72", "size": 6181, "ext": "py", "lang": "Python", "max_stars_repo_path": "linear_model/model_pick/rational/parallel_script.py", "max_stars_repo_name": "lotrus28/TaboCom", "max_stars_repo_head_hexsha": "b67d66e4c410375a9efa08c5e637301e78e9204b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linear_model/model_pick/rational/parallel_script.py", "max_issues_repo_name": "lotrus28/TaboCom", "max_issues_repo_head_hexsha": "b67d66e4c410375a9efa08c5e637301e78e9204b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linear_model/model_pick/rational/parallel_script.py", "max_forks_repo_name": "lotrus28/TaboCom", "max_forks_repo_head_hexsha": "b67d66e4c410375a9efa08c5e637301e78e9204b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3986928105, "max_line_length": 176, "alphanum_fraction": 0.5796796635, "include": true, "reason": "import numpy", "num_tokens": 1817}
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from scipy.sparse import rand as sparse_rand
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert .9 < clf.score(X, y)
def test_pipeline_with_nearest_neighbors_transformer():
# Test chaining NearestNeighborsTransformer and Isomap with
# neighbors_algorithm='precomputed'
algorithm = 'auto'
n_neighbors = 10
X, _ = datasets.make_blobs(random_state=0)
X2, _ = datasets.make_blobs(random_state=1)
# compare the chained version and the compact version
est_chain = pipeline.make_pipeline(
neighbors.KNeighborsTransformer(
n_neighbors=n_neighbors, algorithm=algorithm, mode='distance'),
manifold.Isomap(n_neighbors=n_neighbors, metric='precomputed'))
est_compact = manifold.Isomap(n_neighbors=n_neighbors,
neighbors_algorithm=algorithm)
Xt_chain = est_chain.fit_transform(X)
Xt_compact = est_compact.fit_transform(X)
assert_array_almost_equal(Xt_chain, Xt_compact)
Xt_chain = est_chain.transform(X2)
Xt_compact = est_compact.transform(X2)
assert_array_almost_equal(Xt_chain, Xt_compact)
def test_different_metric():
# Test that the metric parameters work correctly, and default to euclidean
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
# metric, p, is_euclidean
metrics = [('euclidean', 2, True),
('manhattan', 1, False),
('minkowski', 1, False),
('minkowski', 2, True),
(custom_metric, 2, False)]
X, _ = datasets.make_blobs(random_state=0)
reference = manifold.Isomap().fit_transform(X)
for metric, p, is_euclidean in metrics:
embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X)
if is_euclidean:
assert_array_almost_equal(embedding, reference)
else:
with pytest.raises(AssertionError, match='not almost equal'):
assert_array_almost_equal(embedding, reference)
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert (model.nbrs_.n_neighbors ==
n_neighbors)
def test_sparse_input():
X = sparse_rand(100, 3, density=0.1, format='csr')
# Should not error
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
|
{"hexsha": "18133719bf85a5ae51f753065456a81ab5781017", "size": 6487, "ext": "py", "lang": "Python", "max_stars_repo_path": "chatbot_env/Lib/site-packages/sklearn/manifold/tests/test_isomap.py", "max_stars_repo_name": "rakmakan/Chatbot", "max_stars_repo_head_hexsha": "d04bc1526b56961a16c25148d9ef18c4f157e9c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6989, "max_stars_repo_stars_event_min_datetime": "2017-07-18T06:23:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:58:36.000Z", "max_issues_repo_path": "chatbot_env/Lib/site-packages/sklearn/manifold/tests/test_isomap.py", "max_issues_repo_name": "rakmakan/Chatbot", "max_issues_repo_head_hexsha": "d04bc1526b56961a16c25148d9ef18c4f157e9c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1978, "max_issues_repo_issues_event_min_datetime": "2017-07-18T09:17:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:28:43.000Z", "max_forks_repo_path": "site-packages/sklearn/manifold/tests/test_isomap.py", "max_forks_repo_name": "Wristlebane/Pyto", "max_forks_repo_head_hexsha": "901ac307b68486d8289105c159ca702318bea5b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1228, "max_forks_repo_forks_event_min_datetime": "2017-07-18T09:03:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T05:57:40.000Z", "avg_line_length": 34.3227513228, "max_line_length": 78, "alphanum_fraction": 0.629258517, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1462}
|
import numpy as np
from components.transforms import _underscore_to_cap
class BasicLearner():
"""
basis class for learners
"""
def _add_stat(self, name, value, T_env):
if isinstance(value, np.ndarray) and value.size == 1:
value = float(value)
if not hasattr(self, "_stats"):
self._stats = {}
if name not in self._stats:
self._stats[name] = []
self._stats[name+"_T_env"] = []
self._stats[name].append(value)
self._stats[name+"_T_env"].append(T_env)
if hasattr(self, "max_stats_len") and len(self._stats) > self.max_stats_len:
self._stats[name].pop(0)
self._stats[name+"_T"].pop(0)
if hasattr(self, "max_stats_len") and len(self._stats) > self.max_stats_len:
self._stats[name].pop(0)
self._stats[name+"_T"].pop(0)
# log to sacred if enabled
if hasattr(self.logging_struct, "sacred_log_scalar_fn"):
self.logging_struct.sacred_log_scalar_fn(key=_underscore_to_cap(name), val=value)
# log to tensorboard if enabled
if hasattr(self.logging_struct, "tensorboard_log_scalar_fn"):
self.logging_struct.tensorboard_log_scalar_fn(_underscore_to_cap(name), value, T_env)
# log to hdf if enabled
if hasattr(self.logging_struct, "hdf_logger"):
self.logging_struct.hdf_logger.log(_underscore_to_cap(name), value, T_env)
|
{"hexsha": "4908c9ce50a702261bd0469396ee71f535ec6e61", "size": 1467, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/learners/basic.py", "max_stars_repo_name": "ewanlee/mackrl", "max_stars_repo_head_hexsha": "6dd505aa09830f16c35a022f67e255db935c807e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2019-10-28T09:01:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-20T08:56:12.000Z", "max_issues_repo_path": "src/learners/basic.py", "max_issues_repo_name": "ewanlee/mackrl", "max_issues_repo_head_hexsha": "6dd505aa09830f16c35a022f67e255db935c807e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-25T06:50:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-25T06:50:05.000Z", "max_forks_repo_path": "src/learners/basic.py", "max_forks_repo_name": "ewanlee/mackrl", "max_forks_repo_head_hexsha": "6dd505aa09830f16c35a022f67e255db935c807e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-12-18T12:02:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-03T13:15:47.000Z", "avg_line_length": 34.1162790698, "max_line_length": 97, "alphanum_fraction": 0.633265167, "include": true, "reason": "import numpy", "num_tokens": 343}
|
////////////////////////////////////////////////////////////////////////////////////////////////////
// literals.hpp
//
// Copyright 2012 Eric Niebler.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_PROTO_V5_LITERALS_HPP_INCLUDED
#define BOOST_PROTO_V5_LITERALS_HPP_INCLUDED
#include <boost/proto/v5/proto_fwd.hpp>
#include <boost/proto/v5/expr.hpp>
namespace boost
{
namespace proto
{
inline namespace v5
{
// handy user-defined literal operators for building expressions
namespace literals
{
inline constexpr literal<char const *> operator "" _et(char const *ntbs, std::size_t) noexcept
{
return literal<char const *>(ntbs);
}
inline constexpr literal<wchar_t const *> operator "" _et(wchar_t const *ntws, std::size_t) noexcept
{
return literal<wchar_t const *>(ntws);
}
inline constexpr literal<char16_t const *> operator "" _et(char16_t const *ntws, std::size_t) noexcept
{
return literal<char16_t const *>(ntws);
}
inline constexpr literal<char32_t const *> operator "" _et(char32_t const *ntws, std::size_t) noexcept
{
return literal<char32_t const *>(ntws);
}
inline constexpr literal<char> operator "" _et(char ch) noexcept
{
return literal<char>(ch);
}
inline constexpr literal<wchar_t> operator "" _et(wchar_t ch) noexcept
{
return literal<wchar_t>(ch);
}
inline constexpr literal<char16_t> operator "" _et(char16_t ch) noexcept
{
return literal<char16_t>(ch);
}
inline constexpr literal<char32_t> operator "" _et(char32_t ch) noexcept
{
return literal<char32_t>(ch);
}
inline constexpr literal<unsigned long long> operator "" _et(unsigned long long l) noexcept
{
return literal<unsigned long long>(l);
}
inline constexpr literal<long double> operator "" _et(long double d) noexcept
{
return literal<long double>(d);
}
}
}
}
}
#endif
|
{"hexsha": "db32c7ba6dc764a422a3729f33507dac6a674b94", "size": 2644, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/proto/v5/literals.hpp", "max_stars_repo_name": "ericniebler/proto-0x", "max_stars_repo_head_hexsha": "b8d80f1434e37a2a32613cdf58b02b5f7143cc1f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2016-10-27T08:55:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T03:30:17.000Z", "max_issues_repo_path": "boost/proto/v5/literals.hpp", "max_issues_repo_name": "ericniebler/proto-0x", "max_issues_repo_head_hexsha": "b8d80f1434e37a2a32613cdf58b02b5f7143cc1f", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2019-02-05T17:13:02.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-05T18:34:53.000Z", "max_forks_repo_path": "boost/proto/v5/literals.hpp", "max_forks_repo_name": "ericniebler/proto-0x", "max_forks_repo_head_hexsha": "b8d80f1434e37a2a32613cdf58b02b5f7143cc1f", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-02-26T17:15:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-26T17:15:12.000Z", "avg_line_length": 33.4683544304, "max_line_length": 118, "alphanum_fraction": 0.5079425113, "num_tokens": 494}
|
(** Generated by coq-of-ocaml *)
Require Import OCaml.OCaml.
Local Set Primitive Projections.
Local Open Scope string_scope.
Local Open Scope Z_scope.
Local Open Scope type_scope.
Import ListNotations.
Unset Positivity Checking.
Unset Guard Checking.
Inductive nat : Set :=
| O : nat
| S : nat -> nat.
Inductive natural : Set :=
| Zero : natural
| Succ : natural -> natural.
Fixpoint plus (plus_arg0 : natural) (plus_arg1 : natural) {struct plus_arg0}
: natural :=
match plus_arg0 with
| Zero => plus_arg1
| Succ n => Succ (plus n plus_arg1)
end.
Fixpoint mult (mult_arg0 : natural) (mult_arg1 : natural) {struct mult_arg0}
: natural :=
match mult_arg0 with
| Zero => Zero
| Succ n => plus (mult n mult_arg1) mult_arg1
end.
Definition synth (lf2 : natural) (lf1 : natural) : natural :=
plus Zero (plus lf1 lf2).
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal33_distrib_100_plus_assoc/goal33conj251_coqofml_VVKblj.v"}
|
from __future__ import division
import time
import sys
import math
import numpy as np
import torch
import torch.nn as nn
from onmt.Trainer import Statistics as BaseStatistics
from onmt.Utils import use_gpu
from cocoa.io.utils import create_path
class Statistics(BaseStatistics):
def output(self, epoch, batch, n_batches, start):
"""Write out statistics to stdout.
Args:
epoch (int): current epoch
batch (int): current batch
n_batch (int): total batches
start (int): start time of epoch.
"""
t = self.elapsed_time()
print(("Epoch %2d, %5d/%5d; loss: %6.2f; " +
"%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed") %
(epoch, batch, n_batches,
self.mean_loss(),
self.n_src_words / (t + 1e-5),
self.n_words / (t + 1e-5),
time.time() - start))
sys.stdout.flush()
class Trainer(object):
"""
Class that controls the training process.
Args:
model(:py:class:`onmt.Model.NMTModel`): translation model to train
train_loss(:obj:`onmt.Loss.LossComputeBase`):
training loss computation
valid_loss(:obj:`onmt.Loss.LossComputeBase`):
training loss computation
optim(:obj:`onmt.Optim.Optim`):
the optimizer responsible for update
data_type(string): type of the source input: [text|img|audio]
norm_method(string): normalization methods: [sents|tokens]
grad_accum_count(int): accumulate gradients this many times.
"""
def __init__(self, model, train_loss, valid_loss, optim,
data_type='text', norm_method="sents",
grad_accum_count=1, utterance_builder=None):
# Basic attributes.
self.model = model
self.train_loss = train_loss
self.valid_loss = valid_loss
self.optim = optim
self.data_type = data_type
self.norm_method = norm_method # by sentences vs. by tokens
self.grad_accum_count = grad_accum_count
self.cuda = False
self.best_valid_loss = None
assert(grad_accum_count > 0)
# Set model in training mode.
self.model.train()
# For debugging
self.utterance_builder = utterance_builder
def learn(self, opt, data, report_func):
"""Train model.
Args:
opt(namespace)
model(Model)
data(DataGenerator)
"""
print('\nStart training...')
print(' * number of epochs: %d' % opt.epochs)
print(' * batch size: %d' % opt.batch_size)
for epoch in range(opt.epochs):
print('')
# 1. Train for one epoch on the training set.
train_iter = data.generator('train', cuda=use_gpu(opt))
train_stats = self.train_epoch(train_iter, opt, epoch, report_func)
print('Train loss: %g' % train_stats.mean_loss())
# 2. Validate on the validation set.
valid_iter = data.generator('dev', cuda=use_gpu(opt))
valid_stats = self.validate(valid_iter)
print('Validation loss: %g' % valid_stats.mean_loss())
# 3. Log to remote server.
#if opt.exp_host:
# train_stats.log("train", experiment, optim.lr)
# valid_stats.log("valid", experiment, optim.lr)
#if opt.tensorboard:
# train_stats.log_tensorboard("train", writer, optim.lr, epoch)
# train_stats.log_tensorboard("valid", writer, optim.lr, epoch)
# 4. Update the learning rate
self.epoch_step(valid_stats.ppl(), epoch)
# 5. Drop a checkpoint if needed.
if epoch >= opt.start_checkpoint_at:
self.drop_checkpoint(opt, epoch, valid_stats)
def train_epoch(self, train_iter, opt, epoch, report_func=None):
""" Train next epoch.
Args:
train_iter: training data iterator
epoch(int): the epoch number
report_func(fn): function for logging
Returns:
stats (:obj:`onmt.Statistics`): epoch loss statistics
"""
# Set model back to training mode.
self.model.train()
total_stats = Statistics()
report_stats = Statistics()
true_batchs = []
accum = 0
normalization = 0
num_batches = train_iter.next()
self.cuda = use_gpu(opt)
for batch_idx, batch in enumerate(train_iter):
true_batchs.append(batch)
accum += 1
if accum == self.grad_accum_count:
self._gradient_accumulation(true_batchs, total_stats, report_stats)
true_batchs = []
accum = 0
if report_func is not None:
report_stats = report_func(opt, epoch, batch_idx, num_batches,
total_stats.start_time, report_stats)
# Accumulate gradients one last time if there are any leftover batches
# Should not run for us since we plan to accumulate gradients at every
# batch, so true_batches should always equal candidate batches
if len(true_batchs) > 0:
self._gradient_accumulation(true_batchs, total_stats, report_stats)
true_batchs = []
return total_stats
def validate(self, valid_iter):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`onmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = Statistics()
num_val_batches = valid_iter.next()
dec_state = None
for batch in valid_iter:
if batch is None:
dec_state = None
continue
elif not self.model.stateful:
dec_state = None
enc_state = dec_state.hidden if dec_state is not None else None
outputs, attns, dec_state = self._run_batch(batch, None, enc_state)
_, batch_stats = self.valid_loss.compute_loss(batch.targets, outputs)
stats.update(batch_stats)
# Set model back to training mode
self.model.train()
return stats
def epoch_step(self, ppl, epoch):
return self.optim.update_learning_rate(ppl, epoch)
def drop_checkpoint(self, opt, epoch, valid_stats, model_opt=None):
""" Save a resumable checkpoint.
Args:
opt (dict): option object
epoch (int): epoch number
fields (dict): fields and vocabulary
valid_stats : statistics of last validation run
"""
real_model = (self.model.module
if isinstance(self.model, nn.DataParallel)
else self.model)
real_generator = (real_model.generator.module
if isinstance(real_model.generator, nn.DataParallel)
else real_model.generator)
model_state_dict = real_model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'opt': opt if not model_opt else model_opt,
'epoch': epoch,
'optim': self.optim,
}
path = self.checkpoint_path(epoch, opt, valid_stats)
create_path(path)
if not opt.best_only:
print 'Save checkpoint {path}'.format(path=path)
torch.save(checkpoint, path)
self.save_best_checkpoint(checkpoint, opt, valid_stats)
def save_best_checkpoint(self, checkpoint, opt, valid_stats):
if self.best_valid_loss is None or valid_stats.mean_loss() < self.best_valid_loss:
self.best_valid_loss = valid_stats.mean_loss()
path = '{root}/{model}_best.pt'.format(
root=opt.model_path,
model=opt.model_filename)
print 'Save best checkpoint {path}'.format(path=path)
torch.save(checkpoint, path)
def checkpoint_path(self, epoch, opt, stats):
path = '{root}/{model}_loss{loss:.2f}_e{epoch:d}.pt'.format(
root=opt.model_path,
model=opt.model_filename,
loss=stats.mean_loss(),
epoch=epoch)
return path
def _run_batch(self, batch, dec_state=None, enc_state=None):
raise NotImplementedError
def _gradient_accumulation(self, true_batchs, total_stats, report_stats):
if self.grad_accum_count > 1:
self.model.zero_grad()
dec_state = None
for batch in true_batchs:
if batch is None:
dec_state = None
continue
elif not self.model.stateful:
dec_state = None
enc_state = dec_state.hidden if dec_state is not None else None
self.model.zero_grad()
outputs, attns, dec_state = self._run_batch(batch, None, enc_state)
loss, batch_stats = self.train_loss.compute_loss(batch.targets, outputs)
loss.backward()
self.optim.step()
total_stats.update(batch_stats)
report_stats.update(batch_stats)
# Don't backprop fully.
if dec_state is not None:
dec_state.detach()
|
{"hexsha": "be7c8410c4250b92bece70ecdec69b6445f350a5", "size": 9652, "ext": "py", "lang": "Python", "max_stars_repo_path": "cocoa_folder/cocoa/neural/trainer.py", "max_stars_repo_name": "s-akanksha/DialoGraph_ICLR21", "max_stars_repo_head_hexsha": "d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-03-17T05:15:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T06:09:21.000Z", "max_issues_repo_path": "cocoa_folder/cocoa/neural/trainer.py", "max_issues_repo_name": "s-akanksha/DialoGraph_ICLR21", "max_issues_repo_head_hexsha": "d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-25T07:28:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T01:54:43.000Z", "max_forks_repo_path": "cocoa_folder/cocoa/neural/trainer.py", "max_forks_repo_name": "s-akanksha/DialoGraph_ICLR21", "max_forks_repo_head_hexsha": "d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-10-11T03:39:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-01T23:58:50.000Z", "avg_line_length": 35.0981818182, "max_line_length": 90, "alphanum_fraction": 0.5847492748, "include": true, "reason": "import numpy", "num_tokens": 2024}
|
module kind_module
implicit none
integer, parameter, public :: isp = selected_int_kind(9)
integer, parameter, public :: idp = selected_int_kind(18)
#ifdef QUAD_PRECISION
integer, parameter, public :: dp = 16
#elsif TEN_DIGIT_PRECISION
integer, parameter, public :: dp = selected_real_kind(10)
#else
integer, parameter, public :: dp = 8
#endif
#ifdef HAVE_QP
integer, parameter, public :: qp = 16
#elsif TEN_DIGIT_PRECISION
integer, parameter, public :: qp = selected_real_kind(10)
#else
integer, parameter, public :: qp = 8
#endif
end module kind_module
|
{"hexsha": "230fc222741c051aded7db28b1bf77c976453d7d", "size": 577, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "src/libAtoms/kind_module.f95", "max_stars_repo_name": "Sideboard/QUIP", "max_stars_repo_head_hexsha": "f41372609e4a92fcda9f33b695a666de3886822b", "max_stars_repo_licenses": ["NRL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/libAtoms/kind_module.f95", "max_issues_repo_name": "Sideboard/QUIP", "max_issues_repo_head_hexsha": "f41372609e4a92fcda9f33b695a666de3886822b", "max_issues_repo_licenses": ["NRL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/libAtoms/kind_module.f95", "max_forks_repo_name": "Sideboard/QUIP", "max_forks_repo_head_hexsha": "f41372609e4a92fcda9f33b695a666de3886822b", "max_forks_repo_licenses": ["NRL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.08, "max_line_length": 59, "alphanum_fraction": 0.733102253, "num_tokens": 157}
|
#ifndef QUBUS_UTIL_INDEX_TUPLE_HPP
#define QUBUS_UTIL_INDEX_TUPLE_HPP
#include <boost/container/small_vector.hpp>
namespace qubus
{
namespace util
{
template <typename T>
using index_tuple = boost::small_vector<T, 10>;
}
}
#endif
|
{"hexsha": "92f7f442a4a889b49d4a4d080fb9edeedd658ba1", "size": 235, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "util/include/qubus/util/index_tuple.hpp", "max_stars_repo_name": "qubusproject/Qubus", "max_stars_repo_head_hexsha": "0feb8d6df00459c5af402545dbe7c82ee3ec4b7c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util/include/qubus/util/index_tuple.hpp", "max_issues_repo_name": "qubusproject/Qubus", "max_issues_repo_head_hexsha": "0feb8d6df00459c5af402545dbe7c82ee3ec4b7c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/include/qubus/util/index_tuple.hpp", "max_forks_repo_name": "qubusproject/Qubus", "max_forks_repo_head_hexsha": "0feb8d6df00459c5af402545dbe7c82ee3ec4b7c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.0555555556, "max_line_length": 47, "alphanum_fraction": 0.7872340426, "num_tokens": 60}
|
using Pkg
Pkg.activate(".")
Pkg.instantiate()
##
using DataFrames
using CSV
using Plots
using StatsPlots
using Plots.PlotMeasures
using Statistics
using StatsFuns
##
fpairs = CSV.read("../data/fpairs.txt", DataFrame, header=false)[:,1]
##
theme(:solarized_light)
#
upscale = 2 #8x upscaling in resolution
fntsm = Plots.font("sans-serif", pointsize=round(8.0*upscale))
fntlg = Plots.font("sans-serif", pointsize=round(10.0*upscale))
default(titlefont=fntlg, guidefont=fntlg, tickfont=fntsm, legendfont=fntsm)
default(size=(400*upscale,600*upscale)) #Plot canvas size
##
bs_ = []
for i in 1:28
push!(bs_, CSV.read("bsResults/$(lpad(i,2,"0")).csv", DataFrame, header=false))
end
bs = vcat(bs_...)
insertcols!(bs,1,:fpair=>fpairs)
rename!(bs, [:fpair, :universal, :lineage])
bs[:,:bf] = bs.universal .- bs.lineage
sort!(bs, :bf)
##
scatter(bs.bf, 1:28,
legend = false,
yticks= (1:28,bs.fpair),
xlab="(log) Bayes factor",
left_margin = upscale*5mm,
markersize=upscale*3,
)
savefig("../data/img/bayesfactor.pdf")
##
sort!(bs, :bf, rev=true)
p0 = logistic.(bs.bf)
bs[:,:cum_pv] = cumsum(1 .- p0)
output = DataFrame(
"feature pair" => bs.fpair,
"(log) Bayes Factor" => round.(bs.bf, digits=1),
"cumulative posterior probability" => rpad.(round.(bs.cum_pv, sigdigits=3), 5, "0"),
)
CSV.write("../data/tables/bf.tex", output, delim="&", newline="\\\\\n")
##
|
{"hexsha": "f61b32e7d204f0da86c1d2bd23e4db7a1d366114", "size": 1409, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "code/visualizeBS.jl", "max_stars_repo_name": "erathorn/phylogeneticTypology", "max_stars_repo_head_hexsha": "ba1fb23eed99b63708291bbbbdf49f1c2a3c1b07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/visualizeBS.jl", "max_issues_repo_name": "erathorn/phylogeneticTypology", "max_issues_repo_head_hexsha": "ba1fb23eed99b63708291bbbbdf49f1c2a3c1b07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/visualizeBS.jl", "max_forks_repo_name": "erathorn/phylogeneticTypology", "max_forks_repo_head_hexsha": "ba1fb23eed99b63708291bbbbdf49f1c2a3c1b07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.301369863, "max_line_length": 88, "alphanum_fraction": 0.6593328602, "num_tokens": 467}
|
import argparse
import gym
from gym import wrappers
import os.path as osp
import random
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import dqn
from dqn_utils import *
from atari_wrappers import *
def cartpole_model(img_in, num_actions, scope, reuse=False):
# as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
with tf.variable_scope(scope, reuse=reuse):
# out = tf.ones(tf.shape(img_in))
out = img_in
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=16,
activation_fn=tf.nn.relu, scope='fc_input')
out = layers.fully_connected(out, num_outputs=num_actions,
activation_fn=None, scope='fc_head')
return out
def cartpole_learn(env, session, num_timesteps):
# This is just a rough estimate
num_iterations = float(num_timesteps) / 4.0
# lr_multiplier = 1.0
# lr_multiplier = 0.1
# lr_schedule = PiecewiseSchedule([
# (0, 1e-4 * lr_multiplier),
# (num_iterations / 2, 1e-5 * lr_multiplier),
# ],
# outside_value=5e-5 * lr_multiplier)
lr_schedule = InverseSchedule(initial_p=0.1, gamma=0.6)
optimizer = dqn.OptimizerSpec(
constructor=tf.train.GradientDescentOptimizer,
# constructor=tf.train.AdamOptimizer,
# kwargs=dict(epsilon=1e-4),
kwargs=dict(),
# constructor=tf.train.RMSPropOptimizer,
# kwargs=dict(epsilon=1e-1),
lr_schedule=lr_schedule
)
def stopping_criterion(env, t):
# notice that here t is the number of steps of the wrapped env,
# which is different from the number of steps in the underlying env
return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps
exploration_schedule = PiecewiseSchedule(
[
(0, 1.0),
# (0.2 * num_timesteps, 0.9),
# (0.5 * num_timesteps, 0.5),
(0.1 * num_timesteps, 0.1),
], outside_value=0.01
)
dqn.learn(
env,
q_func=cartpole_model,
optimizer_spec=optimizer,
session=session,
exploration=exploration_schedule,
stopping_criterion=stopping_criterion,
replay_buffer_size=100000,
batch_size=256,
gamma=0.99,
learning_starts=2000,
learning_freq=1,
frame_history_len=4,
target_update_freq=1000,
grad_norm_clipping=1000,
)
env.close()
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def get_session():
tf.reset_default_graph()
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
session = tf.Session(config=tf_config)
print("AVAILABLE GPUS: ", get_available_gpus())
return session
def get_env(task, seed):
env_id = task.env_id
env = gym.make(env_id)
set_global_seeds(seed)
env.seed(seed)
expt_dir = '/tmp/hw3_vid_dir2/'
env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True)
env = wrap_deepmind(env)
return env
def main():
# Run training
max_timesteps = 100000
seed = 0 # Use a seed of zero (you may want to randomize the seed!)
env = gym.make("CartPole-v0")
env.seed(seed)
set_global_seeds(seed)
env = wrappers.Monitor(env, '/tmp/cartpole-experiment-1', force=True)
session = get_session()
cartpole_learn(env, session, num_timesteps=max_timesteps)
if __name__ == "__main__":
main()
|
{"hexsha": "3609e444b8c5bb2fb6f02e058031389515ff2334", "size": 4129, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw3/run_dqn_cartpole.py", "max_stars_repo_name": "akashin/BerkeleyDeepRL", "max_stars_repo_head_hexsha": "62292fe932b0b6dbf06c5baa0b8b7dad75792142", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw3/run_dqn_cartpole.py", "max_issues_repo_name": "akashin/BerkeleyDeepRL", "max_issues_repo_head_hexsha": "62292fe932b0b6dbf06c5baa0b8b7dad75792142", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw3/run_dqn_cartpole.py", "max_forks_repo_name": "akashin/BerkeleyDeepRL", "max_forks_repo_head_hexsha": "62292fe932b0b6dbf06c5baa0b8b7dad75792142", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5851851852, "max_line_length": 109, "alphanum_fraction": 0.6333252604, "include": true, "reason": "import numpy", "num_tokens": 999}
|
#!/usr/bin/env python3.7
"""
The copyrights of this software are owned by Duke University.
Please refer to the LICENSE.txt and README.txt files for licensing instructions.
The source code can be found on the following GitHub repository: https://github.com/wmglab-duke/ascent
"""
import random
import warnings
from typing import List, Tuple
from shapely.affinity import scale
from shapely.geometry import LineString, Point
import csv
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy.stats as stats
# ascent
from src.utils import (Config, Configurable, DiamDistMode, Exceptionable, FiberGeometry,
FiberXYMode, FiberZMode, MyelinatedSamplingType, MyelinationMode, Saveable,
SetupMode, WriteMode)
from .sample import Sample
class FiberSet(Exceptionable, Configurable, Saveable):
"""
Required (Config.) JSON's:
MODEL
SIM
"""
def __init__(self, sample: Sample, exceptions_config: list):
"""
:param exceptions_config: preloaded exceptions.json data
"""
# set up superclasses
Configurable.__init__(self)
Exceptionable.__init__(self, SetupMode.OLD, exceptions_config)
# initialize empty lists of fiber points
self.sample = sample
self.fibers = None
self.out_to_fib = None
self.out_to_in = None
self.add(SetupMode.NEW, Config.FIBER_Z, os.path.join('config', 'system', 'fiber_z.json'))
def init_post_config(self):
if any([config.value not in self.configs.keys() for config in (Config.MODEL, Config.SIM)]):
self.throw(78)
return self
def generate(self, sim_directory: str, super_sample: bool = False):
"""
:return:
"""
xy_mode_name: str = self.search(Config.SIM, 'fibers', 'xy_parameters', 'mode')
xy_mode: FiberXYMode = [mode for mode in FiberXYMode if str(mode).split('.')[-1] == xy_mode_name][0]
fibers_xy = self._generate_xy(sim_directory)
self.out_to_fib, self.out_to_in = self._generate_maps(fibers_xy)
self.fibers = self._generate_z(fibers_xy, super_sample=super_sample)
return self
def write(self, mode: WriteMode, path: str):
"""
:param mode:
:param path:
:return:
"""
diams = []
for i, fiber_pre in enumerate(self.fibers if self.fibers is not None else []):
if not isinstance(fiber_pre, dict):
fiber = fiber_pre
else:
fiber = fiber_pre['fiber']
diam = fiber_pre['diam']
diams.append(diam)
with open(os.path.join(path, str(i) + WriteMode.file_endings.value[mode.value]), 'w') as f:
for row in [len(fiber)] + list(fiber):
if not isinstance(row, int):
for el in row:
f.write(str(el) + ' ')
else:
f.write(str(row) + ' ')
f.write("\n")
if len(diams) > 0:
diams_key_path = os.path.join(path, 'diams.txt')
with open(diams_key_path, "w") as f2:
np.savetxt(f2, diams, fmt='%0.1f')
return self
def _generate_maps(self, fibers_xy) -> Tuple[List, List]:
out_to_fib = []
out_to_in = []
inner_ind = 0
for i, fascicle in enumerate(self.sample.slides[0].fascicles):
out_to_in.append([])
out_to_fib.append([])
for j, inner in enumerate(fascicle.inners):
out_to_in[i].append(inner_ind)
out_to_fib[i].append([])
inner_ind += 1
for q, fiber in enumerate(fibers_xy):
if Point(fiber).within(inner.polygon()):
out_to_fib[i][j].append(q)
return out_to_fib, out_to_in
def _generate_xy(self, sim_directory: str) -> np.ndarray:
# get required parameters from configuration JSON (using inherited Configurable methods)
xy_mode_name: str = self.search(Config.SIM, 'fibers', 'xy_parameters', 'mode')
xy_mode: FiberXYMode = [mode for mode in FiberXYMode if str(mode).split('.')[-1] == xy_mode_name][0]
xy_parameters: dict = self.search(Config.SIM, 'fibers', 'xy_parameters')
my_xy_seed: int = xy_parameters.get('seed', 0)
# initialize result lists
points: List[Tuple[float]] = []
# small behavioral parameters
buffer: float = self.search(Config.SIM, 'fibers', 'xy_trace_buffer')
plot: bool = self.search(Config.SIM, 'fibers', 'plot')
# perform implemented mode
if self.search_mode(FiberZMode, Config.MODEL) == FiberZMode.EXTRUSION:
if xy_mode == FiberXYMode.CENTROID:
for fascicle in self.sample.slides[0].fascicles:
for inner in fascicle.inners:
for _ in (0,):
points.append(inner.centroid())
elif xy_mode == FiberXYMode.UNIFORM_DENSITY:
# DENSITY UNIT: axons / um^2
# this determines whether the density should be determined top-down or bottom-up
# case top_down == true: fetch target density and cap minimum axons if too low
# case top_down == false: (i.e. bottom-up) find density from target number and smallest inner by area
# also cap the number at a maximum!
top_down: bool = self.search(Config.SIM, 'fibers', 'xy_parameters', 'top_down')
if top_down: # do top-down approach
# get required parameters
target_density = self.search(Config.SIM, 'fibers', 'xy_parameters', 'target_density')
minimum_number = self.search(Config.SIM, 'fibers', 'xy_parameters', 'minimum_number')
for fascicle in self.sample.slides[0].fascicles:
for inner in fascicle.inners:
fiber_count = target_density * inner.area()
if fiber_count < minimum_number:
fiber_count = minimum_number
for point in inner.random_points(fiber_count, buffer=buffer, my_xy_seed=my_xy_seed):
points.append(point)
my_xy_seed += 1
else: # do bottom-up approach
# get required parameters
target_number = self.search(Config.SIM, 'fibers', 'xy_parameters', 'target_number')
maximum_number = self.search(Config.SIM, 'fibers', 'xy_parameters', 'maximum_number')
# calculate target density
min_area = np.amin([[fascicle.smallest_trace().area()
for fascicle in self.sample.slides[0].fascicles]])
target_density = float(target_number) / min_area
for fascicle in self.sample.slides[0].fascicles:
for inner in fascicle.inners:
fiber_count = target_density * inner.area()
if fiber_count > maximum_number:
fiber_count = maximum_number
for point in inner.random_points(fiber_count, buffer=buffer, my_xy_seed=my_xy_seed):
points.append(point)
my_xy_seed += 1
elif xy_mode == FiberXYMode.UNIFORM_COUNT:
count: int = self.search(Config.SIM, 'fibers', 'xy_parameters', 'count')
for fascicle in self.sample.slides[0].fascicles:
for inner in fascicle.inners:
for point in inner.random_points(count, buffer=buffer, my_xy_seed=my_xy_seed):
points.append(point)
my_xy_seed += 1
elif xy_mode == FiberXYMode.WHEEL:
# get required parameters
spoke_count: int = self.search(Config.SIM, 'fibers', 'xy_parameters', 'spoke_count')
point_count: int = self.search(Config.SIM, 'fibers', 'xy_parameters',
'point_count_per_spoke') # this number is PER SPOKE
find_centroid: bool = self.search(Config.SIM, 'fibers', 'xy_parameters', 'find_centroid')
angle_offset_is_in_degrees: bool = self.search(Config.SIM, 'fibers',
'xy_parameters',
'angle_offset_is_in_degrees')
angle_offset: float = self.search(Config.SIM, 'fibers', 'xy_parameters', 'angle_offset')
# convert angle offset to radians if necessary
if angle_offset_is_in_degrees:
angle_offset *= 2 * np.pi / 360
# master loop!
for fascicle in self.sample.slides[0].fascicles:
for inner in fascicle.inners:
if find_centroid:
points.append(inner.centroid())
# loop through spoke angles
for spoke_angle in (np.linspace(0, 2 * np.pi, spoke_count + 1)[:-1] + angle_offset):
# find the mean radius for a reference distance when "casting the spoke ray"
new_inner = inner.deepcopy()
new_inner.offset(None, -buffer)
mean_radius = new_inner.mean_radius()
# get a point that is assumed to be outside the trace
raw_outer_point = np.array(new_inner.centroid()) + [5 * mean_radius * np.cos(spoke_angle),
5 * mean_radius * np.sin(spoke_angle)]
# build a vector starting from the centroid of the trace
raw_spoke_vector = LineString([new_inner.centroid(),
tuple(raw_outer_point)])
# get that vector's intersection with the trace to find "trimmed" endpoint
intersection_with_boundary = raw_spoke_vector.intersection(new_inner.polygon().boundary)
# fix type of intersection with boundary
if not isinstance(intersection_with_boundary, Point):
intersection_with_boundary = list(intersection_with_boundary)[0]
# build trimmed vector
trimmed_spoke_vector = LineString([new_inner.centroid(),
tuple(intersection_with_boundary.coords)[0]])
# get scale vectors whose endpoints will be the desired points ([1:] to not include 0)
scaled_vectors: List[LineString] = [scale(trimmed_spoke_vector, *([factor] * 3),
origin=trimmed_spoke_vector.coords[0])
for factor in np.linspace(0, 1, point_count + 2)[1:-1]]
# loop through the end points of the vectors
for point in [vector.coords[1] for vector in scaled_vectors]:
points.append(point)
elif xy_mode == FiberXYMode.EXPLICIT:
if not os.path.exists(os.path.join(sim_directory, 'explicit.txt')):
self.throw(83)
with open(os.path.join(sim_directory, 'explicit.txt')) as f:
# advance header
next(f)
reader = csv.reader(f, delimiter=" ")
for row in reader:
points.append(tuple([float(row[0]), float(row[1])]))
# check that all fibers are within exactly one inner
for fiber in points:
if not any([Point(fiber).within(inner.polygon())
for fascicle in self.sample.slides[0].fascicles for inner in fascicle.inners]):
print("Explicit fiber coordinate: {} does not fall in an inner".format(fiber))
self.throw(71)
if plot:
plt.figure()
self.sample.slides[0].plot(final=False, fix_aspect_ratio=True)
for point in points:
plt.plot(point[0], point[1], 'r.', markersize = 1)
if self.search(Config.SIM, 'plot_folder',optional = True) == True:
plt.savefig(sim_directory+'/plots/fibers_xy.png',dpi=300)
plt.close()
else: plt.show()
else:
self.throw(30)
return points
def plot(self, ax: plt.Axes = None,
fiber_colors: List[Tuple[float, float, float, float]] = None,
size=10):
for fiber_ind, fiber in enumerate(self.fibers):
ax.plot(fiber[0][0], fiber[0][1], color=fiber_colors[fiber_ind], marker='o', markersize=size)
def _generate_z(self, fibers_xy: np.ndarray, override_length=None, super_sample: bool = False) -> np.ndarray:
fibers = []
def clip(values: list, start, end, myel: bool, is_points: bool = False) -> list:
step = 1
if myel:
step = 11
while 1:
if (start + 0.1) > (values[0] if not is_points else values[0][-1]):
values = values[step:]
elif (end - 0.1) < (values[-1] if not is_points else values[-1][-1]):
values = values[:-step]
else:
break
return values
def generate_myel_fiber_zs(diameter):
delta_z = \
paranodal_length_2 = \
inter_length = None
sampling_mode = self.search(Config.FIBER_Z,
MyelinationMode.parameters.value,
fiber_geometry_mode_name,
'sampling')
node_length, paranodal_length_1, inter_length_str = (
self.search(Config.FIBER_Z, MyelinationMode.parameters.value, fiber_geometry_mode_name, key)
for key in ('node_length', 'paranodal_length_1', 'inter_length')
)
# load in all the required specifications for finding myelinated z coordinates
if sampling_mode == MyelinatedSamplingType.DISCRETE.value:
diameters, my_delta_zs, paranodal_length_2s = (
self.search(Config.FIBER_Z, MyelinationMode.parameters.value, fiber_geometry_mode_name, key)
for key in ('diameters', 'delta_zs', 'paranodal_length_2s')
)
diameter_index = diameters.index(diameter)
delta_z = my_delta_zs[diameter_index]
paranodal_length_2 = paranodal_length_2s[diameter_index]
inter_length = eval(inter_length_str)
elif sampling_mode == MyelinatedSamplingType.INTERPOLATION.value:
paranodal_length_2_str, delta_z_str, inter_length_str = (
self.search(Config.FIBER_Z, MyelinationMode.parameters.value, fiber_geometry_mode_name, key)
for key in ('paranodal_length_2', 'delta_z', 'inter_length')
)
paranodal_length_2 = eval(paranodal_length_2_str)
if fiber_geometry_mode_name == FiberGeometry.B_FIBER.value:
inter_length = eval(inter_length_str)
delta_z = eval(delta_z_str)
elif fiber_geometry_mode_name == FiberGeometry.MRG_INTERPOLATION.value:
if diameter > 16.0 or diameter < 2.0:
self.throw(77)
if diameter >= 5.643:
delta_z = eval(delta_z_str["diameter_greater_or_equal_5.643um"])
else:
delta_z = eval(delta_z_str["diameter_less_5.643um"])
inter_length = eval(inter_length_str)
z_steps: List = []
while (sum(z_steps) - half_fiber_length) < 0.001:
z_steps += [(node_length / 2) + (paranodal_length_1 / 2),
(paranodal_length_1 / 2) + (paranodal_length_2 / 2),
(paranodal_length_2 / 2) + (inter_length / 2),
*([inter_length] * 5),
(inter_length / 2) + (paranodal_length_2 / 2),
(paranodal_length_2 / 2) + (paranodal_length_1 / 2),
(paranodal_length_1 / 2) + (node_length / 2)]
# account for difference between last node z and half fiber length -> must shift extra distance
my_z_shift_to_center_in_fiber_range = half_fiber_length - sum(z_steps)
reverse_z_steps = z_steps.copy()
reverse_z_steps.reverse()
# concat, cumsum, and other stuff to get final list of z points
my_zs = np.array(
list(
np.cumsum(
np.concatenate(
([0], reverse_z_steps, z_steps)
)
)
),
)
return my_zs, delta_z, my_z_shift_to_center_in_fiber_range
def build_fiber_with_offset(z_values: list, myel: bool, dz: float, my_x: float, my_y: float,
additional_offset: float = 0):
random_offset_value = 0
# get offset param - NOTE: raw value is a FRACTION of dz (explanation for multiplication by dz)
offset = self.search(Config.SIM, 'fibers', FiberZMode.parameters.value,'offset',optional=True)
if offset is None:
offset = 0
random_offset_value = dz * (random.random() - 0.5)
else:
if 0 <= offset <= 1:
offset = offset*dz
else:
self.throw(99)
# compute offset z coordinate
z_offset = [my_z + offset + random_offset_value + additional_offset for my_z in z_values]
# xy_mode_name: str = self.search(Config.SIM, 'fibers', 'xy_parameters', 'mode')
# xy_mode: FiberXYMode = [mode for mode in FiberXYMode if str(mode).split('.')[-1] == xy_mode_name][0]
# only clip if NOT an SL fiber
z_offset = clip(z_offset,
self.search(Config.SIM, 'fibers', FiberZMode.parameters.value, 'min'),
self.search(Config.SIM, 'fibers', FiberZMode.parameters.value, 'max'),
myel)
my_fiber = [(my_x, my_y, z) for z in z_offset]
return my_fiber
# %% START ALGORITHM
# get top-level fiber z generation
fiber_z_mode: FiberZMode = self.search_mode(FiberZMode, Config.MODEL)
# all functionality is only defined for EXTRUSION as of now
if fiber_z_mode == FiberZMode.EXTRUSION:
model_length = self.search(Config.MODEL, 'medium', 'proximal', 'length') if (
override_length is None) else override_length
if not 'min' in self.configs['sims']['fibers']['z_parameters'].keys() or \
not 'max' in self.configs['sims']['fibers']['z_parameters'].keys() or \
override_length is not None:
fiber_length = model_length if override_length is None else override_length
self.configs['sims']['fibers'][FiberZMode.parameters.value]['min'] = 0
self.configs['sims']['fibers'][FiberZMode.parameters.value]['max'] = fiber_length
if override_length is None:
warnings.warn('Program assumed fiber length same as proximal length since "min" and "max" fiber '
'length not defined in Config.Sim "fibers" -> "z_parameters"')
else:
min_fiber_z_limit = self.search(Config.SIM, 'fibers', FiberZMode.parameters.value, 'min')
max_fiber_z_limit = self.search(Config.SIM, 'fibers', FiberZMode.parameters.value, 'max')
if not max_fiber_z_limit > min_fiber_z_limit:
self.throw(105)
fiber_length = (max_fiber_z_limit - min_fiber_z_limit) if override_length is None else override_length
half_fiber_length = fiber_length / 2
if not ('longitudinally_centered' in self.configs['sims']['fibers']['z_parameters'].keys()):
longitudinally_centered = True
else:
longitudinally_centered = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value,
'longitudinally_centered')
if longitudinally_centered:
z_shift_to_center_in_model_range = (model_length - fiber_length) / 2
else:
z_shift_to_center_in_model_range = 0
# xy_mode_name: str = self.search(Config.SIM, 'fibers', 'xy_parameters', 'mode')
# xy_mode: FiberXYMode = [mode for mode in FiberXYMode if str(mode).split('.')[-1] == xy_mode_name][0]
# check that proximal model length is greater than or equal to fiber length (fibers only in nerve trunk)
# override this functionality if using SL (not in nerve trunk)
assert model_length >= fiber_length, 'proximal length: ({}) < fiber length: ({})'.format(model_length,
fiber_length)
fiber_geometry_mode_name: str = self.search(Config.SIM, 'fibers', 'mode')
# use key from above to get myelination mode from fiber_z
diams = []
diameter = self.search(Config.SIM, 'fibers', FiberZMode.parameters.value, 'diameter')
diam_distribution: bool = True if type(diameter) is dict else False
if super_sample:
myelinated = False
else:
myelinated: bool = self.search(
Config.FIBER_Z,
MyelinationMode.parameters.value,
fiber_geometry_mode_name,
'myelinated'
)
my_z_seed = self.search(Config.SIM, 'fibers', FiberZMode.parameters.value, 'seed')
if diam_distribution:
sampling_mode = self.search(Config.FIBER_Z,
MyelinationMode.parameters.value,
fiber_geometry_mode_name,
'sampling')
if myelinated and not (sampling_mode == MyelinatedSamplingType.INTERPOLATION.value):
self.throw(104)
distribution_mode_name = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value, 'diameter',
'mode')
distribution_mode: DiamDistMode = [mode for mode in DiamDistMode if
str(mode).split('.')[-1] == distribution_mode_name][0]
# seed rng
my_diam_seed: int = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value,
'diameter',
'seed')
np.random.seed(my_diam_seed)
fiber_diam_dist = None
if distribution_mode == DiamDistMode.UNIFORM:
# load parameters
lower_fiber_diam: float = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value,
'diameter',
'lower')
upper_fiber_diam: float = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value,
'diameter',
'upper')
# parameter checking
# positive values, order makes sense, etc
if lower_fiber_diam < 0:
self.throw(100)
if lower_fiber_diam > upper_fiber_diam:
self.throw(101)
fiber_diam_dist = stats.uniform(lower_fiber_diam, upper_fiber_diam - lower_fiber_diam)
elif distribution_mode == DiamDistMode.TRUNCNORM:
# load parameters
n_std_fiber_diam_limit: float = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value,
'diameter',
'n_std_limit')
mu_fiber_diam: float = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value,
'diameter',
'mu')
std_fiber_diam: float = self.search(Config.SIM,
'fibers',
FiberZMode.parameters.value,
'diameter',
'std')
lower_fiber_diam = mu_fiber_diam - n_std_fiber_diam_limit * std_fiber_diam
upper_fiber_diam = mu_fiber_diam + n_std_fiber_diam_limit * std_fiber_diam
# parameter checking
# positive values, order makes sense, etc
if n_std_fiber_diam_limit == 0 and std_fiber_diam != 0:
self.throw(102)
if lower_fiber_diam < 0:
self.throw(103)
fiber_diam_dist = stats.truncnorm((lower_fiber_diam - mu_fiber_diam) / std_fiber_diam,
(upper_fiber_diam - mu_fiber_diam) / std_fiber_diam,
loc=mu_fiber_diam,
scale=std_fiber_diam)
diams = fiber_diam_dist.rvs(len(fibers_xy))
if myelinated and not super_sample: # MYELINATED
random.seed(my_z_seed)
if len(diams) == 0:
diams = [diameter] * len(fibers_xy)
for (x, y), diam in zip(fibers_xy, diams):
zs, delta_z, z_shift_to_center_in_fiber_range = generate_myel_fiber_zs(diam)
fiber_pre = build_fiber_with_offset(zs,
myelinated,
delta_z,
x, y,
z_shift_to_center_in_model_range + z_shift_to_center_in_fiber_range)
if np.amax(np.array(fiber_pre)[:, 2]) - np.amin(np.array(fiber_pre)[:, 2]) > fiber_length:
self.throw(119)
if diam_distribution:
fiber = {'diam': diam, 'fiber': fiber_pre}
else:
fiber = fiber_pre
fibers.append(fiber)
else: # UNMYELINATED
if super_sample:
if 'dz' in self.configs[Config.SIM.value]['supersampled_bases'].keys():
delta_z = self.search(Config.SIM, 'supersampled_bases', 'dz')
my_z_seed = 123
else:
self.throw(79)
else:
delta_z = self.search(Config.FIBER_Z,
MyelinationMode.parameters.value,
fiber_geometry_mode_name,
'delta_zs')
z_top_half = np.arange(fiber_length / 2, fiber_length + delta_z, delta_z)
z_bottom_half = -np.flip(z_top_half) + fiber_length
while z_top_half[-1] > fiber_length:
# trim top of top half
z_top_half = z_top_half[:-1]
z_bottom_half = z_bottom_half[1:]
if len(diams) == 0:
diams = [self.search(Config.SIM, 'fibers', FiberZMode.parameters.value, 'diameter')] * len(
fibers_xy)
for (x, y), diam in zip(fibers_xy, diams):
fiber_pre = build_fiber_with_offset(list(np.concatenate((z_bottom_half[:-1], z_top_half))),
myelinated,
delta_z,
x, y,
z_shift_to_center_in_model_range)
if np.amax(np.array(fiber_pre)[:, 2]) - np.amin(
np.array(fiber_pre)[:, 2]) > fiber_length: self.throw(119)
if diam_distribution:
fiber = {'diam': diam, 'fiber': fiber_pre}
else:
fiber = fiber_pre
fibers.append(fiber)
else:
self.throw(31)
return fibers
|
{"hexsha": "90b9279b746e8d80f9c8fe26531545d3baaf5e5e", "size": 31199, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/core/fiberset.py", "max_stars_repo_name": "wmglab-duke/ascent", "max_stars_repo_head_hexsha": "2ca8c39a4462a728108038294ddac27488e9758b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-07-21T18:03:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T17:25:33.000Z", "max_issues_repo_path": "src/core/fiberset.py", "max_issues_repo_name": "wmglab-duke/ascent", "max_issues_repo_head_hexsha": "2ca8c39a4462a728108038294ddac27488e9758b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-09-29T17:01:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T18:06:34.000Z", "max_forks_repo_path": "src/core/fiberset.py", "max_forks_repo_name": "wmglab-duke/ascent", "max_forks_repo_head_hexsha": "2ca8c39a4462a728108038294ddac27488e9758b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-09-02T09:39:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T22:42:01.000Z", "avg_line_length": 48.7484375, "max_line_length": 124, "alphanum_fraction": 0.4950158659, "include": true, "reason": "import numpy,import scipy", "num_tokens": 6037}
|
from unittest import TestCase
import time
import shutil
import os
from dat_analysis.dat_object.attributes.transition import Transition, default_transition_params, i_sense
from dat_analysis.dat_object.dat_hdf import DatHDF
from dat_analysis.hdf_file_handler import HDFFileHandler
import h5py
from dat_analysis.hdf_util import with_hdf_read
from tests import helpers
import numpy as np
import lmfit as lm
output_dir = 'Outputs/Transition/'
class Testing_Transition(Transition):
"""Override the normal init behaviour so it doesn't fail before reaching tests"""
class TestTransition(TestCase):
helpers.clear_outputs(output_dir)
dat = helpers.init_testing_dat(9111, output_directory=output_dir)
T = Testing_Transition(dat)
def test_get_default_params(self):
default_pars = self.T.get_default_params()
self.assertEqual(default_pars, default_transition_params())
def test_get_non_default_params(self):
self.T.initialize_minimum()
pars = self.T.get_default_params(self.T.x, self.T.data[0:5])
self.assertTrue(np.all([isinstance(p, lm.Parameters) for p in pars]))
def test_get_default_func(self):
self.assertEqual(self.T.get_default_func(), i_sense)
def test_initialize_minimum(self):
self.T.initialize_minimum()
self.assertTrue(self.T.initialized)
class TestExistingTransition(TestCase):
def setUp(self):
out_path = 'Outputs/Transition/DatHDFs[Transition].h5'
if os.path.exists(out_path):
os.remove(out_path)
shutil.copy2('fixtures/DatHDFs/Dat9111[Transition].h5', out_path)
with HDFFileHandler(out_path, 'r') as f:
self.dat = DatHDF(f) # A dat with Transition info already filled
self.t0 = time.time()
def test_load_avg_fit(self):
"""Check that getting an existing avg fit is fast"""
fit = self.dat.Transition.avg_fit
self.assertLess(time.time()-self.t0, 1) # Should take less than 1 second to retrieve fit from HDF
def test_load_avg_data(self):
"""Check that getting existing avg data is fast"""
data = self.dat.Transition.avg_data
self.assertLess(time.time()-self.t0, 1) # Should take less than 1 second to retrieve data from HDF
|
{"hexsha": "ee75c861f3005603ddc277e94c20bb4f2ec82830", "size": 2262, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_Transition.py", "max_stars_repo_name": "TimChild/dat_analysis", "max_stars_repo_head_hexsha": "2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_Transition.py", "max_issues_repo_name": "TimChild/dat_analysis", "max_issues_repo_head_hexsha": "2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_Transition.py", "max_forks_repo_name": "TimChild/dat_analysis", "max_forks_repo_head_hexsha": "2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9047619048, "max_line_length": 107, "alphanum_fraction": 0.724137931, "include": true, "reason": "import numpy", "num_tokens": 508}
|
From hahn Require Import Hahn.
Require Import Exec.
Require Import Events.
Section Scdrf.
Lemma drf_tot__hb_sc e X Y :
well_formed e ->
consistent e ->
data_race_free e ->
tot e X Y ->
overlap X Y ->
writes e X \/ writes e Y ->
hb e X Y \/ (same_loc X Y /\ sc e X /\ sc e Y).
Proof.
intros wf cst drf totXY overlapXY wXY.
unfold data_race_free in drf.
destruct drf with X Y as [hbXY | [hbYX | [[nWX nWY] | [novrlpXY | [slXY [scX scY]]]]]];
auto.
- exfalso.
destruct wf_tot with e as [[irrefl trans] tot]; try assumption.
apply irrefl with X.
apply trans with Y; try assumption.
apply cst_hb_tot; try assumption.
apply cst_ufxd.
assumption.
- exfalso.
destruct wXY; contradiction.
- exfalso.
destruct overlapXY.
apply novrlpXY with x.
destruct H.
split; assumption.
Qed.
Theorem sc_drf e :
well_formed e ->
consistent e ->
data_race_free e ->
seqcst e.
Proof.
intros wf cst drf.
unfold seqcst.
unfolder.
intros n.
split.
{ intros Y [X [rfbYX totXY]].
assert (hb e X Y \/ (same_loc X Y /\ sc e X /\ sc e Y)) as [hbXY | [slXY [scX scY]]]. {
apply drf_tot__hb_sc; auto.
exists n. split; destruct (rfb__dom e X Y n wf rfbYX); try assumption.
right. eapply rfb__w; eassumption.
}
- apply (cst_rf_hb e (cst_ufxd e cst) X).
exists Y.
split; try assumption.
econstructor. eauto.
- destruct (wf_tot e wf) as [[irrefl trans] _].
apply irrefl with X.
apply trans with Y; try assumption.
apply cst_hb_tot; try apply cst_ufxd; try assumption.
constructor. left. right.
apply sw_intro; auto.
eexists; eauto.
apply same_loc_sym. auto.
}
intros Z' [Z [[eqZZ' [wZ domZn]] [X [totZX [Y [rfbYX totYZ]]]]]].
subst.
assert (in_dom n Y /\ in_dom n X) as [domYn domXn]. {
apply rfb__dom with e; assumption.
}
assert (hb e Y Z \/ (same_loc Y Z /\ sc e Y /\ sc e Z)) as drfYZ. {
apply drf_tot__hb_sc; auto.
exists n; auto.
}
assert (hb e Z X \/ (same_loc Z X /\ sc e Z /\ sc e X)) as drfZX. {
apply drf_tot__hb_sc; auto.
exists n; auto.
}
assert (hb e Y X \/ (same_loc Y X /\ sc e Y /\ sc e X)) as drfYX. {
apply drf_tot__hb_sc; auto.
- destruct (wf_tot e wf) as [[_ trans] _].
apply trans with Z; assumption.
- exists n; auto.
- left.
apply rfb__w with X n; assumption.
}
destruct drfYZ as [hbYZ | [slYZ [ScY scZ]]];
destruct drfZX as [hbZX | [slZX [ScZ' ScX]]];
[ apply (cst_rf_hb_hb e (cst_ufxd e cst) n Z) |
destruct drfYX as [hbYX | [slYX [ScY _]]];
[apply (cst_ddagger e cst Z) | apply (cst_sw_tot e (cst_ufxd e cst) Z)] |
destruct drfYX as [hbYX | [slYX [_ ScX]]];
[apply (cst_dagger e cst Z) | apply (cst_sw_tot e (cst_ufxd e cst) Z)] |
apply (cst_sw_tot e (cst_ufxd e cst) Z)
];
unfolder;
split; auto;
repeat (eexists; split; eauto).
- split; try assumption.
apply rfb__rf with n.
assumption.
- apply sw_intro; auto.
apply rfb__rf with n.
assumption.
- split; try assumption.
apply rfb__rf with n.
assumption.
- split.
+ apply (cst_hb_tot e (cst_ufxd e cst)).
eassumption.
+ eapply same_loc_trans;
try eassumption.
apply same_loc_sym.
assumption.
- apply sw_intro; auto.
apply rfb__rf with n.
assumption.
- apply sw_intro; auto.
apply rfb__rf with n.
assumption.
eapply same_loc_trans;
eassumption.
Qed.
End Scdrf.
|
{"author": "Biebar", "repo": "jsrelaxedmemorymodel_coq", "sha": "b0e5d5e470d7fcc579121f9013bf1df4ad5afe69", "save_path": "github-repos/coq/Biebar-jsrelaxedmemorymodel_coq", "path": "github-repos/coq/Biebar-jsrelaxedmemorymodel_coq/jsrelaxedmemorymodel_coq-b0e5d5e470d7fcc579121f9013bf1df4ad5afe69/Scdrf.v"}
|
#PyQt imports
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit
import sys
import os
from time import sleep
import pyrealsense2 as rs;
import numpy as np;
import cv2 as cv;
class Container(QWidget):
def __init__(self):
super().__init__();
self.__textField = "";
self.__count = "1";
self.initUI();
def initUI(self):
# Input text for classification
inputText = QLineEdit(self.__textField, self);
inputText.setToolTip('Input Gesture Classification Here');
inputText.resize(150, 100);
inputText.textChanged.connect(self.onTextChange);
# Input text for number of gestures to collect
inputImageCount = QLineEdit(self.__count, self);
inputImageCount.setToolTip('Input Amount Of Gestures To Capture');
inputImageCount.resize(150, 100);
inputImageCount.move(150,0);
inputImageCount.textChanged.connect(self.onImageCountChange);
# Capture button to initialize capturing of data.
captureButton = QPushButton('Capture Data', self);
captureButton.setToolTip('Use this button to capture an image from camera');
captureButton.resize(150, 100);
captureButton.move(300, 0);
captureButton.clicked.connect(self.onCaptureClick);
def onTextChange(self, text):
self.__textField = text;
def onImageCountChange(self, count):
self.__count = count;
def onCaptureClick(self):
if (self.__textField != ""):
self.addToDataset(self.__textField, int(self.__count));
sleep(2)
def getCountInDir(self, path):
return len(os.listdir(path))
def outputImage(self, image, classification):
path = './datasets/' + classification + '/'
if (not os.path.exists(path)):
os.mkdir(path)
path += str(self.getCountInDir(path)) + '.jpg'
cv.imwrite(path, image)
def removeBackground(self, image):
if (np.max(image) != 0):
minVal = np.min(image[np.nonzero(image)])
image[image > 1500 + minVal] = 0
return image
def addToDataset(self, text, count):
# Create a pipeline
pipeline = rs.pipeline()
#Create a config and configure the pipeline to stream for depth at 30fps
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# Start streaming pipeline
profile = pipeline.start(config)
# Getting depth sensor
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
# Set a clipping distance of 1 meter
clipping_distance_in_meters = 1 #1 meter
clipping_distance = clipping_distance_in_meters / depth_scale
# Align stream to depth
align_to = rs.stream.color
align = rs.align(align_to)
for i in range(count):
# Get frame of depth
frames = pipeline.wait_for_frames();
aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
# Compile a depth image
depth_image = np.asanyarray(aligned_depth_frame.get_data())
# Remove background
depth_image = self.removeBackground(depth_image)
# Turn image into three channel image
depth_image_3d = np.dstack((depth_image,depth_image,depth_image))
# Create a colormap depth image
depth_colormap = cv.applyColorMap(cv.convertScaleAbs(depth_image, alpha=0.03), cv.COLORMAP_JET)
# Create gray image so we can find contours
depth_gray = cv.cvtColor(depth_colormap, cv.COLOR_BGR2GRAY)
canny_output = cv.Canny(depth_gray, 100, 200);
contours, hierarchy = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# Create copy to draw contours on
contour_map = depth_colormap.copy()
contour_map = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
# Draw contours
for i in range(len(contours)):
color = (0, 255, 0)
cv.drawContours(contour_map, contours, i, color, 2, cv.LINE_8, hierarchy, 0)
self.outputImage(contour_map, self.__textField)
# Draw images side by side
images = np.hstack((depth_colormap, contour_map))
cv.namedWindow('Contour Example', cv.WINDOW_AUTOSIZE)
cv.imshow('Contour Example', images)
key = cv.waitKey(1)
def main():
app = QApplication(sys.argv);
window = Container();
window.setGeometry(0, 0, 450, 100);
window.setWindowTitle("Gesture Recognition Training GUI");
window.show();
sys.exit(app.exec_());
if __name__ == "__main__":
main();
|
{"hexsha": "0baaf93812456c6387128205b87e12449765ee40", "size": 4951, "ext": "py", "lang": "Python", "max_stars_repo_path": "addToDataset.py", "max_stars_repo_name": "ShaneClancy/gesture-recognition", "max_stars_repo_head_hexsha": "d6b3e14f6001fda71bb798435896529e2fc54750", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "addToDataset.py", "max_issues_repo_name": "ShaneClancy/gesture-recognition", "max_issues_repo_head_hexsha": "d6b3e14f6001fda71bb798435896529e2fc54750", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "addToDataset.py", "max_forks_repo_name": "ShaneClancy/gesture-recognition", "max_forks_repo_head_hexsha": "d6b3e14f6001fda71bb798435896529e2fc54750", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7880794702, "max_line_length": 107, "alphanum_fraction": 0.6277519693, "include": true, "reason": "import numpy", "num_tokens": 1081}
|
Set Warnings "-notation-overridden".
Require Import Coq.Program.Basics.
From Equations Require Import Equations.
Unset Equations With Funext.
Require Import Category.Lib.
Require Import Category.Theory.
Require Import Embed.Theory.Btree.
Require Import Embed.Theory.Lattice.
Generalizable All Variables.
Set Universe Polymorphism.
Set Nested Proofs Allowed.
Notation "btree!" := (@btree ()) (only parsing, at level 0).
Notation "bnil!" := (@bnil () tt) (only parsing, at level 0).
Notation "btree2!" := (@btree () * @btree ()) (only parsing, at level 0).
Equations and_btree : magma btree! :=
and_btree (bnil _) (bnil _) := bnil () ;
and_btree (bnil _) (bcons _ _) := bnil () ;
and_btree (bcons _ _) (bnil x) := bnil () ;
and_btree (bcons x y) (bcons z w) := bcons (and_btree x z) (and_btree y w).
Global Transparent and_btree.
Global Program Instance and_btree_Idempotent : Idempotent and_btree :=
{ idempotency := _ }.
Next Obligation.
revert x.
fix H 1.
intro.
destruct x.
rewrite and_btree_equation_1.
destruct u.
reflexivity.
rewrite and_btree_equation_4.
refine (bcons_eq _ _).
exact (@idempotency _ _ H _).
exact (@idempotency _ _ H _).
Defined.
Program Instance and_btree_Commutative : Commutative and_btree :=
{ commutativity := _ }.
Next Obligation.
revert b. revert a.
fix and_btree_Commutative 2.
intros.
destruct a, b.
reflexivity.
reflexivity.
reflexivity.
exact (bcons_eq (and_btree_Commutative _ _) (and_btree_Commutative _ _)).
Defined.
Fixpoint and_btree_associative (a b c : btree!) :
and_btree (and_btree a b) c = and_btree a (and_btree b c).
destruct a, b, c.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
unfold and_btree.
refine (bcons_eq _ _).
exact (and_btree_associative _ _ _).
exact (and_btree_associative _ _ _).
Defined.
(* Fails with fix *)
Program Instance and_btree_Associative : Associative and_btree :=
{ associativity := and_btree_associative }.
Equations or_btree : magma btree! :=
or_btree (bnil _) (bnil _) := bnil tt ;
or_btree (bcons x y) (bnil _) := bcons x y ;
or_btree (bnil _) (bcons x y) := bcons x y ;
or_btree (bcons x y) (bcons z w) := bcons (or_btree x z) (or_btree y w).
Global Transparent or_btree.
(* Almost the same proof as for and_btree *)
Program Instance or_btree_Idempotent : Idempotent or_btree :=
{ idempotency := _ }.
Next Obligation.
revert x.
fix H 1.
intro.
destruct x.
rewrite or_btree_equation_1.
destruct u.
reflexivity.
rewrite or_btree_equation_4.
refine (bcons_eq _ _).
exact (@idempotency _ _ H _).
exact (@idempotency _ _ H _).
Defined.
(* Almost the same proof as for and_btree *)
Program Instance or_btree_Commutative : Commutative or_btree :=
{ commutativity := _ }.
Next Obligation.
revert b. revert a.
fix or_btree_Commutative 2.
intros.
destruct a, b.
reflexivity.
reflexivity.
reflexivity.
exact (bcons_eq (or_btree_Commutative _ _) (or_btree_Commutative _ _)).
Defined.
(* Almost the same proof as for and_btree *)
Fixpoint or_btree_associative (a b c : btree!) :
or_btree (or_btree a b) c = or_btree a (or_btree b c).
destruct a, b, c.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
reflexivity.
unfold or_btree.
refine (bcons_eq _ _).
exact (or_btree_associative _ _ _).
exact (or_btree_associative _ _ _).
Defined.
Program Instance or_btree_Associative : Associative or_btree :=
{ associativity := or_btree_associative }.
Fixpoint and_or_absorptive (a b : btree!) :
and_btree a (or_btree a b) = a.
destruct a, b.
unfold or_btree.
unfold and_btree.
destruct u.
reflexivity.
unfold or_btree.
unfold and_btree.
destruct u.
reflexivity.
unfold or_btree.
unfold and_btree.
refine (bcons_eq _ _).
exact (@idempotency _ _ and_btree_Idempotent _).
exact (@idempotency _ _ and_btree_Idempotent _).
unfold or_btree.
unfold and_btree.
refine (bcons_eq _ _).
exact (and_or_absorptive a1 b1).
exact (and_or_absorptive a2 b2).
Defined.
(* May not need Fixpoint, but doesn't allow funelim *)
Program Instance and_or_Absorptive :
Absorptive and_btree or_btree := { absorptivity := and_or_absorptive }.
(* Almost the same proof as for and_or_absorptive *)
Fixpoint or_and_absorptive (a b : btree!) :
or_btree a (and_btree a b) = a.
destruct a, b.
unfold or_btree.
unfold and_btree.
destruct u.
reflexivity.
unfold or_btree.
unfold and_btree.
destruct u.
reflexivity.
unfold or_btree.
unfold and_btree.
refine (bcons_eq _ _).
reflexivity.
reflexivity.
rewrite and_btree_equation_4.
rewrite or_btree_equation_4.
refine (bcons_eq _ _).
exact (or_and_absorptive a1 b1).
exact (or_and_absorptive a2 b2).
Defined.
Program Instance or_and_Absorptive :
Absorptive or_btree and_btree := { absorptivity := or_and_absorptive }.
Program Instance btree_unit_Lattice : Lattice btree := {
lmeet := and_btree;
ljoin := or_btree;
lmeet_commutative := and_btree_Commutative;
lmeet_associative := and_btree_Associative;
lmeet_absorptive := and_or_Absorptive;
lmeet_idempotent := and_btree_Idempotent;
ljoin_commutative := or_btree_Commutative;
ljoin_associative := or_btree_Associative;
ljoin_absorptive := or_and_Absorptive;
ljoin_idempotent := or_btree_Idempotent
}.
Fixpoint and_or_distributive (a b c : btree!) :
and_btree a (or_btree b c) = or_btree (and_btree a b) (and_btree a c).
destruct a, b, c.
unfold or_btree.
unfold and_btree.
reflexivity.
unfold or_btree.
unfold and_btree.
reflexivity.
unfold or_btree.
unfold and_btree.
reflexivity.
unfold or_btree.
unfold and_btree.
reflexivity.
unfold or_btree.
unfold and_btree.
reflexivity.
unfold or_btree.
unfold and_btree.
reflexivity.
unfold or_btree.
unfold and_btree.
reflexivity.
unfold or_btree.
unfold and_btree.
refine (bcons_eq _ _).
exact (and_or_distributive _ _ _).
exact (and_or_distributive _ _ _).
Defined.
Program Instance and_or_Distributive :
Distributive and_btree or_btree := { distributivity := and_or_distributive }.
Obligations.
|
{"author": "michaeljklein", "repo": "btree-lattice-experiments", "sha": "769670d3c98591a4ddb3854feea22eae554323f5", "save_path": "github-repos/coq/michaeljklein-btree-lattice-experiments", "path": "github-repos/coq/michaeljklein-btree-lattice-experiments/btree-lattice-experiments-769670d3c98591a4ddb3854feea22eae554323f5/Theory/Btree/Lattice.v"}
|
[STATEMENT]
lemma take_takefill [simp]: "m \<le> n \<Longrightarrow> take m (takefill fill n w) = takefill fill m w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. m \<le> n \<Longrightarrow> take m (takefill fill n w) = takefill fill m w
[PROOF STEP]
by (auto simp: le_iff_add take_takefill')
|
{"llama_tokens": 109, "file": "Word_Lib_Reversed_Bit_Lists", "length": 1}
|
# <center>Multiscale Geographically Weighted Regression - Binomial dependent variable</center>
The model has been explored and tested for multiple parameters on real and simulated datasets. The research includes the following outline with separate notebooks for each part.
**Notebook Outline:**
**Introduction Notebook (current)**
- [Introduction](#Introduction)
- [Introduction to the project](#Introduction-to-the-project)
- [Statistical Equations](#Statistical-Equations)
- [Local Scoring Algorithm](#Local-Scoring-Algorithm)
- [Notebooks with tests](#Notebooks-with-Tests)
- [References](#References)
[Back to the main page](https://mehak-sachdeva.github.io/MGWR_book/)
---
# Introduction
***
## Introduction to the problem
As prefaced earlier, the Geographically Weighted Regression model in PySAL can currently estimate Gaussian, Poisson and Logistic models though the Multiscale extension of the GWR model is currently limited to only Gaussian models. This part of the project aims to expand the MGWR model to nonlinear local spatial regression modeling techniques where the response outcomes may be binomial (or a Logit model). This will enable a richer and holistic local statistical modeling framework to model multi-scale process heterogeneity for the open source community.
## Statistical Equations
***
A conventional Logistic regression model with $x_1, x_2, ... ,x_k$ as predictors, a binary(Bernoulli) response variable y and l denoting the log-odds of the event that y=1, can be written as:
\begin{align}
l = log_b ( p / (1-p)) = ({\sum} {\beta} & _k x _{k,i}) \\
\end{align}
where $x_{k,1}$ is the kth explanatory variable in place i, $𝛽_{ks}$ are the parameters and p is the probability such that p = P( Y = 1 ).
By exponentiating the log-odds:
$p / (1-p) = b^ {𝛽_0+𝛽_1 x_1+𝛽_2 x_2} $
It follows from this - the probability that Y = 1 is:
$p = (b^ {𝛽_0 + 𝛽_1 x_1 + 𝛽_2 x_2}) / (b^ {𝛽_0 + 𝛽_1 x_1 + 𝛽_2 x_2} + 1)$ = $1 / (1 + b^ {-𝛽_0 + 𝛽_1 x_1 + 𝛽_2 x_2})$
# Local Scoring Algorithm
***
Following the technique from (Hastie & Tibshirani, 1986), for logisitic generalized additive models the model was estimated using the local scoring algorithm as follows:
1. Initialize the current estimate of the additive predictor $n_i^{old}$:<br>$n_i^{old} = {\sum} {\beta}_k X_k$<br>and the probability such P(Y=1): $p_i^{old} = exp({n_i^{old}})/(1+exp({n_i^{old}}))$ <br><br>
2. Compute the working response: <br>
$z_i = n_i^{old} + (y_i - p_i^{old})/(p_i^{old}(1-p_i^{old}))$<br><br>
3. compute weights $w_i = p_i^{old} (1-p_i^{old})$<br><br>
4. obtain $n_i^{new}$ by fitting a weighted additive model to $z_i$. In this the smoothers in the backfitting algorithm incorporate the additional weights and GWR is used for the linear parts.<br><br>
These steps are repeated until the relative change in the fitted coefficients and the functions is below a tolerance threshold (1e-05 in this case).
Reference for these equations: http://ugrad.stat.ubc.ca/~nancy/526_2003/projects/kazi2.pdf
### Further work required:
The parameters for the estimated model using Monte Carlo tests with simulated data are close to expected. Further exploration is required to theoretically justify the model in the context of spatial data models, especially MGWR.
As an exploration, this work includes results from both adding a stochastic error to the model during calibration and without it. Results for both are shown in the notebooks below.
# Notebooks with Tests
**[Initial module changes and univariate model check ](http://mehak-sachdeva.github.io/MGWR_book/Html/Binomial_MGWR_univariate_check)**
- Setup with libraries
- Fundamental equations for Binomial MGWR
- Example Dataset
- Helper functions
- Univariate example
- Parameter check
- Bandwidths check
**[Simulated Data example](http://mehak-sachdeva.github.io/MGWR_book/Html/Simulated_data_example_Binomial-MGWR)**
- Setup with libraries
- Create Simulated Dataset
- Forming independent variables
- Creating y variable with Binomial distribution
- Univariate example
- Bandwidth: Random initialization check
- Parameters check
- Multivariate example
- Bandwidths: Random initialization check
- Parameters check
- Global model parameter check
**[Real Data example](http://mehak-sachdeva.github.io/MGWR_book/Html/Real_data_example_Binomial-MGWR)**
- Setup with libraries
- Landslide Dataset
- Univariate example
- Bandwidth: Random initialization check
- Parameter check
- Multivariate example
- Bandwidths: Random initialization check
- MGWR bandwidths
- AIC, AICc, BIC check
## Monte Carlo Tests
***
### Monte Carlo tests for model estimated with error
**[Monte Carlo Simulation Visualization](http://mehak-sachdeva.github.io/MGWR_book/Html/Binomial_MGWR_MonteCarlo_Results)**
- Setup with libraries
- List bandwidths from pickles
- Parameter functions
- GWR bandwidth
- MGWR bandwidths
- AIC, AICc, BIC check
- AIC, AICc, BIC Boxplots for comparison
- Parameter comparison from MGWR and GWR
### Monte Carlo tests for model estimated without error
**[Monte Carlo Simulation Visualization](http://mehak-sachdeva.github.io/MGWR_book/Html/Binomial_MGWR_MonteCarlo_Results-we)**
- Setup with libraries
- List bandwidths from pickles
- Parameter functions
- GWR bandwidth
- MGWR bandwidths
- AIC, AICc, BIC check
- AIC, AICc, BIC Boxplots for comparison
- Parameter comparison from MGWR and GWR
# References:
1. Fotheringham, A. S., Yang, W., & Kang, W. (2017). Multiscale Geographically Weighted Regression (MGWR). Annals of the American Association of Geographers, 107(6), 1247–1265. https://doi.org/10.1080/24694452.2017.1352480
2. Yu, H., Fotheringham, A. S., Li, Z., Oshan, T., Kang, W., & Wolf, L. J. (2019). Inference in Multiscale Geographically Weighted Regression. Geographical Analysis, gean.12189. https://doi.org/10.1111/gean.12189
3. Hastie, T., & Tibshirani, R. (1986). Generalized Additive Models. Statistical Science, 1(3), 297–310. https://doi.org/10.1214/ss/1177013604
4. Wood, S. N. (2006). Generalized additive models : an introduction with R. Chapman & Hall/CRC.
[Back to the main page](https://mehak-sachdeva.github.io/MGWR_book/)
|
{"hexsha": "97840b37f3f5f1e13e2650b54faea4bb1e127598", "size": 8779, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Notebooks/.ipynb_checkpoints/Binomial_MGWR-checkpoint.ipynb", "max_stars_repo_name": "TaylorOshan/MGWR_workshop_book", "max_stars_repo_head_hexsha": "4c0be5cb08dfc669c8da0d1c074f3c5052a81c0a", "max_stars_repo_licenses": ["MIT-0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-01-21T08:30:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-24T05:40:43.000Z", "max_issues_repo_path": "Notebooks/Binomial_MGWR.ipynb", "max_issues_repo_name": "TaylorOshan/MGWR_book", "max_issues_repo_head_hexsha": "c59db902b34d625af4d0e1b90fbc95018a3de579", "max_issues_repo_licenses": ["MIT-0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Notebooks/Binomial_MGWR.ipynb", "max_forks_repo_name": "TaylorOshan/MGWR_book", "max_forks_repo_head_hexsha": "c59db902b34d625af4d0e1b90fbc95018a3de579", "max_forks_repo_licenses": ["MIT-0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-07-20T19:43:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-07T23:41:08.000Z", "avg_line_length": 37.0421940928, "max_line_length": 566, "alphanum_fraction": 0.613281695, "converted": true, "num_tokens": 1747}
|
/*
// Licensed to DynamoBI Corporation (DynamoBI) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. DynamoBI licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
*/
#include "fennel/common/CommonPreamble.h"
#include "fennel/common/PseudoUuid.h"
#include "fennel/common/TraceSource.h"
#include "fennel/test/TestBase.h"
#include <boost/test/test_tools.hpp>
#include <iostream>
using namespace fennel;
using std::string;
/** tests the common PseudoUuid class */
class PseudoUuidTest : public TestBase, public TraceSource
{
void testGeneration();
void testInvalid();
void testComparison();
void testParsing();
void testFormatting();
void testCopy();
public:
explicit PseudoUuidTest()
: TraceSource(shared_from_this(), "PseudoUuidTest")
{
FENNEL_UNIT_TEST_CASE(PseudoUuidTest, testGeneration);
FENNEL_UNIT_TEST_CASE(PseudoUuidTest, testInvalid);
FENNEL_UNIT_TEST_CASE(PseudoUuidTest, testComparison);
FENNEL_UNIT_TEST_CASE(PseudoUuidTest, testParsing);
FENNEL_UNIT_TEST_CASE(PseudoUuidTest, testFormatting);
FENNEL_UNIT_TEST_CASE(PseudoUuidTest, testCopy);
}
};
void PseudoUuidTest::testGeneration()
{
PseudoUuid uuid;
uuid.generate();
for (int i = 0; i < PseudoUuid::UUID_LENGTH; i++) {
if (uuid.getByte(i) != 0) {
// non-zero byte
return;
}
}
BOOST_ERROR("PseudoUuid::generate() generated all-zero UUID");
}
void PseudoUuidTest::testInvalid()
{
PseudoUuid uuid;
uuid.generateInvalid();
for (int i = 0; i < PseudoUuid::UUID_LENGTH; i++) {
BOOST_CHECK_MESSAGE(
uuid.getByte(i) == (uint8_t)0xFF, "invalid UUID not all 0xFF");
}
}
void PseudoUuidTest::testComparison()
{
PseudoUuid uuid1("00010203-0405-0607-0809-0A0B0C0D0E0F");
PseudoUuid uuid2("00010203-0405-0607-0809-0A0B0C0D0E0F");
PseudoUuid uuid3("0F0E0D0C-0B0A-0908-0706-050403020100");
BOOST_CHECK(uuid1 == uuid2);
BOOST_CHECK(uuid1 != uuid3);
BOOST_CHECK(uuid2 != uuid3);
}
void PseudoUuidTest::testParsing()
{
PseudoUuid uuid1("00010203-0405-0607-0809-0A0B0C0D0E0F");
for (int i = 0; i < PseudoUuid::UUID_LENGTH; i++) {
BOOST_CHECK_EQUAL(i, uuid1.getByte(i));
}
PseudoUuid uuid2("00000000-0000-0000-0000-000000000000");
for (int i = 0; i < PseudoUuid::UUID_LENGTH; i++) {
BOOST_CHECK_EQUAL(0, uuid2.getByte(i));
}
PseudoUuid uuid3("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF");
for (int i = 0; i < PseudoUuid::UUID_LENGTH; i++) {
BOOST_CHECK_EQUAL((uint8_t)0xff, uuid3.getByte(i));
}
BOOST_CHECK_THROW(
PseudoUuid("bad"), FennelExcn);
BOOST_CHECK_THROW(
PseudoUuid("00112233-44-55-6677-8899-AABBCCDDEEFF"), FennelExcn);
}
void PseudoUuidTest::testFormatting()
{
string exp1 = "12345678-9abc-def0-1234-56789abcdef0";
string exp2 = "00000000-0000-0000-0000-000000000000";
PseudoUuid uuid1(exp1), uuid2(exp2);
string got1 = uuid1.toString();
string got2 = uuid2.toString();
BOOST_CHECK_EQUAL(exp1, got1);
BOOST_CHECK_EQUAL(exp2, got2);
}
void PseudoUuidTest::testCopy()
{
PseudoUuid uuid1("00010203-0405-0607-0809-0A0B0C0D0E0F");
PseudoUuid uuid2(uuid1);
PseudoUuid uuid3 = uuid1;
BOOST_CHECK_EQUAL(uuid1, uuid2);
BOOST_CHECK_EQUAL(uuid1, uuid3);
PseudoUuid uuid4;
uuid4.generateInvalid();
PseudoUuid uuid5 = uuid4;
PseudoUuid uuid6(uuid4);
BOOST_CHECK_EQUAL(uuid4, uuid5);
BOOST_CHECK_EQUAL(uuid4, uuid6);
}
FENNEL_UNIT_TEST_SUITE(PseudoUuidTest)
// End PseudoUuidTest.cpp
|
{"hexsha": "472694931896c10e11f0f01799fae2da8924d3e5", "size": 4253, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "fennel/test/PseudoUuidTest.cpp", "max_stars_repo_name": "alexavila150/luciddb", "max_stars_repo_head_hexsha": "e3125564eb18238677e6efb384b630cab17bb472", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14.0, "max_stars_repo_stars_event_min_datetime": "2015-07-21T06:31:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-13T14:18:33.000Z", "max_issues_repo_path": "fennel/test/PseudoUuidTest.cpp", "max_issues_repo_name": "alexavila150/luciddb", "max_issues_repo_head_hexsha": "e3125564eb18238677e6efb384b630cab17bb472", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-05-04T23:08:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-04T23:08:51.000Z", "max_forks_repo_path": "fennel/test/PseudoUuidTest.cpp", "max_forks_repo_name": "alexavila150/luciddb", "max_forks_repo_head_hexsha": "e3125564eb18238677e6efb384b630cab17bb472", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 22.0, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:27:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T02:09:13.000Z", "avg_line_length": 27.4387096774, "max_line_length": 75, "alphanum_fraction": 0.6959793087, "num_tokens": 1223}
|
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors: Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
class TestCodeOp(unittest.TestCase):
def test(self):
a = jt.random([10])
b = jt.code(a.shape, a.dtype, [a],
cpu_src='''
for (int i=0; i<in0shape0; i++)
@out(i) = @in0(i)*@in0(i)*2;
''',
cpu_grad_src = ['''
for (int i=0; i<in0shape0; i++) {
@out(i) = @dout(i)*@in0(i)*4;
}
'''])
na, nb = jt.fetch_sync([a,b])
assert np.allclose(na*na*2, nb)
c = jt.random([10])
da = jt.grad(c*b, a)
assert np.allclose(c.data*na*4, da.data), (c.data*na*4, da.data)
def test_multi_input(self):
a = jt.random([10])
b = jt.random([10])
c = jt.code(a.shape, a.dtype, [a,b],
cpu_src='''
for (int i=0; i<in0shape0; i++)
@out(i) = @in0(i)*@in1(i);
''',
cpu_grad_src = ['''
for (int i=0; i<in0shape0; i++) {
@out(i) = @dout(i)*@in1(i);
}
''', '''
for (int i=0; i<in0shape0; i++) {
@out(i) = @dout(i)*@in0(i);
}
'''])
da, db = jt.grad(c, [a, b])
assert np.allclose(c.data, a.data*b.data)
assert np.allclose(da.data, b.data)
assert np.allclose(db.data, a.data)
def test_header(self):
a = jt.array([3,2,1])
b = jt.code(a.shape, a.dtype, [a],
header='#include <algorithm>',
cpu_src="""
for (int i=0; i<in0shape0; i++)
@out(i) = @in0(i);
std::sort(&@out(0), &@out(in0shape0));
"""
)
assert (b.data==[1,2,3]).all()
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "b19e5c95327b8877e09f39d5a7fe58fcfd2b150a", "size": 2228, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/jittor/test/test_code_op.py", "max_stars_repo_name": "xmyqsh/jittor", "max_stars_repo_head_hexsha": "1260e19235e301a67cba57aebbc187a5c1386e1a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-02T15:54:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-02T15:54:41.000Z", "max_issues_repo_path": "python/jittor/test/test_code_op.py", "max_issues_repo_name": "xmyqsh/jittor", "max_issues_repo_head_hexsha": "1260e19235e301a67cba57aebbc187a5c1386e1a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/jittor/test/test_code_op.py", "max_forks_repo_name": "xmyqsh/jittor", "max_forks_repo_head_hexsha": "1260e19235e301a67cba57aebbc187a5c1386e1a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2769230769, "max_line_length": 92, "alphanum_fraction": 0.4169658887, "include": true, "reason": "import numpy", "num_tokens": 608}
|
from pyBRML import Array, utils
import pyBRML as brml
import numpy as np
class TestPyBRMLCore:
def test_multiply_potentials(self):
knife_index = [0,2,1]
knife_table = np.zeros((2,2,2))
knife_table[1,0,0] = 0.0
knife_table[1,1,0] = 0.04
knife_table[1,0,1] = 0.64
knife_table[1,1,1] = 0.0
knife_table[0,:,:] = 1 - knife_table[1,:,:]
knife = Array(knife_index, knife_table)
butler = Array([2],[0.4,0.6])
maid = Array([1],[0.2,0.8])
potentials = [knife,butler,maid]
joint = brml.multiply_potentials(potentials).table
correct_prob = 0.9897
num = joint[1,:,1].sum()
den = joint[1,:,:].sum()
prob = num / den
print(prob)
assert np.isclose(correct_prob, prob)
|
{"hexsha": "80816db0710c926e8e911992f36edc04a827499a", "size": 803, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyBRML/tests/test_core.py", "max_stars_repo_name": "anich003/brml_toolkit", "max_stars_repo_head_hexsha": "de8218bdf333902431d4c0055fcf5cb3dc47d0c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyBRML/tests/test_core.py", "max_issues_repo_name": "anich003/brml_toolkit", "max_issues_repo_head_hexsha": "de8218bdf333902431d4c0055fcf5cb3dc47d0c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyBRML/tests/test_core.py", "max_forks_repo_name": "anich003/brml_toolkit", "max_forks_repo_head_hexsha": "de8218bdf333902431d4c0055fcf5cb3dc47d0c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7407407407, "max_line_length": 58, "alphanum_fraction": 0.5691158157, "include": true, "reason": "import numpy", "num_tokens": 261}
|
# Обратная задача динамики
Рассмотрим обратную задачу динамики на примере двузвенного робота:
```python
from sympy import *
t = Symbol("t")
g = Symbol("g")
```
Создадим свое собственное описание положения:
```python
class Position:
def __init__(self, x, y, a):
super(Position, self).__init__()
self.x = x
self.y = y
self.a = a
def __add__(self, other):
c = cos(self.a)
s = sin(self.a)
return Position(
self.x + c * other.x - s * other.y,
self.y + s * other.x + c * other.y,
self.a + other.a
)
```
Тогда положения точек масс будет следущим:
```python
def bone_positions(q, l):
p1 = Position(0, 0, q[0])
p2 = p1 + Position(l[0], 0, q[1])
p3 = p2 + Position(l[1], 0, 0)
return [
p1 + Position(l[0] / 2, 0, 0),
p2 + Position(l[1] / 2, 0, 0),
p3
]
```
Опишем кинетическую энергию:
```python
def kinetic_energy(p, m, j):
vx = diff(p.x, t)
vy = diff(p.y, t)
v = sqrt(vx**2 + vy**2)
omega = diff(p.a, t)
return v**2 * m / 2 + omega**2 * j / 2
```
```python
def total_kinetic_energy(q, l, m, j):
[p1, p2, p3] = bone_positions(q, l)
kin1 = kinetic_energy(p1, m[0], j[0])
kin2 = kinetic_energy(p2, m[1], j[1])
kin3 = kinetic_energy(p3, m[2], j[2])
return kin1 + kin2 + kin3
```
И потенциальную:
```python
def potential_energy(p, m):
return p.y * g * m
```
```python
def total_potential_energy(q, l, m):
[p1, p2, p3] = bone_positions(q, l)
pot1 = potential_energy(p1, m[0])
pot2 = potential_energy(p2, m[1])
pot3 = potential_energy(p3, m[2])
return pot1 + pot2 + pot3
```
Тогда Лагранжиан будет иметь слудующий вид:
```python
def lagrangian(q, l, m, j):
return total_kinetic_energy(q, l, m, j) - total_potential_energy(q, l, m)
```
```python
q1, q2 = symbols("q_1, q_2", cls=Function)
q = [q1(t), q2(t)]
l1, l2 = symbols("l_1, l_2")
l = [l1, l2]
m = symbols("m_1, m_2, m_3")
j = symbols("J_1, J_2, J_3")
```
```python
lagrangian(q, l, m, j).simplify()
```
Силы, необходимые для обеспечения $q$ запишем следующим образом:
$$
Q_i = \frac{\partial}{\partial t} \frac{\partial L}{\partial \dot{q_i}} - \frac{\partial L}{ \partial q_i}
$$
```python
def force_calculation(q, l, m, j):
lgr = lagrangian(q, l, m, j)
force1 = diff(diff(lgr, diff(q[0], t)), t) - diff(lgr, q[0])
force2 = diff(diff(lgr, diff(q[1], t)), t) - diff(lgr, q[1])
return [
force1,
force2
]
```
```python
forces = force_calculation(q, l, m, j)
simplify(
forces[0]
)
```
```python
simplify(
forces[1]
)
```
Тогда, например, для удержания механизма в горизонтальном положении потребуется следующие силы:
```python
forces[0].replace(q[0], 0).replace(q[1], 0).simplify()
```
```python
forces[1].replace(q[0], 0).replace(q[1], 0).simplify()
```
|
{"hexsha": "155d5b3f4fc72209fa57d74d7fadf1973d32e169", "size": 6845, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "7 - Dynamics.ipynb", "max_stars_repo_name": "red-hara/jupyter-dh-notation", "max_stars_repo_head_hexsha": "0ffd305b3e67ce7dd3c20f2d1c719b53251dbf58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "7 - Dynamics.ipynb", "max_issues_repo_name": "red-hara/jupyter-dh-notation", "max_issues_repo_head_hexsha": "0ffd305b3e67ce7dd3c20f2d1c719b53251dbf58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "7 - Dynamics.ipynb", "max_forks_repo_name": "red-hara/jupyter-dh-notation", "max_forks_repo_head_hexsha": "0ffd305b3e67ce7dd3c20f2d1c719b53251dbf58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.9391025641, "max_line_length": 125, "alphanum_fraction": 0.4689554419, "converted": true, "num_tokens": 1102}
|
! SUBROUTINE DDAWTS(RTOL,ATOL)
SUBROUTINE DDAWTS(function_parameter)
! IMPLICIT DOUBLE PRECISION(A-H,O-Z)
! DIMENSION RTOL(*),ATOL(*)
! DIMENSION ATOL(*)
! DIMENSION RTOL(*)
function_variable = function_parameter(1)
10 continue
END
|
{"hexsha": "2bc15dedd0f306623f78d33499b0c10eb8a6048e", "size": 280, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/CompileTests/Fortran_tests/test2007_200.f", "max_stars_repo_name": "maurizioabba/rose", "max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/Fortran_tests/test2007_200.f", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/Fortran_tests/test2007_200.f", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 20.0, "max_line_length": 47, "alphanum_fraction": 0.6321428571, "num_tokens": 84}
|
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2004-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#include <boost/container/detail/config_begin.hpp>
#include <boost/container/vector.hpp>
#include <boost/container/string.hpp>
#include <string>
#include <vector>
#include <algorithm>
#include <cstring>
#include <cstdio>
#include <cstddef>
#include <new>
#include "dummy_test_allocator.hpp"
#include "check_equal_containers.hpp"
#include "expand_bwd_test_allocator.hpp"
#include "expand_bwd_test_template.hpp"
#include "propagate_allocator_test.hpp"
using namespace boost::container;
typedef test::dummy_test_allocator<char> DummyCharAllocator;
typedef basic_string<char, std::char_traits<char>, DummyCharAllocator> DummyString;
typedef test::dummy_test_allocator<DummyString> DummyStringAllocator;
typedef test::dummy_test_allocator<wchar_t> DummyWCharAllocator;
typedef basic_string<wchar_t, std::char_traits<wchar_t>, DummyWCharAllocator> DummyWString;
typedef test::dummy_test_allocator<DummyWString> DummyWStringAllocator;
namespace boost {
namespace container {
//Explicit instantiations of container::basic_string
template class basic_string<char, std::char_traits<char>, DummyCharAllocator>;
template class basic_string<wchar_t, std::char_traits<wchar_t>, DummyWCharAllocator>;
template class basic_string<char, std::char_traits<char>, test::simple_allocator<char> >;
template class basic_string<wchar_t, std::char_traits<wchar_t>, test::simple_allocator<wchar_t> >;
template class basic_string<char, std::char_traits<char>, std::allocator<char> >;
template class basic_string<wchar_t, std::char_traits<wchar_t>, std::allocator<wchar_t> >;
//Explicit instantiation of container::vectors of container::strings
template class vector<DummyString, DummyStringAllocator>;
template class vector<DummyWString, DummyWStringAllocator>;
}}
struct StringEqual
{
template<class Str1, class Str2>
bool operator ()(const Str1 &string1, const Str2 &string2) const
{
if(string1.size() != string2.size())
return false;
return std::char_traits<typename Str1::value_type>::compare
(string1.c_str(), string2.c_str(), string1.size()) == 0;
}
};
//Function to check if both lists are equal
template<class StrVector1, class StrVector2>
bool CheckEqualStringVector(StrVector1 *strvect1, StrVector2 *strvect2)
{
StringEqual comp;
return std::equal(strvect1->begin(), strvect1->end(),
strvect2->begin(), comp);
}
template<class CharType>
struct string_literals;
template<>
struct string_literals<char>
{
static const char *String()
{ return "String"; }
static const char *Prefix()
{ return "Prefix"; }
static const char *Suffix()
{ return "Suffix"; }
static const char *LongString()
{ return "LongLongLongLongLongLongLongLongLongLongLongLongLongString"; }
static char Char()
{ return 'C'; }
static void sprintf_number(char *buf, int number)
{
std::sprintf(buf, "%i", number);
}
};
template<>
struct string_literals<wchar_t>
{
static const wchar_t *String()
{ return L"String"; }
static const wchar_t *Prefix()
{ return L"Prefix"; }
static const wchar_t *Suffix()
{ return L"Suffix"; }
static const wchar_t *LongString()
{ return L"LongLongLongLongLongLongLongLongLongLongLongLongLongString"; }
static wchar_t Char()
{ return L'C'; }
static void sprintf_number(wchar_t *buffer, unsigned int number)
{
//For compilers without wsprintf, print it backwards
const wchar_t *digits = L"0123456789";
wchar_t *buf = buffer;
while(1){
int rem = number % 10;
number = number / 10;
*buf = digits[rem];
++buf;
if(!number){
*buf = 0;
break;
}
}
}
};
template<class CharType>
int string_test()
{
typedef std::basic_string<CharType> StdString;
typedef vector<StdString> StdStringVector;
typedef basic_string<CharType> BoostString;
typedef vector<BoostString> BoostStringVector;
const int MaxSize = 100;
//Create shared memory
{
BoostStringVector *boostStringVect = new BoostStringVector;
StdStringVector *stdStringVect = new StdStringVector;
BoostString auxBoostString;
StdString auxStdString(StdString(auxBoostString.begin(), auxBoostString.end() ));
CharType buffer [20];
//First, push back
for(int i = 0; i < MaxSize; ++i){
auxBoostString = string_literals<CharType>::String();
auxStdString = string_literals<CharType>::String();
string_literals<CharType>::sprintf_number(buffer, i);
auxBoostString += buffer;
auxStdString += buffer;
boostStringVect->push_back(auxBoostString);
stdStringVect->push_back(auxStdString);
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)){
return 1;
}
//Now push back moving
for(int i = 0; i < MaxSize; ++i){
auxBoostString = string_literals<CharType>::String();
auxStdString = string_literals<CharType>::String();
string_literals<CharType>::sprintf_number(buffer, i);
auxBoostString += buffer;
auxStdString += buffer;
boostStringVect->push_back(boost::move(auxBoostString));
stdStringVect->push_back(auxStdString);
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)){
return 1;
}
//push front
for(int i = 0; i < MaxSize; ++i){
auxBoostString = string_literals<CharType>::String();
auxStdString = string_literals<CharType>::String();
string_literals<CharType>::sprintf_number(buffer, i);
auxBoostString += buffer;
auxStdString += buffer;
boostStringVect->insert(boostStringVect->begin(), auxBoostString);
stdStringVect->insert(stdStringVect->begin(), auxStdString);
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)){
return 1;
}
//Now push front moving
for(int i = 0; i < MaxSize; ++i){
auxBoostString = string_literals<CharType>::String();
auxStdString = string_literals<CharType>::String();
string_literals<CharType>::sprintf_number(buffer, i);
auxBoostString += buffer;
auxStdString += buffer;
boostStringVect->insert(boostStringVect->begin(), boost::move(auxBoostString));
stdStringVect->insert(stdStringVect->begin(), auxStdString);
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)){
return 1;
}
//Now test long and short representation swapping
//Short first
auxBoostString = string_literals<CharType>::String();
auxStdString = string_literals<CharType>::String();
BoostString boost_swapper;
StdString std_swapper;
boost_swapper.swap(auxBoostString);
std_swapper.swap(auxStdString);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
if(!StringEqual()(boost_swapper, std_swapper))
return 1;
boost_swapper.swap(auxBoostString);
std_swapper.swap(auxStdString);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
if(!StringEqual()(boost_swapper, std_swapper))
return 1;
//Shrink_to_fit
auxBoostString.shrink_to_fit();
StdString(auxStdString).swap(auxStdString);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
//Reserve + shrink_to_fit
auxBoostString.reserve(boost_swapper.size()*2+1);
auxStdString.reserve(std_swapper.size()*2+1);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
auxBoostString.shrink_to_fit();
StdString(auxStdString).swap(auxStdString);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
//Long string
auxBoostString = string_literals<CharType>::LongString();
auxStdString = string_literals<CharType>::LongString();
boost_swapper = BoostString();
std_swapper = StdString();
boost_swapper.swap(auxBoostString);
std_swapper.swap(auxStdString);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
if(!StringEqual()(boost_swapper, std_swapper))
return 1;
boost_swapper.swap(auxBoostString);
std_swapper.swap(auxStdString);
//Shrink_to_fit
auxBoostString.shrink_to_fit();
StdString(auxStdString).swap(auxStdString);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
auxBoostString.clear();
auxStdString.clear();
auxBoostString.shrink_to_fit();
StdString(auxStdString).swap(auxStdString);
if(!StringEqual()(auxBoostString, auxStdString))
return 1;
//No sort
std::sort(boostStringVect->begin(), boostStringVect->end());
std::sort(stdStringVect->begin(), stdStringVect->end());
if(!CheckEqualStringVector(boostStringVect, stdStringVect)) return 1;
const CharType *prefix = string_literals<CharType>::Prefix();
const int prefix_size = std::char_traits<CharType>::length(prefix);
const CharType *sufix = string_literals<CharType>::Suffix();
for(int i = 0; i < MaxSize; ++i){
(*boostStringVect)[i].append(sufix);
(*stdStringVect)[i].append(sufix);
(*boostStringVect)[i].insert((*boostStringVect)[i].begin(),
prefix, prefix + prefix_size);
(*stdStringVect)[i].insert((*stdStringVect)[i].begin(),
prefix, prefix + prefix_size);
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)) return 1;
for(int i = 0; i < MaxSize; ++i){
std::reverse((*boostStringVect)[i].begin(), (*boostStringVect)[i].end());
std::reverse((*stdStringVect)[i].begin(), (*stdStringVect)[i].end());
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)) return 1;
for(int i = 0; i < MaxSize; ++i){
std::reverse((*boostStringVect)[i].begin(), (*boostStringVect)[i].end());
std::reverse((*stdStringVect)[i].begin(), (*stdStringVect)[i].end());
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)) return 1;
for(int i = 0; i < MaxSize; ++i){
std::sort(boostStringVect->begin(), boostStringVect->end());
std::sort(stdStringVect->begin(), stdStringVect->end());
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)) return 1;
for(int i = 0; i < MaxSize; ++i){
(*boostStringVect)[i].replace((*boostStringVect)[i].begin(),
(*boostStringVect)[i].end(),
string_literals<CharType>::String());
(*stdStringVect)[i].replace((*stdStringVect)[i].begin(),
(*stdStringVect)[i].end(),
string_literals<CharType>::String());
}
if(!CheckEqualStringVector(boostStringVect, stdStringVect)) return 1;
boostStringVect->erase(std::unique(boostStringVect->begin(), boostStringVect->end()),
boostStringVect->end());
stdStringVect->erase(std::unique(stdStringVect->begin(), stdStringVect->end()),
stdStringVect->end());
if(!CheckEqualStringVector(boostStringVect, stdStringVect)) return 1;
//Check addition
{
typedef std::basic_string<CharType> StdString;
typedef basic_string<CharType> BoostString;
BoostString bs2 = string_literals<CharType>::String();
StdString ss2 = string_literals<CharType>::String();
BoostString bs3 = string_literals<CharType>::Suffix();
StdString ss3 = string_literals<CharType>::Suffix();
BoostString bs4 = bs2 + bs3;
StdString ss4 = ss2 + ss3;
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs4 = bs2 + BoostString();
ss4 = ss2 + StdString();
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs4 = BoostString() + bs2;
ss4 = StdString() + ss2;
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs4 = BoostString() + boost::move(bs2);
ss4 = StdString() + boost::move(ss2);
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs2 = string_literals<CharType>::String();
ss2 = string_literals<CharType>::String();
bs4 = boost::move(bs2) + BoostString();
ss4 = boost::move(ss2) + StdString();
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs2 = string_literals<CharType>::String();
ss2 = string_literals<CharType>::String();
bs4 = string_literals<CharType>::Prefix() + boost::move(bs2);
ss4 = string_literals<CharType>::Prefix() + boost::move(ss2);
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs2 = string_literals<CharType>::String();
ss2 = string_literals<CharType>::String();
bs4 = boost::move(bs2) + string_literals<CharType>::Suffix();
ss4 = boost::move(ss2) + string_literals<CharType>::Suffix();
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs2 = string_literals<CharType>::String();
ss2 = string_literals<CharType>::String();
bs4 = string_literals<CharType>::Prefix() + bs2;
ss4 = string_literals<CharType>::Prefix() + ss2;
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs2 = string_literals<CharType>::String();
ss2 = string_literals<CharType>::String();
bs4 = bs2 + string_literals<CharType>::Suffix();
ss4 = ss2 + string_literals<CharType>::Suffix();
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs2 = string_literals<CharType>::String();
ss2 = string_literals<CharType>::String();
bs4 = string_literals<CharType>::Char() + bs2;
ss4 = string_literals<CharType>::Char() + ss2;
if(!StringEqual()(bs4, ss4)){
return 1;
}
bs2 = string_literals<CharType>::String();
ss2 = string_literals<CharType>::String();
bs4 = bs2 + string_literals<CharType>::Char();
ss4 = ss2 + string_literals<CharType>::Char();
if(!StringEqual()(bs4, ss4)){
return 1;
}
}
//When done, delete vector
delete boostStringVect;
delete stdStringVect;
}
return 0;
}
bool test_expand_bwd()
{
//Now test all back insertion possibilities
typedef test::expand_bwd_test_allocator<char>
allocator_type;
typedef basic_string<char, std::char_traits<char>, allocator_type>
string_type;
return test::test_all_expand_bwd<string_type>();
}
template<class T, class A>
class string_propagate_test_wrapper
: public basic_string<T, std::char_traits<T>, A>
{
BOOST_COPYABLE_AND_MOVABLE(string_propagate_test_wrapper)
typedef basic_string<T, std::char_traits<T>, A> Base;
public:
string_propagate_test_wrapper()
: Base()
{}
string_propagate_test_wrapper(const string_propagate_test_wrapper &x)
: Base(x)
{}
string_propagate_test_wrapper(BOOST_RV_REF(string_propagate_test_wrapper) x)
: Base(boost::move(static_cast<Base&>(x)))
{}
string_propagate_test_wrapper &operator=(BOOST_COPY_ASSIGN_REF(string_propagate_test_wrapper) x)
{ this->Base::operator=(x); return *this; }
string_propagate_test_wrapper &operator=(BOOST_RV_REF(string_propagate_test_wrapper) x)
{ this->Base::operator=(boost::move(static_cast<Base&>(x))); return *this; }
void swap(string_propagate_test_wrapper &x)
{ this->Base::swap(x); }
};
int main()
{
if(string_test<char>()){
return 1;
}
if(string_test<wchar_t>()){
return 1;
}
if(!test_expand_bwd())
return 1;
if(!boost::container::test::test_propagate_allocator<string_propagate_test_wrapper>())
return 1;
return 0;
}
#include <boost/container/detail/config_end.hpp>
|
{"hexsha": "33f268336ddf83392f374cbd26be6fe7eb8f2137", "size": 17137, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/container/test/string_test.cpp", "max_stars_repo_name": "jmuskaan72/Boost", "max_stars_repo_head_hexsha": "047e36c01841a8cd6a5c74d4e3034da46e327bc1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 198.0, "max_stars_repo_stars_event_min_datetime": "2015-01-13T05:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:46:46.000Z", "max_issues_repo_path": "libs/container/test/string_test.cpp", "max_issues_repo_name": "xiaoliang2121/Boost", "max_issues_repo_head_hexsha": "fc90c3fde129c62565c023f091eddc4a7ed9902b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2015-03-19T08:23:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-24T07:48:47.000Z", "max_forks_repo_path": "libs/container/test/string_test.cpp", "max_forks_repo_name": "xiaoliang2121/Boost", "max_forks_repo_head_hexsha": "fc90c3fde129c62565c023f091eddc4a7ed9902b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 139.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T20:09:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T15:21:16.000Z", "avg_line_length": 35.1889117043, "max_line_length": 100, "alphanum_fraction": 0.6134679349, "num_tokens": 3990}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 2 15:36:28 2017
@author: Anderson Banihirwe
Simple Particle Swarm Optimization (PSO)
"""
import random
import math
import time
import numpy as np
#---------- COST FUNCTION -----------------------------#
# function to optimize (minimize)
def cost_func(x):
total = 0.0
for i in range(len(x)):
total += x[i]**2
return total
#------------MAIN----------------------------------------#
class Particle:
def __init__(self, x0):
self.position_i = [] # particle position
self.velocity_i = [] # particle velocity
self.pos_best_i = [] # best position individual
self.error_best_i = -1 # best error individual
self.error_i = -1 # error individual
for i in range(0, ndims):
self.velocity_i.append(random.uniform(-1,1))
self.position_i.append(x0[i])
# evaluate current fitness
def evaluate_fitness(self, costFunc):
self.error_i = costFunc(self.position_i)
# Check to see if the current position is an individual best
if self.error_i < self.error_best_i or self.error_best_i == -1:
self.pos_best_i = self.position_i
self.error_best_i = self.error_i
# update new particle velocity
def update_velocity(self, pos_best_g):
w = 0.5 # constant inertia weight (how much to weigh the previous velocity)
c1 = 1 # cognitive constant
c2 = 2 # social constant
for i in range(0, ndims):
r1 = random.random()
r2 = random.random()
vel_cognitive = c1 * r1 * (self.pos_best_i[i] - self.position_i[i])
vel_social = c2 * r2 * (pos_best_g[i] - self.position_i[i])
self.velocity_i[i] = w * self.velocity_i[i] + vel_cognitive + vel_social
# update the particle position based off new velocity updates
def update_position(self, bounds):
for i in range(0, ndims):
self.position_i[i] += self.velocity_i[i]
# adjust maximum position if necessary
if self.position_i[i] > bounds[i][1]:
self.position_i[i] = bounds[i][1]
# adjust minimum position if necessary
if self.position_i[i] < bounds[i][0]:
self.position_i[i] = bounds[i][0]
class PSO():
def __init__(self, costFunc, x0, bounds, nparticles, maxiter):
global ndims
ndims = len(x0)
error_best_g = -1 # best error for group
pos_best_g = [] # best position for group
# establish the swarm
swarm = []
for i in range(0, nparticles):
swarm.append(Particle(x0))
# Find the time it takes to find the solution
t0 = time.clock()
# Begin optimization loop
i = 0
while i < maxiter:
# cycle through particles in swarm and evaluate fitness
for j in range(0, nparticles):
swarm[j].evaluate_fitness(costFunc)
# Determine if current particle is globally the best
if swarm[j].error_i < error_best_g or error_best_g == -1:
pos_best_g = list(swarm[j].position_i)
error_best_g = float(swarm[j].error_i)
# cycle through swarm and update velocities and positions
for j in range(0, nparticles):
swarm[j].update_velocity(pos_best_g)
swarm[j].update_position(bounds)
i+= 1
t1 = time.clock()
# Final Results
print("---Optimal solution found in {:.3f} secs for {} particles\n".format(t1 - t0, nparticles))
print("Best position : {}".format(pos_best_g))
print("Best Error : {}".format(error_best_g))
print("-------------------------------------------------------")
if __name__ == "__main__":
initial = [5, 5]
bounds = [(-10, 10), (-10, 10)]
PSO(cost_func, initial, bounds, nparticles=1000, maxiter=100)
|
{"hexsha": "439175715410eb88e031dc56443461452625ff61", "size": 4520, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/TSP/pyswarm.py", "max_stars_repo_name": "andersy005/artificial-intelligence", "max_stars_repo_head_hexsha": "dcf5ebb1959835aee7dacdb5a2cea14790f2cf01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2017-08-31T23:27:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T13:20:10.000Z", "max_issues_repo_path": "projects/TSP/pyswarm.py", "max_issues_repo_name": "andersy005/artificial-intelligence", "max_issues_repo_head_hexsha": "dcf5ebb1959835aee7dacdb5a2cea14790f2cf01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projects/TSP/pyswarm.py", "max_forks_repo_name": "andersy005/artificial-intelligence", "max_forks_repo_head_hexsha": "dcf5ebb1959835aee7dacdb5a2cea14790f2cf01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-01-01T13:39:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-26T14:41:39.000Z", "avg_line_length": 33.4814814815, "max_line_length": 104, "alphanum_fraction": 0.5048672566, "include": true, "reason": "import numpy", "num_tokens": 993}
|
#!/usr/bin/env python3
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
import argparse
import logging
import os
import math
from tqdm import tqdm
import numpy as np
from PIL import Image
from typing import List, Optional
import cv2
from enum import auto
from functools import lru_cache
import path_to_kapture_localization # noqa: F401
import kapture_localization.utils.logging
from kapture_localization.utils.cv_camera_matrix import get_camera_matrix_from_kapture
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
import kapture.utils.logging
from kapture.io.csv import kapture_from_dir, kapture_to_dir, get_all_tar_handlers
from kapture.io.records import depth_map_from_file, get_image_fullpath, get_depth_map_fullpath
from kapture.io.tar import TarCollection
from kapture.utils.Collections import try_get_only_key_from_collection
from kapture.utils import AutoEnum
from kapture.io.features import get_keypoints_fullpath, image_keypoints_from_file
logger = logging.getLogger("create_3D_model_from_depth")
@lru_cache(maxsize=50)
def load_keypoints(keypoints_type, input_path, image_name, dtype, dsize, tar_handlers):
keypoints_path = get_keypoints_fullpath(keypoints_type, input_path, image_name, tar_handlers)
return image_keypoints_from_file(keypoints_path, dtype, dsize)
class Method(AutoEnum):
voxelgrid = auto()
all = auto()
def __str__(self):
return self.value
class VoxelGrid:
def __init__(self, cellsizes):
self.cellsizes = list(map(float, cellsizes))
self.levels = len(self.cellsizes)
self.min_cellsize = self.cellsizes[self.levels - 1]
self.cells = {}
self.grids = {}
self.count = self.levels * [0]
def get_voxelgrid_cell(self, pt, cellsize):
x = math.floor(pt[0] / cellsize)
y = math.floor(pt[1] / cellsize)
z = math.floor(pt[2] / cellsize)
return (x, y, z)
def append(self, pt, img_name):
idx = self.get_voxelgrid_cell(pt, self.min_cellsize)
if img_name not in self.cells[idx][1]:
self.cells[idx][1].append(img_name)
return self.cells[idx]
return None
def add(self, pt, pt_idx, img_name):
idx = self.get_voxelgrid_cell(pt, self.min_cellsize)
self.cells[idx] = (pt_idx, [img_name])
def create_indices(self, pt):
idx = []
for i in range(0, self.levels):
idx.append(self.get_voxelgrid_cell(pt, self.cellsizes[i]))
return idx
def set(self, grids, idx, count):
if count == self.levels - 1:
if not idx[count] in grids:
grids[idx[count]] = 1
self.count[count] += 1
return False
else:
return True
else:
if not idx[count] in grids:
grids[idx[count]] = {}
self.count[count] += 1
return self.set(grids[idx[count]], idx, count + 1)
def exists(self, pt):
idx = self.create_indices(pt)
return self.set(self.grids, idx, 0)
def print(self):
str = ''
for i, c in enumerate(self.count):
str += f'{i}: {c} '
logger.info(str)
def project_kp_to_3D(u, v, d, cx, cy, fx, fy):
x = d * ((u - cx) / fx)
y = d * ((v - cy) / fy)
return (x, y, d)
def create_3D_model_from_depth(input_path: str,
output_path: str,
keypoints_type: Optional[str],
depth_sensor_id: str,
topk: int,
method: Method,
cellsizes: List[str],
force: bool):
"""
Create 3D model from a kapture dataset that has registered depth data
Loads the kapture data then call create_3D_model_from_depth_from_loaded_data
"""
if os.path.exists(output_path) and not force:
print(f'outpath already exists, use --force to overwrite')
return -1
logger.info(f'loading {input_path}')
with get_all_tar_handlers(input_path,
mode={kapture.Keypoints: 'r',
kapture.Descriptors: 'r',
kapture.GlobalFeatures: 'r',
kapture.Matches: 'a'}) as tar_handlers:
kdata = kapture_from_dir(input_path, tar_handlers=tar_handlers)
create_3D_model_from_depth_from_loaded_data(kdata, input_path, tar_handlers,
output_path, keypoints_type,
depth_sensor_id, topk,
method, cellsizes, force)
def create_3D_model_from_depth_from_loaded_data(kdata: kapture.Kapture,
input_path: str,
tar_handlers: TarCollection,
output_path: str,
keypoints_type: Optional[str],
depth_sensor_id: str,
topk: int,
method: Method,
cellsizes: List[str],
force: bool):
"""
Create 3D model from a kapture dataset that has registered depth data
Assumes the kapture data is already loaded
"""
logger.info(f'create 3D model using depth data')
if os.path.exists(output_path) and not force:
print(f'outpath already exists, use --force to overwrite')
return -1
if kdata.rigs is not None:
assert kdata.trajectories is not None
kapture.rigs_remove_inplace(kdata.trajectories, kdata.rigs)
if keypoints_type is None:
keypoints_type = try_get_only_key_from_collection(kdata.keypoints)
assert keypoints_type is not None
assert kdata.keypoints is not None
assert keypoints_type in kdata.keypoints
if method == Method.voxelgrid:
vg = VoxelGrid(cellsizes)
# add all 3D points to map that correspond to a keypoint
logger.info('adding points from scan to kapture')
points3d = []
observations = kapture.Observations()
progress_bar = tqdm(total=len(list(kapture.flatten(kdata.records_camera, is_sorted=True))),
disable=logger.level >= logging.CRITICAL)
for timestamp, sensor_id, sensing_filepath in kapture.flatten(kdata.records_camera, is_sorted=True):
logger.info(f'total 3d points: {len(points3d)}, processing {sensing_filepath}')
# check if images have a pose
if timestamp not in kdata.trajectories:
logger.info('{} does not have a pose. skipping ...'.format(sensing_filepath))
continue
# check if depth map exists
depth_map_record = ''
if timestamp in kdata.records_depth:
if depth_sensor_id is None:
depth_id = sensor_id + '_depth'
else:
depth_id = depth_sensor_id
if depth_id in kdata.records_depth[timestamp]:
depth_map_record = kdata.records_depth[timestamp][depth_id]
depth_map_size = tuple([int(x) for x in kdata.sensors[depth_id].camera_params[0:2]])
depth_path = get_depth_map_fullpath(input_path, depth_map_record)
if not os.path.exists(depth_path):
logger.info('no 3D data found for {}. skipping ...'.format(sensing_filepath))
continue
depth_map = depth_map_from_file(depth_path, depth_map_size)
img = Image.open(get_image_fullpath(input_path, sensing_filepath)).convert(
'RGB')
assert img.size[0] == depth_map_size[0]
assert img.size[1] == depth_map_size[1]
kps_raw = load_keypoints(keypoints_type, input_path,
sensing_filepath,
kdata.keypoints[keypoints_type].dtype,
kdata.keypoints[keypoints_type].dsize,
tar_handlers)
_, camera_sensor_C, camera_dist = get_camera_matrix_from_kapture(np.zeros((1, 0, 2), dtype=np.float64),
kdata.sensors[sensor_id])
cv2_keypoints, depth_sensor_C, depth_dist = get_camera_matrix_from_kapture(kps_raw, kdata.sensors[depth_id])
assert np.isclose(depth_sensor_C, camera_sensor_C).all()
assert np.isclose(depth_dist, camera_dist).all()
if np.count_nonzero(camera_dist) > 0:
epsilon = np.finfo(np.float64).eps
stop_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 500, epsilon)
undistorted_cv2_keypoints = cv2.undistortPointsIter(cv2_keypoints, camera_sensor_C, camera_dist,
R=None, P=camera_sensor_C,
criteria=stop_criteria)
else:
undistorted_cv2_keypoints = cv2_keypoints
cv2_keypoints = cv2_keypoints.reshape((kps_raw.shape[0], 2))
undistorted_cv2_keypoints = undistorted_cv2_keypoints.reshape((kps_raw.shape[0], 2))
points3d_img = []
rgb_img = []
kp_idxs = []
for idx_kp, kp in enumerate(cv2_keypoints[0:topk]):
u = round(kp[0])
v = round(kp[1])
undist_kp = undistorted_cv2_keypoints[idx_kp]
undist_u = round(undist_kp[0])
undist_v = round(undist_kp[1])
if u >= 0 and u < depth_map_size[0] and v >= 0 and v < depth_map_size[1]:
if depth_map[v, u] == 0:
continue
pt3d = project_kp_to_3D(undist_u, undist_v, depth_map[v, u],
depth_sensor_C[0, 2], depth_sensor_C[1, 2],
depth_sensor_C[0, 0], depth_sensor_C[1, 1])
points3d_img.append(pt3d)
rgb_img.append(img.getpixel((u, v)))
kp_idxs.append(idx_kp)
# transform to world coordinates (pt3d from a depth map is in camera coordinates)
# we use sensor_id here because we assume that the image and the corresponding depthmap have the same pose
# and sometimes, the pose might only be provided for the images
cam_to_world = kdata.trajectories[timestamp][sensor_id].inverse()
if len(points3d_img) == 0:
continue
points3d_img = cam_to_world.transform_points(np.array(points3d_img))
for idx_kp, pt3d, rgb in zip(kp_idxs, points3d_img, rgb_img):
if not np.isnan(pt3d).any():
# apply transform (alignment)
if method == Method.voxelgrid:
assert vg is not None
if not vg.exists(pt3d):
# add 3D point
points3d.append(list(pt3d) + list(rgb))
# add observation
observations.add(len(points3d) - 1, keypoints_type, sensing_filepath, idx_kp)
vg.add(pt3d, len(points3d) - 1, sensing_filepath)
else:
ret = vg.append(pt3d, sensing_filepath)
if ret is not None:
observations.add(ret[0], keypoints_type, sensing_filepath, idx_kp)
elif method == Method.all:
# add 3D point
points3d.append(list(pt3d) + list(rgb))
# add observation
observations.add(len(points3d) - 1, keypoints_type, sensing_filepath, idx_kp)
# save_3Dpts_to_ply(points3d, os.path.join(output_path, 'map.ply'))
progress_bar.update(1)
progress_bar.close()
kdata.points3d = kapture.Points3d(np.array(points3d))
kdata.observations = observations
logger.info('saving ...')
kapture_to_dir(output_path, kdata)
# save_3Dpts_to_ply(points3d, os.path.join(output_path, 'map.ply'))
logger.info('all done')
def create_3D_model_from_depth_command_line():
"""
build the argparse for create_3D_model_from_depth then call it
"""
parser = argparse.ArgumentParser(
description='Create 3D model from a kapture dataset that has registered depth data.')
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument('-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument('-q', '--silent', '--quiet',
action='store_const', dest='verbose', const=logging.CRITICAL)
parser.add_argument('-i', '--input', required=True,
help=('input path to kapture dataset'))
parser.add_argument('-o', '--output', required=True,
help=('output path to kapture dataset'))
parser.add_argument('--keypoints-type', default=None, help='kapture keypoints type.')
parser.add_argument('-d', '--depth', default=None,
help=('depth sensor kapture id: if None, '
'then camera_id + _depth will be used; such as ipad0_depth'))
parser.add_argument('-k', '--topk', required=False, default=20000, type=int,
help=('number of keypoints to use.'))
parser.add_argument('--cellsizes', nargs='+', default=["10", "1", "0.01"],
help='cell sizes for hierarchical search')
parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
help='Force delete output directory if already exists')
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.DEBUG:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
kapture_localization.utils.logging.getLogger().setLevel(args.verbose)
logger.debug(''.join(['\n\t{:13} = {}'.format(k, v)
for k, v in vars(args).items()]))
create_3D_model_from_depth(args.input, args.output, args.keypoints_type,
args.depth, args.topk,
Method.voxelgrid, args.cellsizes, args.force)
if __name__ == '__main__':
create_3D_model_from_depth_command_line()
|
{"hexsha": "12426e04cc883745e179601ccf8d68acfc5e04fe", "size": 14785, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/kapture_create_3D_model_from_depth.py", "max_stars_repo_name": "jkabalar/kapture-localization", "max_stars_repo_head_hexsha": "647ef7cfdfbdac37297682baca1bf13608b6d6e8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 118, "max_stars_repo_stars_event_min_datetime": "2020-11-04T16:48:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T13:15:37.000Z", "max_issues_repo_path": "tools/kapture_create_3D_model_from_depth.py", "max_issues_repo_name": "jkabalar/kapture-localization", "max_issues_repo_head_hexsha": "647ef7cfdfbdac37297682baca1bf13608b6d6e8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2020-10-19T09:01:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T09:12:31.000Z", "max_forks_repo_path": "tools/kapture_create_3D_model_from_depth.py", "max_forks_repo_name": "jkabalar/kapture-localization", "max_forks_repo_head_hexsha": "647ef7cfdfbdac37297682baca1bf13608b6d6e8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2020-11-25T05:28:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T07:20:23.000Z", "avg_line_length": 44.2664670659, "max_line_length": 118, "alphanum_fraction": 0.5870815015, "include": true, "reason": "import numpy", "num_tokens": 3244}
|
import logging
log = logging.getLogger(__name__)
def generate_data_dict(dataset, source, name='dict', verbose=False):
import numpy as np
import theano
dtype = theano.config.floatX
# get data into a dict, need to use the full dataset (no subset!)
state = dataset.open()
request = slice(0, dataset.num_examples)
data_dict = dataset.get_data(request=request)[dataset.sources.index(source)]
dataset.close(state)
# FIXME: move this to original dataset generator code
#data_dict = np.rollaxis(data_dict, 3, 1) # convert b01c format into bc01 format
shape = data_dict.shape
data_dict = theano.shared(theano._asarray(data_dict, dtype=dtype), # for GPU usage
name=name, borrow=False)
if verbose:
log.debug('generated data dict "{}", shape={}, type={}'
.format(data_dict, shape, data_dict.type))
return data_dict
|
{"hexsha": "9b3426d322129cdfc7333999672103231b66a411", "size": 923, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepthought/bricks/data_dict.py", "max_stars_repo_name": "maosenGao/openmiir-rl-2016", "max_stars_repo_head_hexsha": "d2e5744b1fa503a896994d8a70b3ca45d521db14", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-01-27T11:08:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T15:54:39.000Z", "max_issues_repo_path": "deepthought/bricks/data_dict.py", "max_issues_repo_name": "maosenGao/openmiir-rl-2016", "max_issues_repo_head_hexsha": "d2e5744b1fa503a896994d8a70b3ca45d521db14", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deepthought/bricks/data_dict.py", "max_forks_repo_name": "maosenGao/openmiir-rl-2016", "max_forks_repo_head_hexsha": "d2e5744b1fa503a896994d8a70b3ca45d521db14", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-01-26T23:49:55.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-30T05:31:35.000Z", "avg_line_length": 34.1851851852, "max_line_length": 87, "alphanum_fraction": 0.6684723727, "include": true, "reason": "import numpy,import theano", "num_tokens": 212}
|
import sklearn.cluster
from kmeans_gap import GAP
import grace
import grace.mask
import numpy as np
import sys
parallel = int(sys.argv[1] if (len(sys.argv) > 1) else 1)
if __name__=='__main__':
shape = grace.grids.shape
X = grace.grids.reshape(shape[0] * shape[1], shape[2])
mask = grace.mask.world().reshape(shape[0] * shape[1])
X = X[mask, :]
optimizer = GAP(verbose=True)
estimator = sklearn.cluster.KMeans(n_init=1, n_jobs=parallel)
optimizer.calculate(X, estimator, sims=20, ks=range(1,21))
np.savez('HPC-output/gap.npz', **optimizer.dump())
(K, G, sd) = optimizer.optimal()
print "Optimal amount of clusters: %d" % (K)
|
{"hexsha": "c93674f8b241375f9622e15ee5b80e96c86806b6", "size": 641, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/kmeans_gap_job.py", "max_stars_repo_name": "AndreasMadsen/grace", "max_stars_repo_head_hexsha": "bf472d30a2fac76145d3f68e819c92da4a1970ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-05-17T22:52:19.000Z", "max_stars_repo_stars_event_max_datetime": "2016-05-17T22:52:19.000Z", "max_issues_repo_path": "Code/kmeans_gap_job.py", "max_issues_repo_name": "AndreasMadsen/grace", "max_issues_repo_head_hexsha": "bf472d30a2fac76145d3f68e819c92da4a1970ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/kmeans_gap_job.py", "max_forks_repo_name": "AndreasMadsen/grace", "max_forks_repo_head_hexsha": "bf472d30a2fac76145d3f68e819c92da4a1970ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.64, "max_line_length": 62, "alphanum_fraction": 0.7004680187, "include": true, "reason": "import numpy", "num_tokens": 195}
|
#include <boost/python/converter/arg_to_python_base.hpp>
|
{"hexsha": "250c907857fc0d9dfe2a93bd951bb4739e93c847", "size": 57, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_python_converter_arg_to_python_base.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_python_converter_arg_to_python_base.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_python_converter_arg_to_python_base.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 28.5, "max_line_length": 56, "alphanum_fraction": 0.8421052632, "num_tokens": 13}
|
using Random
using LinearAlgebra, Krylov
using Plots
using RandomizedLasso
const RL = RandomizedLasso
## Compare "best" vs random preconditioner on random example
# Data
n, r = 1000, 500
A = randn(n, r)
A = A*A'
μ = 1e-2
xtrue = randn(n)
b = A*xtrue
D, V = eigen(A)
function true_preconditioner(k, D, V, μ)
return (D[k+1] + μ)*V*Diagonal(1.0 ./ (D .+ μ))*V' + (I - V*V')
end
function cg_iters(A, b, P)
_, stats = cg(A, b; history=true, M = P)
!stats.solved && @warn "Did not correctly solve CG!!!"
return length(stats.residuals)
end
nys_iters = zeros(length(ks))
true_iters = zeros(length(ks))
nopc_iters = ones(length(ks)) * cg_iters(A, b, I)
for (ind, (k, r)) in enumerate(zip(ks, rs))
Anys = RL.NystromApprox(A, k, r)
P = RL.RandomizedNystromPreconditionerInverse(Anys, μ)
nys_iters[ind] = cg_iters(A, b, P)
true_iters[ind] = cg_iters(A, b, true_preconditioner(k, D, V, μ))
r % 100 == 0 && @info "Finished with r = $r"
end
plt_sketch_error = plot(ks[1:60],
[nys_iters[1:60], true_iters[1:60], nopc_iters[1:60]],
dpi=300,
lw=3,
label=["Nystrom Preconditioner" "True Preconditioner" "No Preconditioner"],
ylabel="CG Iters",
xlabel="Rank k",
title="Convergence vs Preconditioner Rank",
legend=:left
)
savefig(plt_sketch_error, joinpath(@__DIR__, "figs/cg_iters.pdf"))
## Real Dataset
using CSV, DataFrames
file = CSV.read("/Users/theodiamandis/Downloads/file7b5323e77330.csv", DataFrame)
M = Matrix(file)
b = M[:, 1]
A = M[:, 2:end]
m, n = size(A)
b = 1/m * A'*b
A = 1/m * A' * A
μ = 1e-3
_, stats = cg(A+μ*I, b; history=true)
npc_res = stats.residuals
nys_res = Vector{Float64}[]
rs = [10, 50, 100, 250, 500, 1000]
for r in rs
k = Int(round(.9r))
Anys = RL.NystromApprox(A, k, r)
P = RL.RandomizedNystromPreconditionerInverse(Anys, μ)
_, stats = cg(A+μ*I, b; history=true, M=P)
push!(nys_res, stats.residuals)
@info "Finished with r = $r"
end
plt_cg_real = plot(
npc_res,
dpi=300,
lw=2,
label="No Preconditioner",
ylabel="residual",
xlabel="iteration",
title="Convergence of CG",
legend=:topright,
yaxis=:log
)
for (ind, y) in enumerate(nys_res)
plot!(plt_cg_real, y, label="Nystrom, r = $(rs[ind])", lw=2)
end
savefig(plt_cg_real, joinpath(@__DIR__, "figs/cg_real_res.pdf"))
RL.deff(A, μ)
|
{"hexsha": "d01c6c129ba962dbc75a848b11beedfcfd89dcca", "size": 2333, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/pcg.jl", "max_stars_repo_name": "tjdiamandis/RandomizedLasso.jl", "max_stars_repo_head_hexsha": "13336c81c82a83b8a5889badae2195fbdd0da0af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/pcg.jl", "max_issues_repo_name": "tjdiamandis/RandomizedLasso.jl", "max_issues_repo_head_hexsha": "13336c81c82a83b8a5889badae2195fbdd0da0af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/pcg.jl", "max_forks_repo_name": "tjdiamandis/RandomizedLasso.jl", "max_forks_repo_head_hexsha": "13336c81c82a83b8a5889badae2195fbdd0da0af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9222222222, "max_line_length": 81, "alphanum_fraction": 0.6446635234, "num_tokens": 835}
|
# Crypto API testing script
import pandas_datareader as web
import pandas as ps
import numpy as np
import matplotlib.pyplot as plt
|
{"hexsha": "5088239026618421a0aab3a3ec20888978aa1e83", "size": 135, "ext": "py", "lang": "Python", "max_stars_repo_path": "crypto.py", "max_stars_repo_name": "andrewbowen19/stonkify", "max_stars_repo_head_hexsha": "31fd9bd8abd04f7f78b149396d4600613734ca0b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "crypto.py", "max_issues_repo_name": "andrewbowen19/stonkify", "max_issues_repo_head_hexsha": "31fd9bd8abd04f7f78b149396d4600613734ca0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crypto.py", "max_forks_repo_name": "andrewbowen19/stonkify", "max_forks_repo_head_hexsha": "31fd9bd8abd04f7f78b149396d4600613734ca0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.5, "max_line_length": 31, "alphanum_fraction": 0.8, "include": true, "reason": "import numpy", "num_tokens": 29}
|
import torch
from torch import Tensor
import torch.nn as nn
import numpy as np
import torchvision
import torchaudio
class ToSampleCoords(nn.Module):
"""
Pytorch module to convert coordinates measured in seconds
into coordinates measured in sample Nos,
Default sample rate is 16kHz unless this is set through cfg.SAMPLE_RATE
"""
def __init__(self, cfg):
super(ToSampleCoords, self).__init__()
self.sample_rate = 16000
if hasattr(cfg, "SAMPLE_RATE"):
self.sample_rate = cfg.SAMPLE_RATE
def forward(self, x, lines=None, labels=None):
if lines is not None:
for idx, annotation in enumerate(lines):
onset = annotation[0]
offset = annotation[1]
new_onset = np.ceil(onset*self.sample_rate)
new_offset = np.floor(offset*self.sample_rate)
lines[idx] = [new_onset, new_offset]
return x, lines, labels
class Differentiate1D(nn.Module):
"""
Pytorch module to discretely differentiate a 1D input tensor
Differentiates by taking tensor[1:end] - tensor[0:end-1]
"""
def __init__(self, cfg):
super(Differentiate1D, self).__init__()
if hasattr(cfg.DIFFERENTIATE, "STEP"):
self.step = cfg.DIFFERENTIATE.STEP
else:
self.step = 1
def forward(self, x, lines = None, labels = None):
minuend = torch.narrow(x, 0, self.step, x.size()[0] - self.step)
subtrahend = torch.narrow(x, 0, 0, x.size()[0] - self.step)
x = minuend - subtrahend
return x, lines, labels
class RandFlip1D(nn.Module):
"""
Pytorch module to "flip" signal along the x-axis
Supports random application through cfg.RAND_FLIP.CHANCE or cfg.CHANCE
defaults to random application with p = 0.5
"""
def __init__(self, cfg):
super(RandFlip1D, self).__init__()
#Code block for setting up random application
if hasattr(cfg.RAND_FLIP, "CHANCE"):
self.chance = cfg.RAND_FLIP.CHANCE
elif hasattr(cfg, "CHANCE"):
self.chance = cfg.CHANCE
else:
self.chance = 0.5
def forward(self, x : Tensor, lines = None, labels = None):
if np.random.uniform() < self.chance:
x = x.mul(-1)
return x, lines, labels
class RandGauss1D(nn.Module):
"""
Pytorch module to add gaussian noise to 1D tensor
Supports random application through cfg.RAND_FLIP.CHANCE or cfg.CHANCE
Also supports noise intensity through INTENSITY
For uniformly distributed intensity, include a RAND to cfg.GAUSS
"""
def __init__(self, cfg):
super(RandGauss1D, self).__init__()
#Code block for random application
if hasattr(cfg.RAND_GAUSS, "CHANCE"):
self.chance = cfg.RAND_GAUSS.CHANCE
elif hasattr(cfg, "CHANCE"):
self.chance = cfg.CHANCE
else:
self.chance = 0.5
self.intensity = 1.0
if hasattr(cfg.RAND_GAUSS, "INTENSITY"):
self.intensity = cfg.RAND_GAUSS.INTENSITY
self.random_intensity = False
if hasattr(cfg.RAND_GAUSS, "RAND"):
self.random_intensity = True
def forward(self, x : Tensor, lines = None, labels = None):
if np.random.uniform() < self.chance:
#noise_factor = std(x) * intensity
noise_factor = x.std() * self.intensity
if self.random_intensity:
noise_factor *= float(np.random.uniform())
#x_i + N(0,1) * noise_factor
x += torch.randn(x.size()) * noise_factor
return x, lines, labels
class RandAmpAtt1D(nn.Module):
"""
Pytorch module to randomly amplify or attenuate signal
Supports random application through cfg.RAND_AMP_ATT.CHANCE or cfg.CHANCE
defaults to random application with p = 0.5
cfg.AMP_ATTEN is required to have parameter "FACTOR"
"""
def __init__(self, cfg):
super(RandAmpAtt1D, self).__init__()
#Code block for setting up random application
if hasattr(cfg.RAND_AMP_ATT, "CHANCE"):
self.chance = cfg.RAND_AMP_ATT.CHANCE
elif hasattr(cfg, "CHANCE"):
self.chance = cfg.CHANCE
else:
self.chance = 0.5
assert hasattr(cfg.RAND_AMP_ATT, "FACTOR"),\
"Transform AmpAtt1D requires parameter cfg.FACTOR"
self.factor = max(1/cfg.RAND_AMP_ATT.FACTOR, cfg.RAND_AMP_ATT.FACTOR)
def forward(self, x : Tensor, lines = None, labels = None):
if np.random.uniform() < self.chance:
factor = np.random.uniform(low = 1/self.factor, high = self.factor)
x.mul(factor)
return x, lines, labels
class RandContrast1D(nn.Module):
"""
Pytorch module to add random contrast to the data
Supports random application through cfg.RAND_CONTRAST.CHANCE or cfg.CHANCE
defaults to random application with p = 0.5
Contrast enhancement amount may range between 0-100
enhancement of 0 still yields a significant contrast enhancement
"""
def __init__(self, cfg):
super(RandContrast1D, self).__init__()
#Code block for setting up random application
if hasattr(cfg.RAND_CONTRAST, "CHANCE"):
self.chance = cfg.RAND_CONTRAST.CHANCE
elif hasattr(cfg, "CHANCE"):
self.chance = cfg.CHANCE
else:
self.chance = 0.5
assert hasattr(cfg.RAND_CONTRAST, "ENHANCE"), "RandContrast1D needs attribute cfg.RAND_CONTRAST.ENHANCE)"
self.enhancement = cfg.RAND_CONTRAST.ENHANCE
if self.enhancement < 0 or self.enhancement > 100:
print(f"enhancement not in 0-100 range, setting to {np.abs(self.enhancemnet % 100)}")
self.enhancement = np.abs(self.enhancement % 100)
def forward(self, x : Tensor, lines = None, labels = None):
if np.random.uniform() < self.chance:
amount = np.random.uniform()*self.enhancement
torchaudio.functional.contrast(waveform=x, enhancement_amount=amount)
return x, lines, labels
class Crop1D(nn.Module):
"""
Pytorch module to crop one dimensional signal
Supports "random" mode and "center" mode through cfg.CROP.TYPE
defaults to random application with p = 1.0 unless cfg.CROP.CHANCE is set
If the chance doesn't activate, it defaults to crop
to the middle of the input time series.
Onset/offset annotations can optionally be added through the lines variable
"""
def __init__(self, cfg):
super(Crop1D, self).__init__()
#Code block for setting random application
self.type = "random"
if hasattr(cfg.CROP, "TYPE"):
self.type = cfg.CROP.TYPE
if hasattr(cfg.CROP, "CHANCE"):
self.chance = cfg.CROP.CHANCE
else:
self.chance = 1
#Chance set to 0 => centercrop
if self.type == "center":
self.chance = 0
assert hasattr(cfg, "LENGTH"), "Crop1D needs output tensor length to function"
self.length = cfg.LENGTH
def forward(self, x : Tensor, lines=None, labels=None):
start = int(np.floor((x.size()[1]-self.length)/2))
if np.random.uniform() < self.chance:
min_start, max_start = (0, x.size()[1] - self.length - 1)
start = np.random.randint(low=min_start, high = max_start)
x = x.narrow(1, start, self.length)
#Onset/onset annotations need to be fixed if they're added
if lines is not None:
new_lines = []
new_labels = []
for idx, annotation in enumerate(lines):
#Have to deduct starting point from the onset
annotation_onset = annotation[0] - start
#End = starting point + annotation length
annotation_offset = annotation_onset + (annotation[1] - annotation[0])
#Fix out of bounds issues
if annotation_offset > self.length:
annotation_offset = self.length
if annotation_onset < 0:
annotation_onset = 0
if annotation_onset > self.length or annotation_offset < 0:
continue
else:
new_lines.append([annotation_onset, annotation_offset])
new_labels.append(labels[idx])
lines = np.zeros((len(new_lines), 2), dtype=np.float32)
labels = np.zeros((len(new_labels)), dtype=np.int64)
for idx, new_line in enumerate(new_lines):
lines[idx] = new_line
labels[idx] = new_labels[idx]
return x, lines, labels
class Spectrify(nn.Module):
"""
Pytorch module to convert 1D tensor to spectrogram(s)
cfg.RESOLUTION to specify [WIDTH, HEIGHT]
cfg.CHANNELS to specify channel order between ["mel", "log", "normal"]
cfg.FREQ_CROP, if only part of the spectrogram frequency dimension is needed
All outputs are normalized
"""
def __init__(self, cfg):
super(Spectrify, self).__init__()
self.length = cfg.LENGTH
#Setting imagenet resolution as defaults
self.width = 224
self.height = 224
sample_freq = 16000
if hasattr(cfg.SPECTROGRAM, "RESOLUTION"):
self.width = cfg.SPECTROGRAM.RESOLUTION[0]
self.height = cfg.SPECTROGRAM.RESOLUTION[1]
self.out_width = self.width
self.out_height = self.height
if hasattr(cfg.SPECTROGRAM, "FREQ_CROP"):
self.crop = cfg.SPECTROGRAM.FREQ_CROP
self.out_height = self.crop[1]
else:
self.crop = None
if hasattr(cfg, "SAMPLE_RATE"):
sample_freq = cfg.SAMPLE_RATE
self.transformations = nn.ModuleList()
self.channels = ["normal","log","mel"]
self.resize = torchvision.transforms.Resize((self.out_height, self.out_width))
if hasattr(cfg.SPECTROGRAM, "CHANNELS"):
self.channels = cfg.SPECTROGRAM.CHANNELS
for channel in self.channels:
if channel == "mel":
#Not sure whether this is right or not
out_height = self.height
if self.crop is not None:
out_height = self.crop[1]
hop_size = cfg.LENGTH // self.width
melify = nn.Sequential(
torchaudio.transforms.MelSpectrogram(
n_mels=self.height,
hop_length=hop_size
)
)
self.transformations.append(melify)
if channel == "log" or channel == "normal":
num_ffts = (self.height - 1)*2 + 1
#A bit unsure of line below, but think it should give right width
hop_size = cfg.LENGTH // self.width
self.transformations.append(
torchaudio.transforms.Spectrogram(
n_fft = num_ffts,
hop_length = hop_size
)
)
def forward(self, x : Tensor, lines = None, labels = None):
if lines is not None:
new_lines = np.zeros((len(lines), 2), dtype=np.float32)
for idx, annotation in enumerate(lines):
#Onset_pixel = onset_sample * spectrogram_width / waveform_sample_length
annotation_onset = int(np.round((annotation[0]*self.width)/self.length))
annotation_offset = int(np.round((annotation[1]*self.width)/self.length))
new_lines[idx] = [annotation_onset, annotation_offset]
lines = new_lines
#Convert first element of output to be able to just torch.cat it later
y = self.transformations[0](x)
if not (self.crop is None) and self.channels[0] != "mel":
y = torch.narrow(
input = y,
dim = y.dim() - 2,
start = self.crop[0],
length = self.crop[1]
)
#Normalization after crop
y = (y - torch.mean(y)) / torch.std(y)
width = y.size()[-1]
height = y.size()[-1]
if width != self.out_width or height != self.out_height:
y = self.resize(y)
#If more spectrograms are specified, use them
if len(self.transformations) > 1:
for i, transformation in enumerate(self.transformations[1:]):
channel = transformation(x)
#No point in cropping mel spectrogram as its done through resize
#This is because of how the melscale works.
if not (self.crop is None) and self.channels[i + 1] != "mel":
channel = torch.narrow(
input = channel,
dim = channel.dim() - 2,
start = self.crop[0],
length = self.crop[1]
)
width = channel.size()[-1]
height = channel.size()[-2]
if width != self.out_width or height != self.out_height:
channel = self.resize(channel)
#Statement for logarithmic output.
if self.channels[i+1] == "log":
channel = torch.log(channel)
#Normalization after crop
channel = (channel - torch.mean(channel)) / torch.std(channel)
y = torch.cat((y, channel), 0)
return y, lines, labels
class ValCrop(nn.Module):
"""
Pytorch module to crop one dimensional signal specified by
Supports "random" mode and "center" mode through cfg.CROP.TYPE
defaults to random application with p = 1.0 unless cfg.CROP.CHANCE is set
If the chance doesn't activate, it defaults to crop
to the middle of the input time series.
Onset/offset annotations can optionally be added through the lines variable
"""
def __init__(self, cfg):
super(ValCrop, self).__init__()
assert hasattr(cfg, "LENGTH"), "ValCrop needs output tensor length to function"
self.length = cfg.LENGTH
self.sample_freq = 16000
if hasattr(cfg, "SAMPLE_RATE"):
self.sample_freq = cfg.SAMPLE_RATE
#If data should be differentiated, a sample has to be added
if hasattr(cfg, "DIFFERENTATE"):
self.length += getattr(cfg.DIFFERENTATE, "STEP", default=1)
def forward(self, x : Tensor, start_second=0.0, lines=None, labels=None):
signal_length = x.size()[1]
leftover_samples = signal_length - start_second*self.sample_freq + self.length
#Implicates that this is the last audio bit in the file
#In case there is more samples, will then add them to the audio bit
if leftover_samples < (self.length / 2):
start_second += min(1.0, leftover_samples/self.sample_freq)
start = int(np.floor(self.sample_freq * start_second))
x = x.narrow(1, start, self.length)
#Onset/onset annotations need to be fixed if they're added
if lines is not None:
new_lines = []
new_labels = []
for idx, annotation in enumerate(lines):
#Have to deduct starting point from the onset
annotation_onset = annotation[0] - start_second
#End = starting point + annotation length
annotation_offset = annotation_onset + (annotation[1] - annotation[0])
#Fix out of bounds issues
if annotation_offset > self.length/self.sample_freq:
annotation_offset = self.length/self.sample_freq
if annotation_onset < 0:
annotation_onset = 0
if annotation_onset > self.length/self.sample_freq or annotation_offset < 0:
continue
else:
new_lines.append([annotation_onset, annotation_offset])
new_labels.append(labels[idx])
lines = np.zeros((len(new_lines), 2), dtype=np.float32)
labels = np.zeros((len(new_labels)), dtype=np.int64)
for idx, new_line in enumerate(new_lines):
lines[idx] = new_line
labels[idx] = new_labels[idx]
return x, lines, labels
class AudioTransformer(nn.Module):
def __init__(self, cfg, is_train=True):
super(AudioTransformer, self).__init__()
self.transforms = nn.ModuleList()
#Implementations of new transforms will have to be added in these
#dictionaries to be supported by YAML-specification
if is_train:
transform_dict = {
"SAMPLE_COORDS" : ToSampleCoords,
"DIFFERENTIATE" : Differentiate1D,
"CROP" : Crop1D,
"RAND_FLIP" : RandFlip1D,
"RAND_GAUSS" : RandGauss1D,
"RAND_AMP_ATT" : RandAmpAtt1D,
"RAND_CONTRAST" : RandContrast1D,
"SPECTROGRAM" : Spectrify
}
else:
#Data augmentation and crop is unnecessary on validation/test data
#As both test and validation data has their own cropping function
transform_dict = {
"SAMPLE_COORDS" : ToSampleCoords,
"DIFFERENTIATE" : Differentiate1D,
"RAND_CONTRAST" : RandContrast1D,
"SPECTROGRAM" : Spectrify
}
self.validation_crop = ValCrop(cfg)
for transform_name in transform_dict:
if hasattr(cfg, transform_name):
if getattr(cfg, transform_name).ACTIVE == True:
#This is not allowed with torchscript
transform_module = transform_dict[transform_name](cfg)
self.transforms.append(transform_module)
def forward(self, x, lines = None, labels = None):
for transform in self.transforms:
x, lines, labels = transform(
x,
lines = lines,
labels = labels
)
return x, lines, labels
|
{"hexsha": "c5b7e2e11947b7b1375a8247d57278669aac1f63", "size": 18664, "ext": "py", "lang": "Python", "max_stars_repo_path": "classifier/data/transform/transforms.py", "max_stars_repo_name": "bendikbo/SSED", "max_stars_repo_head_hexsha": "fdd0e74d419687bc8cba65341d7248ca6ccd1a4e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classifier/data/transform/transforms.py", "max_issues_repo_name": "bendikbo/SSED", "max_issues_repo_head_hexsha": "fdd0e74d419687bc8cba65341d7248ca6ccd1a4e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classifier/data/transform/transforms.py", "max_forks_repo_name": "bendikbo/SSED", "max_forks_repo_head_hexsha": "fdd0e74d419687bc8cba65341d7248ca6ccd1a4e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5441527446, "max_line_length": 113, "alphanum_fraction": 0.5740462923, "include": true, "reason": "import numpy", "num_tokens": 4156}
|
from __future__ import absolute_import, division, print_function
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from models import rhn
from models import rnn
from models import lstm
from utils import shared_uniform, get_dropout_noise, shared_zeros, cast_floatX
floatX = theano.config.floatX
def model(_input_data, _noise_x, _lr, _is_training, config, _theano_rng):
embedding = shared_uniform(( config.vocab_size,config.hidden_size), config.init_scale)
params = [embedding]
inputs = embedding[_input_data.T] # (num_steps, batch_size, hidden_size)
inputs = ifelse(_is_training, inputs * T.shape_padright(_noise_x.T), inputs)
rhn_updates = []
sticky_hidden_states = [] # shared variables which are reset before each epoch
for _ in range(config.num_layers):
# y shape: (num_steps, batch_size, hidden_size)
if config.model == "rhn":
print(" with RHN cell")
y, y_0, sticky_state_updates = rhn.model(
inputs, _is_training, params,
config.depth, config.batch_size, config.hidden_size,
config.drop_i, config.drop_s,
config.init_scale, config.init_T_bias,config.init_scale,
config.tied_noise,
_theano_rng)
elif config.model == "lstm":
print(" with LSTM cell")
y, y_0, sticky_state_updates = lstm.model(
inputs, _is_training, params,
config.batch_size, config.hidden_size,
config.drop_i, config.drop_s,
config.init_scale, config.init_scale,
config.tied_noise,
_theano_rng)
else:
print(" with RNN cell")
y, y_0, sticky_state_updates = rnn.model(
inputs, _is_training, params,
config.batch_size, config.hidden_size,
config.drop_i, config.drop_s,
config.init_scale, config.init_scale,
_theano_rng)
rhn_updates += sticky_state_updates
inputs = y
# The recurrent hidden state of the RHN is sticky (the last hidden state of one batch is carried over to the next batch,
# to be used as an initial hidden state). These states are kept in shared variables and are reset before every epoch.
sticky_hidden_states.append(y_0)
noise_o = get_dropout_noise((config.batch_size, config.hidden_size), config.drop_o, _theano_rng)
outputs = ifelse(_is_training, y * T.shape_padleft(noise_o), y) # (num_steps, batch_size, hidden_size)
# logits
if config.tied_embeddings:
softmax_w = embedding.T
else:
softmax_w = shared_uniform((config.hidden_size, config.vocab_size), config.init_scale)
params = params + [softmax_w]
softmax_b = shared_uniform((config.vocab_size,), config.init_scale)
params = params + [softmax_b]
logits = T.dot(outputs, softmax_w) + softmax_b # (num_steps, batch_size, vocab_size)
# probabilities and prediction loss
flat_logits = logits.reshape((config.batch_size * config.num_steps, config.vocab_size))
flat_probs = T.nnet.softmax(flat_logits)
return flat_probs, params, rhn_updates, sticky_hidden_states
|
{"hexsha": "a5c009a62dde46740979b9ff5d0bd80070dc51f2", "size": 3353, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter10/models/stacked.py", "max_stars_repo_name": "PacktPublishing/Deep-Learning-with-Theano", "max_stars_repo_head_hexsha": "39b940f7c6993533a9744d0c1b792e408486e89a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2017-08-01T20:04:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T15:56:29.000Z", "max_issues_repo_path": "Chapter10/models/stacked.py", "max_issues_repo_name": "PacktPublishing/Deep-Learning-with-Theano", "max_issues_repo_head_hexsha": "39b940f7c6993533a9744d0c1b792e408486e89a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-01-11T18:50:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-01T14:52:01.000Z", "max_forks_repo_path": "Chapter10/models/stacked.py", "max_forks_repo_name": "PacktPublishing/Deep-Learning-with-Theano", "max_forks_repo_head_hexsha": "39b940f7c6993533a9744d0c1b792e408486e89a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2017-09-18T09:06:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-07T22:54:47.000Z", "avg_line_length": 41.9125, "max_line_length": 128, "alphanum_fraction": 0.6513569937, "include": true, "reason": "import numpy,import theano,from theano", "num_tokens": 737}
|
<center>
<h1> ILI285 - Computación Científica I / INF285 - Computación Científica </h1>
<h2> Least Squares </h2>
<h2> [[S]cientific [C]omputing [T]eam](#acknowledgements)</h2>
<h2> Version: 1.24</h2>
</center>
## Table of Contents
* [Introduction](#intro)
* [QR Factorization](#qr)
* [Examples](#ex)
* [Inconsistents Systems](#in)
* [A Survey of Models](#sm)
* [Acknowledgements](#acknowledgements)
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as spla
%matplotlib inline
from sklearn import datasets
import ipywidgets as widgets
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
# from scipy.interpolate import CubicSpline # Starting scipy 0.19.0
M=8
```
<div id='intro' />
## Introduction
We have learned about square linear system of equations.
However, How can we solve a non-square system? (More equations that unknowns!) Well, we need to find a least squares approximation.
<div id='qr' />
# QR Factorization
## Gram-Schmidt Orthogonalization
This algorithm orthogonalize a set of input vectors, returning an **orthogonal set** that spans the same column space.
We will only consider now that the input set of vectors are **linearly independent**.
Let $A=[A_1\, ...., A_n]$ a matrix with linearly independent column vectors $\in\mathbb{R}^m$ and $n \le m$.
We know the following for the orthogonal set:
\begin{align*}
q_i^*\,q_i & = \|q_i\|_2^2= 1\\
q_i^*\,q_j & = 0, \, \text{ for } i\neq j
\end{align*}
Then the Gram-Schmidt orthogonalization finds the $q_i$ and $r_{ij}$ from the following set of equations and considering the previous constraints:
\begin{align*}
A_1 &= r_{11}\,q_1\\
r_{11} &= \|A_1\|_2\\
q_1 &= \dfrac{y_1}{r_{11}}\\
A_2 &= r_{12}\,q_1+r_{22}\,q_2\\
r_{12} &= q_1^*\,A_2\\
r_{22} &= \|A_2-r_{12}\,q_1\|\\
q_2 &= \dfrac{A_2-r_{12}\,q_1}{r_{22}}\\
\vdots &= \vdots\\
A_j &= \sum_{i=1}^j r_{ij}\,q_i\\
r_{ij} &= q_i^*\,A_j, \, \text{ for } i<j\\
r_{jj} &= \|A_j-\sum_{i=1}^{j-1} r_{ij}\,q_i\|\\
q_j &= \dfrac{A_j-\sum_{i=1}^{j-1} r_{ij}\,q_i}{r_jj}\\
\vdots &= \vdots\\
A_n &= \sum_{i=1}^n r_{in}\,q_i\\
r_{in} &= q_i^*\,A_n, \, \text{ for } i<n\\
r_{nn} &= \|A_n-\sum_{i=1}^{n-1} r_{in}\,q_i\|\\
q_n &= \dfrac{A_n-\sum_{i=1}^{n-1} r_{in}\,q_i}{r_{nn}}
\end{align*}
Thus, we obtain the QR decomposition as follows:
\begin{equation}
A_{m\times n} = Q_{m\times n}R_{n\times n}\\
\end{equation}
Where $Q$ is a matrix of vectors $q_{n}$, and $R$ is an upper-triangular matrix, with the coefficients $r_{ij}$:
This is known as the **Reduced QR Factorization**.
_**[IMPORTANT]** What is then a **full** QR decomposition?_
```python
# Inputs:
# A: A set of linearly independent columns
# type_factorization: reduced or full
# type_gram_schmidt: classic or modified
def QR(A, type_factorization = 'reduced', type_gram_schmidt='classic'):
A.astype('float')
if type_factorization == 'reduced':
Q = np.zeros(A.shape)
R = np.zeros((A.shape[1],A.shape[1]))
elif type_factorization == 'full':
Q = np.zeros((A.shape[0],A.shape[0]))
R = np.zeros(A.shape)
for j in np.arange(A.shape[1]):
y = A[:,j]
for i in np.arange(j):
if type_gram_schmidt == 'classic':
R[i,j] = np.dot(Q[:,i],A[:,j])
elif type_gram_schmidt == 'modified':
R[i,j] = np.dot(Q[:,i],y)
y=y-R[i,j]*Q[:,i]
R[j,j] = np.linalg.norm(y)
Q[:,j] = y/np.linalg.norm(R[j,j])
# The following lines must be completed by you!
#if type_factorization == 'full':
# (1) We need to add 0's to the R matrix so it is of the same shape as the matrix A,
# fortunately this was already done!
# (2) We need to add orthogonal vectors to Q so it is square,
# how do we do this?
return Q,R
```
```python
A = np.array([[1,-4],[2,3],[2,2]])
Qa, Ra = QR(A, type_factorization ='reduced', type_gram_schmidt='classic')
print(np.dot(Qa,Ra))
print(Qa)
print(Ra)
```
This method let us resolve a system of equations. However, exists a **Full QR Factorization**, creating the next system:
\begin{equation}
A_{m\times n} = Q_{m\times m}R_{m\times n}\\
\end{equation}
Q is a square orthogonal matrix, adding $m-n$ columns and R grows adding $m-n$ zero rows.
#### Theorem
A square matrix $Q$ is orthogonal if $Q^*\, = Q^{-1}$
<div id='ex' />
## Examples
### Normal vs Modified Gram-Schmidt
```python
d = 1e-10
A = np.array([[1,1,1],[d,0,0],[0,d,0],[0,0,d]])
Q1,R1 = QR(A, type_gram_schmidt = 'classic')
Q2,R2 = QR(A, type_gram_schmidt = 'modified')
# Are truly orthogonal the Q's?
print(Q1)
print(Q2)
# Do we recover A?
print(np.dot(Q1,R1))
print(np.dot(Q2,R2))
```
<div id='in' />
## Inconsistent Systems
There is cases where the number of equations is greater than variables. Many times, those systems don't have an exact solution (inconsistent system). Then, in this case we needs an approximation closest to the data. Based in orthogonality, the shortest distance from a point to plane. The orthogonal distance represents the error which would be minimum.
\begin{equation}
b - A\,x = \vec{r}\\
b - A\,x \perp \{A\,x | x \in \mathcal{R}\}
\end{equation}
The idea is that $\vec{r}$ would be closest to zero. We need to apply orthogonality to find the vector that satisfied this condition.
\begin{equation}
(Ax)^*\,(b-A─\overline{x})=0 \hspace{1cm} \text{for all } x \in \mathcal{R^n}\\
x^*\, A^*\,(b-A─\overline{x})=0 \hspace{1cm} \text{for all } x \in \mathcal{R^n}\\
A^*\,(b-A─\overline{x})=0 \\
A^*\,A\overline{x}= A^*\,b \\
\end{equation}
This last equation gives us a new square $n\times n$ matrix, which let us resolve the equation system.
This linear system of equations is known as the **Normal Equations**.
```python
def least_squares(A,b):
Q,R = QR(A,type_gram_schmidt='modified')
return spla.solve_triangular(R,np.dot(Q.T,b))
def solve_model(M):
A=M['A']
b=M['b']
M['x_bar']=least_squares(A,b)
return M
def create_model(data, type_model='linear'):
if type_model == 'linear': # f(x)=a0+a1*x
A = np.ones((data.shape[0],2))
A[:,1] = data[:,0]
b = data[:,1]
if type_model == 'parabollic': # f(x)=a0+a1*x+a_2*x^2
A = np.ones((data.shape[0],3))
A[:,1] = data[:,0]
A[:,2] = data[:,0]**2
b = data[:,1]
if type_model == 'exponential': #f(x)=a0 \exp(a1*x) = \exp(\log(a0)+a1*x) -> log(f(x))=log(a0)+a1*x = A0+a1+x (it is linear now!)
A = np.ones((data.shape[0],2))
A[:,1] = data[:,0]
b = np.log(data[:,1])
M = {'A':A,
'b':b,
'type_model':type_model}
M=solve_model(M)
return M
def evaluate_model(M,x):
x_bar=M['x_bar']
if M['type_model'] == 'linear':
return x_bar[0] + x_bar[1]*x
if M['type_model'] == 'parabollic':
return x_bar[0] + x_bar[1]*x + x_bar[2]*x**2
if M['type_model'] == 'exponential':
return np.exp(x_bar[0]+x_bar[1]*x)
```
# Adjusting some models
```python
def generate_data(type_of_data='linear'):
n=40
np.random.seed(0)
x = np.linspace(0,10,n)
y = np.random.rand(n)
x = np.concatenate((x,x,y),axis=0)
n = 3*n
if type_of_data=='linear':
y = x+0.1*np.random.normal(0,1,n)+1.5
elif type_of_data=='parabollic':
y = 4*x**2+0.1*x*np.random.normal(0,1,n)+1.5
elif type_of_data=='exponential':
y = np.exp(x+0.1*np.random.normal(0,1,n)+1.5)
elif type_of_data=='sinusoidal':
y = np.sin(2*np.pi*x/10)+0.1*np.random.normal(0,1,n)+1.5
elif type_of_data=='random':
y = 0.1*np.random.normal(0,1,n)+1.5
elif type_of_data=='boston house-prices':
x,y=datasets.load_boston(return_X_y=True)
x=x[:,5]
elif type_of_data=='diabetes':
x,y=datasets.load_diabetes(return_X_y=True)
x=x[:,2]
data = np.stack((x, y)).T
return data
```
```python
def looking_at_data(type_of_data='diabetes'):
data=generate_data(type_of_data)
Ml = create_model(data, type_model='linear')
Mp = create_model(data, type_model='parabollic')
Me = create_model(data, type_model='exponential')
xx=np.linspace(np.min(data[:,0])-0.1,np.max(data[:,0])+0.1,1000)
yyl=evaluate_model(Ml,xx)
yyp=evaluate_model(Mp,xx)
yye=evaluate_model(Me,xx)
error_l=data[:,1]-evaluate_model(Ml,data[:,0])
error_p=data[:,1]-evaluate_model(Mp,data[:,0])
error_e=data[:,1]-evaluate_model(Me,data[:,0])
plt.figure(figsize=(2*M,M))
plt.subplot(1, 2, 1)
plt.plot(xx,yyl,'k-',linewidth=5,label='linear model')
plt.plot(xx,yyp,'y--',linewidth=20,label='parabollic model')
plt.plot(xx,yye,'g-',linewidth=5,label='exponential model')
plt.plot(data[:,0],data[:,1],'.b',markersize=20,label='original data',alpha=0.3)
plt.grid(True)
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.legend(loc='best')
plt.subplot(1, 2, 2)
plt.title('What does this histogram tell us?')
three_errors=np.vstack((error_l, error_p, error_e)).T
plt.hist(three_errors, bins=20,
label=['linear','parabollic','exponential'],
color=['k','y','g'], alpha=0.5)
plt.legend(loc='best')
plt.grid(True)
plt.show()
widgets.interact(looking_at_data,type_of_data=['linear','parabollic','exponential','sinusoidal','random','boston house-prices','diabetes'])
```
## References
### Numpy Least Squares
http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
### Numpy QR Factorization
http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.qr.html
<div id='acknowledgements' />
# Acknowledgements
* _Material created by professor Claudio Torres_ (`ctorres@inf.utfsm.cl`) _and assistans: Laura Bermeo, Alvaro Salinas, Axel Símonsen and Martín Villanueva. DI UTFSM. April 2016._
* _Material updated by professor Claudio Torres_ (`ctorres@inf.utfsm.cl`) DI UTFSM. June 2017.
```python
```
|
{"hexsha": "b7541f259dc24c305057b0ea1f0c142e66b32e34", "size": 15155, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "SC1/09_Least_Squares.ipynb", "max_stars_repo_name": "cristopherarenas/Scientific-Computing", "max_stars_repo_head_hexsha": "7bbcd67aee343ad4561165fed21c3963307b3c14", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SC1/09_Least_Squares.ipynb", "max_issues_repo_name": "cristopherarenas/Scientific-Computing", "max_issues_repo_head_hexsha": "7bbcd67aee343ad4561165fed21c3963307b3c14", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SC1/09_Least_Squares.ipynb", "max_forks_repo_name": "cristopherarenas/Scientific-Computing", "max_forks_repo_head_hexsha": "7bbcd67aee343ad4561165fed21c3963307b3c14", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0561797753, "max_line_length": 362, "alphanum_fraction": 0.5055757176, "converted": true, "num_tokens": 3344}
|
from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import os
import time
import argparse
import numpy as np
from loss_functions import alpha_loss, foreground_loss, error_map_loss
#CUDA
print('CUDA Device: ' + os.environ["CUDA_VISIBLE_DEVICES"])
"""Parses arguments."""
parser = argparse.ArgumentParser(description='Training Background Matting on Adobe Dataset.')
parser.add_argument('-n', '--name', type=str, help='Name of tensorboard and model saving folders.')
parser.add_argument('-bs', '--batch_size', type=int, help='Batch Size.')
parser.add_argument('-res', '--reso', type=int, help='Input image resolution')
parser.add_argument('-init_model', '--init_model', type=str, help='Initial model file')
parser.add_argument('-epoch', '--epoch', type=int, default=15,help='Maximum Epoch')
parser.add_argument('-n_blocks1', '--n_blocks1', type=int, default=7,help='Number of residual blocks after Context Switching.')
parser.add_argument('-n_blocks2', '--n_blocks2', type=int, default=3,help='Number of residual blocks for Fg and alpha each.')
args=parser.parse_args()
for i in range(args.epoch):
pass
|
{"hexsha": "7859b025e7faec0916a38cce84f0f586e730e86e", "size": 1242, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/train_fake.py", "max_stars_repo_name": "kie4280/bg-matting-with-depth", "max_stars_repo_head_hexsha": "99cb87eb05342c7c6e3c871c6bccd8aef06a5451", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-07-13T03:17:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-09T14:46:59.000Z", "max_issues_repo_path": "training/train_fake.py", "max_issues_repo_name": "kie4280/bg-matting-with-depth", "max_issues_repo_head_hexsha": "99cb87eb05342c7c6e3c871c6bccd8aef06a5451", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training/train_fake.py", "max_forks_repo_name": "kie4280/bg-matting-with-depth", "max_forks_repo_head_hexsha": "99cb87eb05342c7c6e3c871c6bccd8aef06a5451", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.05, "max_line_length": 127, "alphanum_fraction": 0.7576489533, "include": true, "reason": "import numpy", "num_tokens": 284}
|
import unittest
import skrf
import numpy as np
import tempfile
import os
class VectorFittingTestCase(unittest.TestCase):
def test_vectorfitting_ring_slot(self):
# expected fitting parameters for skrf.data.ring_slot with 2 initial real poles
expected_poles = np.array([-7.80605445e+10+5.32645184e+11j])
expected_zeros = np.array([[7.01837934e+10+1.14737278e+10j],
[7.93470695e+10-4.54467471e+09j],
[7.93470695e+10-4.54467471e+09j],
[8.19724835e+10-2.11876421e+10j]])
expected_props = np.array([-2.06451610e-15,
-2.45016478e-14,
-2.45016478e-14,
7.79744644e-13])
expected_const = np.array([-0.9871906,
-0.06043898,
-0.06043898,
-0.99401152])
# perform the fit
vf = skrf.vectorFitting.VectorFitting(skrf.data.ring_slot)
vf.vector_fit(n_poles_real=2, n_poles_cmplx=0, fit_constant=True, fit_proportional=True)
# relax relative and absolute tolerances, as results from Python 2.7 are slightly different from Python 3.x
# basically, this disables the absolute tolerance criterion
rtol = 0.01
atol = rtol * np.amax(np.abs(expected_poles))
# compare both sets of parameters
self.assertTrue(np.allclose(vf.poles, expected_poles, rtol=rtol, atol=atol))
self.assertTrue(np.allclose(vf.zeros, expected_zeros, rtol=rtol, atol=atol))
self.assertTrue(np.allclose(vf.proportional_coeff, expected_props, rtol=rtol, atol=atol))
self.assertTrue(np.allclose(vf.constant_coeff, expected_const, rtol=rtol, atol=atol))
def test_model_response(self):
# fit ring slot example network
nw = skrf.data.ring_slot
vf = skrf.vectorFitting.VectorFitting(nw)
vf.vector_fit(n_poles_real=4, n_poles_cmplx=0, fit_constant=True, fit_proportional=True)
# compare fitted model responses to original network responses (should match with less than 1% error)
# s11
nw_s11 = nw.s[:, 0, 0]
fit_s11 = vf.get_model_response(0, 0, freqs=nw.f)
delta_s11_maxabs = np.amax(np.abs((fit_s11 - nw_s11) / nw_s11))
self.assertLess(delta_s11_maxabs, 0.05)
# s12
nw_s12 = nw.s[:, 0, 1]
fit_s12 = vf.get_model_response(0, 1, freqs=nw.f)
delta_s12_maxabs = np.amax(np.abs((fit_s12 - nw_s12) / nw_s12))
self.assertLess(delta_s12_maxabs, 0.05)
# s21
nw_s21 = nw.s[:, 1, 0]
fit_s21 = vf.get_model_response(1, 0, freqs=nw.f)
delta_s21_maxabs = np.amax(np.abs((fit_s21 - nw_s21) / nw_s21))
self.assertLess(delta_s21_maxabs, 0.05)
# s22
nw_s22 = nw.s[:, 1, 1]
fit_s22 = vf.get_model_response(1, 1, freqs=nw.f)
delta_s22_maxabs = np.amax(np.abs((fit_s22 - nw_s22) / nw_s22))
print(delta_s22_maxabs)
self.assertLess(delta_s22_maxabs, 0.05)
def test_spice_subcircuit(self):
# fit ring slot example network
nw = skrf.data.ring_slot
vf = skrf.vectorFitting.VectorFitting(nw)
vf.vector_fit(n_poles_real=4, n_poles_cmplx=0, fit_constant=True, fit_proportional=True)
# write equivalent SPICE subcircuit to tmp file
tmp_file = tempfile.NamedTemporaryFile(suffix='.sp')
vf.write_spice_subcircuit_s(tmp_file.name)
# written tmp file should contain 69 lines
n_lines = len(open(tmp_file.name, 'r').readlines())
self.assertEqual(n_lines, 69)
def test_read_write_npz(self):
# fit ring slot example network
nw = skrf.data.ring_slot
vf = skrf.vectorFitting.VectorFitting(nw)
vf.vector_fit(n_poles_real=2, n_poles_cmplx=0, fit_constant=True, fit_proportional=True)
# export (write) fitted parameters to .npz file in tmp directory
tmp_dir = tempfile.TemporaryDirectory()
vf.write_npz(tmp_dir.name)
# create a new vector fitting instance and import (read) those fitted parameters
vf2 = skrf.vectorFitting.VectorFitting(nw)
vf2.read_npz(os.path.join(tmp_dir.name, 'coefficients_{}.npz'.format(nw.name)))
# compare both sets of parameters
self.assertTrue(np.allclose(vf.poles, vf2.poles))
self.assertTrue(np.allclose(vf.zeros, vf2.zeros))
self.assertTrue(np.allclose(vf.proportional_coeff, vf2.proportional_coeff))
self.assertTrue(np.allclose(vf.constant_coeff, vf2.constant_coeff))
def test_matplotlib_missing(self):
vf = skrf.vectorFitting.VectorFitting(skrf.data.ring_slot)
skrf.vectorFitting.mplt = None
with self.assertRaises(RuntimeError):
vf.plot_convergence()
suite = unittest.TestLoader().loadTestsFromTestCase(VectorFittingTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{"hexsha": "d68c06ed714e1c5d2eb89d8082fa37a0925827d7", "size": 5041, "ext": "py", "lang": "Python", "max_stars_repo_path": "skrf/tests/test_vectorfitting.py", "max_stars_repo_name": "DavidLutton/scikit-rf", "max_stars_repo_head_hexsha": "1e0dfb2c560058ae21ddf255f395a753b6ea696f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skrf/tests/test_vectorfitting.py", "max_issues_repo_name": "DavidLutton/scikit-rf", "max_issues_repo_head_hexsha": "1e0dfb2c560058ae21ddf255f395a753b6ea696f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skrf/tests/test_vectorfitting.py", "max_forks_repo_name": "DavidLutton/scikit-rf", "max_forks_repo_head_hexsha": "1e0dfb2c560058ae21ddf255f395a753b6ea696f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0854700855, "max_line_length": 115, "alphanum_fraction": 0.6405475104, "include": true, "reason": "import numpy", "num_tokens": 1361}
|
from omegaconf import DictConfig, OmegaConf
import hydra
from hydra.core.hydra_config import HydraConfig
import itertools as it
import os.path as osp
import os
from subprocess import Popen, PIPE
from datetime import datetime
import numpy as np
import pandas as pd
from shutil import copy
import re
import ruamel.yaml
@hydra.main(config_path="conf", config_name="config")
def run(cfg: DictConfig):
if cfg.verbose: print(f'hydra working directory: {os.getcwd()}')
overrides = HydraConfig.get().overrides.task
overrides = [o for o in overrides if (not "task" in o and
not "model=" in o and
not "datasource=" in o and
not "model_dir=" in o)]
overrides = " ".join(overrides)
target_dir = osp.join(cfg.device.root, cfg.output_dir, cfg.datasource.name, cfg.model.name)
print(f'target_dir = {target_dir}')
os.makedirs(target_dir, exist_ok=True)
if cfg.datasource.test_year == 'all':
test_years = cfg.datasource.years
else:
test_years = [cfg.datasource.test_year]
if cfg.task.name == 'hp_search':
hp_grid_search(cfg, target_dir, test_years)
elif cfg.task.name == 'train_eval' or cfg.task.name == 'train':
train_eval(cfg, target_dir, test_years, overrides)
elif cfg.task.name == 'eval':
eval(cfg, target_dir, test_years, overrides)
def hp_grid_search(cfg: DictConfig, target_dir, test_years, timeout=10):
hp_file, n_comb = generate_hp_file(cfg, target_dir)
for year in test_years:
# run inner cv for all hyperparameter settings
output_dir = cfg.get('experiment', 'hp_grid_search')
output_path = osp.join(target_dir, f'test_{year}', output_dir)
if cfg.verbose: print(f"Start grid search for year {year}")
# directory created by hydra, containing current config
# including settings overwritten from command line
config_path = osp.join(os.getcwd(), '.hydra')
# option for running only parts of grid search
n_start = cfg.get('hp_start', 1)
# run inner cross-validation loop for all different hyperparameter settings
if cfg.device.slurm:
job_file = osp.join(cfg.device.root, cfg.task.slurm_job)
proc = Popen(['sbatch', f'--array={n_start}-{n_comb}', job_file, cfg.device.root, output_path, config_path,
hp_file, str(year)], stdout=PIPE, stderr=PIPE)
else:
job_file = osp.join(cfg.device.root, cfg.task.local_job)
os.environ['MKL_THREADING_LAYER'] = 'GNU'
os.environ['HYDRA_FULL_ERROR'] = '1'
proc = Popen([job_file, cfg.device.root, output_path, config_path,
hp_file, str(year), str(n_start), str(n_comb)], stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
start_time = datetime.now()
# wait until job has been submitted (at most 10s)
while True:
if stderr:
print(stderr.decode("utf-8")) # something went wrong
if stdout:
print(stdout.decode("utf-8")) # successful job submission
return
if (datetime.now() - start_time).seconds > timeout:
print(f'timeout after {timeout} seconds')
return
def train_eval(cfg: DictConfig, target_dir, test_years, overrides='', timeout=10):
os.environ['HYDRA_FULL_ERROR'] = '1'
for year in test_years:
# determine best hyperparameter setting
base_dir = osp.join(target_dir, f'test_{year}')
hp_search_dir = cfg.get('hp_search_dir', 'hp_grid_search')
input_dir = osp.join(base_dir, hp_search_dir)
if osp.isdir(input_dir):
determine_best_hp(input_dir)
best_hp_config = OmegaConf.load(osp.join(base_dir, 'config.yaml'))
best_hp_config.task = cfg.task
with open(osp.join(base_dir, 'config.yaml'), 'w') as f:
OmegaConf.save(config=best_hp_config, f=f)
else:
print(f'Directory "{hp_search_dir}" not found. Use standard config for training.')
os.makedirs(base_dir, exist_ok=True)
print(f'base_dir = {base_dir}')
with open(osp.join(base_dir, 'config.yaml'), 'w') as f:
OmegaConf.save(config=cfg, f=f)
overrides = re.sub('[+]', '', overrides)
# use this setting and train on all data except for one year
output_dir = cfg.get('experiment', 'final_evaluation')
# remove all '+' in overrides string
#overrides = re.sub('[+]', '', overrides)
output_path = osp.join(target_dir, f'test_{year}', output_dir)
if cfg.verbose:
print(f"Start train/eval for year {year}")
print(f"Use overrides: {overrides}")
config_path = osp.dirname(output_path)
print(f'config_path = {config_path}')
repeats = cfg.task.repeats
if hasattr(cfg, 'trial'):
array = cfg.trial
else:
array = f'1-{repeats}'
if cfg.device.slurm:
job_file = osp.join(cfg.device.root, cfg.task.slurm_job)
gres = 1 if cfg.device.cuda else 0
proc = Popen(['sbatch', f'--array={array}', f'--gres=gpu:{gres}', job_file, cfg.device.root, output_path, config_path,
str(year), overrides], stdout=PIPE, stderr=PIPE)
else:
job_file = osp.join(cfg.device.root, cfg.task.local_job)
os.environ['MKL_THREADING_LAYER'] = 'GNU'
os.environ['HYDRA_FULL_ERROR'] = '1'
proc = Popen([job_file, cfg.device.root, output_path, config_path,
str(year), str(repeats)], stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
start_time = datetime.now()
while True:
if stderr:
print(stderr.decode("utf-8"))
return
if stdout:
print(stdout.decode("utf-8"))
return
if (datetime.now() - start_time).seconds > timeout:
print(f'timeout after {timeout} seconds')
return
def eval(cfg: DictConfig, target_dir, test_years, overrides='', timeout=10):
os.environ['HYDRA_FULL_ERROR'] = '1'
for year in test_years:
assert hasattr(cfg, 'model_dir')
cfg.model_dir = osp.join(cfg.device.root, cfg.model_dir)
base_dir = osp.dirname(cfg.model_dir)
output_path = cfg.model_dir
cfg.sub_dir = ''
print(f'model dir: {cfg.model_dir}')
with open(osp.join(base_dir, 'config.yaml'), 'w') as f:
OmegaConf.save(config=cfg, f=f)
overrides = re.sub('[+]', '', overrides)
if cfg.verbose:
print(f"Eval for year {year}")
print(f"Use overrides: {overrides}")
config_path = base_dir
print(f'config_path = {config_path}')
if cfg.device.slurm:
job_file = osp.join(cfg.device.root, cfg.task.slurm_job)
proc = Popen(['sbatch', job_file, cfg.device.root, output_path, config_path,
str(year), overrides], stdout=PIPE, stderr=PIPE)
else:
job_file = osp.join(cfg.device.root, cfg.task.local_job)
os.environ['MKL_THREADING_LAYER'] = 'GNU'
os.environ['HYDRA_FULL_ERROR'] = '1'
proc = Popen([job_file, cfg.device.root, output_path, config_path,
str(year)], stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
start_time = datetime.now()
while True:
if stderr:
print(stderr.decode("utf-8"))
return
if stdout:
print(stdout.decode("utf-8"))
return
if (datetime.now() - start_time).seconds > timeout:
print(f'timeout after {timeout} seconds')
return
def determine_best_hp(input_dir: str):
job_dirs = [f.path for f in os.scandir(input_dir) if f.is_dir()]
best_loss = np.inf
for dir in job_dirs:
# load cv summary
df = pd.read_csv(osp.join(dir, 'summary.csv'))
loss = np.nanmean(df.final_val_loss.values)
if loss < best_loss:
# copy config file to parent directory
copy(osp.join(dir, 'config.yaml'), osp.dirname(input_dir))
best_loss = loss
def generate_hp_file(cfg: DictConfig, target_dir):
search_space = {k: v for k, v in cfg.hp_search_space.items() if k in cfg.model.keys()}
hp_file = osp.join(target_dir, 'hyperparameters.txt')
names, values = zip(*search_space.items())
all_combinations = [dict(zip(names, v)) for v in it.product(*values)]
with open(hp_file, 'w') as f:
for combi in all_combinations:
hp_str = " ".join([f'model.{name}={val}' for name, val in combi.items()]) + "\n"
f.write(hp_str)
if cfg.verbose:
print("successfully generated hyperparameter settings file")
print(f"File path: {hp_file}")
print(f"Number of combinations: {len(all_combinations)} \n")
return hp_file, len(all_combinations)
if __name__ == '__main__':
run()
|
{"hexsha": "7da3d4ef67298fd5cd819b74be2b20f1c353a301", "size": 9348, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/run_experiments.py", "max_stars_repo_name": "FionaLippert/FluxRGNN", "max_stars_repo_head_hexsha": "176f8f6bf24f65b9822e406f5de173cc5a17960a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/run_experiments.py", "max_issues_repo_name": "FionaLippert/FluxRGNN", "max_issues_repo_head_hexsha": "176f8f6bf24f65b9822e406f5de173cc5a17960a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/run_experiments.py", "max_forks_repo_name": "FionaLippert/FluxRGNN", "max_forks_repo_head_hexsha": "176f8f6bf24f65b9822e406f5de173cc5a17960a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.392, "max_line_length": 130, "alphanum_fraction": 0.5951005563, "include": true, "reason": "import numpy", "num_tokens": 2146}
|
#!/usr/bin/env python
# coding: utf-8
"""
@Time : 19-9-15 上午11:05
@Author : yangzh
@Email : 1725457378@qq.com
@File : image_util.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def preprocess_input(x):
x = x.astype(np.float32)
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
|
{"hexsha": "3409d61be5a8ebc804130e53149993e98216857e", "size": 574, "ext": "py", "lang": "Python", "max_stars_repo_path": "image_util.py", "max_stars_repo_name": "Yangget/Weath_classification", "max_stars_repo_head_hexsha": "6cab35de07f0b7bcdcf9cf5d4e4ec47d2eb890c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-18T13:40:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-18T13:40:11.000Z", "max_issues_repo_path": "image_util.py", "max_issues_repo_name": "Yangget/Weath_classification", "max_issues_repo_head_hexsha": "6cab35de07f0b7bcdcf9cf5d4e4ec47d2eb890c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image_util.py", "max_forks_repo_name": "Yangget/Weath_classification", "max_forks_repo_head_hexsha": "6cab35de07f0b7bcdcf9cf5d4e4ec47d2eb890c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-18T09:02:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-18T09:02:44.000Z", "avg_line_length": 19.7931034483, "max_line_length": 38, "alphanum_fraction": 0.5540069686, "include": true, "reason": "import numpy", "num_tokens": 212}
|
import numpy as np
from scipy.special import logsumexp
import ctypes
import os
import platform
if platform.system() == "Linux":
lpm_lib = np.ctypeslib.load_library("liblpm_lib.so", "bin/")
elif platform.system() == "Darwin":
lpm_lib = np.ctypeslib.load_library("liblpm_lib.dylib", "bin/")
np.random.seed(111)
curr_dir = os.getcwd()
model_lens = range(3, 10)
n_genes = 25
n_patients = 1000
fbp = 0.05
bgp = 0.05
n_reps = 100
_n_genes = ctypes.c_uint(n_genes)
_n_patients = ctypes.c_uint(n_patients)
_fbp = ctypes.c_double(fbp)
_bgp = ctypes.c_double(bgp)
for model_len in model_lens:
_model_len = ctypes.c_uint(model_len)
output_path = curr_dir + "/data/model_selection/model" + str(model_len)
for rep in range(n_reps):
dest = output_path + "/rep" + str(rep)
if not os.path.exists(dest):
os.makedirs(dest)
_dest = ctypes.create_string_buffer(dest.encode())
seed = np.random.randint(low=0, high=1000000)
_seed = ctypes.c_long(seed)
lpm_lib.generate_data(_seed, _dest, _model_len, _n_genes, _n_patients, _fbp, _bgp)
|
{"hexsha": "bca8673a2d48425768377a61b961e1397513b291", "size": 1104, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_data.py", "max_stars_repo_name": "junseonghwan/linear-progression", "max_stars_repo_head_hexsha": "feda9f18d44f2ccc54a3750d1fe9a9ad323dcd36", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_data.py", "max_issues_repo_name": "junseonghwan/linear-progression", "max_issues_repo_head_hexsha": "feda9f18d44f2ccc54a3750d1fe9a9ad323dcd36", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_data.py", "max_forks_repo_name": "junseonghwan/linear-progression", "max_forks_repo_head_hexsha": "feda9f18d44f2ccc54a3750d1fe9a9ad323dcd36", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3076923077, "max_line_length": 90, "alphanum_fraction": 0.6920289855, "include": true, "reason": "import numpy,from scipy", "num_tokens": 320}
|
import os
import glob
import scipy
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.utils.rnn as rnn_utils
from tqdm import tqdm
def collate_fn(batch):
batch.sort(key=lambda x: len(x[1]), reverse=True)
seq, label = zip(*batch)
seq_length = [len(x) for x in label]
data = rnn_utils.pad_sequence(seq, batch_first=True, padding_value=0)
label = rnn_utils.pad_sequence(label, batch_first=True, padding_value=0)
return data, label, seq_length
def collate_fn_atten(batch):
batch.sort(key=lambda x: len(x[1]), reverse=True)
seq, label = zip(*batch)
seq_length = [len(x) for x in label]
data = rnn_utils.pad_sequence(seq, batch_first=True, padding_value=0)
labels = 0
for i in range(len(label)):
if i == 0:
labels = label[i]
else:
labels = torch.cat((labels, label[i]),-1)
return data, labels, seq_length
def collate_fn_cnn_atten(batch):
batch.sort(key=lambda x: len(x[1]), reverse=True)
seq, label = zip(*batch)
seq_length = [len(x) for x in label]
data = rnn_utils.pad_sequence(seq, batch_first=True, padding_value=0)
label_cnn = rnn_utils.pad_sequence(label, batch_first=True, padding_value=255)
labels = 0
label_cnn_ = 0
for i in range(len(label)):
if i == 0:
labels = label[i]
label_cnn_ = label_cnn[0]
else:
labels = torch.cat((labels, label[i]),-1)
label_cnn_ = torch.cat((label_cnn_, label_cnn[i]),-1)
return data, labels, label_cnn_, seq_length
class RawFeatures(data.Dataset):
def __init__(self, txt_path):
with open(txt_path, 'r') as f:
lines = f.readlines()
self.feature_list = [i.split()[0] for i in lines]
self.label_list = [i.split()[-1] for i in lines]
def __getitem__(self, index):
feature_path = self.feature_list[index]
feature = torch.from_numpy(np.load(feature_path, allow_pickle=True))
label = [int(x) for x in self.label_list[index]]
return feature, torch.LongTensor(label)
# return feature, label
def __len__(self):
return len(self.label_list)
def get_atten_mask(seq_lens, batch_size):
max_len = seq_lens[0]
atten_mask = torch.ones([batch_size, max_len, max_len])
for i in range(batch_size):
length = seq_lens[i]
atten_mask[i, :length,:length] = 0
return atten_mask.bool()
|
{"hexsha": "dffa19bed81466ebe56e33bcf1e203089c94eabb", "size": 2497, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_load.py", "max_stars_repo_name": "ishine/E2E-langauge-diarization", "max_stars_repo_head_hexsha": "0bcb3ec82bd6de6fac848c66fd5ad8fe7b284f0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-12-13T10:24:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-22T09:40:58.000Z", "max_issues_repo_path": "data_load.py", "max_issues_repo_name": "ishine/E2E-langauge-diarization", "max_issues_repo_head_hexsha": "0bcb3ec82bd6de6fac848c66fd5ad8fe7b284f0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_load.py", "max_forks_repo_name": "ishine/E2E-langauge-diarization", "max_forks_repo_head_hexsha": "0bcb3ec82bd6de6fac848c66fd5ad8fe7b284f0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-07T02:34:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-23T03:33:54.000Z", "avg_line_length": 31.2125, "max_line_length": 82, "alphanum_fraction": 0.6475770925, "include": true, "reason": "import numpy,import scipy", "num_tokens": 654}
|
import os
import numpy as np
from . import tf
from phi import math
from phi.physics.pressuresolver.solver_api import PoissonSolver
# --- Load Custom Ops ---
current_dir = os.path.dirname(os.path.realpath(__file__))
kernel_path = os.path.join(current_dir, 'cuda/build/pressure_solve_op.so')
if not os.path.isfile(kernel_path):
raise ImportError('CUDA binaries not found at %s. Run "python setup.py tf_cuda" to compile them' % kernel_path)
pressure_op = tf.load_op_library(kernel_path)
class CUDASolver(PoissonSolver):
def __init__(self, accuracy=1e-5, max_iterations=2000):
PoissonSolver.__init__(self, 'CUDA Conjugate Gradient', supported_devices=('GPU',), supports_loop_counter=True, supports_guess=False, supports_continuous_masks=False)
self.accuracy = accuracy
self.max_iterations = max_iterations
def solve(self, divergence, domain, guess, enable_backprop):
"""
:param guess: not used in this implementation, Kernel takes the last pressure value for initial_guess
"""
active_mask, accessible_mask = domain.active_tensor(extend=1), domain.accessible_tensor(extend=1)
# Setup
dimensions = math.staticshape(divergence)[1:-1]
dimensions = dimensions[::-1] # the custom op needs it in the x,y,z order
dim_array = np.array(dimensions)
dim_product = np.prod(dimensions)
mask_dimensions = dim_array + 2
laplace_matrix = tf.zeros(dim_product * (len(dimensions) * 2 + 1), dtype=tf.int8)
# Helper variables for CG, make sure new memory is allocated for each variable.
one_vector = tf.ones(dim_product, dtype=tf.float32)
p = tf.zeros_like(divergence, dtype=tf.float32) + 1
z = tf.zeros_like(divergence, dtype=tf.float32) + 2
r = tf.zeros_like(divergence, dtype=tf.float32) + 3
pressure = tf.zeros_like(divergence, dtype=tf.float32) + 4
# Solve
pressure, iteration = pressure_op.pressure_solve(
dimensions, mask_dimensions, active_mask, accessible_mask, laplace_matrix,
divergence, p, r, z, pressure, one_vector, dim_product, self.accuracy, self.max_iterations
)
return pressure, iteration
|
{"hexsha": "ad0fc2b967c8cac23558472635aace59e697524a", "size": 2217, "ext": "py", "lang": "Python", "max_stars_repo_path": "phi/tf/tf_cuda_pressuresolver.py", "max_stars_repo_name": "VemburajYadav/PhiFlow", "max_stars_repo_head_hexsha": "842c113d1850569b97e30ab0632866bb5bc4b300", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "phi/tf/tf_cuda_pressuresolver.py", "max_issues_repo_name": "VemburajYadav/PhiFlow", "max_issues_repo_head_hexsha": "842c113d1850569b97e30ab0632866bb5bc4b300", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "phi/tf/tf_cuda_pressuresolver.py", "max_forks_repo_name": "VemburajYadav/PhiFlow", "max_forks_repo_head_hexsha": "842c113d1850569b97e30ab0632866bb5bc4b300", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-04T16:31:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T16:31:41.000Z", "avg_line_length": 47.170212766, "max_line_length": 174, "alphanum_fraction": 0.7032025259, "include": true, "reason": "import numpy", "num_tokens": 522}
|
import sys
from collections import Counter
import math
import datetime
import re
import numpy as np
filepath = str(sys.argv[1])
output = str(sys.argv[2])
time = []
i = 0
for ar in sys.argv :
i = i + 1
if ar == "-h" :
time.append(sys.argv[i])
time.append(sys.argv[i+1])
if len(time) > 0 :
dfrom = datetime.datetime.strptime(time[0], '%H:%M')
dfrom = datetime.time(dfrom.hour,dfrom.minute)
dto = datetime.datetime.strptime(time[1], '%H:%M')
dto = datetime.time(dto.hour,dto.minute)
k = 0
past = []
with open(filepath) as f:
for tweet in f:
if math.fmod(k,100000) == 0 :
print(k)
tweet = re.findall('"((?:(?!(?:",")).)*)"', tweet)
tdate = datetime.datetime.strptime(tweet[1], '%Y-%m-%d %H:%M:%S')
tdate = datetime.time(tdate.hour,tdate.minute)
if len(time) == 0 or len(time) > 0 and tdate >= dfrom and tdate <= dto :
if "-medias" in sys.argv :
medias = re.findall('"((?:(?!(?:" ")).)*)"', tweet[12])
medias.extend(re.findall('"((?:(?!(?:",")).)*)"', tweet[13]))
for med in medias :
if med not in past :
past.append(med)
print(tweet[3])
print(med)
label = int(raw_input("garbage (0), informative (1) or involved (2) ? \n"))
outline = '"%s",%s\n' % (med,label)
out = open(output,'a')
out.write(outline)
out.close()
print(" ")
else :
rt = re.findall(r"RT @([a-zA-Z0-9-_]*): (.*)",tweet[3])
if len(rt) == 0 :
print(tweet[3])
label1 = raw_input("informative? \n")
if label1 == 'y' :
label1 = 1
elif label1 == 'n' :
label1 = 0
label2 = raw_input("involved? \n")
if label2 == 'y' :
label2 = 1
elif label2 == 'n' :
label2 = 0
outline = '%s,"%s","%s"\n' % (tweet[0],label1,label2)
out = open(output,'a')
out.write(outline)
out.close()
print(" ")
k = k + 1
|
{"hexsha": "6e71cb5ec6b278fa4c39bd2b78f7a9f774560ad9", "size": 2132, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/manual_labelization.py", "max_stars_repo_name": "jwheatp/twitter-riots", "max_stars_repo_head_hexsha": "cc3aa5586560e1195e0adc4c58eb881446356958", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/manual_labelization.py", "max_issues_repo_name": "jwheatp/twitter-riots", "max_issues_repo_head_hexsha": "cc3aa5586560e1195e0adc4c58eb881446356958", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-02-12T13:11:16.000Z", "max_issues_repo_issues_event_max_datetime": "2015-02-12T13:11:41.000Z", "max_forks_repo_path": "analysis/manual_labelization.py", "max_forks_repo_name": "jwheatp/twitter-riots", "max_forks_repo_head_hexsha": "cc3aa5586560e1195e0adc4c58eb881446356958", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3209876543, "max_line_length": 89, "alphanum_fraction": 0.4751407129, "include": true, "reason": "import numpy", "num_tokens": 606}
|
! PR fortran/64528
! { dg-do compile }
! { dg-options "-O -fno-tree-dce -fno-tree-ccp" }
program pr64528
interface
subroutine foo(x)
integer, value :: x
end subroutine foo
end interface
integer :: x
x = 10
call foo(x)
if(x .ne. 10) then
endif
end program pr64528
subroutine foo(x)
integer, value :: x
x = 11
end subroutine foo
|
{"hexsha": "f6cca4f73e002e5f62d3fee7fe13293db6b75c2e", "size": 363, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr64528.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr64528.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/pr64528.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 17.2857142857, "max_line_length": 49, "alphanum_fraction": 0.6363636364, "num_tokens": 122}
|
'''
This file might work differently in the future! Don't reuse it
'''
import scipy as sp
import mesh
import myOS
import FMM.inputDat as inputDat
base_folder = 'expected_new'
# Flagellum
def write_flag():
folder_expected = base_folder + '/flagellum'
s = sp.linspace(0, 2, 10)
radius = 0.1
azimuth_grid = 5
dt = 0.3
xs, ys, zs = (sp.sin(s / 3), sp.cos(s / 3), s)
positions = zip(xs, ys, zs)
future_positions = zip(xs + 0.1, ys, zs + 1)
flagellum_dict = {'type': 'flagellum',
'positions': positions,
'future positions': future_positions,
'radius': radius,
'azimuth grid': azimuth_grid,
'dt': dt}
system = {'flag': mesh.transform(flagellum_dict)}
with myOS.working_directory(folder_expected):
with open('remembery.py', 'w') as remembery:
if 'system' in system.keys():
inputDat.writeInputAndRemembery_NEW(system, remembery=remembery)
else: # 'Transform the system, so the Gary's code runs OK.
wrapped_system = mesh.transform(system)
inputDat.writeInputAndRemembery_NEW(wrapped_system, remembery=remembery)
# Cylinder
def write_cylinder():
folder_expected = base_folder + '/cylinder'
s = sp.linspace(0, 2, 10)
radius = 0.1
azimuth_grid = 5
dt = 0.3
xs, ys, zs = (sp.zeros_like(s), sp.zeros_like(s), s)
positions = zip(xs, ys, zs)
future_positions = zip(xs + 0.1, ys, zs + 1)
flagellum_dict = {'type': 'flagellum',
'positions': positions,
'future positions': future_positions,
'radius': radius,
'azimuth grid': azimuth_grid,
'dt': dt}
system = {'flag': mesh.transform(flagellum_dict)}
with myOS.working_directory(folder_expected):
with open('remembery.py', 'w') as remembery:
if 'system' in system.keys():
inputDat.writeInputAndRemembery_NEW(system, remembery=remembery)
else: # 'Transform the system, so the Gary's code runs OK.
wrapped_system = mesh.transform(system)
inputDat.writeInputAndRemembery_NEW(wrapped_system, remembery=remembery)
def write_ellipsoid():
folder_expected = base_folder + '/ellipsoid'
radius = 0.1
position = (0, 0, 1)
velocity = (0, 2, 0)
sphere_dict = {'type': 'ellipsoid',
'position': position, # - in Gary's code ellipse is always created in (0,0,0) -> add translation
'lengths': (radius, radius, radius), 'axe1': (1, 0, 0), 'axe2': (0, 1, 0), 'grid': 6}
'''
{'type': 'ellipsoid',
'position': (0, 0, 0),
'lengths': lengths, 'axe1': axe1, 'axe2': axe2, 'grid': grid}
'''
position2 = (-2, 0, 0)
lengths = (3, 0.4, 0.3)
axe1 = (1, 0, 0)
axe2 = (0, 1, 0)
velocity2 = (3, 0, 0)
grid2 = 4
ellipsoid_dict = {'type': 'ellipsoid',
'position': (0, 0, 0), # - in Gary's code ellipse is always created in (0,0,0) -> add translation
'lengths': lengths, 'axe1': axe1, 'axe2': axe2, 'grid': grid2}
system = {'sphere': mesh.transform(sphere_dict, velocity=velocity, translation=position),
'ellipsoid': mesh.transform(ellipsoid_dict, velocity=velocity2, translation=position2)}
with myOS.working_directory(folder_expected):
with open('remembery.py', 'w') as remembery:
if 'system' in system.keys():
inputDat.writeInputAndRemembery_NEW(system, remembery=remembery)
else: # 'Transform the system, so the Gary's code runs OK.
wrapped_system = mesh.transform(system)
inputDat.writeInputAndRemembery_NEW(wrapped_system, remembery=remembery)
def write_plane():
folder_expected = base_folder + '/plane'
sizeX, sizeY = 2, 6
gridX, gridY = 3, 4
width = 0.5
plane = plane_dict = {'type': 'plane',
'p0': (-sizeX / 2, -sizeY / 2, - width),
'p1': (sizeX / 2, -sizeY / 2, -width),
'p2': (-sizeX / 2, sizeY / 2, -width),
'width': width, 'grid1': gridX, 'grid2': gridY, 'centers': []}
radius = 0.1
position = (0, 0, 1)
velocity = (0, 0, -5)
sphere_dict = {'type': 'ellipsoid',
'position': position, # - in Gary's code ellipse is always created in (0,0,0) -> add translation
'lengths': (radius, radius, radius), 'axe1': (1, 0, 0), 'axe2': (0, 1, 0), 'grid': 3}
system = {'sphere': mesh.transform(sphere_dict, velocity=velocity, translation=position),
'plane': mesh.transform(plane_dict)}
with myOS.working_directory(folder_expected):
with open('remembery.py', 'w') as remembery:
if 'system' in system.keys():
inputDat.writeInputAndRemembery_NEW(system, remembery=remembery)
else: # 'Transform the system, so the Gary's code runs OK.
wrapped_system = mesh.transform(system)
inputDat.writeInputAndRemembery_NEW(wrapped_system, remembery=remembery)
def write_cuboid():
'''
In GK's code rectangle=cuboid
'''
folder_expected = base_folder + '/cuboid'
sizes = (2, 6, 0.5)
grids = (3, 4, 3)
velocityfield = lambda x, y, z: (2, 3, 4)
sizeX, sizeY, sizeZ = sizes
gridX, gridY, gridZ = grids
rectangle_dict = {'type': 'rectangle',
'p0': (-sizeX / 2, -sizeY / 2, - sizeZ),
'p1': (sizeX / 2, -sizeY / 2, -sizeZ),
'p2': (-sizeX / 2, sizeY / 2, -sizeZ),
'p3': (-sizeX / 2, -sizeY / 2, 0),
'grid1': gridX, 'grid2': gridY, 'grid3': gridZ,
'velocityfield': velocityfield}
system = {'cuboid': mesh.transform(rectangle_dict)}
with myOS.working_directory(folder_expected):
with open('remembery.py', 'w') as remembery:
if 'system' in system.keys():
inputDat.writeInputAndRemembery_NEW(system, remembery=remembery)
else: # 'Transform the system, so the Gary's code runs OK.
wrapped_system = mesh.transform(system)
inputDat.writeInputAndRemembery_NEW(wrapped_system, remembery=remembery)
def write_flat_ellipse():
'''
My new mesh object
'''
folder_expected = base_folder + '/flat_ellipse'
radiusX, radiusY, width = 4, 4, 10
n_elems, n_layers = 10, 2
position = (1, 23, 0)
flat_ellipse = mesh.flat_ellipse_create(name='flat_ellipse',
radiusX=radiusX, radiusY=radiusY, width=width,
n_elems=n_elems,
extra_side_node_layers=n_layers,
position=position)
import FBEM
FBEM.write_input_and_remembery(folder_expected, flat_ellipse)
write_flag()
write_cylinder()
write_ellipsoid()
write_plane()
write_cuboid()
#write_flat_ellipse()
|
{"hexsha": "4cd979db8f9304a901e07e45859e6d48b11aee58", "size": 7240, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/data/triangulation/create_input_for_test.py", "max_stars_repo_name": "icemtel/stokes", "max_stars_repo_head_hexsha": "022de2417919a18ed5b0262111e430384053137d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/data/triangulation/create_input_for_test.py", "max_issues_repo_name": "icemtel/stokes", "max_issues_repo_head_hexsha": "022de2417919a18ed5b0262111e430384053137d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/data/triangulation/create_input_for_test.py", "max_forks_repo_name": "icemtel/stokes", "max_forks_repo_head_hexsha": "022de2417919a18ed5b0262111e430384053137d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8415841584, "max_line_length": 120, "alphanum_fraction": 0.5620165746, "include": true, "reason": "import scipy", "num_tokens": 1951}
|
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn import model_selection
from functools import partial
import optuna
from . import regression_metrics
def optimize(trial, df):
n_estimators = trial.suggest_int("n_estimators", 50, 1000)
num_leaves = trial.suggest_int("num_leaves", 10, 500)
learning_rate = trial.suggest_uniform("learning_rate", 0.01, 1.0)
modely = lgb.LGBMRegressor(
n_estimators=n_estimators, num_leaves=num_leaves, learning_rate=learning_rate)
modelx = lgb.LGBMRegressor(
n_estimators=n_estimators, num_leaves=num_leaves, learning_rate=learning_rate)
modelf = lgb.LGBMClassifier(
n_estimators=n_estimators, num_leaves=num_leaves, learning_rate=learning_rate)
kf = model_selection.GroupKFold(n_splits=5, groups=df.iloc[:,-1])
comp_metric = []
for fold, (train_idx, val_idx) in enumerate(kf.split(df)):
df_train = df.loc[train_idx]
df_val = df.loc[val_idx]
x_train = df_train.iloc[:,:-4]
y_trainx = df_train.iloc[:,-4]
y_trainy = df_train.iloc[:,-3]
y_trainf = df_train.iloc[:,-2]
x_val = df_val.iloc[:,:-4]
y_valx = df_val.iloc[:,-4]
y_valy = df_val.iloc[:,-3]
y_valf = df_val.iloc[:,-2]
modelx.fit(x_train, y_trainx)
modely.fit(x_train, y_trainy)
modelf.fit(x_train, y_trainf)
test_predsx = modelx.predict(x_val)
test_predsy = modely.predict(x_val)
test_predsf = modelf.predict(x_val)
fold_metric = regression_metrics.iln_comp_metric(test_predsx, test_predsy, test_predsf, y_valx, y_valy, y_valf)
comp_metric.append(fold_metric)
return np.mean(comp_metric)
def hyperpara_search_optuna(df):
optimization_function = partial(optimize, df=df)
study = optuna.create_study(direction="minimize")
study.optimize(optimization_function, n_trials=15)
|
{"hexsha": "946581382aa0406357c81abaecbfe9399cdbfa14", "size": 1972, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/search_optuna.py", "max_stars_repo_name": "BAfsharmanesh/New_project", "max_stars_repo_head_hexsha": "766e43494bd55217abf9f8be22df42e2bc7e678c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/search_optuna.py", "max_issues_repo_name": "BAfsharmanesh/New_project", "max_issues_repo_head_hexsha": "766e43494bd55217abf9f8be22df42e2bc7e678c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/search_optuna.py", "max_forks_repo_name": "BAfsharmanesh/New_project", "max_forks_repo_head_hexsha": "766e43494bd55217abf9f8be22df42e2bc7e678c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3015873016, "max_line_length": 119, "alphanum_fraction": 0.6749492901, "include": true, "reason": "import numpy", "num_tokens": 518}
|
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import datetime
from keras import models
from keras.layers import Dense
if __name__ == "__main__":
startTime = datetime.datetime.now()
x = np.load('data/train_w2v_data_array.npy')
y = np.load('data/train_w2v_target_array.npy')
y = y.astype('int')
y = y.flatten()
z = np.load('data/test_w2v_data_array.npy')
t = np.load('data/test_w2v_target_array.npy')
t = t.astype('int')
t = t.flatten()
# print("Shape of x: ", np.shape(x))
# print("Shape of y: ", np.shape(y))
# print("Shape of z: ", np.shape(z))
# print("Shape of t: ", np.shape(t))
learningRate = [0.001]
for lr in learningRate:
clf = MLPClassifier(solver='sgd', hidden_layer_sizes=(30,20), batch_size='auto',
learning_rate='adaptive', learning_rate_init=lr, early_stopping=True)
clf.fit(x, y)
p = clf.predict(z)
y_scores = clf.predict_proba(z)
# predicted = predict_nn(x, y, z, clf)
print("For learning rate: ", lr)
print("Neural Network with 100 features")
# Compute accuracy
accuracy = accuracy_score(t, p, normalize=False)
print("Accuracy: ", (accuracy / len(t)) * 100)
# Confusion matrix
confusion_matrix = confusion_matrix(t, p)
print("Confusion Matrix:\n", confusion_matrix)
# Replace 4s with 1s
t[np.where(t == 4)] = 1
p[np.where(p == 4)] = 1
# Plot the Precision-Recall curve
precision, recall, _ = precision_recall_curve(t, y_scores[:, 1])
plt.figure()
plt.step(recall, precision, color='b', alpha=0.2, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
average_precision = average_precision_score(t, p)
plt.title('Neural Network Precision-Recall curve: AP={0:0.2f}'.format(average_precision))
filename = "data/w2v_NN_" + str(lr) + "_precisionRecall.png"
plt.savefig(filename)
# plt.show()
# NN = models.Sequential()
# NN.add(Dense(32,activation='relu',input_dim=100))
# NN.add(Dense(1,activation='sigmoid'))
# NN.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])
# NN.fit(x,y,epochs=9,batch_size=32,verbose=2)
# score = NN.evaluate(z,t,batch_size=128,verbose=2)
# print(NN.metrics_names)
# print(score[1])
#
# endTime = datetime.datetime.now() - startTime
# print("Total time taken to train: ", endTime)
|
{"hexsha": "176386321422adb476c5455a04952a065bd88616", "size": 2907, "ext": "py", "lang": "Python", "max_stars_repo_path": "W2V_NN.py", "max_stars_repo_name": "nivedit1/TwitterSentimentAnalysis", "max_stars_repo_head_hexsha": "972fdb46fab6f07748d685b94b80450cb5131c5f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "W2V_NN.py", "max_issues_repo_name": "nivedit1/TwitterSentimentAnalysis", "max_issues_repo_head_hexsha": "972fdb46fab6f07748d685b94b80450cb5131c5f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "W2V_NN.py", "max_forks_repo_name": "nivedit1/TwitterSentimentAnalysis", "max_forks_repo_head_hexsha": "972fdb46fab6f07748d685b94b80450cb5131c5f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-12-06T02:58:29.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-18T19:23:25.000Z", "avg_line_length": 32.3, "max_line_length": 97, "alphanum_fraction": 0.6339869281, "include": true, "reason": "import numpy", "num_tokens": 752}
|
# indicator of the L0 norm ball with given (integer) radius
"""
IndBallL0(r::Int=1)
Returns the function `g = ind{x : countnz(x) ⩽ r}`, for an integer parameter `r > 0`.
"""
immutable IndBallL0{I <: Integer} <: IndicatorNonconvex
r::I
function IndBallL0(r::I)
if r <= 0
error("parameter r must be a positive integer")
else
new(r)
end
end
end
IndBallL0{I <: Integer}(r::I) = IndBallL0{I}(r)
function (f::IndBallL0){T <: RealOrComplex}(x::AbstractArray{T})
if countnz(x) > f.r
return +Inf
end
return 0.0
end
function prox!{T <: RealOrComplex}(f::IndBallL0, x::AbstractArray{T}, y::AbstractArray{T}, gamma::Real=1.0)
p = []
if ndims(x) == 1
p = selectperm(x, 1:f.r, by=abs, rev=true)
else
p = selectperm(x[:], 1:f.r, by=abs, rev=true)
end
sort!(p)
idx = 1
for i = 1:length(p)
y[idx:p[i]-1] = 0.0
y[p[i]] = x[p[i]]
idx = p[i]+1
end
y[idx:end] = 0.0
return 0.0
end
fun_name(f::IndBallL0) = "indicator of an L0 pseudo-norm ball"
fun_dom(f::IndBallL0) = "AbstractArray{Real}, AbstractArray{Complex}"
fun_expr(f::IndBallL0) = "x ↦ 0 if countnz(x) ⩽ r, +∞ otherwise"
fun_params(f::IndBallL0) = "r = $(f.r)"
function prox_naive{T <: RealOrComplex}(f::IndBallL0, x::AbstractArray{T}, gamma::Real=1.0)
p = sortperm(abs.(x)[:], rev=true)
y = similar(x)
y[p[1:f.r]] = x[p[1:f.r]]
y[p[f.r+1:end]] = 0.0
return y, 0.0
end
|
{"hexsha": "a325fd66e8290bc029a55a02d2e30fbeaa8747e0", "size": 1406, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/functions/indBallL0.jl", "max_stars_repo_name": "mfalt/ProximalOperators.jl", "max_stars_repo_head_hexsha": "ab76ed9c93f9ec778281ad14f7bd3208b94c705d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/functions/indBallL0.jl", "max_issues_repo_name": "mfalt/ProximalOperators.jl", "max_issues_repo_head_hexsha": "ab76ed9c93f9ec778281ad14f7bd3208b94c705d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/functions/indBallL0.jl", "max_forks_repo_name": "mfalt/ProximalOperators.jl", "max_forks_repo_head_hexsha": "ab76ed9c93f9ec778281ad14f7bd3208b94c705d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8305084746, "max_line_length": 107, "alphanum_fraction": 0.6088193457, "num_tokens": 544}
|
include("./MT1D.jl")
module MT1DGeneticInversion
using MT1D
export LayerBC, Inversion, evolve!
"""
Description
===========
`LayerBC` defines a set of boundary conditions for a layer. One instance
represents either the resistivity or depth boundaries.
Fields
======
- `min::Integer`: Lower boundary for the layer.
- `max::Integer`: Upper boundary for the layer.
"""
type LayerBC
min::Integer
max::Integer
end
include("./Model.jl")
include("./Population.jl")
include("./Inversion.jl")
end
|
{"hexsha": "f5e69382464a409a959d1b682f8b35081e66001c", "size": 502, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MT1DGeneticInversion.jl", "max_stars_repo_name": "alexjohnj/genetic-mt1d", "max_stars_repo_head_hexsha": "0822ae95ae0d239b54a7b094cbd7c25a51557325", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-07-20T03:57:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-25T13:49:20.000Z", "max_issues_repo_path": "src/MT1DGeneticInversion.jl", "max_issues_repo_name": "alexjohnj/genetic-mt1d", "max_issues_repo_head_hexsha": "0822ae95ae0d239b54a7b094cbd7c25a51557325", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MT1DGeneticInversion.jl", "max_forks_repo_name": "alexjohnj/genetic-mt1d", "max_forks_repo_head_hexsha": "0822ae95ae0d239b54a7b094cbd7c25a51557325", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-12-17T13:30:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-03T07:31:34.000Z", "avg_line_length": 16.7333333333, "max_line_length": 72, "alphanum_fraction": 0.7051792829, "num_tokens": 129}
|
C***********************************************************************
C***********************************************************************
C
C Version: 0.3
C Last modified: December 27, 1994
C Authors: Esmond G. Ng and Barry W. Peyton
C
C Mathematical Sciences Section, Oak Ridge National Laboratory
C
C***********************************************************************
C***********************************************************************
C****** DSCAL .... SCALE A VECTOR **************
C***********************************************************************
C***********************************************************************
C
C PURPOSE - THIS ROUTINE COMPUTES A <-- AX, WHERE A IS A
C SCALAR AND X IS A VECTOR.
C
C INPUT PARAMETERS -
C N - LENGTH OF THE VECTOR X.
C A - SCALAR MULIPLIER.
C X - VECTOR TO BE SCALED.
C
C OUTPUT PARAMETERS -
C X - REPLACED BY THE SCALED VECTOR, AX.
C
C***********************************************************************
C
SUBROUTINE DSCAL ( N, A, X )
C
C***********************************************************************
C
C -----------
C PARAMETERS.
C -----------
INTEGER N
DOUBLE PRECISION A, X(N)
C
C ----------------
C LOCAL VARIABLES.
C ----------------
INTEGER I
C
C***********************************************************************
C
DO 100 I = 1, N
X(I) = A * X(I)
100 CONTINUE
RETURN
END
|
{"hexsha": "73b0c7560800186dde973d620e0f784f269d4683", "size": 1578, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "LIB/dscal.f", "max_stars_repo_name": "Pangqiyuangh/SeIInv", "max_stars_repo_head_hexsha": "2a6713dc19f0f816ecbc5d20c77b5c0a1974b852", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-30T06:30:22.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-30T06:37:54.000Z", "max_issues_repo_path": "LIB/dscal.f", "max_issues_repo_name": "Pangqiyuangh/SeIInv", "max_issues_repo_head_hexsha": "2a6713dc19f0f816ecbc5d20c77b5c0a1974b852", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LIB/dscal.f", "max_forks_repo_name": "Pangqiyuangh/SeIInv", "max_forks_repo_head_hexsha": "2a6713dc19f0f816ecbc5d20c77b5c0a1974b852", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9411764706, "max_line_length": 72, "alphanum_fraction": 0.2877059569, "num_tokens": 299}
|
[STATEMENT]
theorem wls_fresh_vsubst_ident[simp]:
assumes "wls s X" and "fresh ys y X"
shows "(X #[y1 // y]_ys) = X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. X #[y1 // y]_ys = X
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
wls s X
fresh ys y X
goal (1 subgoal):
1. X #[y1 // y]_ys = X
[PROOF STEP]
by(simp add: fresh_vsubst_ident)
|
{"llama_tokens": 171, "file": "Binding_Syntax_Theory_Well_Sorted_Terms", "length": 2}
|
# [Introductory applied machine learning (INFR10069)](https://www.learn.ed.ac.uk/webapps/blackboard/execute/content/blankPage?cmd=view&content_id=_2651677_1&course_id=_53633_1)
# Lab 5: Neural Networks
*by [James Owers](https://jamesowers.github.io/), University of Edinburgh 2017*
1. [Introduction](#Introduction)
* [Lab Outline](#Lab-Outline)
* [The Data](#The-Data)
1. [Part 1 - Introducing the Neural Network Model](#Part-1---Introducing-the-Neural-Network-Model)
* [Resources to Watch and Read pt. 1](##Resources-to-Watch-and-Read-pt.-1)
* [Model Design](#Model-Design)
* [The Cost Space](#The-Cost-Space)
1. [Part 2 - Fitting the Model & Optimisation](#Part-2---Fitting-the-Model-&-Optimisation)
* [Resources to Watch and Read pt. 2](#Resources-to-Watch-and-Read-pt.-2)
* [Finding the Best Parameters](#Finding-the-Best-Parameters)
* [Gradient Descent](#Gradient-Descent)
* [Backpropagation](#Backpropagation)
1. [Part 3 - Implementation From Scratch](#Part-3---Implementation-From-Scratch!)
1. [Part 4 - Implementation With Sklearn](#Part-4---Implementation-with-Sklearn)
1. [Moar?!](#Please-sir...I-want-some-more)
## Import packages
```python
# https://docs.python.org/2/library/__future__.html
# make printing and division act like python 3
from __future__ import division, print_function
# General
import sys
import os
import copy
from IPython.display import Image, HTML
# Data structures
import numpy as np
import pandas as pd
# Modelling
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from scipy.optimize import check_grad
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
# Local module adjacent to this notebook
import iaml
from iaml.data import load_letters
# http://ipython.readthedocs.io/en/stable/interactive/magics.html
%matplotlib inline
```
## Introduction
This lab:
1. introduces a simple neural network model in a supervised learning setting
1. provides impetus to understand the fitting procedure of that, and other networks
1. encourages you to implement a model from scratch
1. models the same problem with the sklearn package
1. makes you think about what you've done!
It does not discuss in detail:
1. any of the plethora of different activation functions you can use e.g. RELUs, SELUs, Tanh, ...
1. how to initialise the parameters and why that matters
1. issues with the fitting process e.g. local optima, and how to avoid them e.g. learning rate schedulers, momentum, RMSProp, Adam, cyclic learning rates
1. issues with model complexity e.g. overfitting, and solutions such as dropout, regularisation, or using [shedloads of data](https://what-if.xkcd.com/63/)
1. other tricks for speeding up and stablising fitting such as batch sizes, weight norm, layer norm
1. deep networks and their tricks like skip connections, pooling, convolutions
1. nor other more complex architectures like CNNs, RNNs, LSTMs, GANs, etc. etc.
1. many, many, MANY other things (that probably were published, like, [yesterday](https://arxiv.org/abs/1711.04340v1))
However, if you understand what is in this notebook well, **you will have the ability to understand [all of these things](https://i.imgflip.com/1zn8p9.jpg)**.
### Lab outline
I provide you with a function that creates data then link you to some excellent resources to learn the basics. These resources are superb, short, and free. I highly, highly recommend setting aside a couple of hours to give them a good watch/read and, at the very least, use them for reference.
After you have had a crack at the problems, I'll release the solutions. The solutions, particularly to part 3, walk you through the process of coding a simple neural neural network in detail.
Parts 3 & 4 are practical, parts 1 & 2 are links to external resources to read. Whilst I recommend you soak up some context first with 1 & 2, feel free to jump in at the deep end and get your hands dirty with part 3 or 4.
### The Data
Throughout this lab we are going to be using a simple classification example: the TC classification problem (not to be confused with the real [TC](https://www.youtube.com/watch?v=NToYkBYezZA)). This is a small toy problem where we, initially, try to distinguish between 3x3 grids that look like Ts and Cs. Let's create the dataset and have a look...
I have written a function `load_letters()` to generate synthetic data. For now, you will use the data generated below, but later you have opportunity to play with generating different data if you like. The function is located in the `iaml` module adjacent to this notebook - feel free to check out the code but I advise you **do not edit it**. Run (and don't edit) the next few cells to create and observe the data.
```python
bounds = [-1, 1]
X, y, y_labels = load_letters(categories=['T', 'C'],
num_obs=[50, 50],
bounds=bounds,
beta_params=[[1, 8], [8, 1]],
shuffle=True,
random_state=42)
```
Let's print the data (I'm just creating a Pandas DataFrame for display, I probably wont use this object again)
```python
pd.set_option("max_rows", 10)
df = pd.DataFrame(
np.hstack(
[np.around(X,2),
y[:, np.newaxis],
np.array([y_labels[ii] for ii in y])[:, np.newaxis]
]
),
columns = ['x{}'.format(ii) for ii in range(9)] + ['Class (numeric)', 'Class Label']
)
df
```
```python
pd.reset_option("max_rows")
```
The data are arranged as vectors for your convenience, but they're really `3 x 3` images. Here's a function to plot them.
```python
def plot_grid(x, shape=None, **heatmap_params):
"""Function for reshaping and plotting vector data.
If shape not given, assumed square.
"""
if shape is None:
width = int(np.sqrt(len(x)))
if width == np.sqrt(len(x)):
shape = (width, width)
else:
print('Data not square, supply shape argument')
sns.heatmap(x.reshape(shape), annot=True, **heatmap_params)
```
```python
for ii in range(3):
plt.figure()
plot_grid(X[ii], vmin=bounds[0], vmax=bounds[1], cmap='Greys')
plt.title('Observation {}: Class = {} (numeric label {})'.format(ii, y_labels[y[ii]], y[ii]))
plt.show()
```
Finally, let's make the train and test split. Again, don't alter this code.
```python
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=42)
```
```python
X_train, X_valid, y_train, y_valid = train_test_split(
X_train, y_train, test_size=0.33, random_state=42)
```
```python
[dd.shape for dd in [X_train, X_valid, X_test, y_train, y_valid, y_test]]
```
## Part 1 - Introducing the Neural Network Model
### Resources to Watch and Read pt. 1
**Reading/watching time:** 30 minutes
First, watch this video from 3 Blue 1 Brown: [But what *is* a Neural Network? | Deep learning, chapter 1](https://www.youtube.com/watch?v=aircAruvnKk)
If you prefer reading, try 2 sections of Nielsen's Book Chapter 1:
* [Sigmoid Neurons](http://neuralnetworksanddeeplearning.com/chap1.html#sigmoid_neurons)
* and [The Architecture of Neural Networks](http://neuralnetworksanddeeplearning.com/chap1.html#the_architecture_of_neural_networks)
### Model Design
Just so as there's something in this notebook to quickly reference - here's a nice illustration of what's going on in a neural net. Within the calculation of the $z$'s you'll see the learned **parameters**: $w$'s and $b$'s - these are the weights and biases respectively. *N.B. I omit the bias $b$ parameters in the Part 3 implementation.* The functions $g$ are the activation functions.
### The Cost Space
When we talk about the cost space, loss$^*$ space, or cost surface, we are talking about a function that changes with respect to the parameters. This function determines how well the network is performing - a low cost is good, a high cost is bad. A simple example for two parameters is shown below. **Our goal is to update the parameters such that we find the global minimum of the cost function.**
$^*$ 'loss' and 'cost' are interchangeable terms - you'll see them both around but I try to stick to 'cost'!
N.B. The cost function is often referred to with different letters e.g. $J(w)$, $C(\theta)$, $\mathcal{L}(x)$, and $E(w)$
## Part 2 - Fitting the Model & Optimisation
### Resources to Watch and Read pt. 2
**Watching/reading time:** ~1 hour
First, watch these two videos from 3 Blue 1 Brown:
1. [Gradient descent, how neural networks learn | Deep learning, chapter 2](https://www.youtube.com/watch?v=IHZwWFHWa-w)
2. [What is backpropagation and what is it actually doing? | Deep learning, chapter 3](https://www.youtube.com/watch?v=Ilg3gGewQ5U)
This will take you just over half an hour (if you watch at 1x speed). They are really excellent and well worth the time investment.
Again, if you prefer reading try Nielsen's section [Learning with Gradient Descent](http://neuralnetworksanddeeplearning.com/chap1.html#learning_with_gradient_descent)
### Finding the Best Parameters
So, we've got a function, let's call it $C(\theta)$ that puts a number on how well the neural network is doing. We provide the function with the parameters $\theta$ and it spits out the cost$^*$. We could just randomly chose values for $\theta$ and select the ones that result in the best cost...but that might take a long time. We'd also need to define a way to randomly select parameters as well. What if the best parameter setting is very unlikely to be selected?
**Calculus to the rescue!** The cost $C(\theta)$ is a function and, whilst we can't see the surface without evaluating it everywhere (expensive!), we can calculate the derivative with respect to the parameters $\frac{\partial C(\theta)}{\partial \theta}$. The derivative **tells you how the function value changes if you change $\theta$**.
For example, imagine $\theta$ is 1D and I tell you that $\frac{\partial C(\theta)}{\partial \theta} = 10\theta$. This means that if I increase $theta$ by 2, the cost function will go up by 20. Which way will you update $\theta$? You want to *decrease* the cost, so you would want to *decrease* $\theta$ by some amount.
The only thing we need to do is choose a cost function $C(\theta)$ that has a derivative function $\frac{\partial C(\theta)}{\partial \theta}$...and that is easy!
$^*$It's much easier if you imagine $\theta$ as just one number to start with, but the maths is basically the same as $\theta$ becomes a vector (or matrix) of numbers.
### Gradient Descent
So how do we actually update the parameters?! All update the parameters in the opposite direction to the gradient; you always try to take a step 'downhill'. Here's the formula:
$$
\theta \leftarrow \theta - \eta \frac{\partial C(\theta)}{\partial \theta}
$$
where "$\leftarrow$" means "update from", and $\eta$ is the "learning rate" - a hyperparameter you can choose. If you increase $\eta$ you make bigger updates to $\theta$, and vice versa.
There are many more complicated ways to update the parameters using the gradient of the cost function, but they all have this same starting point.
Below is an example cost surface. A few things to note:
* The axes should be labelled $\theta_0$ (1, -1.5) and $\theta_1$ (-1, 1) on the 'flat' axes, and $C(\theta)$ (-4, 4) on the vertical axis
* The surface is shown - we don't have direct access to this in reality. To show it, the creator has queried the cost function *at every [$\theta_0$, $\theta_1$] location* and plotted it
* The animated balls rolling along the surface are different gradient descent algorithms - each frame of the GIF shows one update. The equation shown above is SGD - the GIF highlights a potential issue with the algorithm!
Visualisation by [Alec Radford](https://blog.openai.com/tag/alec-radford/), summarised excellently in [this blog post](http://ruder.io/optimizing-gradient-descent/).
### Backpropagation
**Reading/watching time:** 1 hour
Right...it's time for some derivatives. If you've been liking the videos - go ahead and watch the next in the series:
1. [Backpropagation calculus | Appendix to deep learning chapter 3](https://www.youtube.com/watch?v=tIeHLnjs5U8)
If you have time, I recommend now having a crack at reading half of [Nielsen Chapter 2](http://neuralnetworksanddeeplearning.com/chap2.html), up to and including the section entitled [The Backpropagation Algorithm](http://neuralnetworksanddeeplearning.com/chap2.html#the_backpropagation_algorithm).
I'm just going to write out some derivatives you're going to find useful for Part 3 below:
$$
\begin{align}
z^{(L)} &= W^{(L)}a^{(L-1)} \\
\frac{\partial z^{(L)}}{\partial W} &= a^{(L-1)}
\end{align}
$$
$$
\begin{align}
\text{linear}[z] &= z \\
\frac{\partial \text{linear}[z]}{\partial z} &= 1 \\
\end{align}
$$
$$
\begin{align}
\text{sigmoid}[z] = \sigma[z] &= \frac{1}{1 + e^{-z}} = \frac{e^{z}}{e^{z} + 1}\\
\frac{\partial \sigma[z]}{\partial z} &= \frac{e^{z}}{e^{z} + 1} - (\frac{e^{z}}{e^{z} + 1})^2 \\
&= \frac{e^{z}}{e^{z} + 1} ( 1 - \frac{e^{z}}{e^{z} + 1} ) \\
&= \sigma[z] (1 - \sigma[z])
\end{align}
$$
$$
\begin{align}
\text{crossentropy}[y, a] = C[y, a] &= - \frac{1}{N} \sum_{i=1}^N y_i \log a_i + (1-y_i)\log(1-a_i) \\
\frac{\partial C[y_i, a_i]}{\partial a_i} &= \frac{1 - y_i}{1 - a_i} + \frac{y_i}{a_i}
\end{align}
$$
And finally, this is all backpropagation really is...
$$
\begin{align}
\frac{\partial C[y_i, a_i]}{\partial w_j} &= \frac{\partial a_i}{\partial w_j}\frac{\partial C[y_i, a_i]}{\partial a_i}\\
&= \frac{\partial z_k}{\partial w_j}\frac{\partial a_i}{\partial z_k}\frac{\partial C[y_i, a_i]}{\partial a_i}\\
\end{align}
$$
Challenge: derive these yourself.
#### Reading extension
For more on gradient based optimisers [check out this blog post](http://ruder.io/optimizing-gradient-descent/)
For another look at backpropagation - try [Christopher Olah's blog](http://colah.github.io/posts/2015-08-Backprop/)
## Part 3 - Implementation From Scratch!
### ========== Question 3.1 ==========
First thing is first: **don't get stuck on this**. I recommend you attempt this question for an hour and, if you don't get anywhere, move on to Question 3.2. You can even move straight on to Part 4. It's exactly the same problem addressed here in 3.1, but using sklearn instead of coding it yourself.
#### Model Specification
We are going to fit a very small neural network to classify the TC data. Here is the specification of the model:
1. Input of size 9
1. Hidden layer of size 3
* Linear activation function
1. Output layer of size 1
* Logistic activation function
As for the **cost function**: use Cross-Entropy. However, if you're getting bogged down with derivatives, feel free to try squared error to start with (this is what Nielsen and 3 Blue 1 Brown start with in their tutorials). Squared error is [not necessarily the right cost function to use](https://jamesmccaffrey.wordpress.com/2013/11/05/why-you-should-use-cross-entropy-error-instead-of-classification-error-or-mean-squared-error-for-neural-network-classifier-training/) but it will still work!
For a given input vector $x$, we can predict an output probability $a^{(2)}$ (were the $^{(2)}$ indicates the layer number, *not a power* - I'm following 3 Blue 1 Brown notation as best I can) using the following formula:
$$
\begin{align}
a^{(2)} &= f^{(2)}[z^{(2)}] \\
&= f^{(2)}[W^{(2)}a^{(1)}] \\
&= f^{(2)}[W^{(2)}f^{(1)}[z^{(1)}]] \\
&= f^{(2)}[W^{(2)}f^{(1)}[W^{(1)}a^{(0)}]] \\
&= f^{(2)}[W^{(2)}f^{(1)}[W^{(1)}x]] \\
&= \sigma[W^{(2)}(W^{(1)}x)]
\end{align}
$$
where:
* $f^{(2)}$ is the activation function of the output layer (a sigmoid function $\sigma[]$)
* $f^{(1)}$ is the activation function of the hidden layer (the identity - 'linear activation')
* $W^{(2)}$ and $W^{(1)}$ are the parameters to learn
* $a^{(L)} = f^{(L)}[z^{(L)}]$ are the activations **exiting** layer $^{(L)}$
* $z^{(L)} = W^{(L)}a^{(L-1)}$ is the pre-activation weighted sum calculated **within** layer $^{(L)}$
The formula for the Cross-Entropy cost function is:
$$
C(a) = - \frac{1}{N} \sum_{i=1}^N y_i \log a_i + (1-y_i)\log(1-a_i)
$$
Notice how only one term in the sum is ever non-zero because $y_i$ is only ever 0 or 1. In our case, $N$ is the number of data observations in the dataset.
##### Parameters
The parameters of the model are two matrices:
1. $W^{(2)}$ - $3 \times 9$ matrix
* used within the hidden layer (the $1^{st}$ layer) to get $z^{(1)} = W^{(1)}x$ for some $9 \times 1$ input vector $x$. $z^{(1)}$ is thus $3 \times 1$.
1. $W^{(1)}$ - $1 \times 3$ matrix
* used within the output layer (the $2^{nd}$ layer) to get $z^{(2)} = W^{(2)}a^{(1)}$ for some $3 \times 1$ input vector $a^{(1)}$. $z^{(2)}$ is thus $1 \times 1$.
**Note that I'm not asking you to fit *bias parameters*.**
You'll often see parameters referred to as $\theta$, it's a catch all term. In our case it's just a list of all the weights, $\theta = [W^{(1)}, W^{(2)}]$. **We have 3 x 9 + 3 x 1 = 30 parameters to learn in total.**
##### Advice
You can use any of the equations and code I've given you or linked you to in this lab but **you do not have to!** You're free to code as you please. Personally, since this is a simple example, I did not do anything fancy (I didn't create any objects with methods and attributes). I simply:
* created a list containing the two parameter matrices `theta = [W1, W2]`
* created a function to do prediction (the forward pass)
* created a function to do the backward pass (updating the weights)
* This is the tricky bit - I coded functions that are the [relevant derivatives](#http://localhost:8888/notebooks/10_Lab_5_Neural_Networks.ipynb#Backpropagation), and wrote code to iteratively pass back the 'deltas' - (I think Nielsen's equations [here](http://neuralnetworksanddeeplearning.com/chap2.html#the_backpropagation_algorithm) are very useful)
* wrote a training loop which called these two main functions
* each epoch calls the forward pass to predict, then the backward pass to update the parameters.
When the training was finished, my "model" was simply the parameters I had fitted, along with the 'forward pass' function - a function which uses those weights to predict a probability for any input data.
**You do not have to code it up like me**, you can do it however you like! The point of this part is for you to explore, code up all the equations, understand how to calculate the loss, and how to use that loss to update the parameters of the model by backpropagation.
**Debugging**: You're probably going to have issues particularly in the backprop section. You are welcome to make use of the `scipy.optimize.check_grad()` function. This takes as input a function f, g: a function that is (supposed to be) the function's derivative.
If you didn't watch it already, now is a great time to take 10 minutes and watch [Backpropagation calculus | Appendix to deep learning chapter 3](https://www.youtube.com/watch?v=tIeHLnjs5U8)
#### ===== What you actually need to do for this question! =====
Write a training loop which uses gradient descent to learn the parameters. Each iteration of the loop is called an **epoch**. Run your code for *no more than 100 epochs*. You should be able to achieve 100% accuracy on this problem.
In this case, for simplicity, you may initialise the weights to be samples from a normal distribution mean 0 variance 1, but please note that this [is not necessarily good practice](https://intoli.com/blog/neural-network-initialization/)!
**Do not code up a grid search for the learning rate hyperparameter**. You may instead play with the learning rate manually until you are happy. Try small values first like 0.0001 (if your backprop code is correct you **should** see your cost decreasing every epoch). Since this problem is so simple, a range of values should work. Again, with real data, you *must* do a search over hyperparameters, but here we are focussed on *coding* a working model.
To test whether or not what you have written has worked, please output the following:
1. After the training loop:
1. plot a graph of training and validation loss against epoch number
1. print or plot the final parameters you have learned using a Hinton diagram - feel free to use [code you can find online](http://bfy.tw/F74s)
1. pick one weight parameter and produce a plot of its value against epoch number
* Extension: do that for all the weights **leaving one specific input node** (i.e. the weights for one pixel of the input data)
1. use your model to:
1. print a few of the validation data examples and their predicted probabilities
1. print the output for a T and C with no noise (you can make that input data yourself)
1. print the output of a few random binary vectors i.e. 9x1 vectors of only 0s and 1s (again, you can make that input data yourself)
1. Within the training loop:
1. print the training and validation crossentropy loss **and** percentage accuracy every epoch
1. save the value of the training and validation losses for every epoch [for the plot after the loop]
1. save the value of a weight parameter of your choice [for the plot after the loop]
#### ===== Example outputs =====
Below I give you some examples of what I'd like you to produce. **I produced these using a learning rate of 0.003, 100 epochs, and weights initialised with N(0,1) with a random seed of 42**. I found that you could learn faster i.e. you can use a larger learning rate, but I wanted to make smooth plots for you.
You don't need to produce plots exactly like this, you can do them how you like, but try and display the same information. You can also use my plots for checking (if you use the same settings as me).
##### 1A
##### 1B
##### 1C
##### 1D
##### 2A
```python
# Your code goes here
```
### ========== Question 3.2 ==========
Did you need a network this large to do this classification task? Give the values for the parameters of a network with no hidden layers, one output node, and an output activation function of a sigmoid that would get 100% accuracy. This network only has 9 parameters.
*Your answer goes here*
### ========== Question 3.3 ==========
You should recognise the model described in question 3.2. What is it?
*Your answer goes here*
### ========== Question 3.4 ==========
Why did I create input data, `X`, that was between [-1, 1] i.e. why wasn't it between [0, 1] like normal?! Would the model specified in question 3.1 above have worked if `X` was in [0, 1]? Explain why or why not.
*Hint: if you're stuck, you can try it out by generating some new data and trying to fit it.*
*Your answer goes here*
### ========== Question 3.5 [EXTENSION] ==========
Create a dataset which makes the problem harder. Have a look at the dataset generation code. You can use the arguments to create data with:
* more letters (make the problem a multiclass classification)
* You'll need to implement the multiclass version of the sigmoid for the output activation function - [the softmax](https://en.wikipedia.org/wiki/Softmax_function) (and of course it's derivative)
* increase the noise on the data
Some other things you could implement:
* include rotated letters in the data
* make larger data (bigger than 3x3)
* make the letters non-centred e.g. 5x5 data with 3x3 letters in 1 of 9 different places
You'll probably need to adapt the code you wrote in 3.1, but you can probably copy and paste most of it. For an additional challenge: introduce [bias parameters](http://neuralnetworksanddeeplearning.com/chap1.html) and create your `X` data in range [0, 1] (i.e. set the bounds argument to [0, 1])...
Some other things to try if you get code happy:
* Implement stochastic gradient descent updates (updating parameters every training example, as opposed to every epoch) - tip: randomise data order each epoch
* Implement batch gradient descent updates - tip: randomise data order each epoch
**Requirements**:
1. Describe the modelling problem and your input data. Plot some examples of the data
1. Write down the model specification (I should be able to reproduce your model with this description):
* number of nodes in each layer
* a description of the parameters to learn (and a total number of parameters)
* the activation functions used for each layer
* cost function used
1. All the outputs asked for in Question 3.1: loss per epoch plot, final parameters, a weight against epoch plot, and example predictions
*Your answer goes here*
```python
# Your code goes here
```
## Part 4 - Implementation with Sklearn
### ========== Question 4.1 ==========
If you did Question 3.1, this should be a breeze! Use the same data and perform the same modelling task. This time you can use Sklearn's Neural Network object [MLPClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html#sklearn.neural_network.MLPClassifier).
Before you begin, read the [introduction](http://scikit-learn.org/stable/modules/neural_networks_supervised.html) (sections 1.17.1 and 1.17.2 at a minimum, 1.17.5, 1.17.6, 1.17.7 are recommended).
```python
# Your code goes here
```
### ========== Question 4.2 ==========
The learned parameters are stored in the fitted sklearn [MLPClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html#sklearn.neural_network.MLPClassifier) object **as two separate attributes**.
1. Print the parameters learned by your fitted model
1. Print the total number of parameters learned
Look at the number of parameters described in question 3.1 (you do not need to have done this question 3.1 - just read its description). Below the code:
1. Explain why the number of parameters learned by sklearn is different from the number specified in 3.1?
```python
# Your code goes here
```
*Your answer goes here*
# [Please sir...I want some more](https://www.youtube.com/watch?v=Ex2r86G0sdc)
Well done, you successfully covered the basics of Neural Networks!
If you enjoyed this lab, you'll love another course @ Edinburgh: [Machine Learning Practical](https://github.com/CSTR-Edinburgh/mlpractical). Check it out.
### Next steps
The first thing to do, if you haven't already, is do the extension question 3.5. **In particular, you should implement bias parameters in your model code**.
Next, go back to the very top of the notebook where I detail things I will not cover. Pick some words you don't understand (perhaps along with the keyword 'example' or 'introduction') and have fun reading/watching some tutorials about them online. Code up what you have learned; if you can code it up without peeking, you know you have understood it very well indeed. Another good "starter for 10" google is "a review of neural networks for [images|text|music|bat detection|captioning images|generation|...]".
Here are some things that you might find fun to read:
* [Visualising networks learning](http://playground.tensorflow.org/#activation=tanh&batchSize=10&dataset=circle®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=5&networkShape=3&seed=0.42978&showTestData=false&discretize=false&percTrainData=50&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false)
* [Trying to understand what features are learned by Deep Nets](https://distill.pub/2017/feature-visualization/)
* [Modelling sound waves](https://deepmind.com/blog/wavenet-generative-model-raw-audio/)
* ...and using that to [encode instruments](https://magenta.tensorflow.org/nsynth)
* An [Introduction to LSTMs](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) and their [unreasonable effectiveness](http://karpathy.github.io/2015/05/21/rnn-effectiveness/)
* How to encode the entire meaning of a word [in a few numbers](http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/)
* [Convolutions for text data?!](http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/)
### Learning resources
Also:
* [there](http://ufldl.stanford.edu/tutorial/supervised/MultiLayerNeuralNetworks/)
* [are](http://neuralnetworksanddeeplearning.com/chap1.html)
* [literally](https://www.coursera.org/learn/machine-learning)
* [so](https://www.coursera.org/learn/neural-networks)
* [many](http://deeplearning.net/)
* [learning](http://datasciencemasters.org/)
* [resources](https://metacademy.org/graphs/concepts/backpropagation)
* [online!](http://www.deeplearningbook.org/)
(about neural nets etc.)
In all seriousness, make sure you check out [metacademy](https://metacademy.org/). You can search for a topic and it gives you a list of free resources, an estimated time you need to understand it, and prerequisite topics.
# Attributions
Parts of this lab were inspired by D. E. Rumelhart, G. E. Hinton, and R. J. Williams, Parallel distributed processing: Explorations
in the microstructure of cognition, vol. 1, MIT Press, Cambridge, MA, USA, 1986,
pp. 318–362.
Thanks also to:
* [3 Blue 1 Brown](https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw)
* [Michael Nielsen](http://neuralnetworksanddeeplearning.com)
* [Christopher Olah](http://colah.github.io/)
for producing some excellent visualisations and learning resources and providing them free of charge.
Additionally, many thanks to the developers of open source software, in particular:
* [Numpy](http://www.numpy.org/)
* [Scipy](https://www.scipy.org/)
* [Sklearn](http://scikit-learn.org/stable/)
* [Matplotlib](https://matplotlib.org/)
* [Jupyter](http://jupyter.org/)
* and of course [Python](https://www.python.org/) itself!
your work is invaluable and appreciated.
# Credits
This lab was created by [James Owers](https://jamesowers.github.io/) in November 2017 and reviewed by [Patric Fulop](https://www.inf.ed.ac.uk/people/students/Patric_Fulop.html).
|
{"hexsha": "dc40600618bddbce6df2462713976c0c11fd42c4", "size": 44364, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "10_Lab_5_Neural_Networks.ipynb", "max_stars_repo_name": "CaesarZhang070497/Iaml", "max_stars_repo_head_hexsha": "cb13d2aa50c37563d50eaf380542578994effd91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2017-09-18T13:14:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T13:52:25.000Z", "max_issues_repo_path": "10_Lab_5_Neural_Networks.ipynb", "max_issues_repo_name": "pyian/iaml2017", "max_issues_repo_head_hexsha": "cb13d2aa50c37563d50eaf380542578994effd91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-09-22T14:27:50.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-23T09:57:59.000Z", "max_forks_repo_path": "10_Lab_5_Neural_Networks.ipynb", "max_forks_repo_name": "pyian/iaml2017", "max_forks_repo_head_hexsha": "cb13d2aa50c37563d50eaf380542578994effd91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 91, "max_forks_repo_forks_event_min_datetime": "2017-09-18T15:42:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T16:19:21.000Z", "avg_line_length": 36.9084858569, "max_line_length": 522, "alphanum_fraction": 0.5992471373, "converted": true, "num_tokens": 7976}
|
# coding: utf-8
# @时间 : 2022/1/18 2:09 下午
# @作者 : 文山
# @邮箱 : wolaizhinidexin@163.com
# @作用 :
# @文件 : model.py
# @微信 :qwentest123
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras import Model, Sequential, layers
from tensorflow.keras import Model
import time, json, os
from typing import Union
def correct_pad(input_size: Union[int, tuple], kernel_size: int):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Arguments:
input_size: Input tensor size.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
if isinstance(input_size, int):
input_size = (input_size, input_size)
kernel_size = (kernel_size, kernel_size)
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return ((correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]))
def MBConv(inputs, activation: str = 'swish',
name: str = "", input_channel: int = 32,
output_channel: int = 16, kernel_size: int = 3,
strides: int = 1, expand_ratio: int = 1, use_se: bool = True,
se_ration: float = 1 / 4.,
drop_rate=0.
):
# 第1个卷积核是输入特征矩阵channel的n的倍数.
filters = input_channel * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(filters=filters, kernel_size=1, strides=1, padding='same', use_bias=False,
name=name + "/expand_conv")(inputs)
else:
x = inputs
if strides == 2:
# 对图像进行补0对齐
x = layers.ZeroPadding2D(padding=correct_pad(filters, kernel_size),
name=name + "dwconv_pad")(x)
x = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides,
padding='same' if strides == 1 else 'valid',
use_bias=False,
name=name + "dwconv"
)(x)
x = layers.BatchNormalization(name=name + "bn")(x)
x = layers.Activation(activation, name=name + "activation")(x)
if use_se:
filters_se = int(input_channel * se_ration)
se = layers.GlobalAveragePooling2D(name=name + "se")(x)
se = layers.Reshape((1, 1, filters), name=name + "se_reshape")(se)
# 第一个fc使用的是swish激活函数
se = layers.Conv2D(filters=filters_se,
kernel_size=1,
padding="same",
activation=activation,
name=name + "se_reduce")(se)
# 第二个fc使用的是sigmoid函数
se = layers.Conv2D(filters=filters,
kernel_size=1,
padding="same",
activation="sigmoid",
name=name + "se_expand")(se)
x = layers.multiply([x, se], name=name + "se_excite")
# 再经过一个1x1的卷积
x = layers.Conv2D(filters=output_channel,
kernel_size=1,
padding="same",
use_bias=False,
name=name + "project_conv")(x)
x = layers.BatchNormalization(name=name + "project_bn")(x)
# 只有s=1,并且输入通道与输出通道数相同时,才使用残差结构
# 并且只有在shortcut时才使用dropout
if strides == 1 and input_channel == output_channel:
if drop_rate > 0:
x = layers.Dropout(rate=drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x)
x = layers.add([x, inputs], name=name + "add")
# MBconv V2之后,直接就是线性输出,不再经过relu函数
return x
import math
def efficient_net(width_coefficient, depth_coefficient, input_shape=(224, 224, 3),
dropout_rate=0.2, drop_connect_rate=0.2, activation="swish",
model_name="efficientnet", num_class=4
):
# kernel_size, repeats, in_channel, out_channel, exp_ratio, strides, SE
block_args = [[3, 1, 32, 16, 1, 1, False],
[3, 2, 16, 24, 6, 2, False],
[5, 2, 24, 40, 6, 2, False],
[3, 3, 40, 80, 6, 2, True],
[5, 3, 80, 112, 6, 1, True],
[5, 4, 112, 192, 6, 2, True],
[3, 1, 192, 320, 6, 1, True]]
# 以下两个函数是用来确定,宽度、深度系数之后,能够得到一个整数
def round_filters(filters, divisor=8):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
image_input = layers.Input(shape=input_shape)
# 第一个卷积
x = layers.ZeroPadding2D(padding=correct_pad(input_shape[:2], 3),
name="stem_conv_pad")(image_input)
x = layers.Conv2D(filters=round_filters(32),
kernel_size=3,
strides=2,
padding="valid",
use_bias=False,
name="stem_conv")(x)
x = layers.BatchNormalization(name="stem_bn")(x)
x = layers.Activation(activation, name="stem_activation")(x)
# 开始构建block
b = 0
num_blocks = float(sum(round_repeats(i[1]) for i in block_args)) # 7
for i, args in enumerate(block_args):
assert args[1] > 0
# 需要保证输入的通道数是8的倍数
args[2] = round_filters(args[2]) # input_channel
args[3] = round_filters(args[3]) # output_channel
for j in range(round_repeats(args[1])):
# 重复的次数
# drop_connect_rate * b / num_blocks,实现线性dropout
x = MBConv(x, activation=activation, drop_rate=drop_connect_rate * b / num_blocks,
name='block{}{}'.format(i + 1, j),
kernel_size=args[0],
input_channel=args[2] if j == 0 else args[3], # 第1次重复时,使用args[2]的通道数,再次重复时使用输出通道数
output_channel=args[3],
expand_ratio=args[4],
strides=args[5] if j == 0 else 1,
use_se=args[6]
)
b += 1
# last
x = layers.Conv2D(filters=1280,
kernel_size=1,
strides=1,
padding="same",
use_bias=False,
name="last_conv")(x)
x = layers.BatchNormalization(name='lastBn')(x)
x = layers.Activation(activation, name='last_activation')(x)
x = layers.GlobalAveragePooling2D(name='global_avg_pool_last')(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name="top_dropout")(x)
x = layers.Dense(units=num_class,
activation="softmax",
name="predictions")(x)
model = Model(inputs=image_input, outputs=x, name=model_name)
return model
def efficientnet_b0(num_class=4,
input_shape=(224, 224, 3)):
# https://storage.googleapis.com/keras-applications/efficientnetb0.h5
return efficient_net(width_coefficient=1.0,
depth_coefficient=1.0,
input_shape=input_shape,
dropout_rate=0.2,
model_name="efficientnetb0",
num_class=num_class)
if __name__ == "__main__":
model = efficientnet_b0(num_class=4)
model.build([4, 224, 224, 3])
print(model.summary())
|
{"hexsha": "c3530b1153a221ebd0d40e23f8c01b56127cbd76", "size": 7707, "ext": "py", "lang": "Python", "max_stars_repo_path": "EfficientNet/model.py", "max_stars_repo_name": "qwentest/qwenAILearn", "max_stars_repo_head_hexsha": "c94e10417da9c5cd8e14e22bdcc884fb9142be68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-01-20T01:50:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T02:24:25.000Z", "max_issues_repo_path": "EfficientNet/model.py", "max_issues_repo_name": "HeJinBuLangYa/qwenAILearn", "max_issues_repo_head_hexsha": "ea2e86ea70609be6c1f6baac6704ff0d677ef9c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EfficientNet/model.py", "max_forks_repo_name": "HeJinBuLangYa/qwenAILearn", "max_forks_repo_head_hexsha": "ea2e86ea70609be6c1f6baac6704ff0d677ef9c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2022-01-20T01:51:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T02:09:26.000Z", "avg_line_length": 39.5230769231, "max_line_length": 105, "alphanum_fraction": 0.550149215, "include": true, "reason": "import numpy", "num_tokens": 2073}
|
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import networkx as nx
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from copy import deepcopy
sys.path.append('%s/../common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args
sys.path.append('%s/../node_classification' % os.path.dirname(os.path.realpath(__file__)))
from node_utils import load_txt_data, load_binary_data, run_test, load_raw_graph, StaticGraph
from node_attack_common import load_base_model, ModifiedGraph, init_setup
def propose_add(grad):
idxes = np.argsort(grad)
added = []
mod = ModifiedGraph()
for p in idxes:
x = p // len(StaticGraph.graph)
y = p % len(StaticGraph.graph)
if x == y or x in dict_of_lists[y] or y in dict_of_lists[x]:
continue
if cmd_args.n_hops > 0 and not x in khop_neighbors[y]:
continue
assert cmd_args.n_hops <= 0 or (x in khop_neighbors[y] and y in khop_neighbors[x])
mod.add_edge(x, y, 1.0)
if len(mod.directed_edges) >= cmd_args.num_mod:
break
if len(mod.directed_edges) < cmd_args.num_mod:
extra = None
else:
extra = mod.get_extra_adj()
adj = base_model.norm_tool.norm_extra(extra)
_, _, acc = base_model(features, Variable(adj), [idx], labels)
acc = acc.double().cpu().numpy()
return acc[0] < 1.0, mod
def propose_del(grad):
idxes = np.argsort(-grad)
added = []
mod = ModifiedGraph()
for p in idxes:
x = p // len(StaticGraph.graph)
y = p % len(StaticGraph.graph)
if x == y:
continue
if not x in dict_of_lists[y] or not y in dict_of_lists[x]:
continue
mod.add_edge(x, y, -1.0)
if len(mod.directed_edges) >= cmd_args.num_mod:
break
if len(mod.directed_edges) < cmd_args.num_mod:
extra = None
else:
extra = mod.get_extra_adj()
adj = base_model.norm_tool.norm_extra(extra)
_, _, acc = base_model(features, Variable(adj), [idx], labels)
acc = acc.double().cpu().numpy()
return acc[0] < 1.0, mod
if __name__ == '__main__':
random.seed(cmd_args.seed)
np.random.seed(cmd_args.seed)
torch.manual_seed(cmd_args.seed)
features, labels, _, idx_test, base_model, khop_neighbors = init_setup()
np_labels = labels.cpu().data.numpy()
method = propose_del
attacked = 0.0
pbar = tqdm(range(len(idx_test)))
ftxt = open('%s/%s-grad.txt' % (cmd_args.save_dir, cmd_args.dataset), 'w', 0)
dict_of_lists = load_raw_graph(cmd_args.data_folder + '/' + cmd_args.dataset, cmd_args.dataset)
_, _, all_acc = base_model(features, Variable(base_model.norm_tool.normed_adj), idx_test, labels)
all_acc = all_acc.cpu().numpy()
for pos in pbar:
if all_acc[pos] < 1.0:
attacked += 1
continue
idx = idx_test[pos]
fake_labels = labels.clone()
if cmd_args.targeted:
for i in range(cmd_args.num_class):
if i == np_labels[idx]:
continue
adj = Variable( base_model.norm_tool.normed_adj, requires_grad=True )
base_model.zero_grad()
fake_labels[idx] = i
_, loss, acc = base_model(features, adj, [idx], fake_labels)
loss.backward()
grad = adj.grad.data.cpu().numpy().flatten()
if method(grad)[0]:
attacked += 1
break
else:
adj = Variable( base_model.norm_tool.normed_adj, requires_grad=True )
base_model.zero_grad()
_, loss, acc = base_model(features, adj, [idx], labels)
loss = -loss
loss.backward()
grad = adj.grad.data.cpu().numpy().flatten()
succ, mod = method(grad)
if succ:
ftxt.write('%d: [%d %d]\n' % (idx, mod.directed_edges[0][0], mod.directed_edges[0][1]))
attacked += 1
pbar.set_description('cur_attack: %.2f' % (attacked) )
ftxt.close()
print( '%.6f\n' % (1.0 - attacked / len(idx_test)) )
|
{"hexsha": "1126d37997241bdc2b9432076811d6e00a224494", "size": 4380, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/node_attack/node_grad_attack.py", "max_stars_repo_name": "HenryKenlay/graph_adversarial_attack", "max_stars_repo_head_hexsha": "5282d1269aa637ecafb0af239c53fa8396e5ef66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 119, "max_stars_repo_stars_event_min_datetime": "2018-06-29T10:03:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T05:20:46.000Z", "max_issues_repo_path": "code/node_attack/node_grad_attack.py", "max_issues_repo_name": "HenryKenlay/graph_adversarial_attack", "max_issues_repo_head_hexsha": "5282d1269aa637ecafb0af239c53fa8396e5ef66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-07-24T11:46:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-25T00:00:28.000Z", "max_forks_repo_path": "code/node_attack/node_grad_attack.py", "max_forks_repo_name": "HenryKenlay/graph_adversarial_attack", "max_forks_repo_head_hexsha": "5282d1269aa637ecafb0af239c53fa8396e5ef66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2018-08-21T05:54:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T13:02:06.000Z", "avg_line_length": 33.1818181818, "max_line_length": 103, "alphanum_fraction": 0.603196347, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1084}
|
import numpy as np
from IMLearn.base import BaseEstimator
from typing import Callable, NoReturn
from IMLearn.metrics.loss_functions import misclassification_error
class AdaBoost(BaseEstimator):
"""
AdaBoost class for boosting a specified weak learner
Attributes
----------
self.wl_: Callable[[], BaseEstimator]
Callable for obtaining an instance of type BaseEstimator
self.iterations_: int
Number of boosting iterations to perform
self.models_: List[BaseEstimator]
List of fitted estimators, fitted along the boosting iterations
"""
def __init__(self, wl: Callable[[], BaseEstimator], iterations: int):
"""
Instantiate an AdaBoost class over the specified base estimator
Parameters
----------
wl: Callable[[], BaseEstimator]
Callable for obtaining an instance of type BaseEstimator
iterations: int
Number of boosting iterations to perform
"""
super().__init__()
self.wl_ = wl
self.iterations_ = iterations
self.models_, self.weights_, self.D_ = None, None, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit an AdaBoost classifier over given samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
##init:
m = X.shape[0]
D = np.zeros([m])
D.fill(1/m)
self.models_= []
self.weights_ = []
## loop:
for iter in range(self.iterations_):
model_t = self.wl_()
model_t._fit(X, y* D)
vec_miss=y*model_t._predict(X)
vec_miss[vec_miss>0] = 0
epsilon = np.abs(float(np.sum(D*vec_miss)))
w_t = 0.5 * np.log((1/epsilon)-1)
D = D*(np.exp(-y*w_t*model_t._predict(X)))
D = D / np.sum(D)
self.weights_.append(w_t)
self.models_.append(model_t)
self.D_ = D
def _predict(self, X):
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
return self.partial_predict(X, self.iterations_)
vec_answer = np.zeros([X.shape[0]])
vec_sum = np.zeros([X.shape[0]])
for index in range(self.iterations_):
vec_sum+=self.models_[index]._predict(X)*self.weights_[index]
vec_answer[vec_sum>=0] = 1
vec_answer[vec_sum<0] = -1
return vec_answer
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under missclassification loss function
"""
return misclassification_error(y, self._predict(X))
def partial_predict(self, X: np.ndarray, T: int) -> np.ndarray:
"""
Predict responses for given samples using fitted estimators
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
T: int
The number of classifiers (from 1,...,T) to be used for prediction
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
vec_answer = np.zeros([X.shape[0]])
vec_sum = np.zeros([X.shape[0]])
for index in range(T):
vec_sum+=self.models_[index]._predict(X)*self.weights_[index]
vec_answer[vec_sum>=0] = 1
vec_answer[vec_sum<0] = -1
return vec_answer
temp = self.iterations_
self.iterations_=T
vec_answer = self._predict(X)
self.iterations_ = temp
return vec_answer
def partial_loss(self, X: np.ndarray, y: np.ndarray, T: int) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
T: int
The number of classifiers (from 1,...,T) to be used for prediction
Returns
-------
loss : float
Performance under missclassification loss function
"""
return misclassification_error(y, self.partial_predict(X,T))
|
{"hexsha": "b75f44cf16b6f0421ae5e047e3e3b279b71ffca5", "size": 5099, "ext": "py", "lang": "Python", "max_stars_repo_path": "IMLearn/metalearners/adaboost.py", "max_stars_repo_name": "DanitYanowsky/IML.HUJI", "max_stars_repo_head_hexsha": "391b661ede3fdbb72ecdf900c32df69445b3868b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "IMLearn/metalearners/adaboost.py", "max_issues_repo_name": "DanitYanowsky/IML.HUJI", "max_issues_repo_head_hexsha": "391b661ede3fdbb72ecdf900c32df69445b3868b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IMLearn/metalearners/adaboost.py", "max_forks_repo_name": "DanitYanowsky/IML.HUJI", "max_forks_repo_head_hexsha": "391b661ede3fdbb72ecdf900c32df69445b3868b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9941176471, "max_line_length": 78, "alphanum_fraction": 0.5689350853, "include": true, "reason": "import numpy", "num_tokens": 1120}
|
import datetime as dt
import tempfile
import numpy as np
from ravenpy.config.commands import LU
from ravenpy.models import BLENDED, BLENDED_OST
from ravenpy.utilities.testdata import get_local_testdata
from .common import _convert_2d
TS = get_local_testdata(
"raven-gr4j-cemaneige/Salmon-River-Near-Prince-George_meteo_daily.nc"
)
hru = BLENDED.ForestHRU(
area=4250.6, elevation=843.0, latitude=54.4848, longitude=-123.3659, slope=0.01234
)
lu = LU("FOREST", impermeable_frac=0.0, forest_coverage=0.02345)
class TestBLENDED:
def test_simple(self):
model = BLENDED()
params = (
2.930702e-02, # par_x01
2.211166e00, # par_x02
2.166229e00, # par_x03
0.0002254976, # feed 10**par_x04; not par_x4=-3.646858076
2.173976e01, # par_x05
1.565091e00, # par_x06
6.211146e00, # par_x07
9.313578e-01, # par_x08
3.486263e-02, # par_x09
0.251835, # feed par_x09+x10; not par_x10=0.21697237
0.0002279250, # feed 10**par_x11; not par_x11=-3.642208036
1.214339e00, # par_x12
4.736668e-02, # par_x13
0.2070342, # feed par_x13+x14; not par_x14=0.15966752
7.806324e-02, # par_x15
-1.336429e00, # par_x16
2.189741e-01, # par_x17
3.845617e00, # par_x18
2.950022e-01, # par_x19
4.827523e-01, # par_x20
4.099820e00, # par_x21
1.283144e01, # par_x22
5.937894e-01, # par_x23
1.651588e00, # par_x24
1.705806, # feed par_x24+x25; not par_x25=0.054218
3.719308e-01, # par_x26
7.121015e-02, # par_x27
1.906440e-02, # par_x28
4.080660e-01, # par_x29
9.415693e-01, # par_x30
-1.856108e00, # par_x31
2.356995e00, # par_x32
1.0e00, # par_x33
1.0e00, # par_x34
7.510967e-03, # par_x35
5.321608e-01, # par_r01
2.891977e-02, # par_r02
9.605330e-01, # par_r03
6.128669e-01, # par_r04
9.558293e-01, # par_r05
1.008196e-01, # par_r06
9.275730e-02, # par_r07
7.469583e-01, # par_r08
)
model(
TS,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
hrus=(hru,),
land_use_classes=(lu,),
params=params,
suppress_output=True,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -0.913785, 4)
class TestBLENDED_OST:
def test_simple(self):
model = BLENDED_OST()
params = (
2.930702e-02, # par_x01
2.211166e00, # par_x02
2.166229e00, # par_x03
0.0002254976, # feed 10**par_x04; not par_x4=-3.646858076
2.173976e01, # par_x05
1.565091e00, # par_x06
6.211146e00, # par_x07
9.313578e-01, # par_x08
3.486263e-02, # par_x09
0.251835, # feed par_x09+x10; not par_x10=0.21697237
0.0002279250, # feed 10**par_x11; not par_x11=-3.642208036
1.214339e00, # par_x12
4.736668e-02, # par_x13
0.2070342, # feed par_x13+x14; not par_x14=0.15966752
7.806324e-02, # par_x15
-1.336429e00, # par_x16
2.189741e-01, # par_x17
3.845617e00, # par_x18
2.950022e-01, # par_x19
4.827523e-01, # par_x20
4.099820e00, # par_x21
1.283144e01, # par_x22
5.937894e-01, # par_x23
1.651588e00, # par_x24
1.705806, # feed par_x24+x25; not par_x25=0.054218
3.719308e-01, # par_x26
7.121015e-02, # par_x27
1.906440e-02, # par_x28
4.080660e-01, # par_x29
9.415693e-01, # par_x30
-1.856108e00, # par_x31
2.356995e00, # par_x32
1.110496e00, # par_x33
1.042556e00, # par_x34
7.510967e-03, # par_x35
5.321608e-01, # par_r01
2.891977e-02, # par_r02
9.605330e-01, # par_r03
6.128669e-01, # par_r04
9.558293e-01, # par_r05
1.008196e-01, # par_r06
9.275730e-02, # par_r07
7.469583e-01, # par_r08
)
low = (
0.0, # par_x01
0.1, # par_x02
0.5, # par_x03
-5.0, # 10**par_x04
0.0, # par_x05
0.5, # par_x06
5.0, # par_x07
0.0, # par_x08
0.0, # par_x09
0.0, # par_x09+x10
-5.0, # 10**par_x11
0.5, # par_x12
0.0, # par_x13
0.01, # par_x13+x14
0.005, # par_x15
-5.0, # par_x16
0.0, # par_x17
0.0, # par_x18
0.0, # par_x19
0.3, # par_x20
0.01, # par_x21
0.5, # par_x22
0.15, # par_x23
1.5, # par_x24
0.0, # par_x24+x25
-1.0, # par_x26
0.01, # par_x27
0.00001, # par_x28
0.0, # par_x29
0.0, # par_x30
-3.0, # par_x31
0.5, # par_x32
0.8, # par_x33
0.8, # par_x34
0.0, # par_x35
0.0, # par_r01
0.0, # par_r02
0.0, # par_r03
0.0, # par_r04
0.0, # par_r05
0.0, # par_r06
0.0, # par_r07
0.0, # par_r08
)
high = (
1.0, # par_x01
3.0, # par_x02
3.0, # par_x03
-1.0, # 10**par_x04
100.0, # par_x05
2.0, # par_x06
10.0, # par_x07
3.0, # par_x08
0.05, # par_x09
0.45, # par_x09+x10
-2.0, # 10**par_x11
2.0, # par_x12
0.1, # par_x13
0.3, # par_x13+x14
0.1, # par_x15
2.0, # par_x16
1.0, # par_x17
5.0, # par_x18
0.4, # par_x19
20.0, # par_x20
5.0, # par_x21
13.0, # par_x22
1.5, # par_x23
3.0, # par_x24
5.0, # par_x24+x25
1.0, # par_x26
0.2, # par_x27
0.02, # par_x28
0.5, # par_x29
2.0, # par_x30
3.0, # par_x31
4.0, # par_x32
1.2, # par_x33
1.2, # par_x34
0.02, # par_x35
1.0, # par_r01
1.0, # par_r02
1.0, # par_r03
1.0, # par_r04
1.0, # par_r05
1.0, # par_r06
1.0, # par_r07
1.0, # par_r08
)
model.configure(
get_local_testdata("ostrich-gr4j-cemaneige/OstRandomNumbers.txt")
)
model(
TS,
start_date=dt.datetime(1954, 1, 1),
duration=208,
hrus=(hru,),
land_use_classes=(lu,),
params=params,
lowerBounds=low,
upperBounds=high,
algorithm="DDS",
random_seed=0,
max_iterations=10,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -1.47169, 4)
# np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -1.51237, 4) # true when x33 and x34 set to 1.0 and range [1.0,1.0]
opt_para = model.optimized_parameters
opt_func = model.obj_func
expected_value = [
2.400718e-02,
1.516941e00,
2.759658e00,
-4.075778e00,
3.483657e01,
1.972741e00,
7.476240e00,
2.966089e00,
2.035387e-03,
3.379254e-01,
-4.579974e00,
6.867875e-01,
8.920417e-02,
1.681178e-01,
8.827804e-02,
-1.475022e00,
4.722976e-01,
4.528209e00,
2.273521e-01,
8.036873e00,
3.461021e00,
6.880423e00,
1.312190e00,
2.752630e00,
1.515566e00,
-1.499868e-01,
6.437522e-02,
1.013312e-02,
1.089699e-01,
1.462368e00,
-1.620150e00,
3.619720e00,
1.130258e00,
1.020023e00,
1.622190e-02,
7.319023e-02,
1.081170e-01,
1.222980e-01,
4.622038e-01,
4.863545e-02,
3.800171e-01,
9.620678e-01,
7.091240e-01,
]
np.testing.assert_almost_equal(
opt_para,
expected_value,
4,
err_msg="calibrated parameter set is not matching expected value",
)
np.testing.assert_almost_equal(
opt_func,
1.47169,
4,
err_msg="calibrated NSE is not matching expected value",
)
blended = BLENDED()
blended(
TS,
start_date=dt.datetime(1954, 1, 1),
duration=208,
hrus=(hru,),
land_use_classes=(lu,),
params=model.calibrated_params,
)
np.testing.assert_almost_equal(
blended.diagnostics["DIAG_NASH_SUTCLIFFE"], d["DIAG_NASH_SUTCLIFFE"], 4
)
|
{"hexsha": "f836c4c15852706aa484113ee11e6b8097f32bbd", "size": 9775, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_blended.py", "max_stars_repo_name": "CSHS-CWRA/RavenPy", "max_stars_repo_head_hexsha": "279505d7270c3f796500f2cb992af1cd66dfb44c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-12-07T23:07:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T20:50:58.000Z", "max_issues_repo_path": "tests/test_blended.py", "max_issues_repo_name": "CSHS-CWRA/RavenPy", "max_issues_repo_head_hexsha": "279505d7270c3f796500f2cb992af1cd66dfb44c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 119, "max_issues_repo_issues_event_min_datetime": "2020-08-25T08:17:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T16:12:19.000Z", "max_forks_repo_path": "tests/test_blended.py", "max_forks_repo_name": "CSHS-CWRA/RavenPy", "max_forks_repo_head_hexsha": "279505d7270c3f796500f2cb992af1cd66dfb44c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-02T17:33:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-31T15:39:26.000Z", "avg_line_length": 30.2631578947, "max_line_length": 136, "alphanum_fraction": 0.4607672634, "include": true, "reason": "import numpy", "num_tokens": 3648}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.